| problem_id<br>stringlengths 18-22 | source<br>stringclasses 1 value | task_type<br>stringclasses 1 value | in_source_id<br>stringlengths 13-58 | prompt<br>stringlengths 1.71k-18.9k | golden_diff<br>stringlengths 145-5.13k | verification_info<br>stringlengths 465-23.6k | num_tokens_prompt<br>int64 556-4.1k | num_tokens_diff<br>int64 47-1.02k |
|---|---|---|---|---|---|---|---|---|
gh_patches_debug_15389
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-381
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bioclim data missing hdr files when downloaded to path
retriever download Bioclim -p
</issue>
<code>
[start of scripts/bioclim_2pt5.py]
1 #retriever
2
3 """Retriever script for direct download of Bioclim data"""
4
5 from retriever.lib.templates import Script
6
7
8 class main(Script):
9 def __init__(self, **kwargs):
10 Script.__init__(self, **kwargs)
11 self.name = "Bioclim 2.5 Minute Climate Data"
12 self.shortname = "Bioclim"
13 self.ref = "http://worldclim.org/bioclim"
14 self.urls = {"climate": "http://biogeo.ucdavis.edu/data/climate/worldclim/1_4/grid/cur/bio_2-5m_bil.zip"}
15 self.description = "Bioclimatic variables that are derived from the monthly temperature and rainfall values in order to generate more biologically meaningful variables."
16 self.citation = "Hijmans, R.J., S.E. Cameron, J.L. Parra, P.G. Jones and A. Jarvis, 2005. Very high resolution interpolated climate surfaces for global land areas. International Journal of Climatology 25: 1965-1978."
17 self.tags = ["Data Type > Compilation"]
18
19 def download(self, engine=None, debug=False):
20 if engine.name != "Download Only":
21 raise Exception("The Bioclim dataset contains only non-tabular data files, and can only be used with the 'download only' engine.")
22 Script.download(self, engine, debug)
23 file_names = ["bio%s.bil" % file_num for file_num in range(1, 20)]
24 self.engine.download_files_from_archive(self.urls["climate"], file_names)
25 self.engine.register_files(file_names)
26
27 SCRIPT = main()
28
[end of scripts/bioclim_2pt5.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/bioclim_2pt5.py b/scripts/bioclim_2pt5.py
--- a/scripts/bioclim_2pt5.py
+++ b/scripts/bioclim_2pt5.py
@@ -20,8 +20,12 @@
if engine.name != "Download Only":
raise Exception("The Bioclim dataset contains only non-tabular data files, and can only be used with the 'download only' engine.")
Script.download(self, engine, debug)
- file_names = ["bio%s.bil" % file_num for file_num in range(1, 20)]
+ file_names = []
+ for file_num in range(1, 20):
+ for ext in (['bil', 'hdr']):
+ file_names += ["bio{0}.{1}".format(file_num, ext)]
self.engine.download_files_from_archive(self.urls["climate"], file_names)
self.engine.register_files(file_names)
SCRIPT = main()
+
|
{"golden_diff": "diff --git a/scripts/bioclim_2pt5.py b/scripts/bioclim_2pt5.py\n--- a/scripts/bioclim_2pt5.py\n+++ b/scripts/bioclim_2pt5.py\n@@ -20,8 +20,12 @@\n if engine.name != \"Download Only\":\n raise Exception(\"The Bioclim dataset contains only non-tabular data files, and can only be used with the 'download only' engine.\")\n Script.download(self, engine, debug)\n- file_names = [\"bio%s.bil\" % file_num for file_num in range(1, 20)]\n+ file_names = []\n+ for file_num in range(1, 20):\n+ for ext in (['bil', 'hdr']):\n+ file_names += [\"bio{0}.{1}\".format(file_num, ext)]\n self.engine.download_files_from_archive(self.urls[\"climate\"], file_names)\n self.engine.register_files(file_names)\n \n SCRIPT = main()\n+\n", "issue": "Bioclim data missing hdr files when downloaded to path\n retriever download Bioclim -p\n\n", "before_files": [{"content": "#retriever\n\n\"\"\"Retriever script for direct download of Bioclim data\"\"\"\n\nfrom retriever.lib.templates import Script\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.name = \"Bioclim 2.5 Minute Climate Data\"\n self.shortname = \"Bioclim\"\n self.ref = \"http://worldclim.org/bioclim\"\n self.urls = {\"climate\": \"http://biogeo.ucdavis.edu/data/climate/worldclim/1_4/grid/cur/bio_2-5m_bil.zip\"}\n self.description = \"Bioclimatic variables that are derived from the monthly temperature and rainfall values in order to generate more biologically meaningful variables.\"\n self.citation = \"Hijmans, R.J., S.E. Cameron, J.L. Parra, P.G. Jones and A. Jarvis, 2005. Very high resolution interpolated climate surfaces for global land areas. International Journal of Climatology 25: 1965-1978.\"\n self.tags = [\"Data Type > Compilation\"]\n \n def download(self, engine=None, debug=False):\n if engine.name != \"Download Only\":\n raise Exception(\"The Bioclim dataset contains only non-tabular data files, and can only be used with the 'download only' engine.\")\n Script.download(self, engine, debug)\n file_names = [\"bio%s.bil\" % file_num for file_num in range(1, 20)]\n self.engine.download_files_from_archive(self.urls[\"climate\"], file_names)\n self.engine.register_files(file_names)\n\nSCRIPT = main()\n", "path": "scripts/bioclim_2pt5.py"}]}
| 972 | 214 |
gh_patches_debug_64312
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1932
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.108
On the docket:
+ [x] Fix slow PEX boot time when there are many extras. #1929
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.107"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.107"
+__version__ = "2.1.108"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.107\"\n+__version__ = \"2.1.108\"\n", "issue": "Release 2.1.108\nOn the docket:\r\n+ [x] Fix slow PEX boot time when there are many extras. #1929\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.107\"\n", "path": "pex/version.py"}]}
| 620 | 98 |
gh_patches_debug_30746
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2445
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add aggregation temporality conversion for all point types
Can you add a todo for the other point types?
_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2380#discussion_r787074866_
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from abc import ABC, abstractmethod
16 from bisect import bisect_left
17 from dataclasses import replace
18 from logging import getLogger
19 from math import inf
20 from threading import Lock
21 from typing import Generic, List, Optional, Sequence, TypeVar
22
23 from opentelemetry.sdk._metrics.measurement import Measurement
24 from opentelemetry.sdk._metrics.point import (
25 AggregationTemporality,
26 Gauge,
27 Histogram,
28 PointT,
29 Sum,
30 )
31 from opentelemetry.util._time import _time_ns
32
33 _PointVarT = TypeVar("_PointVarT", bound=PointT)
34
35 _logger = getLogger(__name__)
36
37
38 class Aggregation(ABC, Generic[_PointVarT]):
39 def __init__(self):
40 self._lock = Lock()
41
42 @abstractmethod
43 def aggregate(self, measurement: Measurement) -> None:
44 pass
45
46 @abstractmethod
47 def collect(self) -> Optional[_PointVarT]:
48 pass
49
50
51 class SumAggregation(Aggregation[Sum]):
52 def __init__(
53 self,
54 instrument_is_monotonic: bool,
55 instrument_temporality: AggregationTemporality,
56 ):
57 super().__init__()
58
59 self._start_time_unix_nano = _time_ns()
60 self._instrument_temporality = instrument_temporality
61 self._instrument_is_monotonic = instrument_is_monotonic
62
63 if self._instrument_temporality is AggregationTemporality.DELTA:
64 self._value = 0
65 else:
66 self._value = None
67
68 def aggregate(self, measurement: Measurement) -> None:
69 with self._lock:
70 if self._value is None:
71 self._value = 0
72 self._value = self._value + measurement.value
73
74 def collect(self) -> Optional[Sum]:
75 """
76 Atomically return a point for the current value of the metric and
77 reset the aggregation value.
78 """
79 now = _time_ns()
80
81 if self._instrument_temporality is AggregationTemporality.DELTA:
82
83 with self._lock:
84 value = self._value
85 start_time_unix_nano = self._start_time_unix_nano
86
87 self._value = 0
88 self._start_time_unix_nano = now + 1
89
90 return Sum(
91 aggregation_temporality=AggregationTemporality.DELTA,
92 is_monotonic=self._instrument_is_monotonic,
93 start_time_unix_nano=start_time_unix_nano,
94 time_unix_nano=now,
95 value=value,
96 )
97
98 if self._value is None:
99 return None
100
101 return Sum(
102 aggregation_temporality=AggregationTemporality.CUMULATIVE,
103 is_monotonic=self._instrument_is_monotonic,
104 start_time_unix_nano=self._start_time_unix_nano,
105 time_unix_nano=now,
106 value=self._value,
107 )
108
109
110 class LastValueAggregation(Aggregation[Gauge]):
111 def __init__(self):
112 super().__init__()
113 self._value = None
114
115 def aggregate(self, measurement: Measurement):
116 with self._lock:
117 self._value = measurement.value
118
119 def collect(self) -> Optional[Gauge]:
120 """
121 Atomically return a point for the current value of the metric.
122 """
123 if self._value is None:
124 return None
125
126 return Gauge(
127 time_unix_nano=_time_ns(),
128 value=self._value,
129 )
130
131
132 class ExplicitBucketHistogramAggregation(Aggregation[Histogram]):
133 def __init__(
134 self,
135 boundaries: Sequence[float] = (
136 0.0,
137 5.0,
138 10.0,
139 25.0,
140 50.0,
141 75.0,
142 100.0,
143 250.0,
144 500.0,
145 1000.0,
146 ),
147 record_min_max: bool = True,
148 ):
149 super().__init__()
150 self._boundaries = tuple(boundaries)
151 self._bucket_counts = self._get_empty_bucket_counts()
152 self._min = inf
153 self._max = -inf
154 self._sum = 0
155 self._record_min_max = record_min_max
156 self._start_time_unix_nano = _time_ns()
157
158 def _get_empty_bucket_counts(self) -> List[int]:
159 return [0] * (len(self._boundaries) + 1)
160
161 def aggregate(self, measurement: Measurement) -> None:
162
163 value = measurement.value
164
165 if self._record_min_max:
166 self._min = min(self._min, value)
167 self._max = max(self._max, value)
168
169 self._sum += value
170
171 self._bucket_counts[bisect_left(self._boundaries, value)] += 1
172
173 def collect(self) -> Histogram:
174 """
175 Atomically return a point for the current value of the metric.
176 """
177 now = _time_ns()
178
179 with self._lock:
180 value = self._bucket_counts
181 start_time_unix_nano = self._start_time_unix_nano
182
183 self._bucket_counts = self._get_empty_bucket_counts()
184 self._start_time_unix_nano = now + 1
185
186 return Histogram(
187 start_time_unix_nano=start_time_unix_nano,
188 time_unix_nano=now,
189 bucket_counts=tuple(value),
190 explicit_bounds=self._boundaries,
191 aggregation_temporality=AggregationTemporality.DELTA,
192 sum=self._sum,
193 )
194
195
196 def _convert_aggregation_temporality(
197 previous_point: Optional[_PointVarT],
198 current_point: _PointVarT,
199 aggregation_temporality: AggregationTemporality,
200 ) -> _PointVarT:
201 """Converts `current_point` to the requested `aggregation_temporality`
202 given the `previous_point`.
203
204 `previous_point` must have `CUMULATIVE` temporality. `current_point` may
205 have `DELTA` or `CUMULATIVE` temporality.
206
207 The output point will have temporality `aggregation_temporality`. Since
208 `GAUGE` points have no temporality, they are returned unchanged.
209 """
210
211 current_point_type = type(current_point)
212
213 if current_point_type is Gauge:
214 return current_point
215
216 if previous_point is not None and type(previous_point) is not type(
217 current_point
218 ):
219 _logger.warning(
220 "convert_aggregation_temporality called with mismatched "
221 "point types: %s and %s",
222 type(previous_point),
223 current_point_type,
224 )
225
226 return current_point
227
228 if current_point_type is Sum:
229 if previous_point is None:
230 # Output CUMULATIVE for a synchronous instrument
231 # There is no previous value, return the delta point as a
232 # cumulative
233 return replace(
234 current_point, aggregation_temporality=aggregation_temporality
235 )
236 if previous_point.aggregation_temporality is not (
237 AggregationTemporality.CUMULATIVE
238 ):
239 raise Exception(
240 "previous_point aggregation temporality must be CUMULATIVE"
241 )
242
243 if current_point.aggregation_temporality is aggregation_temporality:
244 # Output DELTA for a synchronous instrument
245 # Output CUMULATIVE for an asynchronous instrument
246 return current_point
247
248 if aggregation_temporality is AggregationTemporality.DELTA:
249 # Output temporality DELTA for an asynchronous instrument
250 value = current_point.value - previous_point.value
251 output_start_time_unix_nano = previous_point.time_unix_nano
252
253 else:
254 # Output CUMULATIVE for a synchronous instrument
255 value = current_point.value + previous_point.value
256 output_start_time_unix_nano = previous_point.start_time_unix_nano
257
258 is_monotonic = (
259 previous_point.is_monotonic and current_point.is_monotonic
260 )
261
262 return Sum(
263 start_time_unix_nano=output_start_time_unix_nano,
264 time_unix_nano=current_point.time_unix_nano,
265 value=value,
266 aggregation_temporality=aggregation_temporality,
267 is_monotonic=is_monotonic,
268 )
269
270 return None
271
[end of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
@@ -193,6 +193,7 @@
)
+# pylint: disable=too-many-return-statements,too-many-branches
def _convert_aggregation_temporality(
previous_point: Optional[_PointVarT],
current_point: _PointVarT,
@@ -267,4 +268,46 @@
is_monotonic=is_monotonic,
)
+ if current_point_type is Histogram:
+ if previous_point is None:
+ return replace(
+ current_point, aggregation_temporality=aggregation_temporality
+ )
+ if previous_point.aggregation_temporality is not (
+ AggregationTemporality.CUMULATIVE
+ ):
+ raise Exception(
+ "previous_point aggregation temporality must be CUMULATIVE"
+ )
+
+ if current_point.aggregation_temporality is aggregation_temporality:
+ return current_point
+
+ if aggregation_temporality is AggregationTemporality.CUMULATIVE:
+ start_time_unix_nano = previous_point.start_time_unix_nano
+ sum_ = current_point.sum + previous_point.sum
+ bucket_counts = [
+ curr_count + prev_count
+ for curr_count, prev_count in zip(
+ current_point.bucket_counts, previous_point.bucket_counts
+ )
+ ]
+ else:
+ start_time_unix_nano = previous_point.time_unix_nano
+ sum_ = current_point.sum - previous_point.sum
+ bucket_counts = [
+ curr_count - prev_count
+ for curr_count, prev_count in zip(
+ current_point.bucket_counts, previous_point.bucket_counts
+ )
+ ]
+
+ return Histogram(
+ start_time_unix_nano=start_time_unix_nano,
+ time_unix_nano=current_point.time_unix_nano,
+ bucket_counts=bucket_counts,
+ explicit_bounds=current_point.explicit_bounds,
+ sum=sum_,
+ aggregation_temporality=aggregation_temporality,
+ )
return None
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n@@ -193,6 +193,7 @@\n )\n \n \n+# pylint: disable=too-many-return-statements,too-many-branches\n def _convert_aggregation_temporality(\n previous_point: Optional[_PointVarT],\n current_point: _PointVarT,\n@@ -267,4 +268,46 @@\n is_monotonic=is_monotonic,\n )\n \n+ if current_point_type is Histogram:\n+ if previous_point is None:\n+ return replace(\n+ current_point, aggregation_temporality=aggregation_temporality\n+ )\n+ if previous_point.aggregation_temporality is not (\n+ AggregationTemporality.CUMULATIVE\n+ ):\n+ raise Exception(\n+ \"previous_point aggregation temporality must be CUMULATIVE\"\n+ )\n+\n+ if current_point.aggregation_temporality is aggregation_temporality:\n+ return current_point\n+\n+ if aggregation_temporality is AggregationTemporality.CUMULATIVE:\n+ start_time_unix_nano = previous_point.start_time_unix_nano\n+ sum_ = current_point.sum + previous_point.sum\n+ bucket_counts = [\n+ curr_count + prev_count\n+ for curr_count, prev_count in zip(\n+ current_point.bucket_counts, previous_point.bucket_counts\n+ )\n+ ]\n+ else:\n+ start_time_unix_nano = previous_point.time_unix_nano\n+ sum_ = current_point.sum - previous_point.sum\n+ bucket_counts = [\n+ curr_count - prev_count\n+ for curr_count, prev_count in zip(\n+ current_point.bucket_counts, previous_point.bucket_counts\n+ )\n+ ]\n+\n+ return Histogram(\n+ start_time_unix_nano=start_time_unix_nano,\n+ time_unix_nano=current_point.time_unix_nano,\n+ bucket_counts=bucket_counts,\n+ explicit_bounds=current_point.explicit_bounds,\n+ sum=sum_,\n+ aggregation_temporality=aggregation_temporality,\n+ )\n return None\n", "issue": "Add aggregation temporality conversion for all point types\nCan you add a todo for the other point types?\r\n\r\n_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2380#discussion_r787074866_\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom bisect import bisect_left\nfrom dataclasses import replace\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, List, Optional, Sequence, TypeVar\n\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def 
collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass SumAggregation(Aggregation[Sum]):\n def __init__(\n self,\n instrument_is_monotonic: bool,\n instrument_temporality: AggregationTemporality,\n ):\n super().__init__()\n\n self._start_time_unix_nano = _time_ns()\n self._instrument_temporality = instrument_temporality\n self._instrument_is_monotonic = instrument_is_monotonic\n\n if self._instrument_temporality is AggregationTemporality.DELTA:\n self._value = 0\n else:\n self._value = None\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n if self._value is None:\n self._value = 0\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n if self._instrument_temporality is AggregationTemporality.DELTA:\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n if self._value is None:\n return None\n\n return Sum(\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=now,\n value=self._value,\n )\n\n\nclass LastValueAggregation(Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=self._value,\n )\n\n\nclass ExplicitBucketHistogramAggregation(Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[float] = (\n 0.0,\n 5.0,\n 10.0,\n 25.0,\n 50.0,\n 75.0,\n 100.0,\n 250.0,\n 500.0,\n 1000.0,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n self._boundaries = tuple(boundaries)\n self._bucket_counts = self._get_empty_bucket_counts()\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n\n def _get_empty_bucket_counts(self) -> List[int]:\n return [0] * (len(self._boundaries) + 1)\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n self._bucket_counts[bisect_left(self._boundaries, value)] += 1\n\n def collect(self) -> Histogram:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._bucket_counts\n start_time_unix_nano = self._start_time_unix_nano\n\n self._bucket_counts = self._get_empty_bucket_counts()\n self._start_time_unix_nano = now + 1\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n sum=self._sum,\n )\n\n\ndef _convert_aggregation_temporality(\n previous_point: Optional[_PointVarT],\n current_point: 
_PointVarT,\n aggregation_temporality: AggregationTemporality,\n) -> _PointVarT:\n \"\"\"Converts `current_point` to the requested `aggregation_temporality`\n given the `previous_point`.\n\n `previous_point` must have `CUMULATIVE` temporality. `current_point` may\n have `DELTA` or `CUMULATIVE` temporality.\n\n The output point will have temporality `aggregation_temporality`. Since\n `GAUGE` points have no temporality, they are returned unchanged.\n \"\"\"\n\n current_point_type = type(current_point)\n\n if current_point_type is Gauge:\n return current_point\n\n if previous_point is not None and type(previous_point) is not type(\n current_point\n ):\n _logger.warning(\n \"convert_aggregation_temporality called with mismatched \"\n \"point types: %s and %s\",\n type(previous_point),\n current_point_type,\n )\n\n return current_point\n\n if current_point_type is Sum:\n if previous_point is None:\n # Output CUMULATIVE for a synchronous instrument\n # There is no previous value, return the delta point as a\n # cumulative\n return replace(\n current_point, aggregation_temporality=aggregation_temporality\n )\n if previous_point.aggregation_temporality is not (\n AggregationTemporality.CUMULATIVE\n ):\n raise Exception(\n \"previous_point aggregation temporality must be CUMULATIVE\"\n )\n\n if current_point.aggregation_temporality is aggregation_temporality:\n # Output DELTA for a synchronous instrument\n # Output CUMULATIVE for an asynchronous instrument\n return current_point\n\n if aggregation_temporality is AggregationTemporality.DELTA:\n # Output temporality DELTA for an asynchronous instrument\n value = current_point.value - previous_point.value\n output_start_time_unix_nano = previous_point.time_unix_nano\n\n else:\n # Output CUMULATIVE for a synchronous instrument\n value = current_point.value + previous_point.value\n output_start_time_unix_nano = previous_point.start_time_unix_nano\n\n is_monotonic = (\n previous_point.is_monotonic and current_point.is_monotonic\n )\n\n return Sum(\n start_time_unix_nano=output_start_time_unix_nano,\n time_unix_nano=current_point.time_unix_nano,\n value=value,\n aggregation_temporality=aggregation_temporality,\n is_monotonic=is_monotonic,\n )\n\n return None\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}]}
| 3,237 | 513 |
gh_patches_debug_35624
|
rasdani/github-patches
|
git_diff
|
ResonantGeoData__ResonantGeoData-756
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
500 error on FMV Detail View page
I noticed this bug when trying to use the FMV module on Danesfield. I tested it on a fresh RGD instance and it still occured.
To reproduce -
- Run the `rgd_fmv_wasabi` management command to populate your DB with FMV data
- Attempt to navigate to `http://localhost:8000/rgd_fmv/<:id>/` and it will return a 500 error with the error `string indices must be integers
`.
500 error on `/rgd_fmv/{spatial_id}` endpoint
I noticed this bug when trying to use the FMV module on Danesfield. I tested it on a fresh RGD instance and it still occurred.
To reproduce -
- Run the `rgd_fmv_wasabi` management command to populate your DB with FMV data
- Hit the `/rgd_fmv/{spatial_id}` API endpoint and a 500 will be returned with an error ```AttributeError at /api/rgd_fmv/1
Got AttributeError when attempting to get a value for field `file` on serializer `FMVSerializer`.
The serializer field might be named incorrectly and not match any attribute or key on the `FMVMeta` instance.
Original exception text was: 'FMVMeta' object has no attribute 'file'.```
</issue>
<code>
[start of django-rgd-fmv/rgd_fmv/rest/viewsets.py]
1 from rest_framework.decorators import action
2 from rgd.rest.base import ModelViewSet
3 from rgd_fmv import models, serializers
4
5
6 class FMVViewSet(ModelViewSet):
7 queryset = models.FMVMeta.objects.all()
8
9 def get_serializer_class(self):
10 if self.action in ['get', 'list']:
11 return serializers.FMVMetaSerializer
12 return serializers.FMVSerializer
13
14 @action(detail=True, serializer_class=serializers.FMVMetaDataSerializer)
15 def data(self, request, *args, **kwargs):
16 return self.retrieve(request, *args, **kwargs)
17
[end of django-rgd-fmv/rgd_fmv/rest/viewsets.py]
[start of django-rgd-fmv/rgd_fmv/views.py]
1 import json
2
3 from rgd.views import SpatialDetailView
4
5 from . import models
6
7
8 class FMVMetaDetailView(SpatialDetailView):
9 model = models.FMVMeta
10
11 def get_context_data(self, *args, **kwargs):
12 context = super().get_context_data(*args, **kwargs)
13 context['frame_rate'] = json.dumps(self.object.fmv_file.frame_rate)
14 extents = context['extents']
15 if self.object.ground_union is not None:
16 # All or none of these will be set, only check one
17 extents['collect'] = self.object.ground_union.json
18 extents['ground_frames'] = self.object.ground_frames.json
19 extents['frame_numbers'] = self.object._blob_to_array(self.object.frame_numbers)
20 return context
21
[end of django-rgd-fmv/rgd_fmv/views.py]
[start of django-rgd-fmv/rgd_fmv/urls.py]
1 from django.urls import path
2 from rest_framework.routers import SimpleRouter
3 from rgd_fmv import models, views
4 from rgd_fmv.rest import viewsets
5
6 router = SimpleRouter(trailing_slash=False)
7 router.register(r'api/rgd_fmv', viewsets.FMVViewSet)
8
9 urlpatterns = [
10 # Pages
11 path(
12 'rgd_fmv/<int:pk>/',
13 views.FMVMetaDetailView.as_view(),
14 name=models.FMVMeta.detail_view_name,
15 ),
16 ] + router.urls
17
[end of django-rgd-fmv/rgd_fmv/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django-rgd-fmv/rgd_fmv/rest/viewsets.py b/django-rgd-fmv/rgd_fmv/rest/viewsets.py
--- a/django-rgd-fmv/rgd_fmv/rest/viewsets.py
+++ b/django-rgd-fmv/rgd_fmv/rest/viewsets.py
@@ -1,16 +1,17 @@
from rest_framework.decorators import action
-from rgd.rest.base import ModelViewSet
+from rgd.rest.base import ModelViewSet, ReadOnlyModelViewSet
from rgd_fmv import models, serializers
-class FMVViewSet(ModelViewSet):
+class FMVMetaViewSet(ReadOnlyModelViewSet):
queryset = models.FMVMeta.objects.all()
-
- def get_serializer_class(self):
- if self.action in ['get', 'list']:
- return serializers.FMVMetaSerializer
- return serializers.FMVSerializer
+ serializer_class = serializers.FMVMetaSerializer
@action(detail=True, serializer_class=serializers.FMVMetaDataSerializer)
def data(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
+
+
+class FMVViewSet(ModelViewSet):
+ queryset = models.FMV.objects.all()
+ serializer_class = serializers.FMVSerializer
diff --git a/django-rgd-fmv/rgd_fmv/urls.py b/django-rgd-fmv/rgd_fmv/urls.py
--- a/django-rgd-fmv/rgd_fmv/urls.py
+++ b/django-rgd-fmv/rgd_fmv/urls.py
@@ -4,7 +4,8 @@
from rgd_fmv.rest import viewsets
router = SimpleRouter(trailing_slash=False)
-router.register(r'api/rgd_fmv', viewsets.FMVViewSet)
+router.register(r'api/rgd_fmv', viewsets.FMVMetaViewSet, basename='fmv-meta')
+router.register(r'api/rgd_fmv/model', viewsets.FMVViewSet, basename='fmv')
urlpatterns = [
# Pages
diff --git a/django-rgd-fmv/rgd_fmv/views.py b/django-rgd-fmv/rgd_fmv/views.py
--- a/django-rgd-fmv/rgd_fmv/views.py
+++ b/django-rgd-fmv/rgd_fmv/views.py
@@ -11,10 +11,11 @@
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['frame_rate'] = json.dumps(self.object.fmv_file.frame_rate)
- extents = context['extents']
+ extents = json.loads(context['extents'])
if self.object.ground_union is not None:
# All or none of these will be set, only check one
extents['collect'] = self.object.ground_union.json
extents['ground_frames'] = self.object.ground_frames.json
extents['frame_numbers'] = self.object._blob_to_array(self.object.frame_numbers)
+ context['extents'] = json.dumps(extents)
return context
|
{"golden_diff": "diff --git a/django-rgd-fmv/rgd_fmv/rest/viewsets.py b/django-rgd-fmv/rgd_fmv/rest/viewsets.py\n--- a/django-rgd-fmv/rgd_fmv/rest/viewsets.py\n+++ b/django-rgd-fmv/rgd_fmv/rest/viewsets.py\n@@ -1,16 +1,17 @@\n from rest_framework.decorators import action\n-from rgd.rest.base import ModelViewSet\n+from rgd.rest.base import ModelViewSet, ReadOnlyModelViewSet\n from rgd_fmv import models, serializers\n \n \n-class FMVViewSet(ModelViewSet):\n+class FMVMetaViewSet(ReadOnlyModelViewSet):\n queryset = models.FMVMeta.objects.all()\n-\n- def get_serializer_class(self):\n- if self.action in ['get', 'list']:\n- return serializers.FMVMetaSerializer\n- return serializers.FMVSerializer\n+ serializer_class = serializers.FMVMetaSerializer\n \n @action(detail=True, serializer_class=serializers.FMVMetaDataSerializer)\n def data(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n+\n+\n+class FMVViewSet(ModelViewSet):\n+ queryset = models.FMV.objects.all()\n+ serializer_class = serializers.FMVSerializer\ndiff --git a/django-rgd-fmv/rgd_fmv/urls.py b/django-rgd-fmv/rgd_fmv/urls.py\n--- a/django-rgd-fmv/rgd_fmv/urls.py\n+++ b/django-rgd-fmv/rgd_fmv/urls.py\n@@ -4,7 +4,8 @@\n from rgd_fmv.rest import viewsets\n \n router = SimpleRouter(trailing_slash=False)\n-router.register(r'api/rgd_fmv', viewsets.FMVViewSet)\n+router.register(r'api/rgd_fmv', viewsets.FMVMetaViewSet, basename='fmv-meta')\n+router.register(r'api/rgd_fmv/model', viewsets.FMVViewSet, basename='fmv')\n \n urlpatterns = [\n # Pages\ndiff --git a/django-rgd-fmv/rgd_fmv/views.py b/django-rgd-fmv/rgd_fmv/views.py\n--- a/django-rgd-fmv/rgd_fmv/views.py\n+++ b/django-rgd-fmv/rgd_fmv/views.py\n@@ -11,10 +11,11 @@\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['frame_rate'] = json.dumps(self.object.fmv_file.frame_rate)\n- extents = context['extents']\n+ extents = json.loads(context['extents'])\n if self.object.ground_union is not None:\n # All or none of these will be set, only check one\n extents['collect'] = self.object.ground_union.json\n extents['ground_frames'] = self.object.ground_frames.json\n extents['frame_numbers'] = self.object._blob_to_array(self.object.frame_numbers)\n+ context['extents'] = json.dumps(extents)\n return context\n", "issue": "500 error on FMV Detail View page\nI noticed this bug when trying to use the FMV module on Danesfield. I tested it on a fresh RGD instance and it still occured.\r\n\r\nTo reproduce -\r\n- Run the `rgd_fmv_wasabi` management command to populate your DB with FMV data\r\n- Attempt to navigate to `http://localhost:8000/rgd_fmv/<:id>/` and it will return a 500 error with the error `string indices must be integers\r\n`.\n500 error on `/rgd_fmv/{spatial_id}` endpoint\nI noticed this bug when trying to use the FMV module on Danesfield. 
I tested it on a fresh RGD instance and it still occurred.\r\n\r\nTo reproduce -\r\n\r\n- Run the `rgd_fmv_wasabi` management command to populate your DB with FMV data\r\n- Hit the `/rgd_fmv/{spatial_id}` API endpoint and a 500 will be returned with an error ```AttributeError at /api/rgd_fmv/1\r\nGot AttributeError when attempting to get a value for field `file` on serializer `FMVSerializer`.\r\nThe serializer field might be named incorrectly and not match any attribute or key on the `FMVMeta` instance.\r\nOriginal exception text was: 'FMVMeta' object has no attribute 'file'.```\n", "before_files": [{"content": "from rest_framework.decorators import action\nfrom rgd.rest.base import ModelViewSet\nfrom rgd_fmv import models, serializers\n\n\nclass FMVViewSet(ModelViewSet):\n queryset = models.FMVMeta.objects.all()\n\n def get_serializer_class(self):\n if self.action in ['get', 'list']:\n return serializers.FMVMetaSerializer\n return serializers.FMVSerializer\n\n @action(detail=True, serializer_class=serializers.FMVMetaDataSerializer)\n def data(self, request, *args, **kwargs):\n return self.retrieve(request, *args, **kwargs)\n", "path": "django-rgd-fmv/rgd_fmv/rest/viewsets.py"}, {"content": "import json\n\nfrom rgd.views import SpatialDetailView\n\nfrom . import models\n\n\nclass FMVMetaDetailView(SpatialDetailView):\n model = models.FMVMeta\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['frame_rate'] = json.dumps(self.object.fmv_file.frame_rate)\n extents = context['extents']\n if self.object.ground_union is not None:\n # All or none of these will be set, only check one\n extents['collect'] = self.object.ground_union.json\n extents['ground_frames'] = self.object.ground_frames.json\n extents['frame_numbers'] = self.object._blob_to_array(self.object.frame_numbers)\n return context\n", "path": "django-rgd-fmv/rgd_fmv/views.py"}, {"content": "from django.urls import path\nfrom rest_framework.routers import SimpleRouter\nfrom rgd_fmv import models, views\nfrom rgd_fmv.rest import viewsets\n\nrouter = SimpleRouter(trailing_slash=False)\nrouter.register(r'api/rgd_fmv', viewsets.FMVViewSet)\n\nurlpatterns = [\n # Pages\n path(\n 'rgd_fmv/<int:pk>/',\n views.FMVMetaDetailView.as_view(),\n name=models.FMVMeta.detail_view_name,\n ),\n] + router.urls\n", "path": "django-rgd-fmv/rgd_fmv/urls.py"}]}
| 1,401 | 693 |
gh_patches_debug_7468
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1661
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OpenID payload cache uses the wrong cache key
The `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51
No matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.
OpenID payload cache uses the wrong cache key
The `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51
No matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.
</issue>
<code>
[start of kinto/plugins/openid/__init__.py]
1 import re
2
3 import requests
4 from pyramid import authentication as base_auth
5 from pyramid.interfaces import IAuthenticationPolicy
6 from zope.interface import implementer
7
8 from kinto.core import logger
9 from kinto.core import utils as core_utils
10 from kinto.core.openapi import OpenAPI
11
12 from .utils import fetch_openid_config
13
14
15 @implementer(IAuthenticationPolicy)
16 class OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):
17 def __init__(self, issuer, client_id, realm='Realm', **kwargs):
18 self.realm = realm
19 self.issuer = issuer
20 self.client_id = client_id
21 self.client_secret = kwargs.get('client_secret', '')
22 self.header_type = kwargs.get('header_type', 'Bearer')
23 self.userid_field = kwargs.get('userid_field', 'sub')
24 self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400))
25
26 # Fetch OpenID config (at instantiation, ie. startup)
27 self.oid_config = fetch_openid_config(issuer)
28
29 self._jwt_keys = None
30
31 def unauthenticated_userid(self, request):
32 """Return the userid or ``None`` if token could not be verified.
33 """
34 settings = request.registry.settings
35 hmac_secret = settings['userid_hmac_secret']
36
37 authorization = request.headers.get('Authorization', '')
38 try:
39 authmeth, access_token = authorization.split(' ', 1)
40 except ValueError:
41 return None
42
43 if authmeth.lower() != self.header_type.lower():
44 return None
45
46 # XXX JWT Access token
47 # https://auth0.com/docs/tokens/access-token#access-token-format
48
49 # Check cache if these tokens were already verified.
50 hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)
51 cache_key = 'openid:verify:%s'.format(hmac_tokens)
52 payload = request.registry.cache.get(cache_key)
53 if payload is None:
54 # This can take some time.
55 payload = self._verify_token(access_token)
56 if payload is None:
57 return None
58 # Save for next time / refresh ttl.
59 request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)
60 # Extract meaningful field from userinfo (eg. email or sub)
61 return payload.get(self.userid_field)
62
63 def forget(self, request):
64 """A no-op. Credentials are sent on every request.
65 Return WWW-Authenticate Realm header for Bearer token.
66 """
67 return [('WWW-Authenticate', '%s realm="%s"' % (self.header_type, self.realm))]
68
69 def _verify_token(self, access_token):
70 uri = self.oid_config['userinfo_endpoint']
71 # Opaque access token string. Fetch user info from profile.
72 try:
73 resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token})
74 resp.raise_for_status()
75 userprofile = resp.json()
76 return userprofile
77
78 except (requests.exceptions.HTTPError, ValueError, KeyError) as e:
79 logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e))
80 return None
81
82
83 def includeme(config):
84 # Activate end-points.
85 config.scan('kinto.plugins.openid.views')
86
87 settings = config.get_settings()
88
89 openid_policies = []
90 for k, v in settings.items():
91 m = re.match('multiauth\.policy\.(.*)\.use', k)
92 if m:
93 if v.endswith('OpenIDConnectPolicy'):
94 openid_policies.append(m.group(1))
95
96 if len(openid_policies) == 0:
97 # Do not add the capability if no policy is configured.
98 return
99
100 providers_infos = []
101 for name in openid_policies:
102 issuer = settings['multiauth.policy.%s.issuer' % name]
103 openid_config = fetch_openid_config(issuer)
104
105 client_id = settings['multiauth.policy.%s.client_id' % name]
106 header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer')
107
108 providers_infos.append({
109 'name': name,
110 'issuer': openid_config['issuer'],
111 'auth_path': '/openid/%s/login' % name,
112 'client_id': client_id,
113 'header_type': header_type,
114 'userinfo_endpoint': openid_config['userinfo_endpoint'],
115 })
116
117 OpenAPI.expose_authentication_method(name, {
118 'type': 'oauth2',
119 'authorizationUrl': openid_config['authorization_endpoint'],
120 })
121
122 config.add_api_capability(
123 'openid',
124 description='OpenID connect support.',
125 url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html',
126 providers=providers_infos)
127
[end of kinto/plugins/openid/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py
--- a/kinto/plugins/openid/__init__.py
+++ b/kinto/plugins/openid/__init__.py
@@ -48,7 +48,7 @@
# Check cache if these tokens were already verified.
hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)
- cache_key = 'openid:verify:%s'.format(hmac_tokens)
+ cache_key = 'openid:verify:{}'.format(hmac_tokens)
payload = request.registry.cache.get(cache_key)
if payload is None:
# This can take some time.
|
{"golden_diff": "diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py\n--- a/kinto/plugins/openid/__init__.py\n+++ b/kinto/plugins/openid/__init__.py\n@@ -48,7 +48,7 @@\n \n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n- cache_key = 'openid:verify:%s'.format(hmac_tokens)\n+ cache_key = 'openid:verify:{}'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n", "issue": "OpenID payload cache uses the wrong cache key\nThe `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51\r\nNo matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.\r\n\r\n\nOpenID payload cache uses the wrong cache key\nThe `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51\r\nNo matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.\r\n\r\n\n", "before_files": [{"content": "import re\n\nimport requests\nfrom pyramid import authentication as base_auth\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom kinto.core import logger\nfrom kinto.core import utils as core_utils\nfrom kinto.core.openapi import OpenAPI\n\nfrom .utils import fetch_openid_config\n\n\n@implementer(IAuthenticationPolicy)\nclass OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):\n def __init__(self, issuer, client_id, realm='Realm', **kwargs):\n self.realm = realm\n self.issuer = issuer\n self.client_id = client_id\n self.client_secret = kwargs.get('client_secret', '')\n self.header_type = kwargs.get('header_type', 'Bearer')\n self.userid_field = kwargs.get('userid_field', 'sub')\n self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400))\n\n # Fetch OpenID config (at instantiation, ie. startup)\n self.oid_config = fetch_openid_config(issuer)\n\n self._jwt_keys = None\n\n def unauthenticated_userid(self, request):\n \"\"\"Return the userid or ``None`` if token could not be verified.\n \"\"\"\n settings = request.registry.settings\n hmac_secret = settings['userid_hmac_secret']\n\n authorization = request.headers.get('Authorization', '')\n try:\n authmeth, access_token = authorization.split(' ', 1)\n except ValueError:\n return None\n\n if authmeth.lower() != self.header_type.lower():\n return None\n\n # XXX JWT Access token\n # https://auth0.com/docs/tokens/access-token#access-token-format\n\n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = 'openid:verify:%s'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n payload = self._verify_token(access_token)\n if payload is None:\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n\n def forget(self, request):\n \"\"\"A no-op. 
Credentials are sent on every request.\n Return WWW-Authenticate Realm header for Bearer token.\n \"\"\"\n return [('WWW-Authenticate', '%s realm=\"%s\"' % (self.header_type, self.realm))]\n\n def _verify_token(self, access_token):\n uri = self.oid_config['userinfo_endpoint']\n # Opaque access token string. Fetch user info from profile.\n try:\n resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token})\n resp.raise_for_status()\n userprofile = resp.json()\n return userprofile\n\n except (requests.exceptions.HTTPError, ValueError, KeyError) as e:\n logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e))\n return None\n\n\ndef includeme(config):\n # Activate end-points.\n config.scan('kinto.plugins.openid.views')\n\n settings = config.get_settings()\n\n openid_policies = []\n for k, v in settings.items():\n m = re.match('multiauth\\.policy\\.(.*)\\.use', k)\n if m:\n if v.endswith('OpenIDConnectPolicy'):\n openid_policies.append(m.group(1))\n\n if len(openid_policies) == 0:\n # Do not add the capability if no policy is configured.\n return\n\n providers_infos = []\n for name in openid_policies:\n issuer = settings['multiauth.policy.%s.issuer' % name]\n openid_config = fetch_openid_config(issuer)\n\n client_id = settings['multiauth.policy.%s.client_id' % name]\n header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer')\n\n providers_infos.append({\n 'name': name,\n 'issuer': openid_config['issuer'],\n 'auth_path': '/openid/%s/login' % name,\n 'client_id': client_id,\n 'header_type': header_type,\n 'userinfo_endpoint': openid_config['userinfo_endpoint'],\n })\n\n OpenAPI.expose_authentication_method(name, {\n 'type': 'oauth2',\n 'authorizationUrl': openid_config['authorization_endpoint'],\n })\n\n config.add_api_capability(\n 'openid',\n description='OpenID connect support.',\n url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html',\n providers=providers_infos)\n", "path": "kinto/plugins/openid/__init__.py"}]}
| 2,046 | 145 |
gh_patches_debug_15374
|
rasdani/github-patches
|
git_diff
|
avocado-framework__avocado-4175
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: Review and test examples on user's guide (section: Introduction)
</issue>
<code>
[start of avocado/core/parser.py]
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9 #
10 # See LICENSE for more details.
11 #
12 # Copyright: Red Hat Inc. 2013-2014
13 # Author: Ruda Moura <[email protected]>
14
15 """
16 Avocado application command line parsing.
17 """
18
19 import argparse
20 from configparser import ConfigParser, NoOptionError
21 from glob import glob
22
23 from . import exit_codes
24 from .nrunner import Runnable
25 from .output import LOG_UI
26 from .resolver import ReferenceResolution, ReferenceResolutionResult
27 from .settings import ConfigFileNotFound, SettingsError, settings
28 from .version import VERSION
29
30 PROG = 'avocado'
31 DESCRIPTION = 'Avocado Test Runner'
32
33
34 class ArgumentParser(argparse.ArgumentParser):
35
36 """
37 Class to override argparse functions
38 """
39
40 def error(self, message):
41 LOG_UI.debug(self.format_help())
42 LOG_UI.error("%s: error: %s", self.prog, message)
43 if "unrecognized arguments" in message:
44 LOG_UI.warning("Perhaps a plugin is missing; run 'avocado"
45 " plugins' to list the installed ones")
46 self.exit(exit_codes.AVOCADO_FAIL)
47
48 def _get_option_tuples(self, option_string):
49 return []
50
51
52 class FileOrStdoutAction(argparse.Action):
53
54 """
55 Controls claiming the right to write to the application standard output
56 """
57
58 def __call__(self, parser, namespace, values, option_string=None):
59 if values == '-':
60 stdout_claimed_by = getattr(namespace, 'stdout_claimed_by', None)
61 if stdout_claimed_by is not None:
62 msg = ('Options %s %s are trying to use stdout '
63 'simultaneously' % (stdout_claimed_by,
64 option_string))
65 raise argparse.ArgumentError(self, msg)
66 else:
67 setattr(namespace, 'stdout_claimed_by', option_string)
68 setattr(namespace, self.dest, values)
69
70
71 class Parser:
72
73 """
74 Class to Parse the command line arguments.
75 """
76
77 def __init__(self):
78 self.args = argparse.Namespace()
79 self.config = {}
80 self.subcommands = None
81 self.application = ArgumentParser(prog=PROG,
82 add_help=False, # see parent parsing
83 description=DESCRIPTION)
84 self.application.add_argument('-v', '--version', action='version',
85 version='Avocado %s' % VERSION)
86 self.application.add_argument('--config', metavar='CONFIG_FILE',
87 nargs='?',
88 help='Use custom configuration from a file')
89
90 help_msg = ('Turn the paginator on/off. Useful when outputs are too'
91 'long. This will be a boolean soon.')
92 settings.register_option(section='core',
93 key='paginator',
94 help_msg=help_msg,
95 default='off',
96 choices=('on', 'off'),
97 parser=self.application,
98 long_arg='--paginator')
99
100 help_msg = ('Some commands can produce more information. This option '
101 'will enable the verbosity when applicable.')
102 settings.register_option(section='core',
103 key='verbose',
104 help_msg=help_msg,
105 default=False,
106 key_type=bool,
107 parser=self.application,
108 long_arg='--verbose',
109 short_arg='-V')
110
111 settings.add_argparser_to_option(namespace='core.show',
112 parser=self.application,
113 long_arg='--show')
114
115 def start(self):
116 """
117 Start to parsing arguments.
118
119 At the end of this method, the support for subparsers is activated.
120 Side effect: update attribute `args` (the namespace).
121 """
122 self.args, _ = self.application.parse_known_args()
123
124 # Load settings from file, if user provides one
125 if self.args.config is not None:
126 settings.process_config_path(self.args.config)
127
128 # Use parent parsing to avoid breaking the output of --help option
129 self.application = ArgumentParser(prog=PROG,
130 description=DESCRIPTION,
131 parents=[self.application])
132
133 # Subparsers where Avocado subcommands are plugged
134 self.subcommands = self.application.add_subparsers(
135 title='subcommands',
136 description='valid subcommands',
137 help='subcommand help',
138 dest='subcommand')
139 # On Python 2, required doesn't make a difference because a
140 # subparser is considered an unconsumed positional arguments,
141 # and not providing one will error with a "too few arguments"
142 # message. On Python 3, required arguments are used instead.
143 # Unfortunately, there's no way to pass this as an option when
144 # constructing the sub parsers, but it is possible to set that
145 # option afterwards.
146 self.subcommands.required = True
147
148 def finish(self):
149 """
150 Finish the process of parsing arguments.
151
152 Side effect: set the final value on attribute `config`.
153 """
154 args, extra = self.application.parse_known_args(namespace=self.args)
155 if extra:
156 msg = 'unrecognized arguments: %s' % ' '.join(extra)
157 for sub in self.application._subparsers._actions: # pylint: disable=W0212
158 if sub.dest == 'subcommand':
159 sub.choices[self.args.subcommand].error(msg)
160
161 self.application.error(msg)
162 # from this point on, config is a dictionary based on a argparse.Namespace
163 self.config = vars(args)
164
165
166 class HintParser:
167 def __init__(self, filename):
168 self.filename = filename
169 self.config = None
170 self.hints = []
171 self._parse()
172
173 def _get_args_from_section(self, section):
174 try:
175 args = self.config.get(section, 'args')
176 if args == '$testpath':
177 return [args]
178 return args.split(',')
179 except NoOptionError:
180 return []
181
182 def _get_kwargs_from_section(self, section):
183 result = {}
184 kwargs = self.config.get(section, 'kwargs', fallback='')
185 for kwarg in kwargs.split(','):
186 if kwarg == '':
187 continue
188 key, value = kwarg.split('=')
189 result[key] = value
190 return result
191
192 def _get_resolutions_by_kind(self, kind, paths):
193 self.validate_kind_section(kind)
194
195 resolutions = []
196 success = ReferenceResolutionResult.SUCCESS
197
198 config = {'uri': self._get_uri_from_section(kind),
199 'args': self._get_args_from_section(kind),
200 'kwargs': self._get_kwargs_from_section(kind)}
201 for path in paths:
202 uri = config.get('uri')
203 args = config.get('args')
204 kwargs = config.get('kwargs')
205 if uri == '$testpath':
206 uri = path
207 if '$testpath' in args:
208 args = [item.replace('$testpath', path) for item in args]
209 if '$testpath' in kwargs.values():
210 kwargs = {k: v.replace('$testpath', path)
211 for k, v in kwargs.items()}
212 runnable = Runnable(kind, uri, *args, **kwargs)
213 resolutions.append(ReferenceResolution(reference=path,
214 result=success,
215 resolutions=[runnable],
216 origin=path))
217 return resolutions
218
219 def _get_uri_from_section(self, section):
220 return self.config.get(section, 'uri')
221
222 def _parse(self):
223 self.config = ConfigParser()
224 config_paths = self.config.read(self.filename)
225 if not config_paths:
226 raise ConfigFileNotFound(self.filename)
227
228 def get_resolutions(self):
229 """Return a list of resolutions based on the file definitions."""
230 resolutions = []
231 for kind in self.config['kinds']:
232 files = self.config.get('kinds', kind)
233 resolutions.extend(self._get_resolutions_by_kind(kind,
234 glob(files)))
235 return resolutions
236
237 def validate_kind_section(self, kind):
238 """Validates a specific "kind section".
239
240 This method will raise a `settings.SettingsError` if any problem is
241 found on the file.
242
243 :param kind: a string with the specific section.
244 """
245 if kind not in self.config:
246 msg = 'Section {} is not defined. Please check your hint file.'
247 raise SettingsError(msg.format(kind))
248
249 uri = self._get_uri_from_section(kind)
250 if uri is None:
251 msg = "uri needs to be defined inside {}".format(kind)
252 raise SettingsError(msg)
253
[end of avocado/core/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/avocado/core/parser.py b/avocado/core/parser.py
--- a/avocado/core/parser.py
+++ b/avocado/core/parser.py
@@ -59,9 +59,9 @@
if values == '-':
stdout_claimed_by = getattr(namespace, 'stdout_claimed_by', None)
if stdout_claimed_by is not None:
- msg = ('Options %s %s are trying to use stdout '
- 'simultaneously' % (stdout_claimed_by,
- option_string))
+ msg = ('Options %s %s are trying to use stdout simultaneously.'
+ ' Please set at least one of them to a file to avoid '
+ 'conflicts' % (stdout_claimed_by, option_string))
raise argparse.ArgumentError(self, msg)
else:
setattr(namespace, 'stdout_claimed_by', option_string)
|
{"golden_diff": "diff --git a/avocado/core/parser.py b/avocado/core/parser.py\n--- a/avocado/core/parser.py\n+++ b/avocado/core/parser.py\n@@ -59,9 +59,9 @@\n if values == '-':\n stdout_claimed_by = getattr(namespace, 'stdout_claimed_by', None)\n if stdout_claimed_by is not None:\n- msg = ('Options %s %s are trying to use stdout '\n- 'simultaneously' % (stdout_claimed_by,\n- option_string))\n+ msg = ('Options %s %s are trying to use stdout simultaneously.'\n+ ' Please set at least one of them to a file to avoid '\n+ 'conflicts' % (stdout_claimed_by, option_string))\n raise argparse.ArgumentError(self, msg)\n else:\n setattr(namespace, 'stdout_claimed_by', option_string)\n", "issue": "docs: Review and test examples on user's guide (section: Introduction)\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 2013-2014\n# Author: Ruda Moura <[email protected]>\n\n\"\"\"\nAvocado application command line parsing.\n\"\"\"\n\nimport argparse\nfrom configparser import ConfigParser, NoOptionError\nfrom glob import glob\n\nfrom . import exit_codes\nfrom .nrunner import Runnable\nfrom .output import LOG_UI\nfrom .resolver import ReferenceResolution, ReferenceResolutionResult\nfrom .settings import ConfigFileNotFound, SettingsError, settings\nfrom .version import VERSION\n\nPROG = 'avocado'\nDESCRIPTION = 'Avocado Test Runner'\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n\n \"\"\"\n Class to override argparse functions\n \"\"\"\n\n def error(self, message):\n LOG_UI.debug(self.format_help())\n LOG_UI.error(\"%s: error: %s\", self.prog, message)\n if \"unrecognized arguments\" in message:\n LOG_UI.warning(\"Perhaps a plugin is missing; run 'avocado\"\n \" plugins' to list the installed ones\")\n self.exit(exit_codes.AVOCADO_FAIL)\n\n def _get_option_tuples(self, option_string):\n return []\n\n\nclass FileOrStdoutAction(argparse.Action):\n\n \"\"\"\n Controls claiming the right to write to the application standard output\n \"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n if values == '-':\n stdout_claimed_by = getattr(namespace, 'stdout_claimed_by', None)\n if stdout_claimed_by is not None:\n msg = ('Options %s %s are trying to use stdout '\n 'simultaneously' % (stdout_claimed_by,\n option_string))\n raise argparse.ArgumentError(self, msg)\n else:\n setattr(namespace, 'stdout_claimed_by', option_string)\n setattr(namespace, self.dest, values)\n\n\nclass Parser:\n\n \"\"\"\n Class to Parse the command line arguments.\n \"\"\"\n\n def __init__(self):\n self.args = argparse.Namespace()\n self.config = {}\n self.subcommands = None\n self.application = ArgumentParser(prog=PROG,\n add_help=False, # see parent parsing\n description=DESCRIPTION)\n self.application.add_argument('-v', '--version', action='version',\n version='Avocado %s' % VERSION)\n self.application.add_argument('--config', metavar='CONFIG_FILE',\n nargs='?',\n help='Use custom configuration from a file')\n\n help_msg = ('Turn the paginator on/off. Useful when outputs are too'\n 'long. 
This will be a boolean soon.')\n settings.register_option(section='core',\n key='paginator',\n help_msg=help_msg,\n default='off',\n choices=('on', 'off'),\n parser=self.application,\n long_arg='--paginator')\n\n help_msg = ('Some commands can produce more information. This option '\n 'will enable the verbosity when applicable.')\n settings.register_option(section='core',\n key='verbose',\n help_msg=help_msg,\n default=False,\n key_type=bool,\n parser=self.application,\n long_arg='--verbose',\n short_arg='-V')\n\n settings.add_argparser_to_option(namespace='core.show',\n parser=self.application,\n long_arg='--show')\n\n def start(self):\n \"\"\"\n Start to parsing arguments.\n\n At the end of this method, the support for subparsers is activated.\n Side effect: update attribute `args` (the namespace).\n \"\"\"\n self.args, _ = self.application.parse_known_args()\n\n # Load settings from file, if user provides one\n if self.args.config is not None:\n settings.process_config_path(self.args.config)\n\n # Use parent parsing to avoid breaking the output of --help option\n self.application = ArgumentParser(prog=PROG,\n description=DESCRIPTION,\n parents=[self.application])\n\n # Subparsers where Avocado subcommands are plugged\n self.subcommands = self.application.add_subparsers(\n title='subcommands',\n description='valid subcommands',\n help='subcommand help',\n dest='subcommand')\n # On Python 2, required doesn't make a difference because a\n # subparser is considered an unconsumed positional arguments,\n # and not providing one will error with a \"too few arguments\"\n # message. On Python 3, required arguments are used instead.\n # Unfortunately, there's no way to pass this as an option when\n # constructing the sub parsers, but it is possible to set that\n # option afterwards.\n self.subcommands.required = True\n\n def finish(self):\n \"\"\"\n Finish the process of parsing arguments.\n\n Side effect: set the final value on attribute `config`.\n \"\"\"\n args, extra = self.application.parse_known_args(namespace=self.args)\n if extra:\n msg = 'unrecognized arguments: %s' % ' '.join(extra)\n for sub in self.application._subparsers._actions: # pylint: disable=W0212\n if sub.dest == 'subcommand':\n sub.choices[self.args.subcommand].error(msg)\n\n self.application.error(msg)\n # from this point on, config is a dictionary based on a argparse.Namespace\n self.config = vars(args)\n\n\nclass HintParser:\n def __init__(self, filename):\n self.filename = filename\n self.config = None\n self.hints = []\n self._parse()\n\n def _get_args_from_section(self, section):\n try:\n args = self.config.get(section, 'args')\n if args == '$testpath':\n return [args]\n return args.split(',')\n except NoOptionError:\n return []\n\n def _get_kwargs_from_section(self, section):\n result = {}\n kwargs = self.config.get(section, 'kwargs', fallback='')\n for kwarg in kwargs.split(','):\n if kwarg == '':\n continue\n key, value = kwarg.split('=')\n result[key] = value\n return result\n\n def _get_resolutions_by_kind(self, kind, paths):\n self.validate_kind_section(kind)\n\n resolutions = []\n success = ReferenceResolutionResult.SUCCESS\n\n config = {'uri': self._get_uri_from_section(kind),\n 'args': self._get_args_from_section(kind),\n 'kwargs': self._get_kwargs_from_section(kind)}\n for path in paths:\n uri = config.get('uri')\n args = config.get('args')\n kwargs = config.get('kwargs')\n if uri == '$testpath':\n uri = path\n if '$testpath' in args:\n args = [item.replace('$testpath', path) for item in args]\n if 
'$testpath' in kwargs.values():\n kwargs = {k: v.replace('$testpath', path)\n for k, v in kwargs.items()}\n runnable = Runnable(kind, uri, *args, **kwargs)\n resolutions.append(ReferenceResolution(reference=path,\n result=success,\n resolutions=[runnable],\n origin=path))\n return resolutions\n\n def _get_uri_from_section(self, section):\n return self.config.get(section, 'uri')\n\n def _parse(self):\n self.config = ConfigParser()\n config_paths = self.config.read(self.filename)\n if not config_paths:\n raise ConfigFileNotFound(self.filename)\n\n def get_resolutions(self):\n \"\"\"Return a list of resolutions based on the file definitions.\"\"\"\n resolutions = []\n for kind in self.config['kinds']:\n files = self.config.get('kinds', kind)\n resolutions.extend(self._get_resolutions_by_kind(kind,\n glob(files)))\n return resolutions\n\n def validate_kind_section(self, kind):\n \"\"\"Validates a specific \"kind section\".\n\n This method will raise a `settings.SettingsError` if any problem is\n found on the file.\n\n :param kind: a string with the specific section.\n \"\"\"\n if kind not in self.config:\n msg = 'Section {} is not defined. Please check your hint file.'\n raise SettingsError(msg.format(kind))\n\n uri = self._get_uri_from_section(kind)\n if uri is None:\n msg = \"uri needs to be defined inside {}\".format(kind)\n raise SettingsError(msg)\n", "path": "avocado/core/parser.py"}]}
| 3,046 | 190 |
gh_patches_debug_729
|
rasdani/github-patches
|
git_diff
|
mlcommons__GaNDLF-453
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pickle5 may cause setup errors on Python 3.8 (future-proofing)
**Describe the bug**
When installing GaNDLF on Python 3.8, an error occurs when installing the dependency "pickle5".
Note that pickle5 is redundant in 3.8 -- the backported functionality is the default/standard [[ref](https://github.com/pitrou/pickle5-backport/issues/12)].
You can solve this by adding this annotation in setup.py so that pickle5 is only installed on Python versions 3.7 or lower (example of this syntax: https://stackoverflow.com/a/32643122).
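A minimal sketch of what that marker-based requirement could look like in the requirements list (the exact pin and the `3.8` bound are illustrative assumptions, not settled values):
``` python
# Hypothetical excerpt from setup.py's requirements list: the PEP 508
# environment marker tells pip to skip pickle5 wherever the backport is
# already part of the standard library.
requirements = [
    'pickle5>=0.0.11; python_version < "3.8"',
    # ... remaining dependencies unchanged ...
]
```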
If pickle5 is imported directly in your code, you may also need to do a version check at import time, something like this:
``` python
# Both these should come standard if you have setuptools anyway
import platform
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.8.0"):
    import pickle5 as pickle
else:
    import pickle
```
**To Reproduce**
Steps to reproduce the behavior:
1. Create a Python 3.8 environment using your mechanism of choice.
2. Install GaNDLF per instructions.
3. Receive error message while installing pickle5.
**GaNDLF Version**
Latest master (0.0.14.dev0 I think)
**Desktop (please complete the following information):**
Occurs in any system with Python 3.8 or greater. At least for me on Ubuntu-based machines.
**Additional context**
This issue is just a heads up for supporting 3.8 and greater. Hope this helps.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 with open("README.md") as readme_file:
13 readme = readme_file.read()
14
15
16 def git_submodule_update():
17 ## submodule update
18 os.system("git submodule update --init --recursive")
19
20
21 class CustomInstallCommand(install):
22 def run(self):
23 install.run(self)
24 git_submodule_update()
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30 git_submodule_update()
31
32
33 class CustomEggInfoCommand(egg_info):
34 def run(self):
35 egg_info.run(self)
36 git_submodule_update()
37
38
39 # read version.py
40 import sys, re
41
42 try:
43 filepath = "GANDLF/version.py"
44 version_file = open(filepath)
45 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
46
47 except Exception as error:
48 __version__ = "0.0.1"
49 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
50
51 requirements = [
52 "black",
53 "numpy==1.21.0",
54 "scipy",
55 "SimpleITK!=2.0.*",
56 "torchvision",
57 "tqdm",
58 "torchio==0.18.57",
59 "pandas",
60 "pylint",
61 "scikit-learn>=0.23.2",
62 "scikit-image>=0.19.1",
63 "pickle5>=0.0.11",
64 "setuptools",
65 "seaborn",
66 "pyyaml",
67 "tiffslide",
68 "matplotlib",
69 "requests>=2.25.0",
70 "pyvips",
71 "pytest",
72 "coverage",
73 "pytest-cov",
74 "psutil",
75 "medcam",
76 "opencv-python",
77 "torchmetrics==0.5.1", # newer versions have changed api for f1 invocation
78 "OpenPatchMiner==0.1.8",
79 "zarr==2.10.3",
80 "pydicom",
81 "onnx",
82 ]
83
84 # pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389
85 if sys.platform == "darwin":
86 requirements.append("torch==1.9.0")
87 else:
88 requirements.append("torch==1.8.2")
89
90 setup(
91 name="GANDLF",
92 version=__version__,
93 author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
94 author_email="[email protected]",
95 python_requires=">=3.7",
96 packages=find_packages(),
97 cmdclass={ # this ensures git_submodule_update is called during install
98 "install": CustomInstallCommand,
99 "develop": CustomDevelopCommand,
100 "egg_info": CustomEggInfoCommand,
101 },
102 scripts=[
103 "gandlf_run",
104 "gandlf_constructCSV",
105 "gandlf_collectStats",
106 "gandlf_patchMiner",
107 "gandlf_preprocess",
108 "gandlf_anonymizer",
109 "gandlf_verifyInstall",
110 ],
111 classifiers=[
112 "Development Status :: 3 - Alpha",
113 "Intended Audience :: Science/Research",
114 "License :: OSI Approved :: BSD License",
115 "Natural Language :: English",
116 "Operating System :: OS Independent",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Topic :: Scientific/Engineering :: Medical Science Apps",
121 ],
122 description=(
123 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
124 ),
125 install_requires=requirements,
126 license="BSD-3-Clause License",
127 long_description=readme,
128 long_description_content_type="text/markdown",
129 include_package_data=True,
130 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
131 zip_safe=False,
132 )
133
134 ## windows vips installation
135 if os.name == "nt": # proceed for windows
136 from pathlib import Path
137
138 # download and extract if main dll is absent
139 if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
140 print("Downloading and extracting VIPS for Windows")
141 url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
142 zip_to_extract = "./vips.zip"
143 import urllib.request, zipfile
144
145 urllib.request.urlretrieve(url, zip_to_extract)
146 z = zipfile.ZipFile(zip_to_extract)
147 z.extractall("./vips")
148 z.close()
149 os.remove(zip_to_extract)
150
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -60,7 +60,7 @@
"pylint",
"scikit-learn>=0.23.2",
"scikit-image>=0.19.1",
- "pickle5>=0.0.11",
+ 'pickle5>=0.0.11; python_version < "3.8.0"',
"setuptools",
"seaborn",
"pyyaml",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -60,7 +60,7 @@\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n- \"pickle5>=0.0.11\",\n+ 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n", "issue": "Pickle5 may cause setup errors on Python 3.8 (future-proofing)\n**Describe the bug**\r\nWhen installing GaNDLF on Python 3.8, an error occurs when installing the dependency \"pickle5\".\r\nNote that pickle5 is redundant in 3.8 -- the backported functionality is the default/standard [[ref](https://github.com/pitrou/pickle5-backport/issues/12)]. \r\n\r\nYou can solve this by adding this annotation in setup.py so that pickle5 is only installed on Python versions 3.7 or lower (example of this syntax: https://stackoverflow.com/a/32643122).\r\nIf pickle5 is imported directly in your code, you may also need to do a version check at import time, something like this:\r\n``` python\r\n# Both these should come standard if you have setuptools anyway\r\nimport platform\r\nfrom packaging import version\r\nif version.parse(platform.python_version()) < version.parse(\"3.8.0\"):\r\n import pickle5 as pickle\r\nelse:\r\n import pickle\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a Python 3.8 environment using your mechanism of choice.\r\n2. Install GaNDLF per instructions.\r\n3. Receive error message while installing pickle5.\r\n\r\n**GaNDLF Version**\r\nLatest master (0.0.14.dev0 I think)\r\n\r\n**Desktop (please complete the following information):**\r\nOccurs in any system with Python 3.8 or greater. At least for me on Ubuntu-based machines.\r\n\r\n**Additional context**\r\nThis issue is just a heads up for supporting 3.8 and greater. 
Hope this helps.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.21.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"pickle5>=0.0.11\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n 
include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}]}
| 2,392 | 116 |
gh_patches_debug_33176
|
rasdani/github-patches
|
git_diff
|
coala__coala-5814
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Imported classes should be arranged lexicographically
Classes being imported in the `coala/coalib/bearlib/languages/__init__.py` file should be arranged alphabetically.
A good newcomer issue.
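One way to sanity-check the expected order (the tooling choice here is an assumption, not a project requirement) is to compare the imported module names against their case-insensitively sorted form, e.g.:
``` python
# A few of the definition module names from the file above; the assert holds
# once they are arranged lexicographically (ignoring case).
names = ["antlr", "Bash", "C", "CPP", "CSharp", "CSS", "D"]
assert names == sorted(names, key=str.lower)
```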
</issue>
<code>
[start of coalib/bearlib/languages/__init__.py]
1 """
2 This directory holds means to get generic information for specific languages.
3 """
4
5 # Start ignoring PyUnusedCodeBear
6 from .Language import Language
7 from .Language import Languages
8
9 from .definitions.Unknown import Unknown
10 from .definitions.antlr import antlr
11 from .definitions.Bash import Bash
12 from .definitions.C import C
13 from .definitions.CPP import CPP
14 from .definitions.CSharp import CSharp
15 from .definitions.CSS import CSS
16 from .definitions.D import D
17 from .definitions.Fortran import Fortran
18 from .definitions.Golang import Golang
19 from .definitions.GraphQL import GraphQL
20 from .definitions.html import HTML
21 from .definitions.Java import Java
22 from .definitions.JavaScript import JavaScript
23 from .definitions.JSON import JSON
24 from .definitions.JSP import JSP
25 from .definitions.KornShell import KornShell
26 from .definitions.m4 import m4
27 from .definitions.Matlab import Matlab
28 from .definitions.Markdown import Markdown
29 from .definitions.ObjectiveC import ObjectiveC
30 from .definitions.PHP import PHP
31 from .definitions.PLSQL import PLSQL
32 from .definitions.PowerShell import PowerShell
33 from .definitions.Python import Python
34 from .definitions.Ruby import Ruby
35 from .definitions.Scala import Scala
36 from .definitions.Swift import Swift
37 from .definitions.Tcl import Tcl
38 from .definitions.TinyBasic import TinyBasic
39 from .definitions.Vala import Vala
40 from .definitions.TypeScript import TypeScript
41 from .definitions.Shell import Shell
42 from .definitions.Jinja2 import Jinja2
43 from .definitions.VisualBasic import VisualBasic
44 from .definitions.XML import XML
45 from.definitions.ZShell import ZShell
46 # Stop ignoring PyUnusedCodeBear
47
[end of coalib/bearlib/languages/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/coalib/bearlib/languages/__init__.py b/coalib/bearlib/languages/__init__.py
--- a/coalib/bearlib/languages/__init__.py
+++ b/coalib/bearlib/languages/__init__.py
@@ -7,6 +7,7 @@
from .Language import Languages
from .definitions.Unknown import Unknown
+
from .definitions.antlr import antlr
from .definitions.Bash import Bash
from .definitions.C import C
@@ -20,12 +21,13 @@
from .definitions.html import HTML
from .definitions.Java import Java
from .definitions.JavaScript import JavaScript
+from .definitions.Jinja2 import Jinja2
from .definitions.JSON import JSON
from .definitions.JSP import JSP
from .definitions.KornShell import KornShell
from .definitions.m4 import m4
-from .definitions.Matlab import Matlab
from .definitions.Markdown import Markdown
+from .definitions.Matlab import Matlab
from .definitions.ObjectiveC import ObjectiveC
from .definitions.PHP import PHP
from .definitions.PLSQL import PLSQL
@@ -33,14 +35,14 @@
from .definitions.Python import Python
from .definitions.Ruby import Ruby
from .definitions.Scala import Scala
+from .definitions.Shell import Shell
from .definitions.Swift import Swift
from .definitions.Tcl import Tcl
from .definitions.TinyBasic import TinyBasic
-from .definitions.Vala import Vala
from .definitions.TypeScript import TypeScript
-from .definitions.Shell import Shell
-from .definitions.Jinja2 import Jinja2
+from .definitions.Vala import Vala
from .definitions.VisualBasic import VisualBasic
from .definitions.XML import XML
-from.definitions.ZShell import ZShell
+from .definitions.ZShell import ZShell
+
# Stop ignoring PyUnusedCodeBear
|
{"golden_diff": "diff --git a/coalib/bearlib/languages/__init__.py b/coalib/bearlib/languages/__init__.py\n--- a/coalib/bearlib/languages/__init__.py\n+++ b/coalib/bearlib/languages/__init__.py\n@@ -7,6 +7,7 @@\n from .Language import Languages\n \n from .definitions.Unknown import Unknown\n+\n from .definitions.antlr import antlr\n from .definitions.Bash import Bash\n from .definitions.C import C\n@@ -20,12 +21,13 @@\n from .definitions.html import HTML\n from .definitions.Java import Java\n from .definitions.JavaScript import JavaScript\n+from .definitions.Jinja2 import Jinja2\n from .definitions.JSON import JSON\n from .definitions.JSP import JSP\n from .definitions.KornShell import KornShell\n from .definitions.m4 import m4\n-from .definitions.Matlab import Matlab\n from .definitions.Markdown import Markdown\n+from .definitions.Matlab import Matlab\n from .definitions.ObjectiveC import ObjectiveC\n from .definitions.PHP import PHP\n from .definitions.PLSQL import PLSQL\n@@ -33,14 +35,14 @@\n from .definitions.Python import Python\n from .definitions.Ruby import Ruby\n from .definitions.Scala import Scala\n+from .definitions.Shell import Shell\n from .definitions.Swift import Swift\n from .definitions.Tcl import Tcl\n from .definitions.TinyBasic import TinyBasic\n-from .definitions.Vala import Vala\n from .definitions.TypeScript import TypeScript\n-from .definitions.Shell import Shell\n-from .definitions.Jinja2 import Jinja2\n+from .definitions.Vala import Vala\n from .definitions.VisualBasic import VisualBasic\n from .definitions.XML import XML\n-from.definitions.ZShell import ZShell\n+from .definitions.ZShell import ZShell\n+\n # Stop ignoring PyUnusedCodeBear\n", "issue": "Imported classes should be arranged lexicographically\nClasses being imported in `coala/coalib/bearlib/languages/__init__.py` file should be arranged alphabetically.\r\n\r\nA good newcomer issue. 
\n", "before_files": [{"content": "\"\"\"\nThis directory holds means to get generic information for specific languages.\n\"\"\"\n\n# Start ignoring PyUnusedCodeBear\nfrom .Language import Language\nfrom .Language import Languages\n\nfrom .definitions.Unknown import Unknown\nfrom .definitions.antlr import antlr\nfrom .definitions.Bash import Bash\nfrom .definitions.C import C\nfrom .definitions.CPP import CPP\nfrom .definitions.CSharp import CSharp\nfrom .definitions.CSS import CSS\nfrom .definitions.D import D\nfrom .definitions.Fortran import Fortran\nfrom .definitions.Golang import Golang\nfrom .definitions.GraphQL import GraphQL\nfrom .definitions.html import HTML\nfrom .definitions.Java import Java\nfrom .definitions.JavaScript import JavaScript\nfrom .definitions.JSON import JSON\nfrom .definitions.JSP import JSP\nfrom .definitions.KornShell import KornShell\nfrom .definitions.m4 import m4\nfrom .definitions.Matlab import Matlab\nfrom .definitions.Markdown import Markdown\nfrom .definitions.ObjectiveC import ObjectiveC\nfrom .definitions.PHP import PHP\nfrom .definitions.PLSQL import PLSQL\nfrom .definitions.PowerShell import PowerShell\nfrom .definitions.Python import Python\nfrom .definitions.Ruby import Ruby\nfrom .definitions.Scala import Scala\nfrom .definitions.Swift import Swift\nfrom .definitions.Tcl import Tcl\nfrom .definitions.TinyBasic import TinyBasic\nfrom .definitions.Vala import Vala\nfrom .definitions.TypeScript import TypeScript\nfrom .definitions.Shell import Shell\nfrom .definitions.Jinja2 import Jinja2\nfrom .definitions.VisualBasic import VisualBasic\nfrom .definitions.XML import XML\nfrom.definitions.ZShell import ZShell\n# Stop ignoring PyUnusedCodeBear\n", "path": "coalib/bearlib/languages/__init__.py"}]}
| 1,028 | 408 |
gh_patches_debug_23983
|
rasdani/github-patches
|
git_diff
|
lnbits__lnbits-675
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LNURLp: Edit existing LNURLP from sat to fiat currency unintentionally divides value by 100
Reproduce:
* Create 100 sat link, save
* Edit link, change price to 5€, save
* Observe that amount is now 0.05€ instead of 5€
Most likely caused by PR #663
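For context, a minimal sketch of the cents round-trip that breaks when only one write path applies the fiat multiplier (the helper names are illustrative, not the extension's actual functions):
``` python
FIAT_BASE_MULTIPLIER = 100  # assumption: fiat amounts are stored as integer cents

def to_stored_cents(display_amount: float) -> int:
    # 5.00 EUR entered in the form -> 500 stored
    return round(display_amount * FIAT_BASE_MULTIPLIER)

def to_display(stored_cents: float) -> float:
    # 500 stored -> 5.00 EUR shown; if an edit skips to_stored_cents(),
    # the raw 5 comes back as 0.05 here, matching the symptom above.
    return stored_cents / FIAT_BASE_MULTIPLIER

assert to_display(to_stored_cents(5.0)) == 5.0
assert to_display(5) == 0.05
```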
</issue>
<code>
[start of lnbits/extensions/lnurlp/views_api.py]
1 from http import HTTPStatus
2
3 from fastapi import Request
4 from fastapi.param_functions import Query
5 from fastapi.params import Depends
6 from lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore
7 from starlette.exceptions import HTTPException
8
9 from lnbits.core.crud import get_user
10 from lnbits.decorators import WalletTypeInfo, get_key_type
11 from lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis
12
13 from . import lnurlp_ext
14 from .crud import (
15 create_pay_link,
16 delete_pay_link,
17 get_pay_link,
18 get_pay_links,
19 update_pay_link,
20 )
21 from .models import CreatePayLinkData
22
23
24 @lnurlp_ext.get("/api/v1/currencies")
25 async def api_list_currencies_available():
26 return list(currencies.keys())
27
28
29 @lnurlp_ext.get("/api/v1/links", status_code=HTTPStatus.OK)
30 async def api_links(
31 req: Request,
32 wallet: WalletTypeInfo = Depends(get_key_type),
33 all_wallets: bool = Query(False),
34 ):
35 wallet_ids = [wallet.wallet.id]
36
37 if all_wallets:
38 wallet_ids = (await get_user(wallet.wallet.user)).wallet_ids
39
40 try:
41 return [
42 {**link.dict(), "lnurl": link.lnurl(req)}
43 for link in await get_pay_links(wallet_ids)
44 ]
45
46 except LnurlInvalidUrl:
47 raise HTTPException(
48 status_code=HTTPStatus.UPGRADE_REQUIRED,
49 detail="LNURLs need to be delivered over a publically accessible `https` domain or Tor.",
50 )
51
52
53 @lnurlp_ext.get("/api/v1/links/{link_id}", status_code=HTTPStatus.OK)
54 async def api_link_retrieve(
55 r: Request, link_id, wallet: WalletTypeInfo = Depends(get_key_type)
56 ):
57 link = await get_pay_link(link_id)
58
59 if not link:
60 raise HTTPException(
61 detail="Pay link does not exist.", status_code=HTTPStatus.NOT_FOUND
62 )
63
64 if link.wallet != wallet.wallet.id:
65 raise HTTPException(
66 detail="Not your pay link.", status_code=HTTPStatus.FORBIDDEN
67 )
68
69 return {**link.dict(), **{"lnurl": link.lnurl(r)}}
70
71
72 @lnurlp_ext.post("/api/v1/links", status_code=HTTPStatus.CREATED)
73 @lnurlp_ext.put("/api/v1/links/{link_id}", status_code=HTTPStatus.OK)
74 async def api_link_create_or_update(
75 data: CreatePayLinkData,
76 link_id=None,
77 wallet: WalletTypeInfo = Depends(get_key_type),
78 ):
79
80 if data.min > data.max:
81 raise HTTPException(
82 detail="Min is greater than max.", status_code=HTTPStatus.BAD_REQUEST
83 )
84
85 if data.currency == None and (
86 round(data.min) != data.min or round(data.max) != data.max or data.min < 1
87 ):
88 raise HTTPException(
89 detail="Must use full satoshis.", status_code=HTTPStatus.BAD_REQUEST
90 )
91
92 if "success_url" in data and data.success_url[:8] != "https://":
93 raise HTTPException(
94 detail="Success URL must be secure https://...",
95 status_code=HTTPStatus.BAD_REQUEST,
96 )
97
98 if link_id:
99 link = await get_pay_link(link_id)
100
101 if not link:
102 raise HTTPException(
103 detail="Pay link does not exist.", status_code=HTTPStatus.NOT_FOUND
104 )
105
106 if link.wallet != wallet.wallet.id:
107 raise HTTPException(
108 detail="Not your pay link.", status_code=HTTPStatus.FORBIDDEN
109 )
110
111 link = await update_pay_link(**data.dict(), link_id=link_id)
112 else:
113 link = await create_pay_link(data, wallet_id=wallet.wallet.id)
114 return {**link.dict(), "lnurl": link.lnurl}
115
116
117 @lnurlp_ext.delete("/api/v1/links/{link_id}")
118 async def api_link_delete(link_id, wallet: WalletTypeInfo = Depends(get_key_type)):
119 link = await get_pay_link(link_id)
120
121 if not link:
122 raise HTTPException(
123 detail="Pay link does not exist.", status_code=HTTPStatus.NOT_FOUND
124 )
125
126 if link.wallet != wallet.wallet.id:
127 raise HTTPException(
128 detail="Not your pay link.", status_code=HTTPStatus.FORBIDDEN
129 )
130
131 await delete_pay_link(link_id)
132 raise HTTPException(status_code=HTTPStatus.NO_CONTENT)
133
134
135 @lnurlp_ext.get("/api/v1/rate/{currency}", status_code=HTTPStatus.OK)
136 async def api_check_fiat_rate(currency):
137 try:
138 rate = await get_fiat_rate_satoshis(currency)
139 except AssertionError:
140 rate = None
141
142 return {"rate": rate}
143
[end of lnbits/extensions/lnurlp/views_api.py]
[start of lnbits/extensions/lnurlp/crud.py]
1 from typing import List, Optional, Union
2
3 from lnbits.db import SQLITE
4 from . import db
5 from .models import PayLink, CreatePayLinkData
6
7
8 async def create_pay_link(data: CreatePayLinkData, wallet_id: str) -> PayLink:
9
10 returning = "" if db.type == SQLITE else "RETURNING ID"
11 method = db.execute if db.type == SQLITE else db.fetchone
12 # database only allows int4 entries for min and max. For fiat currencies,
13 # we multiply by data.fiat_base_multiplier (usually 100) to save the value in cents.
14 if data.currency and data.fiat_base_multiplier:
15 data.min *= data.fiat_base_multiplier
16 data.max *= data.fiat_base_multiplier
17
18 result = await (method)(
19 f"""
20 INSERT INTO lnurlp.pay_links (
21 wallet,
22 description,
23 min,
24 max,
25 served_meta,
26 served_pr,
27 webhook_url,
28 success_text,
29 success_url,
30 comment_chars,
31 currency,
32 fiat_base_multiplier
33 )
34 VALUES (?, ?, ?, ?, 0, 0, ?, ?, ?, ?, ?, ?)
35 {returning}
36 """,
37 (
38 wallet_id,
39 data.description,
40 data.min,
41 data.max,
42 data.webhook_url,
43 data.success_text,
44 data.success_url,
45 data.comment_chars,
46 data.currency,
47 data.fiat_base_multiplier,
48 ),
49 )
50 if db.type == SQLITE:
51 link_id = result._result_proxy.lastrowid
52 else:
53 link_id = result[0]
54
55 link = await get_pay_link(link_id)
56 assert link, "Newly created link couldn't be retrieved"
57 return link
58
59
60 async def get_pay_link(link_id: int) -> Optional[PayLink]:
61 row = await db.fetchone("SELECT * FROM lnurlp.pay_links WHERE id = ?", (link_id,))
62 return PayLink.from_row(row) if row else None
63
64
65 async def get_pay_links(wallet_ids: Union[str, List[str]]) -> List[PayLink]:
66 if isinstance(wallet_ids, str):
67 wallet_ids = [wallet_ids]
68
69 q = ",".join(["?"] * len(wallet_ids))
70 rows = await db.fetchall(
71 f"""
72 SELECT * FROM lnurlp.pay_links WHERE wallet IN ({q})
73 ORDER BY Id
74 """,
75 (*wallet_ids,),
76 )
77 return [PayLink.from_row(row) for row in rows]
78
79
80 async def update_pay_link(link_id: int, **kwargs) -> Optional[PayLink]:
81 q = ", ".join([f"{field[0]} = ?" for field in kwargs.items()])
82 await db.execute(
83 f"UPDATE lnurlp.pay_links SET {q} WHERE id = ?", (*kwargs.values(), link_id)
84 )
85 row = await db.fetchone("SELECT * FROM lnurlp.pay_links WHERE id = ?", (link_id,))
86 return PayLink.from_row(row) if row else None
87
88
89 async def increment_pay_link(link_id: int, **kwargs) -> Optional[PayLink]:
90 q = ", ".join([f"{field[0]} = {field[0]} + ?" for field in kwargs.items()])
91 await db.execute(
92 f"UPDATE lnurlp.pay_links SET {q} WHERE id = ?", (*kwargs.values(), link_id)
93 )
94 row = await db.fetchone("SELECT * FROM lnurlp.pay_links WHERE id = ?", (link_id,))
95 return PayLink.from_row(row) if row else None
96
97
98 async def delete_pay_link(link_id: int) -> None:
99 await db.execute("DELETE FROM lnurlp.pay_links WHERE id = ?", (link_id,))
100
[end of lnbits/extensions/lnurlp/crud.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lnbits/extensions/lnurlp/crud.py b/lnbits/extensions/lnurlp/crud.py
--- a/lnbits/extensions/lnurlp/crud.py
+++ b/lnbits/extensions/lnurlp/crud.py
@@ -9,11 +9,6 @@
returning = "" if db.type == SQLITE else "RETURNING ID"
method = db.execute if db.type == SQLITE else db.fetchone
- # database only allows int4 entries for min and max. For fiat currencies,
- # we multiply by data.fiat_base_multiplier (usually 100) to save the value in cents.
- if data.currency and data.fiat_base_multiplier:
- data.min *= data.fiat_base_multiplier
- data.max *= data.fiat_base_multiplier
result = await (method)(
f"""
diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py
--- a/lnbits/extensions/lnurlp/views_api.py
+++ b/lnbits/extensions/lnurlp/views_api.py
@@ -89,6 +89,12 @@
detail="Must use full satoshis.", status_code=HTTPStatus.BAD_REQUEST
)
+ # database only allows int4 entries for min and max. For fiat currencies,
+ # we multiply by data.fiat_base_multiplier (usually 100) to save the value in cents.
+ if data.currency and data.fiat_base_multiplier:
+ data.min *= data.fiat_base_multiplier
+ data.max *= data.fiat_base_multiplier
+
if "success_url" in data and data.success_url[:8] != "https://":
raise HTTPException(
detail="Success URL must be secure https://...",
|
{"golden_diff": "diff --git a/lnbits/extensions/lnurlp/crud.py b/lnbits/extensions/lnurlp/crud.py\n--- a/lnbits/extensions/lnurlp/crud.py\n+++ b/lnbits/extensions/lnurlp/crud.py\n@@ -9,11 +9,6 @@\n \n returning = \"\" if db.type == SQLITE else \"RETURNING ID\"\n method = db.execute if db.type == SQLITE else db.fetchone\n- # database only allows int4 entries for min and max. For fiat currencies,\n- # we multiply by data.fiat_base_multiplier (usually 100) to save the value in cents.\n- if data.currency and data.fiat_base_multiplier:\n- data.min *= data.fiat_base_multiplier\n- data.max *= data.fiat_base_multiplier\n \n result = await (method)(\n f\"\"\"\ndiff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py\n--- a/lnbits/extensions/lnurlp/views_api.py\n+++ b/lnbits/extensions/lnurlp/views_api.py\n@@ -89,6 +89,12 @@\n detail=\"Must use full satoshis.\", status_code=HTTPStatus.BAD_REQUEST\n )\n \n+ # database only allows int4 entries for min and max. For fiat currencies,\n+ # we multiply by data.fiat_base_multiplier (usually 100) to save the value in cents.\n+ if data.currency and data.fiat_base_multiplier:\n+ data.min *= data.fiat_base_multiplier\n+ data.max *= data.fiat_base_multiplier\n+\n if \"success_url\" in data and data.success_url[:8] != \"https://\":\n raise HTTPException(\n detail=\"Success URL must be secure https://...\",\n", "issue": "LNURLp: Edit existing LNURLP from sat to fiat currency unintentionally divides value by 100 \nReproduce: \r\n* Create 100 sat link, save\r\n* Edit link, change price to 5\u20ac, save\r\n* Observe that amount is now 0.05\u20ac instead of 5\u20ac\r\n\r\nMost likely cause by PR #663 \n", "before_files": [{"content": "from http import HTTPStatus\n\nfrom fastapi import Request\nfrom fastapi.param_functions import Query\nfrom fastapi.params import Depends\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\nfrom starlette.exceptions import HTTPException\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import WalletTypeInfo, get_key_type\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . 
import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n delete_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n)\nfrom .models import CreatePayLinkData\n\n\n@lnurlp_ext.get(\"/api/v1/currencies\")\nasync def api_list_currencies_available():\n return list(currencies.keys())\n\n\n@lnurlp_ext.get(\"/api/v1/links\", status_code=HTTPStatus.OK)\nasync def api_links(\n req: Request,\n wallet: WalletTypeInfo = Depends(get_key_type),\n all_wallets: bool = Query(False),\n):\n wallet_ids = [wallet.wallet.id]\n\n if all_wallets:\n wallet_ids = (await get_user(wallet.wallet.user)).wallet_ids\n\n try:\n return [\n {**link.dict(), \"lnurl\": link.lnurl(req)}\n for link in await get_pay_links(wallet_ids)\n ]\n\n except LnurlInvalidUrl:\n raise HTTPException(\n status_code=HTTPStatus.UPGRADE_REQUIRED,\n detail=\"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\",\n )\n\n\n@lnurlp_ext.get(\"/api/v1/links/{link_id}\", status_code=HTTPStatus.OK)\nasync def api_link_retrieve(\n r: Request, link_id, wallet: WalletTypeInfo = Depends(get_key_type)\n):\n link = await get_pay_link(link_id)\n\n if not link:\n raise HTTPException(\n detail=\"Pay link does not exist.\", status_code=HTTPStatus.NOT_FOUND\n )\n\n if link.wallet != wallet.wallet.id:\n raise HTTPException(\n detail=\"Not your pay link.\", status_code=HTTPStatus.FORBIDDEN\n )\n\n return {**link.dict(), **{\"lnurl\": link.lnurl(r)}}\n\n\n@lnurlp_ext.post(\"/api/v1/links\", status_code=HTTPStatus.CREATED)\n@lnurlp_ext.put(\"/api/v1/links/{link_id}\", status_code=HTTPStatus.OK)\nasync def api_link_create_or_update(\n data: CreatePayLinkData,\n link_id=None,\n wallet: WalletTypeInfo = Depends(get_key_type),\n):\n\n if data.min > data.max:\n raise HTTPException(\n detail=\"Min is greater than max.\", status_code=HTTPStatus.BAD_REQUEST\n )\n\n if data.currency == None and (\n round(data.min) != data.min or round(data.max) != data.max or data.min < 1\n ):\n raise HTTPException(\n detail=\"Must use full satoshis.\", status_code=HTTPStatus.BAD_REQUEST\n )\n\n if \"success_url\" in data and data.success_url[:8] != \"https://\":\n raise HTTPException(\n detail=\"Success URL must be secure https://...\",\n status_code=HTTPStatus.BAD_REQUEST,\n )\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n raise HTTPException(\n detail=\"Pay link does not exist.\", status_code=HTTPStatus.NOT_FOUND\n )\n\n if link.wallet != wallet.wallet.id:\n raise HTTPException(\n detail=\"Not your pay link.\", status_code=HTTPStatus.FORBIDDEN\n )\n\n link = await update_pay_link(**data.dict(), link_id=link_id)\n else:\n link = await create_pay_link(data, wallet_id=wallet.wallet.id)\n return {**link.dict(), \"lnurl\": link.lnurl}\n\n\n@lnurlp_ext.delete(\"/api/v1/links/{link_id}\")\nasync def api_link_delete(link_id, wallet: WalletTypeInfo = Depends(get_key_type)):\n link = await get_pay_link(link_id)\n\n if not link:\n raise HTTPException(\n detail=\"Pay link does not exist.\", status_code=HTTPStatus.NOT_FOUND\n )\n\n if link.wallet != wallet.wallet.id:\n raise HTTPException(\n detail=\"Not your pay link.\", status_code=HTTPStatus.FORBIDDEN\n )\n\n await delete_pay_link(link_id)\n raise HTTPException(status_code=HTTPStatus.NO_CONTENT)\n\n\n@lnurlp_ext.get(\"/api/v1/rate/{currency}\", status_code=HTTPStatus.OK)\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return {\"rate\": rate}\n", "path": "lnbits/extensions/lnurlp/views_api.py"}, 
{"content": "from typing import List, Optional, Union\n\nfrom lnbits.db import SQLITE\nfrom . import db\nfrom .models import PayLink, CreatePayLinkData\n\n\nasync def create_pay_link(data: CreatePayLinkData, wallet_id: str) -> PayLink:\n\n returning = \"\" if db.type == SQLITE else \"RETURNING ID\"\n method = db.execute if db.type == SQLITE else db.fetchone\n # database only allows int4 entries for min and max. For fiat currencies,\n # we multiply by data.fiat_base_multiplier (usually 100) to save the value in cents.\n if data.currency and data.fiat_base_multiplier:\n data.min *= data.fiat_base_multiplier\n data.max *= data.fiat_base_multiplier\n\n result = await (method)(\n f\"\"\"\n INSERT INTO lnurlp.pay_links (\n wallet,\n description,\n min,\n max,\n served_meta,\n served_pr,\n webhook_url,\n success_text,\n success_url,\n comment_chars,\n currency,\n fiat_base_multiplier\n )\n VALUES (?, ?, ?, ?, 0, 0, ?, ?, ?, ?, ?, ?)\n {returning}\n \"\"\",\n (\n wallet_id,\n data.description,\n data.min,\n data.max,\n data.webhook_url,\n data.success_text,\n data.success_url,\n data.comment_chars,\n data.currency,\n data.fiat_base_multiplier,\n ),\n )\n if db.type == SQLITE:\n link_id = result._result_proxy.lastrowid\n else:\n link_id = result[0]\n\n link = await get_pay_link(link_id)\n assert link, \"Newly created link couldn't be retrieved\"\n return link\n\n\nasync def get_pay_link(link_id: int) -> Optional[PayLink]:\n row = await db.fetchone(\"SELECT * FROM lnurlp.pay_links WHERE id = ?\", (link_id,))\n return PayLink.from_row(row) if row else None\n\n\nasync def get_pay_links(wallet_ids: Union[str, List[str]]) -> List[PayLink]:\n if isinstance(wallet_ids, str):\n wallet_ids = [wallet_ids]\n\n q = \",\".join([\"?\"] * len(wallet_ids))\n rows = await db.fetchall(\n f\"\"\"\n SELECT * FROM lnurlp.pay_links WHERE wallet IN ({q})\n ORDER BY Id\n \"\"\",\n (*wallet_ids,),\n )\n return [PayLink.from_row(row) for row in rows]\n\n\nasync def update_pay_link(link_id: int, **kwargs) -> Optional[PayLink]:\n q = \", \".join([f\"{field[0]} = ?\" for field in kwargs.items()])\n await db.execute(\n f\"UPDATE lnurlp.pay_links SET {q} WHERE id = ?\", (*kwargs.values(), link_id)\n )\n row = await db.fetchone(\"SELECT * FROM lnurlp.pay_links WHERE id = ?\", (link_id,))\n return PayLink.from_row(row) if row else None\n\n\nasync def increment_pay_link(link_id: int, **kwargs) -> Optional[PayLink]:\n q = \", \".join([f\"{field[0]} = {field[0]} + ?\" for field in kwargs.items()])\n await db.execute(\n f\"UPDATE lnurlp.pay_links SET {q} WHERE id = ?\", (*kwargs.values(), link_id)\n )\n row = await db.fetchone(\"SELECT * FROM lnurlp.pay_links WHERE id = ?\", (link_id,))\n return PayLink.from_row(row) if row else None\n\n\nasync def delete_pay_link(link_id: int) -> None:\n await db.execute(\"DELETE FROM lnurlp.pay_links WHERE id = ?\", (link_id,))\n", "path": "lnbits/extensions/lnurlp/crud.py"}]}
| 2,987 | 388 |
gh_patches_debug_232
|
rasdani/github-patches
|
git_diff
|
yt-project__yt-1532
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AHF answer tests are flaky
We're seeing random failures from the AHF answer tests on some PRs.
See e.g. https://tests.yt-project.org/job/yt_py3_git/414/.
</issue>
<code>
[start of yt/frontends/ahf/data_structures.py]
1 """
2 AHF data structures
3
4
5
6 """
7
8 #-----------------------------------------------------------------------------
9 # Copyright (c) 2017, yt Development Team.
10 #
11 # Distributed under the terms of the Modified BSD License.
12 #
13 # The full license is in the file COPYING.txt, distributed with this software.
14 #-----------------------------------------------------------------------------
15
16 import glob
17 import os
18 import stat
19
20 import numpy as np
21
22 from yt.data_objects.static_output import \
23 Dataset, \
24 ParticleFile
25 from yt.funcs import \
26 setdefaultattr
27 from yt.geometry.particle_geometry_handler import \
28 ParticleIndex
29 from yt.utilities.cosmology import \
30 Cosmology
31
32 from .fields import AHFHalosFieldInfo
33
34
35 class AHFHalosFile(ParticleFile):
36 def __init__(self, ds, io, filename, file_id):
37 root, _ = os.path.splitext(filename)
38 candidates = glob.glob(root + '*.AHF_halos')
39 if len(candidates) == 1:
40 filename = candidates[0]
41 else:
42 raise ValueError('Too many AHF_halos files.')
43 self.col_names = self._read_column_names(filename)
44 super(AHFHalosFile, self).__init__(ds, io, filename, file_id)
45
46 def read_data(self, usecols=None):
47 return np.genfromtxt(self.filename, names=self.col_names,
48 usecols=usecols)
49
50 def _read_column_names(self, filename):
51 with open(filename) as f:
52 line = f.readline()
53 # Remove leading '#'
54 line = line[1:]
55 names = line.split()
56 # Remove trailing '()'
57 names = [name.split('(')[0] for name in names]
58 return names
59
60
61 class AHFHalosDataset(Dataset):
62 _index_class = ParticleIndex
63 _file_class = AHFHalosFile
64 _field_info_class = AHFHalosFieldInfo
65
66 def __init__(self, filename, dataset_type='ahf',
67 n_ref=16, over_refine_factor=1,
68 units_override=None, unit_system='cgs',
69 hubble_constant=1.0):
70 root, _ = os.path.splitext(filename)
71 self.log_filename = root + '.log'
72 self.hubble_constant = hubble_constant
73
74 self.n_ref = n_ref
75 self.over_refine_factor = over_refine_factor
76 super(AHFHalosDataset, self).__init__(
77 filename, dataset_type=dataset_type,
78 units_override=units_override, unit_system=unit_system
79 )
80
81 def _set_code_unit_attributes(self):
82 setdefaultattr(self, 'length_unit', self.quan(1.0, 'kpccm/h'))
83 setdefaultattr(self, 'mass_unit', self.quan(1.0, 'Msun/h'))
84 setdefaultattr(self, 'time_unit', self.quan(1.0, 's'))
85 setdefaultattr(self, 'velocity_unit', self.quan(1.0, 'km/s'))
86
87 def _parse_parameter_file(self):
88 # Read all parameters.
89 simu = self._read_log_simu()
90 param = self._read_parameter()
91
92 # Set up general information.
93 self.filename_template = self.parameter_filename
94 self.file_count = 1
95 self.parameters.update(param)
96 self.particle_types = ('halos')
97 self.particle_types_raw = ('halos')
98 self.unique_identifier = \
99 int(os.stat(self.parameter_filename)[stat.ST_CTIME])
100
101 # Set up geometrical information.
102 self.refine_by = 2
103 self.dimensionality = 3
104 nz = 1 << self.over_refine_factor
105 self.domain_dimensions = np.ones(self.dimensionality, "int32") * nz
106 self.domain_left_edge = np.array([0.0, 0.0, 0.0])
107 # Note that boxsize is in Mpc but particle positions are in kpc.
108 self.domain_right_edge = np.array([simu['boxsize']] * 3) * 1000
109 self.periodicity = (True, True, True)
110
111 # Set up cosmological information.
112 self.cosmological_simulation = 1
113 self.current_redshift = param['z']
114 self.omega_lambda = simu['lambda0']
115 self.omega_matter = simu['omega0']
116 cosmo = Cosmology(self.hubble_constant,
117 self.omega_matter, self.omega_lambda)
118 self.current_time = cosmo.hubble_time(param['z']).in_units('s')
119
120 @classmethod
121 def _is_valid(self, *args, **kwargs):
122 filename = args[0]
123 if not filename.endswith('.parameter'):
124 return False
125 with open(filename, 'r') as f:
126 if f.readlines()[11].startswith('AHF'):
127 return True
128 return False
129
130 # Helper methods
131
132 def _read_log_simu(self):
133 simu = {}
134 with open(self.log_filename) as f:
135 for l in f:
136 if l.startswith('simu.'):
137 name, val = l.split(':')
138 key = name.strip().split('.')[1]
139 try:
140 val = float(val)
141 except:
142 val = float.fromhex(val)
143 simu[key] = val
144 return simu
145
146 def _read_parameter(self):
147 param = {}
148 with open(self.parameter_filename) as f:
149 for l in f:
150 words = l.split()
151 if len(words) == 2:
152 key, val = words
153 try:
154 val = float(val)
155 param[key] = val
156 except:
157 pass
158 return param
159
[end of yt/frontends/ahf/data_structures.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yt/frontends/ahf/data_structures.py b/yt/frontends/ahf/data_structures.py
--- a/yt/frontends/ahf/data_structures.py
+++ b/yt/frontends/ahf/data_structures.py
@@ -156,3 +156,7 @@
except:
pass
return param
+
+ @property
+ def _skip_cache(self):
+ return True
|
{"golden_diff": "diff --git a/yt/frontends/ahf/data_structures.py b/yt/frontends/ahf/data_structures.py\n--- a/yt/frontends/ahf/data_structures.py\n+++ b/yt/frontends/ahf/data_structures.py\n@@ -156,3 +156,7 @@\n except:\n pass\n return param\n+\n+ @property\n+ def _skip_cache(self):\n+ return True\n", "issue": "AHF answer tests are flaky\nWe're seeing random failures from the AHF answer tests on some PRs.\r\n\r\nSee e.g. https://tests.yt-project.org/job/yt_py3_git/414/.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nAHF data structures\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2017, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport glob\nimport os\nimport stat\n\nimport numpy as np\n\nfrom yt.data_objects.static_output import \\\n Dataset, \\\n ParticleFile\nfrom yt.funcs import \\\n setdefaultattr\nfrom yt.geometry.particle_geometry_handler import \\\n ParticleIndex\nfrom yt.utilities.cosmology import \\\n Cosmology\n\nfrom .fields import AHFHalosFieldInfo\n\n\nclass AHFHalosFile(ParticleFile):\n def __init__(self, ds, io, filename, file_id):\n root, _ = os.path.splitext(filename)\n candidates = glob.glob(root + '*.AHF_halos')\n if len(candidates) == 1:\n filename = candidates[0]\n else:\n raise ValueError('Too many AHF_halos files.')\n self.col_names = self._read_column_names(filename)\n super(AHFHalosFile, self).__init__(ds, io, filename, file_id)\n\n def read_data(self, usecols=None):\n return np.genfromtxt(self.filename, names=self.col_names,\n usecols=usecols)\n\n def _read_column_names(self, filename):\n with open(filename) as f:\n line = f.readline()\n # Remove leading '#'\n line = line[1:]\n names = line.split()\n # Remove trailing '()'\n names = [name.split('(')[0] for name in names]\n return names\n\n\nclass AHFHalosDataset(Dataset):\n _index_class = ParticleIndex\n _file_class = AHFHalosFile\n _field_info_class = AHFHalosFieldInfo\n\n def __init__(self, filename, dataset_type='ahf',\n n_ref=16, over_refine_factor=1,\n units_override=None, unit_system='cgs',\n hubble_constant=1.0):\n root, _ = os.path.splitext(filename)\n self.log_filename = root + '.log'\n self.hubble_constant = hubble_constant\n\n self.n_ref = n_ref\n self.over_refine_factor = over_refine_factor\n super(AHFHalosDataset, self).__init__(\n filename, dataset_type=dataset_type,\n units_override=units_override, unit_system=unit_system\n )\n\n def _set_code_unit_attributes(self):\n setdefaultattr(self, 'length_unit', self.quan(1.0, 'kpccm/h'))\n setdefaultattr(self, 'mass_unit', self.quan(1.0, 'Msun/h'))\n setdefaultattr(self, 'time_unit', self.quan(1.0, 's'))\n setdefaultattr(self, 'velocity_unit', self.quan(1.0, 'km/s'))\n\n def _parse_parameter_file(self):\n # Read all parameters.\n simu = self._read_log_simu()\n param = self._read_parameter()\n\n # Set up general information.\n self.filename_template = self.parameter_filename\n self.file_count = 1\n self.parameters.update(param)\n self.particle_types = ('halos')\n self.particle_types_raw = ('halos')\n self.unique_identifier = \\\n int(os.stat(self.parameter_filename)[stat.ST_CTIME])\n\n # Set up geometrical information.\n self.refine_by = 2\n self.dimensionality = 3\n nz = 1 << self.over_refine_factor\n self.domain_dimensions = np.ones(self.dimensionality, \"int32\") * nz\n 
self.domain_left_edge = np.array([0.0, 0.0, 0.0])\n # Note that boxsize is in Mpc but particle positions are in kpc.\n self.domain_right_edge = np.array([simu['boxsize']] * 3) * 1000\n self.periodicity = (True, True, True)\n\n # Set up cosmological information.\n self.cosmological_simulation = 1\n self.current_redshift = param['z']\n self.omega_lambda = simu['lambda0']\n self.omega_matter = simu['omega0']\n cosmo = Cosmology(self.hubble_constant,\n self.omega_matter, self.omega_lambda)\n self.current_time = cosmo.hubble_time(param['z']).in_units('s')\n\n @classmethod\n def _is_valid(self, *args, **kwargs):\n filename = args[0]\n if not filename.endswith('.parameter'):\n return False\n with open(filename, 'r') as f:\n if f.readlines()[11].startswith('AHF'):\n return True\n return False\n\n # Helper methods\n\n def _read_log_simu(self):\n simu = {}\n with open(self.log_filename) as f:\n for l in f:\n if l.startswith('simu.'):\n name, val = l.split(':')\n key = name.strip().split('.')[1]\n try:\n val = float(val)\n except:\n val = float.fromhex(val)\n simu[key] = val\n return simu\n\n def _read_parameter(self):\n param = {}\n with open(self.parameter_filename) as f:\n for l in f:\n words = l.split()\n if len(words) == 2:\n key, val = words\n try:\n val = float(val)\n param[key] = val\n except:\n pass\n return param\n", "path": "yt/frontends/ahf/data_structures.py"}]}
| 2,185 | 99 |
gh_patches_debug_38460
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-2443
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot change link from free to lendable
**Describe the bug**
I tried to correct a link on a book from "free" to lendable in the link list. This throws a 500 error. I get this from the logs:
```
equest, *callback_args, **callback_kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/views/generic/base.py", line 70, in view
return self.dispatch(request, *args, **kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/utils/decorators.py", line 43, in _wrapper
return bound_method(*args, **kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/utils/decorators.py", line 43, in _wrapper
return bound_method(*args, **kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/views/generic/base.py", line 98, in dispatch
return handler(request, *args, **kwargs)
File "/home/wyrm/bookwyrm/bookwyrm/views/books/links.py", line 37, in post
form.save(request)
File "/home/wyrm/bookwyrm/bookwyrm/forms/custom_form.py", line 36, in save
return super().save(*args, **kwargs)
File "/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/forms/models.py", line 460, in save
raise ValueError(
ValueError: The FileLink could not be changed because the data didn't validate.
```
**To Reproduce**
Go to the link list of a book.
Change the link action from anything to anything.
**Expected behavior**
Link is saved.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Instance**
wyrm.jascha.wtf, version 5.0.1
local instance on main branch
</issue>
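The `ValueError` in the traceback is standard Django `ModelForm` behaviour: `save()` refuses to run when the bound data failed validation, so the view has to check `is_valid()` first and re-render on failure. Below is a framework-free sketch of that contract; the tiny form class and its choice values are stand-ins, not BookWyrm code.

```python
# Stand-in for a ModelForm: save() raises if validation failed, so callers
# must branch on is_valid() instead of saving unconditionally.
class FileLinkFormStub:
    def __init__(self, data):
        self.data = data
        self.errors = {}

    def is_valid(self):
        if self.data.get("availability") not in ("free", "loan", "purchase"):
            self.errors["availability"] = "Select a valid choice."
        return not self.errors

    def save(self):
        if not self.is_valid():
            raise ValueError(
                "The FileLink could not be changed because the data didn't validate."
            )
        return self.data


def edit_link(post_data):
    form = FileLinkFormStub(post_data)
    if form.is_valid():
        return ("saved", form.save())
    # Returning the errors lets the page be re-rendered instead of a 500.
    return ("invalid", form.errors)


print(edit_link({"availability": "loan"}))
print(edit_link({"availability": "banana"}))
```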
<code>
[start of bookwyrm/forms/links.py]
1 """ using django model forms """
2 from urllib.parse import urlparse
3
4 from django.utils.translation import gettext_lazy as _
5
6 from bookwyrm import models
7 from .custom_form import CustomForm
8
9
10 # pylint: disable=missing-class-docstring
11 class LinkDomainForm(CustomForm):
12 class Meta:
13 model = models.LinkDomain
14 fields = ["name"]
15
16
17 class FileLinkForm(CustomForm):
18 class Meta:
19 model = models.FileLink
20 fields = ["url", "filetype", "availability", "book", "added_by"]
21
22 def clean(self):
23 """make sure the domain isn't blocked or pending"""
24 cleaned_data = super().clean()
25 url = cleaned_data.get("url")
26 filetype = cleaned_data.get("filetype")
27 book = cleaned_data.get("book")
28 domain = urlparse(url).netloc
29 if models.LinkDomain.objects.filter(domain=domain).exists():
30 status = models.LinkDomain.objects.get(domain=domain).status
31 if status == "blocked":
32 # pylint: disable=line-too-long
33 self.add_error(
34 "url",
35 _(
36 "This domain is blocked. Please contact your administrator if you think this is an error."
37 ),
38 )
39 elif models.FileLink.objects.filter(
40 url=url, book=book, filetype=filetype
41 ).exists():
42 # pylint: disable=line-too-long
43 self.add_error(
44 "url",
45 _(
46 "This link with file type has already been added for this book. If it is not visible, the domain is still pending."
47 ),
48 )
49
[end of bookwyrm/forms/links.py]
[start of bookwyrm/views/books/links.py]
1 """ the good stuff! the books! """
2 from django.contrib.auth.decorators import login_required, permission_required
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect
5 from django.template.response import TemplateResponse
6 from django.views import View
7 from django.utils.decorators import method_decorator
8 from django.views.decorators.http import require_POST
9
10 from bookwyrm import forms, models
11
12
13 # pylint: disable=no-self-use
14 @method_decorator(login_required, name="dispatch")
15 @method_decorator(
16 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch"
17 )
18 class BookFileLinks(View):
19 """View all links"""
20
21 def get(self, request, book_id):
22 """view links"""
23 book = get_object_or_404(models.Edition, id=book_id)
24 links = book.file_links.order_by("domain__status", "created_date")
25 annotated_links = []
26 for link in links.all():
27 link.form = forms.FileLinkForm(instance=link)
28 annotated_links.append(link)
29
30 data = {"book": book, "links": annotated_links}
31 return TemplateResponse(request, "book/file_links/edit_links.html", data)
32
33 def post(self, request, book_id, link_id):
34 """Edit a link"""
35 link = get_object_or_404(models.FileLink, id=link_id, book=book_id)
36 form = forms.FileLinkForm(request.POST, instance=link)
37 form.save(request)
38 return self.get(request, book_id)
39
40
41 @require_POST
42 @login_required
43 # pylint: disable=unused-argument
44 def delete_link(request, book_id, link_id):
45 """delete link"""
46 link = get_object_or_404(models.FileLink, id=link_id, book=book_id)
47 link.delete()
48 return redirect("file-link", book_id)
49
50
51 @method_decorator(login_required, name="dispatch")
52 @method_decorator(
53 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch"
54 )
55 class AddFileLink(View):
56 """a book! this is the stuff"""
57
58 def get(self, request, book_id):
59 """Create link form"""
60 book = get_object_or_404(models.Edition, id=book_id)
61 data = {
62 "file_link_form": forms.FileLinkForm(),
63 "book": book,
64 }
65 return TemplateResponse(request, "book/file_links/file_link_page.html", data)
66
67 @transaction.atomic
68 def post(self, request, book_id, link_id=None):
69 """Add a link to a copy of the book you can read"""
70 book = get_object_or_404(models.Book.objects.select_subclasses(), id=book_id)
71 link = get_object_or_404(models.FileLink, id=link_id) if link_id else None
72 form = forms.FileLinkForm(request.POST, instance=link)
73 if not form.is_valid():
74 data = {"file_link_form": form, "book": book}
75 return TemplateResponse(
76 request, "book/file_links/file_link_page.html", data
77 )
78
79 link = form.save(request)
80 book.file_links.add(link)
81 book.last_edited_by = request.user
82 book.save()
83 return redirect("book", book.id)
84
[end of bookwyrm/views/books/links.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/forms/links.py b/bookwyrm/forms/links.py
--- a/bookwyrm/forms/links.py
+++ b/bookwyrm/forms/links.py
@@ -36,13 +36,16 @@
"This domain is blocked. Please contact your administrator if you think this is an error."
),
)
- elif models.FileLink.objects.filter(
+ if (
+ not self.instance
+ and models.FileLink.objects.filter(
url=url, book=book, filetype=filetype
- ).exists():
- # pylint: disable=line-too-long
- self.add_error(
- "url",
- _(
- "This link with file type has already been added for this book. If it is not visible, the domain is still pending."
- ),
- )
+ ).exists()
+ ):
+ # pylint: disable=line-too-long
+ self.add_error(
+ "url",
+ _(
+ "This link with file type has already been added for this book. If it is not visible, the domain is still pending."
+ ),
+ )
diff --git a/bookwyrm/views/books/links.py b/bookwyrm/views/books/links.py
--- a/bookwyrm/views/books/links.py
+++ b/bookwyrm/views/books/links.py
@@ -21,11 +21,7 @@
def get(self, request, book_id):
"""view links"""
book = get_object_or_404(models.Edition, id=book_id)
- links = book.file_links.order_by("domain__status", "created_date")
- annotated_links = []
- for link in links.all():
- link.form = forms.FileLinkForm(instance=link)
- annotated_links.append(link)
+ annotated_links = get_annotated_links(book)
data = {"book": book, "links": annotated_links}
return TemplateResponse(request, "book/file_links/edit_links.html", data)
@@ -34,8 +30,30 @@
"""Edit a link"""
link = get_object_or_404(models.FileLink, id=link_id, book=book_id)
form = forms.FileLinkForm(request.POST, instance=link)
- form.save(request)
- return self.get(request, book_id)
+ if form.is_valid():
+ form.save(request)
+ return redirect("file-link", book_id)
+
+ # this form shouldn't ever really get here, since it's just a dropdown
+ # get the data again rather than redirecting
+ book = get_object_or_404(models.Edition, id=book_id)
+ annotated_links = get_annotated_links(book, form=form)
+
+ data = {"book": book, "links": annotated_links}
+ return TemplateResponse(request, "book/file_links/edit_links.html", data)
+
+
+def get_annotated_links(book, form=None):
+ """The links for this book, plus the forms to edit those links"""
+ links = book.file_links.order_by("domain__status", "created_date")
+ annotated_links = []
+ for link in links.all():
+ if form and link.id == form.instance.id:
+ link.form = form
+ else:
+ link.form = forms.FileLinkForm(instance=link)
+ annotated_links.append(link)
+ return annotated_links
@require_POST
|
{"golden_diff": "diff --git a/bookwyrm/forms/links.py b/bookwyrm/forms/links.py\n--- a/bookwyrm/forms/links.py\n+++ b/bookwyrm/forms/links.py\n@@ -36,13 +36,16 @@\n \"This domain is blocked. Please contact your administrator if you think this is an error.\"\n ),\n )\n- elif models.FileLink.objects.filter(\n+ if (\n+ not self.instance\n+ and models.FileLink.objects.filter(\n url=url, book=book, filetype=filetype\n- ).exists():\n- # pylint: disable=line-too-long\n- self.add_error(\n- \"url\",\n- _(\n- \"This link with file type has already been added for this book. If it is not visible, the domain is still pending.\"\n- ),\n- )\n+ ).exists()\n+ ):\n+ # pylint: disable=line-too-long\n+ self.add_error(\n+ \"url\",\n+ _(\n+ \"This link with file type has already been added for this book. If it is not visible, the domain is still pending.\"\n+ ),\n+ )\ndiff --git a/bookwyrm/views/books/links.py b/bookwyrm/views/books/links.py\n--- a/bookwyrm/views/books/links.py\n+++ b/bookwyrm/views/books/links.py\n@@ -21,11 +21,7 @@\n def get(self, request, book_id):\n \"\"\"view links\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n- links = book.file_links.order_by(\"domain__status\", \"created_date\")\n- annotated_links = []\n- for link in links.all():\n- link.form = forms.FileLinkForm(instance=link)\n- annotated_links.append(link)\n+ annotated_links = get_annotated_links(book)\n \n data = {\"book\": book, \"links\": annotated_links}\n return TemplateResponse(request, \"book/file_links/edit_links.html\", data)\n@@ -34,8 +30,30 @@\n \"\"\"Edit a link\"\"\"\n link = get_object_or_404(models.FileLink, id=link_id, book=book_id)\n form = forms.FileLinkForm(request.POST, instance=link)\n- form.save(request)\n- return self.get(request, book_id)\n+ if form.is_valid():\n+ form.save(request)\n+ return redirect(\"file-link\", book_id)\n+\n+ # this form shouldn't ever really get here, since it's just a dropdown\n+ # get the data again rather than redirecting\n+ book = get_object_or_404(models.Edition, id=book_id)\n+ annotated_links = get_annotated_links(book, form=form)\n+\n+ data = {\"book\": book, \"links\": annotated_links}\n+ return TemplateResponse(request, \"book/file_links/edit_links.html\", data)\n+\n+\n+def get_annotated_links(book, form=None):\n+ \"\"\"The links for this book, plus the forms to edit those links\"\"\"\n+ links = book.file_links.order_by(\"domain__status\", \"created_date\")\n+ annotated_links = []\n+ for link in links.all():\n+ if form and link.id == form.instance.id:\n+ link.form = form\n+ else:\n+ link.form = forms.FileLinkForm(instance=link)\n+ annotated_links.append(link)\n+ return annotated_links\n \n \n @require_POST\n", "issue": "Can not change link from free to lendable\n**Describe the bug**\r\nI tried to correct a link on a book from \"free\" to lendable in the tinkliest. This throws a 500 error. 
I get this form the logs\r\n\r\n```equest, *callback_args, **callback_kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/views/generic/base.py\", line 70, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/utils/decorators.py\", line 43, in _wrapper\r\n return bound_method(*args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/utils/decorators.py\", line 43, in _wrapper\r\n return bound_method(*args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/views/generic/base.py\", line 98, in dispatch\r\n return handler(request, *args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/bookwyrm/views/books/links.py\", line 37, in post\r\n form.save(request)\r\n File \"/home/wyrm/bookwyrm/bookwyrm/forms/custom_form.py\", line 36, in save\r\n return super().save(*args, **kwargs)\r\n File \"/home/wyrm/bookwyrm/venv/lib64/python3.9/site-packages/django/forms/models.py\", line 460, in save\r\n raise ValueError(\r\nValueError: The FileLink could not be changed because the data didn't validate.\r\n```\r\n**To Reproduce**\r\nGo to Link list of a book\r\nChange link action fromm anything to anything\r\n\r\n**Expected behavior**\r\nLink is saved. \r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Instance**\r\nwyrm.jascha.wtf, version 5.0.1\r\nlocal instance on main branch\r\n\r\n\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nfrom urllib.parse import urlparse\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\nfrom .custom_form import CustomForm\n\n\n# pylint: disable=missing-class-docstring\nclass LinkDomainForm(CustomForm):\n class Meta:\n model = models.LinkDomain\n fields = [\"name\"]\n\n\nclass FileLinkForm(CustomForm):\n class Meta:\n model = models.FileLink\n fields = [\"url\", \"filetype\", \"availability\", \"book\", \"added_by\"]\n\n def clean(self):\n \"\"\"make sure the domain isn't blocked or pending\"\"\"\n cleaned_data = super().clean()\n url = cleaned_data.get(\"url\")\n filetype = cleaned_data.get(\"filetype\")\n book = cleaned_data.get(\"book\")\n domain = urlparse(url).netloc\n if models.LinkDomain.objects.filter(domain=domain).exists():\n status = models.LinkDomain.objects.get(domain=domain).status\n if status == \"blocked\":\n # pylint: disable=line-too-long\n self.add_error(\n \"url\",\n _(\n \"This domain is blocked. Please contact your administrator if you think this is an error.\"\n ),\n )\n elif models.FileLink.objects.filter(\n url=url, book=book, filetype=filetype\n ).exists():\n # pylint: disable=line-too-long\n self.add_error(\n \"url\",\n _(\n \"This link with file type has already been added for this book. If it is not visible, the domain is still pending.\"\n ),\n )\n", "path": "bookwyrm/forms/links.py"}, {"content": "\"\"\" the good stuff! the books! 
\"\"\"\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_POST\n\nfrom bookwyrm import forms, models\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass BookFileLinks(View):\n \"\"\"View all links\"\"\"\n\n def get(self, request, book_id):\n \"\"\"view links\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n links = book.file_links.order_by(\"domain__status\", \"created_date\")\n annotated_links = []\n for link in links.all():\n link.form = forms.FileLinkForm(instance=link)\n annotated_links.append(link)\n\n data = {\"book\": book, \"links\": annotated_links}\n return TemplateResponse(request, \"book/file_links/edit_links.html\", data)\n\n def post(self, request, book_id, link_id):\n \"\"\"Edit a link\"\"\"\n link = get_object_or_404(models.FileLink, id=link_id, book=book_id)\n form = forms.FileLinkForm(request.POST, instance=link)\n form.save(request)\n return self.get(request, book_id)\n\n\n@require_POST\n@login_required\n# pylint: disable=unused-argument\ndef delete_link(request, book_id, link_id):\n \"\"\"delete link\"\"\"\n link = get_object_or_404(models.FileLink, id=link_id, book=book_id)\n link.delete()\n return redirect(\"file-link\", book_id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass AddFileLink(View):\n \"\"\"a book! this is the stuff\"\"\"\n\n def get(self, request, book_id):\n \"\"\"Create link form\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n data = {\n \"file_link_form\": forms.FileLinkForm(),\n \"book\": book,\n }\n return TemplateResponse(request, \"book/file_links/file_link_page.html\", data)\n\n @transaction.atomic\n def post(self, request, book_id, link_id=None):\n \"\"\"Add a link to a copy of the book you can read\"\"\"\n book = get_object_or_404(models.Book.objects.select_subclasses(), id=book_id)\n link = get_object_or_404(models.FileLink, id=link_id) if link_id else None\n form = forms.FileLinkForm(request.POST, instance=link)\n if not form.is_valid():\n data = {\"file_link_form\": form, \"book\": book}\n return TemplateResponse(\n request, \"book/file_links/file_link_page.html\", data\n )\n\n link = form.save(request)\n book.file_links.add(link)\n book.last_edited_by = request.user\n book.save()\n return redirect(\"book\", book.id)\n", "path": "bookwyrm/views/books/links.py"}]}
| 2,413 | 748 |
gh_patches_debug_5195
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-384
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect render patching in pyramid.
https://github.com/DataDog/dd-trace-py/blob/261136e112b23862a78308a2423e15364ae4aaa6/ddtrace/contrib/pyramid/trace.py#L31
Here we're removing `request` from kwargs, but pyramid's `render` has a `request` kwarg, so we need to keep it.
</issue>
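The difference comes down to `dict.pop` versus `dict.get`: `pop` removes `request` from `kwargs`, so the wrapped render is then called without the keyword it expects. A minimal illustration follows; the `render` stub and wrapper names below are simplified stand-ins, not the ddtrace or pyramid code.

```python
# pop() removes the key, so the inner function never sees request=...;
# get() reads it while leaving kwargs intact for the wrapped call.
def render(value, system=None, request=None):
    return "rendered for %r" % (request,)


def broken_wrapper(func, *args, **kwargs):
    request = kwargs.pop("request", {})   # request is now gone from kwargs
    # (the real wrapper uses `request` to look up the active span)
    return func(*args, **kwargs)          # inner render gets request=None


def fixed_wrapper(func, *args, **kwargs):
    request = kwargs.get("request", {})   # read it, but keep it in kwargs
    return func(*args, **kwargs)          # inner render still gets request


print(broken_wrapper(render, "value", request="req-1"))  # rendered for None
print(fixed_wrapper(render, "value", request="req-1"))   # rendered for 'req-1'
```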
<code>
[start of ddtrace/contrib/pyramid/trace.py]
1
2 # 3p
3 import logging
4 import pyramid.renderers
5 from pyramid.settings import asbool
6 import wrapt
7
8 # project
9 import ddtrace
10 from ...ext import http, AppTypes
11 from .constants import SETTINGS_SERVICE, SETTINGS_TRACE_ENABLED, SETTINGS_TRACER
12
13 log = logging.getLogger(__name__)
14
15 DD_TWEEN_NAME = 'ddtrace.contrib.pyramid:trace_tween_factory'
16 DD_SPAN = '_datadog_span'
17
18 def trace_pyramid(config):
19 config.include('ddtrace.contrib.pyramid')
20
21 def includeme(config):
22 # Add our tween just before the default exception handler
23 config.add_tween(DD_TWEEN_NAME, over=pyramid.tweens.EXCVIEW)
24 # ensure we only patch the renderer once.
25 if not isinstance(pyramid.renderers.RendererHelper.render, wrapt.ObjectProxy):
26 wrapt.wrap_function_wrapper('pyramid.renderers', 'RendererHelper.render', trace_render)
27
28
29 def trace_render(func, instance, args, kwargs):
30 # If the request is not traced, we do not trace
31 request = kwargs.pop('request', {})
32 if not request:
33 log.debug("No request passed to render, will not be traced")
34 return func(*args, **kwargs)
35 span = getattr(request, DD_SPAN, None)
36 if not span:
37 log.debug("No span found in request, will not be traced")
38 return func(*args, **kwargs)
39
40 tracer = span.tracer()
41 with tracer.trace('pyramid.render') as span:
42 span.span_type = http.TEMPLATE
43 return func(*args, **kwargs)
44
45 def trace_tween_factory(handler, registry):
46 # configuration
47 settings = registry.settings
48 service = settings.get(SETTINGS_SERVICE) or 'pyramid'
49 tracer = settings.get(SETTINGS_TRACER) or ddtrace.tracer
50 enabled = asbool(settings.get(SETTINGS_TRACE_ENABLED, tracer.enabled))
51
52 # set the service info
53 tracer.set_service_info(
54 service=service,
55 app="pyramid",
56 app_type=AppTypes.web)
57
58 if enabled:
59 # make a request tracing function
60 def trace_tween(request):
61 with tracer.trace('pyramid.request', service=service, resource='404') as span:
62 setattr(request, DD_SPAN, span) # used to find the tracer in templates
63 response = None
64 try:
65 response = handler(request)
66 except BaseException:
67 span.set_tag(http.STATUS_CODE, 500)
68 raise
69 finally:
70 span.span_type = http.TYPE
71 # set request tags
72 span.set_tag(http.URL, request.path)
73 span.set_tag(http.METHOD, request.method)
74 if request.matched_route:
75 span.resource = '{} {}'.format(request.method, request.matched_route.name)
76 span.set_tag('pyramid.route.name', request.matched_route.name)
77 # set response tags
78 if response:
79 span.set_tag(http.STATUS_CODE, response.status_code)
80 if 500 <= response.status_code < 600:
81 span.error = 1
82 return response
83 return trace_tween
84
85 # if timing support is not enabled, return the original handler
86 return handler
87
[end of ddtrace/contrib/pyramid/trace.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/pyramid/trace.py b/ddtrace/contrib/pyramid/trace.py
--- a/ddtrace/contrib/pyramid/trace.py
+++ b/ddtrace/contrib/pyramid/trace.py
@@ -28,7 +28,7 @@
def trace_render(func, instance, args, kwargs):
# If the request is not traced, we do not trace
- request = kwargs.pop('request', {})
+ request = kwargs.get('request', {})
if not request:
log.debug("No request passed to render, will not be traced")
return func(*args, **kwargs)
|
{"golden_diff": "diff --git a/ddtrace/contrib/pyramid/trace.py b/ddtrace/contrib/pyramid/trace.py\n--- a/ddtrace/contrib/pyramid/trace.py\n+++ b/ddtrace/contrib/pyramid/trace.py\n@@ -28,7 +28,7 @@\n \n def trace_render(func, instance, args, kwargs):\n # If the request is not traced, we do not trace\n- request = kwargs.pop('request', {})\n+ request = kwargs.get('request', {})\n if not request:\n log.debug(\"No request passed to render, will not be traced\")\n return func(*args, **kwargs)\n", "issue": "Incorrect render patching in pyramid.\nhttps://github.com/DataDog/dd-trace-py/blob/261136e112b23862a78308a2423e15364ae4aaa6/ddtrace/contrib/pyramid/trace.py#L31\r\n\r\nHere we're removing request from kwargs but pyramid's render has a request kwarg so we need to keep it.\n", "before_files": [{"content": "\n# 3p\nimport logging\nimport pyramid.renderers\nfrom pyramid.settings import asbool\nimport wrapt\n\n# project\nimport ddtrace\nfrom ...ext import http, AppTypes\nfrom .constants import SETTINGS_SERVICE, SETTINGS_TRACE_ENABLED, SETTINGS_TRACER\n\nlog = logging.getLogger(__name__)\n\nDD_TWEEN_NAME = 'ddtrace.contrib.pyramid:trace_tween_factory'\nDD_SPAN = '_datadog_span'\n\ndef trace_pyramid(config):\n config.include('ddtrace.contrib.pyramid')\n\ndef includeme(config):\n # Add our tween just before the default exception handler\n config.add_tween(DD_TWEEN_NAME, over=pyramid.tweens.EXCVIEW)\n # ensure we only patch the renderer once.\n if not isinstance(pyramid.renderers.RendererHelper.render, wrapt.ObjectProxy):\n wrapt.wrap_function_wrapper('pyramid.renderers', 'RendererHelper.render', trace_render)\n\n\ndef trace_render(func, instance, args, kwargs):\n # If the request is not traced, we do not trace\n request = kwargs.pop('request', {})\n if not request:\n log.debug(\"No request passed to render, will not be traced\")\n return func(*args, **kwargs)\n span = getattr(request, DD_SPAN, None)\n if not span:\n log.debug(\"No span found in request, will not be traced\")\n return func(*args, **kwargs)\n\n tracer = span.tracer()\n with tracer.trace('pyramid.render') as span:\n span.span_type = http.TEMPLATE\n return func(*args, **kwargs)\n\ndef trace_tween_factory(handler, registry):\n # configuration\n settings = registry.settings\n service = settings.get(SETTINGS_SERVICE) or 'pyramid'\n tracer = settings.get(SETTINGS_TRACER) or ddtrace.tracer\n enabled = asbool(settings.get(SETTINGS_TRACE_ENABLED, tracer.enabled))\n\n # set the service info\n tracer.set_service_info(\n service=service,\n app=\"pyramid\",\n app_type=AppTypes.web)\n\n if enabled:\n # make a request tracing function\n def trace_tween(request):\n with tracer.trace('pyramid.request', service=service, resource='404') as span:\n setattr(request, DD_SPAN, span) # used to find the tracer in templates\n response = None\n try:\n response = handler(request)\n except BaseException:\n span.set_tag(http.STATUS_CODE, 500)\n raise\n finally:\n span.span_type = http.TYPE\n # set request tags\n span.set_tag(http.URL, request.path)\n span.set_tag(http.METHOD, request.method)\n if request.matched_route:\n span.resource = '{} {}'.format(request.method, request.matched_route.name)\n span.set_tag('pyramid.route.name', request.matched_route.name)\n # set response tags\n if response:\n span.set_tag(http.STATUS_CODE, response.status_code)\n if 500 <= response.status_code < 600:\n span.error = 1\n return response\n return trace_tween\n\n # if timing support is not enabled, return the original handler\n return handler\n", "path": 
"ddtrace/contrib/pyramid/trace.py"}]}
| 1,492 | 135 |
gh_patches_debug_20471
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-2745
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect types for patch() and patch_all() keyword arguments
### Which version of dd-trace-py are you using?
0.51.1
### Which version of pip are you using?
20.2.3
### Which version of the libraries are you using?
fastapi==0.63.0
### How can we reproduce your problem?
I have this line of code: ` ddtrace.patch(fastapi=True)`
And then in the command line I run `mypy project_dir`
### What is the result that you get?
`error: Argument "fastapi" to "patch" has incompatible type "bool"; expected "Dict[str, bool]"`
### What is the result that you expected?
No type errors
I believe this is because the type of the `patch_modules` parameter should be just `bool`, not `Dict[str, bool]`, because it is a keyword-arguments parameter (declared with `**`) in these two functions. See: https://www.python.org/dev/peps/pep-0484/#arbitrary-argument-lists-and-default-argument-values
</issue>
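For reference, PEP 484 annotates a `**kwargs` parameter with the type of each value, not with a `Dict`. A short sketch of the distinction, using Python 3 inline annotations for brevity (the module itself uses type comments):

```python
# With **patch_modules, the annotation names the type of each keyword value,
# so callers may pass any number of bool keyword arguments. Annotating it as
# Dict[str, bool] would instead demand a single argument that is itself a
# dict, which is what produced the mypy error in the report.
def patch(raise_errors: bool = True, **patch_modules: bool) -> None:
    enabled = [name for name, on in patch_modules.items() if on]
    print("would patch:", ", ".join(enabled))


patch(fastapi=True, redis=False)  # accepted: each keyword value is a bool
```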
<code>
[start of ddtrace/monkey.py]
1 """Patch libraries to be automatically instrumented.
2
3 It can monkey patch supported standard libraries and third party modules.
4 A patched module will automatically report spans with its default configuration.
5
6 A library instrumentation can be configured (for instance, to report as another service)
7 using Pin. For that, check its documentation.
8 """
9 import importlib
10 import os
11 import sys
12 import threading
13 from typing import Any
14 from typing import Callable
15 from typing import Dict
16 from typing import List
17
18 from ddtrace.vendor.wrapt.importer import when_imported
19
20 from .internal.logger import get_logger
21 from .settings import _config as config
22 from .utils import formats
23 from .utils.deprecation import deprecated
24
25
26 log = get_logger(__name__)
27
28 # Default set of modules to automatically patch or not
29 PATCH_MODULES = {
30 "asyncio": True,
31 "boto": True,
32 "botocore": True,
33 "bottle": False,
34 "cassandra": True,
35 "celery": True,
36 "consul": True,
37 "django": True,
38 "elasticsearch": True,
39 "algoliasearch": True,
40 "futures": True,
41 "grpc": True,
42 "mongoengine": True,
43 "mysql": True,
44 "mysqldb": True,
45 "pymysql": True,
46 "mariadb": True,
47 "psycopg": True,
48 "pylibmc": True,
49 "pymemcache": True,
50 "pymongo": True,
51 "redis": True,
52 "rediscluster": True,
53 "requests": True,
54 "sanic": True,
55 "sqlalchemy": False, # Prefer DB client instrumentation
56 "sqlite3": True,
57 "aiohttp": True, # requires asyncio (Python 3.4+)
58 "aiopg": True,
59 "aiobotocore": False,
60 "httplib": False,
61 "urllib3": False,
62 "vertica": True,
63 "molten": True,
64 "jinja2": True,
65 "mako": True,
66 "flask": True,
67 "kombu": False,
68 "starlette": True,
69 # Ignore some web framework integrations that might be configured explicitly in code
70 "falcon": False,
71 "pylons": False,
72 "pyramid": False,
73 # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true
74 "logging": config.logs_injection,
75 "pynamodb": True,
76 "pyodbc": True,
77 "fastapi": True,
78 "dogpile_cache": True,
79 }
80
81 _LOCK = threading.Lock()
82 _PATCHED_MODULES = set()
83
84 # Modules which are patched on first use
85 # DEV: These modules are patched when the user first imports them, rather than
86 # explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`
87 # DEV: This ensures we do not patch a module until it is needed
88 # DEV: <contrib name> => <list of module names that trigger a patch>
89 _PATCH_ON_IMPORT = {
90 "aiohttp": ("aiohttp",),
91 "aiobotocore": ("aiobotocore",),
92 "celery": ("celery",),
93 "flask": ("flask",),
94 "gevent": ("gevent",),
95 "requests": ("requests",),
96 "botocore": ("botocore",),
97 "elasticsearch": (
98 "elasticsearch",
99 "elasticsearch2",
100 "elasticsearch5",
101 "elasticsearch6",
102 "elasticsearch7",
103 ),
104 "pynamodb": ("pynamodb",),
105 }
106
107
108 class PatchException(Exception):
109 """Wraps regular `Exception` class when patching modules"""
110
111 pass
112
113
114 class ModuleNotFoundException(PatchException):
115 pass
116
117
118 def _on_import_factory(module, raise_errors=True):
119 # type: (str, bool) -> Callable[[Any], None]
120 """Factory to create an import hook for the provided module name"""
121
122 def on_import(hook):
123 # Import and patch module
124 path = "ddtrace.contrib.%s" % module
125 try:
126 imported_module = importlib.import_module(path)
127 except ImportError:
128 if raise_errors:
129 raise
130 log.error("failed to import ddtrace module %r when patching on import", path, exc_info=True)
131 else:
132 imported_module.patch()
133
134 return on_import
135
136
137 def patch_all(**patch_modules):
138 # type: (Dict[str, bool]) -> None
139 """Automatically patches all available modules.
140
141 In addition to ``patch_modules``, an override can be specified via an
142 environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.
143
144 ``patch_modules`` have the highest precedence for overriding.
145
146 :param dict patch_modules: Override whether particular modules are patched or not.
147
148 >>> patch_all(redis=False, cassandra=False)
149 """
150 modules = PATCH_MODULES.copy()
151
152 # The enabled setting can be overridden by environment variables
153 for module, enabled in modules.items():
154 env_var = "DD_TRACE_%s_ENABLED" % module.upper()
155 if env_var not in os.environ:
156 continue
157
158 override_enabled = formats.asbool(os.environ[env_var])
159 modules[module] = override_enabled
160
161 # Arguments take precedence over the environment and the defaults.
162 modules.update(patch_modules)
163
164 patch(raise_errors=False, **modules)
165
166
167 def patch(raise_errors=True, **patch_modules):
168 # type: (bool, Dict[str, bool]) -> None
169 """Patch only a set of given modules.
170
171 :param bool raise_errors: Raise error if one patch fail.
172 :param dict patch_modules: List of modules to patch.
173
174 >>> patch(psycopg=True, elasticsearch=True)
175 """
176 modules = [m for (m, should_patch) in patch_modules.items() if should_patch]
177 for module in modules:
178 if module in _PATCH_ON_IMPORT:
179 modules_to_poi = _PATCH_ON_IMPORT[module]
180 for m in modules_to_poi:
181 # If the module has already been imported then patch immediately
182 if m in sys.modules:
183 _patch_module(module, raise_errors=raise_errors)
184 break
185 # Otherwise, add a hook to patch when it is imported for the first time
186 else:
187 # Use factory to create handler to close over `module` and `raise_errors` values from this loop
188 when_imported(m)(_on_import_factory(module, raise_errors))
189
190 # manually add module to patched modules
191 with _LOCK:
192 _PATCHED_MODULES.add(module)
193 else:
194 _patch_module(module, raise_errors=raise_errors)
195
196 patched_modules = _get_patched_modules()
197 log.info(
198 "patched %s/%s modules (%s)",
199 len(patched_modules),
200 len(modules),
201 ",".join(patched_modules),
202 )
203
204
205 @deprecated(
206 message="This function will be removed.",
207 version="1.0.0",
208 )
209 def patch_module(module, raise_errors=True):
210 # type: (str, bool) -> bool
211 return _patch_module(module, raise_errors=raise_errors)
212
213
214 def _patch_module(module, raise_errors=True):
215 # type: (str, bool) -> bool
216 """Patch a single module
217
218 Returns if the module got properly patched.
219 """
220 try:
221 return _attempt_patch_module(module)
222 except ModuleNotFoundException:
223 if raise_errors:
224 raise
225 return False
226 except Exception:
227 if raise_errors:
228 raise
229 log.debug("failed to patch %s", module, exc_info=True)
230 return False
231
232
233 @deprecated(
234 message="This function will be removed.",
235 version="1.0.0",
236 )
237 def get_patched_modules():
238 # type: () -> List[str]
239 return _get_patched_modules()
240
241
242 def _get_patched_modules():
243 # type: () -> List[str]
244 """Get the list of patched modules"""
245 with _LOCK:
246 return sorted(_PATCHED_MODULES)
247
248
249 def _attempt_patch_module(module):
250 # type: (str) -> bool
251 """_patch_module will attempt to monkey patch the module.
252
253 Returns if the module got patched.
254 Can also raise errors if it fails.
255 """
256 path = "ddtrace.contrib.%s" % module
257 with _LOCK:
258 if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:
259 log.debug("already patched: %s", path)
260 return False
261
262 try:
263 imported_module = importlib.import_module(path)
264 except ImportError:
265 # if the import fails, the integration is not available
266 raise PatchException("integration '%s' not available" % path)
267 else:
268 # if patch() is not available in the module, it means
269 # that the library is not installed in the environment
270 if not hasattr(imported_module, "patch"):
271 raise ModuleNotFoundException("module '%s' not installed" % module)
272
273 imported_module.patch() # type: ignore
274 _PATCHED_MODULES.add(module)
275 return True
276
[end of ddtrace/monkey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py
--- a/ddtrace/monkey.py
+++ b/ddtrace/monkey.py
@@ -12,7 +12,6 @@
import threading
from typing import Any
from typing import Callable
-from typing import Dict
from typing import List
from ddtrace.vendor.wrapt.importer import when_imported
@@ -135,7 +134,7 @@
def patch_all(**patch_modules):
- # type: (Dict[str, bool]) -> None
+ # type: (bool) -> None
"""Automatically patches all available modules.
In addition to ``patch_modules``, an override can be specified via an
@@ -165,7 +164,7 @@
def patch(raise_errors=True, **patch_modules):
- # type: (bool, Dict[str, bool]) -> None
+ # type: (bool, bool) -> None
"""Patch only a set of given modules.
:param bool raise_errors: Raise error if one patch fail.
|
{"golden_diff": "diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -12,7 +12,6 @@\n import threading\n from typing import Any\n from typing import Callable\n-from typing import Dict\n from typing import List\n \n from ddtrace.vendor.wrapt.importer import when_imported\n@@ -135,7 +134,7 @@\n \n \n def patch_all(**patch_modules):\n- # type: (Dict[str, bool]) -> None\n+ # type: (bool) -> None\n \"\"\"Automatically patches all available modules.\n \n In addition to ``patch_modules``, an override can be specified via an\n@@ -165,7 +164,7 @@\n \n \n def patch(raise_errors=True, **patch_modules):\n- # type: (bool, Dict[str, bool]) -> None\n+ # type: (bool, bool) -> None\n \"\"\"Patch only a set of given modules.\n \n :param bool raise_errors: Raise error if one patch fail.\n", "issue": "Incorrect types for patch() and patch_all() keyword arguments\n### Which version of dd-trace-py are you using?\r\n0.51.1\r\n\r\n### Which version of pip are you using?\r\n\r\n20.2.3\r\n\r\n### Which version of the libraries are you using?\r\n\r\nfastapi==0.63.0\r\n\r\n### How can we reproduce your problem?\r\n\r\nI have this line of code: ` ddtrace.patch(fastapi=True)`\r\n\r\nAnd then in the command line I run `mypy project_dir`\r\n\r\n### What is the result that you get?\r\n\r\n`error: Argument \"fastapi\" to \"patch\" has incompatible type \"bool\"; expected \"Dict[str, bool]\"`\r\n\r\n### What is the result that you expected?\r\n\r\nNo type errors\r\n\r\n\r\nI believe this is because the type of the patch_modules parameter should be just `bool` and not `Dict[str, bool]` because it is a keyword arguments parameter (declared with **) for these two functions. See: https://www.python.org/dev/peps/pep-0484/#arbitrary-argument-lists-and-default-argument-values \n", "before_files": [{"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. 
For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import _config as config\nfrom .utils import formats\nfrom .utils.deprecation import deprecated\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": True,\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"mariadb\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"urllib3\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n \"dogpile_cache\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask\",),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\n \"elasticsearch\",\n \"elasticsearch2\",\n \"elasticsearch5\",\n \"elasticsearch6\",\n \"elasticsearch7\",\n ),\n \"pynamodb\": (\"pynamodb\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n # type: (str, bool) -> Callable[[Any], None]\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n if raise_errors:\n raise\n log.error(\"failed to import ddtrace module %r when patching on import\", path, exc_info=True)\n else:\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n # type: (Dict[str, bool]) -> None\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, 
``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n # type: (bool, Dict[str, bool]) -> None\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n modules_to_poi = _PATCH_ON_IMPORT[module]\n for m in modules_to_poi:\n # If the module has already been imported then patch immediately\n if m in sys.modules:\n _patch_module(module, raise_errors=raise_errors)\n break\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(m)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n _patch_module(module, raise_errors=raise_errors)\n\n patched_modules = _get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\n@deprecated(\n message=\"This function will be removed.\",\n version=\"1.0.0\",\n)\ndef patch_module(module, raise_errors=True):\n # type: (str, bool) -> bool\n return _patch_module(module, raise_errors=raise_errors)\n\n\ndef _patch_module(module, raise_errors=True):\n # type: (str, bool) -> bool\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _attempt_patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\n@deprecated(\n message=\"This function will be removed.\",\n version=\"1.0.0\",\n)\ndef get_patched_modules():\n # type: () -> List[str]\n return _get_patched_modules()\n\n\ndef _get_patched_modules():\n # type: () -> List[str]\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _attempt_patch_module(module):\n # type: (str) -> bool\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not 
available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch() # type: ignore\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]}
| 3,466 | 233 |
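The ddtrace record above ends with that tracer's lazy patch-on-import logic: integrations whose trigger modules are already in `sys.modules` are patched immediately, and the rest get a hook that fires on their first import. A minimal sketch of that decision, assuming a hypothetical `register_import_hook(module_name, callback)` helper and a hypothetical `mytracer.contrib` package in place of the vendored `wrapt.importer.when_imported` machinery:

```python
import importlib
import sys

_PATCHED = set()

def patch_integration(name, trigger_modules, register_import_hook):
    """Patch integration `name` now if any trigger module is already imported,
    otherwise defer patching until one of them is imported for the first time."""
    def _do_patch(module=None):
        if name in _PATCHED:
            return
        contrib = importlib.import_module("mytracer.contrib.%s" % name)  # hypothetical package
        contrib.patch()
        _PATCHED.add(name)

    if any(m in sys.modules for m in trigger_modules):
        _do_patch()  # library already in use: patch right away
    else:
        for m in trigger_modules:
            register_import_hook(m, _do_patch)  # hypothetical helper: patch lazily on first import
```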
gh_patches_debug_27529
|
rasdani/github-patches
|
git_diff
|
deis__deis-163
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`deis destroy` did not destroy runtime layer in Opscode management console
On Friday, I performed a `deis destroy` on the NodeJS example application. I didn't check the Opscode Management Console to see if everything had closed down. On Monday, I opened up the Opscode console and the runtime layer was still there (screenshot below).

</issue>
<code>
[start of celerytasks/chef.py]
1 """
2 Much of this has been copied from pyChef.
3 https://github.com/coderanger/pychef
4
5 We want a simpler version for making API calls
6 """
7
8 import base64
9 import datetime
10 import hashlib
11 import httplib
12 import json
13 import re
14 import urlparse
15
16 from chef_rsa import Key
17
18
19 def ruby_b64encode(value):
20 """The Ruby function Base64.encode64 automatically breaks things up
21 into 60-character chunks.
22 """
23 b64 = base64.b64encode(value)
24 for i in xrange(0, len(b64), 60):
25 yield b64[i:i+60]
26
27
28 class UTC(datetime.tzinfo):
29 """UTC timezone stub."""
30
31 ZERO = datetime.timedelta(0)
32
33 def utcoffset(self, dt):
34 return self.ZERO
35
36 def tzname(self, dt):
37 return 'UTC'
38
39 def dst(self, dt):
40 return self.ZERO
41
42
43 utc = UTC()
44
45
46 def canonical_time(timestamp):
47 if timestamp.tzinfo is not None:
48 timestamp = timestamp.astimezone(utc).replace(tzinfo=None)
49 return timestamp.replace(microsecond=0).isoformat() + 'Z'
50
51
52 canonical_path_regex = re.compile(r'/+')
53
54
55 def canonical_path(path):
56 path = canonical_path_regex.sub('/', path)
57 if len(path) > 1:
58 path = path.rstrip('/')
59 return path
60
61
62 def canonical_request(http_method, path, hashed_body, timestamp, user_id):
63 # Canonicalize request parameters
64 http_method = http_method.upper()
65 path = canonical_path(path)
66 if isinstance(timestamp, datetime.datetime):
67 timestamp = canonical_time(timestamp)
68 hashed_path = sha1_base64(path)
69 return """\
70 Method:{}
71 Hashed Path:{}
72 X-Ops-Content-Hash:{}
73 X-Ops-Timestamp:{}
74 X-Ops-UserId:{}""".format(http_method, hashed_path, hashed_body, timestamp,
75 user_id)
76
77
78 def sha1_base64(value):
79 return '\n'.join(ruby_b64encode(hashlib.sha1(value).digest()))
80
81
82 def create_authorization(blank_headers, verb, url, priv_key, user, body=''):
83 headers = blank_headers.copy()
84 rsa_key = Key(fp=priv_key)
85 timestamp = canonical_time(datetime.datetime.utcnow())
86 hashed_body = sha1_base64(body)
87
88 canon = canonical_request(verb, url, hashed_body, timestamp, user)
89 b64_priv = ruby_b64encode(rsa_key.private_encrypt(canon))
90
91 for i, line in enumerate(b64_priv):
92 headers['X-Ops-Authorization-' + str(i + 1)] = line
93
94 headers['X-Ops-Timestamp'] = timestamp
95 headers['X-Ops-Content-Hash'] = hashed_body
96 headers['X-Ops-UserId'] = user
97 return headers
98
99
100 class ChefAPI(object):
101
102 headers = {
103 'Accept': 'application/json',
104 'X-Chef-Version': '11.0.4.x',
105 'X-Ops-Sign': 'version=1.0',
106 'Content-Type': 'application/json'
107 }
108
109 def __init__(self, server_url, client_name, client_key):
110 self.server_url = server_url
111 self.client_name = client_name
112 self.client_key = client_key
113 self.hostname = urlparse.urlsplit(self.server_url).netloc
114 self.path = urlparse.urlsplit(self.server_url).path
115 self.headers.update({'Host': self.hostname})
116 self.conn = httplib.HTTPSConnection(self.hostname)
117 self.conn.connect()
118
119 def request(self, verb, path, body=''):
120 url = self.path + path
121 headers = create_authorization(
122 self.headers, verb, url, self.client_key, self.client_name, body)
123 self.conn.request(verb, url, body=body, headers=headers)
124 resp = self.conn.getresponse()
125 return resp.read(), resp.status
126
127 def create_databag(self, name):
128 body = json.dumps({'name': name, 'id': name})
129 resp = self.request('POST', '/data', body)
130 return resp
131
132 def create_databag_item(self, name, item_name, item_value):
133 item_dict = {'id': item_name}
134 item_dict.update(item_value)
135 body = json.dumps(item_dict)
136 resp = self.request('POST', '/data/%s' % name, body)
137 return resp
138
139 def get_databag(self, bag_name):
140 return self.request('GET', '/data/%s' % bag_name)
141
142 def delete_databag(self, bag_name):
143 return self.request('DELETE', '/data/%s' % bag_name)
144
145 def delete_databag_item(self, bag_name, item_name):
146 return self.request('DELETE', '/data/%s/%s' % (bag_name, item_name))
147
148 def update_databag_item(self, bag_name, item_name, item_value):
149 body = json.dumps(item_value)
150 return self.request('PUT', '/data/%s/%s' % (bag_name, item_name), body)
151
152 def get_databag_item(self, bag_name, item_name):
153 return self.request('GET', '/data/%s/%s' % (bag_name, item_name))
154
155 def get_all_cookbooks(self):
156 return self.request('GET', '/cookbooks')
157
158 def get_node(self, node_id):
159 return self.request('GET', '/nodes/%s' % node_id)
160
161 def delete_node(self, node_id):
162 return self.request('DELETE', '/nodes/%s' % node_id)
163
164 def delete_client(self, client_id):
165 return self.request('DELETE', '/clients/%s' % client_id)
166
167 # def create_cookbook(self, cookbook_name, cookbooks, priv_key, user, org):
168 # checksums = {}
169 # by_cb = {}
170 # first = None
171 # for c in cookbooks:
172 # json_cb = json.dumps(c)
173 # first = json_cb
174 # hasher = hashlib.md5()
175 # hasher.update(json_cb)
176 # check = hasher.hexdigest()
177 # checksums[check] = None
178 # by_cb[c['name']] = check
179 # body = json.dumps({'checksums': checksums})
180 # sandbox = json.loads(self.request('POST', '/sandboxes'))
181 # print 'Sandbox is ', sandbox
182 # for k, v in sandbox['checksums'].items():
183 # print 'URL ', v
184 # if 'url' in v:
185 # print 'Trigger it ', self.request(
186 # 'PUT', v['url'][25:], json_cb, priv_key, user)
187 #
188 # print 'Mark as uploaded ', self.request(
189 # 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,
190 # user)
191 # print 'Mark as uploaded ', self.request(
192 # 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,
193 # user)
194 # print 'Mark as uploaded ', self.request(
195 # 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,
196 # user)
197 # print 'Mark as uploaded ', self.request(
198 # 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,
199 # user)
200 #
201 # for c in cookbooks:
202 # c['definitions'] = [{
203 # 'name': 'unicorn_config.rb',
204 # 'checksum': by_cb[c['name']],
205 # 'path': 'definitions/unicorn_config.rb',
206 # 'specificity': 'default'
207 # }],
208 # return self.request('PUT', '/organizations/%s/cookbooks/%s/1' %
209 # (org, cookbook_name), body, priv_key, user)
210 #
211 # @task(name='chef.update_data_bag_item')
212 # def update_data_bag_item(conn_info, bag_name, item_name, item_value):
213 # client = ChefAPI(conn_info['server_url'],
214 # conn_info['client_name'],
215 # conn_info['client_key'],
216 # conn_info['organization'])
217 # client.update_databag_item(bag_name, item_name, item_value)
218
[end of celerytasks/chef.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/celerytasks/chef.py b/celerytasks/chef.py
--- a/celerytasks/chef.py
+++ b/celerytasks/chef.py
@@ -11,6 +11,7 @@
import httplib
import json
import re
+import time
import urlparse
from chef_rsa import Key
@@ -22,7 +23,7 @@
"""
b64 = base64.b64encode(value)
for i in xrange(0, len(b64), 60):
- yield b64[i:i+60]
+ yield b64[i:i + 60]
class UTC(datetime.tzinfo):
@@ -116,12 +117,20 @@
self.conn = httplib.HTTPSConnection(self.hostname)
self.conn.connect()
- def request(self, verb, path, body=''):
+ def request(self, verb, path, body='', attempts=5, interval=5):
url = self.path + path
headers = create_authorization(
self.headers, verb, url, self.client_key, self.client_name, body)
- self.conn.request(verb, url, body=body, headers=headers)
- resp = self.conn.getresponse()
+ # retry all chef api requests
+ for _ in range(attempts):
+ self.conn.request(verb, url, body=body, headers=headers)
+ resp = self.conn.getresponse()
+ if resp.status != 500:
+ break
+ time.sleep(interval)
+ else:
+ errmsg = 'Chef API requests failed: {}'.format(path)
+ raise RuntimeError(errmsg)
return resp.read(), resp.status
def create_databag(self, name):
|
{"golden_diff": "diff --git a/celerytasks/chef.py b/celerytasks/chef.py\n--- a/celerytasks/chef.py\n+++ b/celerytasks/chef.py\n@@ -11,6 +11,7 @@\n import httplib\n import json\n import re\n+import time\n import urlparse\n \n from chef_rsa import Key\n@@ -22,7 +23,7 @@\n \"\"\"\n b64 = base64.b64encode(value)\n for i in xrange(0, len(b64), 60):\n- yield b64[i:i+60]\n+ yield b64[i:i + 60]\n \n \n class UTC(datetime.tzinfo):\n@@ -116,12 +117,20 @@\n self.conn = httplib.HTTPSConnection(self.hostname)\n self.conn.connect()\n \n- def request(self, verb, path, body=''):\n+ def request(self, verb, path, body='', attempts=5, interval=5):\n url = self.path + path\n headers = create_authorization(\n self.headers, verb, url, self.client_key, self.client_name, body)\n- self.conn.request(verb, url, body=body, headers=headers)\n- resp = self.conn.getresponse()\n+ # retry all chef api requests\n+ for _ in range(attempts):\n+ self.conn.request(verb, url, body=body, headers=headers)\n+ resp = self.conn.getresponse()\n+ if resp.status != 500:\n+ break\n+ time.sleep(interval)\n+ else:\n+ errmsg = 'Chef API requests failed: {}'.format(path)\n+ raise RuntimeError(errmsg)\n return resp.read(), resp.status\n \n def create_databag(self, name):\n", "issue": "`deis destroy` did not destroy runtime layer in Opscode management console\nOn Friday, I performed a `deis destroy` on the NodeJS example application. I didn't check the Opscode Management Console to see if everything had closed down. On Monday, I opened up the Opscode console and the runtime layer was still there (screenshot below).\n\n\n\n", "before_files": [{"content": "\"\"\"\nMuch of this has been copied from pyChef.\nhttps://github.com/coderanger/pychef\n\nWe want a simpler version for making API calls\n\"\"\"\n\nimport base64\nimport datetime\nimport hashlib\nimport httplib\nimport json\nimport re\nimport urlparse\n\nfrom chef_rsa import Key\n\n\ndef ruby_b64encode(value):\n \"\"\"The Ruby function Base64.encode64 automatically breaks things up\n into 60-character chunks.\n \"\"\"\n b64 = base64.b64encode(value)\n for i in xrange(0, len(b64), 60):\n yield b64[i:i+60]\n\n\nclass UTC(datetime.tzinfo):\n \"\"\"UTC timezone stub.\"\"\"\n\n ZERO = datetime.timedelta(0)\n\n def utcoffset(self, dt):\n return self.ZERO\n\n def tzname(self, dt):\n return 'UTC'\n\n def dst(self, dt):\n return self.ZERO\n\n\nutc = UTC()\n\n\ndef canonical_time(timestamp):\n if timestamp.tzinfo is not None:\n timestamp = timestamp.astimezone(utc).replace(tzinfo=None)\n return timestamp.replace(microsecond=0).isoformat() + 'Z'\n\n\ncanonical_path_regex = re.compile(r'/+')\n\n\ndef canonical_path(path):\n path = canonical_path_regex.sub('/', path)\n if len(path) > 1:\n path = path.rstrip('/')\n return path\n\n\ndef canonical_request(http_method, path, hashed_body, timestamp, user_id):\n # Canonicalize request parameters\n http_method = http_method.upper()\n path = canonical_path(path)\n if isinstance(timestamp, datetime.datetime):\n timestamp = canonical_time(timestamp)\n hashed_path = sha1_base64(path)\n return \"\"\"\\\nMethod:{}\nHashed Path:{}\nX-Ops-Content-Hash:{}\nX-Ops-Timestamp:{}\nX-Ops-UserId:{}\"\"\".format(http_method, hashed_path, hashed_body, timestamp,\n user_id)\n\n\ndef sha1_base64(value):\n return '\\n'.join(ruby_b64encode(hashlib.sha1(value).digest()))\n\n\ndef create_authorization(blank_headers, verb, url, priv_key, user, body=''):\n headers = blank_headers.copy()\n rsa_key = Key(fp=priv_key)\n timestamp = canonical_time(datetime.datetime.utcnow())\n hashed_body = 
sha1_base64(body)\n\n canon = canonical_request(verb, url, hashed_body, timestamp, user)\n b64_priv = ruby_b64encode(rsa_key.private_encrypt(canon))\n\n for i, line in enumerate(b64_priv):\n headers['X-Ops-Authorization-' + str(i + 1)] = line\n\n headers['X-Ops-Timestamp'] = timestamp\n headers['X-Ops-Content-Hash'] = hashed_body\n headers['X-Ops-UserId'] = user\n return headers\n\n\nclass ChefAPI(object):\n\n headers = {\n 'Accept': 'application/json',\n 'X-Chef-Version': '11.0.4.x',\n 'X-Ops-Sign': 'version=1.0',\n 'Content-Type': 'application/json'\n }\n\n def __init__(self, server_url, client_name, client_key):\n self.server_url = server_url\n self.client_name = client_name\n self.client_key = client_key\n self.hostname = urlparse.urlsplit(self.server_url).netloc\n self.path = urlparse.urlsplit(self.server_url).path\n self.headers.update({'Host': self.hostname})\n self.conn = httplib.HTTPSConnection(self.hostname)\n self.conn.connect()\n\n def request(self, verb, path, body=''):\n url = self.path + path\n headers = create_authorization(\n self.headers, verb, url, self.client_key, self.client_name, body)\n self.conn.request(verb, url, body=body, headers=headers)\n resp = self.conn.getresponse()\n return resp.read(), resp.status\n\n def create_databag(self, name):\n body = json.dumps({'name': name, 'id': name})\n resp = self.request('POST', '/data', body)\n return resp\n\n def create_databag_item(self, name, item_name, item_value):\n item_dict = {'id': item_name}\n item_dict.update(item_value)\n body = json.dumps(item_dict)\n resp = self.request('POST', '/data/%s' % name, body)\n return resp\n\n def get_databag(self, bag_name):\n return self.request('GET', '/data/%s' % bag_name)\n\n def delete_databag(self, bag_name):\n return self.request('DELETE', '/data/%s' % bag_name)\n\n def delete_databag_item(self, bag_name, item_name):\n return self.request('DELETE', '/data/%s/%s' % (bag_name, item_name))\n\n def update_databag_item(self, bag_name, item_name, item_value):\n body = json.dumps(item_value)\n return self.request('PUT', '/data/%s/%s' % (bag_name, item_name), body)\n\n def get_databag_item(self, bag_name, item_name):\n return self.request('GET', '/data/%s/%s' % (bag_name, item_name))\n\n def get_all_cookbooks(self):\n return self.request('GET', '/cookbooks')\n\n def get_node(self, node_id):\n return self.request('GET', '/nodes/%s' % node_id)\n\n def delete_node(self, node_id):\n return self.request('DELETE', '/nodes/%s' % node_id)\n\n def delete_client(self, client_id):\n return self.request('DELETE', '/clients/%s' % client_id)\n\n# def create_cookbook(self, cookbook_name, cookbooks, priv_key, user, org):\n# checksums = {}\n# by_cb = {}\n# first = None\n# for c in cookbooks:\n# json_cb = json.dumps(c)\n# first = json_cb\n# hasher = hashlib.md5()\n# hasher.update(json_cb)\n# check = hasher.hexdigest()\n# checksums[check] = None\n# by_cb[c['name']] = check\n# body = json.dumps({'checksums': checksums})\n# sandbox = json.loads(self.request('POST', '/sandboxes'))\n# print 'Sandbox is ', sandbox\n# for k, v in sandbox['checksums'].items():\n# print 'URL ', v\n# if 'url' in v:\n# print 'Trigger it ', self.request(\n# 'PUT', v['url'][25:], json_cb, priv_key, user)\n#\n# print 'Mark as uploaded ', self.request(\n# 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,\n# user)\n# print 'Mark as uploaded ', self.request(\n# 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,\n# user)\n# print 'Mark as uploaded ', self.request(\n# 'PUT', sandbox['uri'][25:], 
'''{'is_completed':true}''', priv_key,\n# user)\n# print 'Mark as uploaded ', self.request(\n# 'PUT', sandbox['uri'][25:], '''{'is_completed':true}''', priv_key,\n# user)\n#\n# for c in cookbooks:\n# c['definitions'] = [{\n# 'name': 'unicorn_config.rb',\n# 'checksum': by_cb[c['name']],\n# 'path': 'definitions/unicorn_config.rb',\n# 'specificity': 'default'\n# }],\n# return self.request('PUT', '/organizations/%s/cookbooks/%s/1' %\n# (org, cookbook_name), body, priv_key, user)\n#\n# @task(name='chef.update_data_bag_item')\n# def update_data_bag_item(conn_info, bag_name, item_name, item_value):\n# client = ChefAPI(conn_info['server_url'],\n# conn_info['client_name'],\n# conn_info['client_key'],\n# conn_info['organization'])\n# client.update_databag_item(bag_name, item_name, item_value)\n", "path": "celerytasks/chef.py"}]}
| 3,042 | 386 |
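The golden diff in the deis record above wraps every Chef API call in a bounded retry loop that only gives up after repeated HTTP 500 responses. A standalone sketch of the same `for`/`else` retry pattern, with a hypothetical `send_once` callable (returning an object with a `.status` attribute) standing in for the `httplib` plumbing:

```python
import time

def request_with_retries(send_once, attempts=5, interval=5):
    """Retry a request while the server keeps answering 500, then give up."""
    for _ in range(attempts):
        resp = send_once()
        if resp.status != 500:
            break  # success or a non-retryable error: stop retrying
        time.sleep(interval)
    else:
        # the loop exhausted all attempts without hitting `break`
        raise RuntimeError("Chef API request failed after %d attempts" % attempts)
    return resp
```

The `for`/`else` form raises only when the loop runs out of attempts without a `break`, which is the behaviour the patched `ChefAPI.request()` relies on.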
gh_patches_debug_540
|
rasdani/github-patches
|
git_diff
|
mlcommons__GaNDLF-628
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Port to Pandas 2.0
**Describe the bug**
when running `gandlf_run`, I am encountering:
`ERROR: 'DataFrame' object has no attribute 'append'`
**To Reproduce**
Train a model using `gandlf_run`.
I trained using `2d_rad_segmentation` data from `https://upenn.box.com/shared/static/y8162xkq1zz5555ye3pwadry2m2e39bs.zip` and the config file from samples in the repo `config_classification.yaml`
**Additional context**
- check the changelog of pandas [here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/v2.0.0.html#removal-of-prior-version-deprecations-changes:~:text=Removed%20deprecated%20Series.append()%2C%20DataFrame.append()%2C%20use%20concat()%20instead%20(GH35407))
- The training runs successfully when downgrading `pandas` to `1.5.3`
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import sys, re
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 try:
13 with open("README.md") as readme_file:
14 readme = readme_file.read()
15 except Exception as error:
16 readme = "No README information found."
17 sys.stderr.write("Warning: Could not open '%s' due %s\n" % ("README.md", error))
18
19
20 class CustomInstallCommand(install):
21 def run(self):
22 install.run(self)
23
24
25 class CustomDevelopCommand(develop):
26 def run(self):
27 develop.run(self)
28
29
30 class CustomEggInfoCommand(egg_info):
31 def run(self):
32 egg_info.run(self)
33
34
35 try:
36 filepath = "GANDLF/version.py"
37 version_file = open(filepath)
38 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
39
40 except Exception as error:
41 __version__ = "0.0.1"
42 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
43
44 requirements = [
45 "torch==1.13.1",
46 "black",
47 "numpy==1.22.0",
48 "scipy",
49 "SimpleITK!=2.0.*",
50 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536
51 "torchvision",
52 "tqdm",
53 "torchio==0.18.75",
54 "pandas",
55 "scikit-learn>=0.23.2",
56 "scikit-image>=0.19.1",
57 "setuptools",
58 "seaborn",
59 "pyyaml",
60 "tiffslide",
61 "matplotlib",
62 "requests>=2.25.0",
63 "pytest",
64 "coverage",
65 "pytest-cov",
66 "psutil",
67 "medcam",
68 "opencv-python",
69 "torchmetrics==0.5.1", # newer versions have changed api for f1 invocation
70 "OpenPatchMiner==0.1.8",
71 "zarr==2.10.3",
72 "pydicom",
73 "onnx",
74 "torchinfo==1.7.0",
75 "segmentation-models-pytorch==0.3.2",
76 "ACSConv==0.1.1",
77 "docker",
78 "dicom-anonymizer",
79 "twine",
80 "zarr",
81 "keyring",
82 ]
83
84 if __name__ == "__main__":
85 setup(
86 name="GANDLF",
87 version=__version__,
88 author="MLCommons",
89 author_email="[email protected]",
90 python_requires=">=3.8",
91 packages=find_packages(),
92 cmdclass={
93 "install": CustomInstallCommand,
94 "develop": CustomDevelopCommand,
95 "egg_info": CustomEggInfoCommand,
96 },
97 scripts=[
98 "gandlf_run",
99 "gandlf_constructCSV",
100 "gandlf_collectStats",
101 "gandlf_patchMiner",
102 "gandlf_preprocess",
103 "gandlf_anonymizer",
104 "gandlf_verifyInstall",
105 "gandlf_configGenerator",
106 "gandlf_recoverConfig",
107 "gandlf_deploy",
108 "gandlf_optimizeModel",
109 ],
110 classifiers=[
111 "Development Status :: 3 - Alpha",
112 "Intended Audience :: Science/Research",
113 "License :: OSI Approved :: Apache Software License",
114 "Natural Language :: English",
115 "Operating System :: OS Independent",
116 "Programming Language :: Python :: 3.8",
117 "Programming Language :: Python :: 3.9",
118 "Programming Language :: Python :: 3.10",
119 "Topic :: Scientific/Engineering :: Medical Science Apps.",
120 ],
121 description=(
122 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
123 ),
124 install_requires=requirements,
125 license="Apache-2.0",
126 long_description=readme,
127 long_description_content_type="text/markdown",
128 include_package_data=True,
129 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch",
130 zip_safe=False,
131 )
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
"torchvision",
"tqdm",
"torchio==0.18.75",
- "pandas",
+ "pandas<2.0.0",
"scikit-learn>=0.23.2",
"scikit-image>=0.19.1",
"setuptools",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.75\",\n- \"pandas\",\n+ \"pandas<2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n", "issue": "Port to Pandas 2.0\n**Describe the bug**\r\nwhen running `gandlf_run`, I am encountering:\r\n\r\n`ERROR: 'DataFrame' object has no attribute 'append'`\r\n\r\n**To Reproduce**\r\n\r\nTrain a model using `gandlf_run`.\r\n\r\nI trained using `2d_rad_segmentation` data from `https://upenn.box.com/shared/static/y8162xkq1zz5555ye3pwadry2m2e39bs.zip` and the config file from samples in the repo `config_classification.yaml`\r\n\r\n**Additional context**\r\n- check the changelog of pandas [here](https://pandas.pydata.org/pandas-docs/stable/whatsnew/v2.0.0.html#removal-of-prior-version-deprecations-changes:~:text=Removed%20deprecated%20Series.append()%2C%20DataFrame.append()%2C%20use%20concat()%20instead%20(GH35407))\r\n- The training runs successfully when downgrading `pandas` to `1.5.3`\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error))\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"torch==1.13.1\",\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.75\",\n \"pandas\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.2\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">=3.8\",\n packages=find_packages(),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n 
\"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}]}
| 2,026 | 106 |
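The fix in the GaNDLF record above simply pins `pandas<2.0.0`; the forward-compatible alternative pointed at by the issue's changelog link is to replace the removed `DataFrame.append` with `pandas.concat`. A small illustration of that migration (not taken from the GaNDLF code base):

```python
import pandas as pd

df = pd.DataFrame({"epoch": [0], "loss": [1.0]})
row = pd.DataFrame({"epoch": [1], "loss": [0.7]})

# pandas < 2.0 allowed:  df = df.append(row, ignore_index=True)
# pandas >= 2.0 removed DataFrame.append; concatenate instead:
df = pd.concat([df, row], ignore_index=True)
```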
gh_patches_debug_33232
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-834
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow updating tag_name and target_commitish of GitRelease
GitHub API [allows updating](https://developer.github.com/v3/repos/releases/#edit-a-release) `tag_name` and `target_commitish` of a release, among `name`, `body`, `draft` and `prerelease`, but PyGithub API wrapper allows updating all of them but `tag_name` and `target_commitish`:
https://github.com/PyGithub/PyGithub/blob/e389396405146bb1b0c4587de1f6f757a9514a43/github/GitRelease.py#L178-L199
Could we add `tag_name` and `target_commitish` parameters to that function?
</issue>
<code>
[start of github/GitRelease.py]
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2015 Ed Holland <[email protected]> #
6 # Copyright 2016 Benjamin Whitney <[email protected]> #
7 # Copyright 2016 Jannis Gebauer <[email protected]> #
8 # Copyright 2016 Peter Buckley <[email protected]> #
9 # Copyright 2017 Chris McBride <[email protected]> #
10 # Copyright 2017 Simon <[email protected]> #
11 # Copyright 2018 Shinichi TAMURA <[email protected]> #
12 # Copyright 2018 Wan Liuyang <[email protected]> #
13 # Copyright 2018 edquist <[email protected]> #
14 # Copyright 2018 sfdye <[email protected]> #
15 # #
16 # This file is part of PyGithub. #
17 # http://pygithub.readthedocs.io/ #
18 # #
19 # PyGithub is free software: you can redistribute it and/or modify it under #
20 # the terms of the GNU Lesser General Public License as published by the Free #
21 # Software Foundation, either version 3 of the License, or (at your option) #
22 # any later version. #
23 # #
24 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
25 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
26 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
27 # details. #
28 # #
29 # You should have received a copy of the GNU Lesser General Public License #
30 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
31 # #
32 ################################################################################
33
34 from os.path import basename
35 import github.GithubObject
36 import github.GitAuthor
37 import github.GitReleaseAsset
38
39
40 class GitRelease(github.GithubObject.CompletableGithubObject):
41 """
42 This class represents GitReleases. The reference can be found here https://developer.github.com/v3/repos/releases
43 """
44
45 def __repr__(self):
46 return self.get__repr__({"title": self._title.value})
47
48 @property
49 def id(self):
50 """
51 :type: integer
52 """
53 self._completeIfNotSet(self._id)
54 return self._id.value
55
56 @property
57 def body(self):
58 """
59 :type: string
60 """
61 self._completeIfNotSet(self._body)
62 return self._body.value
63
64 @property
65 def title(self):
66 """
67 :type: string
68 """
69 self._completeIfNotSet(self._title)
70 return self._title.value
71
72 @property
73 def tag_name(self):
74 """
75 :type: string
76 """
77 self._completeIfNotSet(self._tag_name)
78 return self._tag_name.value
79
80 @property
81 def target_commitish(self):
82 """
83 :type: string
84 """
85 self._completeIfNotSet(self._target_commitish)
86 return self._target_commitish.value
87
88 @property
89 def draft(self):
90 """
91 :type: bool
92 """
93 self._completeIfNotSet(self._draft)
94 return self._draft.value
95
96 @property
97 def prerelease(self):
98 """
99 :type: bool
100 """
101 self._completeIfNotSet(self._prerelease)
102 return self._prerelease.value
103
104 @property
105 def author(self):
106 """
107 :type: :class:`github.GitAuthor.GitAuthor`
108 """
109 self._completeIfNotSet(self._author)
110 return self._author.value
111
112 @property
113 def created_at(self):
114 """
115 :type: datetime.datetime
116 """
117 self._completeIfNotSet(self._created_at)
118 return self._created_at.value
119
120 @property
121 def published_at(self):
122 """
123 :type: datetime.datetime
124 """
125 self._completeIfNotSet(self._published_at)
126 return self._published_at.value
127
128 @property
129 def url(self):
130 """
131 :type: string
132 """
133 self._completeIfNotSet(self._url)
134 return self._url.value
135
136 @property
137 def upload_url(self):
138 """
139 :type: string
140 """
141 self._completeIfNotSet(self._upload_url)
142 return self._upload_url.value
143
144 @property
145 def html_url(self):
146 """
147 :type: string
148 """
149 self._completeIfNotSet(self._html_url)
150 return self._html_url.value
151
152 @property
153 def tarball_url(self):
154 """
155 :type: string
156 """
157 self._completeIfNotSet(self._tarball_url)
158 return self._tarball_url.value
159
160 @property
161 def zipball_url(self):
162 """
163 :type: string
164 """
165 self._completeIfNotSet(self._zipball_url)
166 return self._zipball_url.value
167
168 def delete_release(self):
169 """
170 :calls: `DELETE /repos/:owner/:repo/releases/:release_id <https://developer.github.com/v3/repos/releases/#delete-a-release>`_
171 :rtype: None
172 """
173 headers, data = self._requester.requestJsonAndCheck(
174 "DELETE",
175 self.url
176 )
177
178 def update_release(self, name, message, draft=False, prerelease=False):
179 """
180 :calls: `PATCH /repos/:owner/:repo/releases/:release_id <https://developer.github.com/v3/repos/releases/#edit-a-release>`_
181 :rtype: :class:`github.GitRelease.GitRelease`
182 """
183 assert isinstance(name, (str, unicode)), name
184 assert isinstance(message, (str, unicode)), message
185 assert isinstance(draft, bool), draft
186 assert isinstance(prerelease, bool), prerelease
187 post_parameters = {
188 "tag_name": self.tag_name,
189 "name": name,
190 "body": message,
191 "draft": draft,
192 "prerelease": prerelease,
193 }
194 headers, data = self._requester.requestJsonAndCheck(
195 "PATCH",
196 self.url,
197 input=post_parameters
198 )
199 return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)
200
201 def upload_asset(self, path, label="", content_type=""):
202 """
203 :calls: `POST https://<upload_url>/repos/:owner/:repo/releases/:release_id/assets?name=foo.zip <https://developer.github.com/v3/repos/releases/#upload-a-release-asset>`_
204 :rtype: :class:`github.GitReleaseAsset.GitReleaseAsset`
205 """
206 assert isinstance(path, (str, unicode)), path
207 assert isinstance(label, (str, unicode)), label
208
209 post_parameters = {
210 "name": basename(path),
211 "label": label
212 }
213 headers = {}
214 if len(content_type) > 0:
215 headers["Content-Type"] = content_type
216 resp_headers, data = self._requester.requestBlobAndCheck(
217 "POST",
218 self.upload_url.split("{?")[0],
219 parameters=post_parameters,
220 headers=headers,
221 input=path
222 )
223 return github.GitReleaseAsset.GitReleaseAsset(self._requester, resp_headers, data, completed=True)
224
225 def get_assets(self):
226 """
227 :calls: `GET /repos/:owner/:repo/releases/:release_id/assets <https://developer.github.com/v3/repos/releases/#list-assets-for-a-release>`_
228 :rtype: :class:`github.PaginatedList.PaginatedList`
229 """
230 return github.PaginatedList.PaginatedList(
231 github.GitReleaseAsset.GitReleaseAsset,
232 self._requester,
233 self.url + "/assets",
234 None
235 )
236
237 def _initAttributes(self):
238 self._id = github.GithubObject.NotSet
239 self._body = github.GithubObject.NotSet
240 self._title = github.GithubObject.NotSet
241 self._tag_name = github.GithubObject.NotSet
242 self._target_commitish = github.GithubObject.NotSet
243 self._draft = github.GithubObject.NotSet
244 self._prerelease = github.GithubObject.NotSet
245 self._author = github.GithubObject.NotSet
246 self._url = github.GithubObject.NotSet
247 self._upload_url = github.GithubObject.NotSet
248 self._html_url = github.GithubObject.NotSet
249 self._created_at = github.GithubObject.NotSet
250 self._published_at = github.GithubObject.NotSet
251 self._tarball_url = github.GithubObject.NotSet
252 self._zipball_url = github.GithubObject.NotSet
253
254 def _useAttributes(self, attributes):
255 if "id" in attributes:
256 self._id = self._makeIntAttribute(attributes["id"])
257 if "body" in attributes:
258 self._body = self._makeStringAttribute(attributes["body"])
259 if "name" in attributes:
260 self._title = self._makeStringAttribute(attributes["name"])
261 if "tag_name" in attributes:
262 self._tag_name = self._makeStringAttribute(attributes["tag_name"])
263 if "target_commitish" in attributes:
264 self._target_commitish = self._makeStringAttribute(attributes["target_commitish"])
265 if "draft" in attributes:
266 self._draft = self._makeBoolAttribute(attributes["draft"])
267 if "prerelease" in attributes:
268 self._prerelease = self._makeBoolAttribute(attributes["prerelease"])
269 if "author" in attributes:
270 self._author = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["author"])
271 if "url" in attributes:
272 self._url = self._makeStringAttribute(attributes["url"])
273 if "upload_url" in attributes:
274 self._upload_url = self._makeStringAttribute(attributes["upload_url"])
275 if "html_url" in attributes:
276 self._html_url = self._makeStringAttribute(attributes["html_url"])
277 if "created_at" in attributes:
278 self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
279 if "published_at" in attributes:
280 self._published_at = self._makeDatetimeAttribute(attributes["published_at"])
281 if "tarball_url" in attributes:
282 self._tarball_url = self._makeStringAttribute(attributes["tarball_url"])
283 if "zipball_url" in attributes:
284 self._zipball_url = self._makeStringAttribute(attributes["zipball_url"])
285
[end of github/GitRelease.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/github/GitRelease.py b/github/GitRelease.py
--- a/github/GitRelease.py
+++ b/github/GitRelease.py
@@ -175,22 +175,36 @@
self.url
)
- def update_release(self, name, message, draft=False, prerelease=False):
+ def update_release(self, name, message, draft=False, prerelease=False,
+ tag_name=github.GithubObject.NotSet,
+ target_commitish=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/releases/:release_id <https://developer.github.com/v3/repos/releases/#edit-a-release>`_
:rtype: :class:`github.GitRelease.GitRelease`
"""
+ assert tag_name is github.GithubObject.NotSet \
+ or isinstance(tag_name, (str, unicode)), \
+ 'tag_name must be a str/unicode object'
+ assert target_commitish is github.GithubObject.NotSet \
+ or isinstance(target_commitish, (str, unicode)), \
+ 'target_commitish must be a str/unicode object'
assert isinstance(name, (str, unicode)), name
assert isinstance(message, (str, unicode)), message
assert isinstance(draft, bool), draft
assert isinstance(prerelease, bool), prerelease
+ if tag_name is github.GithubObject.NotSet:
+ tag_name = self.tag_name
post_parameters = {
- "tag_name": self.tag_name,
+ "tag_name": tag_name,
"name": name,
"body": message,
"draft": draft,
"prerelease": prerelease,
}
+ # Do not set target_commitish to self.target_commitish when ommited, just don't send it
+ # alltogether in that case, in order to match the Github API behaviour. Only send it when set.
+ if target_commitish is not github.GithubObject.NotSet:
+ post_parameters['target_commitish'] = target_commitish
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
|
{"golden_diff": "diff --git a/github/GitRelease.py b/github/GitRelease.py\n--- a/github/GitRelease.py\n+++ b/github/GitRelease.py\n@@ -175,22 +175,36 @@\n self.url\n )\n \n- def update_release(self, name, message, draft=False, prerelease=False):\n+ def update_release(self, name, message, draft=False, prerelease=False,\n+ tag_name=github.GithubObject.NotSet,\n+ target_commitish=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PATCH /repos/:owner/:repo/releases/:release_id <https://developer.github.com/v3/repos/releases/#edit-a-release>`_\n :rtype: :class:`github.GitRelease.GitRelease`\n \"\"\"\n+ assert tag_name is github.GithubObject.NotSet \\\n+ or isinstance(tag_name, (str, unicode)), \\\n+ 'tag_name must be a str/unicode object'\n+ assert target_commitish is github.GithubObject.NotSet \\\n+ or isinstance(target_commitish, (str, unicode)), \\\n+ 'target_commitish must be a str/unicode object'\n assert isinstance(name, (str, unicode)), name\n assert isinstance(message, (str, unicode)), message\n assert isinstance(draft, bool), draft\n assert isinstance(prerelease, bool), prerelease\n+ if tag_name is github.GithubObject.NotSet:\n+ tag_name = self.tag_name\n post_parameters = {\n- \"tag_name\": self.tag_name,\n+ \"tag_name\": tag_name,\n \"name\": name,\n \"body\": message,\n \"draft\": draft,\n \"prerelease\": prerelease,\n }\n+ # Do not set target_commitish to self.target_commitish when ommited, just don't send it\n+ # alltogether in that case, in order to match the Github API behaviour. Only send it when set.\n+ if target_commitish is not github.GithubObject.NotSet:\n+ post_parameters['target_commitish'] = target_commitish\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n", "issue": "Allow updating tag_name and target_commitish of GitRelease\nGitHub API [allows updating](https://developer.github.com/v3/repos/releases/#edit-a-release) `tag_name` and `target_commitish` of a release, among `name`, `body`, `draft` and `prerelease`, but PyGithub API wrapper allows updating all of them but `tag_name` and `target_commitish`:\r\n\r\nhttps://github.com/PyGithub/PyGithub/blob/e389396405146bb1b0c4587de1f6f757a9514a43/github/GitRelease.py#L178-L199\r\n\r\nCould we add `tag_name` and `target_commitish` parameters to that function?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2015 Ed Holland <[email protected]> #\n# Copyright 2016 Benjamin Whitney <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Chris McBride <[email protected]> #\n# Copyright 2017 Simon <[email protected]> #\n# Copyright 2018 Shinichi TAMURA <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 edquist <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nfrom os.path import basename\nimport github.GithubObject\nimport github.GitAuthor\nimport github.GitReleaseAsset\n\n\nclass GitRelease(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents GitReleases. The reference can be found here https://developer.github.com/v3/repos/releases\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"title\": self._title.value})\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def title(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._title)\n return self._title.value\n\n @property\n def tag_name(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._tag_name)\n return self._tag_name.value\n\n @property\n def target_commitish(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._target_commitish)\n return self._target_commitish.value\n\n @property\n def draft(self):\n \"\"\"\n :type: bool\n \"\"\"\n self._completeIfNotSet(self._draft)\n return self._draft.value\n\n @property\n def prerelease(self):\n \"\"\"\n :type: bool\n \"\"\"\n self._completeIfNotSet(self._prerelease)\n return self._prerelease.value\n\n @property\n def author(self):\n \"\"\"\n :type: :class:`github.GitAuthor.GitAuthor`\n \"\"\"\n self._completeIfNotSet(self._author)\n return self._author.value\n\n @property\n def created_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._created_at)\n return self._created_at.value\n\n @property\n def published_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._published_at)\n return self._published_at.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def upload_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._upload_url)\n return self._upload_url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def tarball_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._tarball_url)\n return self._tarball_url.value\n\n @property\n def zipball_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._zipball_url)\n return self._zipball_url.value\n\n def delete_release(self):\n \"\"\"\n :calls: `DELETE /repos/:owner/:repo/releases/:release_id <https://developer.github.com/v3/repos/releases/#delete-a-release>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url\n )\n\n def update_release(self, name, message, draft=False, prerelease=False):\n \"\"\"\n :calls: `PATCH /repos/:owner/:repo/releases/:release_id <https://developer.github.com/v3/repos/releases/#edit-a-release>`_\n :rtype: :class:`github.GitRelease.GitRelease`\n \"\"\"\n assert isinstance(name, (str, unicode)), name\n assert isinstance(message, (str, unicode)), message\n assert isinstance(draft, bool), 
draft\n assert isinstance(prerelease, bool), prerelease\n post_parameters = {\n \"tag_name\": self.tag_name,\n \"name\": name,\n \"body\": message,\n \"draft\": draft,\n \"prerelease\": prerelease,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n return github.GitRelease.GitRelease(self._requester, headers, data, completed=True)\n\n def upload_asset(self, path, label=\"\", content_type=\"\"):\n \"\"\"\n :calls: `POST https://<upload_url>/repos/:owner/:repo/releases/:release_id/assets?name=foo.zip <https://developer.github.com/v3/repos/releases/#upload-a-release-asset>`_\n :rtype: :class:`github.GitReleaseAsset.GitReleaseAsset`\n \"\"\"\n assert isinstance(path, (str, unicode)), path\n assert isinstance(label, (str, unicode)), label\n\n post_parameters = {\n \"name\": basename(path),\n \"label\": label\n }\n headers = {}\n if len(content_type) > 0:\n headers[\"Content-Type\"] = content_type\n resp_headers, data = self._requester.requestBlobAndCheck(\n \"POST\",\n self.upload_url.split(\"{?\")[0],\n parameters=post_parameters,\n headers=headers,\n input=path\n )\n return github.GitReleaseAsset.GitReleaseAsset(self._requester, resp_headers, data, completed=True)\n\n def get_assets(self):\n \"\"\"\n :calls: `GET /repos/:owner/:repo/releases/:release_id/assets <https://developer.github.com/v3/repos/releases/#list-assets-for-a-release>`_\n :rtype: :class:`github.PaginatedList.PaginatedList`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.GitReleaseAsset.GitReleaseAsset,\n self._requester,\n self.url + \"/assets\",\n None\n )\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._body = github.GithubObject.NotSet\n self._title = github.GithubObject.NotSet\n self._tag_name = github.GithubObject.NotSet\n self._target_commitish = github.GithubObject.NotSet\n self._draft = github.GithubObject.NotSet\n self._prerelease = github.GithubObject.NotSet\n self._author = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._upload_url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._created_at = github.GithubObject.NotSet\n self._published_at = github.GithubObject.NotSet\n self._tarball_url = github.GithubObject.NotSet\n self._zipball_url = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes:\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"body\" in attributes:\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"name\" in attributes:\n self._title = self._makeStringAttribute(attributes[\"name\"])\n if \"tag_name\" in attributes:\n self._tag_name = self._makeStringAttribute(attributes[\"tag_name\"])\n if \"target_commitish\" in attributes:\n self._target_commitish = self._makeStringAttribute(attributes[\"target_commitish\"])\n if \"draft\" in attributes:\n self._draft = self._makeBoolAttribute(attributes[\"draft\"])\n if \"prerelease\" in attributes:\n self._prerelease = self._makeBoolAttribute(attributes[\"prerelease\"])\n if \"author\" in attributes:\n self._author = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes[\"author\"])\n if \"url\" in attributes:\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"upload_url\" in attributes:\n self._upload_url = self._makeStringAttribute(attributes[\"upload_url\"])\n if \"html_url\" in attributes:\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"created_at\" in attributes:\n 
self._created_at = self._makeDatetimeAttribute(attributes[\"created_at\"])\n if \"published_at\" in attributes:\n self._published_at = self._makeDatetimeAttribute(attributes[\"published_at\"])\n if \"tarball_url\" in attributes:\n self._tarball_url = self._makeStringAttribute(attributes[\"tarball_url\"])\n if \"zipball_url\" in attributes:\n self._zipball_url = self._makeStringAttribute(attributes[\"zipball_url\"])\n", "path": "github/GitRelease.py"}]}
| 3,780 | 474 |
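The golden diff in the PyGithub record above uses the `NotSet` sentinel so that an omitted `target_commitish` is simply not sent to the API, while an omitted `tag_name` falls back to the release's current tag. A generic sketch of that sentinel pattern, using a local `_NOT_SET` object and a hypothetical payload builder rather than PyGithub's own classes:

```python
_NOT_SET = object()  # stand-in for github.GithubObject.NotSet

def build_update_payload(name, message, draft=False, prerelease=False,
                         tag_name=_NOT_SET, target_commitish=_NOT_SET,
                         current_tag_name="v1.0.0"):
    """Build a PATCH body that only includes optional keys the caller provided."""
    payload = {
        "tag_name": current_tag_name if tag_name is _NOT_SET else tag_name,
        "name": name,
        "body": message,
        "draft": draft,
        "prerelease": prerelease,
    }
    if target_commitish is not _NOT_SET:
        payload["target_commitish"] = target_commitish  # only sent when explicitly set
    return payload
```

Distinguishing "omitted" from "explicitly passed" with a dedicated sentinel (instead of `None`) is what lets the wrapper match the GitHub API's behaviour of leaving unspecified fields untouched.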
gh_patches_debug_32174
|
rasdani/github-patches
|
git_diff
|
huggingface__dataset-viewer-2304
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Limit the number of fields (columns) in /croissant
https://huggingface.co/datasets/mstz/arcene has 10K columns, and thus the /croissant response is 3.7MB (120K lines when the JSON is prettified).
We already limit the number of supported columns to 1K: https://github.com/huggingface/datasets-server/blob/main/chart/values.yaml#L204
We could apply the same limit, and mention it in the description of the recordset.
From Slack (internal): https://huggingface.slack.com/archives/C04L6P8KNQ5/p1705224717632239
</issue>
<code>
[start of services/api/src/api/routes/croissant.py]
1 import logging
2 import re
3 from collections.abc import Mapping
4 from http import HTTPStatus
5 from itertools import islice
6 from typing import Any, Optional
7
8 from datasets import ClassLabel, Features, Image, Value
9 from libapi.authentication import auth_check
10 from libapi.exceptions import (
11 ApiError,
12 MissingRequiredParameterError,
13 UnexpectedApiError,
14 )
15 from libapi.request import get_request_parameter
16 from libapi.utils import (
17 Endpoint,
18 are_valid_parameters,
19 get_cache_entry_from_steps,
20 get_json_api_error_response,
21 get_json_error_response,
22 get_json_ok_response,
23 )
24 from libcommon.constants import DATASET_INFO_KINDS
25 from libcommon.prometheus import StepProfiler
26 from libcommon.storage_client import StorageClient
27 from starlette.requests import Request
28 from starlette.responses import Response
29
30 MAX_CONFIGS = 100
31
32 HF_TO_CROISSANT_VALUE_TYPE = {
33 "string": "sc:Text",
34 "int32": "sc:Integer",
35 "int64": "sc:Integer",
36 "float32": "sc:Float",
37 "float64": "sc:Float",
38 "bool": "sc:Boolean",
39 }
40
41 NAME_PATTERN_REGEX = "[^a-zA-Z0-9\\-_\\.]"
42
43
44 def _escape_name(name: str, names: set[str]) -> str:
45 """Escapes names in Croissant.
46
47 Reasons:
48 - `/` are used in the syntax as delimiters. So we replace them.
49 - Two FileObject/FileSet/RecordSet/Fields cannot have the same name. So we append a postfix in case it happens.
50
51 Args:
52 name: The initial non-escaped name.
53 names: The set of already existing names.
54 Returns:
55 The escaped name.
56 """
57 escaped_name = re.sub(NAME_PATTERN_REGEX, "_", name)
58 while escaped_name in names:
59 escaped_name = f"{escaped_name}_0"
60 names.add(escaped_name)
61 return escaped_name
62
63
64 def get_croissant_from_dataset_infos(dataset: str, infos: list[Mapping[str, Any]], partial: bool) -> Mapping[str, Any]:
65 repo_name = "repo"
66 names: set[str] = set(repo_name)
67 distribution = [
68 {
69 "@type": "sc:FileObject",
70 "name": repo_name,
71 "description": "The Hugging Face git repository.",
72 "contentUrl": f"https://huggingface.co/datasets/{dataset}/tree/refs%2Fconvert%2Fparquet",
73 "encodingFormat": "git+https",
74 "sha256": "https://github.com/mlcommons/croissant/issues/80",
75 }
76 ]
77 record_set = []
78 for info in infos:
79 config = info["config_name"]
80 features = Features.from_dict(info["features"])
81 fields: list[dict[str, Any]] = []
82 splits = list(info["splits"])
83 distribution_name = _escape_name(f"parquet-files-for-config-{config}", names)
84 distribution.append(
85 {
86 "@type": "sc:FileSet",
87 "name": distribution_name,
88 "containedIn": repo_name,
89 "encodingFormat": "application/x-parquet",
90 "includes": f"{config}/*/*.parquet",
91 }
92 )
93 skipped_columns = []
94 for column, feature in features.items():
95 fields_names: set[str] = set()
96 if isinstance(feature, Value) and feature.dtype in HF_TO_CROISSANT_VALUE_TYPE:
97 fields.append(
98 {
99 "@type": "ml:Field",
100 "name": _escape_name(column, fields_names),
101 "description": f"Column '{column}' from the Hugging Face parquet file.",
102 "dataType": HF_TO_CROISSANT_VALUE_TYPE[feature.dtype],
103 "source": {"distribution": distribution_name, "extract": {"column": column}},
104 }
105 )
106 elif isinstance(feature, Image):
107 fields.append(
108 {
109 "@type": "ml:Field",
110 "name": _escape_name(column, fields_names),
111 "description": f"Image column '{column}' from the Hugging Face parquet file.",
112 "dataType": "sc:ImageObject",
113 "source": {
114 "distribution": distribution_name,
115 "extract": {"column": column},
116 "transform": {"jsonPath": "bytes"},
117 },
118 }
119 )
120 elif isinstance(feature, ClassLabel):
121 fields.append(
122 {
123 "@type": "ml:Field",
124 "name": _escape_name(column, fields_names),
125 "description": f"ClassLabel column '{column}' from the Hugging Face parquet file.\nLabels:\n"
126 + ", ".join(f"{name} ({i})" for i, name in enumerate(feature.names)),
127 "dataType": "sc:Integer",
128 "source": {"distribution": distribution_name, "extract": {"column": column}},
129 }
130 )
131 else:
132 skipped_columns.append(column)
133 record_set_name = config if config != dataset else f"record_set_{config}"
134 description = f"{dataset} - '{config}' subset"
135 if partial:
136 description += " (first 5GB)"
137 description_body = ""
138 if len(splits) > 1:
139 description_body += f"\n- {len(splits)} split{'s' if len(splits) > 1 else ''}: {', '.join(splits)}"
140 if skipped_columns:
141 description_body += f"\n- {len(skipped_columns)} skipped column{'s' if len(skipped_columns) > 1 else ''}: {', '.join(skipped_columns)}"
142 if description_body:
143 description += "\n\nAdditional information:"
144 description += description_body
145 record_set.append(
146 {
147 "@type": "ml:RecordSet",
148 "name": _escape_name(record_set_name, names),
149 "description": description,
150 "field": fields,
151 }
152 )
153 return {
154 "@context": {
155 "@language": "en",
156 "@vocab": "https://schema.org/",
157 "column": "ml:column",
158 "data": {
159 "@id": "ml:data",
160 "@type": "@json",
161 },
162 "dataType": {
163 "@id": "ml:dataType",
164 "@type": "@vocab",
165 },
166 "extract": "ml:extract",
167 "field": "ml:field",
168 "fileProperty": "ml:fileProperty",
169 "format": "ml:format",
170 "includes": "ml:includes",
171 "isEnumeration": "ml:isEnumeration",
172 "jsonPath": "ml:jsonPath",
173 "ml": "http://mlcommons.org/schema/",
174 "parentField": "ml:parentField",
175 "path": "ml:path",
176 "recordSet": "ml:recordSet",
177 "references": "ml:references",
178 "regex": "ml:regex",
179 "repeated": "ml:repeated",
180 "replace": "ml:replace",
181 "sc": "https://schema.org/",
182 "separator": "ml:separator",
183 "source": "ml:source",
184 "subField": "ml:subField",
185 "transform": "ml:transform",
186 },
187 "@type": "sc:Dataset",
188 "name": _escape_name(dataset, names),
189 "description": f"{dataset} dataset hosted on Hugging Face and contributed by the HF Datasets community",
190 "url": f"https://huggingface.co/datasets/{dataset}",
191 "distribution": distribution,
192 "recordSet": record_set,
193 }
194
195
196 def create_croissant_endpoint(
197 hf_endpoint: str,
198 blocked_datasets: list[str],
199 hf_token: Optional[str] = None,
200 hf_jwt_public_keys: Optional[list[str]] = None,
201 hf_jwt_algorithm: Optional[str] = None,
202 external_auth_url: Optional[str] = None,
203 hf_timeout_seconds: Optional[float] = None,
204 max_age_long: int = 0,
205 max_age_short: int = 0,
206 storage_clients: Optional[list[StorageClient]] = None,
207 ) -> Endpoint:
208 async def croissant_endpoint(request: Request) -> Response:
209 endpoint_name = "croissant"
210 context = f"endpoint: {endpoint_name}"
211 revision: Optional[str] = None
212 with StepProfiler(method="croissant_endpoint", step="all", context=context):
213 try:
214 with StepProfiler(
215 method="croissant_endpoint",
216 step="validate parameters and get processing steps",
217 context=context,
218 ):
219 dataset = get_request_parameter(request, "dataset")
220 logging.debug(f"endpoint={endpoint_name} dataset={dataset}")
221 if not are_valid_parameters([dataset]):
222 raise MissingRequiredParameterError("Parameter 'dataset' is required")
223 # if auth_check fails, it will raise an exception that will be caught below
224 with StepProfiler(method="croissant_endpoint", step="check authentication", context=context):
225 await auth_check(
226 dataset,
227 external_auth_url=external_auth_url,
228 request=request,
229 hf_jwt_public_keys=hf_jwt_public_keys,
230 hf_jwt_algorithm=hf_jwt_algorithm,
231 hf_timeout_seconds=hf_timeout_seconds,
232 )
233 # getting result based on processing steps
234 with StepProfiler(method="croissant_endpoint", step="get info cache entry", context=context):
235 info_result = get_cache_entry_from_steps(
236 processing_step_names=DATASET_INFO_KINDS,
237 dataset=dataset,
238 config=None,
239 split=None,
240 hf_endpoint=hf_endpoint,
241 hf_token=hf_token,
242 blocked_datasets=blocked_datasets,
243 hf_timeout_seconds=hf_timeout_seconds,
244 storage_clients=storage_clients,
245 )
246 content = info_result["content"]
247 http_status = info_result["http_status"]
248 error_code = info_result["error_code"]
249 revision = info_result["dataset_git_revision"]
250 if http_status == HTTPStatus.OK:
251 infos = list(islice(content["dataset_info"].values(), MAX_CONFIGS))
252 partial = content["partial"]
253 with StepProfiler(method="croissant_endpoint", step="generate croissant json", context=context):
254 croissant = get_croissant_from_dataset_infos(dataset=dataset, infos=infos, partial=partial)
255 with StepProfiler(method="croissant_endpoint", step="generate OK response", context=context):
256 return get_json_ok_response(content=croissant, max_age=max_age_long, revision=revision)
257 else:
258 with StepProfiler(method="croissant_endpoint", step="generate error response", context=context):
259 return get_json_error_response(
260 content=content,
261 status_code=http_status,
262 max_age=max_age_short,
263 error_code=error_code,
264 revision=revision,
265 )
266 except Exception as e:
267 error = e if isinstance(e, ApiError) else UnexpectedApiError("Unexpected error.", e)
268 with StepProfiler(method="croissant_endpoint", step="generate API error response", context=context):
269 return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)
270
271 return croissant_endpoint
272
[end of services/api/src/api/routes/croissant.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/services/api/src/api/routes/croissant.py b/services/api/src/api/routes/croissant.py
--- a/services/api/src/api/routes/croissant.py
+++ b/services/api/src/api/routes/croissant.py
@@ -28,6 +28,9 @@
from starlette.responses import Response
MAX_CONFIGS = 100
+MAX_COLUMNS = 1_000
+# ^ same value as the default for FIRST_ROWS_COLUMNS_MAX_NUMBER (see services/worker)
+
HF_TO_CROISSANT_VALUE_TYPE = {
"string": "sc:Text",
@@ -76,6 +79,7 @@
]
record_set = []
for info in infos:
+ description_body = ""
config = info["config_name"]
features = Features.from_dict(info["features"])
fields: list[dict[str, Any]] = []
@@ -92,6 +96,9 @@
)
skipped_columns = []
for column, feature in features.items():
+ if len(fields) >= MAX_COLUMNS:
+ description_body += f"\n- {len(features) - MAX_COLUMNS} skipped column{'s' if len(features) - MAX_COLUMNS > 1 else ''} (max number of columns reached)"
+ break
fields_names: set[str] = set()
if isinstance(feature, Value) and feature.dtype in HF_TO_CROISSANT_VALUE_TYPE:
fields.append(
@@ -134,7 +141,6 @@
description = f"{dataset} - '{config}' subset"
if partial:
description += " (first 5GB)"
- description_body = ""
if len(splits) > 1:
description_body += f"\n- {len(splits)} split{'s' if len(splits) > 1 else ''}: {', '.join(splits)}"
if skipped_columns:
|
{"golden_diff": "diff --git a/services/api/src/api/routes/croissant.py b/services/api/src/api/routes/croissant.py\n--- a/services/api/src/api/routes/croissant.py\n+++ b/services/api/src/api/routes/croissant.py\n@@ -28,6 +28,9 @@\n from starlette.responses import Response\n \n MAX_CONFIGS = 100\n+MAX_COLUMNS = 1_000\n+# ^ same value as the default for FIRST_ROWS_COLUMNS_MAX_NUMBER (see services/worker)\n+\n \n HF_TO_CROISSANT_VALUE_TYPE = {\n \"string\": \"sc:Text\",\n@@ -76,6 +79,7 @@\n ]\n record_set = []\n for info in infos:\n+ description_body = \"\"\n config = info[\"config_name\"]\n features = Features.from_dict(info[\"features\"])\n fields: list[dict[str, Any]] = []\n@@ -92,6 +96,9 @@\n )\n skipped_columns = []\n for column, feature in features.items():\n+ if len(fields) >= MAX_COLUMNS:\n+ description_body += f\"\\n- {len(features) - MAX_COLUMNS} skipped column{'s' if len(features) - MAX_COLUMNS > 1 else ''} (max number of columns reached)\"\n+ break\n fields_names: set[str] = set()\n if isinstance(feature, Value) and feature.dtype in HF_TO_CROISSANT_VALUE_TYPE:\n fields.append(\n@@ -134,7 +141,6 @@\n description = f\"{dataset} - '{config}' subset\"\n if partial:\n description += \" (first 5GB)\"\n- description_body = \"\"\n if len(splits) > 1:\n description_body += f\"\\n- {len(splits)} split{'s' if len(splits) > 1 else ''}: {', '.join(splits)}\"\n if skipped_columns:\n", "issue": "Limit the number of fields (columns) in /croissant\nhttps://huggingface.co/datasets/mstz/arcene has 10K columns, and thus the /croissant response is 3.7MB (120K lines when the JSON is prettified).\r\n\r\nWe already limit the number of supported columns to 1K: https://github.com/huggingface/datasets-server/blob/main/chart/values.yaml#L204\r\n\r\nWe could apply the same limit, and mention it in the description of the recordset.\r\n\r\nFrom Slack (internal): https://huggingface.slack.com/archives/C04L6P8KNQ5/p1705224717632239 \n", "before_files": [{"content": "import logging\nimport re\nfrom collections.abc import Mapping\nfrom http import HTTPStatus\nfrom itertools import islice\nfrom typing import Any, Optional\n\nfrom datasets import ClassLabel, Features, Image, Value\nfrom libapi.authentication import auth_check\nfrom libapi.exceptions import (\n ApiError,\n MissingRequiredParameterError,\n UnexpectedApiError,\n)\nfrom libapi.request import get_request_parameter\nfrom libapi.utils import (\n Endpoint,\n are_valid_parameters,\n get_cache_entry_from_steps,\n get_json_api_error_response,\n get_json_error_response,\n get_json_ok_response,\n)\nfrom libcommon.constants import DATASET_INFO_KINDS\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.storage_client import StorageClient\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nMAX_CONFIGS = 100\n\nHF_TO_CROISSANT_VALUE_TYPE = {\n \"string\": \"sc:Text\",\n \"int32\": \"sc:Integer\",\n \"int64\": \"sc:Integer\",\n \"float32\": \"sc:Float\",\n \"float64\": \"sc:Float\",\n \"bool\": \"sc:Boolean\",\n}\n\nNAME_PATTERN_REGEX = \"[^a-zA-Z0-9\\\\-_\\\\.]\"\n\n\ndef _escape_name(name: str, names: set[str]) -> str:\n \"\"\"Escapes names in Croissant.\n\n Reasons:\n - `/` are used in the syntax as delimiters. So we replace them.\n - Two FileObject/FileSet/RecordSet/Fields cannot have the same name. 
So we append a postfix in case it happens.\n\n Args:\n name: The initial non-escaped name.\n names: The set of already existing names.\n Returns:\n The escaped name.\n \"\"\"\n escaped_name = re.sub(NAME_PATTERN_REGEX, \"_\", name)\n while escaped_name in names:\n escaped_name = f\"{escaped_name}_0\"\n names.add(escaped_name)\n return escaped_name\n\n\ndef get_croissant_from_dataset_infos(dataset: str, infos: list[Mapping[str, Any]], partial: bool) -> Mapping[str, Any]:\n repo_name = \"repo\"\n names: set[str] = set(repo_name)\n distribution = [\n {\n \"@type\": \"sc:FileObject\",\n \"name\": repo_name,\n \"description\": \"The Hugging Face git repository.\",\n \"contentUrl\": f\"https://huggingface.co/datasets/{dataset}/tree/refs%2Fconvert%2Fparquet\",\n \"encodingFormat\": \"git+https\",\n \"sha256\": \"https://github.com/mlcommons/croissant/issues/80\",\n }\n ]\n record_set = []\n for info in infos:\n config = info[\"config_name\"]\n features = Features.from_dict(info[\"features\"])\n fields: list[dict[str, Any]] = []\n splits = list(info[\"splits\"])\n distribution_name = _escape_name(f\"parquet-files-for-config-{config}\", names)\n distribution.append(\n {\n \"@type\": \"sc:FileSet\",\n \"name\": distribution_name,\n \"containedIn\": repo_name,\n \"encodingFormat\": \"application/x-parquet\",\n \"includes\": f\"{config}/*/*.parquet\",\n }\n )\n skipped_columns = []\n for column, feature in features.items():\n fields_names: set[str] = set()\n if isinstance(feature, Value) and feature.dtype in HF_TO_CROISSANT_VALUE_TYPE:\n fields.append(\n {\n \"@type\": \"ml:Field\",\n \"name\": _escape_name(column, fields_names),\n \"description\": f\"Column '{column}' from the Hugging Face parquet file.\",\n \"dataType\": HF_TO_CROISSANT_VALUE_TYPE[feature.dtype],\n \"source\": {\"distribution\": distribution_name, \"extract\": {\"column\": column}},\n }\n )\n elif isinstance(feature, Image):\n fields.append(\n {\n \"@type\": \"ml:Field\",\n \"name\": _escape_name(column, fields_names),\n \"description\": f\"Image column '{column}' from the Hugging Face parquet file.\",\n \"dataType\": \"sc:ImageObject\",\n \"source\": {\n \"distribution\": distribution_name,\n \"extract\": {\"column\": column},\n \"transform\": {\"jsonPath\": \"bytes\"},\n },\n }\n )\n elif isinstance(feature, ClassLabel):\n fields.append(\n {\n \"@type\": \"ml:Field\",\n \"name\": _escape_name(column, fields_names),\n \"description\": f\"ClassLabel column '{column}' from the Hugging Face parquet file.\\nLabels:\\n\"\n + \", \".join(f\"{name} ({i})\" for i, name in enumerate(feature.names)),\n \"dataType\": \"sc:Integer\",\n \"source\": {\"distribution\": distribution_name, \"extract\": {\"column\": column}},\n }\n )\n else:\n skipped_columns.append(column)\n record_set_name = config if config != dataset else f\"record_set_{config}\"\n description = f\"{dataset} - '{config}' subset\"\n if partial:\n description += \" (first 5GB)\"\n description_body = \"\"\n if len(splits) > 1:\n description_body += f\"\\n- {len(splits)} split{'s' if len(splits) > 1 else ''}: {', '.join(splits)}\"\n if skipped_columns:\n description_body += f\"\\n- {len(skipped_columns)} skipped column{'s' if len(skipped_columns) > 1 else ''}: {', '.join(skipped_columns)}\"\n if description_body:\n description += \"\\n\\nAdditional information:\"\n description += description_body\n record_set.append(\n {\n \"@type\": \"ml:RecordSet\",\n \"name\": _escape_name(record_set_name, names),\n \"description\": description,\n \"field\": fields,\n }\n )\n return {\n 
\"@context\": {\n \"@language\": \"en\",\n \"@vocab\": \"https://schema.org/\",\n \"column\": \"ml:column\",\n \"data\": {\n \"@id\": \"ml:data\",\n \"@type\": \"@json\",\n },\n \"dataType\": {\n \"@id\": \"ml:dataType\",\n \"@type\": \"@vocab\",\n },\n \"extract\": \"ml:extract\",\n \"field\": \"ml:field\",\n \"fileProperty\": \"ml:fileProperty\",\n \"format\": \"ml:format\",\n \"includes\": \"ml:includes\",\n \"isEnumeration\": \"ml:isEnumeration\",\n \"jsonPath\": \"ml:jsonPath\",\n \"ml\": \"http://mlcommons.org/schema/\",\n \"parentField\": \"ml:parentField\",\n \"path\": \"ml:path\",\n \"recordSet\": \"ml:recordSet\",\n \"references\": \"ml:references\",\n \"regex\": \"ml:regex\",\n \"repeated\": \"ml:repeated\",\n \"replace\": \"ml:replace\",\n \"sc\": \"https://schema.org/\",\n \"separator\": \"ml:separator\",\n \"source\": \"ml:source\",\n \"subField\": \"ml:subField\",\n \"transform\": \"ml:transform\",\n },\n \"@type\": \"sc:Dataset\",\n \"name\": _escape_name(dataset, names),\n \"description\": f\"{dataset} dataset hosted on Hugging Face and contributed by the HF Datasets community\",\n \"url\": f\"https://huggingface.co/datasets/{dataset}\",\n \"distribution\": distribution,\n \"recordSet\": record_set,\n }\n\n\ndef create_croissant_endpoint(\n hf_endpoint: str,\n blocked_datasets: list[str],\n hf_token: Optional[str] = None,\n hf_jwt_public_keys: Optional[list[str]] = None,\n hf_jwt_algorithm: Optional[str] = None,\n external_auth_url: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n max_age_long: int = 0,\n max_age_short: int = 0,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> Endpoint:\n async def croissant_endpoint(request: Request) -> Response:\n endpoint_name = \"croissant\"\n context = f\"endpoint: {endpoint_name}\"\n revision: Optional[str] = None\n with StepProfiler(method=\"croissant_endpoint\", step=\"all\", context=context):\n try:\n with StepProfiler(\n method=\"croissant_endpoint\",\n step=\"validate parameters and get processing steps\",\n context=context,\n ):\n dataset = get_request_parameter(request, \"dataset\")\n logging.debug(f\"endpoint={endpoint_name} dataset={dataset}\")\n if not are_valid_parameters([dataset]):\n raise MissingRequiredParameterError(\"Parameter 'dataset' is required\")\n # if auth_check fails, it will raise an exception that will be caught below\n with StepProfiler(method=\"croissant_endpoint\", step=\"check authentication\", context=context):\n await auth_check(\n dataset,\n external_auth_url=external_auth_url,\n request=request,\n hf_jwt_public_keys=hf_jwt_public_keys,\n hf_jwt_algorithm=hf_jwt_algorithm,\n hf_timeout_seconds=hf_timeout_seconds,\n )\n # getting result based on processing steps\n with StepProfiler(method=\"croissant_endpoint\", step=\"get info cache entry\", context=context):\n info_result = get_cache_entry_from_steps(\n processing_step_names=DATASET_INFO_KINDS,\n dataset=dataset,\n config=None,\n split=None,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n blocked_datasets=blocked_datasets,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n content = info_result[\"content\"]\n http_status = info_result[\"http_status\"]\n error_code = info_result[\"error_code\"]\n revision = info_result[\"dataset_git_revision\"]\n if http_status == HTTPStatus.OK:\n infos = list(islice(content[\"dataset_info\"].values(), MAX_CONFIGS))\n partial = content[\"partial\"]\n with StepProfiler(method=\"croissant_endpoint\", step=\"generate croissant json\", 
context=context):\n croissant = get_croissant_from_dataset_infos(dataset=dataset, infos=infos, partial=partial)\n with StepProfiler(method=\"croissant_endpoint\", step=\"generate OK response\", context=context):\n return get_json_ok_response(content=croissant, max_age=max_age_long, revision=revision)\n else:\n with StepProfiler(method=\"croissant_endpoint\", step=\"generate error response\", context=context):\n return get_json_error_response(\n content=content,\n status_code=http_status,\n max_age=max_age_short,\n error_code=error_code,\n revision=revision,\n )\n except Exception as e:\n error = e if isinstance(e, ApiError) else UnexpectedApiError(\"Unexpected error.\", e)\n with StepProfiler(method=\"croissant_endpoint\", step=\"generate API error response\", context=context):\n return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)\n\n return croissant_endpoint\n", "path": "services/api/src/api/routes/croissant.py"}]}
| 3,755 | 403 |
gh_patches_debug_8607
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-2555
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Chile inconsistency between live and historical data acquisition
Our systems use historical data to train forecasts. When run in production, those forecasts require live data. Therefore, the two datafeeds need to be consistent.
I think the Chile parser uses two different datafeeds that aren't consistent, and thus our forecasts might be inconsistent.
If in doubt, we should implement only the real-time feed, to avoid our database ending up with a mix of both.
</issue>
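One way to reconcile the two feeds, sketched here only as an illustration (the helper name below is an assumption, not code from the repository), is to fold the production categories that the live API cannot report separately into `unknown` when processing historical data, so both paths emit the same keys:

    def _harmonize_with_live_feed(datapoint):
        # The live feed only distinguishes wind, solar and unknown, so any
        # category it cannot report separately (hydro, geothermal) is folded
        # into unknown before the datapoint is returned.
        datapoint.setdefault('unknown', 0)
        for fuel in ('hydro', 'geothermal'):
            if fuel in datapoint:
                datapoint['unknown'] += datapoint.pop(fuel)
        return datapoint

Applied to each datapoint produced by the historical path, a step like this would make the historical mix use the same categories as the live mix.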
<code>
[start of parsers/CL.py]
1 #!/usr/bin/env python3
2
3 """Parser for the electricity grid of Chile"""
4
5 import arrow
6 import logging
7 import requests
8 from collections import defaultdict
9 from operator import itemgetter
10 from .lib.validation import validate
11
12 # Historical API
13 API_BASE_URL = "https://sipub.coordinador.cl/api/v1/recursos/generacion_centrales_tecnologia_horario?"
14 # Live API
15 API_BASE_URL_LIVE_TOT = 'http://panelapp.coordinadorelectrico.cl/api/chart/demanda'
16 API_BASE_URL_LIVE_REN = 'http://panelapp.coordinadorelectrico.cl/api/chart/ernc' # ERNC = energias renovables no convencionales
17
18 TYPE_MAPPING = {'hidraulica': 'hydro',
19 'termica': 'unknown',
20 'eolica': 'wind',
21 'solar': 'solar',
22 'geotermica': 'geothermal'}
23
24
25 def get_data_live(session, logger):
26 """Requests live generation data in json format."""
27
28 s = session or requests.session()
29 json_total = s.get(API_BASE_URL_LIVE_TOT).json()
30 json_ren = s.get(API_BASE_URL_LIVE_REN).json()
31
32 return json_total, json_ren
33
34
35 def production_processor_live(json_tot, json_ren):
36 """
37 Extracts generation data and timestamp into dictionary.
38 Returns a list of dictionaries for all of the available "live" data, usually that day.
39 """
40
41 gen_total = json_tot['data'][0]['values']
42
43 if json_ren['data'][1]['key'] == 'ENERGÍA SOLAR':
44 rawgen_sol = json_ren['data'][1]['values']
45 else:
46 raise RuntimeError(
47 f"Unexpected data label. Expected 'ENERGÍA SOLAR' and got {json_ren['data'][1]['key']}")
48
49 if json_ren['data'][0]['key'] == 'ENERGÍA EÓLICA':
50 rawgen_wind = json_ren['data'][0]['values']
51 else:
52 raise RuntimeError(
53 f"Unexpected data label. Expected 'ENERGÍA EÓLICA' and got {json_ren['data'][0]['key']}")
54
55 mapped_totals = []
56
57 for total in gen_total:
58 datapoint = {}
59
60 dt = total[0]
61 for pair in rawgen_sol:
62 if pair[0] == dt:
63 solar = pair[1]
64 break
65 for pair in rawgen_wind:
66 if pair[0] == dt:
67 wind = pair[1]
68 break
69
70 datapoint['datetime'] = arrow.get(dt / 1000, tzinfo='Chile/Continental').datetime
71 datapoint['unknown'] = (total[1] - wind - solar)
72 datapoint['wind'] = wind
73 datapoint['solar'] = solar
74 mapped_totals.append(datapoint)
75
76 return mapped_totals
77
78
79 def production_processor_historical(raw_data):
80 """Takes raw json data and groups by datetime while mapping generation to type.
81 Returns a list of dictionaries.
82 """
83
84 clean_datapoints = []
85 for datapoint in raw_data:
86 clean_datapoint = {}
87 date, hour = datapoint['fecha'], datapoint['hora']
88 hour -= 1 # `hora` starts at 1
89 date = arrow.get(date, "YYYY-MM-DD", tzinfo='Chile/Continental').shift(hours=hour)
90 clean_datapoint['datetime'] = date.datetime
91
92 gen_type_es = datapoint['tipo_central']
93 mapped_gen_type = TYPE_MAPPING[gen_type_es]
94 value_mw = float(datapoint['generacion_sum'])
95
96 clean_datapoint[mapped_gen_type] = value_mw
97
98 clean_datapoints.append(clean_datapoint)
99
100 combined = defaultdict(dict)
101 for elem in clean_datapoints:
102 combined[elem['datetime']].update(elem)
103
104 ordered_data = sorted(combined.values(), key=itemgetter("datetime"))
105
106 return ordered_data
107
108
109 def fetch_production(zone_key='CL', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
110 """Requests the last known production mix (in MW) of a given zone
111 Arguments:
112 zone_key (optional) -- used in case a parser is able to fetch multiple zones
113 session (optional) -- request session passed in order to re-use an existing session
114 target_datetime (optional) -- used if parser can fetch data for a specific day, a string in the form YYYYMMDD
115 logger (optional) -- handles logging when parser is run
116 Return:
117 A list of dictionaries in the form:
118 {
119 'zoneKey': 'FR',
120 'datetime': '2017-01-01T00:00:00Z',
121 'production': {
122 'biomass': 0.0,
123 'coal': 0.0,
124 'gas': 0.0,
125 'hydro': 0.0,
126 'nuclear': null,
127 'oil': 0.0,
128 'solar': 0.0,
129 'wind': 0.0,
130 'geothermal': 0.0,
131 'unknown': 0.0
132 },
133 'storage': {
134 'hydro': -10.0,
135 },
136 'source': 'mysource.com'
137 }
138 """
139
140 if target_datetime is None:
141 gen_tot, gen_ren = get_data_live(session, logger)
142
143 processed_data = production_processor_live(gen_tot, gen_ren)
144
145 data = []
146
147 for production_data in processed_data:
148 dt = production_data.pop('datetime')
149
150 datapoint = {
151 'zoneKey': zone_key,
152 'datetime': dt,
153 'production': production_data,
154 'storage': {
155 'hydro': None,
156 },
157 'source': 'coordinadorelectrico.cl'
158 }
159 datapoint = validate(datapoint, logger,
160 remove_negative=True, floor=1000)
161
162 data.append(datapoint)
163
164 return data
165
166 arr_target_datetime = arrow.get(target_datetime)
167 start = arr_target_datetime.shift(days=-1).format("YYYY-MM-DD")
168 end = arr_target_datetime.format("YYYY-MM-DD")
169
170 date_component = 'fecha__gte={}&fecha__lte={}'.format(start, end)
171
172 # required for access
173 headers = {'Referer': 'https://www.coordinador.cl/operacion/graficos/operacion-real/generacion-real-del-sistema/',
174 'Origin': 'https://www.coordinador.cl'}
175
176 s = session or requests.Session()
177 url = API_BASE_URL + date_component
178
179 req = s.get(url, headers=headers)
180 raw_data = req.json()['aggs']
181 processed_data = production_processor_historical(raw_data)
182
183 data = []
184 for production_data in processed_data:
185 dt = production_data.pop('datetime')
186
187 datapoint = {
188 'zoneKey': zone_key,
189 'datetime': dt,
190 'production': production_data,
191 'storage': {
192 'hydro': None,
193 },
194 'source': 'coordinador.cl'
195 }
196
197 data.append(datapoint)
198
199 return data[:-9]
200 """The last 9 datapoints should be omitted because they usually are incomplete and shouldn't appear on the map."""
201
202
203 if __name__ == "__main__":
204 """Main method, never used by the Electricity Map backend, but handy for testing."""
205 print('fetch_production() ->')
206 print(fetch_production())
207 # For fetching historical data instead, try:
208 print(fetch_production(target_datetime=arrow.get("20200220", "YYYYMMDD")))
209
[end of parsers/CL.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/CL.py b/parsers/CL.py
--- a/parsers/CL.py
+++ b/parsers/CL.py
@@ -103,6 +103,17 @@
ordered_data = sorted(combined.values(), key=itemgetter("datetime"))
+ # For consistency with live API, hydro and geothermal must be squeezed into unknown
+ for datapoint in ordered_data:
+ if 'unknown' not in datapoint:
+ datapoint['unknown'] = 0
+ if 'hydro' in datapoint:
+ datapoint['unknown'] += datapoint['hydro']
+ del datapoint['hydro']
+ if 'geothermal' in datapoint:
+ datapoint['unknown'] += datapoint['geothermal']
+ del datapoint['geothermal']
+
return ordered_data
|
{"golden_diff": "diff --git a/parsers/CL.py b/parsers/CL.py\n--- a/parsers/CL.py\n+++ b/parsers/CL.py\n@@ -103,6 +103,17 @@\n \n ordered_data = sorted(combined.values(), key=itemgetter(\"datetime\"))\n \n+ # For consistency with live API, hydro and geothermal must be squeezed into unknown\n+ for datapoint in ordered_data:\n+ if 'unknown' not in datapoint:\n+ datapoint['unknown'] = 0\n+ if 'hydro' in datapoint:\n+ datapoint['unknown'] += datapoint['hydro']\n+ del datapoint['hydro']\n+ if 'geothermal' in datapoint:\n+ datapoint['unknown'] += datapoint['geothermal']\n+ del datapoint['geothermal']\n+\n return ordered_data\n", "issue": "Chile inconsistency between live and historical data acquisition\nOur systems use historical data to train forecasts. When run in production, those forecasts require live data. Therefore, the two datafeeds need to be consistent.\r\nI think the Chile parser uses two different datafeeds that aren't consistent, and thus our forecasts might be inconsistent.\r\nIf in doubt, we should only implement the real-time feed to avoid our database having both data mixed.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for the electricity grid of Chile\"\"\"\n\nimport arrow\nimport logging\nimport requests\nfrom collections import defaultdict\nfrom operator import itemgetter\nfrom .lib.validation import validate\n\n# Historical API\nAPI_BASE_URL = \"https://sipub.coordinador.cl/api/v1/recursos/generacion_centrales_tecnologia_horario?\"\n# Live API\nAPI_BASE_URL_LIVE_TOT = 'http://panelapp.coordinadorelectrico.cl/api/chart/demanda'\nAPI_BASE_URL_LIVE_REN = 'http://panelapp.coordinadorelectrico.cl/api/chart/ernc' # ERNC = energias renovables no convencionales\n\nTYPE_MAPPING = {'hidraulica': 'hydro',\n 'termica': 'unknown',\n 'eolica': 'wind',\n 'solar': 'solar',\n 'geotermica': 'geothermal'}\n\n\ndef get_data_live(session, logger):\n \"\"\"Requests live generation data in json format.\"\"\"\n\n s = session or requests.session()\n json_total = s.get(API_BASE_URL_LIVE_TOT).json()\n json_ren = s.get(API_BASE_URL_LIVE_REN).json()\n\n return json_total, json_ren\n\n\ndef production_processor_live(json_tot, json_ren):\n \"\"\"\n Extracts generation data and timestamp into dictionary.\n Returns a list of dictionaries for all of the available \"live\" data, usually that day.\n \"\"\"\n\n gen_total = json_tot['data'][0]['values']\n\n if json_ren['data'][1]['key'] == 'ENERG\u00cdA SOLAR':\n rawgen_sol = json_ren['data'][1]['values']\n else:\n raise RuntimeError(\n f\"Unexpected data label. Expected 'ENERG\u00cdA SOLAR' and got {json_ren['data'][1]['key']}\")\n\n if json_ren['data'][0]['key'] == 'ENERG\u00cdA E\u00d3LICA':\n rawgen_wind = json_ren['data'][0]['values']\n else:\n raise RuntimeError(\n f\"Unexpected data label. 
Expected 'ENERG\u00cdA E\u00d3LICA' and got {json_ren['data'][0]['key']}\")\n\n mapped_totals = []\n\n for total in gen_total:\n datapoint = {}\n\n dt = total[0]\n for pair in rawgen_sol:\n if pair[0] == dt:\n solar = pair[1]\n break\n for pair in rawgen_wind:\n if pair[0] == dt:\n wind = pair[1]\n break\n\n datapoint['datetime'] = arrow.get(dt / 1000, tzinfo='Chile/Continental').datetime\n datapoint['unknown'] = (total[1] - wind - solar)\n datapoint['wind'] = wind\n datapoint['solar'] = solar\n mapped_totals.append(datapoint)\n\n return mapped_totals\n\n\ndef production_processor_historical(raw_data):\n \"\"\"Takes raw json data and groups by datetime while mapping generation to type.\n Returns a list of dictionaries.\n \"\"\"\n\n clean_datapoints = []\n for datapoint in raw_data:\n clean_datapoint = {}\n date, hour = datapoint['fecha'], datapoint['hora']\n hour -= 1 # `hora` starts at 1\n date = arrow.get(date, \"YYYY-MM-DD\", tzinfo='Chile/Continental').shift(hours=hour)\n clean_datapoint['datetime'] = date.datetime\n\n gen_type_es = datapoint['tipo_central']\n mapped_gen_type = TYPE_MAPPING[gen_type_es]\n value_mw = float(datapoint['generacion_sum'])\n\n clean_datapoint[mapped_gen_type] = value_mw\n\n clean_datapoints.append(clean_datapoint)\n\n combined = defaultdict(dict)\n for elem in clean_datapoints:\n combined[elem['datetime']].update(elem)\n\n ordered_data = sorted(combined.values(), key=itemgetter(\"datetime\"))\n\n return ordered_data\n\n\ndef fetch_production(zone_key='CL', session=None, target_datetime=None, logger=logging.getLogger(__name__)):\n \"\"\"Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n target_datetime (optional) -- used if parser can fetch data for a specific day, a string in the form YYYYMMDD\n logger (optional) -- handles logging when parser is run\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime is None:\n gen_tot, gen_ren = get_data_live(session, logger)\n\n processed_data = production_processor_live(gen_tot, gen_ren)\n\n data = []\n\n for production_data in processed_data:\n dt = production_data.pop('datetime')\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'production': production_data,\n 'storage': {\n 'hydro': None,\n },\n 'source': 'coordinadorelectrico.cl'\n }\n datapoint = validate(datapoint, logger,\n remove_negative=True, floor=1000)\n\n data.append(datapoint)\n\n return data\n\n arr_target_datetime = arrow.get(target_datetime)\n start = arr_target_datetime.shift(days=-1).format(\"YYYY-MM-DD\")\n end = arr_target_datetime.format(\"YYYY-MM-DD\")\n\n date_component = 'fecha__gte={}&fecha__lte={}'.format(start, end)\n\n # required for access\n headers = {'Referer': 'https://www.coordinador.cl/operacion/graficos/operacion-real/generacion-real-del-sistema/',\n 'Origin': 'https://www.coordinador.cl'}\n\n s = session or requests.Session()\n url = API_BASE_URL + date_component\n\n req = s.get(url, headers=headers)\n raw_data = req.json()['aggs']\n processed_data = production_processor_historical(raw_data)\n\n 
data = []\n for production_data in processed_data:\n dt = production_data.pop('datetime')\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'production': production_data,\n 'storage': {\n 'hydro': None,\n },\n 'source': 'coordinador.cl'\n }\n\n data.append(datapoint)\n\n return data[:-9]\n \"\"\"The last 9 datapoints should be omitted because they usually are incomplete and shouldn't appear on the map.\"\"\"\n\n\nif __name__ == \"__main__\":\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n print('fetch_production() ->')\n print(fetch_production())\n # For fetching historical data instead, try:\n print(fetch_production(target_datetime=arrow.get(\"20200220\", \"YYYYMMDD\")))\n", "path": "parsers/CL.py"}]}
| 2,828 | 185 |
gh_patches_debug_38855
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-1458
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VisPy isocurve incompatible with matplotlib 2.2.0
Matplotlib 2.2 is now out and the private contour API `from matplotlib import _cntr` has been removed. We knew this day would come. @QuLogic pointed this thread out on Gitter: https://mail.python.org/pipermail/matplotlib-users/2018-March/001313.html
The TL;DR of that is that we can either copy the source from old mpl releases or try using skimage. I'm not a huge fan of adding such a huge (optional) dependency onto vispy, but as `isocurve.py` sits right now it uses the `_cntr` classes as an optional dependency, so it shouldn't be a huge deal.
http://scikit-image.org/docs/dev/auto_examples/edges/plot_contours.html
As a quick hack I will try to make a PR in the next couple of days to switch the import of `_cntr` to a try/except and use the slow contouring method if it isn't found. That should at least let our tests pass and give GitHub users working code.
</issue>
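A minimal sketch of the import guard the issue proposes — shown only as an illustration, not the shipped implementation — would try scikit-image first and fall back to the slower pure-Python contouring when it is unavailable:

    try:
        from skimage.measure import find_contours
        _HAS_SKI = True
    except ImportError:
        find_contours = None
        _HAS_SKI = False

    # Later, the level computation can branch on _HAS_SKI and call
    # find_contours(data, level) when available, otherwise keep using the
    # existing vispy.geometry.isocurve fallback.

With a guard like this the hard dependency on matplotlib's removed `_cntr` module disappears, and the tests can pass whether or not scikit-image is installed.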
<code>
[start of vispy/visuals/isocurve.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from __future__ import division
6
7 import numpy as np
8
9 from .line import LineVisual
10 from ..color import ColorArray
11 from ..color.colormap import _normalize, get_colormap
12 from ..geometry.isocurve import isocurve
13 from ..testing import has_matplotlib
14
15 # checking for matplotlib
16 _HAS_MPL = has_matplotlib()
17 if _HAS_MPL:
18 try:
19 from matplotlib import _cntr as cntr
20 except ImportError:
21 import warnings
22 warnings.warn("VisPy is not yet compatible with matplotlib 2.2+")
23 _HAS_MPL = False
24 cntr = None
25
26
27 class IsocurveVisual(LineVisual):
28 """Displays an isocurve of a 2D scalar array.
29
30 Parameters
31 ----------
32 data : ndarray | None
33 2D scalar array.
34 levels : ndarray, shape (Nlev,) | None
35 The levels at which the isocurve is constructed from "*data*".
36 color_lev : Color, colormap name, tuple, list or array
37 The color to use when drawing the line. If a list is given, it
38 must be of shape (Nlev), if an array is given, it must be of
39 shape (Nlev, ...). and provide one color per level (rgba, colorname).
40 clim : tuple
41 (min, max) limits to apply when mapping level values through a
42 colormap.
43 **kwargs : dict
44 Keyword arguments to pass to `LineVisual`.
45
46 Notes
47 -----
48 """
49 def __init__(self, data=None, levels=None, color_lev=None, clim=None,
50 **kwargs):
51 self._data = None
52 self._levels = levels
53 self._color_lev = color_lev
54 self._clim = clim
55 self._need_color_update = True
56 self._need_level_update = True
57 self._need_recompute = True
58 self._X = None
59 self._Y = None
60 self._iso = None
61 self._level_min = None
62 self._data_is_uniform = False
63 self._lc = None
64 self._cl = None
65 self._li = None
66 self._connect = None
67 self._verts = None
68 kwargs['method'] = 'gl'
69 kwargs['antialias'] = False
70 LineVisual.__init__(self, **kwargs)
71 if data is not None:
72 self.set_data(data)
73
74 @property
75 def levels(self):
76 """ The threshold at which the isocurve is constructed from the
77 2D data.
78 """
79 return self._levels
80
81 @levels.setter
82 def levels(self, levels):
83 self._levels = levels
84 self._need_level_update = True
85 self._need_recompute = True
86 self.update()
87
88 @property
89 def color(self):
90 return self._color_lev
91
92 @color.setter
93 def color(self, color):
94 self._color_lev = color
95 self._need_level_update = True
96 self._need_color_update = True
97 self.update()
98
99 def set_data(self, data):
100 """ Set the scalar array data
101
102 Parameters
103 ----------
104 data : ndarray
105 A 2D array of scalar values. The isocurve is constructed to show
106 all locations in the scalar field equal to ``self.levels``.
107 """
108 self._data = data
109
110 # if using matplotlib isoline algorithm we have to check for meshgrid
111 # and we can setup the tracer object here
112 if _HAS_MPL:
113 if self._X is None or self._X.shape != data.shape:
114 self._X, self._Y = np.meshgrid(np.arange(data.shape[1]),
115 np.arange(data.shape[0]))
116 self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
117
118 if self._clim is None:
119 self._clim = (data.min(), data.max())
120
121 # sanity check,
122 # should we raise an error here, since no isolines can be drawn?
123 # for now, _prepare_draw returns False if no isoline can be drawn
124 if self._data.min() != self._data.max():
125 self._data_is_uniform = False
126 else:
127 self._data_is_uniform = True
128
129 self._need_recompute = True
130 self.update()
131
132 def _get_verts_and_connect(self, paths):
133 """ retrieve vertices and connects from given paths-list
134 """
135 verts = np.vstack(paths)
136 gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
137 connect = np.ones(gaps[-1], dtype=bool)
138 connect[gaps[:-1]] = False
139 return verts, connect
140
141 def _compute_iso_line(self):
142 """ compute LineVisual vertices, connects and color-index
143 """
144 level_index = []
145 connects = []
146 verts = []
147
148 # calculate which level are within data range
149 # this works for now and the existing examples, but should be tested
150 # thoroughly also with the data-sanity check in set_data-function
151 choice = np.nonzero((self.levels > self._data.min()) &
152 (self._levels < self._data.max()))
153 levels_to_calc = np.array(self.levels)[choice]
154
155 # save minimum level index
156 self._level_min = choice[0][0]
157
158 for level in levels_to_calc:
159 # if we use matplotlib isoline algorithm we need to add half a
160 # pixel in both (x,y) dimensions because isolines are aligned to
161 # pixel centers
162 if _HAS_MPL:
163 nlist = self._iso.trace(level, level, 0)
164 paths = nlist[:len(nlist)//2]
165 v, c = self._get_verts_and_connect(paths)
166 v += np.array([0.5, 0.5])
167 else:
168 paths = isocurve(self._data.astype(float).T, level,
169 extend_to_edge=True, connected=True)
170 v, c = self._get_verts_and_connect(paths)
171
172 level_index.append(v.shape[0])
173 connects.append(np.hstack((c, [False])))
174 verts.append(v)
175
176 self._li = np.hstack(level_index)
177 self._connect = np.hstack(connects)
178 self._verts = np.vstack(verts)
179
180 def _compute_iso_color(self):
181 """ compute LineVisual color from level index and corresponding color
182 """
183 level_color = []
184 colors = self._lc
185 for i, index in enumerate(self._li):
186 level_color.append(np.zeros((index, 4)) +
187 colors[i+self._level_min])
188 self._cl = np.vstack(level_color)
189
190 def _levels_to_colors(self):
191 # computes ColorArrays for given levels
192 # try _color_lev as colormap, except as everything else
193 try:
194 f_color_levs = get_colormap(self._color_lev)
195 except (KeyError, TypeError):
196 colors = ColorArray(self._color_lev).rgba
197 else:
198 lev = _normalize(self._levels, self._clim[0], self._clim[1])
199 # map function expects (Nlev,1)!
200 colors = f_color_levs.map(lev[:, np.newaxis])
201
202 # broadcast to (nlev, 4) array
203 if len(colors) == 1:
204 colors = colors * np.ones((len(self._levels), 1))
205
206 # detect color_lev/levels mismatch and raise error
207 if (len(colors) != len(self._levels)):
208 raise TypeError("Color/level mismatch. Color must be of shape "
209 "(Nlev, ...) and provide one color per level")
210
211 self._lc = colors
212
213 def _prepare_draw(self, view):
214 if (self._data is None or self._levels is None or
215 self._color_lev is None or self._data_is_uniform):
216 return False
217
218 if self._need_level_update:
219 self._levels_to_colors()
220 self._need_level_update = False
221
222 if self._need_recompute:
223 self._compute_iso_line()
224 self._compute_iso_color()
225 LineVisual.set_data(self, pos=self._verts, connect=self._connect,
226 color=self._cl)
227 self._need_recompute = False
228
229 if self._need_color_update:
230 self._compute_iso_color()
231 LineVisual.set_data(self, color=self._cl)
232 self._need_color_update = False
233
234 return LineVisual._prepare_draw(self, view)
235
[end of vispy/visuals/isocurve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vispy/visuals/isocurve.py b/vispy/visuals/isocurve.py
--- a/vispy/visuals/isocurve.py
+++ b/vispy/visuals/isocurve.py
@@ -10,18 +10,16 @@
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
-from ..testing import has_matplotlib
+from ..testing import has_skimage
-# checking for matplotlib
-_HAS_MPL = has_matplotlib()
-if _HAS_MPL:
+# checking for scikit-image
+_HAS_SKI = has_skimage()
+if _HAS_SKI:
try:
- from matplotlib import _cntr as cntr
+ from skimage.measure import find_contours
except ImportError:
- import warnings
- warnings.warn("VisPy is not yet compatible with matplotlib 2.2+")
- _HAS_MPL = False
- cntr = None
+ _HAS_SKI = False
+ find_contours = None
class IsocurveVisual(LineVisual):
@@ -55,9 +53,6 @@
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
- self._X = None
- self._Y = None
- self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
@@ -107,14 +102,6 @@
"""
self._data = data
- # if using matplotlib isoline algorithm we have to check for meshgrid
- # and we can setup the tracer object here
- if _HAS_MPL:
- if self._X is None or self._X.shape != data.shape:
- self._X, self._Y = np.meshgrid(np.arange(data.shape[1]),
- np.arange(data.shape[0]))
- self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
-
if self._clim is None:
self._clim = (data.min(), data.max())
@@ -156,13 +143,15 @@
self._level_min = choice[0][0]
for level in levels_to_calc:
- # if we use matplotlib isoline algorithm we need to add half a
+ # if we use skimage isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
- if _HAS_MPL:
- nlist = self._iso.trace(level, level, 0)
- paths = nlist[:len(nlist)//2]
- v, c = self._get_verts_and_connect(paths)
+ if _HAS_SKI:
+ contours = find_contours(self._data, level,
+ positive_orientation='high')
+ v, c = self._get_verts_and_connect(contours)
+ # swap row, column to column, row (x, y)
+ v[:, [0, 1]] = v[:, [1, 0]]
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
|
{"golden_diff": "diff --git a/vispy/visuals/isocurve.py b/vispy/visuals/isocurve.py\n--- a/vispy/visuals/isocurve.py\n+++ b/vispy/visuals/isocurve.py\n@@ -10,18 +10,16 @@\n from ..color import ColorArray\n from ..color.colormap import _normalize, get_colormap\n from ..geometry.isocurve import isocurve\n-from ..testing import has_matplotlib\n+from ..testing import has_skimage\n \n-# checking for matplotlib\n-_HAS_MPL = has_matplotlib()\n-if _HAS_MPL:\n+# checking for scikit-image\n+_HAS_SKI = has_skimage()\n+if _HAS_SKI:\n try:\n- from matplotlib import _cntr as cntr\n+ from skimage.measure import find_contours\n except ImportError:\n- import warnings\n- warnings.warn(\"VisPy is not yet compatible with matplotlib 2.2+\")\n- _HAS_MPL = False\n- cntr = None\n+ _HAS_SKI = False\n+ find_contours = None\n \n \n class IsocurveVisual(LineVisual):\n@@ -55,9 +53,6 @@\n self._need_color_update = True\n self._need_level_update = True\n self._need_recompute = True\n- self._X = None\n- self._Y = None\n- self._iso = None\n self._level_min = None\n self._data_is_uniform = False\n self._lc = None\n@@ -107,14 +102,6 @@\n \"\"\"\n self._data = data\n \n- # if using matplotlib isoline algorithm we have to check for meshgrid\n- # and we can setup the tracer object here\n- if _HAS_MPL:\n- if self._X is None or self._X.shape != data.shape:\n- self._X, self._Y = np.meshgrid(np.arange(data.shape[1]),\n- np.arange(data.shape[0]))\n- self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))\n-\n if self._clim is None:\n self._clim = (data.min(), data.max())\n \n@@ -156,13 +143,15 @@\n self._level_min = choice[0][0]\n \n for level in levels_to_calc:\n- # if we use matplotlib isoline algorithm we need to add half a\n+ # if we use skimage isoline algorithm we need to add half a\n # pixel in both (x,y) dimensions because isolines are aligned to\n # pixel centers\n- if _HAS_MPL:\n- nlist = self._iso.trace(level, level, 0)\n- paths = nlist[:len(nlist)//2]\n- v, c = self._get_verts_and_connect(paths)\n+ if _HAS_SKI:\n+ contours = find_contours(self._data, level,\n+ positive_orientation='high')\n+ v, c = self._get_verts_and_connect(contours)\n+ # swap row, column to column, row (x, y)\n+ v[:, [0, 1]] = v[:, [1, 0]]\n v += np.array([0.5, 0.5])\n else:\n paths = isocurve(self._data.astype(float).T, level,\n", "issue": "VisPy isocurve incompatible with matplotlib 2.2.0\nMatplotlib 2.2 is now out and the private contour api `from matplotlib import _cntr` has been removed. We knew this day would come. @QuLogic pointed this thread out on gitter: https://mail.python.org/pipermail/matplotlib-users/2018-March/001313.html\r\n\r\nThe TL;DR of that is that we can either copy the source from old mpl releases or we could try using skimage. I'm not a huge fan of adding such a huge (optional) dependency on to vispy, but as `isocurve.py` sits right now it uses the `_cntr` classes as an optional dependency so it shouldn't be a huge deal.\r\n\r\nhttp://scikit-image.org/docs/dev/auto_examples/edges/plot_contours.html\r\n\r\nAs a quick hack I will try to make a PR in the next couple days to switch the import of `_cntr` to a try/except and use the slow contouring method if it isn't found. That should at least let our tests pass and give github users working code.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom .line import LineVisual\nfrom ..color import ColorArray\nfrom ..color.colormap import _normalize, get_colormap\nfrom ..geometry.isocurve import isocurve\nfrom ..testing import has_matplotlib\n\n# checking for matplotlib\n_HAS_MPL = has_matplotlib()\nif _HAS_MPL:\n try:\n from matplotlib import _cntr as cntr\n except ImportError:\n import warnings\n warnings.warn(\"VisPy is not yet compatible with matplotlib 2.2+\")\n _HAS_MPL = False\n cntr = None\n\n\nclass IsocurveVisual(LineVisual):\n \"\"\"Displays an isocurve of a 2D scalar array.\n\n Parameters\n ----------\n data : ndarray | None\n 2D scalar array.\n levels : ndarray, shape (Nlev,) | None\n The levels at which the isocurve is constructed from \"*data*\".\n color_lev : Color, colormap name, tuple, list or array\n The color to use when drawing the line. If a list is given, it\n must be of shape (Nlev), if an array is given, it must be of\n shape (Nlev, ...). and provide one color per level (rgba, colorname).\n clim : tuple\n (min, max) limits to apply when mapping level values through a\n colormap.\n **kwargs : dict\n Keyword arguments to pass to `LineVisual`.\n\n Notes\n -----\n \"\"\"\n def __init__(self, data=None, levels=None, color_lev=None, clim=None,\n **kwargs):\n self._data = None\n self._levels = levels\n self._color_lev = color_lev\n self._clim = clim\n self._need_color_update = True\n self._need_level_update = True\n self._need_recompute = True\n self._X = None\n self._Y = None\n self._iso = None\n self._level_min = None\n self._data_is_uniform = False\n self._lc = None\n self._cl = None\n self._li = None\n self._connect = None\n self._verts = None\n kwargs['method'] = 'gl'\n kwargs['antialias'] = False\n LineVisual.__init__(self, **kwargs)\n if data is not None:\n self.set_data(data)\n\n @property\n def levels(self):\n \"\"\" The threshold at which the isocurve is constructed from the\n 2D data.\n \"\"\"\n return self._levels\n\n @levels.setter\n def levels(self, levels):\n self._levels = levels\n self._need_level_update = True\n self._need_recompute = True\n self.update()\n\n @property\n def color(self):\n return self._color_lev\n\n @color.setter\n def color(self, color):\n self._color_lev = color\n self._need_level_update = True\n self._need_color_update = True\n self.update()\n\n def set_data(self, data):\n \"\"\" Set the scalar array data\n\n Parameters\n ----------\n data : ndarray\n A 2D array of scalar values. 
The isocurve is constructed to show\n all locations in the scalar field equal to ``self.levels``.\n \"\"\"\n self._data = data\n\n # if using matplotlib isoline algorithm we have to check for meshgrid\n # and we can setup the tracer object here\n if _HAS_MPL:\n if self._X is None or self._X.shape != data.shape:\n self._X, self._Y = np.meshgrid(np.arange(data.shape[1]),\n np.arange(data.shape[0]))\n self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))\n\n if self._clim is None:\n self._clim = (data.min(), data.max())\n\n # sanity check,\n # should we raise an error here, since no isolines can be drawn?\n # for now, _prepare_draw returns False if no isoline can be drawn\n if self._data.min() != self._data.max():\n self._data_is_uniform = False\n else:\n self._data_is_uniform = True\n\n self._need_recompute = True\n self.update()\n\n def _get_verts_and_connect(self, paths):\n \"\"\" retrieve vertices and connects from given paths-list\n \"\"\"\n verts = np.vstack(paths)\n gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1\n connect = np.ones(gaps[-1], dtype=bool)\n connect[gaps[:-1]] = False\n return verts, connect\n\n def _compute_iso_line(self):\n \"\"\" compute LineVisual vertices, connects and color-index\n \"\"\"\n level_index = []\n connects = []\n verts = []\n\n # calculate which level are within data range\n # this works for now and the existing examples, but should be tested\n # thoroughly also with the data-sanity check in set_data-function\n choice = np.nonzero((self.levels > self._data.min()) &\n (self._levels < self._data.max()))\n levels_to_calc = np.array(self.levels)[choice]\n\n # save minimum level index\n self._level_min = choice[0][0]\n\n for level in levels_to_calc:\n # if we use matplotlib isoline algorithm we need to add half a\n # pixel in both (x,y) dimensions because isolines are aligned to\n # pixel centers\n if _HAS_MPL:\n nlist = self._iso.trace(level, level, 0)\n paths = nlist[:len(nlist)//2]\n v, c = self._get_verts_and_connect(paths)\n v += np.array([0.5, 0.5])\n else:\n paths = isocurve(self._data.astype(float).T, level,\n extend_to_edge=True, connected=True)\n v, c = self._get_verts_and_connect(paths)\n\n level_index.append(v.shape[0])\n connects.append(np.hstack((c, [False])))\n verts.append(v)\n\n self._li = np.hstack(level_index)\n self._connect = np.hstack(connects)\n self._verts = np.vstack(verts)\n\n def _compute_iso_color(self):\n \"\"\" compute LineVisual color from level index and corresponding color\n \"\"\"\n level_color = []\n colors = self._lc\n for i, index in enumerate(self._li):\n level_color.append(np.zeros((index, 4)) +\n colors[i+self._level_min])\n self._cl = np.vstack(level_color)\n\n def _levels_to_colors(self):\n # computes ColorArrays for given levels\n # try _color_lev as colormap, except as everything else\n try:\n f_color_levs = get_colormap(self._color_lev)\n except (KeyError, TypeError):\n colors = ColorArray(self._color_lev).rgba\n else:\n lev = _normalize(self._levels, self._clim[0], self._clim[1])\n # map function expects (Nlev,1)!\n colors = f_color_levs.map(lev[:, np.newaxis])\n\n # broadcast to (nlev, 4) array\n if len(colors) == 1:\n colors = colors * np.ones((len(self._levels), 1))\n\n # detect color_lev/levels mismatch and raise error\n if (len(colors) != len(self._levels)):\n raise TypeError(\"Color/level mismatch. Color must be of shape \"\n \"(Nlev, ...) 
and provide one color per level\")\n\n self._lc = colors\n\n def _prepare_draw(self, view):\n if (self._data is None or self._levels is None or\n self._color_lev is None or self._data_is_uniform):\n return False\n\n if self._need_level_update:\n self._levels_to_colors()\n self._need_level_update = False\n\n if self._need_recompute:\n self._compute_iso_line()\n self._compute_iso_color()\n LineVisual.set_data(self, pos=self._verts, connect=self._connect,\n color=self._cl)\n self._need_recompute = False\n\n if self._need_color_update:\n self._compute_iso_color()\n LineVisual.set_data(self, color=self._cl)\n self._need_color_update = False\n\n return LineVisual._prepare_draw(self, view)\n", "path": "vispy/visuals/isocurve.py"}]}
| 3,282 | 752 |
gh_patches_debug_16396
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-2099
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
decimal.Decimal cannot be sent across session
When trying to give data of type decimal.Decimal as data sources to plots, the BokehJSONEncoder tries to serialise the Decimal object with the standard built-in JSON encoder. This causes an exception "Decimal('...') not JSON serializable". The solution is to edit BokehJSONEncoder.transform_python_types to account for this possibility. I have tested the solution and it works.
</issue>
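A minimal illustration of the proposed fix — the class name below is hypothetical; in practice the check would live in `BokehJSONEncoder.transform_python_types` — is to convert `decimal.Decimal` to a JSON-safe number before deferring to the default encoder:

    import decimal
    import json

    class DecimalAwareEncoder(json.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, decimal.Decimal):
                return float(obj)  # or str(obj) if precision must be preserved
            return super(DecimalAwareEncoder, self).default(obj)

The same `isinstance(obj, decimal.Decimal)` branch, added to `transform_python_types`, resolves the "not JSON serializable" exception described above.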
<code>
[start of bokeh/protocol.py]
1 from __future__ import absolute_import
2
3 import json
4 import logging
5 import time
6 import datetime as dt
7 import calendar
8
9 import numpy as np
10 from six.moves import cPickle as pickle
11
12 try:
13 import pandas as pd
14 is_pandas = True
15 except ImportError:
16 is_pandas = False
17
18 try:
19 from dateutil.relativedelta import relativedelta
20 is_dateutil = True
21 except ImportError:
22 is_dateutil = False
23
24 from .settings import settings
25
26 log = logging.getLogger(__name__)
27
28 millifactor = 10**6.0
29
30 class BokehJSONEncoder(json.JSONEncoder):
31 def transform_series(self, obj):
32 """transform series
33 """
34 vals = obj.values
35 return self.transform_array(vals)
36
37 # Check for astype failures (putative Numpy < 1.7)
38 dt2001 = np.datetime64('2001')
39 legacy_datetime64 = (dt2001.astype('int64') ==
40 dt2001.astype('datetime64[ms]').astype('int64'))
41 def transform_array(self, obj):
42 """Transform arrays into lists of json safe types
43 also handles pandas series, and replacing
44 nans and infs with strings
45 """
46 ## not quite correct, truncates to ms..
47 if obj.dtype.kind == 'M':
48 if self.legacy_datetime64:
49 if obj.dtype == np.dtype('datetime64[ns]'):
50 return (obj.astype('int64') / millifactor).tolist()
51 # else punt.
52 else:
53 return obj.astype('datetime64[ms]').astype('int64').tolist()
54 elif obj.dtype.kind in ('u', 'i', 'f'):
55 return self.transform_numerical_array(obj)
56 return obj.tolist()
57
58 def transform_numerical_array(self, obj):
59 """handles nans/inf conversion
60 """
61 if isinstance(obj, np.ma.MaskedArray):
62 obj = obj.filled(np.nan) # Set masked values to nan
63 if not np.isnan(obj).any() and not np.isinf(obj).any():
64 return obj.tolist()
65 else:
66 transformed = obj.astype('object')
67 transformed[np.isnan(obj)] = 'NaN'
68 transformed[np.isposinf(obj)] = 'Infinity'
69 transformed[np.isneginf(obj)] = '-Infinity'
70 return transformed.tolist()
71
72 def transform_python_types(self, obj):
73 """handle special scalars, default to default json encoder
74 """
75 # Pandas Timestamp
76 if is_pandas and isinstance(obj, pd.tslib.Timestamp):
77 return obj.value / millifactor #nanosecond to millisecond
78 elif np.issubdtype(type(obj), np.float):
79 return float(obj)
80 elif np.issubdtype(type(obj), np.int):
81 return int(obj)
82 elif np.issubdtype(type(obj), np.bool_):
83 return bool(obj)
84 # Datetime, Date
85 elif isinstance(obj, (dt.datetime, dt.date)):
86 return calendar.timegm(obj.timetuple()) * 1000.
87 # Numpy datetime64
88 elif isinstance(obj, np.datetime64):
89 epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
90 return (epoch_delta / np.timedelta64(1, 'ms'))
91 # Time
92 elif isinstance(obj, dt.time):
93 return (obj.hour*3600 + obj.minute*60 + obj.second)*1000 + obj.microsecond / 1000.
94 elif is_dateutil and isinstance(obj, relativedelta):
95 return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
96 minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
97 else:
98 return super(BokehJSONEncoder, self).default(obj)
99
100 def default(self, obj):
101 #argh! local import!
102 from .plot_object import PlotObject
103 from .properties import HasProps
104 from .colors import Color
105 ## array types
106 if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
107 return self.transform_series(obj)
108 elif isinstance(obj, np.ndarray):
109 return self.transform_array(obj)
110 elif isinstance(obj, PlotObject):
111 return obj.ref
112 elif isinstance(obj, HasProps):
113 return obj.changed_properties_with_values()
114 elif isinstance(obj, Color):
115 return obj.to_css()
116 else:
117 return self.transform_python_types(obj)
118
119 def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
120 if settings.pretty(False):
121 kwargs["indent"] = 4
122 return json.dumps(obj, cls=encoder, **kwargs)
123
124 deserialize_json = json.loads
125
126 serialize_web = serialize_json
127
128 deserialize_web = deserialize_json
129
130 def status_obj(status):
131 return {'msgtype': 'status',
132 'status': status}
133
134 def error_obj(error_msg):
135 return {
136 'msgtype': 'error',
137 'error_msg': error_msg}
138
[end of bokeh/protocol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bokeh/protocol.py b/bokeh/protocol.py
--- a/bokeh/protocol.py
+++ b/bokeh/protocol.py
@@ -5,6 +5,7 @@
import time
import datetime as dt
import calendar
+import decimal
import numpy as np
from six.moves import cPickle as pickle
@@ -94,6 +95,9 @@
elif is_dateutil and isinstance(obj, relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
+ # Decimal
+ elif isinstance(obj, decimal.Decimal):
+ return float(obj)
else:
return super(BokehJSONEncoder, self).default(obj)
|
{"golden_diff": "diff --git a/bokeh/protocol.py b/bokeh/protocol.py\n--- a/bokeh/protocol.py\n+++ b/bokeh/protocol.py\n@@ -5,6 +5,7 @@\n import time\n import datetime as dt\n import calendar\n+import decimal\n \n import numpy as np\n from six.moves import cPickle as pickle\n@@ -94,6 +95,9 @@\n elif is_dateutil and isinstance(obj, relativedelta):\n return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,\n minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)\n+ # Decimal\n+ elif isinstance(obj, decimal.Decimal):\n+ return float(obj)\n else:\n return super(BokehJSONEncoder, self).default(obj)\n", "issue": "decimal.Decimal cannot be sent across session\nWhen trying to give data of type decimal.Decimal as data sources to plots, the BokehJSONEncoder does tries to serialise the Decimal object with the standard built-in JSON encoder. This causes an exception \"Decimal('...') not JSON serializable\". The solution is to edit BokehJSONEncoder.trasnform_python_types to account for this possibility. I have tested the solution and it works.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport json\nimport logging\nimport time\nimport datetime as dt\nimport calendar\n\nimport numpy as np\nfrom six.moves import cPickle as pickle\n\ntry:\n import pandas as pd\n is_pandas = True\nexcept ImportError:\n is_pandas = False\n\ntry:\n from dateutil.relativedelta import relativedelta\n is_dateutil = True\nexcept ImportError:\n is_dateutil = False\n\nfrom .settings import settings\n\nlog = logging.getLogger(__name__)\n\nmillifactor = 10**6.0\n\nclass BokehJSONEncoder(json.JSONEncoder):\n def transform_series(self, obj):\n \"\"\"transform series\n \"\"\"\n vals = obj.values\n return self.transform_array(vals)\n\n # Check for astype failures (putative Numpy < 1.7)\n dt2001 = np.datetime64('2001')\n legacy_datetime64 = (dt2001.astype('int64') ==\n dt2001.astype('datetime64[ms]').astype('int64'))\n def transform_array(self, obj):\n \"\"\"Transform arrays into lists of json safe types\n also handles pandas series, and replacing\n nans and infs with strings\n \"\"\"\n ## not quite correct, truncates to ms..\n if obj.dtype.kind == 'M':\n if self.legacy_datetime64:\n if obj.dtype == np.dtype('datetime64[ns]'):\n return (obj.astype('int64') / millifactor).tolist()\n # else punt.\n else:\n return obj.astype('datetime64[ms]').astype('int64').tolist()\n elif obj.dtype.kind in ('u', 'i', 'f'):\n return self.transform_numerical_array(obj)\n return obj.tolist()\n\n def transform_numerical_array(self, obj):\n \"\"\"handles nans/inf conversion\n \"\"\"\n if isinstance(obj, np.ma.MaskedArray):\n obj = obj.filled(np.nan) # Set masked values to nan\n if not np.isnan(obj).any() and not np.isinf(obj).any():\n return obj.tolist()\n else:\n transformed = obj.astype('object')\n transformed[np.isnan(obj)] = 'NaN'\n transformed[np.isposinf(obj)] = 'Infinity'\n transformed[np.isneginf(obj)] = '-Infinity'\n return transformed.tolist()\n\n def transform_python_types(self, obj):\n \"\"\"handle special scalars, default to default json encoder\n \"\"\"\n # Pandas Timestamp\n if is_pandas and isinstance(obj, pd.tslib.Timestamp):\n return obj.value / millifactor #nanosecond to millisecond\n elif np.issubdtype(type(obj), np.float):\n return float(obj)\n elif np.issubdtype(type(obj), np.int):\n return int(obj)\n elif np.issubdtype(type(obj), np.bool_):\n return bool(obj)\n # Datetime, Date\n elif isinstance(obj, (dt.datetime, dt.date)):\n return 
calendar.timegm(obj.timetuple()) * 1000.\n # Numpy datetime64\n elif isinstance(obj, np.datetime64):\n epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')\n return (epoch_delta / np.timedelta64(1, 'ms'))\n # Time\n elif isinstance(obj, dt.time):\n return (obj.hour*3600 + obj.minute*60 + obj.second)*1000 + obj.microsecond / 1000.\n elif is_dateutil and isinstance(obj, relativedelta):\n return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,\n minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)\n else:\n return super(BokehJSONEncoder, self).default(obj)\n\n def default(self, obj):\n #argh! local import!\n from .plot_object import PlotObject\n from .properties import HasProps\n from .colors import Color\n ## array types\n if is_pandas and isinstance(obj, (pd.Series, pd.Index)):\n return self.transform_series(obj)\n elif isinstance(obj, np.ndarray):\n return self.transform_array(obj)\n elif isinstance(obj, PlotObject):\n return obj.ref\n elif isinstance(obj, HasProps):\n return obj.changed_properties_with_values()\n elif isinstance(obj, Color):\n return obj.to_css()\n else:\n return self.transform_python_types(obj)\n\ndef serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):\n if settings.pretty(False):\n kwargs[\"indent\"] = 4\n return json.dumps(obj, cls=encoder, **kwargs)\n\ndeserialize_json = json.loads\n\nserialize_web = serialize_json\n\ndeserialize_web = deserialize_json\n\ndef status_obj(status):\n return {'msgtype': 'status',\n 'status': status}\n\ndef error_obj(error_msg):\n return {\n 'msgtype': 'error',\n 'error_msg': error_msg}\n", "path": "bokeh/protocol.py"}]}
| 2,030 | 174 |
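The golden diff above adds a `decimal.Decimal` branch to `transform_python_types` so Bokeh's encoder no longer fails on Decimal values. A minimal standalone sketch of the same encoder pattern using only the standard library; the class name and sample payload are illustrative, not part of the patch:

```python
import decimal
import json

class DecimalAwareEncoder(json.JSONEncoder):
    def default(self, obj):
        # mirrors the added branch: emit Decimal values as plain floats
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        return super().default(obj)

print(json.dumps({"price": decimal.Decimal("19.99")}, cls=DecimalAwareEncoder))
# -> {"price": 19.99}
```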
gh_patches_debug_16527
|
rasdani/github-patches
|
git_diff
|
pypa__pip-3656
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`pip search` doesn't work in narrow terminals
- Pip version: 8.1.1 (also happens in at least 8.1.0)
- Python version: 2.7.9
- Operating System: xenial
### Description:
`pip search` can't print results to narrow terminal windows
### What I've run:
`pip search [something with results]` in a 63-column urxvt instance:
```
Exception:
Traceback (most recent call last):
File "/home/tinruufu/.virtualenvs/tinruufu/local/lib/python2.7/site-packages/pip/basecommand.py", line 209, in main
status = self.run(options, args)
File "/home/tinruufu/.virtualenvs/tinruufu/local/lib/python2.7/site-packages/pip/commands/search.py", line 50, in run
print_results(hits, terminal_width=terminal_width)
File "/home/tinruufu/.virtualenvs/tinruufu/local/lib/python2.7/site-packages/pip/commands/search.py", line 122, in print_results
terminal_width - name_column_width - 5,
File "/usr/lib/python2.7/textwrap.py", line 354, in wrap
return w.wrap(text)
File "/usr/lib/python2.7/textwrap.py", line 329, in wrap
return self._wrap_chunks(chunks)
File "/usr/lib/python2.7/textwrap.py", line 258, in _wrap_chunks
raise ValueError("invalid width %r (must be > 0)" % self.width)
ValueError: invalid width -14 (must be > 0)
```
```
$ tput cols
63
```
as an aside, it's a bummer that the download progress bars don't get narrow in such windows and instead vomits hundreds of lines of rectangles; `progressbar` handles this fine. this is the first time i've found something that just straight-up doesn't work at all though
</issue>
<code>
[start of pip/commands/search.py]
1 from __future__ import absolute_import
2
3 import logging
4 import sys
5 import textwrap
6
7 from pip.basecommand import Command, SUCCESS
8 from pip.download import PipXmlrpcTransport
9 from pip.models import PyPI
10 from pip.utils import get_terminal_size
11 from pip.utils.logging import indent_log
12 from pip.exceptions import CommandError
13 from pip.status_codes import NO_MATCHES_FOUND
14 from pip._vendor import pkg_resources
15 from pip._vendor.six.moves import xmlrpc_client
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class SearchCommand(Command):
22 """Search for PyPI packages whose name or summary contains <query>."""
23 name = 'search'
24 usage = """
25 %prog [options] <query>"""
26 summary = 'Search PyPI for packages.'
27
28 def __init__(self, *args, **kw):
29 super(SearchCommand, self).__init__(*args, **kw)
30 self.cmd_opts.add_option(
31 '--index',
32 dest='index',
33 metavar='URL',
34 default=PyPI.pypi_url,
35 help='Base URL of Python Package Index (default %default)')
36
37 self.parser.insert_option_group(0, self.cmd_opts)
38
39 def run(self, options, args):
40 if not args:
41 raise CommandError('Missing required argument (search query).')
42 query = args
43 pypi_hits = self.search(query, options)
44 hits = transform_hits(pypi_hits)
45
46 terminal_width = None
47 if sys.stdout.isatty():
48 terminal_width = get_terminal_size()[0]
49
50 print_results(hits, terminal_width=terminal_width)
51 if pypi_hits:
52 return SUCCESS
53 return NO_MATCHES_FOUND
54
55 def search(self, query, options):
56 index_url = options.index
57 with self._build_session(options) as session:
58 transport = PipXmlrpcTransport(index_url, session)
59 pypi = xmlrpc_client.ServerProxy(index_url, transport)
60 hits = pypi.search({'name': query, 'summary': query}, 'or')
61 return hits
62
63
64 def transform_hits(hits):
65 """
66 The list from pypi is really a list of versions. We want a list of
67 packages with the list of versions stored inline. This converts the
68 list from pypi into one we can use.
69 """
70 packages = {}
71 for hit in hits:
72 name = hit['name']
73 summary = hit['summary']
74 version = hit['version']
75 score = hit['_pypi_ordering']
76 if score is None:
77 score = 0
78
79 if name not in packages.keys():
80 packages[name] = {
81 'name': name,
82 'summary': summary,
83 'versions': [version],
84 'score': score,
85 }
86 else:
87 packages[name]['versions'].append(version)
88
89 # if this is the highest version, replace summary and score
90 if version == highest_version(packages[name]['versions']):
91 packages[name]['summary'] = summary
92 packages[name]['score'] = score
93
94 # each record has a unique name now, so we will convert the dict into a
95 # list sorted by score
96 package_list = sorted(
97 packages.values(),
98 key=lambda x: x['score'],
99 reverse=True,
100 )
101 return package_list
102
103
104 def print_results(hits, name_column_width=None, terminal_width=None):
105 if not hits:
106 return
107 if name_column_width is None:
108 name_column_width = max([
109 len(hit['name']) + len(hit.get('versions', ['-'])[-1])
110 for hit in hits
111 ]) + 4
112
113 installed_packages = [p.project_name for p in pkg_resources.working_set]
114 for hit in hits:
115 name = hit['name']
116 summary = hit['summary'] or ''
117 version = hit.get('versions', ['-'])[-1]
118 if terminal_width is not None:
119 # wrap and indent summary to fit terminal
120 summary = textwrap.wrap(
121 summary,
122 terminal_width - name_column_width - 5,
123 )
124 summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
125
126 line = '%-*s - %s' % (name_column_width,
127 '%s (%s)' % (name, version), summary)
128 try:
129 logger.info(line)
130 if name in installed_packages:
131 dist = pkg_resources.get_distribution(name)
132 with indent_log():
133 latest = highest_version(hit['versions'])
134 if dist.version == latest:
135 logger.info('INSTALLED: %s (latest)', dist.version)
136 else:
137 logger.info('INSTALLED: %s', dist.version)
138 logger.info('LATEST: %s', latest)
139 except UnicodeEncodeError:
140 pass
141
142
143 def highest_version(versions):
144 return next(iter(
145 sorted(versions, key=pkg_resources.parse_version, reverse=True)
146 ))
147
[end of pip/commands/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pip/commands/search.py b/pip/commands/search.py
--- a/pip/commands/search.py
+++ b/pip/commands/search.py
@@ -116,12 +116,11 @@
summary = hit['summary'] or ''
version = hit.get('versions', ['-'])[-1]
if terminal_width is not None:
- # wrap and indent summary to fit terminal
- summary = textwrap.wrap(
- summary,
- terminal_width - name_column_width - 5,
- )
- summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
+ target_width = terminal_width - name_column_width - 5
+ if target_width > 10:
+ # wrap and indent summary to fit terminal
+ summary = textwrap.wrap(summary, target_width)
+ summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%-*s - %s' % (name_column_width,
'%s (%s)' % (name, version), summary)
|
{"golden_diff": "diff --git a/pip/commands/search.py b/pip/commands/search.py\n--- a/pip/commands/search.py\n+++ b/pip/commands/search.py\n@@ -116,12 +116,11 @@\n summary = hit['summary'] or ''\n version = hit.get('versions', ['-'])[-1]\n if terminal_width is not None:\n- # wrap and indent summary to fit terminal\n- summary = textwrap.wrap(\n- summary,\n- terminal_width - name_column_width - 5,\n- )\n- summary = ('\\n' + ' ' * (name_column_width + 3)).join(summary)\n+ target_width = terminal_width - name_column_width - 5\n+ if target_width > 10:\n+ # wrap and indent summary to fit terminal\n+ summary = textwrap.wrap(summary, target_width)\n+ summary = ('\\n' + ' ' * (name_column_width + 3)).join(summary)\n \n line = '%-*s - %s' % (name_column_width,\n '%s (%s)' % (name, version), summary)\n", "issue": "`pip search` doesn't work in narrow terminals\n- Pip version: 8.1.1 (also happens in at least 8.1.0)\n- Python version: 2.7.9\n- Operating System: xenial\n### Description:\n\n`pip search` can't print results to narrow terminal windows\n### What I've run:\n\n`pip search [something with results]` in a 63-column urxvt instance:\n\n```\nException:\nTraceback (most recent call last):\n File \"/home/tinruufu/.virtualenvs/tinruufu/local/lib/python2.7/site-packages/pip/basecommand.py\", line 209, in main\n status = self.run(options, args)\n File \"/home/tinruufu/.virtualenvs/tinruufu/local/lib/python2.7/site-packages/pip/commands/search.py\", line 50, in run\n print_results(hits, terminal_width=terminal_width)\n File \"/home/tinruufu/.virtualenvs/tinruufu/local/lib/python2.7/site-packages/pip/commands/search.py\", line 122, in print_results\n terminal_width - name_column_width - 5,\n File \"/usr/lib/python2.7/textwrap.py\", line 354, in wrap\n return w.wrap(text)\n File \"/usr/lib/python2.7/textwrap.py\", line 329, in wrap\n return self._wrap_chunks(chunks)\n File \"/usr/lib/python2.7/textwrap.py\", line 258, in _wrap_chunks\n raise ValueError(\"invalid width %r (must be > 0)\" % self.width)\nValueError: invalid width -14 (must be > 0)\n```\n\n```\n$ tput cols\n63\n```\n\nas an aside, it's a bummer that the download progress bars don't get narrow in such windows and instead vomits hundreds of lines of rectangles; `progressbar` handles this fine. 
this is the first time i've found something that just straight-up doesn't work at all though\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport sys\nimport textwrap\n\nfrom pip.basecommand import Command, SUCCESS\nfrom pip.download import PipXmlrpcTransport\nfrom pip.models import PyPI\nfrom pip.utils import get_terminal_size\nfrom pip.utils.logging import indent_log\nfrom pip.exceptions import CommandError\nfrom pip.status_codes import NO_MATCHES_FOUND\nfrom pip._vendor import pkg_resources\nfrom pip._vendor.six.moves import xmlrpc_client\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SearchCommand(Command):\n \"\"\"Search for PyPI packages whose name or summary contains <query>.\"\"\"\n name = 'search'\n usage = \"\"\"\n %prog [options] <query>\"\"\"\n summary = 'Search PyPI for packages.'\n\n def __init__(self, *args, **kw):\n super(SearchCommand, self).__init__(*args, **kw)\n self.cmd_opts.add_option(\n '--index',\n dest='index',\n metavar='URL',\n default=PyPI.pypi_url,\n help='Base URL of Python Package Index (default %default)')\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n if not args:\n raise CommandError('Missing required argument (search query).')\n query = args\n pypi_hits = self.search(query, options)\n hits = transform_hits(pypi_hits)\n\n terminal_width = None\n if sys.stdout.isatty():\n terminal_width = get_terminal_size()[0]\n\n print_results(hits, terminal_width=terminal_width)\n if pypi_hits:\n return SUCCESS\n return NO_MATCHES_FOUND\n\n def search(self, query, options):\n index_url = options.index\n with self._build_session(options) as session:\n transport = PipXmlrpcTransport(index_url, session)\n pypi = xmlrpc_client.ServerProxy(index_url, transport)\n hits = pypi.search({'name': query, 'summary': query}, 'or')\n return hits\n\n\ndef transform_hits(hits):\n \"\"\"\n The list from pypi is really a list of versions. We want a list of\n packages with the list of versions stored inline. 
This converts the\n list from pypi into one we can use.\n \"\"\"\n packages = {}\n for hit in hits:\n name = hit['name']\n summary = hit['summary']\n version = hit['version']\n score = hit['_pypi_ordering']\n if score is None:\n score = 0\n\n if name not in packages.keys():\n packages[name] = {\n 'name': name,\n 'summary': summary,\n 'versions': [version],\n 'score': score,\n }\n else:\n packages[name]['versions'].append(version)\n\n # if this is the highest version, replace summary and score\n if version == highest_version(packages[name]['versions']):\n packages[name]['summary'] = summary\n packages[name]['score'] = score\n\n # each record has a unique name now, so we will convert the dict into a\n # list sorted by score\n package_list = sorted(\n packages.values(),\n key=lambda x: x['score'],\n reverse=True,\n )\n return package_list\n\n\ndef print_results(hits, name_column_width=None, terminal_width=None):\n if not hits:\n return\n if name_column_width is None:\n name_column_width = max([\n len(hit['name']) + len(hit.get('versions', ['-'])[-1])\n for hit in hits\n ]) + 4\n\n installed_packages = [p.project_name for p in pkg_resources.working_set]\n for hit in hits:\n name = hit['name']\n summary = hit['summary'] or ''\n version = hit.get('versions', ['-'])[-1]\n if terminal_width is not None:\n # wrap and indent summary to fit terminal\n summary = textwrap.wrap(\n summary,\n terminal_width - name_column_width - 5,\n )\n summary = ('\\n' + ' ' * (name_column_width + 3)).join(summary)\n\n line = '%-*s - %s' % (name_column_width,\n '%s (%s)' % (name, version), summary)\n try:\n logger.info(line)\n if name in installed_packages:\n dist = pkg_resources.get_distribution(name)\n with indent_log():\n latest = highest_version(hit['versions'])\n if dist.version == latest:\n logger.info('INSTALLED: %s (latest)', dist.version)\n else:\n logger.info('INSTALLED: %s', dist.version)\n logger.info('LATEST: %s', latest)\n except UnicodeEncodeError:\n pass\n\n\ndef highest_version(versions):\n return next(iter(\n sorted(versions, key=pkg_resources.parse_version, reverse=True)\n ))\n", "path": "pip/commands/search.py"}]}
| 2,359 | 243 |
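The patch above stops `pip search` from handing `textwrap.wrap` a negative width: the summary is wrapped only when more than 10 columns remain. A self-contained sketch of that guard; the function name and the column widths are made-up example values, not pip code:

```python
import textwrap

def format_summary(summary, name_column_width, terminal_width):
    target_width = terminal_width - name_column_width - 5
    if target_width > 10:
        # wrap and indent only when the terminal leaves enough room
        wrapped = textwrap.wrap(summary, target_width)
        return ("\n" + " " * (name_column_width + 3)).join(wrapped)
    return summary

# 63-column terminal from the report with a wide name column: no wrap, no crash
print(format_summary("Universal Command Line Environment for AWS.", 52, 63))
# roomier terminal: the summary is wrapped and indented under the name column
print(format_summary("Universal Command Line Environment for AWS.", 20, 80))
```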
gh_patches_debug_25059
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-294
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feature request]Changing how the loss metric get the shape information
Recently I was working with multi-output models and I was using a pattern that I believe to
be common. The model returns a tuple, for instance `y_pred = (ypred_1, ypred_2)`, with the corresponding y of the form `y = (y_1, y_2)`, and the `loss_fn` accepting `ypred` and `y` as arguments (`loss_fn(ypred, y)`).
However, I have run into problems when using the Loss metric, because the update function of this metric reads the shape attribute of the target, which it presumes to be a torch.Tensor, directly.
My suggestion is to change the Loss metric class to accept an extra function, for instance `batch_size = lambda x: x.shape[0]`, to recover the current behavior and to permit accessing the shape attribute when y is a tuple/list.
</issue>
<code>
[start of ignite/metrics/loss.py]
1 from __future__ import division
2
3 from ignite.exceptions import NotComputableError
4 from ignite.metrics.metric import Metric
5
6
7 class Loss(Metric):
8 """
9 Calculates the average loss according to the passed loss_fn.
10
11 Args:
12 loss_fn (callable): a callable taking a prediction tensor, a target
13 tensor, optionally other arguments, and returns the average loss
14 over all observations in the batch.
15 output_transform (callable): a callable that is used to transform the
16 :class:`ignite.engine.Engine`'s `process_function`'s output into the
17 form expected by the metric.
18 This can be useful if, for example, you have a multi-output model and
19 you want to compute the metric with respect to one of the outputs.
20 The output is is expected to be a tuple (prediction, target) or
21 (prediction, target, kwargs) where kwargs is a dictionary of extra
22 keywords arguments.
23
24 """
25
26 def __init__(self, loss_fn, output_transform=lambda x: x):
27 super(Loss, self).__init__(output_transform)
28 self._loss_fn = loss_fn
29
30 def reset(self):
31 self._sum = 0
32 self._num_examples = 0
33
34 def update(self, output):
35 if len(output) == 2:
36 y_pred, y = output
37 kwargs = {}
38 else:
39 y_pred, y, kwargs = output
40 average_loss = self._loss_fn(y_pred, y, **kwargs)
41
42 if len(average_loss.shape) != 0:
43 raise ValueError('loss_fn did not return the average loss')
44
45 self._sum += average_loss.item() * y.shape[0]
46 self._num_examples += y.shape[0]
47
48 def compute(self):
49 if self._num_examples == 0:
50 raise NotComputableError(
51 'Loss must have at least one example before it can be computed')
52 return self._sum / self._num_examples
53
[end of ignite/metrics/loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -20,12 +20,16 @@
The output is is expected to be a tuple (prediction, target) or
(prediction, target, kwargs) where kwargs is a dictionary of extra
keywords arguments.
+ batch_size (callable): a callable taking a target tensor that returns the
+ first dimension size (usually the batch size).
"""
- def __init__(self, loss_fn, output_transform=lambda x: x):
+ def __init__(self, loss_fn, output_transform=lambda x: x,
+ batch_size=lambda x: x.shape[0]):
super(Loss, self).__init__(output_transform)
self._loss_fn = loss_fn
+ self._batch_size = batch_size
def reset(self):
self._sum = 0
@@ -42,8 +46,9 @@
if len(average_loss.shape) != 0:
raise ValueError('loss_fn did not return the average loss')
- self._sum += average_loss.item() * y.shape[0]
- self._num_examples += y.shape[0]
+ N = self._batch_size(y)
+ self._sum += average_loss.item() * N
+ self._num_examples += N
def compute(self):
if self._num_examples == 0:
|
{"golden_diff": "diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py\n--- a/ignite/metrics/loss.py\n+++ b/ignite/metrics/loss.py\n@@ -20,12 +20,16 @@\n The output is is expected to be a tuple (prediction, target) or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments.\n+ batch_size (callable): a callable taking a target tensor that returns the\n+ first dimension size (usually the batch size).\n \n \"\"\"\n \n- def __init__(self, loss_fn, output_transform=lambda x: x):\n+ def __init__(self, loss_fn, output_transform=lambda x: x,\n+ batch_size=lambda x: x.shape[0]):\n super(Loss, self).__init__(output_transform)\n self._loss_fn = loss_fn\n+ self._batch_size = batch_size\n \n def reset(self):\n self._sum = 0\n@@ -42,8 +46,9 @@\n if len(average_loss.shape) != 0:\n raise ValueError('loss_fn did not return the average loss')\n \n- self._sum += average_loss.item() * y.shape[0]\n- self._num_examples += y.shape[0]\n+ N = self._batch_size(y)\n+ self._sum += average_loss.item() * N\n+ self._num_examples += N\n \n def compute(self):\n if self._num_examples == 0:\n", "issue": "[feature request]Changing how the loss metric get the shape information\nRecently I was working with multi-output models and I was using a pattern that I believe to\r\nbe common. The model returns a tuple, for instance `y_pred = (ypred_1, ypred_2)` with the correspondent y of the form `y = (y_1, y_2)` with the `loss_fn` accepting as arguments `ypred` and `y` (`loss_fn(ypred, y)`).\r\n\r\nHowever I have run into problems when using the Loss metric due to the batch size used on the update function of this metric calling the attribute shape of the, presumed, torch.Tensor directly.\r\n\r\nI have as suggestion to change the Loss metric class to accept an extra function, for instance `batch_size = lambda x: x.shape[0]` to recover the current behavior and to permit to access the shape attribute from a tuple/list.\r\n\n", "before_files": [{"content": "from __future__ import division\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n Args:\n loss_fn (callable): a callable taking a prediction tensor, a target\n tensor, optionally other arguments, and returns the average loss\n over all observations in the batch.\n output_transform (callable): a callable that is used to transform the\n :class:`ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric.\n This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n The output is is expected to be a tuple (prediction, target) or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments.\n\n \"\"\"\n\n def __init__(self, loss_fn, output_transform=lambda x: x):\n super(Loss, self).__init__(output_transform)\n self._loss_fn = loss_fn\n\n def reset(self):\n self._sum = 0\n self._num_examples = 0\n\n def update(self, output):\n if len(output) == 2:\n y_pred, y = output\n kwargs = {}\n else:\n y_pred, y, kwargs = output\n average_loss = self._loss_fn(y_pred, y, **kwargs)\n\n if len(average_loss.shape) != 0:\n raise ValueError('loss_fn did not return the average loss')\n\n self._sum += average_loss.item() * y.shape[0]\n self._num_examples += y.shape[0]\n\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\n 'Loss must have at least one example 
before it can be computed')\n return self._sum / self._num_examples\n", "path": "ignite/metrics/loss.py"}]}
| 1,253 | 328 |
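The patch above turns the batch size into a callable so that tuple targets work with the Loss metric. A reduced sketch of the same idea as a plain accumulator class rather than the ignite `Metric` API, using numpy instead of torch to stay dependency-light:

```python
import numpy as np

class AverageLoss:
    """Sketch of the patched metric: the batch size comes from a callable."""
    def __init__(self, loss_fn, batch_size=lambda y: y.shape[0]):
        self._loss_fn = loss_fn
        self._batch_size = batch_size
        self._sum = 0.0
        self._num_examples = 0

    def update(self, y_pred, y):
        n = self._batch_size(y)          # was y.shape[0], which fails on tuples
        self._sum += float(self._loss_fn(y_pred, y)) * n
        self._num_examples += n

    def compute(self):
        return self._sum / self._num_examples

# multi-output model: y is a tuple, so a custom callable supplies the batch size
mse = lambda yp, y: np.mean((yp[0] - y[0]) ** 2) + np.mean((yp[1] - y[1]) ** 2)
metric = AverageLoss(mse, batch_size=lambda y: y[0].shape[0])
metric.update((np.zeros((4, 2)), np.zeros((4, 3))),
              (np.ones((4, 2)), np.ones((4, 3))))
print(metric.compute())   # 2.0
```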
gh_patches_debug_3427
|
rasdani/github-patches
|
git_diff
|
nltk__nltk-2896
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hack to keep NLTK's "tokenize" module fails
in decorators.py file line 24:
`sys.path = [p for p in sys.path if "nltk" not in p]`
causes an error while importing nltk when there are other objects than strings in the path variable(such as pathlib.Posixpath)
this issue can be solved simply just by replacing the above code with:
`sys.path = [p for p in sys.path if (type(p) == str and "nltk" not in p)]`
</issue>
<code>
[start of nltk/decorators.py]
1 """
2 Decorator module by Michele Simionato <[email protected]>
3 Copyright Michele Simionato, distributed under the terms of the BSD License (see below).
4 http://www.phyast.pitt.edu/~micheles/python/documentation.html
5
6 Included in NLTK for its support of a nice memoization decorator.
7 """
8
9 __docformat__ = "restructuredtext en"
10
11 ## The basic trick is to generate the source code for the decorated function
12 ## with the right signature and to evaluate it.
13 ## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator
14 ## to understand what is going on.
15
16 __all__ = ["decorator", "new_wrapper", "getinfo"]
17
18 import sys
19
20 # Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
21 # the Python standard library.
22 OLD_SYS_PATH = sys.path[:]
23 sys.path = [p for p in sys.path if p and "nltk" not in p]
24 import inspect
25
26 sys.path = OLD_SYS_PATH
27
28
29 def __legacysignature(signature):
30 """
31 For retrocompatibility reasons, we don't use a standard Signature.
32 Instead, we use the string generated by this method.
33 Basically, from a Signature we create a string and remove the default values.
34 """
35 listsignature = str(signature)[1:-1].split(",")
36 for counter, param in enumerate(listsignature):
37 if param.count("=") > 0:
38 listsignature[counter] = param[0 : param.index("=")].strip()
39 else:
40 listsignature[counter] = param.strip()
41 return ", ".join(listsignature)
42
43
44 def getinfo(func):
45 """
46 Returns an info dictionary containing:
47 - name (the name of the function : str)
48 - argnames (the names of the arguments : list)
49 - defaults (the values of the default arguments : tuple)
50 - signature (the signature : str)
51 - fullsignature (the full signature : Signature)
52 - doc (the docstring : str)
53 - module (the module name : str)
54 - dict (the function __dict__ : str)
55
56 >>> def f(self, x=1, y=2, *args, **kw): pass
57
58 >>> info = getinfo(f)
59
60 >>> info["name"]
61 'f'
62 >>> info["argnames"]
63 ['self', 'x', 'y', 'args', 'kw']
64
65 >>> info["defaults"]
66 (1, 2)
67
68 >>> info["signature"]
69 'self, x, y, *args, **kw'
70
71 >>> info["fullsignature"]
72 <Signature (self, x=1, y=2, *args, **kw)>
73 """
74 assert inspect.ismethod(func) or inspect.isfunction(func)
75 argspec = inspect.getfullargspec(func)
76 regargs, varargs, varkwargs = argspec[:3]
77 argnames = list(regargs)
78 if varargs:
79 argnames.append(varargs)
80 if varkwargs:
81 argnames.append(varkwargs)
82 fullsignature = inspect.signature(func)
83 # Convert Signature to str
84 signature = __legacysignature(fullsignature)
85
86 # pypy compatibility
87 if hasattr(func, "__closure__"):
88 _closure = func.__closure__
89 _globals = func.__globals__
90 else:
91 _closure = func.func_closure
92 _globals = func.func_globals
93
94 return dict(
95 name=func.__name__,
96 argnames=argnames,
97 signature=signature,
98 fullsignature=fullsignature,
99 defaults=func.__defaults__,
100 doc=func.__doc__,
101 module=func.__module__,
102 dict=func.__dict__,
103 globals=_globals,
104 closure=_closure,
105 )
106
107
108 def update_wrapper(wrapper, model, infodict=None):
109 "akin to functools.update_wrapper"
110 infodict = infodict or getinfo(model)
111 wrapper.__name__ = infodict["name"]
112 wrapper.__doc__ = infodict["doc"]
113 wrapper.__module__ = infodict["module"]
114 wrapper.__dict__.update(infodict["dict"])
115 wrapper.__defaults__ = infodict["defaults"]
116 wrapper.undecorated = model
117 return wrapper
118
119
120 def new_wrapper(wrapper, model):
121 """
122 An improvement over functools.update_wrapper. The wrapper is a generic
123 callable object. It works by generating a copy of the wrapper with the
124 right signature and by updating the copy, not the original.
125 Moreovoer, 'model' can be a dictionary with keys 'name', 'doc', 'module',
126 'dict', 'defaults'.
127 """
128 if isinstance(model, dict):
129 infodict = model
130 else: # assume model is a function
131 infodict = getinfo(model)
132 assert (
133 not "_wrapper_" in infodict["argnames"]
134 ), '"_wrapper_" is a reserved argument name!'
135 src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict
136 funcopy = eval(src, dict(_wrapper_=wrapper))
137 return update_wrapper(funcopy, model, infodict)
138
139
140 # helper used in decorator_factory
141 def __call__(self, func):
142 return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func)
143
144
145 def decorator_factory(cls):
146 """
147 Take a class with a ``.caller`` method and return a callable decorator
148 object. It works by adding a suitable __call__ method to the class;
149 it raises a TypeError if the class already has a nontrivial __call__
150 method.
151 """
152 attrs = set(dir(cls))
153 if "__call__" in attrs:
154 raise TypeError(
155 "You cannot decorate a class with a nontrivial " "__call__ method"
156 )
157 if "call" not in attrs:
158 raise TypeError("You cannot decorate a class without a " ".call method")
159 cls.__call__ = __call__
160 return cls
161
162
163 def decorator(caller):
164 """
165 General purpose decorator factory: takes a caller function as
166 input and returns a decorator with the same attributes.
167 A caller function is any function like this::
168
169 def caller(func, *args, **kw):
170 # do something
171 return func(*args, **kw)
172
173 Here is an example of usage:
174
175 >>> @decorator
176 ... def chatty(f, *args, **kw):
177 ... print("Calling %r" % f.__name__)
178 ... return f(*args, **kw)
179
180 >>> chatty.__name__
181 'chatty'
182
183 >>> @chatty
184 ... def f(): pass
185 ...
186 >>> f()
187 Calling 'f'
188
189 decorator can also take in input a class with a .caller method; in this
190 case it converts the class into a factory of callable decorator objects.
191 See the documentation for an example.
192 """
193 if inspect.isclass(caller):
194 return decorator_factory(caller)
195
196 def _decorator(func): # the real meat is here
197 infodict = getinfo(func)
198 argnames = infodict["argnames"]
199 assert not (
200 "_call_" in argnames or "_func_" in argnames
201 ), "You cannot use _call_ or _func_ as argument names!"
202 src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict
203 # import sys; print >> sys.stderr, src # for debugging purposes
204 dec_func = eval(src, dict(_func_=func, _call_=caller))
205 return update_wrapper(dec_func, func, infodict)
206
207 return update_wrapper(_decorator, caller)
208
209
210 def getattr_(obj, name, default_thunk):
211 "Similar to .setdefault in dictionaries."
212 try:
213 return getattr(obj, name)
214 except AttributeError:
215 default = default_thunk()
216 setattr(obj, name, default)
217 return default
218
219
220 @decorator
221 def memoize(func, *args):
222 dic = getattr_(func, "memoize_dic", dict)
223 # memoize_dic is created at the first call
224 if args in dic:
225 return dic[args]
226 result = func(*args)
227 dic[args] = result
228 return result
229
230
231 ########################## LEGALESE ###############################
232
233 ## Redistributions of source code must retain the above copyright
234 ## notice, this list of conditions and the following disclaimer.
235 ## Redistributions in bytecode form must reproduce the above copyright
236 ## notice, this list of conditions and the following disclaimer in
237 ## the documentation and/or other materials provided with the
238 ## distribution.
239
240 ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
241 ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
242 ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
243 ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
244 ## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
245 ## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
246 ## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
247 ## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
248 ## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
249 ## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
250 ## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
251 ## DAMAGE.
252
[end of nltk/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nltk/decorators.py b/nltk/decorators.py
--- a/nltk/decorators.py
+++ b/nltk/decorators.py
@@ -20,7 +20,7 @@
# Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
# the Python standard library.
OLD_SYS_PATH = sys.path[:]
-sys.path = [p for p in sys.path if p and "nltk" not in p]
+sys.path = [p for p in sys.path if p and "nltk" not in str(p)]
import inspect
sys.path = OLD_SYS_PATH
|
{"golden_diff": "diff --git a/nltk/decorators.py b/nltk/decorators.py\n--- a/nltk/decorators.py\n+++ b/nltk/decorators.py\n@@ -20,7 +20,7 @@\n # Hack to keep NLTK's \"tokenize\" module from colliding with the \"tokenize\" in\n # the Python standard library.\n OLD_SYS_PATH = sys.path[:]\n-sys.path = [p for p in sys.path if p and \"nltk\" not in p]\n+sys.path = [p for p in sys.path if p and \"nltk\" not in str(p)]\n import inspect\n \n sys.path = OLD_SYS_PATH\n", "issue": "Hack to keep NLTK's \"tokenize\" module fails \nin decorators.py file line 24:\r\n`sys.path = [p for p in sys.path if \"nltk\" not in p]`\r\ncauses an error while importing nltk when there are other objects than strings in the path variable(such as pathlib.Posixpath)\r\nthis issue can be solved simply just by replacing the above code with:\r\n`sys.path = [p for p in sys.path if (type(p) == str and \"nltk\" not in p)]`\r\n\n", "before_files": [{"content": "\"\"\"\nDecorator module by Michele Simionato <[email protected]>\nCopyright Michele Simionato, distributed under the terms of the BSD License (see below).\nhttp://www.phyast.pitt.edu/~micheles/python/documentation.html\n\nIncluded in NLTK for its support of a nice memoization decorator.\n\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\n## The basic trick is to generate the source code for the decorated function\n## with the right signature and to evaluate it.\n## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator\n## to understand what is going on.\n\n__all__ = [\"decorator\", \"new_wrapper\", \"getinfo\"]\n\nimport sys\n\n# Hack to keep NLTK's \"tokenize\" module from colliding with the \"tokenize\" in\n# the Python standard library.\nOLD_SYS_PATH = sys.path[:]\nsys.path = [p for p in sys.path if p and \"nltk\" not in p]\nimport inspect\n\nsys.path = OLD_SYS_PATH\n\n\ndef __legacysignature(signature):\n \"\"\"\n For retrocompatibility reasons, we don't use a standard Signature.\n Instead, we use the string generated by this method.\n Basically, from a Signature we create a string and remove the default values.\n \"\"\"\n listsignature = str(signature)[1:-1].split(\",\")\n for counter, param in enumerate(listsignature):\n if param.count(\"=\") > 0:\n listsignature[counter] = param[0 : param.index(\"=\")].strip()\n else:\n listsignature[counter] = param.strip()\n return \", \".join(listsignature)\n\n\ndef getinfo(func):\n \"\"\"\n Returns an info dictionary containing:\n - name (the name of the function : str)\n - argnames (the names of the arguments : list)\n - defaults (the values of the default arguments : tuple)\n - signature (the signature : str)\n - fullsignature (the full signature : Signature)\n - doc (the docstring : str)\n - module (the module name : str)\n - dict (the function __dict__ : str)\n\n >>> def f(self, x=1, y=2, *args, **kw): pass\n\n >>> info = getinfo(f)\n\n >>> info[\"name\"]\n 'f'\n >>> info[\"argnames\"]\n ['self', 'x', 'y', 'args', 'kw']\n\n >>> info[\"defaults\"]\n (1, 2)\n\n >>> info[\"signature\"]\n 'self, x, y, *args, **kw'\n\n >>> info[\"fullsignature\"]\n <Signature (self, x=1, y=2, *args, **kw)>\n \"\"\"\n assert inspect.ismethod(func) or inspect.isfunction(func)\n argspec = inspect.getfullargspec(func)\n regargs, varargs, varkwargs = argspec[:3]\n argnames = list(regargs)\n if varargs:\n argnames.append(varargs)\n if varkwargs:\n argnames.append(varkwargs)\n fullsignature = inspect.signature(func)\n # Convert Signature to str\n signature = __legacysignature(fullsignature)\n\n # pypy compatibility\n if hasattr(func, 
\"__closure__\"):\n _closure = func.__closure__\n _globals = func.__globals__\n else:\n _closure = func.func_closure\n _globals = func.func_globals\n\n return dict(\n name=func.__name__,\n argnames=argnames,\n signature=signature,\n fullsignature=fullsignature,\n defaults=func.__defaults__,\n doc=func.__doc__,\n module=func.__module__,\n dict=func.__dict__,\n globals=_globals,\n closure=_closure,\n )\n\n\ndef update_wrapper(wrapper, model, infodict=None):\n \"akin to functools.update_wrapper\"\n infodict = infodict or getinfo(model)\n wrapper.__name__ = infodict[\"name\"]\n wrapper.__doc__ = infodict[\"doc\"]\n wrapper.__module__ = infodict[\"module\"]\n wrapper.__dict__.update(infodict[\"dict\"])\n wrapper.__defaults__ = infodict[\"defaults\"]\n wrapper.undecorated = model\n return wrapper\n\n\ndef new_wrapper(wrapper, model):\n \"\"\"\n An improvement over functools.update_wrapper. The wrapper is a generic\n callable object. It works by generating a copy of the wrapper with the\n right signature and by updating the copy, not the original.\n Moreovoer, 'model' can be a dictionary with keys 'name', 'doc', 'module',\n 'dict', 'defaults'.\n \"\"\"\n if isinstance(model, dict):\n infodict = model\n else: # assume model is a function\n infodict = getinfo(model)\n assert (\n not \"_wrapper_\" in infodict[\"argnames\"]\n ), '\"_wrapper_\" is a reserved argument name!'\n src = \"lambda %(signature)s: _wrapper_(%(signature)s)\" % infodict\n funcopy = eval(src, dict(_wrapper_=wrapper))\n return update_wrapper(funcopy, model, infodict)\n\n\n# helper used in decorator_factory\ndef __call__(self, func):\n return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func)\n\n\ndef decorator_factory(cls):\n \"\"\"\n Take a class with a ``.caller`` method and return a callable decorator\n object. It works by adding a suitable __call__ method to the class;\n it raises a TypeError if the class already has a nontrivial __call__\n method.\n \"\"\"\n attrs = set(dir(cls))\n if \"__call__\" in attrs:\n raise TypeError(\n \"You cannot decorate a class with a nontrivial \" \"__call__ method\"\n )\n if \"call\" not in attrs:\n raise TypeError(\"You cannot decorate a class without a \" \".call method\")\n cls.__call__ = __call__\n return cls\n\n\ndef decorator(caller):\n \"\"\"\n General purpose decorator factory: takes a caller function as\n input and returns a decorator with the same attributes.\n A caller function is any function like this::\n\n def caller(func, *args, **kw):\n # do something\n return func(*args, **kw)\n\n Here is an example of usage:\n\n >>> @decorator\n ... def chatty(f, *args, **kw):\n ... print(\"Calling %r\" % f.__name__)\n ... return f(*args, **kw)\n\n >>> chatty.__name__\n 'chatty'\n\n >>> @chatty\n ... 
def f(): pass\n ...\n >>> f()\n Calling 'f'\n\n decorator can also take in input a class with a .caller method; in this\n case it converts the class into a factory of callable decorator objects.\n See the documentation for an example.\n \"\"\"\n if inspect.isclass(caller):\n return decorator_factory(caller)\n\n def _decorator(func): # the real meat is here\n infodict = getinfo(func)\n argnames = infodict[\"argnames\"]\n assert not (\n \"_call_\" in argnames or \"_func_\" in argnames\n ), \"You cannot use _call_ or _func_ as argument names!\"\n src = \"lambda %(signature)s: _call_(_func_, %(signature)s)\" % infodict\n # import sys; print >> sys.stderr, src # for debugging purposes\n dec_func = eval(src, dict(_func_=func, _call_=caller))\n return update_wrapper(dec_func, func, infodict)\n\n return update_wrapper(_decorator, caller)\n\n\ndef getattr_(obj, name, default_thunk):\n \"Similar to .setdefault in dictionaries.\"\n try:\n return getattr(obj, name)\n except AttributeError:\n default = default_thunk()\n setattr(obj, name, default)\n return default\n\n\n@decorator\ndef memoize(func, *args):\n dic = getattr_(func, \"memoize_dic\", dict)\n # memoize_dic is created at the first call\n if args in dic:\n return dic[args]\n result = func(*args)\n dic[args] = result\n return result\n\n\n########################## LEGALESE ###############################\n\n## Redistributions of source code must retain the above copyright\n## notice, this list of conditions and the following disclaimer.\n## Redistributions in bytecode form must reproduce the above copyright\n## notice, this list of conditions and the following disclaimer in\n## the documentation and/or other materials provided with the\n## distribution.\n\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\n## DAMAGE.\n", "path": "nltk/decorators.py"}]}
| 3,350 | 139 |
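The one-line fix above coerces each `sys.path` entry to `str` before the substring test, so pathlib entries no longer raise `TypeError` during import. A small sketch reproducing the failure mode and the fix, with an illustrative path list:

```python
import pathlib

# mixed entry types mirror the report: a pathlib object sitting in sys.path
sample_path = [
    "/usr/lib/python3/dist-packages",
    pathlib.Path("/home/user/nltk_project"),
    "/home/user/.venv/lib/python3.10/site-packages/nltk",
]

# the original expression raises TypeError on the pathlib entry:
#   [p for p in sample_path if p and "nltk" not in p]
# coercing to str, as in the patch, handles every entry
filtered = [p for p in sample_path if p and "nltk" not in str(p)]
print(filtered)   # only the first entry survives
```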
gh_patches_debug_2294
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-4760
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
upgrade to PyYAML 5.2
PyYAML 5.2 is out, with more security fixes. aws-cli pins to an older version, preventing upgrades. Please update the pin.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 install_requires = ['botocore==1.13.38',
27 'docutils>=0.10,<0.16',
28 'rsa>=3.1.2,<=3.5.0',
29 's3transfer>=0.2.0,<0.3.0']
30
31
32 if sys.version_info[:2] == (2, 6):
33 # For python2.6 we have to require argparse since it
34 # was not in stdlib until 2.7.
35 install_requires.append('argparse>=1.1')
36
37 # For Python 2.6, we have to require a different verion of PyYAML since the latest
38 # versions dropped support for Python 2.6.
39 install_requires.append('PyYAML>=3.10,<=3.13')
40
41 # Colorama removed support for EOL pythons.
42 install_requires.append('colorama>=0.2.5,<=0.3.9')
43 elif sys.version_info[:2] == (3, 3):
44 install_requires.append('PyYAML>=3.10,<=3.13')
45 # Colorama removed support for EOL pythons.
46 install_requires.append('colorama>=0.2.5,<=0.3.9')
47 else:
48 install_requires.append('PyYAML>=3.10,<5.2')
49 install_requires.append('colorama>=0.2.5,<0.4.2')
50
51
52 setup_options = dict(
53 name='awscli',
54 version=find_version("awscli", "__init__.py"),
55 description='Universal Command Line Environment for AWS.',
56 long_description=read('README.rst'),
57 author='Amazon Web Services',
58 url='http://aws.amazon.com/cli/',
59 scripts=['bin/aws', 'bin/aws.cmd',
60 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
61 'bin/aws_bash_completer'],
62 packages=find_packages(exclude=['tests*']),
63 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
64 'examples/*/*.txt', 'examples/*/*/*.txt',
65 'examples/*/*/*.rst', 'topics/*.rst',
66 'topics/*.json']},
67 install_requires=install_requires,
68 extras_require={
69 ':python_version=="2.6"': [
70 'argparse>=1.1',
71 ]
72 },
73 license="Apache License 2.0",
74 classifiers=[
75 'Development Status :: 5 - Production/Stable',
76 'Intended Audience :: Developers',
77 'Intended Audience :: System Administrators',
78 'Natural Language :: English',
79 'License :: OSI Approved :: Apache Software License',
80 'Programming Language :: Python',
81 'Programming Language :: Python :: 2',
82 'Programming Language :: Python :: 2.6',
83 'Programming Language :: Python :: 2.7',
84 'Programming Language :: Python :: 3',
85 'Programming Language :: Python :: 3.3',
86 'Programming Language :: Python :: 3.4',
87 'Programming Language :: Python :: 3.5',
88 'Programming Language :: Python :: 3.6',
89 'Programming Language :: Python :: 3.7',
90 ],
91 )
92
93 if 'py2exe' in sys.argv:
94 # This will actually give us a py2exe command.
95 import py2exe
96 # And we have some py2exe specific options.
97 setup_options['options'] = {
98 'py2exe': {
99 'optimize': 0,
100 'skip_archive': True,
101 'dll_excludes': ['crypt32.dll'],
102 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
103 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
104 }
105 }
106 setup_options['console'] = ['bin/aws']
107
108
109 setup(**setup_options)
110
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
# Colorama removed support for EOL pythons.
install_requires.append('colorama>=0.2.5,<=0.3.9')
else:
- install_requires.append('PyYAML>=3.10,<5.2')
+ install_requires.append('PyYAML>=3.10,<5.3')
install_requires.append('colorama>=0.2.5,<0.4.2')
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n # Colorama removed support for EOL pythons.\n install_requires.append('colorama>=0.2.5,<=0.3.9')\n else:\n- install_requires.append('PyYAML>=3.10,<5.2')\n+ install_requires.append('PyYAML>=3.10,<5.3')\n install_requires.append('colorama>=0.2.5,<0.4.2')\n", "issue": "upgrade to PyYAML 5.2\nPyYAML 5.2 is out, with more security fixes. aws-cli pins to an older version, preventing upgrades. Please update the pin.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ninstall_requires = ['botocore==1.13.38',\n 'docutils>=0.10,<0.16',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n install_requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n install_requires.append('PyYAML>=3.10,<=3.13')\n\n # Colorama removed support for EOL pythons.\n install_requires.append('colorama>=0.2.5,<=0.3.9')\nelif sys.version_info[:2] == (3, 3):\n install_requires.append('PyYAML>=3.10,<=3.13')\n # Colorama removed support for EOL pythons.\n install_requires.append('colorama>=0.2.5,<=0.3.9')\nelse:\n install_requires.append('PyYAML>=3.10,<5.2')\n install_requires.append('colorama>=0.2.5,<0.4.2')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=install_requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': 
{\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]}
| 1,769 | 126 |
gh_patches_debug_33553
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-1098
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build wheels for manylinux1
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See LICENSE in the project root
5 # for license information.
6
7 import os
8 import os.path
9 import subprocess
10 import sys
11
12 from setuptools import setup
13
14 import versioneer
15
16 sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src'))
17 import ptvsd
18 import ptvsd._vendored
19 del sys.path[0]
20
21
22 PYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')
23 PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))
24
25
26 def cython_build():
27 print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')
28 subprocess.call([
29 sys.executable,
30 os.path.join(PYDEVD_ROOT, 'setup_cython.py'),
31 'build_ext',
32 '-i',
33 ])
34
35
36 def iter_vendored_files():
37 # Add pydevd files as data files for this package. They are not
38 # treated as a package of their own, because we don't actually
39 # want to provide pydevd - just use our own copy internally.
40 for project in ptvsd._vendored.list_all():
41 for filename in ptvsd._vendored.iter_packaging_files(project):
42 yield filename
43
44
45 with open('DESCRIPTION.md', 'r') as fh:
46 long_description = fh.read()
47
48
49 if __name__ == '__main__':
50 if not os.getenv('SKIP_CYTHON_BUILD'):
51 cython_build()
52
53 setup(
54 name='ptvsd',
55 version=versioneer.get_version(),
56 description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
57 long_description=long_description,
58 long_description_content_type='text/markdown',
59 license='MIT',
60 author='Microsoft Corporation',
61 author_email='[email protected]',
62 url='https://aka.ms/ptvs',
63 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
64 classifiers=[
65 'Development Status :: 5 - Production/Stable',
66 'Programming Language :: Python :: 2.7',
67 'Programming Language :: Python :: 3.4',
68 'Programming Language :: Python :: 3.5',
69 'Programming Language :: Python :: 3.6',
70 'Programming Language :: Python :: 3.7',
71 'Topic :: Software Development :: Debuggers',
72 'Operating System :: OS Independent',
73 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',
74 'License :: OSI Approved :: MIT License',
75 ],
76 package_dir={'': 'src'},
77 packages=[
78 'ptvsd',
79 'ptvsd._vendored',
80 ],
81 package_data={
82 'ptvsd': ['ThirdPartyNotices.txt'],
83 'ptvsd._vendored': list(iter_vendored_files()),
84 },
85 cmdclass=versioneer.get_cmdclass(),
86 )
87
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,13 +9,23 @@
import subprocess
import sys
-from setuptools import setup
+pure = None
+if '--pure' in sys.argv:
+ pure = True
+ sys.argv.remove('--pure')
+elif '--universal' in sys.argv:
+ pure = True
+elif '--abi' in sys.argv:
+ pure = False
+ sys.argv.remove('--abi')
-import versioneer
+
+from setuptools import setup # noqa
+import versioneer # noqa
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src'))
-import ptvsd
-import ptvsd._vendored
+import ptvsd # noqa
+import ptvsd._vendored # noqa
del sys.path[0]
@@ -23,6 +33,11 @@
PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))
+def get_buildplatform():
+ if '-p' in sys.argv:
+ return sys.argv[sys.argv.index('-p') + 1]
+ return None
+
def cython_build():
print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')
subprocess.call([
@@ -46,10 +61,29 @@
long_description = fh.read()
+try:
+ from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
+
+ class bdist_wheel(_bdist_wheel):
+ def finalize_options(self):
+ _bdist_wheel.finalize_options(self)
+ self.root_is_pure = pure
+
+except ImportError:
+ bdist_wheel = None
+
if __name__ == '__main__':
if not os.getenv('SKIP_CYTHON_BUILD'):
cython_build()
+ cmds = versioneer.get_cmdclass()
+ cmds['bdist_wheel'] = bdist_wheel
+
+ extras = {}
+ platforms = get_buildplatform()
+ if platforms is not None:
+ extras['platforms'] = platforms
+
setup(
name='ptvsd',
version=versioneer.get_version(),
@@ -82,5 +116,6 @@
'ptvsd': ['ThirdPartyNotices.txt'],
'ptvsd._vendored': list(iter_vendored_files()),
},
- cmdclass=versioneer.get_cmdclass(),
+ cmdclass=cmds,
+ **extras
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,13 +9,23 @@\n import subprocess\n import sys\n \n-from setuptools import setup\n+pure = None\n+if '--pure' in sys.argv:\n+ pure = True\n+ sys.argv.remove('--pure')\n+elif '--universal' in sys.argv:\n+ pure = True\n+elif '--abi' in sys.argv:\n+ pure = False\n+ sys.argv.remove('--abi')\n \n-import versioneer\n+\n+from setuptools import setup # noqa\n+import versioneer # noqa\n \n sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src'))\n-import ptvsd\n-import ptvsd._vendored\n+import ptvsd # noqa\n+import ptvsd._vendored # noqa\n del sys.path[0]\n \n \n@@ -23,6 +33,11 @@\n PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n \n \n+def get_buildplatform():\n+ if '-p' in sys.argv:\n+ return sys.argv[sys.argv.index('-p') + 1]\n+ return None\n+\n def cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n@@ -46,10 +61,29 @@\n long_description = fh.read()\n \n \n+try:\n+ from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n+\n+ class bdist_wheel(_bdist_wheel):\n+ def finalize_options(self):\n+ _bdist_wheel.finalize_options(self)\n+ self.root_is_pure = pure\n+\n+except ImportError:\n+ bdist_wheel = None\n+\n if __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n \n+ cmds = versioneer.get_cmdclass()\n+ cmds['bdist_wheel'] = bdist_wheel\n+\n+ extras = {}\n+ platforms = get_buildplatform()\n+ if platforms is not None:\n+ extras['platforms'] = platforms\n+\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n@@ -82,5 +116,6 @@\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n- cmdclass=versioneer.get_cmdclass(),\n+ cmdclass=cmds,\n+ **extras\n )\n", "issue": "Build wheels for manylinux1\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\n\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src'))\nimport ptvsd\nimport ptvsd._vendored\ndel sys.path[0]\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. 
They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nwith open('DESCRIPTION.md', 'r') as fh:\n long_description = fh.read()\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Debuggers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n package_dir={'': 'src'},\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}]}
| 1,372 | 558 |
gh_patches_debug_25162
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-853
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
.at without an argument gives TypeError
```
<embolalia> .at
<Sopel> TypeError: expected string or buffer (file "/opt/rh/python33/root/usr/lib/python3.3/site-packages/sopel/modules/remind.py", line 165, in at)
```
.at without an argument gives TypeError
```
<embolalia> .at
<Sopel> TypeError: expected string or buffer (file "/opt/rh/python33/root/usr/lib/python3.3/site-packages/sopel/modules/remind.py", line 165, in at)
```
</issue>
<code>
[start of sopel/modules/remind.py]
1 # coding=utf8
2 """
3 remind.py - Sopel Reminder Module
4 Copyright 2011, Sean B. Palmer, inamidst.com
5 Licensed under the Eiffel Forum License 2.
6
7 http://sopel.chat
8 """
9 from __future__ import unicode_literals
10
11 import os
12 import re
13 import time
14 import threading
15 import collections
16 import codecs
17 from datetime import datetime
18 from sopel.module import commands, example, NOLIMIT
19 import sopel.tools
20 from sopel.tools.time import get_timezone, format_time
21
22 try:
23 import pytz
24 except:
25 pytz = None
26
27
28 def filename(self):
29 name = self.nick + '-' + self.config.core.host + '.reminders.db'
30 return os.path.join(self.config.core.homedir, name)
31
32
33 def load_database(name):
34 data = {}
35 if os.path.isfile(name):
36 f = codecs.open(name, 'r', encoding='utf-8')
37 for line in f:
38 unixtime, channel, nick, message = line.split('\t')
39 message = message.rstrip('\n')
40 t = int(float(unixtime)) # WTFs going on here?
41 reminder = (channel, nick, message)
42 try:
43 data[t].append(reminder)
44 except KeyError:
45 data[t] = [reminder]
46 f.close()
47 return data
48
49
50 def dump_database(name, data):
51 f = codecs.open(name, 'w', encoding='utf-8')
52 for unixtime, reminders in sopel.tools.iteritems(data):
53 for channel, nick, message in reminders:
54 f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
55 f.close()
56
57
58 def setup(bot):
59 bot.rfn = filename(bot)
60 bot.rdb = load_database(bot.rfn)
61
62 def monitor(bot):
63 time.sleep(5)
64 while True:
65 now = int(time.time())
66 unixtimes = [int(key) for key in bot.rdb]
67 oldtimes = [t for t in unixtimes if t <= now]
68 if oldtimes:
69 for oldtime in oldtimes:
70 for (channel, nick, message) in bot.rdb[oldtime]:
71 if message:
72 bot.msg(channel, nick + ': ' + message)
73 else:
74 bot.msg(channel, nick + '!')
75 del bot.rdb[oldtime]
76 dump_database(bot.rfn, bot.rdb)
77 time.sleep(2.5)
78
79 targs = (bot,)
80 t = threading.Thread(target=monitor, args=targs)
81 t.start()
82
83 scaling = collections.OrderedDict([
84 ('years', 365.25 * 24 * 3600),
85 ('year', 365.25 * 24 * 3600),
86 ('yrs', 365.25 * 24 * 3600),
87 ('y', 365.25 * 24 * 3600),
88
89 ('months', 29.53059 * 24 * 3600),
90 ('month', 29.53059 * 24 * 3600),
91 ('mo', 29.53059 * 24 * 3600),
92
93 ('weeks', 7 * 24 * 3600),
94 ('week', 7 * 24 * 3600),
95 ('wks', 7 * 24 * 3600),
96 ('wk', 7 * 24 * 3600),
97 ('w', 7 * 24 * 3600),
98
99 ('days', 24 * 3600),
100 ('day', 24 * 3600),
101 ('d', 24 * 3600),
102
103 ('hours', 3600),
104 ('hour', 3600),
105 ('hrs', 3600),
106 ('hr', 3600),
107 ('h', 3600),
108
109 ('minutes', 60),
110 ('minute', 60),
111 ('mins', 60),
112 ('min', 60),
113 ('m', 60),
114
115 ('seconds', 1),
116 ('second', 1),
117 ('secs', 1),
118 ('sec', 1),
119 ('s', 1),
120 ])
121
122 periods = '|'.join(scaling.keys())
123
124
125 @commands('in')
126 @example('.in 3h45m Go to class')
127 def remind(bot, trigger):
128 """Gives you a reminder in the given amount of time."""
129 duration = 0
130 message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?',
131 trigger.group(2))[1:])
132 reminder = ''
133 stop = False
134 for piece in message:
135 grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece)
136 if grp and not stop:
137 length = float(grp.group(1))
138 factor = scaling.get(grp.group(2).lower(), 60)
139 duration += length * factor
140 else:
141 reminder = reminder + piece
142 stop = True
143 if duration == 0:
144 return bot.reply("Sorry, didn't understand the input.")
145
146 if duration % 1:
147 duration = int(duration) + 1
148 else:
149 duration = int(duration)
150 timezone = get_timezone(
151 bot.db, bot.config, None, trigger.nick, trigger.sender)
152 create_reminder(bot, trigger, duration, reminder, timezone)
153
154
155 @commands('at')
156 @example('.at 13:47 Do your homework!')
157 def at(bot, trigger):
158 """
159 Gives you a reminder at the given time. Takes hh:mm:ssTimezone
160 message. Timezone is any timezone Sopel takes elsewhere; the best choices
161 are those from the tzdb; a list of valid options is available at
162 http://dft.ba/-tz . The seconds and timezone are optional.
163 """
164 regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
165 match = regex.match(trigger.group(2))
166 if not match:
167 bot.reply("Sorry, but I didn't understand your input.")
168 return NOLIMIT
169 hour, minute, second, tz, message = match.groups()
170 if not second:
171 second = '0'
172
173 if pytz:
174 timezone = get_timezone(bot.db, bot.config, tz,
175 trigger.nick, trigger.sender)
176 if not timezone:
177 timezone = 'UTC'
178 now = datetime.now(pytz.timezone(timezone))
179 at_time = datetime(now.year, now.month, now.day,
180 int(hour), int(minute), int(second),
181 tzinfo=now.tzinfo)
182 timediff = at_time - now
183 else:
184 if tz and tz.upper() != 'UTC':
185 bot.reply("I don't have timzeone support installed.")
186 return NOLIMIT
187 now = datetime.now()
188 at_time = datetime(now.year, now.month, now.day,
189 int(hour), int(minute), int(second))
190 timediff = at_time - now
191
192 duration = timediff.seconds
193
194 if duration < 0:
195 duration += 86400
196 create_reminder(bot, trigger, duration, message, 'UTC')
197
198
199 def create_reminder(bot, trigger, duration, message, tz):
200 t = int(time.time()) + duration
201 reminder = (trigger.sender, trigger.nick, message)
202 try:
203 bot.rdb[t].append(reminder)
204 except KeyError:
205 bot.rdb[t] = [reminder]
206
207 dump_database(bot.rfn, bot.rdb)
208
209 if duration >= 60:
210 remind_at = datetime.utcfromtimestamp(t)
211 timef = format_time(bot.db, bot.config, tz, trigger.nick,
212 trigger.sender, remind_at)
213
214 bot.reply('Okay, will remind at %s' % timef)
215 else:
216 bot.reply('Okay, will remind in %s secs' % duration)
217
[end of sopel/modules/remind.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/remind.py b/sopel/modules/remind.py
--- a/sopel/modules/remind.py
+++ b/sopel/modules/remind.py
@@ -126,6 +126,12 @@
@example('.in 3h45m Go to class')
def remind(bot, trigger):
"""Gives you a reminder in the given amount of time."""
+ if not trigger.group(2):
+ bot.say("Missing arguments for reminder command.")
+ return NOLIMIT
+ if trigger.group(3) and not trigger.group(4):
+ bot.say("No message given for reminder.")
+ return NOLIMIT
duration = 0
message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?',
trigger.group(2))[1:])
@@ -161,6 +167,12 @@
are those from the tzdb; a list of valid options is available at
http://dft.ba/-tz . The seconds and timezone are optional.
"""
+ if not trigger.group(2):
+ bot.say("No arguments given for reminder command.")
+ return NOLIMIT
+ if trigger.group(3) and not trigger.group(4):
+ bot.say("No message given for reminder.")
+ return NOLIMIT
regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
match = regex.match(trigger.group(2))
if not match:
|
{"golden_diff": "diff --git a/sopel/modules/remind.py b/sopel/modules/remind.py\n--- a/sopel/modules/remind.py\n+++ b/sopel/modules/remind.py\n@@ -126,6 +126,12 @@\n @example('.in 3h45m Go to class')\n def remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n+ if not trigger.group(2):\n+ bot.say(\"Missing arguments for reminder command.\")\n+ return NOLIMIT\n+ if trigger.group(3) and not trigger.group(4):\n+ bot.say(\"No message given for reminder.\")\n+ return NOLIMIT\n duration = 0\n message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:(?i)' + periods + ')) ?',\n trigger.group(2))[1:])\n@@ -161,6 +167,12 @@\n are those from the tzdb; a list of valid options is available at\n http://dft.ba/-tz . The seconds and timezone are optional.\n \"\"\"\n+ if not trigger.group(2):\n+ bot.say(\"No arguments given for reminder command.\")\n+ return NOLIMIT\n+ if trigger.group(3) and not trigger.group(4):\n+ bot.say(\"No message given for reminder.\")\n+ return NOLIMIT\n regex = re.compile(r'(\\d+):(\\d+)(?::(\\d+))?([^\\s\\d]+)? (.*)')\n match = regex.match(trigger.group(2))\n if not match:\n", "issue": ".at without an argument gives TypeError\n```\n<embolalia> .at\n<Sopel> TypeError: expected string or buffer (file \"/opt/rh/python33/root/usr/lib/python3.3/site-packages/sopel/modules/remind.py\", line 165, in at)\n```\n\n.at without an argument gives TypeError\n```\n<embolalia> .at\n<Sopel> TypeError: expected string or buffer (file \"/opt/rh/python33/root/usr/lib/python3.3/site-packages/sopel/modules/remind.py\", line 165, in at)\n```\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\nremind.py - Sopel Reminder Module\nCopyright 2011, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport time\nimport threading\nimport collections\nimport codecs\nfrom datetime import datetime\nfrom sopel.module import commands, example, NOLIMIT\nimport sopel.tools\nfrom sopel.tools.time import get_timezone, format_time\n\ntry:\n import pytz\nexcept:\n pytz = None\n\n\ndef filename(self):\n name = self.nick + '-' + self.config.core.host + '.reminders.db'\n return os.path.join(self.config.core.homedir, name)\n\n\ndef load_database(name):\n data = {}\n if os.path.isfile(name):\n f = codecs.open(name, 'r', encoding='utf-8')\n for line in f:\n unixtime, channel, nick, message = line.split('\\t')\n message = message.rstrip('\\n')\n t = int(float(unixtime)) # WTFs going on here?\n reminder = (channel, nick, message)\n try:\n data[t].append(reminder)\n except KeyError:\n data[t] = [reminder]\n f.close()\n return data\n\n\ndef dump_database(name, data):\n f = codecs.open(name, 'w', encoding='utf-8')\n for unixtime, reminders in sopel.tools.iteritems(data):\n for channel, nick, message in reminders:\n f.write('%s\\t%s\\t%s\\t%s\\n' % (unixtime, channel, nick, message))\n f.close()\n\n\ndef setup(bot):\n bot.rfn = filename(bot)\n bot.rdb = load_database(bot.rfn)\n\n def monitor(bot):\n time.sleep(5)\n while True:\n now = int(time.time())\n unixtimes = [int(key) for key in bot.rdb]\n oldtimes = [t for t in unixtimes if t <= now]\n if oldtimes:\n for oldtime in oldtimes:\n for (channel, nick, message) in bot.rdb[oldtime]:\n if message:\n bot.msg(channel, nick + ': ' + message)\n else:\n bot.msg(channel, nick + '!')\n del bot.rdb[oldtime]\n dump_database(bot.rfn, bot.rdb)\n time.sleep(2.5)\n\n targs = (bot,)\n t = threading.Thread(target=monitor, args=targs)\n 
t.start()\n\nscaling = collections.OrderedDict([\n ('years', 365.25 * 24 * 3600),\n ('year', 365.25 * 24 * 3600),\n ('yrs', 365.25 * 24 * 3600),\n ('y', 365.25 * 24 * 3600),\n\n ('months', 29.53059 * 24 * 3600),\n ('month', 29.53059 * 24 * 3600),\n ('mo', 29.53059 * 24 * 3600),\n\n ('weeks', 7 * 24 * 3600),\n ('week', 7 * 24 * 3600),\n ('wks', 7 * 24 * 3600),\n ('wk', 7 * 24 * 3600),\n ('w', 7 * 24 * 3600),\n\n ('days', 24 * 3600),\n ('day', 24 * 3600),\n ('d', 24 * 3600),\n\n ('hours', 3600),\n ('hour', 3600),\n ('hrs', 3600),\n ('hr', 3600),\n ('h', 3600),\n\n ('minutes', 60),\n ('minute', 60),\n ('mins', 60),\n ('min', 60),\n ('m', 60),\n\n ('seconds', 1),\n ('second', 1),\n ('secs', 1),\n ('sec', 1),\n ('s', 1),\n])\n\nperiods = '|'.join(scaling.keys())\n\n\n@commands('in')\n@example('.in 3h45m Go to class')\ndef remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n duration = 0\n message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:(?i)' + periods + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n for piece in message:\n grp = re.match('(\\d+(?:\\.\\d+)?) ?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n factor = scaling.get(grp.group(2).lower(), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n stop = True\n if duration == 0:\n return bot.reply(\"Sorry, didn't understand the input.\")\n\n if duration % 1:\n duration = int(duration) + 1\n else:\n duration = int(duration)\n timezone = get_timezone(\n bot.db, bot.config, None, trigger.nick, trigger.sender)\n create_reminder(bot, trigger, duration, reminder, timezone)\n\n\n@commands('at')\n@example('.at 13:47 Do your homework!')\ndef at(bot, trigger):\n \"\"\"\n Gives you a reminder at the given time. Takes hh:mm:ssTimezone\n message. Timezone is any timezone Sopel takes elsewhere; the best choices\n are those from the tzdb; a list of valid options is available at\n http://dft.ba/-tz . The seconds and timezone are optional.\n \"\"\"\n regex = re.compile(r'(\\d+):(\\d+)(?::(\\d+))?([^\\s\\d]+)? 
(.*)')\n match = regex.match(trigger.group(2))\n if not match:\n bot.reply(\"Sorry, but I didn't understand your input.\")\n return NOLIMIT\n hour, minute, second, tz, message = match.groups()\n if not second:\n second = '0'\n\n if pytz:\n timezone = get_timezone(bot.db, bot.config, tz,\n trigger.nick, trigger.sender)\n if not timezone:\n timezone = 'UTC'\n now = datetime.now(pytz.timezone(timezone))\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second),\n tzinfo=now.tzinfo)\n timediff = at_time - now\n else:\n if tz and tz.upper() != 'UTC':\n bot.reply(\"I don't have timzeone support installed.\")\n return NOLIMIT\n now = datetime.now()\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second))\n timediff = at_time - now\n\n duration = timediff.seconds\n\n if duration < 0:\n duration += 86400\n create_reminder(bot, trigger, duration, message, 'UTC')\n\n\ndef create_reminder(bot, trigger, duration, message, tz):\n t = int(time.time()) + duration\n reminder = (trigger.sender, trigger.nick, message)\n try:\n bot.rdb[t].append(reminder)\n except KeyError:\n bot.rdb[t] = [reminder]\n\n dump_database(bot.rfn, bot.rdb)\n\n if duration >= 60:\n remind_at = datetime.utcfromtimestamp(t)\n timef = format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, remind_at)\n\n bot.reply('Okay, will remind at %s' % timef)\n else:\n bot.reply('Okay, will remind in %s secs' % duration)\n", "path": "sopel/modules/remind.py"}]}
| 3,015 | 350 |
gh_patches_debug_38327
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-3995
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add option to extend sys.path with app directory
(I thought there was already an issue for this, but I could not find it)
After some investigation I do not think it's possible (or even possibly desirable) to have things like
```
from .foo import bar
```
work from the app directory. AFAIK Jupyter notebooks do not try to create a package, they just add the notebook directory to `sys.path` and then
```
from foo import bar
```
is possible. I propose to do the same:
```
bokeh serve --modify-sys-path=yes
```
UPDATE:
After a little more research, I believe the most proper thing to do is to prepend `''` into `sys.path` and set the CWD to the app location before `exec`, then undo those things. That seems to be exactly consistent with standard python behavior as described here: https://docs.python.org/2/library/sys.html#sys.path
</issue>
<code>
[start of bokeh/application/handlers/code_runner.py]
1 from __future__ import absolute_import, print_function
2
3 from os.path import abspath
4 from types import ModuleType
5 import os
6 import sys
7 import traceback
8
9 from bokeh.util.serialization import make_id
10
11 class _CodeRunner(object):
12 """ Compile and run a Python source code."""
13
14 def __init__(self, source, path):
15 self._failed = False
16 self._error = None
17 self._error_detail = None
18
19 import ast
20 self._code = None
21
22 try:
23 nodes = ast.parse(source, path)
24 self._code = compile(nodes, filename=path, mode='exec')
25 except SyntaxError as e:
26 self._failed = True
27 self._error = ("Invalid syntax in \"%s\" on line %d:\n%s" % (os.path.basename(e.filename), e.lineno, e.text))
28 import traceback
29 self._error_detail = traceback.format_exc()
30
31 self._path = path
32 self._source = source
33
34 @property
35 def source(self):
36 return self._source
37
38
39 @property
40 def path(self):
41 return self._path
42
43 @property
44 def failed(self):
45 """True if the handler failed to modify the doc"""
46 return self._failed
47
48 @property
49 def error(self):
50 """Error message if the handler failed"""
51 return self._error
52
53 @property
54 def error_detail(self):
55 """Traceback or other details if the handler failed"""
56 return self._error_detail
57
58 def new_module(self):
59 """Make a fresh module to run in."""
60 if self.failed:
61 return None
62
63 module_name = 'bk_script_' + make_id().replace('-', '')
64 module = ModuleType(module_name)
65 module.__dict__['__file__'] = abspath(self._path)
66
67 return module
68
69 def run(self, module, post_check):
70 try:
71 exec(self._code, module.__dict__)
72 post_check()
73 except Exception as e:
74 self._failed = True
75 self._error_detail = traceback.format_exc()
76
77 exc_type, exc_value, exc_traceback = sys.exc_info()
78 filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
79
80 self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt)
81
[end of bokeh/application/handlers/code_runner.py]
[start of examples/app/crossfilter/main.py]
1 import math
2 import numpy as np
3 import pandas as pd
4
5 from functools import partial
6
7 from bokeh import palettes
8 from bokeh.io import curdoc
9 from bokeh.models import HBox, Select
10 from bokeh.plotting import Figure
11 from bokeh.sampledata.autompg import autompg
12
13 # this is to be able to import modules from the app directory
14 import sys
15 from os.path import dirname
16 sys.path.append(dirname(__file__))
17
18 from models import StyleableBox, StatsBox
19 from models.helpers import load_component
20
21 class AppModel(object):
22
23 def __init__(self, df):
24 self.df = df
25 self.columns = []
26 self.color_field = None
27 self.size_field = None
28 self.palette_name = 'Spectral5'
29 self.palettes = [v for v in vars(palettes) if '_' not in v and 'brewer' not in v]
30 self.background_fill = '#2F2F2F'
31 self.default_color = "#31AADE"
32 self.default_size = 9
33 self.scatter_sizes = list(range(6, 22, 3))
34 self.stats_box_style = load_component('stats_box.css')
35 self.columns = self.describe_df()
36 self.x_field = self.continuous_columns[0]['name']
37 self.y_field = self.continuous_columns[1]['name']
38
39 def describe_df(self):
40 descriptors = []
41 for col in self.df.columns:
42 desc = self.df[col].describe()
43 unique = len(self.df[col].unique())
44 if self.df[col].dtype == object:
45 descriptors.append({
46 'type': "DiscreteColumn", 'name': col,
47 'count': desc['count'], 'unique': unique,
48 'top': desc['top'], 'freq': desc['freq'],
49 })
50 elif self.df[col].dtype == np.datetime64:
51 descriptors.append({
52 'type': "TimeColumn", 'name': col,
53 'unique': unique, 'count': desc['count'],
54 'unique': desc['unique'], 'first': desc['first'],
55 'last': desc['last'],
56 })
57 else:
58 descriptors.append({
59 'type': "ContinuousColumn", 'name': col,
60 'count': desc['count'], 'unique': unique,
61 'mean': "%.2f" % desc['mean'], 'std': "%.2f" % desc['std'],
62 'min': "%.2f" % desc['min'], 'max': "%.2f" % desc['max'],
63 })
64 return descriptors
65
66 @property
67 def continuous_columns(self):
68 return [x for x in self.columns if x['type'] != 'DiscreteColumn']
69
70 @property
71 def continuous_column_names(self):
72 return [x.get('name') for x in self.columns if x['type'] != 'DiscreteColumn']
73
74 @property
75 def discrete_column_names(self):
76 return [x.get('name') for x in self.columns if x['type'] == 'DiscreteColumn']
77
78 @property
79 def quantileable_column_names(self):
80 return [x.get('name') for x in self.columns if x['type'] != 'DiscreteColumn' and x['unique'] > 20]
81
82 def get_axes_values(self):
83 xs = self.df[self.x_field].tolist()
84 ys = self.df[self.y_field].tolist()
85
86 if self.color_field:
87 scatter_colors = list(reversed(getattr(palettes, self.palette_name)))
88 bins = len(scatter_colors)
89 groups = pd.qcut(self.df[self.color_field].tolist(), bins)
90 color = [scatter_colors[l] for l in groups.codes]
91 else:
92 color = self.default_color
93
94 if self.size_field:
95 bins = len(self.scatter_sizes)
96 groups = pd.qcut(self.df[self.size_field].tolist(), bins)
97 size = [self.scatter_sizes[l] for l in groups.codes]
98 else:
99 size = self.default_size
100
101 return xs, ys, color, size
102
103 def bind_on_change(attr, old, new, model_field):
104 global plot_view
105 setattr(model, model_field, None) if new == 'None' else setattr(model, model_field, new)
106 plot_view.children = [create_figure()]
107
108 def create_figure():
109 xs, ys, colors, sizes = model.get_axes_values()
110 fig_args = dict(tools='pan', plot_height=600, plot_width=800)
111
112 if model.x_field in model.discrete_column_names and model.y_field in model.discrete_column_names:
113 figure = Figure(x_range=xs, y_range=ys, **fig_args)
114 figure.axis.major_label_orientation = math.pi / 4
115 elif model.x_field in model.discrete_column_names:
116 figure = Figure(x_range=xs, **fig_args)
117 figure.xaxis.major_label_orientation = math.pi / 4
118 elif model.y_field in model.discrete_column_names:
119 figure = Figure(y_range=ys, **fig_args)
120 figure.yaxis.major_label_orientation = math.pi / 4
121 else:
122 figure = Figure(**fig_args)
123
124 figure.circle(x=xs, y=ys, color=colors, size=sizes, line_color="white", alpha=0.8)
125 figure.toolbar_location = None
126 figure.xaxis.axis_label = model.x_field
127 figure.yaxis.axis_label = model.y_field
128 figure.background_fill_color = model.background_fill
129 figure.border_fill_color = model.background_fill
130 figure.axis.axis_line_color = "white"
131 figure.axis.axis_label_text_color = "white"
132 figure.axis.major_label_text_color = "white"
133 figure.axis.major_tick_line_color = "white"
134 figure.axis.minor_tick_line_color = "white"
135 figure.axis.minor_tick_line_color = "white"
136 figure.grid.grid_line_dash = [6, 4]
137 figure.grid.grid_line_alpha = .3
138 return figure
139
140 model = AppModel(autompg)
141
142 controls_view = HBox(width=800)
143
144 x_select = Select.create(name='X-Axis', value=model.x_field, options=model.df.columns)
145 x_select.on_change('value', partial(bind_on_change, model_field='x_field'))
146
147 y_select = Select.create(name='Y-Axis', value=model.y_field, options=model.df.columns)
148 y_select.on_change('value', partial(bind_on_change, model_field='y_field'))
149
150 color_select = Select.create(name='Color', value=model.color_field, options=['None'] + model.quantileable_column_names)
151 color_select.on_change('value', partial(bind_on_change, model_field='color_field'))
152
153 palette_select = Select.create(name='Palette', options=sorted(model.palettes))
154 palette_select.on_change('value', partial(bind_on_change, model_field='palette_name'))
155
156 size_select = Select.create(name='Size', value=model.size_field, options=['None'] + model.quantileable_column_names)
157 size_select.on_change('value', partial(bind_on_change, model_field='size_field'))
158
159 controls_view.children = [x_select, y_select, color_select, palette_select, size_select]
160
161 plot_view = HBox(width=900)
162 plot_view.children = [create_figure()]
163
164 side_container = StyleableBox()
165 side_container.children = [StatsBox(display_items=c, styles=model.stats_box_style) for c in model.continuous_columns]
166 side_container.css_properties = dict(
167 position='absolute', overflow='scroll', top='1em', left='1em', bottom='1em'
168 )
169
170 main_container = StyleableBox()
171 main_container.children = [controls_view, plot_view]
172 main_container.css_properties = dict(
173 position='absolute', top='1em', right='1em', left='12.5em', bottom='1em'
174 )
175
176 doc = curdoc().add_root(HBox(children=[side_container, main_container]))
177
[end of examples/app/crossfilter/main.py]
[start of examples/app/crossfilter/models/models.py]
1 from bokeh.properties import Dict, String, Any
2 from bokeh.models.layouts import BaseBox
3 from bokeh.core import validation
4 from bokeh.core.validation.warnings import EMPTY_LAYOUT
5
6 import sys
7 from os.path import dirname
8 sys.path.append(dirname(__file__))
9
10 from helpers import load_component
11
12 class StyleableBox(BaseBox):
13 '''
14 styleable box provides element level css_properties as a dictionary
15 '''
16 __implementation__ = load_component('./styleable_box.coffee')
17 css_properties = Dict(String, Any, default=None)
18 orientation = String(default='vertical')
19
20 class StatsBox(BaseBox):
21 __implementation__ = load_component('./stats_box.coffee')
22 styles = String(default=None)
23 display_items = Dict(String, Any, default=None)
24
25 @validation.warning(EMPTY_LAYOUT)
26 def _check_empty_layout(self):
27 pass
28
[end of examples/app/crossfilter/models/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bokeh/application/handlers/code_runner.py b/bokeh/application/handlers/code_runner.py
--- a/bokeh/application/handlers/code_runner.py
+++ b/bokeh/application/handlers/code_runner.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, print_function
-from os.path import abspath
+from os.path import abspath, dirname
from types import ModuleType
import os
import sys
@@ -68,8 +68,17 @@
def run(self, module, post_check):
try:
+ # Simulate the sys.path behaviour decribed here:
+ #
+ # https://docs.python.org/2/library/sys.html#sys.path
+ _cwd = os.getcwd()
+ _sys_path = list(sys.path)
+ os.chdir(dirname(self._path))
+ sys.path.insert(0, '')
+
exec(self._code, module.__dict__)
post_check()
+
except Exception as e:
self._failed = True
self._error_detail = traceback.format_exc()
@@ -78,3 +87,8 @@
filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
self._error = "%s\nFile \"%s\", line %d, in %s:\n%s" % (str(e), os.path.basename(filename), line_number, func, txt)
+
+ finally:
+ # undo sys.path, CWD fixups
+ os.chdir(_cwd)
+ sys.path = _sys_path
diff --git a/examples/app/crossfilter/main.py b/examples/app/crossfilter/main.py
--- a/examples/app/crossfilter/main.py
+++ b/examples/app/crossfilter/main.py
@@ -10,11 +10,6 @@
from bokeh.plotting import Figure
from bokeh.sampledata.autompg import autompg
-# this is to be able to import modules from the app directory
-import sys
-from os.path import dirname
-sys.path.append(dirname(__file__))
-
from models import StyleableBox, StatsBox
from models.helpers import load_component
diff --git a/examples/app/crossfilter/models/models.py b/examples/app/crossfilter/models/models.py
--- a/examples/app/crossfilter/models/models.py
+++ b/examples/app/crossfilter/models/models.py
@@ -3,11 +3,7 @@
from bokeh.core import validation
from bokeh.core.validation.warnings import EMPTY_LAYOUT
-import sys
-from os.path import dirname
-sys.path.append(dirname(__file__))
-
-from helpers import load_component
+from .helpers import load_component
class StyleableBox(BaseBox):
'''
@@ -21,7 +17,7 @@
__implementation__ = load_component('./stats_box.coffee')
styles = String(default=None)
display_items = Dict(String, Any, default=None)
-
+
@validation.warning(EMPTY_LAYOUT)
def _check_empty_layout(self):
pass
|
{"golden_diff": "diff --git a/bokeh/application/handlers/code_runner.py b/bokeh/application/handlers/code_runner.py\n--- a/bokeh/application/handlers/code_runner.py\n+++ b/bokeh/application/handlers/code_runner.py\n@@ -1,6 +1,6 @@\n from __future__ import absolute_import, print_function\n \n-from os.path import abspath\n+from os.path import abspath, dirname\n from types import ModuleType\n import os\n import sys\n@@ -68,8 +68,17 @@\n \n def run(self, module, post_check):\n try:\n+ # Simulate the sys.path behaviour decribed here:\n+ #\n+ # https://docs.python.org/2/library/sys.html#sys.path\n+ _cwd = os.getcwd()\n+ _sys_path = list(sys.path)\n+ os.chdir(dirname(self._path))\n+ sys.path.insert(0, '')\n+\n exec(self._code, module.__dict__)\n post_check()\n+\n except Exception as e:\n self._failed = True\n self._error_detail = traceback.format_exc()\n@@ -78,3 +87,8 @@\n filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]\n \n self._error = \"%s\\nFile \\\"%s\\\", line %d, in %s:\\n%s\" % (str(e), os.path.basename(filename), line_number, func, txt)\n+\n+ finally:\n+ # undo sys.path, CWD fixups\n+ os.chdir(_cwd)\n+ sys.path = _sys_path\ndiff --git a/examples/app/crossfilter/main.py b/examples/app/crossfilter/main.py\n--- a/examples/app/crossfilter/main.py\n+++ b/examples/app/crossfilter/main.py\n@@ -10,11 +10,6 @@\n from bokeh.plotting import Figure\n from bokeh.sampledata.autompg import autompg\n \n-# this is to be able to import modules from the app directory\n-import sys\n-from os.path import dirname\n-sys.path.append(dirname(__file__))\n-\n from models import StyleableBox, StatsBox\n from models.helpers import load_component\n \ndiff --git a/examples/app/crossfilter/models/models.py b/examples/app/crossfilter/models/models.py\n--- a/examples/app/crossfilter/models/models.py\n+++ b/examples/app/crossfilter/models/models.py\n@@ -3,11 +3,7 @@\n from bokeh.core import validation\n from bokeh.core.validation.warnings import EMPTY_LAYOUT\n \n-import sys\n-from os.path import dirname\n-sys.path.append(dirname(__file__))\n-\n-from helpers import load_component\n+from .helpers import load_component\n \n class StyleableBox(BaseBox):\n '''\n@@ -21,7 +17,7 @@\n __implementation__ = load_component('./stats_box.coffee')\n styles = String(default=None)\n display_items = Dict(String, Any, default=None)\n- \n+\n @validation.warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n pass\n", "issue": "Add option to extend sys.path with app directory\n(I thought there was already an issue for this, but I could not find it)\n\nAfter some investigation I do not think it's possible (or even possibly desirable) to have things like \n\n```\nfrom .foo import bar\n```\n\nwork from the app directory. AFIAK Jupyter notebooks do not try to create a package, they just add the notebook directory to `sys.path` and then \n\n```\nfrom foo import bar\n```\n\nis possible. I propose to do the same:\n\n```\nbokeh serve --modify-sys-path=yes\n```\n\nUPDATE: \n\nAfter a little more research, I believe the most proper thing to do is to prepend `''` into `sys.path` and set the CWD to the app location before `exec`, then undo those things. 
That seems to be exactly consistent with standard python behavior as described here: https://docs.python.org/2/library/sys.html#sys.path\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nfrom os.path import abspath\nfrom types import ModuleType\nimport os\nimport sys\nimport traceback\n\nfrom bokeh.util.serialization import make_id\n\nclass _CodeRunner(object):\n \"\"\" Compile and run a Python source code.\"\"\"\n\n def __init__(self, source, path):\n self._failed = False\n self._error = None\n self._error_detail = None\n\n import ast\n self._code = None\n\n try:\n nodes = ast.parse(source, path)\n self._code = compile(nodes, filename=path, mode='exec')\n except SyntaxError as e:\n self._failed = True\n self._error = (\"Invalid syntax in \\\"%s\\\" on line %d:\\n%s\" % (os.path.basename(e.filename), e.lineno, e.text))\n import traceback\n self._error_detail = traceback.format_exc()\n\n self._path = path\n self._source = source\n\n @property\n def source(self):\n return self._source\n\n\n @property\n def path(self):\n return self._path\n\n @property\n def failed(self):\n \"\"\"True if the handler failed to modify the doc\"\"\"\n return self._failed\n\n @property\n def error(self):\n \"\"\"Error message if the handler failed\"\"\"\n return self._error\n\n @property\n def error_detail(self):\n \"\"\"Traceback or other details if the handler failed\"\"\"\n return self._error_detail\n\n def new_module(self):\n \"\"\"Make a fresh module to run in.\"\"\"\n if self.failed:\n return None\n\n module_name = 'bk_script_' + make_id().replace('-', '')\n module = ModuleType(module_name)\n module.__dict__['__file__'] = abspath(self._path)\n\n return module\n\n def run(self, module, post_check):\n try:\n exec(self._code, module.__dict__)\n post_check()\n except Exception as e:\n self._failed = True\n self._error_detail = traceback.format_exc()\n\n exc_type, exc_value, exc_traceback = sys.exc_info()\n filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]\n\n self._error = \"%s\\nFile \\\"%s\\\", line %d, in %s:\\n%s\" % (str(e), os.path.basename(filename), line_number, func, txt)\n", "path": "bokeh/application/handlers/code_runner.py"}, {"content": "import math\nimport numpy as np\nimport pandas as pd\n\nfrom functools import partial\n\nfrom bokeh import palettes\nfrom bokeh.io import curdoc\nfrom bokeh.models import HBox, Select\nfrom bokeh.plotting import Figure\nfrom bokeh.sampledata.autompg import autompg\n\n# this is to be able to import modules from the app directory\nimport sys\nfrom os.path import dirname\nsys.path.append(dirname(__file__))\n\nfrom models import StyleableBox, StatsBox\nfrom models.helpers import load_component\n\nclass AppModel(object):\n\n def __init__(self, df):\n self.df = df\n self.columns = []\n self.color_field = None\n self.size_field = None\n self.palette_name = 'Spectral5'\n self.palettes = [v for v in vars(palettes) if '_' not in v and 'brewer' not in v]\n self.background_fill = '#2F2F2F'\n self.default_color = \"#31AADE\"\n self.default_size = 9\n self.scatter_sizes = list(range(6, 22, 3))\n self.stats_box_style = load_component('stats_box.css')\n self.columns = self.describe_df()\n self.x_field = self.continuous_columns[0]['name']\n self.y_field = self.continuous_columns[1]['name']\n\n def describe_df(self):\n descriptors = []\n for col in self.df.columns:\n desc = self.df[col].describe()\n unique = len(self.df[col].unique())\n if self.df[col].dtype == object:\n descriptors.append({\n 'type': 
\"DiscreteColumn\", 'name': col,\n 'count': desc['count'], 'unique': unique,\n 'top': desc['top'], 'freq': desc['freq'],\n })\n elif self.df[col].dtype == np.datetime64:\n descriptors.append({\n 'type': \"TimeColumn\", 'name': col,\n 'unique': unique, 'count': desc['count'],\n 'unique': desc['unique'], 'first': desc['first'],\n 'last': desc['last'],\n })\n else:\n descriptors.append({\n 'type': \"ContinuousColumn\", 'name': col,\n 'count': desc['count'], 'unique': unique,\n 'mean': \"%.2f\" % desc['mean'], 'std': \"%.2f\" % desc['std'],\n 'min': \"%.2f\" % desc['min'], 'max': \"%.2f\" % desc['max'],\n })\n return descriptors\n\n @property\n def continuous_columns(self):\n return [x for x in self.columns if x['type'] != 'DiscreteColumn']\n\n @property\n def continuous_column_names(self):\n return [x.get('name') for x in self.columns if x['type'] != 'DiscreteColumn']\n\n @property\n def discrete_column_names(self):\n return [x.get('name') for x in self.columns if x['type'] == 'DiscreteColumn']\n\n @property\n def quantileable_column_names(self):\n return [x.get('name') for x in self.columns if x['type'] != 'DiscreteColumn' and x['unique'] > 20]\n\n def get_axes_values(self):\n xs = self.df[self.x_field].tolist()\n ys = self.df[self.y_field].tolist()\n\n if self.color_field:\n scatter_colors = list(reversed(getattr(palettes, self.palette_name)))\n bins = len(scatter_colors)\n groups = pd.qcut(self.df[self.color_field].tolist(), bins)\n color = [scatter_colors[l] for l in groups.codes]\n else:\n color = self.default_color\n\n if self.size_field:\n bins = len(self.scatter_sizes)\n groups = pd.qcut(self.df[self.size_field].tolist(), bins)\n size = [self.scatter_sizes[l] for l in groups.codes]\n else:\n size = self.default_size\n\n return xs, ys, color, size\n\ndef bind_on_change(attr, old, new, model_field):\n global plot_view\n setattr(model, model_field, None) if new == 'None' else setattr(model, model_field, new)\n plot_view.children = [create_figure()]\n\ndef create_figure():\n xs, ys, colors, sizes = model.get_axes_values()\n fig_args = dict(tools='pan', plot_height=600, plot_width=800)\n\n if model.x_field in model.discrete_column_names and model.y_field in model.discrete_column_names:\n figure = Figure(x_range=xs, y_range=ys, **fig_args)\n figure.axis.major_label_orientation = math.pi / 4\n elif model.x_field in model.discrete_column_names:\n figure = Figure(x_range=xs, **fig_args)\n figure.xaxis.major_label_orientation = math.pi / 4\n elif model.y_field in model.discrete_column_names:\n figure = Figure(y_range=ys, **fig_args)\n figure.yaxis.major_label_orientation = math.pi / 4\n else:\n figure = Figure(**fig_args)\n\n figure.circle(x=xs, y=ys, color=colors, size=sizes, line_color=\"white\", alpha=0.8)\n figure.toolbar_location = None\n figure.xaxis.axis_label = model.x_field\n figure.yaxis.axis_label = model.y_field\n figure.background_fill_color = model.background_fill\n figure.border_fill_color = model.background_fill\n figure.axis.axis_line_color = \"white\"\n figure.axis.axis_label_text_color = \"white\"\n figure.axis.major_label_text_color = \"white\"\n figure.axis.major_tick_line_color = \"white\"\n figure.axis.minor_tick_line_color = \"white\"\n figure.axis.minor_tick_line_color = \"white\"\n figure.grid.grid_line_dash = [6, 4]\n figure.grid.grid_line_alpha = .3\n return figure\n\nmodel = AppModel(autompg)\n\ncontrols_view = HBox(width=800)\n\nx_select = Select.create(name='X-Axis', value=model.x_field, options=model.df.columns)\nx_select.on_change('value', 
partial(bind_on_change, model_field='x_field'))\n\ny_select = Select.create(name='Y-Axis', value=model.y_field, options=model.df.columns)\ny_select.on_change('value', partial(bind_on_change, model_field='y_field'))\n\ncolor_select = Select.create(name='Color', value=model.color_field, options=['None'] + model.quantileable_column_names)\ncolor_select.on_change('value', partial(bind_on_change, model_field='color_field'))\n\npalette_select = Select.create(name='Palette', options=sorted(model.palettes))\npalette_select.on_change('value', partial(bind_on_change, model_field='palette_name'))\n\nsize_select = Select.create(name='Size', value=model.size_field, options=['None'] + model.quantileable_column_names)\nsize_select.on_change('value', partial(bind_on_change, model_field='size_field'))\n\ncontrols_view.children = [x_select, y_select, color_select, palette_select, size_select]\n\nplot_view = HBox(width=900)\nplot_view.children = [create_figure()]\n\nside_container = StyleableBox()\nside_container.children = [StatsBox(display_items=c, styles=model.stats_box_style) for c in model.continuous_columns]\nside_container.css_properties = dict(\n position='absolute', overflow='scroll', top='1em', left='1em', bottom='1em'\n)\n\nmain_container = StyleableBox()\nmain_container.children = [controls_view, plot_view]\nmain_container.css_properties = dict(\n position='absolute', top='1em', right='1em', left='12.5em', bottom='1em'\n)\n\ndoc = curdoc().add_root(HBox(children=[side_container, main_container]))\n", "path": "examples/app/crossfilter/main.py"}, {"content": "from bokeh.properties import Dict, String, Any\nfrom bokeh.models.layouts import BaseBox\nfrom bokeh.core import validation\nfrom bokeh.core.validation.warnings import EMPTY_LAYOUT\n\nimport sys\nfrom os.path import dirname\nsys.path.append(dirname(__file__))\n\nfrom helpers import load_component\n\nclass StyleableBox(BaseBox):\n '''\n styleable box provides element level css_properties as a dictionary\n '''\n __implementation__ = load_component('./styleable_box.coffee')\n css_properties = Dict(String, Any, default=None)\n orientation = String(default='vertical')\n\nclass StatsBox(BaseBox):\n __implementation__ = load_component('./stats_box.coffee')\n styles = String(default=None)\n display_items = Dict(String, Any, default=None)\n \n @validation.warning(EMPTY_LAYOUT)\n def _check_empty_layout(self):\n pass\n", "path": "examples/app/crossfilter/models/models.py"}]}
| 3,797 | 645 |
gh_patches_debug_16134
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-4385
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
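For readers less familiar with the Python 2 idiom mentioned above, here is a minimal, self-contained sketch of the two styles; the counts are made-up values and this is not code from the repository:

```python
from __future__ import division  # must be the first statement of a module

# Hypothetical counts, for illustration only
unexpected_count = 3
nonnull_count = 4

# Old Python 2 idiom the issue discourages: force float division via 1.*
ratio_old = 1. * unexpected_count / nonnull_count   # 0.75

# With the __future__ import in effect, plain / already does true division
ratio_new = unexpected_count / nonnull_count        # 0.75
```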
<code>
[start of great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py]
1 from typing import Any, Dict, List, Optional, Tuple, Union
2
3 import numpy as np
4
5 from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
6 from great_expectations.rule_based_profiler.helpers.util import (
7 get_parameter_value_and_validate_return_type,
8 )
9 from great_expectations.rule_based_profiler.parameter_builder import (
10 MetricMultiBatchParameterBuilder,
11 )
12 from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (
13 MetricValues,
14 )
15 from great_expectations.rule_based_profiler.types import (
16 Domain,
17 ParameterContainer,
18 ParameterNode,
19 )
20
21
22 class MeanUnexpectedMapMetricMultiBatchParameterBuilder(
23 MetricMultiBatchParameterBuilder
24 ):
25 """
26 Compute mean unexpected count ratio (as a fraction) of a specified map-style metric across all specified batches.
27 """
28
29 def __init__(
30 self,
31 name: str,
32 map_metric_name: str,
33 total_count_parameter_builder_name: str,
34 null_count_parameter_builder_name: Optional[str] = None,
35 metric_domain_kwargs: Optional[Union[str, dict]] = None,
36 metric_value_kwargs: Optional[Union[str, dict]] = None,
37 batch_list: Optional[List[Batch]] = None,
38 batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None,
39 json_serialize: Union[str, bool] = True,
40 data_context: Optional["DataContext"] = None, # noqa: F821
41 ):
42 """
43 Args:
44 name: the name of this parameter -- this is user-specified parameter name (from configuration);
45 it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
46 and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
47 map_metric_name: the name of a map metric (must be a supported and registered map metric); the suffix
48 ".unexpected_count" will be appended to "map_metric_name" to be used in MetricConfiguration to get values.
49 total_count_parameter_builder_name: name of parameter that computes total_count (of rows in Batch).
50 null_count_parameter_builder_name: name of parameter that computes null_count (of domain values in Batch).
51 metric_domain_kwargs: used in MetricConfiguration
52 metric_value_kwargs: used in MetricConfiguration
53 batch_list: explicitly passed Batch objects for parameter computation (take precedence over batch_request).
54 batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
55 json_serialize: If True (default), convert computed value to JSON prior to saving results.
56 data_context: DataContext
57 """
58 super().__init__(
59 name=name,
60 metric_name=f"{map_metric_name}.unexpected_count",
61 metric_domain_kwargs=metric_domain_kwargs,
62 metric_value_kwargs=metric_value_kwargs,
63 enforce_numeric_metric=True,
64 replace_nan_with_zero=True,
65 reduce_scalar_metric=True,
66 batch_list=batch_list,
67 batch_request=batch_request,
68 json_serialize=json_serialize,
69 data_context=data_context,
70 )
71
72 self._map_metric_name = map_metric_name
73 self._total_count_parameter_builder_name = total_count_parameter_builder_name
74 self._null_count_parameter_builder_name = null_count_parameter_builder_name
75
76 @property
77 def map_metric_name(self) -> str:
78 return self._map_metric_name
79
80 @property
81 def total_count_parameter_builder_name(self) -> str:
82 return self._total_count_parameter_builder_name
83
84 @property
85 def null_count_parameter_builder_name(self) -> Optional[str]:
86 return self._null_count_parameter_builder_name
87
88 def _build_parameters(
89 self,
90 parameter_container: ParameterContainer,
91 domain: Domain,
92 variables: Optional[ParameterContainer] = None,
93 parameters: Optional[Dict[str, ParameterContainer]] = None,
94 ) -> Tuple[Any, dict]:
95 """
96 Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional
97 details.
98
99 return: Tuple containing computed_parameter_value and parameter_computation_details metadata.
100 """
101 # Obtain total_count_parameter_builder_name from "rule state" (i.e., variables and parameters); from instance variable otherwise.
102 total_count_parameter_builder_name: str = (
103 get_parameter_value_and_validate_return_type(
104 domain=domain,
105 parameter_reference=self.total_count_parameter_builder_name,
106 expected_return_type=str,
107 variables=variables,
108 parameters=parameters,
109 )
110 )
111
112 fully_qualified_total_count_parameter_builder_name: str = (
113 f"$parameter.{total_count_parameter_builder_name}"
114 )
115 # Obtain total_count from "rule state" (i.e., variables and parameters); from instance variable otherwise.
116 total_count_parameter_node: ParameterNode = (
117 get_parameter_value_and_validate_return_type(
118 domain=domain,
119 parameter_reference=fully_qualified_total_count_parameter_builder_name,
120 expected_return_type=None,
121 variables=variables,
122 parameters=parameters,
123 )
124 )
125 total_count_values: MetricValues = total_count_parameter_node.value
126
127 # Obtain null_count_parameter_builder_name from "rule state" (i.e., variables and parameters); from instance variable otherwise.
128 null_count_parameter_builder_name: str = (
129 get_parameter_value_and_validate_return_type(
130 domain=domain,
131 parameter_reference=self.null_count_parameter_builder_name,
132 expected_return_type=str,
133 variables=variables,
134 parameters=parameters,
135 )
136 )
137
138 batch_ids: Optional[List[str]] = self.get_batch_ids(
139 domain=domain,
140 variables=variables,
141 parameters=parameters,
142 )
143 num_batch_ids: int = len(batch_ids)
144
145 null_count_values: MetricValues
146 if null_count_parameter_builder_name is None:
147 null_count_values = np.zeros(shape=(num_batch_ids,))
148 else:
149 fully_qualified_null_count_parameter_builder_name: str = (
150 f"$parameter.{null_count_parameter_builder_name}"
151 )
152 # Obtain null_count from "rule state" (i.e., variables and parameters); from instance variable otherwise.
153 null_count_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
154 domain=domain,
155 parameter_reference=fully_qualified_null_count_parameter_builder_name,
156 expected_return_type=None,
157 variables=variables,
158 parameters=parameters,
159 )
160 null_count_values = null_count_parameter_node.value
161
162 nonnull_count_values: np.ndarray = total_count_values - null_count_values
163
164 # Compute "unexpected_count" corresponding to "map_metric_name" (given as argument to this "ParameterBuilder").
165 super().build_parameters(
166 parameter_container=parameter_container,
167 domain=domain,
168 variables=variables,
169 parameters=parameters,
170 parameter_computation_impl=super()._build_parameters,
171 )
172
173 # Retrieve "unexpected_count" corresponding to "map_metric_name" (given as argument to this "ParameterBuilder").
174 parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
175 domain=domain,
176 parameter_reference=self.fully_qualified_parameter_name,
177 expected_return_type=None,
178 variables=variables,
179 parameters=parameters,
180 )
181 unexpected_count_values: MetricValues = parameter_node.value
182
183 unexpected_count_ratio_values: np.ndarray = (
184 unexpected_count_values / nonnull_count_values
185 )
186 mean_unexpected_count_ratio: np.float64 = np.mean(unexpected_count_ratio_values)
187
188 return (
189 mean_unexpected_count_ratio,
190 parameter_node.details,
191 )
192
[end of great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py b/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py
--- a/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py
+++ b/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
@@ -26,6 +26,15 @@
Compute mean unexpected count ratio (as a fraction) of a specified map-style metric across all specified batches.
"""
+ exclude_field_names: Set[
+ str
+ ] = MetricMultiBatchParameterBuilder.exclude_field_names | {
+ "metric_name",
+ "enforce_numeric_metric",
+ "replace_nan_with_zero",
+ "reduce_scalar_metric",
+ }
+
def __init__(
self,
name: str,
|
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py b/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py\n--- a/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py\n+++ b/great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py\n@@ -1,4 +1,4 @@\n-from typing import Any, Dict, List, Optional, Tuple, Union\n+from typing import Any, Dict, List, Optional, Set, Tuple, Union\n \n import numpy as np\n \n@@ -26,6 +26,15 @@\n Compute mean unexpected count ratio (as a fraction) of a specified map-style metric across all specified batches.\n \"\"\"\n \n+ exclude_field_names: Set[\n+ str\n+ ] = MetricMultiBatchParameterBuilder.exclude_field_names | {\n+ \"metric_name\",\n+ \"enforce_numeric_metric\",\n+ \"replace_nan_with_zero\",\n+ \"reduce_scalar_metric\",\n+ }\n+\n def __init__(\n self,\n name: str,\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest\nfrom great_expectations.rule_based_profiler.helpers.util import (\n get_parameter_value_and_validate_return_type,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder import (\n MetricMultiBatchParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n MetricValues,\n)\nfrom great_expectations.rule_based_profiler.types import (\n Domain,\n ParameterContainer,\n ParameterNode,\n)\n\n\nclass MeanUnexpectedMapMetricMultiBatchParameterBuilder(\n MetricMultiBatchParameterBuilder\n):\n \"\"\"\n Compute mean unexpected count ratio (as a fraction) of a specified map-style metric across all specified batches.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n map_metric_name: str,\n total_count_parameter_builder_name: str,\n null_count_parameter_builder_name: Optional[str] = None,\n metric_domain_kwargs: Optional[Union[str, dict]] = None,\n metric_value_kwargs: Optional[Union[str, dict]] = None,\n batch_list: Optional[List[Batch]] = None,\n batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None,\n json_serialize: Union[str, bool] = True,\n data_context: Optional[\"DataContext\"] = None, # noqa: F821\n ):\n \"\"\"\n Args:\n name: the name of this parameter -- this is user-specified parameter name (from configuration);\n it is not the fully-qualified parameter name; a fully-qualified parameter name must start with \"$parameter.\"\n and may contain one or more subsequent parts (e.g., \"$parameter.<my_param_from_config>.<metric_name>\").\n map_metric_name: the name of a map metric (must be a supported and registered map metric); the suffix\n \".unexpected_count\" will be appended to \"map_metric_name\" to be used in MetricConfiguration to get values.\n total_count_parameter_builder_name: name of parameter that computes total_count (of rows in Batch).\n null_count_parameter_builder_name: name of parameter that computes null_count (of domain values in Batch).\n metric_domain_kwargs: used in MetricConfiguration\n metric_value_kwargs: used in MetricConfiguration\n batch_list: explicitly passed Batch objects for parameter 
computation (take precedence over batch_request).\n batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.\n json_serialize: If True (default), convert computed value to JSON prior to saving results.\n data_context: DataContext\n \"\"\"\n super().__init__(\n name=name,\n metric_name=f\"{map_metric_name}.unexpected_count\",\n metric_domain_kwargs=metric_domain_kwargs,\n metric_value_kwargs=metric_value_kwargs,\n enforce_numeric_metric=True,\n replace_nan_with_zero=True,\n reduce_scalar_metric=True,\n batch_list=batch_list,\n batch_request=batch_request,\n json_serialize=json_serialize,\n data_context=data_context,\n )\n\n self._map_metric_name = map_metric_name\n self._total_count_parameter_builder_name = total_count_parameter_builder_name\n self._null_count_parameter_builder_name = null_count_parameter_builder_name\n\n @property\n def map_metric_name(self) -> str:\n return self._map_metric_name\n\n @property\n def total_count_parameter_builder_name(self) -> str:\n return self._total_count_parameter_builder_name\n\n @property\n def null_count_parameter_builder_name(self) -> Optional[str]:\n return self._null_count_parameter_builder_name\n\n def _build_parameters(\n self,\n parameter_container: ParameterContainer,\n domain: Domain,\n variables: Optional[ParameterContainer] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n ) -> Tuple[Any, dict]:\n \"\"\"\n Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional\n details.\n\n return: Tuple containing computed_parameter_value and parameter_computation_details metadata.\n \"\"\"\n # Obtain total_count_parameter_builder_name from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n total_count_parameter_builder_name: str = (\n get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=self.total_count_parameter_builder_name,\n expected_return_type=str,\n variables=variables,\n parameters=parameters,\n )\n )\n\n fully_qualified_total_count_parameter_builder_name: str = (\n f\"$parameter.{total_count_parameter_builder_name}\"\n )\n # Obtain total_count from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n total_count_parameter_node: ParameterNode = (\n get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=fully_qualified_total_count_parameter_builder_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n )\n total_count_values: MetricValues = total_count_parameter_node.value\n\n # Obtain null_count_parameter_builder_name from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n null_count_parameter_builder_name: str = (\n get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=self.null_count_parameter_builder_name,\n expected_return_type=str,\n variables=variables,\n parameters=parameters,\n )\n )\n\n batch_ids: Optional[List[str]] = self.get_batch_ids(\n domain=domain,\n variables=variables,\n parameters=parameters,\n )\n num_batch_ids: int = len(batch_ids)\n\n null_count_values: MetricValues\n if null_count_parameter_builder_name is None:\n null_count_values = np.zeros(shape=(num_batch_ids,))\n else:\n fully_qualified_null_count_parameter_builder_name: str = (\n f\"$parameter.{null_count_parameter_builder_name}\"\n )\n # Obtain null_count from \"rule state\" (i.e., variables and parameters); from 
instance variable otherwise.\n null_count_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=fully_qualified_null_count_parameter_builder_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n null_count_values = null_count_parameter_node.value\n\n nonnull_count_values: np.ndarray = total_count_values - null_count_values\n\n # Compute \"unexpected_count\" corresponding to \"map_metric_name\" (given as argument to this \"ParameterBuilder\").\n super().build_parameters(\n parameter_container=parameter_container,\n domain=domain,\n variables=variables,\n parameters=parameters,\n parameter_computation_impl=super()._build_parameters,\n )\n\n # Retrieve \"unexpected_count\" corresponding to \"map_metric_name\" (given as argument to this \"ParameterBuilder\").\n parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=self.fully_qualified_parameter_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n unexpected_count_values: MetricValues = parameter_node.value\n\n unexpected_count_ratio_values: np.ndarray = (\n unexpected_count_values / nonnull_count_values\n )\n mean_unexpected_count_ratio: np.float64 = np.mean(unexpected_count_ratio_values)\n\n return (\n mean_unexpected_count_ratio,\n parameter_node.details,\n )\n", "path": "great_expectations/rule_based_profiler/parameter_builder/mean_unexpected_map_metric_multi_batch_parameter_builder.py"}]}
| 2,624 | 262 |
gh_patches_debug_17983
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-56308
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BE] Update the alerts response to include whether the alert was disabled due to being “noisy”
Similar to https://github.com/getsentry/sentry/issues/55098 - update the response to include something like `disableReason: noisy` if a `NoisyIssueAlert` entry exists for that rule id. Also need to add a `noisyDisableDate` so the front end can know whether or not to display the banner.
</issue>
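As a rough sketch of the payload shape the issue asks for (the key names follow the wording of the issue and the date format is an assumption, so the final implementation may differ):

```python
# Illustrative only: a serialized issue-alert rule once the new fields exist.
example_rule_response = {
    "id": "123",
    "name": "My noisy alert",
    "status": "disabled",
    "disableReason": "noisy",                    # present when a noisy-rule record exists
    "noisyDisableDate": "2023-09-01T00:00:00Z",  # lets the UI decide whether to show the banner
}
```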
<code>
[start of src/sentry/api/serializers/models/rule.py]
1 from collections import defaultdict
2 from typing import List
3
4 from django.db.models import Max, Q, prefetch_related_objects
5 from rest_framework import serializers
6
7 from sentry.api.serializers import Serializer, register
8 from sentry.constants import ObjectStatus
9 from sentry.models import (
10 ACTOR_TYPES,
11 Environment,
12 Rule,
13 RuleActivity,
14 RuleActivityType,
15 actor_type_to_string,
16 )
17 from sentry.models.actor import Actor
18 from sentry.models.rulefirehistory import RuleFireHistory
19 from sentry.models.rulesnooze import RuleSnooze
20 from sentry.services.hybrid_cloud.user.service import user_service
21
22
23 def _generate_rule_label(project, rule, data):
24 from sentry.rules import rules
25
26 rule_cls = rules.get(data["id"])
27 if rule_cls is None:
28 return
29
30 rule_inst = rule_cls(project, data=data, rule=rule)
31 return rule_inst.render_label()
32
33
34 def _is_filter(data):
35 from sentry.rules import rules
36
37 rule_cls = rules.get(data["id"])
38 return rule_cls.rule_type == "filter/event"
39
40
41 @register(Rule)
42 class RuleSerializer(Serializer):
43 def __init__(self, expand=None):
44 super().__init__()
45 self.expand = expand or []
46
47 def get_attrs(self, item_list, user, **kwargs):
48 from sentry.services.hybrid_cloud.app import app_service
49
50 prefetch_related_objects(item_list, "project")
51
52 environments = Environment.objects.in_bulk(
53 [_f for _f in [i.environment_id for i in item_list] if _f]
54 )
55
56 result = {i: {"environment": environments.get(i.environment_id)} for i in item_list}
57 ras = list(
58 RuleActivity.objects.filter(
59 rule__in=item_list, type=RuleActivityType.CREATED.value
60 ).select_related("rule")
61 )
62
63 users = {
64 u.id: u for u in user_service.get_many(filter=dict(user_ids=[ra.user_id for ra in ras]))
65 }
66
67 for rule_activity in ras:
68 u = users.get(rule_activity.user_id)
69 if u:
70 user = {
71 "id": u.id,
72 "name": u.get_display_name(),
73 "email": u.email,
74 }
75 else:
76 user = None
77
78 result[rule_activity.rule].update({"created_by": user})
79
80 rules = {item.id: item for item in item_list}
81 resolved_actors = {}
82 owners_by_type = defaultdict(list)
83
84 sentry_app_uuids = [
85 sentry_app_uuid
86 for sentry_app_uuid in (
87 action.get("sentryAppInstallationUuid")
88 for rule in rules.values()
89 for action in rule.data.get("actions", [])
90 )
91 if sentry_app_uuid is not None
92 ]
93
94 sentry_app_ids: List[int] = [
95 i.sentry_app.id for i in app_service.get_many(filter=dict(uuids=sentry_app_uuids))
96 ]
97 sentry_app_installations_by_uuid = app_service.get_related_sentry_app_components(
98 organization_ids=[rule.project.organization_id for rule in rules.values()],
99 sentry_app_ids=sentry_app_ids,
100 type="alert-rule-action",
101 group_by="uuid",
102 )
103
104 for item in item_list:
105 if item.owner_id is not None:
106 owners_by_type[actor_type_to_string(item.owner.type)].append(item.owner_id)
107
108 for k, v in ACTOR_TYPES.items():
109 actors = Actor.objects.filter(type=v, id__in=owners_by_type[k])
110 if k == "team":
111 resolved_actors[k] = {actor.id: actor.team_id for actor in actors}
112 if k == "user":
113 resolved_actors[k] = {actor.id: actor.user_id for actor in actors}
114
115 for rule in rules.values():
116 if rule.owner_id:
117 type = actor_type_to_string(rule.owner.type)
118 if rule.owner_id in resolved_actors[type]:
119 result[rule]["owner"] = f"{type}:{resolved_actors[type][rule.owner_id]}"
120
121 for action in rule.data.get("actions", []):
122 install = sentry_app_installations_by_uuid.get(
123 str(action.get("sentryAppInstallationUuid"))
124 )
125 if install:
126 action["_sentry_app_component"] = install.get("sentry_app_component")
127 action["_sentry_app_installation"] = install.get("sentry_app_installation")
128
129 if "lastTriggered" in self.expand:
130 last_triggered_lookup = {
131 rfh["rule_id"]: rfh["date_added"]
132 for rfh in RuleFireHistory.objects.filter(rule__in=item_list)
133 .values("rule_id")
134 .annotate(date_added=Max("date_added"))
135 }
136 for rule in item_list:
137 result[rule]["last_triggered"] = last_triggered_lookup.get(rule.id, None)
138
139 return result
140
141 def serialize(self, obj, attrs, user, **kwargs):
142 environment = attrs["environment"]
143 all_conditions = [
144 dict(list(o.items()) + [("name", _generate_rule_label(obj.project, obj, o))])
145 for o in obj.data.get("conditions", [])
146 ]
147
148 actions = []
149 for action in obj.data.get("actions", []):
150 try:
151 actions.append(
152 dict(
153 list(action.items())
154 + [("name", _generate_rule_label(obj.project, obj, action))]
155 )
156 )
157 except serializers.ValidationError:
158 # Integrations can be deleted and we don't want to fail to load the rule
159 pass
160
161 d = {
162 # XXX(dcramer): we currently serialize unsaved rule objects
163 # as part of the rule editor
164 "id": str(obj.id) if obj.id else None,
165 # conditions pertain to criteria that can trigger an alert
166 "conditions": list(filter(lambda condition: not _is_filter(condition), all_conditions)),
167 # filters are not new conditions but are the subset of conditions that pertain to event attributes
168 "filters": list(filter(lambda condition: _is_filter(condition), all_conditions)),
169 "actions": actions,
170 "actionMatch": obj.data.get("action_match") or Rule.DEFAULT_CONDITION_MATCH,
171 "filterMatch": obj.data.get("filter_match") or Rule.DEFAULT_FILTER_MATCH,
172 "frequency": obj.data.get("frequency") or Rule.DEFAULT_FREQUENCY,
173 "name": obj.label,
174 "dateCreated": obj.date_added,
175 "owner": attrs.get("owner", None),
176 "createdBy": attrs.get("created_by", None),
177 "environment": environment.name if environment is not None else None,
178 "projects": [obj.project.slug],
179 "status": "active" if obj.status == ObjectStatus.ACTIVE else "disabled",
180 }
181 if "last_triggered" in attrs:
182 d["lastTriggered"] = attrs["last_triggered"]
183
184 rule_snooze = RuleSnooze.objects.filter(Q(user_id=user.id) | Q(user_id=None), rule=obj)
185 if rule_snooze.exists():
186 d["snooze"] = True
187 snooze = rule_snooze[0]
188 if user.id == snooze.owner_id:
189 created_by = "You"
190 else:
191 creator_name = user_service.get_user(snooze.owner_id).get_display_name()
192 created_by = creator_name
193 d["snoozeCreatedBy"] = created_by
194 d["snoozeForEveryone"] = snooze.user_id is None
195 else:
196 d["snooze"] = False
197
198 return d
199
[end of src/sentry/api/serializers/models/rule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/api/serializers/models/rule.py b/src/sentry/api/serializers/models/rule.py
--- a/src/sentry/api/serializers/models/rule.py
+++ b/src/sentry/api/serializers/models/rule.py
@@ -15,6 +15,7 @@
actor_type_to_string,
)
from sentry.models.actor import Actor
+from sentry.models.rule import NeglectedRule
from sentry.models.rulefirehistory import RuleFireHistory
from sentry.models.rulesnooze import RuleSnooze
from sentry.services.hybrid_cloud.user.service import user_service
@@ -195,4 +196,13 @@
else:
d["snooze"] = False
+ try:
+ neglected_rule = NeglectedRule.objects.get(
+ rule=obj, organization=obj.project.organization_id, opted_out=False
+ )
+ d["disableReason"] = "noisy"
+ d["disableDate"] = neglected_rule.disable_date
+ except (NeglectedRule.DoesNotExist, NeglectedRule.MultipleObjectsReturned):
+ pass
+
return d
|
{"golden_diff": "diff --git a/src/sentry/api/serializers/models/rule.py b/src/sentry/api/serializers/models/rule.py\n--- a/src/sentry/api/serializers/models/rule.py\n+++ b/src/sentry/api/serializers/models/rule.py\n@@ -15,6 +15,7 @@\n actor_type_to_string,\n )\n from sentry.models.actor import Actor\n+from sentry.models.rule import NeglectedRule\n from sentry.models.rulefirehistory import RuleFireHistory\n from sentry.models.rulesnooze import RuleSnooze\n from sentry.services.hybrid_cloud.user.service import user_service\n@@ -195,4 +196,13 @@\n else:\n d[\"snooze\"] = False\n \n+ try:\n+ neglected_rule = NeglectedRule.objects.get(\n+ rule=obj, organization=obj.project.organization_id, opted_out=False\n+ )\n+ d[\"disableReason\"] = \"noisy\"\n+ d[\"disableDate\"] = neglected_rule.disable_date\n+ except (NeglectedRule.DoesNotExist, NeglectedRule.MultipleObjectsReturned):\n+ pass\n+\n return d\n", "issue": "[BE] Update the alerts response to include whether the alert was disabled due to being \u201cnoisy\u201d\nSimilar to https://github.com/getsentry/sentry/issues/55098 - update the response to include something like `disableReason: noisy` if a `NoisyIssueAlert` entry exists for that rule id. Also need to add a `noisyDisableDate` so the front end can know whether or not to display the banner. \n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import List\n\nfrom django.db.models import Max, Q, prefetch_related_objects\nfrom rest_framework import serializers\n\nfrom sentry.api.serializers import Serializer, register\nfrom sentry.constants import ObjectStatus\nfrom sentry.models import (\n ACTOR_TYPES,\n Environment,\n Rule,\n RuleActivity,\n RuleActivityType,\n actor_type_to_string,\n)\nfrom sentry.models.actor import Actor\nfrom sentry.models.rulefirehistory import RuleFireHistory\nfrom sentry.models.rulesnooze import RuleSnooze\nfrom sentry.services.hybrid_cloud.user.service import user_service\n\n\ndef _generate_rule_label(project, rule, data):\n from sentry.rules import rules\n\n rule_cls = rules.get(data[\"id\"])\n if rule_cls is None:\n return\n\n rule_inst = rule_cls(project, data=data, rule=rule)\n return rule_inst.render_label()\n\n\ndef _is_filter(data):\n from sentry.rules import rules\n\n rule_cls = rules.get(data[\"id\"])\n return rule_cls.rule_type == \"filter/event\"\n\n\n@register(Rule)\nclass RuleSerializer(Serializer):\n def __init__(self, expand=None):\n super().__init__()\n self.expand = expand or []\n\n def get_attrs(self, item_list, user, **kwargs):\n from sentry.services.hybrid_cloud.app import app_service\n\n prefetch_related_objects(item_list, \"project\")\n\n environments = Environment.objects.in_bulk(\n [_f for _f in [i.environment_id for i in item_list] if _f]\n )\n\n result = {i: {\"environment\": environments.get(i.environment_id)} for i in item_list}\n ras = list(\n RuleActivity.objects.filter(\n rule__in=item_list, type=RuleActivityType.CREATED.value\n ).select_related(\"rule\")\n )\n\n users = {\n u.id: u for u in user_service.get_many(filter=dict(user_ids=[ra.user_id for ra in ras]))\n }\n\n for rule_activity in ras:\n u = users.get(rule_activity.user_id)\n if u:\n user = {\n \"id\": u.id,\n \"name\": u.get_display_name(),\n \"email\": u.email,\n }\n else:\n user = None\n\n result[rule_activity.rule].update({\"created_by\": user})\n\n rules = {item.id: item for item in item_list}\n resolved_actors = {}\n owners_by_type = defaultdict(list)\n\n sentry_app_uuids = [\n sentry_app_uuid\n for sentry_app_uuid in (\n 
action.get(\"sentryAppInstallationUuid\")\n for rule in rules.values()\n for action in rule.data.get(\"actions\", [])\n )\n if sentry_app_uuid is not None\n ]\n\n sentry_app_ids: List[int] = [\n i.sentry_app.id for i in app_service.get_many(filter=dict(uuids=sentry_app_uuids))\n ]\n sentry_app_installations_by_uuid = app_service.get_related_sentry_app_components(\n organization_ids=[rule.project.organization_id for rule in rules.values()],\n sentry_app_ids=sentry_app_ids,\n type=\"alert-rule-action\",\n group_by=\"uuid\",\n )\n\n for item in item_list:\n if item.owner_id is not None:\n owners_by_type[actor_type_to_string(item.owner.type)].append(item.owner_id)\n\n for k, v in ACTOR_TYPES.items():\n actors = Actor.objects.filter(type=v, id__in=owners_by_type[k])\n if k == \"team\":\n resolved_actors[k] = {actor.id: actor.team_id for actor in actors}\n if k == \"user\":\n resolved_actors[k] = {actor.id: actor.user_id for actor in actors}\n\n for rule in rules.values():\n if rule.owner_id:\n type = actor_type_to_string(rule.owner.type)\n if rule.owner_id in resolved_actors[type]:\n result[rule][\"owner\"] = f\"{type}:{resolved_actors[type][rule.owner_id]}\"\n\n for action in rule.data.get(\"actions\", []):\n install = sentry_app_installations_by_uuid.get(\n str(action.get(\"sentryAppInstallationUuid\"))\n )\n if install:\n action[\"_sentry_app_component\"] = install.get(\"sentry_app_component\")\n action[\"_sentry_app_installation\"] = install.get(\"sentry_app_installation\")\n\n if \"lastTriggered\" in self.expand:\n last_triggered_lookup = {\n rfh[\"rule_id\"]: rfh[\"date_added\"]\n for rfh in RuleFireHistory.objects.filter(rule__in=item_list)\n .values(\"rule_id\")\n .annotate(date_added=Max(\"date_added\"))\n }\n for rule in item_list:\n result[rule][\"last_triggered\"] = last_triggered_lookup.get(rule.id, None)\n\n return result\n\n def serialize(self, obj, attrs, user, **kwargs):\n environment = attrs[\"environment\"]\n all_conditions = [\n dict(list(o.items()) + [(\"name\", _generate_rule_label(obj.project, obj, o))])\n for o in obj.data.get(\"conditions\", [])\n ]\n\n actions = []\n for action in obj.data.get(\"actions\", []):\n try:\n actions.append(\n dict(\n list(action.items())\n + [(\"name\", _generate_rule_label(obj.project, obj, action))]\n )\n )\n except serializers.ValidationError:\n # Integrations can be deleted and we don't want to fail to load the rule\n pass\n\n d = {\n # XXX(dcramer): we currently serialize unsaved rule objects\n # as part of the rule editor\n \"id\": str(obj.id) if obj.id else None,\n # conditions pertain to criteria that can trigger an alert\n \"conditions\": list(filter(lambda condition: not _is_filter(condition), all_conditions)),\n # filters are not new conditions but are the subset of conditions that pertain to event attributes\n \"filters\": list(filter(lambda condition: _is_filter(condition), all_conditions)),\n \"actions\": actions,\n \"actionMatch\": obj.data.get(\"action_match\") or Rule.DEFAULT_CONDITION_MATCH,\n \"filterMatch\": obj.data.get(\"filter_match\") or Rule.DEFAULT_FILTER_MATCH,\n \"frequency\": obj.data.get(\"frequency\") or Rule.DEFAULT_FREQUENCY,\n \"name\": obj.label,\n \"dateCreated\": obj.date_added,\n \"owner\": attrs.get(\"owner\", None),\n \"createdBy\": attrs.get(\"created_by\", None),\n \"environment\": environment.name if environment is not None else None,\n \"projects\": [obj.project.slug],\n \"status\": \"active\" if obj.status == ObjectStatus.ACTIVE else \"disabled\",\n }\n if \"last_triggered\" in attrs:\n 
d[\"lastTriggered\"] = attrs[\"last_triggered\"]\n\n rule_snooze = RuleSnooze.objects.filter(Q(user_id=user.id) | Q(user_id=None), rule=obj)\n if rule_snooze.exists():\n d[\"snooze\"] = True\n snooze = rule_snooze[0]\n if user.id == snooze.owner_id:\n created_by = \"You\"\n else:\n creator_name = user_service.get_user(snooze.owner_id).get_display_name()\n created_by = creator_name\n d[\"snoozeCreatedBy\"] = created_by\n d[\"snoozeForEveryone\"] = snooze.user_id is None\n else:\n d[\"snooze\"] = False\n\n return d\n", "path": "src/sentry/api/serializers/models/rule.py"}]}
| 2,731 | 244 |
gh_patches_debug_7009
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-4028
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lutris shell script generation leaves out overrides based on DLL selection for the runner
### Bug description
Executing `lutris -b /some/path.sh lutris:write-script/<some game>` generates environment variables and templates them into a shell script that is supposed to be suitable for starting the game. Because of the order that the environment is built in this method, the runner doesn't undergo its `prelaunch` and properly populate environment variables, so `get_launch_parameters` doesn't return the proper WINEDLLOVERRIDES and the generated script is incomplete, not taking advantage of DXVK and other important DLLs that should be used.
### How to Reproduce
Steps to reproduce the behavior:
1. Install a game
2. Configure DXVK or some other runner option that's enabled via WINEDLLOVERRIDE
3. At a terminal run `lutris -b /tmp/game.sh lutris:write-script/1` or swap the `1` for the game ID of the game you installed.
4. `grep WINEDLLOVERRIDES /tmp/game.sh` and note the lack of overrides for the things configured in step 2
### Expected behavior
I expect the generated script to be able to adequately launch the game without Lutris, using the options I'd configured in Lutris. For example, when launching a copy of the same game from lutris, I see the following:
```
$ xargs -0 -n 1 echo < /proc/396196/environ | grep WINEDLLOVERRIDES
WINEDLLOVERRIDES=d3d10core,d3d11,d3d12,d3d9,d3dcompiler_42,d3dcompiler_43,d3dcompiler_46,d3dcompiler_47,d3dx10,d3dx10_33,d3dx10_34,d3dx10_35,d3dx10_36,d3dx10_37,d3dx10_38,d3dx10_39,d3dx10_40,d3dx10_41,d3dx10_42,d3dx10_43,d3dx11_42,d3dx11_43,d3dx9_24,d3dx9_25,d3dx9_26,d3dx9_27,d3dx9_28,d3dx9_29,d3dx9_30,d3dx9_31,d3dx9_32,d3dx9_33,d3dx9_34,d3dx9_35,d3dx9_36,d3dx9_37,d3dx9_38,d3dx9_39,d3dx9_40,d3dx9_41,d3dx9_42,d3dx9_43,dxgi=n;winemenubuilder=
```
### Log output
```shell
$ lutris -b /tmp/gw2.sh lutris:write-script/1
2022-01-30 13:42:52,767: Magic not available. Unable to automatically find game executables. Please install python-magic
2022-01-30 13:42:52,792: Starting Lutris 0.5.9.1
2022-01-30 13:42:52,806: No cores found
2022-01-30 13:42:53,813: Failed to read content length on response from https://api.github.com/repos/lutris/dxvk-nvapi/releases
2022-01-30 13:42:54,141: Startup complete
2022-01-30 13:42:54,142: Running AMD Mesa driver 21.3.4 on AMD Radeon RX 5700 XT (NAVI10, DRM 3.42.0, 5.15.16-200.fc35.x86_64, LLVM 13.0.0) (0x731f)
2022-01-30 13:42:54,142: GPU: 1002:731F 1DA2:E410 (amdgpu drivers)
2022-01-30 13:42:54,212: Shutting down Lutris
$ grep WINEDLLOVERRIDES /tmp/gw2.sh
export WINEDLLOVERRIDES="winemenubuilder="
```
```
### System Information
```shell
[System]
OS: Fedora Linux 35
Arch: x86_64
Kernel: 5.15.16-200.fc35.x86_64
Desktop: Not found
Display Server: wayland
[CPU]
Vendor: AuthenticAMD
Model: AMD Ryzen 9 5950X 16-Core Processor
Physical cores: 16
Logical cores: 32
[Memory]
RAM: 62.8 GB
Swap: 8.0 GB
[Graphics]
Vendor: AMD
OpenGL Renderer: AMD Radeon RX 5700 XT (NAVI10, DRM 3.42.0, 5.15.16-200.fc35.x86_64, LLVM 13.0.0)
OpenGL Version: 4.6 (Compatibility Profile) Mesa 21.3.4
OpenGL Core: 4.6 (Core Profile) Mesa 21.3.4
OpenGL ES: OpenGL ES 3.2 Mesa 21.3.4
Vulkan: Supported
```
### Media (optional)
_No response_
### Checklist:
- [X] I'm not asking for support with a game or the wine runner.
- [X] I have followed the above mentioned guides and have all the graphics and wine dependencies installed.
- [X] I have checked for existing issues that describe my problem prior to opening this one.
- [X] I understand that improperly formatted bug reports may be closed without explanation.
</issue>
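One minimal direction for a fix, sketched against the `export_bash_script` function shown in the code below; it mirrors what a normal launch does and is illustrative rather than a guaranteed final implementation:

```python
# Sketch: run the runner's prelaunch step before collecting the environment,
# so runner-populated variables such as WINEDLLOVERRIDES are included.
def export_bash_script(runner, gameplay_info, script_path):
    """Convert runner configuration into a bash script"""
    if getattr(runner, "prelaunch", None) is not None:
        runner.prelaunch()
    command, env = get_launch_parameters(runner, gameplay_info)
    # ... remainder identical to the existing function ...
```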
<code>
[start of lutris/runner_interpreter.py]
1 """Transform runner parameters to data usable for runtime execution"""
2 import os
3 import shlex
4 import stat
5
6 from lutris.util import system
7 from lutris.util.linux import LINUX_SYSTEM
8 from lutris.util.log import logger
9
10
11 def get_mangohud_conf(system_config):
12 """Return correct launch arguments and environment variables for Mangohud."""
13 env = {"MANGOHUD": "1"}
14 mango_args = []
15 mangohud = system_config.get("mangohud") or ""
16 if mangohud and system.find_executable("mangohud"):
17 if mangohud == "gl64":
18 mango_args = ["mangohud"]
19 env["MANGOHUD_DLSYM"] = "1"
20 elif mangohud == "gl32":
21 mango_args = ["mangohud.x86"]
22 env["MANGOHUD_DLSYM"] = "1"
23 else:
24 mango_args = ["mangohud"]
25 return mango_args, env
26
27
28 def get_launch_parameters(runner, gameplay_info):
29 system_config = runner.system_config
30 launch_arguments = gameplay_info["command"]
31 env = {
32 "DISABLE_LAYER_AMD_SWITCHABLE_GRAPHICS_1": "1"
33 }
34
35 # Steam compatibility
36 if os.environ.get("SteamAppId"):
37 logger.info("Game launched from steam (AppId: %s)", os.environ["SteamAppId"])
38 env["LC_ALL"] = ""
39
40 # Optimus
41 optimus = system_config.get("optimus")
42 if optimus == "primusrun" and system.find_executable("primusrun"):
43 launch_arguments.insert(0, "primusrun")
44 elif optimus == "optirun" and system.find_executable("optirun"):
45 launch_arguments.insert(0, "virtualgl")
46 launch_arguments.insert(0, "-b")
47 launch_arguments.insert(0, "optirun")
48 elif optimus == "pvkrun" and system.find_executable("pvkrun"):
49 launch_arguments.insert(0, "pvkrun")
50
51 mango_args, mango_env = get_mangohud_conf(system_config)
52 if mango_args:
53 launch_arguments = mango_args + launch_arguments
54 env.update(mango_env)
55
56 # Libstrangle
57 fps_limit = system_config.get("fps_limit") or ""
58 if fps_limit:
59 strangle_cmd = system.find_executable("strangle")
60 if strangle_cmd:
61 launch_arguments = [strangle_cmd, fps_limit] + launch_arguments
62 else:
63 logger.warning("libstrangle is not available on this system, FPS limiter disabled")
64
65 prefix_command = system_config.get("prefix_command") or ""
66 if prefix_command:
67 launch_arguments = (shlex.split(os.path.expandvars(prefix_command)) + launch_arguments)
68
69 single_cpu = system_config.get("single_cpu") or False
70 if single_cpu:
71 logger.info("The game will run on a single CPU core")
72 launch_arguments.insert(0, "0")
73 launch_arguments.insert(0, "-c")
74 launch_arguments.insert(0, "taskset")
75
76 env.update(runner.get_env())
77
78 env.update(gameplay_info.get("env") or {})
79
80 # Set environment variables dependent on gameplay info
81
82 # LD_PRELOAD
83 ld_preload = gameplay_info.get("ld_preload")
84 if ld_preload:
85 env["LD_PRELOAD"] = ld_preload
86
87 # LD_LIBRARY_PATH
88 game_ld_libary_path = gameplay_info.get("ld_library_path")
89 if game_ld_libary_path:
90 ld_library_path = env.get("LD_LIBRARY_PATH")
91 if not ld_library_path:
92 ld_library_path = "$LD_LIBRARY_PATH"
93 env["LD_LIBRARY_PATH"] = ":".join([game_ld_libary_path, ld_library_path])
94
95 # Feral gamemode
96 gamemode = system_config.get("gamemode") and LINUX_SYSTEM.gamemode_available()
97 if gamemode:
98 launch_arguments.insert(0, "gamemoderun")
99
100 # Gamescope
101 gamescope = system_config.get("gamescope") and system.find_executable("gamescope")
102 if gamescope:
103 launch_arguments = get_gamescope_args(launch_arguments, system_config)
104
105 return launch_arguments, env
106
107
108 def get_gamescope_args(launch_arguments, system_config):
109 """Insert gamescope at the start of the launch arguments"""
110 launch_arguments.insert(0, "--")
111 launch_arguments.insert(0, "-f")
112 if system_config.get("gamescope_output_res"):
113 output_width, output_height = system_config["gamescope_output_res"].lower().split("x")
114 launch_arguments.insert(0, output_height)
115 launch_arguments.insert(0, "-H")
116 launch_arguments.insert(0, output_width)
117 launch_arguments.insert(0, "-W")
118 if system_config.get("gamescope_game_res"):
119 game_width, game_height = system_config["gamescope_game_res"].lower().split("x")
120 launch_arguments.insert(0, game_height)
121 launch_arguments.insert(0, "-h")
122 launch_arguments.insert(0, game_width)
123 launch_arguments.insert(0, "-w")
124 launch_arguments.insert(0, "gamescope")
125 return launch_arguments
126
127
128 def export_bash_script(runner, gameplay_info, script_path):
129 """Convert runner configuration into a bash script"""
130 command, env = get_launch_parameters(runner, gameplay_info)
131 # Override TERM otherwise the script might not run
132 env["TERM"] = "xterm"
133 script_content = "#!/bin/bash\n\n\n"
134 script_content += "# Environment variables\n"
135 for name, value in env.items():
136 script_content += 'export %s="%s"\n' % (name, value)
137 script_content += "\n# Command\n"
138 script_content += " ".join([shlex.quote(c) for c in command])
139 with open(script_path, "w", encoding='utf-8') as script_file:
140 script_file.write(script_content)
141
142 os.chmod(script_path, os.stat(script_path).st_mode | stat.S_IEXEC)
143
[end of lutris/runner_interpreter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/runner_interpreter.py b/lutris/runner_interpreter.py
--- a/lutris/runner_interpreter.py
+++ b/lutris/runner_interpreter.py
@@ -127,6 +127,8 @@
def export_bash_script(runner, gameplay_info, script_path):
"""Convert runner configuration into a bash script"""
+ if getattr(runner, 'prelaunch', None) is not None:
+ runner.prelaunch()
command, env = get_launch_parameters(runner, gameplay_info)
# Override TERM otherwise the script might not run
env["TERM"] = "xterm"
|
{"golden_diff": "diff --git a/lutris/runner_interpreter.py b/lutris/runner_interpreter.py\n--- a/lutris/runner_interpreter.py\n+++ b/lutris/runner_interpreter.py\n@@ -127,6 +127,8 @@\n \n def export_bash_script(runner, gameplay_info, script_path):\n \"\"\"Convert runner configuration into a bash script\"\"\"\n+ if getattr(runner, 'prelaunch', None) is not None:\n+ runner.prelaunch()\n command, env = get_launch_parameters(runner, gameplay_info)\n # Override TERM otherwise the script might not run\n env[\"TERM\"] = \"xterm\"\n", "issue": "Lutris shell script generation leaves out overrides based on DLL selection for the runner\n### Bug description\n\nExecuting `lutris -b /some/path.sh lutris:write-script/<some game>` generates environment variables and templates them into a shell script that is supposed to be suitable for starting the game. Because of the order that the environment is built in this method, the runner doesn't undergo its `prelaunch` and properly populate environment variables, so `get_launch_parameters` doesn't return the proper WINEDLLOVERRIDES and the generated script is incomplete, not taking advantage of DXVK and other important DLLs that should be used.\n\n### How to Reproduce\n\nSteps to reproduce the behavior:\r\n1. Install a game\r\n2. Configure DXVK or some other runner option that's enabled via WINEDLLOVERRIDE\r\n3. At a terminal run `lutris -b /tmp/game.sh lutris:write-script/1` or swap the `1` for the game ID of the game you installed.\r\n4. `grep WINEDLLOVERRIDES /tmp/game.sh` and note the lack of overrides for the things configured in step 2\r\n\n\n### Expected behavior\n\nI expect the generated script to be able to adequately launch the game without Lutris, using the options I'd configured in Lutris. For example, when launching a copy of the same game from lutris, I see the following:\r\n\r\n```\r\n$ xargs -0 -n 1 echo < /proc/396196/environ | grep WINEDLLOVERRIDES\r\nWINEDLLOVERRIDES=d3d10core,d3d11,d3d12,d3d9,d3dcompiler_42,d3dcompiler_43,d3dcompiler_46,d3dcompiler_47,d3dx10,d3dx10_33,d3dx10_34,d3dx10_35,d3dx10_36,d3dx10_37,d3dx10_38,d3dx10_39,d3dx10_40,d3dx10_41,d3dx10_42,d3dx10_43,d3dx11_42,d3dx11_43,d3dx9_24,d3dx9_25,d3dx9_26,d3dx9_27,d3dx9_28,d3dx9_29,d3dx9_30,d3dx9_31,d3dx9_32,d3dx9_33,d3dx9_34,d3dx9_35,d3dx9_36,d3dx9_37,d3dx9_38,d3dx9_39,d3dx9_40,d3dx9_41,d3dx9_42,d3dx9_43,dxgi=n;winemenubuilder=\r\n```\n\n### Log output\n\n```shell\n$ lutris -b /tmp/gw2.sh lutris:write-script/1\r\n2022-01-30 13:42:52,767: Magic not available. Unable to automatically find game executables. 
Please install python-magic\r\n2022-01-30 13:42:52,792: Starting Lutris 0.5.9.1\r\n2022-01-30 13:42:52,806: No cores found\r\n2022-01-30 13:42:53,813: Failed to read content length on response from https://api.github.com/repos/lutris/dxvk-nvapi/releases\r\n2022-01-30 13:42:54,141: Startup complete\r\n2022-01-30 13:42:54,142: Running AMD Mesa driver 21.3.4 on AMD Radeon RX 5700 XT (NAVI10, DRM 3.42.0, 5.15.16-200.fc35.x86_64, LLVM 13.0.0) (0x731f)\r\n2022-01-30 13:42:54,142: GPU: 1002:731F 1DA2:E410 (amdgpu drivers)\r\n2022-01-30 13:42:54,212: Shutting down Lutris\r\n$ grep WINEDLLOVERRIDES /tmp/gw2.sh\r\nexport WINEDLLOVERRIDES=\"winemenubuilder=\"\r\n```\n```\n\n\n### System Information\n\n```shell\n[System]\r\nOS: Fedora Linux 35 \r\nArch: x86_64\r\nKernel: 5.15.16-200.fc35.x86_64\r\nDesktop: Not found\r\nDisplay Server: wayland\r\n\r\n[CPU]\r\nVendor: AuthenticAMD\r\nModel: AMD Ryzen 9 5950X 16-Core Processor\r\nPhysical cores: 16\r\nLogical cores: 32\r\n\r\n[Memory]\r\nRAM: 62.8 GB\r\nSwap: 8.0 GB\r\n\r\n[Graphics]\r\nVendor: AMD\r\nOpenGL Renderer: AMD Radeon RX 5700 XT (NAVI10, DRM 3.42.0, 5.15.16-200.fc35.x86_64, LLVM 13.0.0)\r\nOpenGL Version: 4.6 (Compatibility Profile) Mesa 21.3.4\r\nOpenGL Core: 4.6 (Core Profile) Mesa 21.3.4\r\nOpenGL ES: OpenGL ES 3.2 Mesa 21.3.4\r\nVulkan: Supported\n```\n\n\n### Media (optional)\n\n_No response_\n\n### Checklist:\n\n- [X] I'm not asking for support with a game or the wine runner.\n- [X] I have followed the above mentioned guides and have all the graphics and wine dependencies installed.\n- [X] I have checked for existing issues that describe my problem prior to opening this one.\n- [X] I understand that improperly formatted bug reports may be closed without explanation.\n", "before_files": [{"content": "\"\"\"Transform runner parameters to data usable for runtime execution\"\"\"\nimport os\nimport shlex\nimport stat\n\nfrom lutris.util import system\nfrom lutris.util.linux import LINUX_SYSTEM\nfrom lutris.util.log import logger\n\n\ndef get_mangohud_conf(system_config):\n \"\"\"Return correct launch arguments and environment variables for Mangohud.\"\"\"\n env = {\"MANGOHUD\": \"1\"}\n mango_args = []\n mangohud = system_config.get(\"mangohud\") or \"\"\n if mangohud and system.find_executable(\"mangohud\"):\n if mangohud == \"gl64\":\n mango_args = [\"mangohud\"]\n env[\"MANGOHUD_DLSYM\"] = \"1\"\n elif mangohud == \"gl32\":\n mango_args = [\"mangohud.x86\"]\n env[\"MANGOHUD_DLSYM\"] = \"1\"\n else:\n mango_args = [\"mangohud\"]\n return mango_args, env\n\n\ndef get_launch_parameters(runner, gameplay_info):\n system_config = runner.system_config\n launch_arguments = gameplay_info[\"command\"]\n env = {\n \"DISABLE_LAYER_AMD_SWITCHABLE_GRAPHICS_1\": \"1\"\n }\n\n # Steam compatibility\n if os.environ.get(\"SteamAppId\"):\n logger.info(\"Game launched from steam (AppId: %s)\", os.environ[\"SteamAppId\"])\n env[\"LC_ALL\"] = \"\"\n\n # Optimus\n optimus = system_config.get(\"optimus\")\n if optimus == \"primusrun\" and system.find_executable(\"primusrun\"):\n launch_arguments.insert(0, \"primusrun\")\n elif optimus == \"optirun\" and system.find_executable(\"optirun\"):\n launch_arguments.insert(0, \"virtualgl\")\n launch_arguments.insert(0, \"-b\")\n launch_arguments.insert(0, \"optirun\")\n elif optimus == \"pvkrun\" and system.find_executable(\"pvkrun\"):\n launch_arguments.insert(0, \"pvkrun\")\n\n mango_args, mango_env = get_mangohud_conf(system_config)\n if mango_args:\n launch_arguments = mango_args + launch_arguments\n 
env.update(mango_env)\n\n # Libstrangle\n fps_limit = system_config.get(\"fps_limit\") or \"\"\n if fps_limit:\n strangle_cmd = system.find_executable(\"strangle\")\n if strangle_cmd:\n launch_arguments = [strangle_cmd, fps_limit] + launch_arguments\n else:\n logger.warning(\"libstrangle is not available on this system, FPS limiter disabled\")\n\n prefix_command = system_config.get(\"prefix_command\") or \"\"\n if prefix_command:\n launch_arguments = (shlex.split(os.path.expandvars(prefix_command)) + launch_arguments)\n\n single_cpu = system_config.get(\"single_cpu\") or False\n if single_cpu:\n logger.info(\"The game will run on a single CPU core\")\n launch_arguments.insert(0, \"0\")\n launch_arguments.insert(0, \"-c\")\n launch_arguments.insert(0, \"taskset\")\n\n env.update(runner.get_env())\n\n env.update(gameplay_info.get(\"env\") or {})\n\n # Set environment variables dependent on gameplay info\n\n # LD_PRELOAD\n ld_preload = gameplay_info.get(\"ld_preload\")\n if ld_preload:\n env[\"LD_PRELOAD\"] = ld_preload\n\n # LD_LIBRARY_PATH\n game_ld_libary_path = gameplay_info.get(\"ld_library_path\")\n if game_ld_libary_path:\n ld_library_path = env.get(\"LD_LIBRARY_PATH\")\n if not ld_library_path:\n ld_library_path = \"$LD_LIBRARY_PATH\"\n env[\"LD_LIBRARY_PATH\"] = \":\".join([game_ld_libary_path, ld_library_path])\n\n # Feral gamemode\n gamemode = system_config.get(\"gamemode\") and LINUX_SYSTEM.gamemode_available()\n if gamemode:\n launch_arguments.insert(0, \"gamemoderun\")\n\n # Gamescope\n gamescope = system_config.get(\"gamescope\") and system.find_executable(\"gamescope\")\n if gamescope:\n launch_arguments = get_gamescope_args(launch_arguments, system_config)\n\n return launch_arguments, env\n\n\ndef get_gamescope_args(launch_arguments, system_config):\n \"\"\"Insert gamescope at the start of the launch arguments\"\"\"\n launch_arguments.insert(0, \"--\")\n launch_arguments.insert(0, \"-f\")\n if system_config.get(\"gamescope_output_res\"):\n output_width, output_height = system_config[\"gamescope_output_res\"].lower().split(\"x\")\n launch_arguments.insert(0, output_height)\n launch_arguments.insert(0, \"-H\")\n launch_arguments.insert(0, output_width)\n launch_arguments.insert(0, \"-W\")\n if system_config.get(\"gamescope_game_res\"):\n game_width, game_height = system_config[\"gamescope_game_res\"].lower().split(\"x\")\n launch_arguments.insert(0, game_height)\n launch_arguments.insert(0, \"-h\")\n launch_arguments.insert(0, game_width)\n launch_arguments.insert(0, \"-w\")\n launch_arguments.insert(0, \"gamescope\")\n return launch_arguments\n\n\ndef export_bash_script(runner, gameplay_info, script_path):\n \"\"\"Convert runner configuration into a bash script\"\"\"\n command, env = get_launch_parameters(runner, gameplay_info)\n # Override TERM otherwise the script might not run\n env[\"TERM\"] = \"xterm\"\n script_content = \"#!/bin/bash\\n\\n\\n\"\n script_content += \"# Environment variables\\n\"\n for name, value in env.items():\n script_content += 'export %s=\"%s\"\\n' % (name, value)\n script_content += \"\\n# Command\\n\"\n script_content += \" \".join([shlex.quote(c) for c in command])\n with open(script_path, \"w\", encoding='utf-8') as script_file:\n script_file.write(script_content)\n\n os.chmod(script_path, os.stat(script_path).st_mode | stat.S_IEXEC)\n", "path": "lutris/runner_interpreter.py"}]}
| 3,586 | 142 |
gh_patches_debug_6568
|
rasdani/github-patches
|
git_diff
|
vllm-project__vllm-3638
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CI] Test examples in CI
### Anything you want to discuss about vllm.
Current scripts in `examples/` directory are not tested in CI. We should run them to ensure passing
</issue>
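A hypothetical smoke test along these lines could drive the example scripts from CI; the glob pattern, the timeout, and the assumption that every script can run unattended (this particular example needs S3 assets and a GPU) are all illustrative:

```python
# Illustrative pytest-style smoke test; real CI would also need hardware,
# network access for the example assets, and per-script arguments.
import glob
import subprocess
import sys


def test_examples_run():
    for script in sorted(glob.glob("examples/*.py")):
        subprocess.check_call([sys.executable, script], timeout=600)
```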
<code>
[start of examples/llava_example.py]
1 import argparse
2 import os
3 import subprocess
4
5 import torch
6
7 from vllm import LLM
8 from vllm.sequence import MultiModalData
9
10 # The assets are located at `s3://air-example-data-2/vllm_opensource_llava/`.
11
12
13 def run_llava_pixel_values():
14 llm = LLM(
15 model="llava-hf/llava-1.5-7b-hf",
16 image_input_type="pixel_values",
17 image_token_id=32000,
18 image_input_shape="1,3,336,336",
19 image_feature_size=576,
20 )
21
22 prompt = "<image>" * 576 + (
23 "\nUSER: What is the content of this image?\nASSISTANT:")
24
25 # This should be provided by another online or offline component.
26 images = torch.load("images/stop_sign_pixel_values.pt")
27
28 outputs = llm.generate(prompt,
29 multi_modal_data=MultiModalData(
30 type=MultiModalData.Type.IMAGE, data=images))
31 for o in outputs:
32 generated_text = o.outputs[0].text
33 print(generated_text)
34
35
36 def run_llava_image_features():
37 llm = LLM(
38 model="llava-hf/llava-1.5-7b-hf",
39 image_input_type="image_features",
40 image_token_id=32000,
41 image_input_shape="1,576,1024",
42 image_feature_size=576,
43 )
44
45 prompt = "<image>" * 576 + (
46 "\nUSER: What is the content of this image?\nASSISTANT:")
47
48 # This should be provided by another online or offline component.
49 images = torch.load("images/stop_sign_image_features.pt")
50
51 outputs = llm.generate(prompt,
52 multi_modal_data=MultiModalData(
53 type=MultiModalData.Type.IMAGE, data=images))
54 for o in outputs:
55 generated_text = o.outputs[0].text
56 print(generated_text)
57
58
59 def main(args):
60 if args.type == "pixel_values":
61 run_llava_pixel_values()
62 else:
63 run_llava_image_features()
64
65
66 if __name__ == "__main__":
67 parser = argparse.ArgumentParser(description="Demo on Llava")
68 parser.add_argument("--type",
69 type=str,
70 choices=["pixel_values", "image_features"],
71 default="pixel_values",
72 help="image input type")
73 args = parser.parse_args()
74 # Download from s3
75 s3_bucket_path = "s3://air-example-data-2/vllm_opensource_llava/"
76 local_directory = "images"
77
78 # Make sure the local directory exists or create it
79 os.makedirs(local_directory, exist_ok=True)
80
81 # Use AWS CLI to sync the directory
82 subprocess.check_call(
83 ["aws", "s3", "sync", s3_bucket_path, local_directory])
84 main(args)
85
[end of examples/llava_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/llava_example.py b/examples/llava_example.py
--- a/examples/llava_example.py
+++ b/examples/llava_example.py
@@ -78,7 +78,13 @@
# Make sure the local directory exists or create it
os.makedirs(local_directory, exist_ok=True)
- # Use AWS CLI to sync the directory
- subprocess.check_call(
- ["aws", "s3", "sync", s3_bucket_path, local_directory])
+ # Use AWS CLI to sync the directory, assume anonymous access
+ subprocess.check_call([
+ "aws",
+ "s3",
+ "sync",
+ s3_bucket_path,
+ local_directory,
+ "--no-sign-request",
+ ])
main(args)
|
{"golden_diff": "diff --git a/examples/llava_example.py b/examples/llava_example.py\n--- a/examples/llava_example.py\n+++ b/examples/llava_example.py\n@@ -78,7 +78,13 @@\n # Make sure the local directory exists or create it\n os.makedirs(local_directory, exist_ok=True)\n \n- # Use AWS CLI to sync the directory\n- subprocess.check_call(\n- [\"aws\", \"s3\", \"sync\", s3_bucket_path, local_directory])\n+ # Use AWS CLI to sync the directory, assume anonymous access\n+ subprocess.check_call([\n+ \"aws\",\n+ \"s3\",\n+ \"sync\",\n+ s3_bucket_path,\n+ local_directory,\n+ \"--no-sign-request\",\n+ ])\n main(args)\n", "issue": "[CI] Test examples in CI\n### Anything you want to discuss about vllm.\n\nCurrent scripts in `examples/` directory are not tested in CI. We should run them to ensure passing \n", "before_files": [{"content": "import argparse\nimport os\nimport subprocess\n\nimport torch\n\nfrom vllm import LLM\nfrom vllm.sequence import MultiModalData\n\n# The assets are located at `s3://air-example-data-2/vllm_opensource_llava/`.\n\n\ndef run_llava_pixel_values():\n llm = LLM(\n model=\"llava-hf/llava-1.5-7b-hf\",\n image_input_type=\"pixel_values\",\n image_token_id=32000,\n image_input_shape=\"1,3,336,336\",\n image_feature_size=576,\n )\n\n prompt = \"<image>\" * 576 + (\n \"\\nUSER: What is the content of this image?\\nASSISTANT:\")\n\n # This should be provided by another online or offline component.\n images = torch.load(\"images/stop_sign_pixel_values.pt\")\n\n outputs = llm.generate(prompt,\n multi_modal_data=MultiModalData(\n type=MultiModalData.Type.IMAGE, data=images))\n for o in outputs:\n generated_text = o.outputs[0].text\n print(generated_text)\n\n\ndef run_llava_image_features():\n llm = LLM(\n model=\"llava-hf/llava-1.5-7b-hf\",\n image_input_type=\"image_features\",\n image_token_id=32000,\n image_input_shape=\"1,576,1024\",\n image_feature_size=576,\n )\n\n prompt = \"<image>\" * 576 + (\n \"\\nUSER: What is the content of this image?\\nASSISTANT:\")\n\n # This should be provided by another online or offline component.\n images = torch.load(\"images/stop_sign_image_features.pt\")\n\n outputs = llm.generate(prompt,\n multi_modal_data=MultiModalData(\n type=MultiModalData.Type.IMAGE, data=images))\n for o in outputs:\n generated_text = o.outputs[0].text\n print(generated_text)\n\n\ndef main(args):\n if args.type == \"pixel_values\":\n run_llava_pixel_values()\n else:\n run_llava_image_features()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Demo on Llava\")\n parser.add_argument(\"--type\",\n type=str,\n choices=[\"pixel_values\", \"image_features\"],\n default=\"pixel_values\",\n help=\"image input type\")\n args = parser.parse_args()\n # Download from s3\n s3_bucket_path = \"s3://air-example-data-2/vllm_opensource_llava/\"\n local_directory = \"images\"\n\n # Make sure the local directory exists or create it\n os.makedirs(local_directory, exist_ok=True)\n\n # Use AWS CLI to sync the directory\n subprocess.check_call(\n [\"aws\", \"s3\", \"sync\", s3_bucket_path, local_directory])\n main(args)\n", "path": "examples/llava_example.py"}]}
| 1,386 | 172 |
gh_patches_debug_17874
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1605
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error on Android when converting selection value to String
Sample app:
```
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class AFV(toga.App):
def startup(self):
self.main_window = toga.MainWindow(title=self.formal_name)
box_test = toga.Box(style=Pack(direction=COLUMN, padding=5))
self.label_1 = toga.Label('TESTE 1')
self.comboBox_1 = toga.Selection(items=["ITEM 1", "ITEM 2", "ITEM 3"])
self.lineEdit_1 = toga.TextInput()
self.pushButton_1 = toga.Button('TESTE')
box_test.add(self.label_1, self.comboBox_1, self.lineEdit_1, self.pushButton_1)
self.pushButton_1.on_press = self.print_combo
self.main_window.content = box_test
self.main_window.show()
def print_combo(self, widget):
name_combo = self.comboBox_1.value
print(name_combo)
def main():
return AFV()
```
When the button is pressed, the error:
com.chaquo.python.PyException: AttributeError: 'str' object has no attribute 'toString'
is raised.
Using Briefcase 0.3.10; worked previously on Briefcase 0.3.9.
Error on Android when converting selection value to String
Sample app:
```
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class AFV(toga.App):
def startup(self):
self.main_window = toga.MainWindow(title=self.formal_name)
box_test = toga.Box(style=Pack(direction=COLUMN, padding=5))
self.label_1 = toga.Label('TESTE 1')
self.comboBox_1 = toga.Selection(items=["ITEM 1", "ITEM 2", "ITEM 3"])
self.lineEdit_1 = toga.TextInput()
self.pushButton_1 = toga.Button('TESTE')
box_test.add(self.label_1, self.comboBox_1, self.lineEdit_1, self.pushButton_1)
self.pushButton_1.on_press = self.print_combo
self.main_window.content = box_test
self.main_window.show()
def print_combo(self, widget):
name_combo = self.comboBox_1.value
print(name_combo)
def main():
return AFV()
```
When the button is pressed, the error:
com.chaquo.python.PyException: AttributeError: 'str' object has no attribute 'toString'
is raised.
Using Briefcase 0.3.10; worked previously on Briefcase 0.3.9.
</issue>
<code>
[start of src/android/toga_android/widgets/selection.py]
1 from travertino.size import at_least
2
3 from ..libs.android import R__layout
4 from ..libs.android.view import Gravity, View__MeasureSpec
5 from ..libs.android.widget import ArrayAdapter, OnItemSelectedListener, Spinner
6 from .base import Widget, align
7
8
9 class TogaOnItemSelectedListener(OnItemSelectedListener):
10 def __init__(self, impl):
11 super().__init__()
12 self._impl = impl
13
14 def onItemSelected(self, _parent, _view, _position, _id):
15 if self._impl.interface.on_select:
16 self._impl.interface.on_select(widget=self._impl.interface)
17
18
19 class Selection(Widget):
20 def create(self):
21 self.native = Spinner(self._native_activity, Spinner.MODE_DROPDOWN)
22 self.native.setOnItemSelectedListener(TogaOnItemSelectedListener(
23 impl=self
24 ))
25 # On Android, the list of options is provided to the `Spinner` wrapped in
26 # an `ArrayAdapter`. We store `self.adapter` to avoid having to typecast it
27 # in `add_item()`.
28 self.adapter = ArrayAdapter(
29 self._native_activity,
30 R__layout.simple_spinner_item
31 )
32 self.adapter.setDropDownViewResource(R__layout.simple_spinner_dropdown_item)
33 self.native.setAdapter(self.adapter)
34 # Create a mapping from text to numeric index to support `select_item()`.
35 self._indexByItem = {}
36
37 def add_item(self, item):
38 new_index = self.adapter.getCount()
39 self.adapter.add(str(item))
40 self._indexByItem[item] = new_index
41
42 def select_item(self, item):
43 self.native.setSelection(self._indexByItem[item])
44
45 def get_selected_item(self):
46 selected = self.native.getSelectedItem()
47 if selected:
48 return selected.toString()
49 else:
50 return None
51
52 def remove_all_items(self):
53 self.adapter.clear()
54
55 def rehint(self):
56 self.native.measure(
57 View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED
58 )
59 self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
60 self.interface.intrinsic.height = self.native.getMeasuredHeight()
61
62 def set_alignment(self, value):
63 self.native.setGravity(Gravity.CENTER_VERTICAL | align(value))
64
65 def set_on_select(self, handler):
66 # No special handling is required.
67 pass
68
[end of src/android/toga_android/widgets/selection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/android/toga_android/widgets/selection.py b/src/android/toga_android/widgets/selection.py
--- a/src/android/toga_android/widgets/selection.py
+++ b/src/android/toga_android/widgets/selection.py
@@ -22,9 +22,6 @@
self.native.setOnItemSelectedListener(TogaOnItemSelectedListener(
impl=self
))
- # On Android, the list of options is provided to the `Spinner` wrapped in
- # an `ArrayAdapter`. We store `self.adapter` to avoid having to typecast it
- # in `add_item()`.
self.adapter = ArrayAdapter(
self._native_activity,
R__layout.simple_spinner_item
@@ -45,7 +42,7 @@
def get_selected_item(self):
selected = self.native.getSelectedItem()
if selected:
- return selected.toString()
+ return str(selected)
else:
return None
|
{"golden_diff": "diff --git a/src/android/toga_android/widgets/selection.py b/src/android/toga_android/widgets/selection.py\n--- a/src/android/toga_android/widgets/selection.py\n+++ b/src/android/toga_android/widgets/selection.py\n@@ -22,9 +22,6 @@\n self.native.setOnItemSelectedListener(TogaOnItemSelectedListener(\n impl=self\n ))\n- # On Android, the list of options is provided to the `Spinner` wrapped in\n- # an `ArrayAdapter`. We store `self.adapter` to avoid having to typecast it\n- # in `add_item()`.\n self.adapter = ArrayAdapter(\n self._native_activity,\n R__layout.simple_spinner_item\n@@ -45,7 +42,7 @@\n def get_selected_item(self):\n selected = self.native.getSelectedItem()\n if selected:\n- return selected.toString()\n+ return str(selected)\n else:\n return None\n", "issue": "Error on Android when converting selection value to String\nSample app:\r\n```\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nclass AFV(toga.App):\r\n\r\n def startup(self):\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n\r\n box_test = toga.Box(style=Pack(direction=COLUMN, padding=5))\r\n self.label_1 = toga.Label('TESTE 1')\r\n self.comboBox_1 = toga.Selection(items=[\"ITEM 1\", \"ITEM 2\", \"ITEM 3\"])\r\n self.lineEdit_1 = toga.TextInput()\r\n self.pushButton_1 = toga.Button('TESTE')\r\n\r\n box_test.add(self.label_1, self.comboBox_1, self.lineEdit_1, self.pushButton_1)\r\n\r\n self.pushButton_1.on_press = self.print_combo\r\n\r\n self.main_window.content = box_test\r\n self.main_window.show()\r\n\r\n def print_combo(self, widget):\r\n name_combo = self.comboBox_1.value\r\n print(name_combo)\r\n\r\n\r\n\r\ndef main():\r\n return AFV()\r\n```\r\n\r\nWhen the button is pressed, the error:\r\n\r\n com.chaquo.python.PyException: AttributeError: 'str' object has no attribute 'toString'\r\n\r\nis raised.\r\n\r\nUsing Briefcase 0.3.10; worked previously on Briefcase 0.3.9.\nError on Android when converting selection value to String\nSample app:\r\n```\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nclass AFV(toga.App):\r\n\r\n def startup(self):\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n\r\n box_test = toga.Box(style=Pack(direction=COLUMN, padding=5))\r\n self.label_1 = toga.Label('TESTE 1')\r\n self.comboBox_1 = toga.Selection(items=[\"ITEM 1\", \"ITEM 2\", \"ITEM 3\"])\r\n self.lineEdit_1 = toga.TextInput()\r\n self.pushButton_1 = toga.Button('TESTE')\r\n\r\n box_test.add(self.label_1, self.comboBox_1, self.lineEdit_1, self.pushButton_1)\r\n\r\n self.pushButton_1.on_press = self.print_combo\r\n\r\n self.main_window.content = box_test\r\n self.main_window.show()\r\n\r\n def print_combo(self, widget):\r\n name_combo = self.comboBox_1.value\r\n print(name_combo)\r\n\r\n\r\n\r\ndef main():\r\n return AFV()\r\n```\r\n\r\nWhen the button is pressed, the error:\r\n\r\n com.chaquo.python.PyException: AttributeError: 'str' object has no attribute 'toString'\r\n\r\nis raised.\r\n\r\nUsing Briefcase 0.3.10; worked previously on Briefcase 0.3.9.\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom ..libs.android import R__layout\nfrom ..libs.android.view import Gravity, View__MeasureSpec\nfrom ..libs.android.widget import ArrayAdapter, OnItemSelectedListener, Spinner\nfrom .base import Widget, align\n\n\nclass TogaOnItemSelectedListener(OnItemSelectedListener):\n def __init__(self, impl):\n super().__init__()\n self._impl = impl\n\n def onItemSelected(self, 
_parent, _view, _position, _id):\n if self._impl.interface.on_select:\n self._impl.interface.on_select(widget=self._impl.interface)\n\n\nclass Selection(Widget):\n def create(self):\n self.native = Spinner(self._native_activity, Spinner.MODE_DROPDOWN)\n self.native.setOnItemSelectedListener(TogaOnItemSelectedListener(\n impl=self\n ))\n # On Android, the list of options is provided to the `Spinner` wrapped in\n # an `ArrayAdapter`. We store `self.adapter` to avoid having to typecast it\n # in `add_item()`.\n self.adapter = ArrayAdapter(\n self._native_activity,\n R__layout.simple_spinner_item\n )\n self.adapter.setDropDownViewResource(R__layout.simple_spinner_dropdown_item)\n self.native.setAdapter(self.adapter)\n # Create a mapping from text to numeric index to support `select_item()`.\n self._indexByItem = {}\n\n def add_item(self, item):\n new_index = self.adapter.getCount()\n self.adapter.add(str(item))\n self._indexByItem[item] = new_index\n\n def select_item(self, item):\n self.native.setSelection(self._indexByItem[item])\n\n def get_selected_item(self):\n selected = self.native.getSelectedItem()\n if selected:\n return selected.toString()\n else:\n return None\n\n def remove_all_items(self):\n self.adapter.clear()\n\n def rehint(self):\n self.native.measure(\n View__MeasureSpec.UNSPECIFIED, View__MeasureSpec.UNSPECIFIED\n )\n self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())\n self.interface.intrinsic.height = self.native.getMeasuredHeight()\n\n def set_alignment(self, value):\n self.native.setGravity(Gravity.CENTER_VERTICAL | align(value))\n\n def set_on_select(self, handler):\n # No special handling is required.\n pass\n", "path": "src/android/toga_android/widgets/selection.py"}]}
| 1,725 | 196 |
gh_patches_debug_11906
|
rasdani/github-patches
|
git_diff
|
holoviz__panel-4441
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Panel 1.0: Divider takes 100% of window height
I'm on the current `main` branch of Panel. When I use the `Divider` it takes up 100% of the window height.
```python
import panel as pn
pn.extension(sizing_mode="stretch_width")
pn.panel("Header", styles={"background": "lightgray"}).servable()
pn.layout.Divider(styles={"background": "salmon"}).servable()
pn.panel("Footer", styles={"background": "lightgray"}).servable()
```

I don't know if it is on purpose. But the `styles` seem not to apply to the `Divider` either.
</issue>
<code>
[start of panel/layout/spacer.py]
1 """
2 Spacer components to add horizontal or vertical space to a layout.
3 """
4
5 import param
6
7 from bokeh.models import Div as BkDiv, Spacer as BkSpacer
8
9 from ..reactive import Reactive
10
11
12 class Spacer(Reactive):
13 """
14 The `Spacer` layout is a very versatile component which makes it easy to
15 put fixed or responsive spacing between objects.
16
17 Like all other components spacers support both absolute and responsive
18 sizing modes.
19
20 Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
21
22 :Example:
23
24 >>> pn.Row(
25 ... 1, pn.Spacer(width=200),
26 ... 2, pn.Spacer(width=100),
27 ... 3
28 ... )
29 """
30
31 _bokeh_model = BkSpacer
32
33 def _get_model(self, doc, root=None, parent=None, comm=None):
34 properties = self._process_param_change(self._init_params())
35 model = self._bokeh_model(**properties)
36 if root is None:
37 root = model
38 self._models[root.ref['id']] = (model, parent)
39 return model
40
41
42 class VSpacer(Spacer):
43 """
44 The `VSpacer` layout provides responsive vertical spacing.
45
46 Using this component we can space objects equidistantly in a layout and
47 allow the empty space to shrink when the browser is resized.
48
49 Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
50
51 :Example:
52
53 >>> pn.Column(
54 ... pn.layout.VSpacer(), 'Item 1',
55 ... pn.layout.VSpacer(), 'Item 2',
56 ... pn.layout.VSpacer()
57 ... )
58 """
59
60 sizing_mode = param.Parameter(default='stretch_height', readonly=True)
61
62
63 class HSpacer(Spacer):
64 """
65 The `HSpacer` layout provides responsive vertical spacing.
66
67 Using this component we can space objects equidistantly in a layout and
68 allow the empty space to shrink when the browser is resized.
69
70 Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers
71
72 :Example:
73
74 >>> pn.Row(
75 ... pn.layout.HSpacer(), 'Item 1',
76 ... pn.layout.HSpacer(), 'Item 2',
77 ... pn.layout.HSpacer()
78 ... )
79 """
80
81 sizing_mode = param.Parameter(default='stretch_width', readonly=True)
82
83
84 class Divider(Reactive):
85 """
86 A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate
87 multiple components in a layout. It automatically spans the full width of
88 the container.
89
90 Reference: https://panel.holoviz.org/reference/layouts/Divider.html
91
92 :Example:
93
94 >>> pn.Column(
95 ... '# Lorem Ipsum',
96 ... pn.layout.Divider(),
97 ... 'A very long text... '
98 >>> )
99 """
100
101 width_policy = param.ObjectSelector(default="fit", readonly=True)
102
103 _bokeh_model = BkDiv
104
105 def _get_model(self, doc, root=None, parent=None, comm=None):
106 properties = self._process_param_change(self._init_params())
107 properties['styles'] = {'width': '100%', 'height': '100%'}
108 model = self._bokeh_model(text='<hr style="margin: 0px">', **properties)
109 if root is None:
110 root = model
111 self._models[root.ref['id']] = (model, parent)
112 return model
113
[end of panel/layout/spacer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/panel/layout/spacer.py b/panel/layout/spacer.py
--- a/panel/layout/spacer.py
+++ b/panel/layout/spacer.py
@@ -102,10 +102,11 @@
_bokeh_model = BkDiv
+ _stylesheets = ["css/divider.css"]
+
def _get_model(self, doc, root=None, parent=None, comm=None):
properties = self._process_param_change(self._init_params())
- properties['styles'] = {'width': '100%', 'height': '100%'}
- model = self._bokeh_model(text='<hr style="margin: 0px">', **properties)
+ model = self._bokeh_model(text='<hr>', **properties)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
|
{"golden_diff": "diff --git a/panel/layout/spacer.py b/panel/layout/spacer.py\n--- a/panel/layout/spacer.py\n+++ b/panel/layout/spacer.py\n@@ -102,10 +102,11 @@\n \n _bokeh_model = BkDiv\n \n+ _stylesheets = [\"css/divider.css\"]\n+\n def _get_model(self, doc, root=None, parent=None, comm=None):\n properties = self._process_param_change(self._init_params())\n- properties['styles'] = {'width': '100%', 'height': '100%'}\n- model = self._bokeh_model(text='<hr style=\"margin: 0px\">', **properties)\n+ model = self._bokeh_model(text='<hr>', **properties)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n", "issue": "Panel 1.0: Divider takes 100% of window height\nI'm on the current `main` branch of Panel. When I use the `Divider` it takes up 100% of the window height.\r\n\r\n```python\r\nimport panel as pn\r\n\r\npn.extension(sizing_mode=\"stretch_width\")\r\n\r\npn.panel(\"Header\", styles={\"background\": \"lightgray\"}).servable()\r\npn.layout.Divider(styles={\"background\": \"salmon\"}).servable()\r\npn.panel(\"Footer\", styles={\"background\": \"lightgray\"}).servable()\r\n```\r\n\r\n\r\n\r\nI don't know if it is on purpose. But the `styles` seem not to apply to the `Divider` either.\n", "before_files": [{"content": "\"\"\"\nSpacer components to add horizontal or vertical space to a layout.\n\"\"\"\n\nimport param\n\nfrom bokeh.models import Div as BkDiv, Spacer as BkSpacer\n\nfrom ..reactive import Reactive\n\n\nclass Spacer(Reactive):\n \"\"\"\n The `Spacer` layout is a very versatile component which makes it easy to\n put fixed or responsive spacing between objects.\n\n Like all other components spacers support both absolute and responsive\n sizing modes.\n\n Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers\n\n :Example:\n\n >>> pn.Row(\n ... 1, pn.Spacer(width=200),\n ... 2, pn.Spacer(width=100),\n ... 3\n ... )\n \"\"\"\n\n _bokeh_model = BkSpacer\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n properties = self._process_param_change(self._init_params())\n model = self._bokeh_model(**properties)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n\nclass VSpacer(Spacer):\n \"\"\"\n The `VSpacer` layout provides responsive vertical spacing.\n\n Using this component we can space objects equidistantly in a layout and\n allow the empty space to shrink when the browser is resized.\n\n Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers\n\n :Example:\n\n >>> pn.Column(\n ... pn.layout.VSpacer(), 'Item 1',\n ... pn.layout.VSpacer(), 'Item 2',\n ... pn.layout.VSpacer()\n ... )\n \"\"\"\n\n sizing_mode = param.Parameter(default='stretch_height', readonly=True)\n\n\nclass HSpacer(Spacer):\n \"\"\"\n The `HSpacer` layout provides responsive vertical spacing.\n\n Using this component we can space objects equidistantly in a layout and\n allow the empty space to shrink when the browser is resized.\n\n Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers\n\n :Example:\n\n >>> pn.Row(\n ... pn.layout.HSpacer(), 'Item 1',\n ... pn.layout.HSpacer(), 'Item 2',\n ... pn.layout.HSpacer()\n ... )\n \"\"\"\n\n sizing_mode = param.Parameter(default='stretch_width', readonly=True)\n\n\nclass Divider(Reactive):\n \"\"\"\n A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate\n multiple components in a layout. 
It automatically spans the full width of\n the container.\n\n Reference: https://panel.holoviz.org/reference/layouts/Divider.html\n\n :Example:\n\n >>> pn.Column(\n ... '# Lorem Ipsum',\n ... pn.layout.Divider(),\n ... 'A very long text... '\n >>> )\n \"\"\"\n\n width_policy = param.ObjectSelector(default=\"fit\", readonly=True)\n\n _bokeh_model = BkDiv\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n properties = self._process_param_change(self._init_params())\n properties['styles'] = {'width': '100%', 'height': '100%'}\n model = self._bokeh_model(text='<hr style=\"margin: 0px\">', **properties)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n", "path": "panel/layout/spacer.py"}]}
| 1,752 | 196 |
gh_patches_debug_305
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-337
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unified sentry-sdk integration does not have support to add stack trace in python logger using 'stack': True in extra dict.
Migration from raven to unified sentry sdk, affected extended functionalities to python logging provided by raven. _extra_from_record - excludes keywords 'stack' and 'data'. Is there a known workaround?
</issue>
<code>
[start of sentry_sdk/integrations/logging.py]
1 from __future__ import absolute_import
2
3 import logging
4 import datetime
5
6 from sentry_sdk.hub import Hub
7 from sentry_sdk.utils import (
8 to_string,
9 event_from_exception,
10 current_stacktrace,
11 capture_internal_exceptions,
12 )
13 from sentry_sdk.integrations import Integration
14
15 if False:
16 from logging import LogRecord
17 from typing import Any
18 from typing import Dict
19 from typing import Optional
20
21 DEFAULT_LEVEL = logging.INFO
22 DEFAULT_EVENT_LEVEL = logging.ERROR
23
24 _IGNORED_LOGGERS = set(["sentry_sdk.errors"])
25
26
27 def ignore_logger(name):
28 # type: (str) -> None
29 """This disables the breadcrumb integration for a logger of a specific
30 name. This primary use is for some integrations to disable breadcrumbs
31 of this integration.
32 """
33 _IGNORED_LOGGERS.add(name)
34
35
36 class LoggingIntegration(Integration):
37 identifier = "logging"
38
39 def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
40 # type: (int, int) -> None
41 self._handler = None
42 self._breadcrumb_handler = None
43
44 if level is not None:
45 self._breadcrumb_handler = BreadcrumbHandler(level=level)
46
47 if event_level is not None:
48 self._handler = EventHandler(level=event_level)
49
50 def _handle_record(self, record):
51 # type: (LogRecord) -> None
52 if self._handler is not None and record.levelno >= self._handler.level:
53 self._handler.handle(record)
54
55 if (
56 self._breadcrumb_handler is not None
57 and record.levelno >= self._breadcrumb_handler.level
58 ):
59 self._breadcrumb_handler.handle(record)
60
61 @staticmethod
62 def setup_once():
63 # type: () -> None
64 old_callhandlers = logging.Logger.callHandlers # type: ignore
65
66 def sentry_patched_callhandlers(self, record):
67 # type: (Any, LogRecord) -> Any
68 try:
69 return old_callhandlers(self, record)
70 finally:
71 # This check is done twice, once also here before we even get
72 # the integration. Otherwise we have a high chance of getting
73 # into a recursion error when the integration is resolved
74 # (this also is slower).
75 if record.name not in _IGNORED_LOGGERS:
76 integration = Hub.current.get_integration(LoggingIntegration)
77 if integration is not None:
78 integration._handle_record(record)
79
80 logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore
81
82
83 def _can_record(record):
84 # type: (LogRecord) -> bool
85 return record.name not in _IGNORED_LOGGERS
86
87
88 def _breadcrumb_from_record(record):
89 # type: (LogRecord) -> Dict[str, Any]
90 return {
91 "ty": "log",
92 "level": _logging_to_event_level(record.levelname),
93 "category": record.name,
94 "message": record.message,
95 "timestamp": datetime.datetime.fromtimestamp(record.created),
96 "data": _extra_from_record(record),
97 }
98
99
100 def _logging_to_event_level(levelname):
101 # type: (str) -> str
102 return {"critical": "fatal"}.get(levelname.lower(), levelname.lower())
103
104
105 COMMON_RECORD_ATTRS = frozenset(
106 (
107 "args",
108 "created",
109 "data",
110 "exc_info",
111 "exc_text",
112 "filename",
113 "funcName",
114 "levelname",
115 "levelno",
116 "linenno",
117 "lineno",
118 "message",
119 "module",
120 "msecs",
121 "msg",
122 "name",
123 "pathname",
124 "process",
125 "processName",
126 "relativeCreated",
127 "stack",
128 "tags",
129 "thread",
130 "threadName",
131 )
132 )
133
134
135 def _extra_from_record(record):
136 # type: (LogRecord) -> Dict[str, None]
137 return {
138 k: v
139 for k, v in vars(record).items()
140 if k not in COMMON_RECORD_ATTRS and not k.startswith("_")
141 }
142
143
144 class EventHandler(logging.Handler, object):
145 def emit(self, record):
146 # type: (LogRecord) -> Any
147 with capture_internal_exceptions():
148 self.format(record)
149 return self._emit(record)
150
151 def _emit(self, record):
152 # type: (LogRecord) -> None
153 if not _can_record(record):
154 return
155
156 hub = Hub.current
157 if hub.client is None:
158 return
159
160 hint = None # type: Optional[Dict[str, Any]]
161 client_options = hub.client.options
162
163 # exc_info might be None or (None, None, None)
164 if record.exc_info is not None and record.exc_info[0] is not None:
165 event, hint = event_from_exception(
166 record.exc_info,
167 client_options=client_options,
168 mechanism={"type": "logging", "handled": True},
169 )
170 elif record.exc_info and record.exc_info[0] is None:
171 event = {}
172 hint = None
173 with capture_internal_exceptions():
174 event["threads"] = [
175 {
176 "stacktrace": current_stacktrace(client_options["with_locals"]),
177 "crashed": False,
178 "current": True,
179 }
180 ]
181 else:
182 event = {}
183
184 event["level"] = _logging_to_event_level(record.levelname)
185 event["logger"] = record.name
186 event["logentry"] = {"message": to_string(record.msg), "params": record.args}
187 event["extra"] = _extra_from_record(record)
188
189 hub.capture_event(event, hint=hint)
190
191
192 # Legacy name
193 SentryHandler = EventHandler
194
195
196 class BreadcrumbHandler(logging.Handler, object):
197 def emit(self, record):
198 # type: (LogRecord) -> Any
199 with capture_internal_exceptions():
200 self.format(record)
201 return self._emit(record)
202
203 def _emit(self, record):
204 # type: (LogRecord) -> None
205 if not _can_record(record):
206 return
207
208 Hub.current.add_breadcrumb(
209 _breadcrumb_from_record(record), hint={"log_record": record}
210 )
211
[end of sentry_sdk/integrations/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py
--- a/sentry_sdk/integrations/logging.py
+++ b/sentry_sdk/integrations/logging.py
@@ -106,7 +106,6 @@
(
"args",
"created",
- "data",
"exc_info",
"exc_text",
"filename",
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py\n--- a/sentry_sdk/integrations/logging.py\n+++ b/sentry_sdk/integrations/logging.py\n@@ -106,7 +106,6 @@\n (\n \"args\",\n \"created\",\n- \"data\",\n \"exc_info\",\n \"exc_text\",\n \"filename\",\n", "issue": "Unified sentry-sdk integration does not have support to add stack trace in python logger using 'stack': True in extra dict.\nMigration from raven to unified sentry sdk, affected extended functionalities to python logging provided by raven. _extra_from_record - excludes keywords 'stack' and 'data'. Is there a known workaround?\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport datetime\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import (\n to_string,\n event_from_exception,\n current_stacktrace,\n capture_internal_exceptions,\n)\nfrom sentry_sdk.integrations import Integration\n\nif False:\n from logging import LogRecord\n from typing import Any\n from typing import Dict\n from typing import Optional\n\nDEFAULT_LEVEL = logging.INFO\nDEFAULT_EVENT_LEVEL = logging.ERROR\n\n_IGNORED_LOGGERS = set([\"sentry_sdk.errors\"])\n\n\ndef ignore_logger(name):\n # type: (str) -> None\n \"\"\"This disables the breadcrumb integration for a logger of a specific\n name. This primary use is for some integrations to disable breadcrumbs\n of this integration.\n \"\"\"\n _IGNORED_LOGGERS.add(name)\n\n\nclass LoggingIntegration(Integration):\n identifier = \"logging\"\n\n def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):\n # type: (int, int) -> None\n self._handler = None\n self._breadcrumb_handler = None\n\n if level is not None:\n self._breadcrumb_handler = BreadcrumbHandler(level=level)\n\n if event_level is not None:\n self._handler = EventHandler(level=event_level)\n\n def _handle_record(self, record):\n # type: (LogRecord) -> None\n if self._handler is not None and record.levelno >= self._handler.level:\n self._handler.handle(record)\n\n if (\n self._breadcrumb_handler is not None\n and record.levelno >= self._breadcrumb_handler.level\n ):\n self._breadcrumb_handler.handle(record)\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_callhandlers = logging.Logger.callHandlers # type: ignore\n\n def sentry_patched_callhandlers(self, record):\n # type: (Any, LogRecord) -> Any\n try:\n return old_callhandlers(self, record)\n finally:\n # This check is done twice, once also here before we even get\n # the integration. 
Otherwise we have a high chance of getting\n # into a recursion error when the integration is resolved\n # (this also is slower).\n if record.name not in _IGNORED_LOGGERS:\n integration = Hub.current.get_integration(LoggingIntegration)\n if integration is not None:\n integration._handle_record(record)\n\n logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore\n\n\ndef _can_record(record):\n # type: (LogRecord) -> bool\n return record.name not in _IGNORED_LOGGERS\n\n\ndef _breadcrumb_from_record(record):\n # type: (LogRecord) -> Dict[str, Any]\n return {\n \"ty\": \"log\",\n \"level\": _logging_to_event_level(record.levelname),\n \"category\": record.name,\n \"message\": record.message,\n \"timestamp\": datetime.datetime.fromtimestamp(record.created),\n \"data\": _extra_from_record(record),\n }\n\n\ndef _logging_to_event_level(levelname):\n # type: (str) -> str\n return {\"critical\": \"fatal\"}.get(levelname.lower(), levelname.lower())\n\n\nCOMMON_RECORD_ATTRS = frozenset(\n (\n \"args\",\n \"created\",\n \"data\",\n \"exc_info\",\n \"exc_text\",\n \"filename\",\n \"funcName\",\n \"levelname\",\n \"levelno\",\n \"linenno\",\n \"lineno\",\n \"message\",\n \"module\",\n \"msecs\",\n \"msg\",\n \"name\",\n \"pathname\",\n \"process\",\n \"processName\",\n \"relativeCreated\",\n \"stack\",\n \"tags\",\n \"thread\",\n \"threadName\",\n )\n)\n\n\ndef _extra_from_record(record):\n # type: (LogRecord) -> Dict[str, None]\n return {\n k: v\n for k, v in vars(record).items()\n if k not in COMMON_RECORD_ATTRS and not k.startswith(\"_\")\n }\n\n\nclass EventHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n hub = Hub.current\n if hub.client is None:\n return\n\n hint = None # type: Optional[Dict[str, Any]]\n client_options = hub.client.options\n\n # exc_info might be None or (None, None, None)\n if record.exc_info is not None and record.exc_info[0] is not None:\n event, hint = event_from_exception(\n record.exc_info,\n client_options=client_options,\n mechanism={\"type\": \"logging\", \"handled\": True},\n )\n elif record.exc_info and record.exc_info[0] is None:\n event = {}\n hint = None\n with capture_internal_exceptions():\n event[\"threads\"] = [\n {\n \"stacktrace\": current_stacktrace(client_options[\"with_locals\"]),\n \"crashed\": False,\n \"current\": True,\n }\n ]\n else:\n event = {}\n\n event[\"level\"] = _logging_to_event_level(record.levelname)\n event[\"logger\"] = record.name\n event[\"logentry\"] = {\"message\": to_string(record.msg), \"params\": record.args}\n event[\"extra\"] = _extra_from_record(record)\n\n hub.capture_event(event, hint=hint)\n\n\n# Legacy name\nSentryHandler = EventHandler\n\n\nclass BreadcrumbHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n Hub.current.add_breadcrumb(\n _breadcrumb_from_record(record), hint={\"log_record\": record}\n )\n", "path": "sentry_sdk/integrations/logging.py"}]}
| 2,462 | 87 |
gh_patches_debug_24522
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-569
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tests are stuck when running `make test`
I tried to use google's public dns as suggested by @sigmavirus24, but it didn't help.
``` shell
$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 brd 127.255.255.255 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp5s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
inet 192.168.0.17/24 brd 192.168.0.255 scope global enp5s0
valid_lft forever preferred_lft forever
inet6 fe80::beae:c5ff:fe65:dd71/64 scope link
valid_lft forever preferred_lft forever
$ cat /etc/resolv.conf
nameserver 8.8.8.8
nameserver 8.8.4.4
```
strace:
``` shell
$ strace -p 12676
Process 12676 attached
futex(0xbf8dd0, FUTEX_WAIT_PRIVATE, 0, NULL
```
This is gentoo, wicd and python 2.7. I think I had the same problem with Fedora and NetworkManager.
Also, last couple of lines from output:
```
test_oldapi (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok
test_proxy_conn_fail (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok
test_proxy_pooling (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok
test_proxy_pooling_ext (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok
test_proxy_verified (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok
test_redirect (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok
test_multi_setcookie (test.with_dummyserver.test_socketlevel.TestCookies) ... Exception in thread Thread-8:
Traceback (most recent call last):
File "/usr/lib64/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/home/tt/dev/urllib3/dummyserver/server.py", line 76, in run
self.server = self._start_server()
File "/home/tt/dev/urllib3/dummyserver/server.py", line 63, in _start_server
sock.bind((self.host, 0))
File "/usr/lib64/python2.7/socket.py", line 224, in meth
return getattr(self._sock,name)(*args)
error: getsockaddrarg: bad family
```
</issue>
<code>
[start of dummyserver/server.py]
1 #!/usr/bin/env python
2
3 """
4 Dummy server used for unit testing.
5 """
6 from __future__ import print_function
7
8 import errno
9 import logging
10 import os
11 import random
12 import string
13 import sys
14 import threading
15 import socket
16
17 from tornado.platform.auto import set_close_exec
18 import tornado.wsgi
19 import tornado.httpserver
20 import tornado.ioloop
21 import tornado.web
22
23
24 log = logging.getLogger(__name__)
25
26 CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')
27 DEFAULT_CERTS = {
28 'certfile': os.path.join(CERTS_PATH, 'server.crt'),
29 'keyfile': os.path.join(CERTS_PATH, 'server.key'),
30 }
31 NO_SAN_CERTS = {
32 'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'),
33 'keyfile': DEFAULT_CERTS['keyfile']
34 }
35 DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')
36 DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')
37 NO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem')
38
39
40 # Different types of servers we have:
41
42
43 class SocketServerThread(threading.Thread):
44 """
45 :param socket_handler: Callable which receives a socket argument for one
46 request.
47 :param ready_event: Event which gets set when the socket handler is
48 ready to receive requests.
49 """
50 def __init__(self, socket_handler, host='localhost', port=8081,
51 ready_event=None):
52 threading.Thread.__init__(self)
53 self.daemon = True
54
55 self.socket_handler = socket_handler
56 self.host = host
57 self.ready_event = ready_event
58
59 def _start_server(self):
60 sock = socket.socket(socket.AF_INET6)
61 if sys.platform != 'win32':
62 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
63 sock.bind((self.host, 0))
64 self.port = sock.getsockname()[1]
65
66 # Once listen() returns, the server socket is ready
67 sock.listen(0)
68
69 if self.ready_event:
70 self.ready_event.set()
71
72 self.socket_handler(sock)
73 sock.close()
74
75 def run(self):
76 self.server = self._start_server()
77
78
79 # FIXME: there is a pull request patching bind_sockets in Tornado directly.
80 # If it gets merged and released we can drop this and use
81 # `tornado.netutil.bind_sockets` again.
82 # https://github.com/facebook/tornado/pull/977
83
84 def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128,
85 flags=None):
86 """Creates listening sockets bound to the given port and address.
87
88 Returns a list of socket objects (multiple sockets are returned if
89 the given address maps to multiple IP addresses, which is most common
90 for mixed IPv4 and IPv6 use).
91
92 Address may be either an IP address or hostname. If it's a hostname,
93 the server will listen on all IP addresses associated with the
94 name. Address may be an empty string or None to listen on all
95 available interfaces. Family may be set to either `socket.AF_INET`
96 or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
97 both will be used if available.
98
99 The ``backlog`` argument has the same meaning as for
100 `socket.listen() <socket.socket.listen>`.
101
102 ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
103 ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
104 """
105 sockets = []
106 if address == "":
107 address = None
108 if not socket.has_ipv6 and family == socket.AF_UNSPEC:
109 # Python can be compiled with --disable-ipv6, which causes
110 # operations on AF_INET6 sockets to fail, but does not
111 # automatically exclude those results from getaddrinfo
112 # results.
113 # http://bugs.python.org/issue16208
114 family = socket.AF_INET
115 if flags is None:
116 flags = socket.AI_PASSIVE
117 binded_port = None
118 for res in set(socket.getaddrinfo(address, port, family,
119 socket.SOCK_STREAM, 0, flags)):
120 af, socktype, proto, canonname, sockaddr = res
121 try:
122 sock = socket.socket(af, socktype, proto)
123 except socket.error as e:
124 if e.args[0] == errno.EAFNOSUPPORT:
125 continue
126 raise
127 set_close_exec(sock.fileno())
128 if os.name != 'nt':
129 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
130 if af == socket.AF_INET6:
131 # On linux, ipv6 sockets accept ipv4 too by default,
132 # but this makes it impossible to bind to both
133 # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
134 # separate sockets *must* be used to listen for both ipv4
135 # and ipv6. For consistency, always disable ipv4 on our
136 # ipv6 sockets and use a separate ipv4 socket when needed.
137 #
138 # Python 2.x on windows doesn't have IPPROTO_IPV6.
139 if hasattr(socket, "IPPROTO_IPV6"):
140 sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
141
142 # automatic port allocation with port=None
143 # should bind on the same port on IPv4 and IPv6
144 host, requested_port = sockaddr[:2]
145 if requested_port == 0 and binded_port is not None:
146 sockaddr = tuple([host, binded_port] + list(sockaddr[2:]))
147
148 sock.setblocking(0)
149 sock.bind(sockaddr)
150 binded_port = sock.getsockname()[1]
151 sock.listen(backlog)
152 sockets.append(sock)
153 return sockets
154
155
156 def run_tornado_app(app, io_loop, certs, scheme, host):
157 if scheme == 'https':
158 http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs,
159 io_loop=io_loop)
160 else:
161 http_server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)
162
163 sockets = bind_sockets(None, address=host)
164 port = sockets[0].getsockname()[1]
165 http_server.add_sockets(sockets)
166 return http_server, port
167
168
169 def run_loop_in_thread(io_loop):
170 t = threading.Thread(target=io_loop.start)
171 t.start()
172 return t
173
174
175 def get_unreachable_address():
176 while True:
177 host = ''.join(random.choice(string.ascii_lowercase)
178 for _ in range(60))
179 sockaddr = (host, 54321)
180
181 # check if we are really "lucky" and hit an actual server
182 try:
183 s = socket.create_connection(sockaddr)
184 except socket.error:
185 return sockaddr
186 else:
187 s.close()
188
189
190 if __name__ == '__main__':
191 # For debugging dummyserver itself - python -m dummyserver.server
192 from .testcase import TestingApp
193 host = '127.0.0.1'
194
195 io_loop = tornado.ioloop.IOLoop()
196 app = tornado.wsgi.WSGIContainer(TestingApp())
197 server, port = run_tornado_app(app, io_loop, None,
198 'http', host)
199 server_thread = run_loop_in_thread(io_loop)
200
201 print("Listening on http://{host}:{port}".format(host=host, port=port))
202
[end of dummyserver/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dummyserver/server.py b/dummyserver/server.py
--- a/dummyserver/server.py
+++ b/dummyserver/server.py
@@ -13,6 +13,9 @@
import sys
import threading
import socket
+import warnings
+
+from urllib3.exceptions import HTTPWarning
from tornado.platform.auto import set_close_exec
import tornado.wsgi
@@ -40,6 +43,11 @@
# Different types of servers we have:
+class NoIPv6Warning(HTTPWarning):
+ "IPv6 is not available"
+ pass
+
+
class SocketServerThread(threading.Thread):
"""
:param socket_handler: Callable which receives a socket argument for one
@@ -57,7 +65,12 @@
self.ready_event = ready_event
def _start_server(self):
- sock = socket.socket(socket.AF_INET6)
+ if socket.has_ipv6:
+ sock = socket.socket(socket.AF_INET6)
+ else:
+ warnings.warn("No IPv6 support. Falling back to IPv4.",
+ NoIPv6Warning)
+ sock = socket.socket(socket.AF_INET)
if sys.platform != 'win32':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.host, 0))
|
{"golden_diff": "diff --git a/dummyserver/server.py b/dummyserver/server.py\n--- a/dummyserver/server.py\n+++ b/dummyserver/server.py\n@@ -13,6 +13,9 @@\n import sys\n import threading\n import socket\n+import warnings\n+\n+from urllib3.exceptions import HTTPWarning\n \n from tornado.platform.auto import set_close_exec\n import tornado.wsgi\n@@ -40,6 +43,11 @@\n # Different types of servers we have:\n \n \n+class NoIPv6Warning(HTTPWarning):\n+ \"IPv6 is not available\"\n+ pass\n+\n+\n class SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n@@ -57,7 +65,12 @@\n self.ready_event = ready_event\n \n def _start_server(self):\n- sock = socket.socket(socket.AF_INET6)\n+ if socket.has_ipv6:\n+ sock = socket.socket(socket.AF_INET6)\n+ else:\n+ warnings.warn(\"No IPv6 support. Falling back to IPv4.\",\n+ NoIPv6Warning)\n+ sock = socket.socket(socket.AF_INET)\n if sys.platform != 'win32':\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, 0))\n", "issue": "tests are stuck when running `make test`\nI tried to use google's public dns as suggested by @sigmavirus24, but it didn't help.\n\n``` shell\n$ ip a\n1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default \n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00\n inet 127.0.0.1/8 brd 127.255.255.255 scope host lo\n valid_lft forever preferred_lft forever\n inet6 ::1/128 scope host \n valid_lft forever preferred_lft forever\n2: enp5s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000\n inet 192.168.0.17/24 brd 192.168.0.255 scope global enp5s0\n valid_lft forever preferred_lft forever\n inet6 fe80::beae:c5ff:fe65:dd71/64 scope link \n valid_lft forever preferred_lft forever\n$ cat /etc/resolv.conf\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n```\n\nstrace:\n\n``` shell\n$ strace -p 12676\nProcess 12676 attached\nfutex(0xbf8dd0, FUTEX_WAIT_PRIVATE, 0, NULL\n```\n\nThis is gentoo, wicd and python 2.7. I think I had the same problem with Fedora and NetworkManager.\n\nAlso, last couple of lines from output:\n\n```\ntest_oldapi (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok\ntest_proxy_conn_fail (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok\ntest_proxy_pooling (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok\ntest_proxy_pooling_ext (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok\ntest_proxy_verified (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok\ntest_redirect (test.with_dummyserver.test_proxy_poolmanager.TestHTTPProxyManager) ... ok\ntest_multi_setcookie (test.with_dummyserver.test_socketlevel.TestCookies) ... 
Exception in thread Thread-8:\nTraceback (most recent call last):\n File \"/usr/lib64/python2.7/threading.py\", line 810, in __bootstrap_inner\n self.run()\n File \"/home/tt/dev/urllib3/dummyserver/server.py\", line 76, in run\n self.server = self._start_server()\n File \"/home/tt/dev/urllib3/dummyserver/server.py\", line 63, in _start_server\n sock.bind((self.host, 0))\n File \"/usr/lib64/python2.7/socket.py\", line 224, in meth\n return getattr(self._sock,name)(*args)\nerror: getsockaddrarg: bad family\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\nfrom __future__ import print_function\n\nimport errno\nimport logging\nimport os\nimport random\nimport string\nimport sys\nimport threading\nimport socket\n\nfrom tornado.platform.auto import set_close_exec\nimport tornado.wsgi\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\n\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')\nDEFAULT_CERTS = {\n 'certfile': os.path.join(CERTS_PATH, 'server.crt'),\n 'keyfile': os.path.join(CERTS_PATH, 'server.key'),\n}\nNO_SAN_CERTS = {\n 'certfile': os.path.join(CERTS_PATH, 'server.no_san.crt'),\n 'keyfile': DEFAULT_CERTS['keyfile']\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')\nDEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')\nNO_SAN_CA = os.path.join(CERTS_PATH, 'cacert.no_san.pem')\n\n\n# Different types of servers we have:\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n def __init__(self, socket_handler, host='localhost', port=8081,\n ready_event=None):\n threading.Thread.__init__(self)\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self):\n sock = socket.socket(socket.AF_INET6)\n if sys.platform != 'win32':\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(0)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n sock.close()\n\n def run(self):\n self.server = self._start_server()\n\n\n# FIXME: there is a pull request patching bind_sockets in Tornado directly.\n# If it gets merged and released we can drop this and use\n# `tornado.netutil.bind_sockets` again.\n# https://github.com/facebook/tornado/pull/977\n\ndef bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128,\n flags=None):\n \"\"\"Creates listening sockets bound to the given port and address.\n\n Returns a list of socket objects (multiple sockets are returned if\n the given address maps to multiple IP addresses, which is most common\n for mixed IPv4 and IPv6 use).\n\n Address may be either an IP address or hostname. If it's a hostname,\n the server will listen on all IP addresses associated with the\n name. Address may be an empty string or None to listen on all\n available interfaces. 
Family may be set to either `socket.AF_INET`\n or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise\n both will be used if available.\n\n The ``backlog`` argument has the same meaning as for\n `socket.listen() <socket.socket.listen>`.\n\n ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like\n ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.\n \"\"\"\n sockets = []\n if address == \"\":\n address = None\n if not socket.has_ipv6 and family == socket.AF_UNSPEC:\n # Python can be compiled with --disable-ipv6, which causes\n # operations on AF_INET6 sockets to fail, but does not\n # automatically exclude those results from getaddrinfo\n # results.\n # http://bugs.python.org/issue16208\n family = socket.AF_INET\n if flags is None:\n flags = socket.AI_PASSIVE\n binded_port = None\n for res in set(socket.getaddrinfo(address, port, family,\n socket.SOCK_STREAM, 0, flags)):\n af, socktype, proto, canonname, sockaddr = res\n try:\n sock = socket.socket(af, socktype, proto)\n except socket.error as e:\n if e.args[0] == errno.EAFNOSUPPORT:\n continue\n raise\n set_close_exec(sock.fileno())\n if os.name != 'nt':\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if af == socket.AF_INET6:\n # On linux, ipv6 sockets accept ipv4 too by default,\n # but this makes it impossible to bind to both\n # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,\n # separate sockets *must* be used to listen for both ipv4\n # and ipv6. For consistency, always disable ipv4 on our\n # ipv6 sockets and use a separate ipv4 socket when needed.\n #\n # Python 2.x on windows doesn't have IPPROTO_IPV6.\n if hasattr(socket, \"IPPROTO_IPV6\"):\n sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)\n\n # automatic port allocation with port=None\n # should bind on the same port on IPv4 and IPv6\n host, requested_port = sockaddr[:2]\n if requested_port == 0 and binded_port is not None:\n sockaddr = tuple([host, binded_port] + list(sockaddr[2:]))\n\n sock.setblocking(0)\n sock.bind(sockaddr)\n binded_port = sock.getsockname()[1]\n sock.listen(backlog)\n sockets.append(sock)\n return sockets\n\n\ndef run_tornado_app(app, io_loop, certs, scheme, host):\n if scheme == 'https':\n http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs,\n io_loop=io_loop)\n else:\n http_server = tornado.httpserver.HTTPServer(app, io_loop=io_loop)\n\n sockets = bind_sockets(None, address=host)\n port = sockets[0].getsockname()[1]\n http_server.add_sockets(sockets)\n return http_server, port\n\n\ndef run_loop_in_thread(io_loop):\n t = threading.Thread(target=io_loop.start)\n t.start()\n return t\n\n\ndef get_unreachable_address():\n while True:\n host = ''.join(random.choice(string.ascii_lowercase)\n for _ in range(60))\n sockaddr = (host, 54321)\n\n # check if we are really \"lucky\" and hit an actual server\n try:\n s = socket.create_connection(sockaddr)\n except socket.error:\n return sockaddr\n else:\n s.close()\n\n\nif __name__ == '__main__':\n # For debugging dummyserver itself - python -m dummyserver.server\n from .testcase import TestingApp\n host = '127.0.0.1'\n\n io_loop = tornado.ioloop.IOLoop()\n app = tornado.wsgi.WSGIContainer(TestingApp())\n server, port = run_tornado_app(app, io_loop, None,\n 'http', host)\n server_thread = run_loop_in_thread(io_loop)\n\n print(\"Listening on http://{host}:{port}\".format(host=host, port=port))\n", "path": "dummyserver/server.py"}]}
| 3,415 | 284 |
gh_patches_debug_12454
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-659
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
!GetAtt reference lint error
Hello,
Getting validation error on `AWS::ElasticLoadBalancingV2::Listener` property `LoadBalancerArn` when the `LoadBalancerArn` is referenced using !GetAtt nested-stack-name.Outputs.LoadbalancerArn
`[cfn-lint] E3008:CloudFormation stack outputs need to be strings not lists at Resources/ApiGwNlbListener/Properties/LoadBalancerArn/Fn::GetAtt`
</issue>
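For orientation before reading the rule below: when `Fn::GetAtt` is written as a single dotted string, the rule splits it into a resource name plus an attribute path and then special-cases `AWS::CloudFormation::Stack` outputs. A minimal sketch of that split (resource and attribute names here are illustrative, not taken from the reporter's template):

```python
# Hypothetical reproduction of the structure the rule sees for a nested-stack output.
get_att_value = "NestedStack.Outputs.LoadBalancerArn"

resource_name = get_att_value.split(".")[0]        # "NestedStack"
resource_attribute = get_att_value.split(".")[1:]  # ["Outputs", "LoadBalancerArn"]
print(resource_name, resource_attribute)
```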
<code>
[start of src/cfnlint/rules/resources/properties/ValueRefGetAtt.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import six
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20 from cfnlint.helpers import RESOURCE_SPECS
21 import cfnlint.helpers
22
23
24 class ValueRefGetAtt(CloudFormationLintRule):
25 """Check if Resource Properties are correct"""
26 id = 'E3008'
27 shortdesc = 'Check values of properties for valid Refs and GetAtts'
28 description = 'Checks resource properties for Ref and GetAtt values'
29 tags = ['resources']
30
31 def initialize(self, cfn):
32 """Initialize the rule"""
33 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
34 self.resource_property_types.append(resource_type_spec)
35 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
36 self.resource_sub_property_types.append(property_type_spec)
37
38 def is_value_a_list(self, path, property_name):
39 """
40 Determines if the value checked is a list or a value in a list
41 We need to handle conditions in the path that could be nested, etc.
42 ['Resources', 'LoadBalancer', 'Properties', 'Subnets', 'Fn::If', 2, 'Fn::If', 2]
43 Numbers preceeded by a Fn::If should be removed and check repeated.
44 """
45 if path[-1] != property_name:
46 # Property doesn't match the property name
47 # Check if its a number and a condition
48 if isinstance(path[-1], int) and path[-2] == 'Fn::If':
49 return self.is_value_a_list(path[:-2], property_name)
50
51 return False
52
53 return True
54
55 def check_value_ref(self, value, path, **kwargs):
56 """Check Ref"""
57 matches = list()
58 cfn = kwargs.get('cfn')
59 value_specs = kwargs.get('value_specs', {}).get('Ref')
60 list_value_specs = kwargs.get('list_value_specs', {}).get('Ref')
61 property_type = kwargs.get('property_type')
62 property_name = kwargs.get('property_name')
63 if path[-1] == 'Ref' and property_type == 'List' and self.is_value_a_list(path[:-1], property_name):
64 specs = list_value_specs
65 else:
66 specs = value_specs
67
68 if not specs:
69 # If no Ref's are specified, just skip
70 # Opposite of GetAtt you will always have a Ref to a Parameter so if this is
71 # None it just hasn't been defined and we can skip
72 return matches
73
74 if value in cfn.template.get('Parameters', {}):
75 param = cfn.template.get('Parameters').get(value, {})
76 parameter_type = param.get('Type')
77 valid_parameter_types = []
78 for parameter in specs.get('Parameters'):
79 for param_type in RESOURCE_SPECS.get(cfn.regions[0]).get('ParameterTypes').get(parameter):
80 valid_parameter_types.append(param_type)
81
82 if not specs.get('Parameters'):
83 message = 'Property "{0}" has no valid Refs to Parameters at {1}'
84 matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))
85 elif parameter_type not in valid_parameter_types:
86 message = 'Property "{0}" can Ref to parameter of types [{1}] at {2}'
87 matches.append(
88 RuleMatch(
89 path,
90 message.format(
91 property_name,
92 ', '.join(map(str, valid_parameter_types)),
93 '/'.join(map(str, path)))))
94 if value in cfn.template.get('Resources', {}):
95 resource = cfn.template.get('Resources').get(value, {})
96 resource_type = resource.get('Type')
97 if not specs.get('Resources'):
98 message = 'Property "{0}" has no valid Refs to Resources at {1}'
99 matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))
100 elif resource_type not in specs.get('Resources'):
101 message = 'Property "{0}" can Ref to resources of types [{1}] at {2}'
102 matches.append(
103 RuleMatch(
104 path,
105 message.format(
106 property_name,
107 ', '.join(map(str, specs.get('Resources'))),
108 '/'.join(map(str, path)))))
109
110 return matches
111
112 def check_value_getatt(self, value, path, **kwargs):
113 """Check GetAtt"""
114 matches = []
115 cfn = kwargs.get('cfn')
116 value_specs = kwargs.get('value_specs', {}).get('GetAtt')
117 list_value_specs = kwargs.get('list_value_specs', {}).get('GetAtt')
118 property_type = kwargs.get('property_type')
119 property_name = kwargs.get('property_name')
120 # You can sometimes get a list or a string with . in it
121 if isinstance(value, list):
122 resource_name = value[0]
123 resource_attribute = value[1:]
124 elif isinstance(value, six.string_types):
125 resource_name = value.split('.')[0]
126 resource_attribute = value.split('.')[1:]
127 is_value_a_list = self.is_value_a_list(path[:-1], property_name)
128 if path[-1] == 'Fn::GetAtt' and property_type == 'List' and is_value_a_list:
129 specs = list_value_specs
130 else:
131 specs = value_specs
132
133 resource_type = cfn.template.get('Resources', {}).get(resource_name, {}).get('Type')
134
135 if cfnlint.helpers.is_custom_resource(resource_type):
136 # A custom resource voids the spec. Move on
137 return matches
138
139 if resource_type == 'AWS::CloudFormation::Stack' and resource_attribute[0] == 'Outputs':
140 # Nested Stack Outputs
141 # if its a string type we are good and return matches
142 # if its a list its a failure as Outputs can only be strings
143
144 if is_value_a_list:
145 message = 'CloudFormation stack outputs need to be strings not lists at {0}'
146 matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
147
148 return matches
149
150 if specs is None:
151 # GetAtt specs aren't specified skip
152 return matches
153 if not specs:
154 # GetAtt is specified but empty so there are no valid options
155 message = 'Property "{0}" has no valid Fn::GetAtt options at {1}'
156 matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))
157 return matches
158
159 if resource_type not in specs:
160 message = 'Property "{0}" can Fn::GetAtt to a resource of types [{1}] at {2}'
161 matches.append(
162 RuleMatch(
163 path,
164 message.format(
165 property_name,
166 ', '.join(map(str, specs)),
167 '/'.join(map(str, path)))))
168 elif '.'.join(map(str, resource_attribute)) != specs[resource_type]:
169 message = 'Property "{0}" can Fn::GetAtt to a resource attribute "{1}" at {2}'
170 matches.append(
171 RuleMatch(
172 path,
173 message.format(
174 property_name,
175 specs[resource_type],
176 '/'.join(map(str, path)))))
177
178 return matches
179
180 def check(self, cfn, properties, value_specs, property_specs, path):
181 """Check itself"""
182 matches = list()
183 for p_value, p_path in properties.items_safe(path[:]):
184 for prop in p_value:
185 if prop in value_specs:
186 value = value_specs.get(prop).get('Value', {})
187 if value:
188 value_type = value.get('ValueType', '')
189 list_value_type = value.get('ListValueType', '')
190 property_type = property_specs.get('Properties').get(prop).get('Type')
191 matches.extend(
192 cfn.check_value(
193 p_value, prop, p_path,
194 check_ref=self.check_value_ref,
195 check_get_att=self.check_value_getatt,
196 value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
197 list_value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(list_value_type, {}),
198 cfn=cfn, property_type=property_type, property_name=prop
199 )
200 )
201
202 return matches
203
204 def match_resource_sub_properties(self, properties, property_type, path, cfn):
205 """Match for sub properties"""
206 matches = list()
207
208 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
209 property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
210 matches.extend(self.check(cfn, properties, specs, property_specs, path))
211
212 return matches
213
214 def match_resource_properties(self, properties, resource_type, path, cfn):
215 """Check CloudFormation Properties"""
216 matches = list()
217
218 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
219 resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
220 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
221
222 return matches
223
[end of src/cfnlint/rules/resources/properties/ValueRefGetAtt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py
--- a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py
+++ b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py
@@ -140,8 +140,7 @@
# Nested Stack Outputs
# if its a string type we are good and return matches
# if its a list its a failure as Outputs can only be strings
-
- if is_value_a_list:
+ if is_value_a_list and property_type == 'List':
message = 'CloudFormation stack outputs need to be strings not lists at {0}'
matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py\n--- a/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py\n+++ b/src/cfnlint/rules/resources/properties/ValueRefGetAtt.py\n@@ -140,8 +140,7 @@\n # Nested Stack Outputs\n # if its a string type we are good and return matches\n # if its a list its a failure as Outputs can only be strings\n-\n- if is_value_a_list:\n+ if is_value_a_list and property_type == 'List':\n message = 'CloudFormation stack outputs need to be strings not lists at {0}'\n matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))\n", "issue": "!GetAtt reference lint error\nHello,\r\n\r\nGetting validation error on `AWS::ElasticLoadBalancingV2::Listener` property `LoadBalancerArn` when the `LoadBalancerArn` is referenced using !GetAtt nested-stack-name.Outputs.LoadbalancerArn\r\n\r\n`[cfn-lint] E3008:CloudFormation stack outputs need to be strings not lists at Resources/ApiGwNlbListener/Properties/LoadBalancerArn/Fn::GetAtt\r\n`\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nfrom cfnlint.helpers import RESOURCE_SPECS\nimport cfnlint.helpers\n\n\nclass ValueRefGetAtt(CloudFormationLintRule):\n \"\"\"Check if Resource Properties are correct\"\"\"\n id = 'E3008'\n shortdesc = 'Check values of properties for valid Refs and GetAtts'\n description = 'Checks resource properties for Ref and GetAtt values'\n tags = ['resources']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def is_value_a_list(self, path, property_name):\n \"\"\"\n Determines if the value checked is a list or a value in a list\n We need to handle conditions in the path that could be nested, etc.\n ['Resources', 'LoadBalancer', 'Properties', 'Subnets', 'Fn::If', 2, 'Fn::If', 2]\n Numbers preceeded by a Fn::If should be removed and check repeated.\n \"\"\"\n if path[-1] != property_name:\n # Property doesn't match the property name\n # Check if its a number and a condition\n if isinstance(path[-1], int) and path[-2] == 'Fn::If':\n return self.is_value_a_list(path[:-2], property_name)\n\n return False\n\n return True\n\n def check_value_ref(self, value, path, **kwargs):\n \"\"\"Check Ref\"\"\"\n matches = list()\n cfn = kwargs.get('cfn')\n value_specs = kwargs.get('value_specs', {}).get('Ref')\n list_value_specs = kwargs.get('list_value_specs', {}).get('Ref')\n property_type = kwargs.get('property_type')\n property_name = kwargs.get('property_name')\n if path[-1] == 'Ref' and property_type == 'List' and self.is_value_a_list(path[:-1], property_name):\n specs = list_value_specs\n else:\n specs = value_specs\n\n if not specs:\n # If no Ref's are specified, just skip\n # Opposite of GetAtt you will always have a Ref to a Parameter so if this is\n # None it just hasn't been defined and we can skip\n return matches\n\n if value in cfn.template.get('Parameters', {}):\n param = cfn.template.get('Parameters').get(value, {})\n parameter_type = param.get('Type')\n valid_parameter_types = []\n for parameter in specs.get('Parameters'):\n for param_type in RESOURCE_SPECS.get(cfn.regions[0]).get('ParameterTypes').get(parameter):\n valid_parameter_types.append(param_type)\n\n if not specs.get('Parameters'):\n message = 'Property \"{0}\" has no valid Refs to Parameters at {1}'\n matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))\n elif parameter_type not in valid_parameter_types:\n message = 'Property \"{0}\" can Ref to parameter of types [{1}] at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n ', '.join(map(str, valid_parameter_types)),\n '/'.join(map(str, path)))))\n if value in cfn.template.get('Resources', {}):\n resource = cfn.template.get('Resources').get(value, {})\n resource_type = resource.get('Type')\n if not specs.get('Resources'):\n message = 'Property \"{0}\" has no valid Refs to Resources at {1}'\n matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, 
path)))))\n elif resource_type not in specs.get('Resources'):\n message = 'Property \"{0}\" can Ref to resources of types [{1}] at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n ', '.join(map(str, specs.get('Resources'))),\n '/'.join(map(str, path)))))\n\n return matches\n\n def check_value_getatt(self, value, path, **kwargs):\n \"\"\"Check GetAtt\"\"\"\n matches = []\n cfn = kwargs.get('cfn')\n value_specs = kwargs.get('value_specs', {}).get('GetAtt')\n list_value_specs = kwargs.get('list_value_specs', {}).get('GetAtt')\n property_type = kwargs.get('property_type')\n property_name = kwargs.get('property_name')\n # You can sometimes get a list or a string with . in it\n if isinstance(value, list):\n resource_name = value[0]\n resource_attribute = value[1:]\n elif isinstance(value, six.string_types):\n resource_name = value.split('.')[0]\n resource_attribute = value.split('.')[1:]\n is_value_a_list = self.is_value_a_list(path[:-1], property_name)\n if path[-1] == 'Fn::GetAtt' and property_type == 'List' and is_value_a_list:\n specs = list_value_specs\n else:\n specs = value_specs\n\n resource_type = cfn.template.get('Resources', {}).get(resource_name, {}).get('Type')\n\n if cfnlint.helpers.is_custom_resource(resource_type):\n # A custom resource voids the spec. Move on\n return matches\n\n if resource_type == 'AWS::CloudFormation::Stack' and resource_attribute[0] == 'Outputs':\n # Nested Stack Outputs\n # if its a string type we are good and return matches\n # if its a list its a failure as Outputs can only be strings\n\n if is_value_a_list:\n message = 'CloudFormation stack outputs need to be strings not lists at {0}'\n matches.append(RuleMatch(path, message.format('/'.join(map(str, path)))))\n\n return matches\n\n if specs is None:\n # GetAtt specs aren't specified skip\n return matches\n if not specs:\n # GetAtt is specified but empty so there are no valid options\n message = 'Property \"{0}\" has no valid Fn::GetAtt options at {1}'\n matches.append(RuleMatch(path, message.format(property_name, '/'.join(map(str, path)))))\n return matches\n\n if resource_type not in specs:\n message = 'Property \"{0}\" can Fn::GetAtt to a resource of types [{1}] at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n ', '.join(map(str, specs)),\n '/'.join(map(str, path)))))\n elif '.'.join(map(str, resource_attribute)) != specs[resource_type]:\n message = 'Property \"{0}\" can Fn::GetAtt to a resource attribute \"{1}\" at {2}'\n matches.append(\n RuleMatch(\n path,\n message.format(\n property_name,\n specs[resource_type],\n '/'.join(map(str, path)))))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n list_value_type = value.get('ListValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_ref=self.check_value_ref,\n check_get_att=self.check_value_getatt,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n list_value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(list_value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n\n return matches\n\n def 
match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValueRefGetAtt.py"}]}
| 3,392 | 177 |
gh_patches_debug_9461
|
rasdani/github-patches
|
git_diff
|
AnalogJ__lexicon-111
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unable to install transip dependencies or use transip plugin
Output: pip install dns-lexicon[transip]
```
Requirement already satisfied: dns-lexicon[transip] in ./lib/python2.7/site-packages
Requirement already satisfied: requests in ./lib/python2.7/site-packages (from dns-lexicon[transip])
Requirement already satisfied: future in ./lib/python2.7/site-packages (from dns-lexicon[transip])
Requirement already satisfied: tldextract in ./lib/python2.7/site-packages (from dns-lexicon[transip])
Collecting transip==0.1.0-dev; extra == "transip" (from dns-lexicon[transip])
Could not find a version that satisfies the requirement transip==0.1.0-dev; extra == "transip" (from dns-lexicon[transip]) (from versions: 0.2)
No matching distribution found for transip==0.1.0-dev; extra == "transip" (from dns-lexicon[transip])
```
After manually installing the transip package, I get the following error:
```
Namespace(action='list', auth_api_key='../test-acme/private', auth_username='foobar', content='foo', delegated=None, domain='example.org', identifier=None, name='foo', priority=None, provider_name='transip', ttl=None, type='NS')
Traceback (most recent call last):
File "./bin/lexicon", line 11, in <module>
sys.exit(main())
File "/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/__main__.py", line 56, in main
client.execute()
File "/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/client.py", line 36, in execute
self.provider.authenticate()
File "/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/providers/transip.py", line 43, in authenticate
self.client.get_info(domain)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/transip/service/domain.py", line 26, in get_info
cookie = self.build_cookie(mode=MODE_RO, method='getInfo', parameters=[domain_name])
File "/home/muller/lexicon/local/lib/python2.7/site-packages/transip/client.py", line 111, in build_cookie
timestamp=timestamp, nonce=nonce, additional=parameters))
File "/home/muller/lexicon/local/lib/python2.7/site-packages/transip/client.py", line 51, in _sign
privkey = rsa.PrivateKey.load_pkcs1(keydata)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 75, in load_pkcs1
return method(keyfile)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 511, in _load_pkcs1_pem
return cls._load_pkcs1_der(der)
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 459, in _load_pkcs1_der
as_ints = tuple(int(x) for x in priv[1:9])
File "/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py", line 459, in <genexpr>
as_ints = tuple(int(x) for x in priv[1:9])
TypeError: int() argument must be a string or a number, not 'Sequence'
```
</issue>
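Two separate problems are reported above: the extra pins a pre-release (`0.1.0-dev`) that does not exist on PyPI, and the `0.2` release that pip can find instead fails while loading the private key. A hedged sketch of what a resolvable extras declaration could look like if the provider depended on a released version (the exact minimum version is an assumption for illustration):

```python
# Illustrative only: resolve the extra from PyPI releases instead of a
# git-only "-dev" pin that needs dependency_links.
extras_require = {
    "route53": ["boto3"],
    "transip": ["transip>=0.3.0"],  # assumed minimum released version
}
```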
<code>
[start of setup.py]
1 """A setuptools based setup module.
2
3 See:
4 https://packaging.python.org/en/latest/distributing.html
5 https://github.com/pypa/sampleproject
6 """
7
8 # Always prefer setuptools over distutils
9 from setuptools import setup, find_packages
10 # To use a consistent encoding
11 from codecs import open
12 from os import path, listdir
13
14 version = 'unknown'
15 with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
16 version = version_file.read().strip()
17
18 here = path.abspath(path.dirname(__file__))
19
20 # Get the long description from the README file
21 with open(path.join(here, 'README.md'), encoding='utf-8') as f:
22 long_description = f.read()
23
24 # Get a list of all the providers
25 current_filepath = path.join(here, 'lexicon', 'providers')
26 providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
27 providers = list(set(providers))
28 providers.remove('base')
29 providers.remove('__init__')
30
31 setup(
32 name='dns-lexicon',
33
34 # Versions should comply with PEP440. For a discussion on single-sourcing
35 # the version across setup.py and the project code, see
36 # https://packaging.python.org/en/latest/single_source_version.html
37 version=version,
38
39 description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
40 long_description=long_description,
41
42 # The project's main homepage.
43 url='https://github.com/AnalogJ/lexicon',
44
45 # Author details
46 author='Jason Kulatunga',
47 author_email='[email protected]',
48
49 license='MIT',
50
51 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
52 classifiers=[
53 'Development Status :: 5 - Production/Stable',
54
55 'Intended Audience :: Developers',
56 'Intended Audience :: System Administrators',
57 'Topic :: Software Development :: Libraries :: Python Modules',
58 'Topic :: Internet :: Name Service (DNS)',
59 'Topic :: System :: Systems Administration',
60 'Topic :: Utilities',
61
62 'License :: OSI Approved :: MIT License',
63
64 # Specify the Python versions you support here. In particular, ensure
65 # that you indicate whether you support Python 2, Python 3 or both.
66 'Programming Language :: Python :: 2',
67 'Programming Language :: Python :: 2.7',
68 'Programming Language :: Python :: 3',
69 'Programming Language :: Python :: 3.3',
70 'Programming Language :: Python :: 3.4',
71 'Programming Language :: Python :: 3.5',
72 ],
73
74 keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
75
76 packages=find_packages(exclude=['contrib', 'docs', 'tests']),
77
78 # List run-time dependencies here. These will be installed by pip when
79 # your project is installed. For an analysis of "install_requires" vs pip's
80 # requirements files see:
81 # https://packaging.python.org/en/latest/requirements.html
82 install_requires=['requests', 'tldextract', 'future'],
83
84 # Each dependency group in extras_require should match a provider name
85 # When adding a new depenency group here, please ensure that it has been
86 # added to optional-requirements.txt as well.
87 extras_require={
88 'route53': ['boto3'],
89 'transip': ['transip==0.1.0-dev']
90 },
91 dependency_links = ['git+https://github.com/benkonrath/transip-api.git#egg=transip-0.1.0-dev'],
92
93 # To provide executable scripts, use entry points in preference to the
94 # "scripts" keyword. Entry points provide cross-platform support and allow
95 # pip to create the appropriate form of executable for the target platform.
96 entry_points={
97 'console_scripts': [
98 'lexicon=lexicon.__main__:main',
99 ],
100 },
101 test_suite='tests'
102 )
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -86,9 +86,8 @@
# added to optional-requirements.txt as well.
extras_require={
'route53': ['boto3'],
- 'transip': ['transip==0.1.0-dev']
+ 'transip': ['transip>=0.3.0']
},
- dependency_links = ['git+https://github.com/benkonrath/transip-api.git#egg=transip-0.1.0-dev'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -86,9 +86,8 @@\n # added to optional-requirements.txt as well.\n extras_require={\n 'route53': ['boto3'],\n- 'transip': ['transip==0.1.0-dev']\n+ 'transip': ['transip>=0.3.0']\n },\n- dependency_links = ['git+https://github.com/benkonrath/transip-api.git#egg=transip-0.1.0-dev'],\n \n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. Entry points provide cross-platform support and allow\n", "issue": "unable to install transip dependencies or use transip plugin\nOutput: pip install dns-lexicon[transip]\r\n```\r\nRequirement already satisfied: dns-lexicon[transip] in ./lib/python2.7/site-packages\r\nRequirement already satisfied: requests in ./lib/python2.7/site-packages (from dns-lexicon[transip])\r\nRequirement already satisfied: future in ./lib/python2.7/site-packages (from dns-lexicon[transip])\r\nRequirement already satisfied: tldextract in ./lib/python2.7/site-packages (from dns-lexicon[transip])\r\nCollecting transip==0.1.0-dev; extra == \"transip\" (from dns-lexicon[transip])\r\n Could not find a version that satisfies the requirement transip==0.1.0-dev; extra == \"transip\" (from dns-lexicon[transip]) (from versions: 0.2)\r\nNo matching distribution found for transip==0.1.0-dev; extra == \"transip\" (from dns-lexicon[transip])\r\n```\r\n\r\nafter manual installing the transip package i get the following error\r\n\r\n```\r\nNamespace(action='list', auth_api_key='../test-acme/private', auth_username='foobar', content='foo', delegated=None, domain='example.org', identifier=None, name='foo', priority=None, provider_name='transip', ttl=None, type='NS')\r\nTraceback (most recent call last):\r\n File \"./bin/lexicon\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/__main__.py\", line 56, in main\r\n client.execute()\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/client.py\", line 36, in execute\r\n self.provider.authenticate()\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/lexicon/providers/transip.py\", line 43, in authenticate\r\n self.client.get_info(domain)\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/transip/service/domain.py\", line 26, in get_info\r\n cookie = self.build_cookie(mode=MODE_RO, method='getInfo', parameters=[domain_name])\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/transip/client.py\", line 111, in build_cookie\r\n timestamp=timestamp, nonce=nonce, additional=parameters))\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/transip/client.py\", line 51, in _sign\r\n privkey = rsa.PrivateKey.load_pkcs1(keydata)\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py\", line 75, in load_pkcs1\r\n return method(keyfile)\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py\", line 511, in _load_pkcs1_pem\r\n return cls._load_pkcs1_der(der)\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py\", line 459, in _load_pkcs1_der\r\n as_ints = tuple(int(x) for x in priv[1:9])\r\n File \"/home/muller/lexicon/local/lib/python2.7/site-packages/rsa/key.py\", line 459, in <genexpr>\r\n as_ints = tuple(int(x) for x in priv[1:9])\r\nTypeError: int() argument must be a string or a number, not 'Sequence'\r\n\r\n```\n", "before_files": [{"content": "\"\"\"A setuptools based setup 
module.\n\nSee:\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path, listdir\n\nversion = 'unknown'\nwith open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:\n version = version_file.read().strip()\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n# Get a list of all the providers\ncurrent_filepath = path.join(here, 'lexicon', 'providers')\nproviders = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]\nproviders = list(set(providers))\nproviders.remove('base')\nproviders.remove('__init__')\n\nsetup(\n name='dns-lexicon',\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version=version,\n\n description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',\n long_description=long_description,\n\n # The project's main homepage.\n url='https://github.com/AnalogJ/lexicon',\n\n # Author details\n author='Jason Kulatunga',\n author_email='[email protected]',\n\n license='MIT',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: Name Service (DNS)',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n\n keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),\n\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=['requests', 'tldextract', 'future'],\n\n # Each dependency group in extras_require should match a provider name\n # When adding a new depenency group here, please ensure that it has been\n # added to optional-requirements.txt as well.\n extras_require={\n 'route53': ['boto3'],\n 'transip': ['transip==0.1.0-dev']\n },\n dependency_links = ['git+https://github.com/benkonrath/transip-api.git#egg=transip-0.1.0-dev'],\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. 
Entry points provide cross-platform support and allow\n # pip to create the appropriate form of executable for the target platform.\n entry_points={\n 'console_scripts': [\n 'lexicon=lexicon.__main__:main',\n ],\n },\n test_suite='tests'\n)\n", "path": "setup.py"}]}
| 2,423 | 159 |
gh_patches_debug_17973
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1164
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Batch requests accept additional attributes
We should probably forbid them.
```
> echo '{"requests": [], "foo": {}}' | http post http://localhost:8888/v1/batch
HTTP/1.1 200 OK
Access-Control-Expose-Headers: Retry-After, Alert, Content-Length, Backoff
Content-Length: 16
Content-Type: application/json
Date: Thu, 16 Mar 2017 23:01:01 GMT
Server: waitress
X-Content-Type-Options: nosniff
{
"responses": []
}
```
Edit: I just noticed that we currently test for batch ignoring additional attributes. Is this a feature?
</issue>
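The behaviour comes down to how a colander mapping treats unknown keys: the default silently drops them, while `unknown='raise'` turns them into a validation error. A minimal, self-contained sketch of that distinction (the schema here is simplified, not the real batch schema):

```python
import colander

class StrictPayload(colander.MappingSchema):
    @staticmethod
    def schema_type():
        # Default Mapping(unknown='ignore') discards extras such as "foo";
        # 'raise' rejects the whole payload instead.
        return colander.Mapping(unknown="raise")

    requests = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.Mapping(unknown="preserve")),
    )

try:
    StrictPayload().deserialize({"requests": [], "foo": {}})
except colander.Invalid as exc:
    print(exc.asdict())
```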
<code>
[start of kinto/core/views/batch.py]
1 import logging
2
3 import colander
4 from cornice.validators import colander_validator
5 from pyramid import httpexceptions
6 from pyramid.security import NO_PERMISSION_REQUIRED
7
8 from kinto.core import errors
9 from kinto.core import Service
10 from kinto.core.errors import ErrorSchema
11 from kinto.core.utils import merge_dicts, build_request, build_response
12
13
14 subrequest_logger = logging.getLogger("subrequest.summary")
15
16 valid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',
17 'POST', 'PUT', 'PATCH'))
18
19
20 def string_values(node, cstruct):
21 """Validate that a ``colander.Mapping`` only has strings in its values.
22
23 .. warning::
24
25 Should be associated to a ``colander.Mapping`` schema node.
26 """
27 are_strings = [isinstance(v, str) for v in cstruct.values()]
28 if not all(are_strings):
29 error_msg = '{} contains non string value'.format(cstruct)
30 raise colander.Invalid(node, error_msg)
31
32
33 class BatchRequestSchema(colander.MappingSchema):
34 method = colander.SchemaNode(colander.String(),
35 validator=valid_http_method,
36 missing=colander.drop)
37 path = colander.SchemaNode(colander.String(),
38 validator=colander.Regex('^/'))
39 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
40 validator=string_values,
41 missing=colander.drop)
42 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
43 missing=colander.drop)
44
45
46 class BatchPayloadSchema(colander.MappingSchema):
47 defaults = BatchRequestSchema(missing=colander.drop).clone()
48 requests = colander.SchemaNode(colander.Sequence(),
49 BatchRequestSchema())
50
51 def __init__(self, *args, **kwargs):
52 super().__init__(*args, **kwargs)
53 # On defaults, path is not mandatory.
54 self.get('defaults').get('path').missing = colander.drop
55
56 def deserialize(self, cstruct=colander.null):
57 """Preprocess received data to carefully merge defaults.
58 """
59 if cstruct is not colander.null:
60 defaults = cstruct.get('defaults')
61 requests = cstruct.get('requests')
62 if isinstance(defaults, dict) and isinstance(requests, list):
63 for request in requests:
64 if isinstance(request, dict):
65 merge_dicts(request, defaults)
66 return super().deserialize(cstruct)
67
68
69 class BatchRequest(colander.MappingSchema):
70 body = BatchPayloadSchema()
71
72
73 class BatchResponseSchema(colander.MappingSchema):
74 status = colander.SchemaNode(colander.Integer())
75 path = colander.SchemaNode(colander.String())
76 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
77 validator=string_values,
78 missing=colander.drop)
79 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
80 missing=colander.drop)
81
82
83 class BatchResponseBodySchema(colander.MappingSchema):
84 responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))
85
86
87 class BatchResponse(colander.MappingSchema):
88 body = BatchResponseBodySchema()
89
90
91 class ErrorResponseSchema(colander.MappingSchema):
92 body = ErrorSchema()
93
94
95 batch_responses = {
96 '200': BatchResponse(description='Return a list of operation responses.'),
97 '400': ErrorResponseSchema(description='The request was badly formatted.'),
98 'default': ErrorResponseSchema(description='an unknown error occurred.')
99 }
100
101 batch = Service(name="batch", path='/batch',
102 description="Batch operations")
103
104
105 @batch.post(schema=BatchRequest,
106 validators=(colander_validator,),
107 permission=NO_PERMISSION_REQUIRED,
108 tags=['Batch'], operation_id='batch',
109 response_schemas=batch_responses)
110 def post_batch(request):
111 requests = request.validated['body']['requests']
112
113 request.log_context(batch_size=len(requests))
114
115 limit = request.registry.settings['batch_max_requests']
116 if limit and len(requests) > int(limit):
117 error_msg = 'Number of requests is limited to {}'.format(limit)
118 request.errors.add('body', 'requests', error_msg)
119 return
120
121 if any([batch.path in req['path'] for req in requests]):
122 error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)
123 request.errors.add('body', 'requests', error_msg)
124 return
125
126 responses = []
127
128 for subrequest_spec in requests:
129 subrequest = build_request(request, subrequest_spec)
130
131 log_context = {'path': subrequest.path,
132 'method': subrequest.method,
133 **request.log_context()}
134 try:
135 # Invoke subrequest without individual transaction.
136 resp, subrequest = request.follow_subrequest(subrequest,
137 use_tweens=False)
138 except httpexceptions.HTTPException as e:
139 if e.content_type == 'application/json':
140 resp = e
141 else:
142 # JSONify raw Pyramid errors.
143 resp = errors.http_error(e)
144
145 subrequest_logger.info('subrequest.summary', extra=log_context)
146
147 dict_resp = build_response(resp, subrequest)
148 responses.append(dict_resp)
149
150 return {
151 'responses': responses
152 }
153
[end of kinto/core/views/batch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py
--- a/kinto/core/views/batch.py
+++ b/kinto/core/views/batch.py
@@ -42,12 +42,20 @@
body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
missing=colander.drop)
+ @staticmethod
+ def schema_type():
+ return colander.Mapping(unknown='raise')
+
class BatchPayloadSchema(colander.MappingSchema):
defaults = BatchRequestSchema(missing=colander.drop).clone()
requests = colander.SchemaNode(colander.Sequence(),
BatchRequestSchema())
+ @staticmethod
+ def schema_type():
+ return colander.Mapping(unknown='raise')
+
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# On defaults, path is not mandatory.
|
{"golden_diff": "diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py\n--- a/kinto/core/views/batch.py\n+++ b/kinto/core/views/batch.py\n@@ -42,12 +42,20 @@\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n \n+ @staticmethod\n+ def schema_type():\n+ return colander.Mapping(unknown='raise')\n+\n \n class BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n \n+ @staticmethod\n+ def schema_type():\n+ return colander.Mapping(unknown='raise')\n+\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n", "issue": "Batch requests accept additional attributes\nWe should probably forbid them.\r\n\r\n```\r\n> echo '{\"requests\": [], \"foo\": {}}' | http post http://localhost:8888/v1/batch \r\n\r\nHTTP/1.1 200 OK\r\nAccess-Control-Expose-Headers: Retry-After, Alert, Content-Length, Backoff\r\nContent-Length: 16\r\nContent-Type: application/json\r\nDate: Thu, 16 Mar 2017 23:01:01 GMT\r\nServer: waitress\r\nX-Content-Type-Options: nosniff\r\n\r\n{\r\n \"responses\": []\r\n}\r\n```\r\n\r\nEdit: I just notice we currently test for batch ignoring additional attributes. Is this a feature?\n", "before_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\n\n\nsubrequest_logger = logging.getLogger(\"subrequest.summary\")\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. 
warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name=\"batch\", path='/batch',\n description=\"Batch operations\")\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {'path': subrequest.path,\n 'method': 
subrequest.method,\n **request.log_context()}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}]}
| 2,128 | 198 |
gh_patches_debug_30318
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1647
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AWS::Logs::MetricFilter MetricValues permits invalid strings
`cfn-lint 0.30.1`
AWS::Logs::MetricFilter.Properties.MetricTransformations[*].MetricValue allows a bare string not starting with '$', which it appears is never actually valid, i.e.
"MetricValue: length" vs "MetricValue: $length"
Assuming I'm reading the documentation correctly, MetricValue must always either be a number OR start with a '$' character.
The following fragment passes cfn-lint, but is rejected by CloudFormation at runtime because the named MetricValue field lacks a leading '$'.
```
QueueLengthMetricFilter:
Type: AWS::Logs::MetricFilter
Properties:
LogGroupName: !Ref LogGroup
FilterPattern: '[date, time, tag="rh-sched*", x01=throttling, x02="jobs.", ..., x10=Len, x11=of, x12=job, x13="queue*", length]'
MetricTransformations:
- MetricValue: length
MetricNamespace: !Sub '${EnvironmentName}'
MetricName: 'JobsQueued'
```
Note: I believe that this is also missed by the AWS ValidateTemplate API
</issue>
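Read as a validation rule, the constraint described above is "a numeric literal, or a token that begins with $". A small sketch of such a check (the regex is an illustrative assumption, not AWS's published pattern):

```python
import re

# Illustrative pattern only: numeric literal, or a "$"-prefixed field reference.
METRIC_VALUE = re.compile(r"^(\$.*|-?[0-9.]+)$")

for candidate in ("length", "$length", "1", "0.5"):
    print(candidate, bool(METRIC_VALUE.match(candidate)))
# length False / $length True / 1 True / 0.5 True
```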
<code>
[start of src/cfnlint/rules/resources/properties/AllowedPattern.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8
9 from cfnlint.helpers import RESOURCE_SPECS
10
11
12 class AllowedPattern(CloudFormationLintRule):
13 """Check if properties have a valid value"""
14 id = 'E3031'
15 shortdesc = 'Check if property values adhere to a specific pattern'
16 description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'
17 source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'
18 tags = ['resources', 'property', 'allowed pattern', 'regex']
19
20 def initialize(self, cfn):
21 """Initialize the rule"""
22 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
23 self.resource_property_types.append(resource_type_spec)
24 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
25 self.resource_sub_property_types.append(property_type_spec)
26
27 def check_value(self, value, path, property_name, **kwargs):
28 """Check Value"""
29 matches = []
30
31 # Get the Allowed Pattern Regex
32 value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})
33 # Get the "Human Readable" version for the error message. Optional, if not specified,
34 # the RegEx itself is used.
35 value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)
36
37 if value_pattern_regex:
38 regex = re.compile(value_pattern_regex)
39
40 # Ignore values with dynamic references. Simple check to prevent false-positives
41 # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
42 if '{{resolve:' not in value:
43 if not regex.match(value):
44 full_path = ('/'.join(str(x) for x in path))
45
46 message = '{} contains invalid characters (Pattern: {}) at {}'
47 matches.append(RuleMatch(path, message.format(
48 property_name, value_pattern, full_path)))
49
50 return matches
51
52 def check(self, cfn, properties, value_specs, property_specs, path):
53 """Check itself"""
54 matches = list()
55 for p_value, p_path in properties.items_safe(path[:]):
56 for prop in p_value:
57 if prop in value_specs:
58 value = value_specs.get(prop).get('Value', {})
59 if value:
60 value_type = value.get('ValueType', '')
61 property_type = property_specs.get('Properties').get(prop).get('Type')
62 matches.extend(
63 cfn.check_value(
64 p_value, prop, p_path,
65 check_value=self.check_value,
66 value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(
67 'ValueTypes').get(value_type, {}),
68 cfn=cfn, property_type=property_type, property_name=prop
69 )
70 )
71 return matches
72
73 def match_resource_sub_properties(self, properties, property_type, path, cfn):
74 """Match for sub properties"""
75 matches = list()
76
77 specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
78 'PropertyTypes').get(property_type, {}).get('Properties', {})
79 property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
80 matches.extend(self.check(cfn, properties, specs, property_specs, path))
81
82 return matches
83
84 def match_resource_properties(self, properties, resource_type, path, cfn):
85 """Check CloudFormation Properties"""
86 matches = list()
87
88 specs = RESOURCE_SPECS.get(cfn.regions[0]).get(
89 'ResourceTypes').get(resource_type, {}).get('Properties', {})
90 resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
91 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
92
93 return matches
94
[end of src/cfnlint/rules/resources/properties/AllowedPattern.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py
--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py
+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py
@@ -3,6 +3,7 @@
SPDX-License-Identifier: MIT-0
"""
import re
+import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
@@ -34,18 +35,22 @@
# the RegEx itself is used.
value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)
- if value_pattern_regex:
- regex = re.compile(value_pattern_regex)
+ if isinstance(value, (int, float)):
+ value = str(value)
- # Ignore values with dynamic references. Simple check to prevent false-positives
- # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
- if '{{resolve:' not in value:
- if not regex.match(value):
- full_path = ('/'.join(str(x) for x in path))
+ if isinstance(value, six.string_types):
+ if value_pattern_regex:
+ regex = re.compile(value_pattern_regex)
- message = '{} contains invalid characters (Pattern: {}) at {}'
- matches.append(RuleMatch(path, message.format(
- property_name, value_pattern, full_path)))
+ # Ignore values with dynamic references. Simple check to prevent false-positives
+ # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
+ if '{{resolve:' not in value:
+ if not regex.match(value):
+ full_path = ('/'.join(str(x) for x in path))
+
+ message = '{} contains invalid characters (Pattern: {}) at {}'
+ matches.append(RuleMatch(path, message.format(
+ property_name, value_pattern, full_path)))
return matches
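
A short illustration of why the type guard added above matters, using a hypothetical pattern (template scalars such as `MetricValue: 1` reach the check as `int`/`float`, and `re.match` rejects non-strings):
```python
import re

pattern = re.compile(r"^(\$[\w.]+|-?\d+(\.\d+)?)$")  # illustrative pattern only

value = 1                      # e.g. MetricValue: 1 parsed from YAML as an int
# pattern.match(value)         # would raise TypeError: expected string or bytes-like object
if isinstance(value, (int, float)):
    value = str(value)         # normalise first, as the patched check does
assert pattern.match(value)    # now matches as the numeric literal "1"
```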
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py\n+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n@@ -3,6 +3,7 @@\n SPDX-License-Identifier: MIT-0\n \"\"\"\n import re\n+import six\n from cfnlint.rules import CloudFormationLintRule\n from cfnlint.rules import RuleMatch\n \n@@ -34,18 +35,22 @@\n # the RegEx itself is used.\n value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)\n \n- if value_pattern_regex:\n- regex = re.compile(value_pattern_regex)\n+ if isinstance(value, (int, float)):\n+ value = str(value)\n \n- # Ignore values with dynamic references. Simple check to prevent false-positives\n- # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html\n- if '{{resolve:' not in value:\n- if not regex.match(value):\n- full_path = ('/'.join(str(x) for x in path))\n+ if isinstance(value, six.string_types):\n+ if value_pattern_regex:\n+ regex = re.compile(value_pattern_regex)\n \n- message = '{} contains invalid characters (Pattern: {}) at {}'\n- matches.append(RuleMatch(path, message.format(\n- property_name, value_pattern, full_path)))\n+ # Ignore values with dynamic references. Simple check to prevent false-positives\n+ # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html\n+ if '{{resolve:' not in value:\n+ if not regex.match(value):\n+ full_path = ('/'.join(str(x) for x in path))\n+\n+ message = '{} contains invalid characters (Pattern: {}) at {}'\n+ matches.append(RuleMatch(path, message.format(\n+ property_name, value_pattern, full_path)))\n \n return matches\n", "issue": "AWS::Logs::MetricFilter MetricValues permits invalid strings\n`cfn-lint 0.30.1`\r\n\r\nAWS::Logs::MetricFilter.Properties.MetricTransformations[*].MetricValue allows a bare string not starting with '$' which it appears is never actually valid, ie \r\n\"MetricValue: length\" vs \"MetricValue: $length\"\r\n\r\nAssuming I'm reading the documentation correctly MetricValue must always either be a number OR start with a '$' character.\r\n\r\nThe following fragment lints, but is rejected by CloudFormation at runtime without a $ at the start of the named MetricValue field\r\n```\r\n QueueLengthMetricFilter:\r\n Type: AWS::Logs::MetricFilter\r\n Properties:\r\n LogGroupName: !Ref LogGroup\r\n FilterPattern: '[date, time, tag=\"rh-sched*\", x01=throttling, x02=\"jobs.\", ..., x10=Len, x11=of, x12=job, x13=\"queue*\", length]'\r\n MetricTransformations:\r\n - MetricValue: length\r\n MetricNamespace: !Sub '${EnvironmentName}'\r\n MetricName: 'JobsQueued'\r\n```\r\n\r\n\r\nNote: I believe that this is also missed by the AWS ValidateTemplate API\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass AllowedPattern(CloudFormationLintRule):\n \"\"\"Check if properties have a valid value\"\"\"\n id = 'E3031'\n shortdesc = 'Check if property values adhere to a specific pattern'\n description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'\n source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'\n tags = ['resources', 'property', 'allowed pattern', 'regex']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n\n # Get the Allowed Pattern Regex\n value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})\n # Get the \"Human Readable\" version for the error message. Optional, if not specified,\n # the RegEx itself is used.\n value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)\n\n if value_pattern_regex:\n regex = re.compile(value_pattern_regex)\n\n # Ignore values with dynamic references. Simple check to prevent false-positives\n # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html\n if '{{resolve:' not in value:\n if not regex.match(value):\n full_path = ('/'.join(str(x) for x in path))\n\n message = '{} contains invalid characters (Pattern: {}) at {}'\n matches.append(RuleMatch(path, message.format(\n property_name, value_pattern, full_path)))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_value=self.check_value,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, 
path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/AllowedPattern.py"}]}
| 1,871 | 448 |
gh_patches_debug_61781
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-5484
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DragBehavior dispatching touch event incorrectly
<!--
The issue tracker is a tool to address bugs.
Please use the #kivy IRC channel on freenode or Stack Overflow for
support questions, more information at https://git.io/vM1yQ.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://git.io/vM1iE
* prepare a short, runnable example that reproduces the issue
* reproduce the problem with the latest development version of Kivy
* double-check that the issue is indeed a bug and not a support request
-->
### Versions
* Python: 3.6.0
* OS: Linux Mint ver17.3 (based on Ubuntu 14.04)
* Kivy: 1.10
* Kivy installation method: pip (using pyenv)
### Description
on_press() isn't called when holding down button A or B or D.
### Code and Logs
```python
from kivy.lang import Builder
from kivy.base import runTouchApp
root = Builder.load_string(r'''
<DraggableButton@DragBehavior+Button>:
font_size: 40
drag_rectangle: [*self.pos, *self.size, ]
size_hint: None, None
on_touch_down: print(self.text, 'on_touch_down')
on_press: print(self.text, 'on_press')
GridLayout:
on_touch_down: print('------------------------------------------')
cols: 2
RelativeLayout:
DraggableButton:
text: 'A'
RelativeLayout:
DraggableButton:
text: 'B'
RelativeLayout:
DraggableButton:
text: 'C'
RelativeLayout:
DraggableButton:
text: 'D'
''')
runTouchApp(root)
```
##### hold down A
```text
------------------------------------------
D on_touch_down
C on_touch_down
B on_touch_down
A on_touch_down
```
##### hold down B
```text
------------------------------------------
D on_touch_down
C on_touch_down
B on_touch_down
```
##### hold down C
```text
------------------------------------------
D on_touch_down
C on_touch_down
C on_press
```
##### hold down D
```text
------------------------------------------
D on_touch_down
```
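
A minimal sketch of the coordinate handling involved; the `push`/`apply_transform_2d`/`pop` calls are Kivy `MotionEvent` APIs, and the override below mirrors the change to `_change_touch_mode` shown in the patch for this entry rather than being the verbatim fix:

```python
from kivy.uix.behaviors import DragBehavior
from kivy.uix.label import Label


class DraggableSketch(DragBehavior, Label):
    # Hypothetical override: convert the grabbed touch from window coordinates
    # into the parent's local space before re-dispatching it, so children of a
    # RelativeLayout receive it at the position they actually occupy.
    def _change_touch_mode(self, *largs):
        if not self._drag_touch:
            return
        touch = self._drag_touch
        if touch.ud[self._get_uid()]['mode'] != 'unknown':
            return
        touch.ungrab(self)
        self._drag_touch = None
        touch.push()                                     # save current coords
        touch.apply_transform_2d(self.parent.to_widget)  # window -> parent space
        super(DragBehavior, self).on_touch_down(touch)   # dispatch in local coords
        touch.pop()                                      # restore for other widgets
```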
</issue>
<code>
[start of kivy/uix/behaviors/drag.py]
1 """
2 Drag Behavior
3 =============
4
5 The :class:`~kivy.uix.behaviors.drag.DragBehavior`
6 `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides Drag behavior.
7 When combined with a widget, dragging in the rectangle defined by the
8 :attr:`~kivy.uix.behaviors.drag.DragBehavior.drag_rectangle` will drag the
9 widget.
10
11 Example
12 -------
13
14 The following example creates a draggable label::
15
16 from kivy.uix.label import Label
17 from kivy.app import App
18 from kivy.uix.behaviors import DragBehavior
19 from kivy.lang import Builder
20
21 # You could also put the following in your kv file...
22 kv = '''
23 <DragLabel>:
24 # Define the properties for the DragLabel
25 drag_rectangle: self.x, self.y, self.width, self.height
26 drag_timeout: 10000000
27 drag_distance: 0
28
29 FloatLayout:
30 # Define the root widget
31 DragLabel:
32 size_hint: 0.25, 0.2
33 text: 'Drag me'
34 '''
35
36
37 class DragLabel(DragBehavior, Label):
38 pass
39
40
41 class TestApp(App):
42 def build(self):
43 return Builder.load_string(kv)
44
45 TestApp().run()
46
47 """
48
49 __all__ = ('DragBehavior', )
50
51 from kivy.clock import Clock
52 from kivy.properties import NumericProperty, ReferenceListProperty
53 from kivy.config import Config
54 from kivy.metrics import sp
55 from functools import partial
56
57 # When we are generating documentation, Config doesn't exist
58 _scroll_timeout = _scroll_distance = 0
59 if Config:
60 _scroll_timeout = Config.getint('widgets', 'scroll_timeout')
61 _scroll_distance = Config.getint('widgets', 'scroll_distance')
62
63
64 class DragBehavior(object):
65 '''
66 The DragBehavior `mixin <https://en.wikipedia.org/wiki/Mixin>`_ provides
67 Drag behavior. When combined with a widget, dragging in the rectangle
68 defined by :attr:`drag_rectangle` will drag the widget. Please see
69 the :mod:`drag behaviors module <kivy.uix.behaviors.drag>` documentation
70 for more information.
71
72 .. versionadded:: 1.8.0
73 '''
74
75 drag_distance = NumericProperty(_scroll_distance)
76 '''Distance to move before dragging the :class:`DragBehavior`, in pixels.
77 As soon as the distance has been traveled, the :class:`DragBehavior` will
78 start to drag, and no touch event will be dispatched to the children.
79 It is advisable that you base this value on the dpi of your target device's
80 screen.
81
82 :attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty` and
83 defaults to the `scroll_distance` as defined in the user
84 :class:`~kivy.config.Config` (20 pixels by default).
85 '''
86
87 drag_timeout = NumericProperty(_scroll_timeout)
88 '''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.
89 If the user has not moved :attr:`drag_distance` within the timeout,
90 dragging will be disabled, and the touch event will be dispatched to the
91 children.
92
93 :attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty` and
94 defaults to the `scroll_timeout` as defined in the user
95 :class:`~kivy.config.Config` (55 milliseconds by default).
96 '''
97
98 drag_rect_x = NumericProperty(0)
99 '''X position of the axis aligned bounding rectangle where dragging
100 is allowed (in window coordinates).
101
102 :attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty` and
103 defaults to 0.
104 '''
105
106 drag_rect_y = NumericProperty(0)
107 '''Y position of the axis aligned bounding rectangle where dragging
108 is allowed (in window coordinates).
109
110 :attr:`drag_rect_Y` is a :class:`~kivy.properties.NumericProperty` and
111 defaults to 0.
112 '''
113
114 drag_rect_width = NumericProperty(100)
115 '''Width of the axis aligned bounding rectangle where dragging is allowed.
116
117 :attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty` and
118 defaults to 100.
119 '''
120
121 drag_rect_height = NumericProperty(100)
122 '''Height of the axis aligned bounding rectangle where dragging is allowed.
123
124 :attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty` and
125 defaults to 100.
126 '''
127
128 drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,
129 drag_rect_width, drag_rect_height)
130 '''Position and size of the axis aligned bounding rectangle where dragging
131 is allowed.
132
133 :attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`
134 of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,
135 :attr:`drag_rect_height`) properties.
136 '''
137
138 def __init__(self, **kwargs):
139 self._drag_touch = None
140 super(DragBehavior, self).__init__(**kwargs)
141
142 def _get_uid(self, prefix='sv'):
143 return '{0}.{1}'.format(prefix, self.uid)
144
145 def on_touch_down(self, touch):
146 xx, yy, w, h = self.drag_rectangle
147 x, y = touch.pos
148 if not self.collide_point(x, y):
149 touch.ud[self._get_uid('svavoid')] = True
150 return super(DragBehavior, self).on_touch_down(touch)
151 if self._drag_touch or ('button' in touch.profile and
152 touch.button.startswith('scroll')) or\
153 not ((xx < x <= xx + w) and (yy < y <= yy + h)):
154 return super(DragBehavior, self).on_touch_down(touch)
155
156 # no mouse scrolling, so the user is going to drag with this touch.
157 self._drag_touch = touch
158 uid = self._get_uid()
159 touch.grab(self)
160 touch.ud[uid] = {
161 'mode': 'unknown',
162 'dx': 0,
163 'dy': 0}
164 Clock.schedule_once(self._change_touch_mode,
165 self.drag_timeout / 1000.)
166 return True
167
168 def on_touch_move(self, touch):
169 if self._get_uid('svavoid') in touch.ud or\
170 self._drag_touch is not touch:
171 return super(DragBehavior, self).on_touch_move(touch) or\
172 self._get_uid() in touch.ud
173 if touch.grab_current is not self:
174 return True
175
176 uid = self._get_uid()
177 ud = touch.ud[uid]
178 mode = ud['mode']
179 if mode == 'unknown':
180 ud['dx'] += abs(touch.dx)
181 ud['dy'] += abs(touch.dy)
182 if ud['dx'] > sp(self.drag_distance):
183 mode = 'drag'
184 if ud['dy'] > sp(self.drag_distance):
185 mode = 'drag'
186 ud['mode'] = mode
187 if mode == 'drag':
188 self.x += touch.dx
189 self.y += touch.dy
190 return True
191
192 def on_touch_up(self, touch):
193 if self._get_uid('svavoid') in touch.ud:
194 return super(DragBehavior, self).on_touch_up(touch)
195
196 if self._drag_touch and self in [x() for x in touch.grab_list]:
197 touch.ungrab(self)
198 self._drag_touch = None
199 ud = touch.ud[self._get_uid()]
200 if ud['mode'] == 'unknown':
201 super(DragBehavior, self).on_touch_down(touch)
202 Clock.schedule_once(partial(self._do_touch_up, touch), .1)
203 else:
204 if self._drag_touch is not touch:
205 super(DragBehavior, self).on_touch_up(touch)
206 return self._get_uid() in touch.ud
207
208 def _do_touch_up(self, touch, *largs):
209 super(DragBehavior, self).on_touch_up(touch)
210 # don't forget about grab event!
211 for x in touch.grab_list[:]:
212 touch.grab_list.remove(x)
213 x = x()
214 if not x:
215 continue
216 touch.grab_current = x
217 super(DragBehavior, self).on_touch_up(touch)
218 touch.grab_current = None
219
220 def _change_touch_mode(self, *largs):
221 if not self._drag_touch:
222 return
223 uid = self._get_uid()
224 touch = self._drag_touch
225 ud = touch.ud[uid]
226 if ud['mode'] != 'unknown':
227 return
228 touch.ungrab(self)
229 self._drag_touch = None
230 super(DragBehavior, self).on_touch_down(touch)
231 return
232
[end of kivy/uix/behaviors/drag.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kivy/uix/behaviors/drag.py b/kivy/uix/behaviors/drag.py
--- a/kivy/uix/behaviors/drag.py
+++ b/kivy/uix/behaviors/drag.py
@@ -227,5 +227,8 @@
return
touch.ungrab(self)
self._drag_touch = None
+ touch.push()
+ touch.apply_transform_2d(self.parent.to_widget)
super(DragBehavior, self).on_touch_down(touch)
+ touch.pop()
return
|
{"golden_diff": "diff --git a/kivy/uix/behaviors/drag.py b/kivy/uix/behaviors/drag.py\n--- a/kivy/uix/behaviors/drag.py\n+++ b/kivy/uix/behaviors/drag.py\n@@ -227,5 +227,8 @@\n return\n touch.ungrab(self)\n self._drag_touch = None\n+ touch.push()\n+ touch.apply_transform_2d(self.parent.to_widget)\n super(DragBehavior, self).on_touch_down(touch)\n+ touch.pop()\n return\n", "issue": "DragBehavior dispatching touch event incorrectly\n<!--\r\nThe issue tracker is a tool to address bugs.\r\nPlease use the #kivy IRC channel on freenode or Stack Overflow for\r\nsupport questions, more information at https://git.io/vM1yQ.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://git.io/vM1iE\r\n * prepare a short, runnable example that reproduces the issue\r\n * reproduce the problem with the latest development version of Kivy\r\n * double-check that the issue is indeed a bug and not a support request\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.6.0\r\n* OS: Linux Mint ver17.3 (based on Ubuntu 14.04)\r\n* Kivy: 1.10\r\n* Kivy installation method: pip (using pyenv)\r\n\r\n### Description\r\n\r\non_press() isn't called when holding down button A or B or D.\r\n\r\n### Code and Logs\r\n\r\n```python\r\nfrom kivy.lang import Builder\r\nfrom kivy.base import runTouchApp\r\n\r\nroot = Builder.load_string(r'''\r\n<DraggableButton@DragBehavior+Button>:\r\n font_size: 40\r\n drag_rectangle: [*self.pos, *self.size, ]\r\n size_hint: None, None\r\n on_touch_down: print(self.text, 'on_touch_down')\r\n on_press: print(self.text, 'on_press')\r\n\r\nGridLayout:\r\n on_touch_down: print('------------------------------------------')\r\n cols: 2\r\n RelativeLayout:\r\n DraggableButton:\r\n text: 'A'\r\n RelativeLayout:\r\n DraggableButton:\r\n text: 'B'\r\n RelativeLayout:\r\n DraggableButton:\r\n text: 'C'\r\n RelativeLayout:\r\n DraggableButton:\r\n text: 'D'\r\n''')\r\n\r\nrunTouchApp(root)\r\n```\r\n\r\n##### hold down A\r\n\r\n```text\r\n------------------------------------------\r\nD on_touch_down\r\nC on_touch_down\r\nB on_touch_down\r\nA on_touch_down\r\n```\r\n\r\n##### hold down B\r\n\r\n```text\r\n------------------------------------------\r\nD on_touch_down\r\nC on_touch_down\r\nB on_touch_down\r\n```\r\n\r\n##### hold down C\r\n\r\n```text\r\n------------------------------------------\r\nD on_touch_down\r\nC on_touch_down\r\nC on_press\r\n```\r\n\r\n##### hold down D\r\n\r\n```text\r\n------------------------------------------\r\nD on_touch_down\r\n```\n", "before_files": [{"content": "\"\"\"\nDrag Behavior\n=============\n\nThe :class:`~kivy.uix.behaviors.drag.DragBehavior`\n`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides Drag behavior.\nWhen combined with a widget, dragging in the rectangle defined by the\n:attr:`~kivy.uix.behaviors.drag.DragBehavior.drag_rectangle` will drag the\nwidget.\n\nExample\n-------\n\nThe following example creates a draggable label::\n\n from kivy.uix.label import Label\n from kivy.app import App\n from kivy.uix.behaviors import DragBehavior\n from kivy.lang import Builder\n\n # You could also put the following in your kv file...\n kv = '''\n <DragLabel>:\n # Define the properties for the DragLabel\n drag_rectangle: self.x, self.y, self.width, self.height\n drag_timeout: 10000000\n drag_distance: 0\n\n FloatLayout:\n # Define the root widget\n DragLabel:\n size_hint: 0.25, 0.2\n text: 'Drag me'\n '''\n\n\n class DragLabel(DragBehavior, Label):\n pass\n\n\n class TestApp(App):\n def build(self):\n 
return Builder.load_string(kv)\n\n TestApp().run()\n\n\"\"\"\n\n__all__ = ('DragBehavior', )\n\nfrom kivy.clock import Clock\nfrom kivy.properties import NumericProperty, ReferenceListProperty\nfrom kivy.config import Config\nfrom kivy.metrics import sp\nfrom functools import partial\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = Config.getint('widgets', 'scroll_distance')\n\n\nclass DragBehavior(object):\n '''\n The DragBehavior `mixin <https://en.wikipedia.org/wiki/Mixin>`_ provides\n Drag behavior. When combined with a widget, dragging in the rectangle\n defined by :attr:`drag_rectangle` will drag the widget. Please see\n the :mod:`drag behaviors module <kivy.uix.behaviors.drag>` documentation\n for more information.\n\n .. versionadded:: 1.8.0\n '''\n\n drag_distance = NumericProperty(_scroll_distance)\n '''Distance to move before dragging the :class:`DragBehavior`, in pixels.\n As soon as the distance has been traveled, the :class:`DragBehavior` will\n start to drag, and no touch event will be dispatched to the children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to the `scroll_distance` as defined in the user\n :class:`~kivy.config.Config` (20 pixels by default).\n '''\n\n drag_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.\n If the user has not moved :attr:`drag_distance` within the timeout,\n dragging will be disabled, and the touch event will be dispatched to the\n children.\n\n :attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to the `scroll_timeout` as defined in the user\n :class:`~kivy.config.Config` (55 milliseconds by default).\n '''\n\n drag_rect_x = NumericProperty(0)\n '''X position of the axis aligned bounding rectangle where dragging\n is allowed (in window coordinates).\n\n :attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n drag_rect_y = NumericProperty(0)\n '''Y position of the axis aligned bounding rectangle where dragging\n is allowed (in window coordinates).\n\n :attr:`drag_rect_Y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n drag_rect_width = NumericProperty(100)\n '''Width of the axis aligned bounding rectangle where dragging is allowed.\n\n :attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 100.\n '''\n\n drag_rect_height = NumericProperty(100)\n '''Height of the axis aligned bounding rectangle where dragging is allowed.\n\n :attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 100.\n '''\n\n drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,\n drag_rect_width, drag_rect_height)\n '''Position and size of the axis aligned bounding rectangle where dragging\n is allowed.\n\n :attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`\n of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,\n :attr:`drag_rect_height`) properties.\n '''\n\n def __init__(self, **kwargs):\n self._drag_touch = None\n super(DragBehavior, self).__init__(**kwargs)\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def on_touch_down(self, touch):\n xx, yy, w, h = 
self.drag_rectangle\n x, y = touch.pos\n if not self.collide_point(x, y):\n touch.ud[self._get_uid('svavoid')] = True\n return super(DragBehavior, self).on_touch_down(touch)\n if self._drag_touch or ('button' in touch.profile and\n touch.button.startswith('scroll')) or\\\n not ((xx < x <= xx + w) and (yy < y <= yy + h)):\n return super(DragBehavior, self).on_touch_down(touch)\n\n # no mouse scrolling, so the user is going to drag with this touch.\n self._drag_touch = touch\n uid = self._get_uid()\n touch.grab(self)\n touch.ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0}\n Clock.schedule_once(self._change_touch_mode,\n self.drag_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._get_uid('svavoid') in touch.ud or\\\n self._drag_touch is not touch:\n return super(DragBehavior, self).on_touch_move(touch) or\\\n self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n uid = self._get_uid()\n ud = touch.ud[uid]\n mode = ud['mode']\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > sp(self.drag_distance):\n mode = 'drag'\n if ud['dy'] > sp(self.drag_distance):\n mode = 'drag'\n ud['mode'] = mode\n if mode == 'drag':\n self.x += touch.dx\n self.y += touch.dy\n return True\n\n def on_touch_up(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return super(DragBehavior, self).on_touch_up(touch)\n\n if self._drag_touch and self in [x() for x in touch.grab_list]:\n touch.ungrab(self)\n self._drag_touch = None\n ud = touch.ud[self._get_uid()]\n if ud['mode'] == 'unknown':\n super(DragBehavior, self).on_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .1)\n else:\n if self._drag_touch is not touch:\n super(DragBehavior, self).on_touch_up(touch)\n return self._get_uid() in touch.ud\n\n def _do_touch_up(self, touch, *largs):\n super(DragBehavior, self).on_touch_up(touch)\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n super(DragBehavior, self).on_touch_up(touch)\n touch.grab_current = None\n\n def _change_touch_mode(self, *largs):\n if not self._drag_touch:\n return\n uid = self._get_uid()\n touch = self._drag_touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown':\n return\n touch.ungrab(self)\n self._drag_touch = None\n super(DragBehavior, self).on_touch_down(touch)\n return\n", "path": "kivy/uix/behaviors/drag.py"}]}
| 3,537 | 122 |
gh_patches_debug_38111
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4781
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BC_K8S_14/CKV_K8S_15 "Image Pull Policy should be Always" should not be reported when using a digest
**Describe the issue**
[BC_K8S_14/CKV_K8S_15 "Image Pull Policy should be Always"](https://docs.bridgecrew.io/docs/bc_k8s_14) should not be reported when using a digest.
Per the documentation:
> When the imagePullPolicy is set to Always, you ensure the latest version of the image is deployed every time the pod is started.
By specifying the digest, the same image is always used, so there is no concern about the latest version (since the version cannot change), negating the need to always pull the image.
**Examples**
Save the following as `deployment.yaml`:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: <name>
spec:
containers:
- name: nginx
image: 'nginx@sha256:b0ad43f7ee5edbc0effbc14645ae7055e21bc1973aee5150745632a24a752661'
imagePullPolicy: IfNotPresent
```
Run: `checkov --framework kubernetes -f deployment.yaml -c CKV_K8S_15`
Actual result:
FAILED
Expected result:
Passed
**Version (please complete the following information):**
- Checkov Version 2.3.126
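
A minimal sketch of the digest handling the report implies; the helper name is hypothetical, while the `@`-split mirrors the normalisation already used by the check:

```python
# Hypothetical helper: an image pinned by digest always resolves to the same
# bytes, so forcing imagePullPolicy: Always adds nothing in that case.
def is_digest_pinned(image_ref: str) -> bool:
    name, _, digest = image_ref.partition("@")
    return bool(name) and digest.startswith("sha256:")


# is_digest_pinned("nginx@sha256:b0ad43f7ee5e...")  -> True   (check should pass)
# is_digest_pinned("nginx:1.23")                    -> False  (policy still matters)
```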
</issue>
<code>
[start of checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class ImagePullPolicyAlways(BaseResourceCheck):
6
7 def __init__(self):
8 """
9 Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct
10 Default is 'IfNotPresent' unless image tag is omitted or :latest
11 https://kubernetes.io/docs/concepts/configuration/overview/#container-images
12
13 An admission controller could be used to enforce imagePullPolicy
14 """
15 name = "Image Pull Policy should be Always"
16 id = "CKV_K8S_15"
17 supported_resources = ["kubernetes_pod", "kubernetes_pod_v1",
18 "kubernetes_deployment", "kubernetes_deployment_v1"]
19 categories = [CheckCategories.GENERAL_SECURITY]
20 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
21
22 def scan_resource_conf(self, conf) -> CheckResult:
23 spec = conf.get('spec', [None])[0]
24 if isinstance(spec, dict) and spec:
25 evaluated_keys_path = "spec"
26
27 template = spec.get("template")
28 if template and isinstance(template, list):
29 template = template[0]
30 template_spec = template.get("spec")
31 if template_spec and isinstance(template_spec, list):
32 spec = template_spec[0]
33 evaluated_keys_path = f'{evaluated_keys_path}/[0]/template/[0]/spec'
34
35 containers = spec.get("container")
36 if containers is None:
37 return CheckResult.UNKNOWN
38 for idx, container in enumerate(containers):
39 if not isinstance(container, dict):
40 return CheckResult.UNKNOWN
41
42 if container.get("image_pull_policy"):
43 if container.get("image_pull_policy")[0] == "Always":
44 break
45 else:
46 if container.get("image"):
47 name = container.get("image")[0]
48 if "latest" in name:
49 break
50 self.evaluated_keys = [f'{evaluated_keys_path}/[0]/container/[{idx}]']
51 return CheckResult.FAILED
52 return CheckResult.PASSED
53 return CheckResult.FAILED
54
55
56 check = ImagePullPolicyAlways()
57
[end of checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py]
[start of checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py]
1 import re
2 from typing import Any, Dict
3
4 from checkov.common.models.consts import DOCKER_IMAGE_REGEX
5 from checkov.common.models.enums import CheckResult
6 from checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck
7
8
9 class ImagePullPolicyAlways(BaseK8sContainerCheck):
10 def __init__(self) -> None:
11 """
12 Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct
13 Default is 'IfNotPresent' unless image tag is omitted or :latest
14 https://kubernetes.io/docs/concepts/configuration/overview/#container-images
15
16 An admission controller could be used to enforce imagePullPolicy
17 """
18 name = "Image Pull Policy should be Always"
19 id = "CKV_K8S_15"
20 # Location: container .imagePullPolicy
21 super().__init__(name=name, id=id)
22
23 def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:
24 self.evaluated_container_keys = ["image", "imagePullPolicy"]
25 if conf.get("image"):
26 # Remove the digest, if present
27 image_val = conf["image"]
28 if not isinstance(image_val, str) or image_val.strip() == "":
29 return CheckResult.UNKNOWN
30 if "@" in image_val:
31 image_val = image_val[0 : image_val.index("@")]
32 if "imagePullPolicy" not in conf:
33 image_tag_match = re.findall(DOCKER_IMAGE_REGEX, image_val)
34 if len(image_tag_match) != 1:
35 # If image name is invalid, check result can not be determined
36 return CheckResult.UNKNOWN
37 (image, tag) = image_tag_match[0]
38 if tag == "latest" or tag == "":
39 # Default imagePullPolicy = Always
40 return CheckResult.PASSED
41 else:
42 # Default imagePullPolicy = IfNotPresent
43 return CheckResult.FAILED
44 else:
45 if conf["imagePullPolicy"] != "Always":
46 return CheckResult.FAILED
47
48 else:
49 return CheckResult.FAILED
50 return CheckResult.PASSED
51
52
53 check = ImagePullPolicyAlways()
54
[end of checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py b/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py
--- a/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py
+++ b/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py
@@ -29,6 +29,9 @@
return CheckResult.UNKNOWN
if "@" in image_val:
image_val = image_val[0 : image_val.index("@")]
+ has_digest = True
+ else:
+ has_digest = False
if "imagePullPolicy" not in conf:
image_tag_match = re.findall(DOCKER_IMAGE_REGEX, image_val)
if len(image_tag_match) != 1:
@@ -38,11 +41,13 @@
if tag == "latest" or tag == "":
# Default imagePullPolicy = Always
return CheckResult.PASSED
+ elif has_digest:
+ return CheckResult.PASSED
else:
# Default imagePullPolicy = IfNotPresent
return CheckResult.FAILED
else:
- if conf["imagePullPolicy"] != "Always":
+ if not has_digest and conf["imagePullPolicy"] != "Always":
return CheckResult.FAILED
else:
diff --git a/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py b/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py
--- a/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py
+++ b/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py
@@ -7,7 +7,7 @@
def __init__(self):
"""
Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct
- Default is 'IfNotPresent' unless image tag is omitted or :latest
+ Default is 'IfNotPresent' unless image tag/digest is omitted or :latest
https://kubernetes.io/docs/concepts/configuration/overview/#container-images
An admission controller could be used to enforce imagePullPolicy
@@ -47,6 +47,8 @@
name = container.get("image")[0]
if "latest" in name:
break
+ if "@" in name:
+ break
self.evaluated_keys = [f'{evaluated_keys_path}/[0]/container/[{idx}]']
return CheckResult.FAILED
return CheckResult.PASSED
|
{"golden_diff": "diff --git a/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py b/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py\n--- a/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py\n+++ b/checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py\n@@ -29,6 +29,9 @@\n return CheckResult.UNKNOWN\n if \"@\" in image_val:\n image_val = image_val[0 : image_val.index(\"@\")]\n+ has_digest = True\n+ else:\n+ has_digest = False\n if \"imagePullPolicy\" not in conf:\n image_tag_match = re.findall(DOCKER_IMAGE_REGEX, image_val)\n if len(image_tag_match) != 1:\n@@ -38,11 +41,13 @@\n if tag == \"latest\" or tag == \"\":\n # Default imagePullPolicy = Always\n return CheckResult.PASSED\n+ elif has_digest:\n+ return CheckResult.PASSED\n else:\n # Default imagePullPolicy = IfNotPresent\n return CheckResult.FAILED\n else:\n- if conf[\"imagePullPolicy\"] != \"Always\":\n+ if not has_digest and conf[\"imagePullPolicy\"] != \"Always\":\n return CheckResult.FAILED\n \n else:\ndiff --git a/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py b/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py\n--- a/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py\n+++ b/checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py\n@@ -7,7 +7,7 @@\n def __init__(self):\n \"\"\"\n Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct\n- Default is 'IfNotPresent' unless image tag is omitted or :latest\n+ Default is 'IfNotPresent' unless image tag/digest is omitted or :latest\n https://kubernetes.io/docs/concepts/configuration/overview/#container-images\n \n An admission controller could be used to enforce imagePullPolicy\n@@ -47,6 +47,8 @@\n name = container.get(\"image\")[0]\n if \"latest\" in name:\n break\n+ if \"@\" in name:\n+ break\n self.evaluated_keys = [f'{evaluated_keys_path}/[0]/container/[{idx}]']\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "BC_K8S_14/CKV_K8S_15 \"Image Pull Policy should be Always\" should not be reported when using a digest\n**Describe the issue**\r\n[BC_K8S_14/CKV_K8S_15 \"Image Pull Policy should be Always\"](https://docs.bridgecrew.io/docs/bc_k8s_14) should not be reported when using a digest.\r\n\r\nPer the documentation:\r\n> When the imagePullPolicy is set to Always, you ensure the latest version of the image is deployed every time the pod is started.\r\n\r\nBy specifying the digest, the same image is always used, so there is no concern about the latest version (since the the version cannot change), negating the need to always pull the image.\r\n\r\n**Examples**\r\nSave the following as `deployment.yaml`:\r\n```yaml\r\napiVersion: v1\r\nkind: Pod\r\nmetadata:\r\n name: <name>\r\nspec:\r\n containers:\r\n - name: nginx\r\n image: 'nginx@sha256:b0ad43f7ee5edbc0effbc14645ae7055e21bc1973aee5150745632a24a752661'\r\n imagePullPolicy: IfNotPresent\r\n```\r\nRun: `checkov --framework kubernetes -f deployment.yaml -c CKV_K8S_15`\r\n\r\nActual result:\r\nFAILED\r\n\r\nExpected result:\r\nPassed\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.126\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass ImagePullPolicyAlways(BaseResourceCheck):\n\n def __init__(self):\n \"\"\"\n Image pull policy should be set to always to 
ensure you get the correct image and imagePullSecrets are correct\n Default is 'IfNotPresent' unless image tag is omitted or :latest\n https://kubernetes.io/docs/concepts/configuration/overview/#container-images\n\n An admission controller could be used to enforce imagePullPolicy\n \"\"\"\n name = \"Image Pull Policy should be Always\"\n id = \"CKV_K8S_15\"\n supported_resources = [\"kubernetes_pod\", \"kubernetes_pod_v1\",\n \"kubernetes_deployment\", \"kubernetes_deployment_v1\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n spec = conf.get('spec', [None])[0]\n if isinstance(spec, dict) and spec:\n evaluated_keys_path = \"spec\"\n\n template = spec.get(\"template\")\n if template and isinstance(template, list):\n template = template[0]\n template_spec = template.get(\"spec\")\n if template_spec and isinstance(template_spec, list):\n spec = template_spec[0]\n evaluated_keys_path = f'{evaluated_keys_path}/[0]/template/[0]/spec'\n\n containers = spec.get(\"container\")\n if containers is None:\n return CheckResult.UNKNOWN\n for idx, container in enumerate(containers):\n if not isinstance(container, dict):\n return CheckResult.UNKNOWN\n\n if container.get(\"image_pull_policy\"):\n if container.get(\"image_pull_policy\")[0] == \"Always\":\n break\n else:\n if container.get(\"image\"):\n name = container.get(\"image\")[0]\n if \"latest\" in name:\n break\n self.evaluated_keys = [f'{evaluated_keys_path}/[0]/container/[{idx}]']\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = ImagePullPolicyAlways()\n", "path": "checkov/terraform/checks/resource/kubernetes/ImagePullPolicyAlways.py"}, {"content": "import re\nfrom typing import Any, Dict\n\nfrom checkov.common.models.consts import DOCKER_IMAGE_REGEX\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.kubernetes.checks.resource.base_container_check import BaseK8sContainerCheck\n\n\nclass ImagePullPolicyAlways(BaseK8sContainerCheck):\n def __init__(self) -> None:\n \"\"\"\n Image pull policy should be set to always to ensure you get the correct image and imagePullSecrets are correct\n Default is 'IfNotPresent' unless image tag is omitted or :latest\n https://kubernetes.io/docs/concepts/configuration/overview/#container-images\n\n An admission controller could be used to enforce imagePullPolicy\n \"\"\"\n name = \"Image Pull Policy should be Always\"\n id = \"CKV_K8S_15\"\n # Location: container .imagePullPolicy\n super().__init__(name=name, id=id)\n\n def scan_container_conf(self, metadata: Dict[str, Any], conf: Dict[str, Any]) -> CheckResult:\n self.evaluated_container_keys = [\"image\", \"imagePullPolicy\"]\n if conf.get(\"image\"):\n # Remove the digest, if present\n image_val = conf[\"image\"]\n if not isinstance(image_val, str) or image_val.strip() == \"\":\n return CheckResult.UNKNOWN\n if \"@\" in image_val:\n image_val = image_val[0 : image_val.index(\"@\")]\n if \"imagePullPolicy\" not in conf:\n image_tag_match = re.findall(DOCKER_IMAGE_REGEX, image_val)\n if len(image_tag_match) != 1:\n # If image name is invalid, check result can not be determined\n return CheckResult.UNKNOWN\n (image, tag) = image_tag_match[0]\n if tag == \"latest\" or tag == \"\":\n # Default imagePullPolicy = Always\n return CheckResult.PASSED\n else:\n # Default imagePullPolicy = IfNotPresent\n return CheckResult.FAILED\n else:\n if 
conf[\"imagePullPolicy\"] != \"Always\":\n return CheckResult.FAILED\n\n else:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = ImagePullPolicyAlways()\n", "path": "checkov/kubernetes/checks/resource/k8s/ImagePullPolicyAlways.py"}]}
| 2,096 | 543 |
gh_patches_debug_36453
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-418
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix asgi instrumentation default span name
Similar to [wsgi](https://github.com/open-telemetry/opentelemetry-python/blob/master/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py#L171), the default span name for SERVER spans should be `HTTP <method>` as outlined by the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#name).
Part of https://github.com/open-telemetry/opentelemetry-python-contrib/issues/434
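
A minimal sketch of the naming rule, assuming a hypothetical free-standing helper; the path-then-`HTTP <method>` fallback matches the behaviour adopted in the patch for this entry:

```python
# Illustrative only: derive a default span name from an ASGI scope, preferring
# the request path and falling back to the spec-mandated "HTTP <method>" form.
def default_span_name(scope: dict) -> str:
    path = scope.get("path", "").strip()
    if path:
        return path
    return "HTTP {}".format(scope.get("method", "").strip())


# default_span_name({"method": "GET"})                     -> "HTTP GET"
# default_span_name({"path": "/items", "method": "GET"})   -> "/items"
```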
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used
17 on any ASGI framework (such as Django-channels / Quart) to track requests
18 timing through OpenTelemetry.
19 """
20
21 import typing
22 import urllib
23 from functools import wraps
24 from typing import Tuple
25
26 from asgiref.compatibility import guarantee_single_callable
27
28 from opentelemetry import context, trace
29 from opentelemetry.instrumentation.asgi.version import __version__ # noqa
30 from opentelemetry.instrumentation.utils import http_status_to_status_code
31 from opentelemetry.propagate import extract
32 from opentelemetry.propagators.textmap import Getter
33 from opentelemetry.semconv.trace import SpanAttributes
34 from opentelemetry.trace.status import Status, StatusCode
35
36
37 class ASGIGetter(Getter):
38 def get(
39 self, carrier: dict, key: str
40 ) -> typing.Optional[typing.List[str]]:
41 """Getter implementation to retrieve a HTTP header value from the ASGI
42 scope.
43
44 Args:
45 carrier: ASGI scope object
46 key: header name in scope
47 Returns:
48 A list with a single string with the header value if it exists,
49 else None.
50 """
51 headers = carrier.get("headers")
52 if not headers:
53 return None
54
55 # asgi header keys are in lower case
56 key = key.lower()
57 decoded = [
58 _value.decode("utf8")
59 for (_key, _value) in headers
60 if _key.decode("utf8") == key
61 ]
62 if not decoded:
63 return None
64 return decoded
65
66 def keys(self, carrier: dict) -> typing.List[str]:
67 return list(carrier.keys())
68
69
70 asgi_getter = ASGIGetter()
71
72
73 def collect_request_attributes(scope):
74 """Collects HTTP request attributes from the ASGI scope and returns a
75 dictionary to be used as span creation attributes."""
76 server_host, port, http_url = get_host_port_url_tuple(scope)
77 query_string = scope.get("query_string")
78 if query_string and http_url:
79 if isinstance(query_string, bytes):
80 query_string = query_string.decode("utf8")
81 http_url = http_url + ("?" + urllib.parse.unquote(query_string))
82
83 result = {
84 SpanAttributes.HTTP_SCHEME: scope.get("scheme"),
85 SpanAttributes.HTTP_HOST: server_host,
86 SpanAttributes.NET_HOST_PORT: port,
87 SpanAttributes.HTTP_FLAVOR: scope.get("http_version"),
88 SpanAttributes.HTTP_TARGET: scope.get("path"),
89 SpanAttributes.HTTP_URL: http_url,
90 }
91 http_method = scope.get("method")
92 if http_method:
93 result[SpanAttributes.HTTP_METHOD] = http_method
94
95 http_host_value_list = asgi_getter.get(scope, "host")
96 if http_host_value_list:
97 result[SpanAttributes.HTTP_SERVER_NAME] = ",".join(
98 http_host_value_list
99 )
100 http_user_agent = asgi_getter.get(scope, "user-agent")
101 if http_user_agent:
102 result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]
103
104 if "client" in scope and scope["client"] is not None:
105 result[SpanAttributes.NET_PEER_IP] = scope.get("client")[0]
106 result[SpanAttributes.NET_PEER_PORT] = scope.get("client")[1]
107
108 # remove None values
109 result = {k: v for k, v in result.items() if v is not None}
110
111 return result
112
113
114 def get_host_port_url_tuple(scope):
115 """Returns (host, port, full_url) tuple."""
116 server = scope.get("server") or ["0.0.0.0", 80]
117 port = server[1]
118 server_host = server[0] + (":" + str(port) if port != 80 else "")
119 full_path = scope.get("root_path", "") + scope.get("path", "")
120 http_url = scope.get("scheme", "http") + "://" + server_host + full_path
121 return server_host, port, http_url
122
123
124 def set_status_code(span, status_code):
125 """Adds HTTP response attributes to span using the status_code argument."""
126 if not span.is_recording():
127 return
128 try:
129 status_code = int(status_code)
130 except ValueError:
131 span.set_status(
132 Status(
133 StatusCode.ERROR,
134 "Non-integer HTTP status: " + repr(status_code),
135 )
136 )
137 else:
138 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)
139 span.set_status(Status(http_status_to_status_code(status_code)))
140
141
142 def get_default_span_details(scope: dict) -> Tuple[str, dict]:
143 """Default implementation for span_details_callback
144
145 Args:
146 scope: the asgi scope dictionary
147
148 Returns:
149 a tuple of the span, and any attributes to attach to the
150 span.
151 """
152 method_or_path = scope.get("method") or scope.get("path")
153
154 return method_or_path, {}
155
156
157 class OpenTelemetryMiddleware:
158 """The ASGI application middleware.
159
160 This class is an ASGI middleware that starts and annotates spans for any
161 requests it is invoked with.
162
163 Args:
164 app: The ASGI application callable to forward requests to.
165 span_details_callback: Callback which should return a string
166 and a tuple, representing the desired span name and a
167 dictionary with any additional span attributes to set.
168 Optional: Defaults to get_default_span_details.
169 tracer_provider: The optional tracer provider to use. If omitted
170 the current globally configured one is used.
171 """
172
173 def __init__(
174 self,
175 app,
176 excluded_urls=None,
177 span_details_callback=None,
178 tracer_provider=None,
179 ):
180 self.app = guarantee_single_callable(app)
181 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)
182 self.span_details_callback = (
183 span_details_callback or get_default_span_details
184 )
185 self.excluded_urls = excluded_urls
186
187 async def __call__(self, scope, receive, send):
188 """The ASGI application
189
190 Args:
191 scope: A ASGI environment.
192 receive: An awaitable callable yielding dictionaries
193 send: An awaitable callable taking a single dictionary as argument.
194 """
195 if scope["type"] not in ("http", "websocket"):
196 return await self.app(scope, receive, send)
197
198 _, _, url = get_host_port_url_tuple(scope)
199 if self.excluded_urls and self.excluded_urls.url_disabled(url):
200 return await self.app(scope, receive, send)
201
202 token = context.attach(extract(scope, getter=asgi_getter))
203 span_name, additional_attributes = self.span_details_callback(scope)
204
205 try:
206 with self.tracer.start_as_current_span(
207 span_name + " asgi", kind=trace.SpanKind.SERVER,
208 ) as span:
209 if span.is_recording():
210 attributes = collect_request_attributes(scope)
211 attributes.update(additional_attributes)
212 for key, value in attributes.items():
213 span.set_attribute(key, value)
214
215 @wraps(receive)
216 async def wrapped_receive():
217 with self.tracer.start_as_current_span(
218 span_name + " asgi." + scope["type"] + ".receive"
219 ) as receive_span:
220 message = await receive()
221 if receive_span.is_recording():
222 if message["type"] == "websocket.receive":
223 set_status_code(receive_span, 200)
224 receive_span.set_attribute("type", message["type"])
225 return message
226
227 @wraps(send)
228 async def wrapped_send(message):
229 with self.tracer.start_as_current_span(
230 span_name + " asgi." + scope["type"] + ".send"
231 ) as send_span:
232 if send_span.is_recording():
233 if message["type"] == "http.response.start":
234 status_code = message["status"]
235 set_status_code(span, status_code)
236 set_status_code(send_span, status_code)
237 elif message["type"] == "websocket.send":
238 set_status_code(span, 200)
239 set_status_code(send_span, 200)
240 send_span.set_attribute("type", message["type"])
241 await send(message)
242
243 await self.app(scope, wrapped_receive, wrapped_send)
244 finally:
245 context.detach(token)
246
[end of instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
@@ -146,12 +146,13 @@
scope: the asgi scope dictionary
Returns:
- a tuple of the span, and any attributes to attach to the
- span.
+ a tuple of the span name, and any attributes to attach to the span.
"""
- method_or_path = scope.get("method") or scope.get("path")
+ span_name = scope.get("path", "").strip() or "HTTP {}".format(
+ scope.get("method", "").strip()
+ )
- return method_or_path, {}
+ return span_name, {}
class OpenTelemetryMiddleware:
@@ -204,7 +205,7 @@
try:
with self.tracer.start_as_current_span(
- span_name + " asgi", kind=trace.SpanKind.SERVER,
+ span_name, kind=trace.SpanKind.SERVER,
) as span:
if span.is_recording():
attributes = collect_request_attributes(scope)
@@ -215,7 +216,7 @@
@wraps(receive)
async def wrapped_receive():
with self.tracer.start_as_current_span(
- span_name + " asgi." + scope["type"] + ".receive"
+ " ".join((span_name, scope["type"], "receive"))
) as receive_span:
message = await receive()
if receive_span.is_recording():
@@ -227,7 +228,7 @@
@wraps(send)
async def wrapped_send(message):
with self.tracer.start_as_current_span(
- span_name + " asgi." + scope["type"] + ".send"
+ " ".join((span_name, scope["type"], "send"))
) as send_span:
if send_span.is_recording():
if message["type"] == "http.response.start":
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n@@ -146,12 +146,13 @@\n scope: the asgi scope dictionary\n \n Returns:\n- a tuple of the span, and any attributes to attach to the\n- span.\n+ a tuple of the span name, and any attributes to attach to the span.\n \"\"\"\n- method_or_path = scope.get(\"method\") or scope.get(\"path\")\n+ span_name = scope.get(\"path\", \"\").strip() or \"HTTP {}\".format(\n+ scope.get(\"method\", \"\").strip()\n+ )\n \n- return method_or_path, {}\n+ return span_name, {}\n \n \n class OpenTelemetryMiddleware:\n@@ -204,7 +205,7 @@\n \n try:\n with self.tracer.start_as_current_span(\n- span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n+ span_name, kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n@@ -215,7 +216,7 @@\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n- span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n+ \" \".join((span_name, scope[\"type\"], \"receive\"))\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n@@ -227,7 +228,7 @@\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n- span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n+ \" \".join((span_name, scope[\"type\"], \"send\"))\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n", "issue": "Fix asgi instrumentation default span name\nSimilar to [wsgi](https://github.com/open-telemetry/opentelemetry-python/blob/master/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py#L171), the default span name for SERVER spans should be `HTTP <method>` as outlined by the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#name).\r\n\r\nPart of https://github.com/open-telemetry/opentelemetry-python-contrib/issues/434\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom 
opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\nclass ASGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n def keys(self, carrier: dict) -> typing.List[str]:\n return list(carrier.keys())\n\n\nasgi_getter = ASGIGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n\n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n SpanAttributes.HTTP_HOST: server_host,\n SpanAttributes.NET_HOST_PORT: port,\n SpanAttributes.HTTP_FLAVOR: scope.get(\"http_version\"),\n SpanAttributes.HTTP_TARGET: scope.get(\"path\"),\n SpanAttributes.HTTP_URL: http_url,\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[SpanAttributes.HTTP_METHOD] = http_method\n\n http_host_value_list = asgi_getter.get(scope, \"host\")\n if http_host_value_list:\n result[SpanAttributes.HTTP_SERVER_NAME] = \",\".join(\n http_host_value_list\n )\n http_user_agent = asgi_getter.get(scope, \"user-agent\")\n if http_user_agent:\n result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[SpanAttributes.NET_PEER_IP] = scope.get(\"client\")[0]\n result[SpanAttributes.NET_PEER_PORT] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation 
for span_details_callback\n\n Args:\n scope: the asgi scope dictionary\n\n Returns:\n a tuple of the span, and any attributes to attach to the\n span.\n \"\"\"\n method_or_path = scope.get(\"method\") or scope.get(\"path\")\n\n return method_or_path, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n span_details_callback: Callback which should return a string\n and a tuple, representing the desired span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n tracer_provider: The optional tracer provider to use. If omitted\n the current globally configured one is used.\n \"\"\"\n\n def __init__(\n self,\n app,\n excluded_urls=None,\n span_details_callback=None,\n tracer_provider=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.span_details_callback = (\n span_details_callback or get_default_span_details\n )\n self.excluded_urls = excluded_urls\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(extract(scope, getter=asgi_getter))\n span_name, additional_attributes = self.span_details_callback(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(span, status_code)\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(span, 200)\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}]}
| 3,228 | 512 |
gh_patches_debug_38868
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-716
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Idea: add post likes to the downloadable archive of my data
I suggest adding a file to the archive that collects the posts (battles, intros, projects, etc.) that I liked. Format: post title, post date, link to the post.
My specific use case: a friend of mine joins the club, and I want to show him some of the things from the past that I liked.
</issue>
<code>
[start of gdpr/serializers.py]
1 from typing import List
2
3 from django.conf import settings
4 from django.urls import reverse
5
6 from bookmarks.models import PostBookmark
7 from comments.models import Comment
8 from posts.models.post import Post
9 from users.models.expertise import UserExpertise
10 from users.models.tags import UserTag
11 from users.models.user import User
12
13
14 def post_to_md(post: Post) -> str:
15 return f"# {post.title}\n\n{post.text}"
16
17
18 def post_to_json(post: Post) -> dict:
19 return {
20 "id": str(post.id),
21 "slug": post.slug,
22 "author_id": str(post.author_id),
23 "type": post.type,
24 "topic": post.topic.name if post.topic else None,
25 "label": post.label,
26 "title": post.title,
27 "text": post.text,
28 "url": post.url,
29 "image": post.image,
30 "metadata": post.metadata,
31 "created_at": post.created_at.isoformat(),
32 "updated_at": post.updated_at.isoformat(),
33 "last_activity_at": post.last_activity_at.isoformat(),
34 "comment_count": post.comment_count,
35 "view_count": post.view_count,
36 "upvotes": post.upvotes,
37 }
38
39
40 def comment_to_md(comment: Comment) -> str:
41 return f"{comment.text}\n\n- {comment.author.slug}"
42
43
44 def comments_to_json(comments: List[Comment]) -> dict:
45 return {
46 "comments": [
47 comment_to_json(comment) for comment in comments
48 ]
49 }
50
51
52 def comment_to_json(comment: Comment) -> dict:
53 return {
54 "id": str(comment.id),
55 "author_id": str(comment.author_id),
56 "post_id": str(comment.post_id),
57 "post_title": comment.post.title,
58 "reply_to_id": str(comment.reply_to_id) if comment.reply_to else None,
59 "title": comment.title,
60 "text": comment.text,
61 "url": comment.url,
62 "metadata": comment.metadata,
63 "created_at": comment.created_at.isoformat(),
64 "updated_at": comment.updated_at.isoformat(),
65 "upvotes": comment.upvotes,
66 "is_visible": comment.is_visible,
67 "is_deleted": comment.is_deleted,
68 "is_pinned": comment.is_pinned,
69 }
70
71
72 def user_to_json(user: User) -> dict:
73 return {
74 "id": str(user.id),
75 "slug": user.slug,
76 "email": user.email,
77 "full_name": user.full_name,
78 "avatar": user.avatar,
79 "company": user.company,
80 "position": user.position,
81 "city": user.city,
82 "country": user.country,
83 "bio": user.bio,
84 "contact": user.contact,
85 "hat": user.hat,
86 "balance": user.balance,
87 "upvotes": user.upvotes,
88 "created_at": user.created_at.isoformat(),
89 "updated_at": user.updated_at.isoformat() if user.updated_at else None,
90 "last_activity_at": user.last_activity_at.isoformat() if user.last_activity_at else None,
91 "membership_started_at": user.membership_started_at.isoformat() if user.membership_started_at else None,
92 "membership_expires_at": user.membership_expires_at.isoformat() if user.membership_expires_at else None,
93 "membership_platform_type": user.membership_platform_type,
94 "patreon_id": user.patreon_id,
95 "email_digest_type": user.email_digest_type,
96 "telegram_id": user.telegram_id,
97 "telegram_data": user.telegram_data,
98 "stripe_id": user.stripe_id,
99 "is_email_verified": user.is_email_verified,
100 "is_email_unsubscribed": user.is_email_unsubscribed,
101 "moderation_status": user.moderation_status,
102 "roles": user.roles,
103 }
104
105
106 def user_tags_to_json(user_tags: List[UserTag]) -> dict:
107 return {
108 "user_tags": [
109 user_tag_to_json(user_tag) for user_tag in user_tags
110 ]
111 }
112
113
114 def user_tag_to_json(user_tag: UserTag) -> dict:
115 return {
116 "user_id": str(user_tag.user_id),
117 "name": user_tag.name,
118 "created_at": user_tag.created_at.isoformat() if user_tag.created_at else None,
119 }
120
121
122 def user_expertises_to_json(user_expertises: List[UserExpertise]) -> dict:
123 return {
124 "user_expertise": [
125 user_expertise_to_json(user_expertise) for user_expertise in user_expertises
126 ]
127 }
128
129
130 def user_expertise_to_json(user_expertise: UserExpertise) -> dict:
131 return {
132 "user_id": str(user_expertise.user_id),
133 "name": user_expertise.name,
134 "value": user_expertise.value,
135 "created_at": user_expertise.created_at.isoformat() if user_expertise.created_at else None,
136 }
137
138
139 def bookmarks_to_json(bookmarks: List[PostBookmark]) -> dict:
140 return {
141 "bookmarks": [
142 bookmark_to_json(bookmark) for bookmark in bookmarks
143 ]
144 }
145
146
147 def bookmark_to_json(bookmark: PostBookmark) -> dict:
148 return {
149 "url": settings.APP_HOST + reverse("show_post", kwargs={
150 "post_type": bookmark.post.type, "post_slug": bookmark.post.slug
151 }),
152 "created_at": bookmark.created_at.isoformat() if bookmark.created_at else None,
153 }
154
[end of gdpr/serializers.py]
[start of gdpr/archive.py]
1 import json
2 import os
3 import random
4 import shutil
5 import tempfile
6 from datetime import datetime
7
8 from django.conf import settings
9 from django_q.tasks import schedule
10
11 from bookmarks.models import PostBookmark
12 from comments.models import Comment
13 from gdpr.serializers import post_to_json, post_to_md, user_to_json, comments_to_json, user_tags_to_json, \
14 user_expertises_to_json, comment_to_md, comment_to_json, bookmarks_to_json
15 from notifications.email.users import send_data_archive_ready_email
16 from posts.models.post import Post
17 from users.models.expertise import UserExpertise
18 from users.models.tags import UserTag
19
20
21 def generate_data_archive(user, save_path=settings.GDPR_ARCHIVE_STORAGE_PATH):
22 with tempfile.TemporaryDirectory() as tmp_dir:
23 user_dir = os.path.join(tmp_dir, user.slug)
24 os.makedirs(user_dir)
25
26 # dump data
27 dump_user_profile(user_dir, user)
28 dump_user_posts(user_dir, user)
29 dump_user_comments(user_dir, user)
30 dump_user_bookmarks(user_dir, user)
31
32 # save zip archive
33 archive_name = f"{user.slug}-{datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}-{random.randint(1000000, 9999998)}"
34 archive_path = shutil.make_archive(os.path.join(save_path, archive_name), "zip", tmp_dir)
35
36 # schedule a task to remove archive after timeout
37 schedule(
38 "gdpr.archive.delete_data_archive",
39 archive_path,
40 next_run=datetime.utcnow() + settings.GDPR_ARCHIVE_DELETE_TIMEDELTA
41 )
42
43 # notify the user
44 send_data_archive_ready_email(
45 user=user,
46 url=settings.GDPR_ARCHIVE_URL + os.path.basename(archive_path),
47 )
48
49
50 def delete_data_archive(archive_path):
51 os.remove(archive_path)
52
53
54 def dump_user_profile(user_dir, user):
55 with open(os.path.join(user_dir, "profile.json"), "w", encoding="utf-8") as f:
56 f.write(json.dumps(user_to_json(user), ensure_ascii=False))
57
58 user_tags = UserTag.objects.filter(user=user)
59 with open(os.path.join(user_dir, "tags.json"), "w", encoding="utf-8") as f:
60 f.write(json.dumps(user_tags_to_json(user_tags), ensure_ascii=False))
61
62 user_expertises = UserExpertise.objects.filter(user=user)
63 with open(os.path.join(user_dir, "expertise.json"), "w", encoding="utf-8") as f:
64 f.write(json.dumps(user_expertises_to_json(user_expertises), ensure_ascii=False))
65
66
67 def dump_user_posts(user_dir, user):
68 posts = Post.objects.filter(author=user).select_related("author", "topic")
69
70 for post in posts:
71 post_dir = os.path.join(user_dir, f"posts/{post.slug}")
72 os.makedirs(post_dir)
73
74 with open(os.path.join(post_dir, f"{post.slug}.md"), "w", encoding="utf-8") as f:
75 f.write(post_to_md(post))
76
77 with open(os.path.join(post_dir, f"{post.slug}.json"), "w", encoding="utf-8") as f:
78 f.write(json.dumps(post_to_json(post), ensure_ascii=False))
79
80 # dump post comments
81 post_comments = Comment.objects.filter(post=post).select_related("author", "post")
82 with open(os.path.join(post_dir, f"comments.json"), "w", encoding="utf-8") as f:
83 f.write(json.dumps(comments_to_json(post_comments), ensure_ascii=False))
84
85
86 def dump_user_comments(user_dir, user):
87 comments = Comment.objects.filter(author=user).select_related("author", "post")
88
89 for comment in comments:
90 comment_dir = os.path.join(user_dir, f"comments/{comment.id}")
91 os.makedirs(comment_dir)
92
93 with open(os.path.join(comment_dir, f"{comment.id}.md"), "w", encoding="utf-8") as f:
94 f.write(comment_to_md(comment))
95
96 with open(os.path.join(comment_dir, f"{comment.id}.json"), "w", encoding="utf-8") as f:
97 f.write(json.dumps(comment_to_json(comment), ensure_ascii=False))
98
99 # dump replies
100 comment_replies = Comment.objects.filter(reply_to=comment).select_related("author", "post")
101 with open(os.path.join(comment_dir, f"replies.json"), "w", encoding="utf-8") as f:
102 f.write(json.dumps(comments_to_json(comment_replies), ensure_ascii=False))
103
104
105 def dump_user_bookmarks(user_dir, user):
106 bookmarks = PostBookmark.objects.filter(user=user).select_related("post")
107
108 with open(os.path.join(user_dir, "bookmarks.json"), "w", encoding="utf-8") as f:
109 f.write(json.dumps(bookmarks_to_json(bookmarks), ensure_ascii=False))
110
[end of gdpr/archive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gdpr/archive.py b/gdpr/archive.py
--- a/gdpr/archive.py
+++ b/gdpr/archive.py
@@ -11,9 +11,10 @@
from bookmarks.models import PostBookmark
from comments.models import Comment
from gdpr.serializers import post_to_json, post_to_md, user_to_json, comments_to_json, user_tags_to_json, \
- user_expertises_to_json, comment_to_md, comment_to_json, bookmarks_to_json
+ user_expertises_to_json, comment_to_md, comment_to_json, bookmarks_to_json, upvotes_to_json
from notifications.email.users import send_data_archive_ready_email
from posts.models.post import Post
+from posts.models.votes import PostVote
from users.models.expertise import UserExpertise
from users.models.tags import UserTag
@@ -28,6 +29,7 @@
dump_user_posts(user_dir, user)
dump_user_comments(user_dir, user)
dump_user_bookmarks(user_dir, user)
+ dump_user_upvotes(user_dir, user)
# save zip archive
archive_name = f"{user.slug}-{datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}-{random.randint(1000000, 9999998)}"
@@ -107,3 +109,10 @@
with open(os.path.join(user_dir, "bookmarks.json"), "w", encoding="utf-8") as f:
f.write(json.dumps(bookmarks_to_json(bookmarks), ensure_ascii=False))
+
+
+def dump_user_upvotes(user_dir, user):
+ upvotes = PostVote.objects.filter(user=user).select_related("post")
+
+ with open(os.path.join(user_dir, "upvotes.json"), "w", encoding="utf-8") as f:
+ f.write(json.dumps(upvotes_to_json(upvotes), ensure_ascii=False))
diff --git a/gdpr/serializers.py b/gdpr/serializers.py
--- a/gdpr/serializers.py
+++ b/gdpr/serializers.py
@@ -6,6 +6,7 @@
from bookmarks.models import PostBookmark
from comments.models import Comment
from posts.models.post import Post
+from posts.models.votes import PostVote
from users.models.expertise import UserExpertise
from users.models.tags import UserTag
from users.models.user import User
@@ -151,3 +152,21 @@
}),
"created_at": bookmark.created_at.isoformat() if bookmark.created_at else None,
}
+
+
+def upvotes_to_json(upvotes: List[PostVote]) -> dict:
+ return {
+ "upvotes": [
+ upvote_to_json(upvote) for upvote in upvotes
+ ]
+ }
+
+
+def upvote_to_json(upvote: PostVote) -> dict:
+ return {
+ "url": settings.APP_HOST + reverse("show_post", kwargs={
+ "post_type": upvote.post.type, "post_slug": upvote.post.slug
+ }),
+ "created_at": upvote.created_at.isoformat() if upvote.created_at else None,
+ "title": upvote.post.title,
+ }
|
{"golden_diff": "diff --git a/gdpr/archive.py b/gdpr/archive.py\n--- a/gdpr/archive.py\n+++ b/gdpr/archive.py\n@@ -11,9 +11,10 @@\n from bookmarks.models import PostBookmark\n from comments.models import Comment\n from gdpr.serializers import post_to_json, post_to_md, user_to_json, comments_to_json, user_tags_to_json, \\\n- user_expertises_to_json, comment_to_md, comment_to_json, bookmarks_to_json\n+ user_expertises_to_json, comment_to_md, comment_to_json, bookmarks_to_json, upvotes_to_json\n from notifications.email.users import send_data_archive_ready_email\n from posts.models.post import Post\n+from posts.models.votes import PostVote\n from users.models.expertise import UserExpertise\n from users.models.tags import UserTag\n \n@@ -28,6 +29,7 @@\n dump_user_posts(user_dir, user)\n dump_user_comments(user_dir, user)\n dump_user_bookmarks(user_dir, user)\n+ dump_user_upvotes(user_dir, user)\n \n # save zip archive\n archive_name = f\"{user.slug}-{datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}-{random.randint(1000000, 9999998)}\"\n@@ -107,3 +109,10 @@\n \n with open(os.path.join(user_dir, \"bookmarks.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(bookmarks_to_json(bookmarks), ensure_ascii=False))\n+\n+\n+def dump_user_upvotes(user_dir, user):\n+ upvotes = PostVote.objects.filter(user=user).select_related(\"post\")\n+\n+ with open(os.path.join(user_dir, \"upvotes.json\"), \"w\", encoding=\"utf-8\") as f:\n+ f.write(json.dumps(upvotes_to_json(upvotes), ensure_ascii=False))\ndiff --git a/gdpr/serializers.py b/gdpr/serializers.py\n--- a/gdpr/serializers.py\n+++ b/gdpr/serializers.py\n@@ -6,6 +6,7 @@\n from bookmarks.models import PostBookmark\n from comments.models import Comment\n from posts.models.post import Post\n+from posts.models.votes import PostVote\n from users.models.expertise import UserExpertise\n from users.models.tags import UserTag\n from users.models.user import User\n@@ -151,3 +152,21 @@\n }),\n \"created_at\": bookmark.created_at.isoformat() if bookmark.created_at else None,\n }\n+\n+\n+def upvotes_to_json(upvotes: List[PostVote]) -> dict:\n+ return {\n+ \"upvotes\": [\n+ upvote_to_json(upvote) for upvote in upvotes\n+ ]\n+ }\n+\n+\n+def upvote_to_json(upvote: PostVote) -> dict:\n+ return {\n+ \"url\": settings.APP_HOST + reverse(\"show_post\", kwargs={\n+ \"post_type\": upvote.post.type, \"post_slug\": upvote.post.slug\n+ }),\n+ \"created_at\": upvote.created_at.isoformat() if upvote.created_at else None,\n+ \"title\": upvote.post.title,\n+ }\n", "issue": "\u0418\u0434\u0435\u044f: \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u043b\u0430\u0439\u043a\u0438 \u043f\u043e\u0441\u0442\u043e\u0432 \u0432 \u0432\u044b\u0433\u0440\u0443\u0436\u0430\u0435\u043c\u044b\u0439 \u0430\u0440\u0445\u0438\u0432 \u0441 \u043c\u043e\u0438\u043c\u0438 \u0434\u0430\u043d\u043d\u044b\u043c\u0438\n\u041f\u0440\u0435\u0434\u043b\u0430\u0433\u0430\u044e \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0432 \u0430\u0440\u0445\u0438\u0432 \u0444\u0430\u0439\u043b \u0433\u0434\u0435 \u0431\u0443\u0434\u0443\u0442 \u0441\u043e\u0431\u0440\u0430\u043d\u044b \u043f\u043e\u0441\u0442\u044b (\u0431\u0430\u0442\u043b\u044b, \u0438\u043d\u0442\u0440\u043e, \u043f\u0440\u043e\u0435\u043a\u0442\u044b, \u0438 \u0442.\u0434) \u043a\u043e\u0442\u043e\u0440\u044b\u0435 \u044f \u043b\u0430\u0439\u043a\u0430\u043b. 
\u0424\u043e\u0440\u043c\u0430\u0442: \u043d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u043f\u043e\u0441\u0442\u0430, \u0434\u0430\u0442\u0430 \u043f\u043e\u0441\u0442\u0430, \u0441\u0441\u044b\u043b\u043a\u0430 \u043d\u0430 \u043f\u043e\u0441\u0442.\r\n\r\n\u041c\u043e\u0439 \u043a\u043e\u043d\u043a\u0440\u0435\u0442\u043d\u044b\u0439 \u043a\u0435\u0439\u0441: \u0432 \u043a\u043b\u0443\u0431 \u0437\u0430\u0445\u043e\u0434\u0438\u0442 \u043c\u043e\u0439 \u0434\u0440\u0443\u0433, \u0445\u043e\u0447\u0443 \u0435\u043c\u0443 \u043f\u043e\u043a\u0430\u0437\u0430\u0442\u044c \u0447\u0442\u043e-\u0442\u043e \u0438\u0437 \u0442\u043e\u0433\u043e, \u0447\u0442\u043e \u043c\u043d\u0435 \u043f\u043e\u043d\u0440\u0430\u0432\u0438\u043b\u043e\u0441\u044c \u0438\u0437 \u043f\u0440\u043e\u0448\u043b\u043e\u0433\u043e.\n", "before_files": [{"content": "from typing import List\n\nfrom django.conf import settings\nfrom django.urls import reverse\n\nfrom bookmarks.models import PostBookmark\nfrom comments.models import Comment\nfrom posts.models.post import Post\nfrom users.models.expertise import UserExpertise\nfrom users.models.tags import UserTag\nfrom users.models.user import User\n\n\ndef post_to_md(post: Post) -> str:\n return f\"# {post.title}\\n\\n{post.text}\"\n\n\ndef post_to_json(post: Post) -> dict:\n return {\n \"id\": str(post.id),\n \"slug\": post.slug,\n \"author_id\": str(post.author_id),\n \"type\": post.type,\n \"topic\": post.topic.name if post.topic else None,\n \"label\": post.label,\n \"title\": post.title,\n \"text\": post.text,\n \"url\": post.url,\n \"image\": post.image,\n \"metadata\": post.metadata,\n \"created_at\": post.created_at.isoformat(),\n \"updated_at\": post.updated_at.isoformat(),\n \"last_activity_at\": post.last_activity_at.isoformat(),\n \"comment_count\": post.comment_count,\n \"view_count\": post.view_count,\n \"upvotes\": post.upvotes,\n }\n\n\ndef comment_to_md(comment: Comment) -> str:\n return f\"{comment.text}\\n\\n- {comment.author.slug}\"\n\n\ndef comments_to_json(comments: List[Comment]) -> dict:\n return {\n \"comments\": [\n comment_to_json(comment) for comment in comments\n ]\n }\n\n\ndef comment_to_json(comment: Comment) -> dict:\n return {\n \"id\": str(comment.id),\n \"author_id\": str(comment.author_id),\n \"post_id\": str(comment.post_id),\n \"post_title\": comment.post.title,\n \"reply_to_id\": str(comment.reply_to_id) if comment.reply_to else None,\n \"title\": comment.title,\n \"text\": comment.text,\n \"url\": comment.url,\n \"metadata\": comment.metadata,\n \"created_at\": comment.created_at.isoformat(),\n \"updated_at\": comment.updated_at.isoformat(),\n \"upvotes\": comment.upvotes,\n \"is_visible\": comment.is_visible,\n \"is_deleted\": comment.is_deleted,\n \"is_pinned\": comment.is_pinned,\n }\n\n\ndef user_to_json(user: User) -> dict:\n return {\n \"id\": str(user.id),\n \"slug\": user.slug,\n \"email\": user.email,\n \"full_name\": user.full_name,\n \"avatar\": user.avatar,\n \"company\": user.company,\n \"position\": user.position,\n \"city\": user.city,\n \"country\": user.country,\n \"bio\": user.bio,\n \"contact\": user.contact,\n \"hat\": user.hat,\n \"balance\": user.balance,\n \"upvotes\": user.upvotes,\n \"created_at\": user.created_at.isoformat(),\n \"updated_at\": user.updated_at.isoformat() if user.updated_at else None,\n \"last_activity_at\": user.last_activity_at.isoformat() if user.last_activity_at else None,\n \"membership_started_at\": user.membership_started_at.isoformat() if user.membership_started_at else None,\n 
\"membership_expires_at\": user.membership_expires_at.isoformat() if user.membership_expires_at else None,\n \"membership_platform_type\": user.membership_platform_type,\n \"patreon_id\": user.patreon_id,\n \"email_digest_type\": user.email_digest_type,\n \"telegram_id\": user.telegram_id,\n \"telegram_data\": user.telegram_data,\n \"stripe_id\": user.stripe_id,\n \"is_email_verified\": user.is_email_verified,\n \"is_email_unsubscribed\": user.is_email_unsubscribed,\n \"moderation_status\": user.moderation_status,\n \"roles\": user.roles,\n }\n\n\ndef user_tags_to_json(user_tags: List[UserTag]) -> dict:\n return {\n \"user_tags\": [\n user_tag_to_json(user_tag) for user_tag in user_tags\n ]\n }\n\n\ndef user_tag_to_json(user_tag: UserTag) -> dict:\n return {\n \"user_id\": str(user_tag.user_id),\n \"name\": user_tag.name,\n \"created_at\": user_tag.created_at.isoformat() if user_tag.created_at else None,\n }\n\n\ndef user_expertises_to_json(user_expertises: List[UserExpertise]) -> dict:\n return {\n \"user_expertise\": [\n user_expertise_to_json(user_expertise) for user_expertise in user_expertises\n ]\n }\n\n\ndef user_expertise_to_json(user_expertise: UserExpertise) -> dict:\n return {\n \"user_id\": str(user_expertise.user_id),\n \"name\": user_expertise.name,\n \"value\": user_expertise.value,\n \"created_at\": user_expertise.created_at.isoformat() if user_expertise.created_at else None,\n }\n\n\ndef bookmarks_to_json(bookmarks: List[PostBookmark]) -> dict:\n return {\n \"bookmarks\": [\n bookmark_to_json(bookmark) for bookmark in bookmarks\n ]\n }\n\n\ndef bookmark_to_json(bookmark: PostBookmark) -> dict:\n return {\n \"url\": settings.APP_HOST + reverse(\"show_post\", kwargs={\n \"post_type\": bookmark.post.type, \"post_slug\": bookmark.post.slug\n }),\n \"created_at\": bookmark.created_at.isoformat() if bookmark.created_at else None,\n }\n", "path": "gdpr/serializers.py"}, {"content": "import json\nimport os\nimport random\nimport shutil\nimport tempfile\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django_q.tasks import schedule\n\nfrom bookmarks.models import PostBookmark\nfrom comments.models import Comment\nfrom gdpr.serializers import post_to_json, post_to_md, user_to_json, comments_to_json, user_tags_to_json, \\\n user_expertises_to_json, comment_to_md, comment_to_json, bookmarks_to_json\nfrom notifications.email.users import send_data_archive_ready_email\nfrom posts.models.post import Post\nfrom users.models.expertise import UserExpertise\nfrom users.models.tags import UserTag\n\n\ndef generate_data_archive(user, save_path=settings.GDPR_ARCHIVE_STORAGE_PATH):\n with tempfile.TemporaryDirectory() as tmp_dir:\n user_dir = os.path.join(tmp_dir, user.slug)\n os.makedirs(user_dir)\n\n # dump data\n dump_user_profile(user_dir, user)\n dump_user_posts(user_dir, user)\n dump_user_comments(user_dir, user)\n dump_user_bookmarks(user_dir, user)\n\n # save zip archive\n archive_name = f\"{user.slug}-{datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}-{random.randint(1000000, 9999998)}\"\n archive_path = shutil.make_archive(os.path.join(save_path, archive_name), \"zip\", tmp_dir)\n\n # schedule a task to remove archive after timeout\n schedule(\n \"gdpr.archive.delete_data_archive\",\n archive_path,\n next_run=datetime.utcnow() + settings.GDPR_ARCHIVE_DELETE_TIMEDELTA\n )\n\n # notify the user\n send_data_archive_ready_email(\n user=user,\n url=settings.GDPR_ARCHIVE_URL + os.path.basename(archive_path),\n )\n\n\ndef delete_data_archive(archive_path):\n 
os.remove(archive_path)\n\n\ndef dump_user_profile(user_dir, user):\n with open(os.path.join(user_dir, \"profile.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(user_to_json(user), ensure_ascii=False))\n\n user_tags = UserTag.objects.filter(user=user)\n with open(os.path.join(user_dir, \"tags.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(user_tags_to_json(user_tags), ensure_ascii=False))\n\n user_expertises = UserExpertise.objects.filter(user=user)\n with open(os.path.join(user_dir, \"expertise.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(user_expertises_to_json(user_expertises), ensure_ascii=False))\n\n\ndef dump_user_posts(user_dir, user):\n posts = Post.objects.filter(author=user).select_related(\"author\", \"topic\")\n\n for post in posts:\n post_dir = os.path.join(user_dir, f\"posts/{post.slug}\")\n os.makedirs(post_dir)\n\n with open(os.path.join(post_dir, f\"{post.slug}.md\"), \"w\", encoding=\"utf-8\") as f:\n f.write(post_to_md(post))\n\n with open(os.path.join(post_dir, f\"{post.slug}.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(post_to_json(post), ensure_ascii=False))\n\n # dump post comments\n post_comments = Comment.objects.filter(post=post).select_related(\"author\", \"post\")\n with open(os.path.join(post_dir, f\"comments.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(comments_to_json(post_comments), ensure_ascii=False))\n\n\ndef dump_user_comments(user_dir, user):\n comments = Comment.objects.filter(author=user).select_related(\"author\", \"post\")\n\n for comment in comments:\n comment_dir = os.path.join(user_dir, f\"comments/{comment.id}\")\n os.makedirs(comment_dir)\n\n with open(os.path.join(comment_dir, f\"{comment.id}.md\"), \"w\", encoding=\"utf-8\") as f:\n f.write(comment_to_md(comment))\n\n with open(os.path.join(comment_dir, f\"{comment.id}.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(comment_to_json(comment), ensure_ascii=False))\n\n # dump replies\n comment_replies = Comment.objects.filter(reply_to=comment).select_related(\"author\", \"post\")\n with open(os.path.join(comment_dir, f\"replies.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(comments_to_json(comment_replies), ensure_ascii=False))\n\n\ndef dump_user_bookmarks(user_dir, user):\n bookmarks = PostBookmark.objects.filter(user=user).select_related(\"post\")\n\n with open(os.path.join(user_dir, \"bookmarks.json\"), \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(bookmarks_to_json(bookmarks), ensure_ascii=False))\n", "path": "gdpr/archive.py"}]}
| 3,477 | 705 |
gh_patches_debug_2904
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-4228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configured keyboards limited to 3
### The issue:
In my config I've defined 4 keyboard layouts; however, in the widget on the bar it never reaches the fourth layout when left-clicking. When the third layout is removed (moving the fourth into the third position) it is suddenly accessible, so it's not a problem with the layout itself. I don't notice any logs that would apply to this.
```
widget.KeyboardLayout(
font = defaults['font'],
fontsize = defaults['fontsize'],
configured_keyboards = ['us', 'es', 'semimak-jq', 'mtgap'],
display_map = { #makes everything lowercase
'us': 'us',
'es': 'es',
'workman': 'wm',
'semimak': 'sm',
'mtgap': 'mt',
}
),
```
From my config, I defined us, es, semimak-jq, and mtgap, but I can never rotate onto mtgap unless I remove semimak-jq. When I manually set the layout with setxkbmap, it is correctly displayed as 'mt' in the widget, but I just can't rotate onto it via left-click on the widget.
Qtile Version: 0.22.1, X11
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
</issue>
<code>
[start of libqtile/widget/keyboardlayout.py]
1 # Copyright (c) 2013 Jacob Mourelos
2 # Copyright (c) 2014 Shepilov Vladislav
3 # Copyright (c) 2014-2015 Sean Vig
4 # Copyright (c) 2014 Tycho Andersen
5 # Copyright (c) 2019 zordsdavini
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
24
25 from __future__ import annotations
26
27 import re
28 from abc import ABCMeta, abstractmethod
29 from subprocess import CalledProcessError, check_output
30 from typing import TYPE_CHECKING
31
32 from libqtile.command.base import expose_command
33 from libqtile.confreader import ConfigError
34 from libqtile.log_utils import logger
35 from libqtile.widget import base
36
37 if TYPE_CHECKING:
38 from libqtile.core.manager import Qtile
39
40
41 class _BaseLayoutBackend(metaclass=ABCMeta):
42 def __init__(self, qtile: Qtile):
43 """
44 This handles getting and setter the keyboard layout with the appropriate
45 backend.
46 """
47
48 @abstractmethod
49 def get_keyboard(self) -> str:
50 """
51 Return the currently used keyboard layout as a string
52
53 Examples: "us", "us dvorak". In case of error returns "unknown".
54 """
55
56 def set_keyboard(self, layout: str, options: str | None) -> None:
57 """
58 Set the keyboard layout with specified options.
59 """
60
61
62 class _X11LayoutBackend(_BaseLayoutBackend):
63 kb_layout_regex = re.compile(r"layout:\s+(?P<layout>\w+)")
64 kb_variant_regex = re.compile(r"variant:\s+(?P<variant>\w+)")
65
66 def get_keyboard(self) -> str:
67 try:
68 command = "setxkbmap -verbose 10 -query"
69 setxkbmap_output = check_output(command.split(" ")).decode()
70 except CalledProcessError:
71 logger.exception("Can not get the keyboard layout:")
72 return "unknown"
73 except OSError:
74 logger.exception("Please, check that xset is available:")
75 return "unknown"
76
77 match_layout = self.kb_layout_regex.search(setxkbmap_output)
78 if match_layout is None:
79 return "ERR"
80 keyboard = match_layout.group("layout")
81
82 match_variant = self.kb_variant_regex.search(setxkbmap_output)
83 if match_variant:
84 keyboard += " " + match_variant.group("variant")
85 return keyboard
86
87 def set_keyboard(self, layout: str, options: str | None) -> None:
88 command = ["setxkbmap"]
89 command.extend(layout.split(" "))
90 if options:
91 command.extend(["-option", options])
92 try:
93 check_output(command)
94 except CalledProcessError:
95 logger.error("Can not change the keyboard layout:")
96 except OSError:
97 logger.error("Please, check that setxkbmap is available:")
98
99
100 class _WaylandLayoutBackend(_BaseLayoutBackend):
101 def __init__(self, qtile: Qtile) -> None:
102 self.set_keymap = qtile.core.set_keymap
103 self._layout: str = ""
104
105 def get_keyboard(self) -> str:
106 return self._layout
107
108 def set_keyboard(self, layout: str, options: str | None) -> None:
109 maybe_variant: str | None = None
110 if " " in layout:
111 layout_name, maybe_variant = layout.split(" ", maxsplit=1)
112 else:
113 layout_name = layout
114 self.set_keymap(layout_name, options, maybe_variant)
115 self._layout = layout
116
117
118 layout_backends = {
119 "x11": _X11LayoutBackend,
120 "wayland": _WaylandLayoutBackend,
121 }
122
123
124 class KeyboardLayout(base.InLoopPollText):
125 """Widget for changing and displaying the current keyboard layout
126
127 To use this widget effectively you need to specify keyboard layouts you want to use
128 (using "configured_keyboards") and bind function "next_keyboard" to specific keys in
129 order to change layouts.
130
131 For example:
132
133 Key([mod], "space", lazy.widget["keyboardlayout"].next_keyboard(), desc="Next keyboard layout."),
134
135 When running Qtile with the X11 backend, this widget requires setxkbmap to be available.
136 """
137
138 defaults = [
139 ("update_interval", 1, "Update time in seconds."),
140 (
141 "configured_keyboards",
142 ["us"],
143 "A list of predefined keyboard layouts "
144 "represented as strings. For example: "
145 "['us', 'us colemak', 'es', 'fr'].",
146 ),
147 (
148 "display_map",
149 {},
150 "Custom display of layout. Key should be in format "
151 "'layout variant'. For example: "
152 "{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}",
153 ),
154 ("option", None, "string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'"),
155 ]
156
157 def __init__(self, **config):
158 base.InLoopPollText.__init__(self, **config)
159 self.add_defaults(KeyboardLayout.defaults)
160 self.add_callbacks({"Button1": self.next_keyboard})
161
162 def _configure(self, qtile, bar):
163 base.InLoopPollText._configure(self, qtile, bar)
164
165 if qtile.core.name not in layout_backends:
166 raise ConfigError("KeyboardLayout does not support backend: " + qtile.core.name)
167
168 self.backend = layout_backends[qtile.core.name](qtile)
169 self.backend.set_keyboard(self.configured_keyboards[0], self.option)
170
171 @expose_command()
172 def next_keyboard(self):
173 """set the next layout in the list of configured keyboard layouts as
174 new current layout in use
175
176 If the current keyboard layout is not in the list, it will set as new
177 layout the first one in the list.
178 """
179
180 current_keyboard = self.backend.get_keyboard()
181 if current_keyboard in self.configured_keyboards:
182 # iterate the list circularly
183 next_keyboard = self.configured_keyboards[
184 (self.configured_keyboards.index(current_keyboard) + 1)
185 % len(self.configured_keyboards)
186 ]
187 else:
188 next_keyboard = self.configured_keyboards[0]
189
190 self.backend.set_keyboard(next_keyboard, self.option)
191
192 self.tick()
193
194 def poll(self):
195 keyboard = self.backend.get_keyboard()
196 if keyboard in self.display_map.keys():
197 return self.display_map[keyboard]
198 return keyboard.upper()
199
[end of libqtile/widget/keyboardlayout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py
--- a/libqtile/widget/keyboardlayout.py
+++ b/libqtile/widget/keyboardlayout.py
@@ -60,7 +60,7 @@
class _X11LayoutBackend(_BaseLayoutBackend):
- kb_layout_regex = re.compile(r"layout:\s+(?P<layout>\w+)")
+ kb_layout_regex = re.compile(r"layout:\s+(?P<layout>[\w-]+)")
kb_variant_regex = re.compile(r"variant:\s+(?P<variant>\w+)")
def get_keyboard(self) -> str:
|
{"golden_diff": "diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py\n--- a/libqtile/widget/keyboardlayout.py\n+++ b/libqtile/widget/keyboardlayout.py\n@@ -60,7 +60,7 @@\n \n \n class _X11LayoutBackend(_BaseLayoutBackend):\n- kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>\\w+)\")\n+ kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>[\\w-]+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n \n def get_keyboard(self) -> str:\n", "issue": "Configured keyboards limited to 3\n### The issue:\n\nIn my config I've defined 4 keyboard layouts, however in the widget on the bar, it never reaches the fourth layout when left-clicking. When the third layout is removed (moving the fourth into the third position) it is suddenly accessible so it's not a problem with the layout itself. I don't notice any logs that would apply to this.\r\n\r\n```\r\nwidget.KeyboardLayout(\r\n font = defaults['font'],\r\n fontsize = defaults['fontsize'],\r\n configured_keyboards = ['us', 'es', 'semimak-jq', 'mtgap'],\r\n display_map = { #makes everything lowercase\r\n 'us': 'us',\r\n 'es': 'es',\r\n 'workman': 'wm',\r\n 'semimak': 'sm',\r\n 'mtgap': 'mt',\r\n }\r\n),\r\n```\r\n\r\nFrom my config, I defined us, es, semimak-jq, and mtgap, but I can never rotate onto mtgap unless I remove semimak-jq. When I manually set the layout with setxkbmap, it is correctly displayed as 'mt' in the widget, I just can't rotate onto it via left-click on the widget.\r\n\r\nQtile Version: 0.22.1, X11\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2013 Jacob Mourelos\n# Copyright (c) 2014 Shepilov Vladislav\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2019 zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom subprocess import CalledProcessError, check_output\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.command.base import expose_command\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from libqtile.core.manager import Qtile\n\n\nclass _BaseLayoutBackend(metaclass=ABCMeta):\n def __init__(self, qtile: Qtile):\n \"\"\"\n This handles getting and setter the keyboard layout with the appropriate\n backend.\n \"\"\"\n\n @abstractmethod\n def get_keyboard(self) -> str:\n \"\"\"\n Return the currently used keyboard layout as a string\n\n Examples: \"us\", \"us dvorak\". In case of error returns \"unknown\".\n \"\"\"\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n \"\"\"\n Set the keyboard layout with specified options.\n \"\"\"\n\n\nclass _X11LayoutBackend(_BaseLayoutBackend):\n kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>\\w+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n\n def get_keyboard(self) -> str:\n try:\n command = \"setxkbmap -verbose 10 -query\"\n setxkbmap_output = check_output(command.split(\" \")).decode()\n except CalledProcessError:\n logger.exception(\"Can not get the keyboard layout:\")\n return \"unknown\"\n except OSError:\n logger.exception(\"Please, check that xset is available:\")\n return \"unknown\"\n\n match_layout = self.kb_layout_regex.search(setxkbmap_output)\n if match_layout is None:\n return \"ERR\"\n keyboard = match_layout.group(\"layout\")\n\n match_variant = self.kb_variant_regex.search(setxkbmap_output)\n if match_variant:\n keyboard += \" \" + match_variant.group(\"variant\")\n return keyboard\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n command = [\"setxkbmap\"]\n command.extend(layout.split(\" \"))\n if options:\n command.extend([\"-option\", options])\n try:\n check_output(command)\n except CalledProcessError:\n logger.error(\"Can not change the keyboard layout:\")\n except OSError:\n logger.error(\"Please, check that setxkbmap is available:\")\n\n\nclass _WaylandLayoutBackend(_BaseLayoutBackend):\n def __init__(self, qtile: Qtile) -> None:\n self.set_keymap = qtile.core.set_keymap\n self._layout: str = \"\"\n\n def get_keyboard(self) -> str:\n return self._layout\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n maybe_variant: str | None = None\n if \" \" in layout:\n layout_name, maybe_variant = layout.split(\" \", maxsplit=1)\n else:\n layout_name = layout\n self.set_keymap(layout_name, options, maybe_variant)\n self._layout = layout\n\n\nlayout_backends = {\n \"x11\": _X11LayoutBackend,\n \"wayland\": _WaylandLayoutBackend,\n}\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"Widget for changing and displaying the current keyboard layout\n\n To use this widget effectively you need to specify keyboard layouts you want to use\n (using \"configured_keyboards\") and bind function \"next_keyboard\" to specific keys in\n order to change layouts.\n\n For example:\n\n Key([mod], \"space\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout.\"),\n\n When running Qtile 
with the X11 backend, this widget requires setxkbmap to be available.\n \"\"\"\n\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\n \"configured_keyboards\",\n [\"us\"],\n \"A list of predefined keyboard layouts \"\n \"represented as strings. For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\",\n ),\n (\n \"display_map\",\n {},\n \"Custom display of layout. Key should be in format \"\n \"'layout variant'. For example: \"\n \"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}\",\n ),\n (\"option\", None, \"string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n self.add_callbacks({\"Button1\": self.next_keyboard})\n\n def _configure(self, qtile, bar):\n base.InLoopPollText._configure(self, qtile, bar)\n\n if qtile.core.name not in layout_backends:\n raise ConfigError(\"KeyboardLayout does not support backend: \" + qtile.core.name)\n\n self.backend = layout_backends[qtile.core.name](qtile)\n self.backend.set_keyboard(self.configured_keyboards[0], self.option)\n\n @expose_command()\n def next_keyboard(self):\n \"\"\"set the next layout in the list of configured keyboard layouts as\n new current layout in use\n\n If the current keyboard layout is not in the list, it will set as new\n layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.backend.get_keyboard()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1)\n % len(self.configured_keyboards)\n ]\n else:\n next_keyboard = self.configured_keyboards[0]\n\n self.backend.set_keyboard(next_keyboard, self.option)\n\n self.tick()\n\n def poll(self):\n keyboard = self.backend.get_keyboard()\n if keyboard in self.display_map.keys():\n return self.display_map[keyboard]\n return keyboard.upper()\n", "path": "libqtile/widget/keyboardlayout.py"}]}
| 2,959 | 146 |
gh_patches_debug_29353
|
rasdani/github-patches
|
git_diff
|
nextcloud__appstore-431
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Continuation from #414: comment defaults
On initial page load the comment language does not default to English if no comments are present in the user's language
</issue>
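For orientation before the code listing: django-parler's automatic fallback does not update an instance's "current" language, so a form seeded from `get_current_language()` can end up pointing at a language that has no stored comment. A minimal sketch of resolving a usable language by hand, using the parler API the model exposes (the helper name and the `rating` variable are illustrative only):

```python
def resolve_comment_language(rating):
    """Pick a language code for which this AppRating actually has a translation."""
    current = rating.get_current_language()
    if rating.has_translation(current):
        return current
    # Walk parler's configured fallbacks and switch to the first one that exists.
    for fallback in rating.get_fallback_languages():
        if rating.has_translation(fallback):
            rating.set_current_language(fallback)
            return fallback
    return current  # nothing better found; keep the session language
```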
<code>
[start of nextcloudappstore/core/views.py]
1 from urllib.parse import urlencode
2
3 from django.conf import settings
4 from django.contrib.auth.mixins import LoginRequiredMixin
5 from django.contrib.auth.models import User
6 from django.core.exceptions import ObjectDoesNotExist
7 from django.db.models import Q
8 from django.http import HttpResponse
9 from django.shortcuts import get_object_or_404, redirect
10 from django.utils.functional import cached_property
11 from django.utils.translation import get_language, get_language_info
12 from django.views.decorators.http import etag
13 from django.views.generic import FormView
14 from django.views.generic.base import TemplateView
15 from django.views.generic.detail import DetailView
16 from django.views.generic.list import ListView
17 from rest_framework.generics import ListAPIView
18 from semantic_version import Version
19
20 from nextcloudappstore.core.api.v1.serializers import AppRatingSerializer
21 from nextcloudappstore.core.caching import app_etag
22 from nextcloudappstore.core.facades import flatmap
23 from nextcloudappstore.core.forms import AppRatingForm, AppReleaseUploadForm, \
24 AppRegisterForm
25 from nextcloudappstore.core.models import App, Category, AppRating, \
26 NextcloudRelease
27 from nextcloudappstore.core.scaffolding.archive import build_archive
28 from nextcloudappstore.core.scaffolding.forms import AppScaffoldingForm
29 from nextcloudappstore.core.versioning import pad_min_version
30
31
32 @etag(app_etag)
33 def app_description(request, id):
34 app = get_object_or_404(App, id=id)
35 return HttpResponse(app.description, content_type='text/plain')
36
37
38 class AppRatingApi(ListAPIView):
39 serializer_class = AppRatingSerializer
40
41 def get_queryset(self):
42 id = self.kwargs.get('id')
43 app = get_object_or_404(App, id=id)
44 return AppRating.objects.language(self.request.LANGUAGE_CODE).filter(
45 app=app)
46
47
48 class LegalNoticeView(TemplateView):
49 template_name = 'legal.html'
50
51
52 class AppDetailView(DetailView):
53 queryset = App.objects.prefetch_related('releases', 'screenshots', 'owner',
54 'co_maintainers')
55 template_name = 'app/detail.html'
56 slug_field = 'id'
57 slug_url_kwarg = 'id'
58
59 def post(self, request, id):
60 form = AppRatingForm(request.POST, id=id, user=request.user)
61 # there is no way that a rating can be invalid by default
62 if form.is_valid() and request.user.is_authenticated:
63 form.save()
64 return redirect('app-detail', id=id)
65
66 def get_context_data(self, **kwargs):
67 context = super().get_context_data(**kwargs)
68 context['DISCOURSE_URL'] = settings.DISCOURSE_URL.rstrip('/')
69 context['rating_form'] = AppRatingForm(
70 initial={'language_code': get_language()})
71
72 ratings = AppRating.objects.filter(app=context['app'])
73 rating_languages = flatmap(
74 lambda r: r.get_available_languages(), ratings)
75
76 # make sure current session language is in the list even if there are
77 # no comments
78 rating_languages = list(rating_languages)
79 if get_language() not in rating_languages:
80 rating_languages.append(get_language())
81
82 context['languages'] = sorted(rating_languages)
83 context['user_has_rated_app'] = False
84 if self.request.user.is_authenticated:
85 try:
86 app_rating = AppRating.objects.get(user=self.request.user,
87 app=context['app'])
88 # when accessing an empty comment django-parler tries to
89 # fall back to the default language. However for comments
90 # the default (English) does not always exist. Unfortunately
91 # it throws the same exception as non existing models,
92 # so we need to access it beforehand
93 try:
94 comment = app_rating.comment
95 except AppRating.DoesNotExist:
96 comment = ''
97
98 context['rating_form'] = AppRatingForm({
99 'rating': app_rating.rating,
100 'comment': comment,
101 'language_code': app_rating.get_current_language(),
102 })
103 context['user_has_rated_app'] = True
104 except AppRating.DoesNotExist:
105 pass
106 context['categories'] = Category.objects.prefetch_related(
107 'translations').all()
108 context['latest_releases_by_platform_v'] = \
109 self.object.latest_releases_by_platform_v()
110 return context
111
112
113 class AppReleasesView(DetailView):
114 queryset = App.objects.prefetch_related(
115 'releases',
116 'releases__databases',
117 'releases__licenses',
118 'releases__phpextensiondependencies__php_extension',
119 'releases__databasedependencies__database',
120 'releases__shell_commands'
121 )
122 template_name = 'app/releases.html'
123 slug_field = 'id'
124 slug_url_kwarg = 'id'
125
126 def get_context_data(self, **kwargs):
127 context = super().get_context_data(**kwargs)
128 context['categories'] = Category.objects.prefetch_related(
129 'translations').all()
130
131 releases = self.object.releases_by_platform_v()
132 unstables = self.object.unstable_releases_by_platform_v()
133 versions = set(list(releases.keys()) + list(unstables.keys()))
134 all_releases = list(map(
135 lambda v: (v, releases.get(v, []) + unstables.get(v, [])),
136 versions))
137 context['releases_by_platform_v'] = \
138 self._sort_by_platform_v(all_releases)
139 return context
140
141 def _sort_by_platform_v(self, releases_by_platform, reverse=True):
142 """Sorts a list of tuples like (<platform version>, [releases]) by
143 platform version.
144
145 :param releases_by_platform: A list of tuples.
146 :param reverse: Descending order if True, ascending otherwise.
147 :return sorted list of tuples.
148 """
149
150 return sorted(releases_by_platform, reverse=reverse,
151 key=lambda v: Version(pad_min_version(v[0])))
152
153
154 class CategoryAppListView(ListView):
155 model = App
156 template_name = 'app/list.html'
157 allow_empty = True
158
159 def get_queryset(self):
160 order_by = self.request.GET.get('order_by', 'rating_overall')
161 ordering = self.request.GET.get('ordering', 'desc')
162 is_featured = self.request.GET.get('is_featured', False)
163 maintainer = self.request.GET.get('maintainer', False)
164 sort_columns = []
165
166 allowed_order_by = {'name', 'last_release', 'rating_overall',
167 'rating_recent'}
168 if order_by in allowed_order_by:
169 if order_by == 'name':
170 order_by = 'translations__name'
171 if ordering == 'desc':
172 sort_columns.append('-' + order_by)
173 else:
174 sort_columns.append(order_by)
175
176 lang = get_language_info(get_language())['code']
177 category_id = self.kwargs['id']
178 queryset = App.objects.search(self.search_terms, lang).order_by(
179 *sort_columns).filter(releases__gt=0)
180 if maintainer:
181 try:
182 user = User.objects.get_by_natural_key(maintainer)
183 queryset = queryset.filter(Q(owner=user) |
184 Q(co_maintainers=user))
185 except ObjectDoesNotExist:
186 return queryset.none()
187 if category_id:
188 queryset = queryset.filter(categories__id=category_id)
189 if is_featured == "true":
190 queryset = queryset.filter(is_featured=True)
191 return queryset.prefetch_related('screenshots', 'translations')
192
193 def get_context_data(self, **kwargs):
194 context = super().get_context_data(**kwargs)
195 context['categories'] = Category.objects.prefetch_related(
196 'translations').all()
197 category_id = self.kwargs['id']
198 if category_id:
199 context['current_category'] = Category.objects.get(id=category_id)
200 if self.search_terms:
201 context['search_query'] = ' '.join(self.search_terms)
202 context['url_params'] = self.url_params
203 return context
204
205 @cached_property
206 def url_params(self):
207 """URL encoded strings with the GET params of the last request.
208
209 Intended for preserving GET params upon clicking a link by including
210 one (and only one) of these strings in the "href" attribute.
211
212 The parameters are divided into three groups: search, filters and
213 ordering. In addition to these three, the returned dict also contains
214 some combinations of them, as specified by the dict keys.
215
216 No leading "?" or "&".
217
218 :return dict with URL encoded strings.
219 """
220
221 search = self._url_params_str('search')
222 filters = self._url_params_str('is_featured', 'maintainer')
223 ordering = self._url_params_str('order_by', 'ordering')
224
225 return {
226 'search': search,
227 'filters': filters,
228 'ordering': ordering,
229 'search_filters': self._join_url_params_strs(search, filters),
230 'filters_ordering': self._join_url_params_strs(filters, ordering),
231 }
232
233 def _url_params_str(self, *params):
234 args = map(lambda param: (param, self.request.GET.get(param, '')),
235 params)
236 present_args = filter(lambda a: a[1], args)
237 return urlencode(dict(present_args))
238
239 def _join_url_params_strs(self, *strings):
240 return '&'.join(filter(None, strings))
241
242 @cached_property
243 def search_terms(self):
244 return self.request.GET.get('search', '').strip().split()
245
246
247 class AppUploadView(LoginRequiredMixin, TemplateView):
248 template_name = 'app/upload.html'
249
250 def get_context_data(self, **kwargs):
251 context = super().get_context_data(**kwargs)
252 context['form'] = AppReleaseUploadForm()
253 return context
254
255
256 class AppScaffoldingView(FormView):
257 template_name = 'app/scaffold.html'
258 form_class = AppScaffoldingForm
259
260 def get_initial(self):
261 init = {
262 'platform': NextcloudRelease.get_current_main(),
263 'categories': ('tools',)
264 }
265 if self.request.user.is_authenticated:
266 user = self.request.user
267 init['author_name'] = '%s %s' % (user.first_name, user.last_name)
268 init['author_email'] = user.email
269 return init
270
271 def form_valid(self, form):
272 buffer = build_archive(form.cleaned_data)
273 response = HttpResponse(content_type='application/tar+gzip')
274 response['Content-Disposition'] = 'attachment; filename="app.tar.gz"'
275 value = buffer.getvalue()
276 buffer.close()
277 response.write(value)
278 return response
279
280
281 class AppRegisterView(LoginRequiredMixin, TemplateView):
282 template_name = 'app/register.html'
283
284 def get_context_data(self, **kwargs):
285 context = super().get_context_data(**kwargs)
286 context['form'] = AppRegisterForm()
287 return context
288
[end of nextcloudappstore/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nextcloudappstore/core/views.py b/nextcloudappstore/core/views.py
--- a/nextcloudappstore/core/views.py
+++ b/nextcloudappstore/core/views.py
@@ -74,7 +74,7 @@
lambda r: r.get_available_languages(), ratings)
# make sure current session language is in the list even if there are
- # no comments
+ # no comments.
rating_languages = list(rating_languages)
if get_language() not in rating_languages:
rating_languages.append(get_language())
@@ -85,6 +85,18 @@
try:
app_rating = AppRating.objects.get(user=self.request.user,
app=context['app'])
+
+ # if parler fallsback to a fallback language
+ # it doesn't set the language as current language
+ # and we can't select the correct language in the
+ # frontend. So we try and find a languge that is
+ # available
+ language_code = app_rating.get_current_language()
+ if not app_rating.has_translation(language_code):
+ for fallback in app_rating.get_fallback_languages():
+ if app_rating.has_translation(fallback):
+ app_rating.set_current_language(fallback)
+
# when accessing an empty comment django-parler tries to
# fall back to the default language. However for comments
# the default (English) does not always exist. Unfortunately
|
{"golden_diff": "diff --git a/nextcloudappstore/core/views.py b/nextcloudappstore/core/views.py\n--- a/nextcloudappstore/core/views.py\n+++ b/nextcloudappstore/core/views.py\n@@ -74,7 +74,7 @@\n lambda r: r.get_available_languages(), ratings)\n \n # make sure current session language is in the list even if there are\n- # no comments\n+ # no comments.\n rating_languages = list(rating_languages)\n if get_language() not in rating_languages:\n rating_languages.append(get_language())\n@@ -85,6 +85,18 @@\n try:\n app_rating = AppRating.objects.get(user=self.request.user,\n app=context['app'])\n+\n+ # if parler fallsback to a fallback language\n+ # it doesn't set the language as current language\n+ # and we can't select the correct language in the\n+ # frontend. So we try and find a languge that is\n+ # available\n+ language_code = app_rating.get_current_language()\n+ if not app_rating.has_translation(language_code):\n+ for fallback in app_rating.get_fallback_languages():\n+ if app_rating.has_translation(fallback):\n+ app_rating.set_current_language(fallback)\n+\n # when accessing an empty comment django-parler tries to\n # fall back to the default language. However for comments\n # the default (English) does not always exist. Unfortunately\n", "issue": "Continuation from #414: comment defaults\nOn initial page load the comment language does not default to English if no comments are present in the user's language\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language, get_language_info\nfrom django.views.decorators.http import etag\nfrom django.views.generic import FormView\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom rest_framework.generics import ListAPIView\nfrom semantic_version import Version\n\nfrom nextcloudappstore.core.api.v1.serializers import AppRatingSerializer\nfrom nextcloudappstore.core.caching import app_etag\nfrom nextcloudappstore.core.facades import flatmap\nfrom nextcloudappstore.core.forms import AppRatingForm, AppReleaseUploadForm, \\\n AppRegisterForm\nfrom nextcloudappstore.core.models import App, Category, AppRating, \\\n NextcloudRelease\nfrom nextcloudappstore.core.scaffolding.archive import build_archive\nfrom nextcloudappstore.core.scaffolding.forms import AppScaffoldingForm\nfrom nextcloudappstore.core.versioning import pad_min_version\n\n\n@etag(app_etag)\ndef app_description(request, id):\n app = get_object_or_404(App, id=id)\n return HttpResponse(app.description, content_type='text/plain')\n\n\nclass AppRatingApi(ListAPIView):\n serializer_class = AppRatingSerializer\n\n def get_queryset(self):\n id = self.kwargs.get('id')\n app = get_object_or_404(App, id=id)\n return AppRating.objects.language(self.request.LANGUAGE_CODE).filter(\n app=app)\n\n\nclass LegalNoticeView(TemplateView):\n template_name = 'legal.html'\n\n\nclass AppDetailView(DetailView):\n queryset = App.objects.prefetch_related('releases', 'screenshots', 'owner',\n 'co_maintainers')\n template_name = 'app/detail.html'\n slug_field = 'id'\n slug_url_kwarg = 
'id'\n\n def post(self, request, id):\n form = AppRatingForm(request.POST, id=id, user=request.user)\n # there is no way that a rating can be invalid by default\n if form.is_valid() and request.user.is_authenticated:\n form.save()\n return redirect('app-detail', id=id)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['DISCOURSE_URL'] = settings.DISCOURSE_URL.rstrip('/')\n context['rating_form'] = AppRatingForm(\n initial={'language_code': get_language()})\n\n ratings = AppRating.objects.filter(app=context['app'])\n rating_languages = flatmap(\n lambda r: r.get_available_languages(), ratings)\n\n # make sure current session language is in the list even if there are\n # no comments\n rating_languages = list(rating_languages)\n if get_language() not in rating_languages:\n rating_languages.append(get_language())\n\n context['languages'] = sorted(rating_languages)\n context['user_has_rated_app'] = False\n if self.request.user.is_authenticated:\n try:\n app_rating = AppRating.objects.get(user=self.request.user,\n app=context['app'])\n # when accessing an empty comment django-parler tries to\n # fall back to the default language. However for comments\n # the default (English) does not always exist. Unfortunately\n # it throws the same exception as non existing models,\n # so we need to access it beforehand\n try:\n comment = app_rating.comment\n except AppRating.DoesNotExist:\n comment = ''\n\n context['rating_form'] = AppRatingForm({\n 'rating': app_rating.rating,\n 'comment': comment,\n 'language_code': app_rating.get_current_language(),\n })\n context['user_has_rated_app'] = True\n except AppRating.DoesNotExist:\n pass\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n context['latest_releases_by_platform_v'] = \\\n self.object.latest_releases_by_platform_v()\n return context\n\n\nclass AppReleasesView(DetailView):\n queryset = App.objects.prefetch_related(\n 'releases',\n 'releases__databases',\n 'releases__licenses',\n 'releases__phpextensiondependencies__php_extension',\n 'releases__databasedependencies__database',\n 'releases__shell_commands'\n )\n template_name = 'app/releases.html'\n slug_field = 'id'\n slug_url_kwarg = 'id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n\n releases = self.object.releases_by_platform_v()\n unstables = self.object.unstable_releases_by_platform_v()\n versions = set(list(releases.keys()) + list(unstables.keys()))\n all_releases = list(map(\n lambda v: (v, releases.get(v, []) + unstables.get(v, [])),\n versions))\n context['releases_by_platform_v'] = \\\n self._sort_by_platform_v(all_releases)\n return context\n\n def _sort_by_platform_v(self, releases_by_platform, reverse=True):\n \"\"\"Sorts a list of tuples like (<platform version>, [releases]) by\n platform version.\n\n :param releases_by_platform: A list of tuples.\n :param reverse: Descending order if True, ascending otherwise.\n :return sorted list of tuples.\n \"\"\"\n\n return sorted(releases_by_platform, reverse=reverse,\n key=lambda v: Version(pad_min_version(v[0])))\n\n\nclass CategoryAppListView(ListView):\n model = App\n template_name = 'app/list.html'\n allow_empty = True\n\n def get_queryset(self):\n order_by = self.request.GET.get('order_by', 'rating_overall')\n ordering = self.request.GET.get('ordering', 'desc')\n is_featured = self.request.GET.get('is_featured', False)\n 
maintainer = self.request.GET.get('maintainer', False)\n sort_columns = []\n\n allowed_order_by = {'name', 'last_release', 'rating_overall',\n 'rating_recent'}\n if order_by in allowed_order_by:\n if order_by == 'name':\n order_by = 'translations__name'\n if ordering == 'desc':\n sort_columns.append('-' + order_by)\n else:\n sort_columns.append(order_by)\n\n lang = get_language_info(get_language())['code']\n category_id = self.kwargs['id']\n queryset = App.objects.search(self.search_terms, lang).order_by(\n *sort_columns).filter(releases__gt=0)\n if maintainer:\n try:\n user = User.objects.get_by_natural_key(maintainer)\n queryset = queryset.filter(Q(owner=user) |\n Q(co_maintainers=user))\n except ObjectDoesNotExist:\n return queryset.none()\n if category_id:\n queryset = queryset.filter(categories__id=category_id)\n if is_featured == \"true\":\n queryset = queryset.filter(is_featured=True)\n return queryset.prefetch_related('screenshots', 'translations')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n category_id = self.kwargs['id']\n if category_id:\n context['current_category'] = Category.objects.get(id=category_id)\n if self.search_terms:\n context['search_query'] = ' '.join(self.search_terms)\n context['url_params'] = self.url_params\n return context\n\n @cached_property\n def url_params(self):\n \"\"\"URL encoded strings with the GET params of the last request.\n\n Intended for preserving GET params upon clicking a link by including\n one (and only one) of these strings in the \"href\" attribute.\n\n The parameters are divided into three groups: search, filters and\n ordering. In addition to these three, the returned dict also contains\n some combinations of them, as specified by the dict keys.\n\n No leading \"?\" or \"&\".\n\n :return dict with URL encoded strings.\n \"\"\"\n\n search = self._url_params_str('search')\n filters = self._url_params_str('is_featured', 'maintainer')\n ordering = self._url_params_str('order_by', 'ordering')\n\n return {\n 'search': search,\n 'filters': filters,\n 'ordering': ordering,\n 'search_filters': self._join_url_params_strs(search, filters),\n 'filters_ordering': self._join_url_params_strs(filters, ordering),\n }\n\n def _url_params_str(self, *params):\n args = map(lambda param: (param, self.request.GET.get(param, '')),\n params)\n present_args = filter(lambda a: a[1], args)\n return urlencode(dict(present_args))\n\n def _join_url_params_strs(self, *strings):\n return '&'.join(filter(None, strings))\n\n @cached_property\n def search_terms(self):\n return self.request.GET.get('search', '').strip().split()\n\n\nclass AppUploadView(LoginRequiredMixin, TemplateView):\n template_name = 'app/upload.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = AppReleaseUploadForm()\n return context\n\n\nclass AppScaffoldingView(FormView):\n template_name = 'app/scaffold.html'\n form_class = AppScaffoldingForm\n\n def get_initial(self):\n init = {\n 'platform': NextcloudRelease.get_current_main(),\n 'categories': ('tools',)\n }\n if self.request.user.is_authenticated:\n user = self.request.user\n init['author_name'] = '%s %s' % (user.first_name, user.last_name)\n init['author_email'] = user.email\n return init\n\n def form_valid(self, form):\n buffer = build_archive(form.cleaned_data)\n response = HttpResponse(content_type='application/tar+gzip')\n 
response['Content-Disposition'] = 'attachment; filename=\"app.tar.gz\"'\n value = buffer.getvalue()\n buffer.close()\n response.write(value)\n return response\n\n\nclass AppRegisterView(LoginRequiredMixin, TemplateView):\n template_name = 'app/register.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = AppRegisterForm()\n return context\n", "path": "nextcloudappstore/core/views.py"}]}
| 3,624 | 313 |
gh_patches_debug_64689
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-2992
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Install bug: Mock required for gradient_check
#2972 Install bug
Chainer installed with `pip install chainer`
`from chainer import gradient_check` fails because it cannot find mock to import
Fixed by `conda install mock`
`gradient_check` is included in the import block shown in the tutorial, so it should either be removed from there or mock should be added to the default install so that people doing the tutorial do not get an error during the import commands.
```
from chainer import gradient_check
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-1-0ba4708b632d> in <module>()
1 import numpy as np
2 import chainer
----> 3 from chainer import gradient_check
4 from chainer import datasets, iterators, optimizers, serializers
5 from chainer import Link, Chain, ChainList
/home/crissman/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/gradient_check.py in <module>()
7 from chainer import cuda
8 from chainer.functions.math import identity
----> 9 from chainer import testing
10 from chainer import variable
11
/home/crissman/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/testing/__init__.py in <module>()
5 from chainer.testing import parameterized # NOQA
6 from chainer.testing import serializer # NOQA
----> 7 from chainer.testing import training # NOQA
8 from chainer.testing import unary_math_function_test # NOQA
9
/home/crissman/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/testing/training.py in <module>()
1 from __future__ import division
2
----> 3 import mock
4
5 from chainer import training
ImportError: No module named 'mock'
```
</issue>
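The traceback spells out the dependency chain: `gradient_check` imports `chainer.testing`, which imports `chainer.testing.training`, which imports `mock` unconditionally, so mock is effectively a runtime dependency rather than a test-only one. A sketch of declaring it as a default dependency in `setup.py` (the full file is listed below; this is an illustration of the idea, not necessarily the final patch):

```python
# Declare mock as a runtime dependency so that `pip install chainer` pulls it
# in and `from chainer import gradient_check` works without extra steps.
install_requires = [
    'filelock',
    'mock',        # imported unconditionally by chainer.testing.training
    'nose',
    'numpy>=1.9.0',
    'protobuf>=2.6.0',
    'six>=1.9.0',
]
```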
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import pkg_resources
5 import sys
6
7 from setuptools import setup
8
9
10 if sys.version_info[:3] == (3, 5, 0):
11 if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
12 msg = """
13 Chainer does not work with Python 3.5.0.
14
15 We strongly recommend to use another version of Python.
16 If you want to use Chainer with Python 3.5.0 at your own risk,
17 set CHAINER_PYTHON_350_FORCE environment variable to 1."""
18 print(msg)
19 sys.exit(1)
20
21
22 setup_requires = []
23 install_requires = [
24 'filelock',
25 'nose',
26 'numpy>=1.9.0',
27 'protobuf>=2.6.0',
28 'six>=1.9.0',
29 ]
30 cupy_require = 'cupy==2.0.0a1'
31
32 cupy_pkg = None
33 try:
34 cupy_pkg = pkg_resources.get_distribution('cupy')
35 except pkg_resources.DistributionNotFound:
36 pass
37
38 if cupy_pkg is not None:
39 install_requires.append(cupy_require)
40 print('Use %s' % cupy_require)
41
42 setup(
43 name='chainer',
44 version='3.0.0a1',
45 description='A flexible framework of neural networks',
46 author='Seiya Tokui',
47 author_email='[email protected]',
48 url='https://chainer.org/',
49 license='MIT License',
50 packages=['chainer',
51 'chainer.dataset',
52 'chainer.datasets',
53 'chainer.functions',
54 'chainer.functions.activation',
55 'chainer.functions.array',
56 'chainer.functions.connection',
57 'chainer.functions.evaluation',
58 'chainer.functions.loss',
59 'chainer.functions.math',
60 'chainer.functions.noise',
61 'chainer.functions.normalization',
62 'chainer.functions.pooling',
63 'chainer.functions.theano',
64 'chainer.functions.util',
65 'chainer.function_hooks',
66 'chainer.iterators',
67 'chainer.initializers',
68 'chainer.links',
69 'chainer.links.activation',
70 'chainer.links.caffe',
71 'chainer.links.caffe.protobuf2',
72 'chainer.links.caffe.protobuf3',
73 'chainer.links.connection',
74 'chainer.links.loss',
75 'chainer.links.model',
76 'chainer.links.model.vision',
77 'chainer.links.normalization',
78 'chainer.links.theano',
79 'chainer.optimizers',
80 'chainer.serializers',
81 'chainer.testing',
82 'chainer.training',
83 'chainer.training.extensions',
84 'chainer.training.triggers',
85 'chainer.training.updaters',
86 'chainer.utils'],
87 zip_safe=False,
88 setup_requires=setup_requires,
89 install_requires=install_requires,
90 tests_require=['mock',
91 'nose'],
92 )
93
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@
setup_requires = []
install_requires = [
'filelock',
+ 'mock',
'nose',
'numpy>=1.9.0',
'protobuf>=2.6.0',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,6 +22,7 @@\n setup_requires = []\n install_requires = [\n 'filelock',\n+ 'mock',\n 'nose',\n 'numpy>=1.9.0',\n 'protobuf>=2.6.0',\n", "issue": "Install bug: Mock required for gradient_check\n#2972 Install bug\r\n\r\nChainer installed with `pip install chainer`\r\n`from chainer import gradient_check` fails due to unable to find mock to import\r\nFixed by `conda install mock`\r\n\r\n`gradient_check` is included in the block declarations in the tutorial, so it should either be removed from there or mock should be added to default install so that people doing the tutorial do not get an error during the import commands.\r\n\r\n```\r\nfrom chainer import gradient_check\r\n\r\n---------------------------------------------------------------------------\r\nImportError Traceback (most recent call last)\r\n<ipython-input-1-0ba4708b632d> in <module>()\r\n 1 import numpy as np\r\n 2 import chainer\r\n----> 3 from chainer import gradient_check\r\n 4 from chainer import datasets, iterators, optimizers, serializers\r\n 5 from chainer import Link, Chain, ChainList\r\n\r\n/home/crissman/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/gradient_check.py in <module>()\r\n 7 from chainer import cuda\r\n 8 from chainer.functions.math import identity\r\n----> 9 from chainer import testing\r\n 10 from chainer import variable\r\n 11 \r\n\r\n/home/crissman/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/testing/__init__.py in <module>()\r\n 5 from chainer.testing import parameterized # NOQA\r\n 6 from chainer.testing import serializer # NOQA\r\n----> 7 from chainer.testing import training # NOQA\r\n 8 from chainer.testing import unary_math_function_test # NOQA\r\n 9 \r\n\r\n/home/crissman/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/testing/training.py in <module>()\r\n 1 from __future__ import division\r\n 2 \r\n----> 3 import mock\r\n 4 \r\n 5 from chainer import training\r\n\r\nImportError: No module named 'mock'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport pkg_resources\nimport sys\n\nfrom setuptools import setup\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nChainer does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use Chainer with Python 3.5.0 at your own risk,\nset CHAINER_PYTHON_350_FORCE environment variable to 1.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nsetup_requires = []\ninstall_requires = [\n 'filelock',\n 'nose',\n 'numpy>=1.9.0',\n 'protobuf>=2.6.0',\n 'six>=1.9.0',\n]\ncupy_require = 'cupy==2.0.0a1'\n\ncupy_pkg = None\ntry:\n cupy_pkg = pkg_resources.get_distribution('cupy')\nexcept pkg_resources.DistributionNotFound:\n pass\n\nif cupy_pkg is not None:\n install_requires.append(cupy_require)\n print('Use %s' % cupy_require)\n\nsetup(\n name='chainer',\n version='3.0.0a1',\n description='A flexible framework of neural networks',\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://chainer.org/',\n license='MIT License',\n packages=['chainer',\n 'chainer.dataset',\n 'chainer.datasets',\n 'chainer.functions',\n 'chainer.functions.activation',\n 'chainer.functions.array',\n 'chainer.functions.connection',\n 'chainer.functions.evaluation',\n 'chainer.functions.loss',\n 'chainer.functions.math',\n 'chainer.functions.noise',\n 'chainer.functions.normalization',\n 
'chainer.functions.pooling',\n 'chainer.functions.theano',\n 'chainer.functions.util',\n 'chainer.function_hooks',\n 'chainer.iterators',\n 'chainer.initializers',\n 'chainer.links',\n 'chainer.links.activation',\n 'chainer.links.caffe',\n 'chainer.links.caffe.protobuf2',\n 'chainer.links.caffe.protobuf3',\n 'chainer.links.connection',\n 'chainer.links.loss',\n 'chainer.links.model',\n 'chainer.links.model.vision',\n 'chainer.links.normalization',\n 'chainer.links.theano',\n 'chainer.optimizers',\n 'chainer.serializers',\n 'chainer.testing',\n 'chainer.training',\n 'chainer.training.extensions',\n 'chainer.training.triggers',\n 'chainer.training.updaters',\n 'chainer.utils'],\n zip_safe=False,\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=['mock',\n 'nose'],\n)\n", "path": "setup.py"}]}
| 1,807 | 75 |
gh_patches_debug_55104
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-13667
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uploading report without test method yields "Server Error"
### NetBox version
v3.6.0 (But reproducible in 3.6.1-dev, too)
### Python version
3.11
### Steps to Reproduce
1. Create `a.py`:
```py
from extras.reports import Report
class DeviceConnectionsReport(Report):
description = "Validate the minimum physical connections for each device"
pass
```
(Note, the class doesn't have a test method)
2. Upload it as a report
### Expected Behavior
I expected it to either get an error, that there are no test methods in it, and the ability to delete it without using the CLI.
### Observed Behavior
Reality:
```
<class 'Exception'>
A report must contain at least one test method.
Python version: 3.11.4
NetBox version: 3.6.1-dev
Plugins: None installed
```
Screenshot from develop branch, but reproducible with stable, too
</issue>
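For context before the code listing: `Report.__init__` collects every callable attribute whose name starts with `test_` and raises a bare `Exception("A report must contain at least one test method.")` when it finds none, which is what surfaces as the server error during upload. A sketch of the smallest report that satisfies that check (class name, method name and log message are illustrative):

```python
from extras.reports import Report

class DeviceConnectionsReport(Report):
    description = "Validate the minimum physical connections for each device"

    def test_placeholder(self):
        # Any callable named test_* satisfies the check in Report.__init__.
        self.log("no checks implemented yet")
```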
<code>
[start of netbox/extras/reports.py]
1 import inspect
2 import logging
3 import traceback
4 from datetime import timedelta
5
6 from django.utils import timezone
7 from django.utils.functional import classproperty
8 from django_rq import job
9
10 from core.choices import JobStatusChoices
11 from core.models import Job
12 from .choices import LogLevelChoices
13 from .models import ReportModule
14
15 __all__ = (
16 'Report',
17 'get_module_and_report',
18 'run_report',
19 )
20
21 logger = logging.getLogger(__name__)
22
23
24 def get_module_and_report(module_name, report_name):
25 module = ReportModule.objects.get(file_path=f'{module_name}.py')
26 report = module.reports.get(report_name)
27 return module, report
28
29
30 @job('default')
31 def run_report(job, *args, **kwargs):
32 """
33 Helper function to call the run method on a report. This is needed to get around the inability to pickle an instance
34 method for queueing into the background processor.
35 """
36 job.start()
37
38 module = ReportModule.objects.get(pk=job.object_id)
39 report = module.reports.get(job.name)()
40
41 try:
42 report.run(job)
43 except Exception:
44 job.terminate(status=JobStatusChoices.STATUS_ERRORED)
45 logging.error(f"Error during execution of report {job.name}")
46 finally:
47 # Schedule the next job if an interval has been set
48 if job.interval:
49 new_scheduled_time = job.scheduled + timedelta(minutes=job.interval)
50 Job.enqueue(
51 run_report,
52 instance=job.object,
53 name=job.name,
54 user=job.user,
55 job_timeout=report.job_timeout,
56 schedule_at=new_scheduled_time,
57 interval=job.interval
58 )
59
60
61 class Report(object):
62 """
63 NetBox users can extend this object to write custom reports to be used for validating data within NetBox. Each
64 report must have one or more test methods named `test_*`.
65
66 The `_results` attribute of a completed report will take the following form:
67
68 {
69 'test_bar': {
70 'failures': 42,
71 'log': [
72 (<datetime>, <level>, <object>, <message>),
73 ...
74 ]
75 },
76 'test_foo': {
77 'failures': 0,
78 'log': [
79 (<datetime>, <level>, <object>, <message>),
80 ...
81 ]
82 }
83 }
84 """
85 description = None
86 scheduling_enabled = True
87 job_timeout = None
88
89 def __init__(self):
90
91 self._results = {}
92 self.active_test = None
93 self.failed = False
94
95 self.logger = logging.getLogger(f"netbox.reports.{self.__module__}.{self.__class__.__name__}")
96
97 # Compile test methods and initialize results skeleton
98 test_methods = []
99 for method in dir(self):
100 if method.startswith('test_') and callable(getattr(self, method)):
101 test_methods.append(method)
102 self._results[method] = {
103 'success': 0,
104 'info': 0,
105 'warning': 0,
106 'failure': 0,
107 'log': [],
108 }
109 if not test_methods:
110 raise Exception("A report must contain at least one test method.")
111 self.test_methods = test_methods
112
113 @classproperty
114 def module(self):
115 return self.__module__
116
117 @classproperty
118 def class_name(self):
119 return self.__name__
120
121 @classproperty
122 def full_name(self):
123 return f'{self.module}.{self.class_name}'
124
125 @property
126 def name(self):
127 """
128 Override this attribute to set a custom display name.
129 """
130 return self.class_name
131
132 @property
133 def filename(self):
134 return inspect.getfile(self.__class__)
135
136 @property
137 def source(self):
138 return inspect.getsource(self.__class__)
139
140 #
141 # Logging methods
142 #
143
144 def _log(self, obj, message, level=LogLevelChoices.LOG_DEFAULT):
145 """
146 Log a message from a test method. Do not call this method directly; use one of the log_* wrappers below.
147 """
148 if level not in LogLevelChoices.values():
149 raise Exception(f"Unknown logging level: {level}")
150 self._results[self.active_test]['log'].append((
151 timezone.now().isoformat(),
152 level,
153 str(obj) if obj else None,
154 obj.get_absolute_url() if hasattr(obj, 'get_absolute_url') else None,
155 message,
156 ))
157
158 def log(self, message):
159 """
160 Log a message which is not associated with a particular object.
161 """
162 self._log(None, message, level=LogLevelChoices.LOG_DEFAULT)
163 self.logger.info(message)
164
165 def log_success(self, obj, message=None):
166 """
167 Record a successful test against an object. Logging a message is optional.
168 """
169 if message:
170 self._log(obj, message, level=LogLevelChoices.LOG_SUCCESS)
171 self._results[self.active_test]['success'] += 1
172 self.logger.info(f"Success | {obj}: {message}")
173
174 def log_info(self, obj, message):
175 """
176 Log an informational message.
177 """
178 self._log(obj, message, level=LogLevelChoices.LOG_INFO)
179 self._results[self.active_test]['info'] += 1
180 self.logger.info(f"Info | {obj}: {message}")
181
182 def log_warning(self, obj, message):
183 """
184 Log a warning.
185 """
186 self._log(obj, message, level=LogLevelChoices.LOG_WARNING)
187 self._results[self.active_test]['warning'] += 1
188 self.logger.info(f"Warning | {obj}: {message}")
189
190 def log_failure(self, obj, message):
191 """
192 Log a failure. Calling this method will automatically mark the report as failed.
193 """
194 self._log(obj, message, level=LogLevelChoices.LOG_FAILURE)
195 self._results[self.active_test]['failure'] += 1
196 self.logger.info(f"Failure | {obj}: {message}")
197 self.failed = True
198
199 #
200 # Run methods
201 #
202
203 def run(self, job):
204 """
205 Run the report and save its results. Each test method will be executed in order.
206 """
207 self.logger.info(f"Running report")
208
209 # Perform any post-run tasks
210 self.pre_run()
211
212 try:
213 for method_name in self.test_methods:
214 self.active_test = method_name
215 test_method = getattr(self, method_name)
216 test_method()
217 job.data = self._results
218 if self.failed:
219 self.logger.warning("Report failed")
220 job.terminate(status=JobStatusChoices.STATUS_FAILED)
221 else:
222 self.logger.info("Report completed successfully")
223 job.terminate()
224 except Exception as e:
225 stacktrace = traceback.format_exc()
226 self.log_failure(None, f"An exception occurred: {type(e).__name__}: {e} <pre>{stacktrace}</pre>")
227 logger.error(f"Exception raised during report execution: {e}")
228 job.terminate(status=JobStatusChoices.STATUS_ERRORED)
229
230 # Perform any post-run tasks
231 self.post_run()
232
233 def pre_run(self):
234 """
235 Extend this method to include any tasks which should execute *before* the report is run.
236 """
237 pass
238
239 def post_run(self):
240 """
241 Extend this method to include any tasks which should execute *after* the report is run.
242 """
243 pass
244
[end of netbox/extras/reports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/extras/reports.py b/netbox/extras/reports.py
--- a/netbox/extras/reports.py
+++ b/netbox/extras/reports.py
@@ -106,8 +106,6 @@
'failure': 0,
'log': [],
}
- if not test_methods:
- raise Exception("A report must contain at least one test method.")
self.test_methods = test_methods
@classproperty
|
{"golden_diff": "diff --git a/netbox/extras/reports.py b/netbox/extras/reports.py\n--- a/netbox/extras/reports.py\n+++ b/netbox/extras/reports.py\n@@ -106,8 +106,6 @@\n 'failure': 0,\n 'log': [],\n }\n- if not test_methods:\n- raise Exception(\"A report must contain at least one test method.\")\n self.test_methods = test_methods\n \n @classproperty\n", "issue": "Uploading report without test method yields \"Server Error\"\n### NetBox version\n\nv3.6.0 (But reproducible in 3.6.1-dev, too)\n\n### Python version\n\n3.11\n\n### Steps to Reproduce\n\n1.Create `a.py`:\r\n```py\r\nfrom extras.reports import Report\r\n\r\nclass DeviceConnectionsReport(Report):\r\n description = \"Validate the minimum physical connections for each device\"\r\n pass\r\n```\r\n(Note, the class doesn't have a test method)\r\n\r\n2.Upload it as report\r\n\n\n### Expected Behavior\n\nI expected it to either get an error, that there are no test methods in it, and the ability to delete it without using the CLI.\n\n### Observed Behavior\n\nReality:\r\n```\r\n<class 'Exception'>\r\n\r\nA report must contain at least one test method.\r\n\r\nPython version: 3.11.4\r\nNetBox version: 3.6.1-dev\r\nPlugins: None installed\r\n```\r\n\r\n\r\n\r\n\r\nScreenshot from develop branch, but reproducible with stable, too\n", "before_files": [{"content": "import inspect\nimport logging\nimport traceback\nfrom datetime import timedelta\n\nfrom django.utils import timezone\nfrom django.utils.functional import classproperty\nfrom django_rq import job\n\nfrom core.choices import JobStatusChoices\nfrom core.models import Job\nfrom .choices import LogLevelChoices\nfrom .models import ReportModule\n\n__all__ = (\n 'Report',\n 'get_module_and_report',\n 'run_report',\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_module_and_report(module_name, report_name):\n module = ReportModule.objects.get(file_path=f'{module_name}.py')\n report = module.reports.get(report_name)\n return module, report\n\n\n@job('default')\ndef run_report(job, *args, **kwargs):\n \"\"\"\n Helper function to call the run method on a report. This is needed to get around the inability to pickle an instance\n method for queueing into the background processor.\n \"\"\"\n job.start()\n\n module = ReportModule.objects.get(pk=job.object_id)\n report = module.reports.get(job.name)()\n\n try:\n report.run(job)\n except Exception:\n job.terminate(status=JobStatusChoices.STATUS_ERRORED)\n logging.error(f\"Error during execution of report {job.name}\")\n finally:\n # Schedule the next job if an interval has been set\n if job.interval:\n new_scheduled_time = job.scheduled + timedelta(minutes=job.interval)\n Job.enqueue(\n run_report,\n instance=job.object,\n name=job.name,\n user=job.user,\n job_timeout=report.job_timeout,\n schedule_at=new_scheduled_time,\n interval=job.interval\n )\n\n\nclass Report(object):\n \"\"\"\n NetBox users can extend this object to write custom reports to be used for validating data within NetBox. 
Each\n report must have one or more test methods named `test_*`.\n\n The `_results` attribute of a completed report will take the following form:\n\n {\n 'test_bar': {\n 'failures': 42,\n 'log': [\n (<datetime>, <level>, <object>, <message>),\n ...\n ]\n },\n 'test_foo': {\n 'failures': 0,\n 'log': [\n (<datetime>, <level>, <object>, <message>),\n ...\n ]\n }\n }\n \"\"\"\n description = None\n scheduling_enabled = True\n job_timeout = None\n\n def __init__(self):\n\n self._results = {}\n self.active_test = None\n self.failed = False\n\n self.logger = logging.getLogger(f\"netbox.reports.{self.__module__}.{self.__class__.__name__}\")\n\n # Compile test methods and initialize results skeleton\n test_methods = []\n for method in dir(self):\n if method.startswith('test_') and callable(getattr(self, method)):\n test_methods.append(method)\n self._results[method] = {\n 'success': 0,\n 'info': 0,\n 'warning': 0,\n 'failure': 0,\n 'log': [],\n }\n if not test_methods:\n raise Exception(\"A report must contain at least one test method.\")\n self.test_methods = test_methods\n\n @classproperty\n def module(self):\n return self.__module__\n\n @classproperty\n def class_name(self):\n return self.__name__\n\n @classproperty\n def full_name(self):\n return f'{self.module}.{self.class_name}'\n\n @property\n def name(self):\n \"\"\"\n Override this attribute to set a custom display name.\n \"\"\"\n return self.class_name\n\n @property\n def filename(self):\n return inspect.getfile(self.__class__)\n\n @property\n def source(self):\n return inspect.getsource(self.__class__)\n\n #\n # Logging methods\n #\n\n def _log(self, obj, message, level=LogLevelChoices.LOG_DEFAULT):\n \"\"\"\n Log a message from a test method. Do not call this method directly; use one of the log_* wrappers below.\n \"\"\"\n if level not in LogLevelChoices.values():\n raise Exception(f\"Unknown logging level: {level}\")\n self._results[self.active_test]['log'].append((\n timezone.now().isoformat(),\n level,\n str(obj) if obj else None,\n obj.get_absolute_url() if hasattr(obj, 'get_absolute_url') else None,\n message,\n ))\n\n def log(self, message):\n \"\"\"\n Log a message which is not associated with a particular object.\n \"\"\"\n self._log(None, message, level=LogLevelChoices.LOG_DEFAULT)\n self.logger.info(message)\n\n def log_success(self, obj, message=None):\n \"\"\"\n Record a successful test against an object. Logging a message is optional.\n \"\"\"\n if message:\n self._log(obj, message, level=LogLevelChoices.LOG_SUCCESS)\n self._results[self.active_test]['success'] += 1\n self.logger.info(f\"Success | {obj}: {message}\")\n\n def log_info(self, obj, message):\n \"\"\"\n Log an informational message.\n \"\"\"\n self._log(obj, message, level=LogLevelChoices.LOG_INFO)\n self._results[self.active_test]['info'] += 1\n self.logger.info(f\"Info | {obj}: {message}\")\n\n def log_warning(self, obj, message):\n \"\"\"\n Log a warning.\n \"\"\"\n self._log(obj, message, level=LogLevelChoices.LOG_WARNING)\n self._results[self.active_test]['warning'] += 1\n self.logger.info(f\"Warning | {obj}: {message}\")\n\n def log_failure(self, obj, message):\n \"\"\"\n Log a failure. Calling this method will automatically mark the report as failed.\n \"\"\"\n self._log(obj, message, level=LogLevelChoices.LOG_FAILURE)\n self._results[self.active_test]['failure'] += 1\n self.logger.info(f\"Failure | {obj}: {message}\")\n self.failed = True\n\n #\n # Run methods\n #\n\n def run(self, job):\n \"\"\"\n Run the report and save its results. 
Each test method will be executed in order.\n \"\"\"\n self.logger.info(f\"Running report\")\n\n # Perform any post-run tasks\n self.pre_run()\n\n try:\n for method_name in self.test_methods:\n self.active_test = method_name\n test_method = getattr(self, method_name)\n test_method()\n job.data = self._results\n if self.failed:\n self.logger.warning(\"Report failed\")\n job.terminate(status=JobStatusChoices.STATUS_FAILED)\n else:\n self.logger.info(\"Report completed successfully\")\n job.terminate()\n except Exception as e:\n stacktrace = traceback.format_exc()\n self.log_failure(None, f\"An exception occurred: {type(e).__name__}: {e} <pre>{stacktrace}</pre>\")\n logger.error(f\"Exception raised during report execution: {e}\")\n job.terminate(status=JobStatusChoices.STATUS_ERRORED)\n\n # Perform any post-run tasks\n self.post_run()\n\n def pre_run(self):\n \"\"\"\n Extend this method to include any tasks which should execute *before* the report is run.\n \"\"\"\n pass\n\n def post_run(self):\n \"\"\"\n Extend this method to include any tasks which should execute *after* the report is run.\n \"\"\"\n pass\n", "path": "netbox/extras/reports.py"}]}
| 3,007 | 103 |
gh_patches_debug_41317
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-6088
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support `ignore_availability` in preparer
Motivation
----------
There are two possible behaviors:
- either requests are ignored when fetched from the database: https://github.com/rucio/rucio/blob/9a5d044d9f5760a224dba9feb14b71bfdf213252/lib/rucio/core/request.py#L420
- or `ignore_availability` is set and they are marked NO_SOURCES: https://github.com/rucio/rucio/blob/9a5d044d9f5760a224dba9feb14b71bfdf213252/lib/rucio/core/transfer.py#L863
The submitter has the ability to switch between these two modes using a CLI switch; the preparer is hard-coded to `ignore_availability = False`
Modification
------------
Implement this switch from the submitter:
https://github.com/rucio/rucio/blob/ac95ef3fabb8e1fcabddf1075cc19ae06db44a34/bin/rucio-conveyor-submitter#L108
in the preparer:
https://github.com/rucio/rucio/blob/2a41a64899f41b356660fff80a7530765b5d59e0/bin/rucio-conveyor-preparer#L35
And pass it through the call stack to this function:
https://github.com/rucio/rucio/blob/e3c77683184063424fdcf9ecfda7d37cb4641c4f/lib/rucio/daemons/conveyor/preparer.py#L128
Add a test for the two behaviors. An example test (which will have to be simplified and adapted for this issue):
https://github.com/rucio/rucio/blob/2d81b39b8560f376f412fcba62a622a6db748780/lib/rucio/tests/test_conveyor.py#L611
</issue>
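Before the code listing, a sketch of the plumbing the issue asks for: expose the same kind of command-line switch the submitter already has and hand it down the preparer call stack. The flag mirrors the existing `ignore_availability` naming; everything else here is illustrative rather than the final patch:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--ignore-availability", action="store_true",
                    help="Also fetch requests whose RSEs are unavailable and let them be "
                         "marked NO_SOURCES instead of silently skipping them")
args = parser.parse_args()

# The flag would then travel down the existing call chain in preparer.py:
#   run(..., ignore_availability=args.ignore_availability)
#     -> preparer(..., ignore_availability=...)    # via preparer_kwargs()
#       -> run_once(..., ignore_availability=...)  # via functools.partial
#         -> the request listing / path building in core.request and core.transfer
```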
<code>
[start of lib/rucio/daemons/conveyor/preparer.py]
1 # -*- coding: utf-8 -*-
2 # Copyright European Organization for Nuclear Research (CERN) since 2012
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import functools
17 import logging
18 import threading
19 from time import time
20 from typing import TYPE_CHECKING
21
22 import rucio.db.sqla.util
23 from rucio.common import exception
24 from rucio.common.config import config_get_list
25 from rucio.common.exception import RucioException
26 from rucio.common.logging import setup_logging
27 from rucio.core import transfer as transfer_core
28 from rucio.core.request import set_requests_state_if_possible, list_transfer_requests_and_source_replicas
29 from rucio.core.transfer import prepare_transfers, list_transfer_admin_accounts, build_transfer_paths
30 from rucio.core.topology import Topology
31 from rucio.db.sqla.constants import RequestState, RequestType
32 from rucio.daemons.common import run_daemon
33
34 if TYPE_CHECKING:
35 from typing import Optional, List
36 from sqlalchemy.orm import Session
37 from rucio.daemons.common import HeartbeatHandler
38
39 graceful_stop = threading.Event()
40
41
42 def stop():
43 """
44 Graceful exit.
45 """
46
47 graceful_stop.set()
48
49
50 def run(once=False, threads=1, sleep_time=10, bulk=100):
51 """
52 Running the preparer daemon either once or by default in a loop until stop is called.
53 """
54 setup_logging()
55
56 if rucio.db.sqla.util.is_old_db():
57 raise exception.DatabaseException('Database was not updated, daemon won\'t start')
58
59 def preparer_kwargs():
60 # not sure if this is needed for threading.Thread, but it always returns a fresh dictionary
61 return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}
62
63 threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]
64 for thr in threads:
65 thr.start()
66
67 all_running = True
68 while all_running:
69 for thr in threads:
70 thr.join(timeout=3.14)
71 if not thr.is_alive() or graceful_stop.is_set():
72 all_running = False
73 break
74
75 if graceful_stop.is_set() or once:
76 logging.info('conveyor-preparer: gracefully stopping')
77 else:
78 logging.warning('conveyor-preparer: stopping out of the ordinary')
79 graceful_stop.set()
80
81 for thr in threads:
82 thr.join(timeout=3.14)
83
84 logging.info('conveyor-preparer: stopped')
85
86
87 def preparer(once, sleep_time, bulk, partition_wait_time=10):
88 # Make an initial heartbeat so that all instanced daemons have the correct worker number on the next try
89 logger_prefix = executable = 'conveyor-preparer'
90 transfertools = config_get_list('conveyor', 'transfertool', False, None)
91
92 run_daemon(
93 once=once,
94 graceful_stop=graceful_stop,
95 executable=executable,
96 logger_prefix=logger_prefix,
97 partition_wait_time=partition_wait_time,
98 sleep_time=sleep_time,
99 run_once_fnc=functools.partial(
100 run_once,
101 transfertools=transfertools,
102 bulk=bulk,
103 ),
104 activities=None,
105 )
106
107
108 def run_once(
109 transfertools: "Optional[List[str]]" = None,
110 bulk: int = 100,
111 heartbeat_handler: "Optional[HeartbeatHandler]" = None,
112 session: "Optional[Session]" = None,
113 **kwargs
114 ) -> bool:
115 if heartbeat_handler:
116 worker_number, total_workers, logger = heartbeat_handler.live()
117 else:
118 # This is used in tests
119 worker_number, total_workers, logger = 0, 0, logging.log
120 if not transfertools:
121 transfertools = list(transfer_core.TRANSFERTOOL_CLASSES_BY_NAME)
122
123 start_time = time()
124 requests_handled = 0
125 try:
126 admin_accounts = list_transfer_admin_accounts()
127 topology = Topology.create_from_config(logger=logger)
128 requests_with_sources = list_transfer_requests_and_source_replicas(
129 total_workers=total_workers,
130 worker_number=worker_number,
131 limit=bulk,
132 request_state=RequestState.PREPARING,
133 request_type=[RequestType.TRANSFER, RequestType.STAGEIN],
134 session=session
135 )
136 ret = build_transfer_paths(
137 topology=topology,
138 requests_with_sources=list(requests_with_sources.values()),
139 admin_accounts=admin_accounts,
140 preparer_mode=True,
141 logger=logger,
142 session=session,
143 )
144 requests_handled = sum(len(i) for i in ret)
145 if not requests_handled:
146 updated_msg = 'had nothing to do'
147 else:
148 candidate_paths, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source, _ = ret
149 updated_reqs, reqs_no_transfertool = prepare_transfers(candidate_paths, transfertools=transfertools, logger=logger, session=session)
150 updated_msg = f'updated {len(updated_reqs)}/{bulk} requests'
151
152 if reqs_no_transfertool:
153 logger(logging.INFO, "Ignoring request because of unsupported transfertool: %s", reqs_no_transfertool)
154 reqs_no_source.update(reqs_no_transfertool)
155 if reqs_no_source:
156 logger(logging.INFO, "Marking requests as no-sources: %s", reqs_no_source)
157 set_requests_state_if_possible(reqs_no_source, RequestState.NO_SOURCES, logger=logger)
158 if reqs_only_tape_source:
159 logger(logging.INFO, "Marking requests as only-tape-sources: %s", reqs_only_tape_source)
160 set_requests_state_if_possible(reqs_only_tape_source, RequestState.ONLY_TAPE_SOURCES, logger=logger)
161 if reqs_scheme_mismatch:
162 logger(logging.INFO, "Marking requests as scheme-mismatch: %s", reqs_scheme_mismatch)
163 set_requests_state_if_possible(reqs_scheme_mismatch, RequestState.MISMATCH_SCHEME, logger=logger)
164 except RucioException:
165 logger(logging.ERROR, 'errored with a RucioException, retrying later', exc_info=True)
166 updated_msg = 'errored'
167 logger(logging.INFO, '%s, taking %.3f seconds' % (updated_msg, time() - start_time))
168
169 must_sleep = False
170 if requests_handled < bulk / 2:
171 logger(logging.INFO, "Only %s transfers, which is less than half of the bulk %s", requests_handled, bulk)
172 must_sleep = True
173 return must_sleep
174
[end of lib/rucio/daemons/conveyor/preparer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/rucio/daemons/conveyor/preparer.py b/lib/rucio/daemons/conveyor/preparer.py
--- a/lib/rucio/daemons/conveyor/preparer.py
+++ b/lib/rucio/daemons/conveyor/preparer.py
@@ -47,7 +47,7 @@
graceful_stop.set()
-def run(once=False, threads=1, sleep_time=10, bulk=100):
+def run(once=False, threads=1, sleep_time=10, bulk=100, ignore_availability: bool = False):
"""
Running the preparer daemon either once or by default in a loop until stop is called.
"""
@@ -58,7 +58,7 @@
def preparer_kwargs():
# not sure if this is needed for threading.Thread, but it always returns a fresh dictionary
- return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}
+ return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk, 'ignore_availability': ignore_availability}
threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]
for thr in threads:
@@ -84,7 +84,7 @@
logging.info('conveyor-preparer: stopped')
-def preparer(once, sleep_time, bulk, partition_wait_time=10):
+def preparer(once, sleep_time, bulk, ignore_availability: bool, partition_wait_time: int = 10):
# Make an initial heartbeat so that all instanced daemons have the correct worker number on the next try
logger_prefix = executable = 'conveyor-preparer'
transfertools = config_get_list('conveyor', 'transfertool', False, None)
@@ -100,6 +100,7 @@
run_once,
transfertools=transfertools,
bulk=bulk,
+ ignore_availability=ignore_availability,
),
activities=None,
)
@@ -110,6 +111,7 @@
bulk: int = 100,
heartbeat_handler: "Optional[HeartbeatHandler]" = None,
session: "Optional[Session]" = None,
+ ignore_availability: bool = False,
**kwargs
) -> bool:
if heartbeat_handler:
@@ -124,14 +126,15 @@
requests_handled = 0
try:
admin_accounts = list_transfer_admin_accounts()
- topology = Topology.create_from_config(logger=logger)
+ topology = Topology.create_from_config(logger=logger, ignore_availability=ignore_availability)
requests_with_sources = list_transfer_requests_and_source_replicas(
total_workers=total_workers,
worker_number=worker_number,
limit=bulk,
request_state=RequestState.PREPARING,
request_type=[RequestType.TRANSFER, RequestType.STAGEIN],
- session=session
+ session=session,
+ ignore_availability=ignore_availability
)
ret = build_transfer_paths(
topology=topology,
|
{"golden_diff": "diff --git a/lib/rucio/daemons/conveyor/preparer.py b/lib/rucio/daemons/conveyor/preparer.py\n--- a/lib/rucio/daemons/conveyor/preparer.py\n+++ b/lib/rucio/daemons/conveyor/preparer.py\n@@ -47,7 +47,7 @@\n graceful_stop.set()\n \n \n-def run(once=False, threads=1, sleep_time=10, bulk=100):\n+def run(once=False, threads=1, sleep_time=10, bulk=100, ignore_availability: bool = False):\n \"\"\"\n Running the preparer daemon either once or by default in a loop until stop is called.\n \"\"\"\n@@ -58,7 +58,7 @@\n \n def preparer_kwargs():\n # not sure if this is needed for threading.Thread, but it always returns a fresh dictionary\n- return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}\n+ return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk, 'ignore_availability': ignore_availability}\n \n threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]\n for thr in threads:\n@@ -84,7 +84,7 @@\n logging.info('conveyor-preparer: stopped')\n \n \n-def preparer(once, sleep_time, bulk, partition_wait_time=10):\n+def preparer(once, sleep_time, bulk, ignore_availability: bool, partition_wait_time: int = 10):\n # Make an initial heartbeat so that all instanced daemons have the correct worker number on the next try\n logger_prefix = executable = 'conveyor-preparer'\n transfertools = config_get_list('conveyor', 'transfertool', False, None)\n@@ -100,6 +100,7 @@\n run_once,\n transfertools=transfertools,\n bulk=bulk,\n+ ignore_availability=ignore_availability,\n ),\n activities=None,\n )\n@@ -110,6 +111,7 @@\n bulk: int = 100,\n heartbeat_handler: \"Optional[HeartbeatHandler]\" = None,\n session: \"Optional[Session]\" = None,\n+ ignore_availability: bool = False,\n **kwargs\n ) -> bool:\n if heartbeat_handler:\n@@ -124,14 +126,15 @@\n requests_handled = 0\n try:\n admin_accounts = list_transfer_admin_accounts()\n- topology = Topology.create_from_config(logger=logger)\n+ topology = Topology.create_from_config(logger=logger, ignore_availability=ignore_availability)\n requests_with_sources = list_transfer_requests_and_source_replicas(\n total_workers=total_workers,\n worker_number=worker_number,\n limit=bulk,\n request_state=RequestState.PREPARING,\n request_type=[RequestType.TRANSFER, RequestType.STAGEIN],\n- session=session\n+ session=session,\n+ ignore_availability=ignore_availability\n )\n ret = build_transfer_paths(\n topology=topology,\n", "issue": "Support `ignore_availability` in preparer\nMotivation\r\n----------\r\nThere are two possible behaviors: \r\n- either requests are ignored when fetched from the database: https://github.com/rucio/rucio/blob/9a5d044d9f5760a224dba9feb14b71bfdf213252/lib/rucio/core/request.py#L420 \r\n- or `ignore_availability` is set and they are marked NO_SOURCES: https://github.com/rucio/rucio/blob/9a5d044d9f5760a224dba9feb14b71bfdf213252/lib/rucio/core/transfer.py#L863 \r\n\r\nSubmitter has the ability to switch between these two modes using a CLI switch. 
Preparer is hard-coded into `ignore_availability = False`\r\n\r\nModification\r\n------------\r\nImplement this switch from the submitter:\r\n\r\nhttps://github.com/rucio/rucio/blob/ac95ef3fabb8e1fcabddf1075cc19ae06db44a34/bin/rucio-conveyor-submitter#L108\r\n\r\nin the preparer: \r\nhttps://github.com/rucio/rucio/blob/2a41a64899f41b356660fff80a7530765b5d59e0/bin/rucio-conveyor-preparer#L35\r\n\r\nAnd pass it through the call stack to this function:\r\nhttps://github.com/rucio/rucio/blob/e3c77683184063424fdcf9ecfda7d37cb4641c4f/lib/rucio/daemons/conveyor/preparer.py#L128\r\n\r\nAdd a test for the two behaviors. A example test (which will have to be simplified and adapted for this issue):\r\nhttps://github.com/rucio/rucio/blob/2d81b39b8560f376f412fcba62a622a6db748780/lib/rucio/tests/test_conveyor.py#L611\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright European Organization for Nuclear Research (CERN) since 2012\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport logging\nimport threading\nfrom time import time\nfrom typing import TYPE_CHECKING\n\nimport rucio.db.sqla.util\nfrom rucio.common import exception\nfrom rucio.common.config import config_get_list\nfrom rucio.common.exception import RucioException\nfrom rucio.common.logging import setup_logging\nfrom rucio.core import transfer as transfer_core\nfrom rucio.core.request import set_requests_state_if_possible, list_transfer_requests_and_source_replicas\nfrom rucio.core.transfer import prepare_transfers, list_transfer_admin_accounts, build_transfer_paths\nfrom rucio.core.topology import Topology\nfrom rucio.db.sqla.constants import RequestState, RequestType\nfrom rucio.daemons.common import run_daemon\n\nif TYPE_CHECKING:\n from typing import Optional, List\n from sqlalchemy.orm import Session\n from rucio.daemons.common import HeartbeatHandler\n\ngraceful_stop = threading.Event()\n\n\ndef stop():\n \"\"\"\n Graceful exit.\n \"\"\"\n\n graceful_stop.set()\n\n\ndef run(once=False, threads=1, sleep_time=10, bulk=100):\n \"\"\"\n Running the preparer daemon either once or by default in a loop until stop is called.\n \"\"\"\n setup_logging()\n\n if rucio.db.sqla.util.is_old_db():\n raise exception.DatabaseException('Database was not updated, daemon won\\'t start')\n\n def preparer_kwargs():\n # not sure if this is needed for threading.Thread, but it always returns a fresh dictionary\n return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}\n\n threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]\n for thr in threads:\n thr.start()\n\n all_running = True\n while all_running:\n for thr in threads:\n thr.join(timeout=3.14)\n if not thr.is_alive() or graceful_stop.is_set():\n all_running = False\n break\n\n if graceful_stop.is_set() or once:\n logging.info('conveyor-preparer: gracefully stopping')\n else:\n logging.warning('conveyor-preparer: stopping out of the ordinary')\n graceful_stop.set()\n\n 
for thr in threads:\n thr.join(timeout=3.14)\n\n logging.info('conveyor-preparer: stopped')\n\n\ndef preparer(once, sleep_time, bulk, partition_wait_time=10):\n # Make an initial heartbeat so that all instanced daemons have the correct worker number on the next try\n logger_prefix = executable = 'conveyor-preparer'\n transfertools = config_get_list('conveyor', 'transfertool', False, None)\n\n run_daemon(\n once=once,\n graceful_stop=graceful_stop,\n executable=executable,\n logger_prefix=logger_prefix,\n partition_wait_time=partition_wait_time,\n sleep_time=sleep_time,\n run_once_fnc=functools.partial(\n run_once,\n transfertools=transfertools,\n bulk=bulk,\n ),\n activities=None,\n )\n\n\ndef run_once(\n transfertools: \"Optional[List[str]]\" = None,\n bulk: int = 100,\n heartbeat_handler: \"Optional[HeartbeatHandler]\" = None,\n session: \"Optional[Session]\" = None,\n **kwargs\n) -> bool:\n if heartbeat_handler:\n worker_number, total_workers, logger = heartbeat_handler.live()\n else:\n # This is used in tests\n worker_number, total_workers, logger = 0, 0, logging.log\n if not transfertools:\n transfertools = list(transfer_core.TRANSFERTOOL_CLASSES_BY_NAME)\n\n start_time = time()\n requests_handled = 0\n try:\n admin_accounts = list_transfer_admin_accounts()\n topology = Topology.create_from_config(logger=logger)\n requests_with_sources = list_transfer_requests_and_source_replicas(\n total_workers=total_workers,\n worker_number=worker_number,\n limit=bulk,\n request_state=RequestState.PREPARING,\n request_type=[RequestType.TRANSFER, RequestType.STAGEIN],\n session=session\n )\n ret = build_transfer_paths(\n topology=topology,\n requests_with_sources=list(requests_with_sources.values()),\n admin_accounts=admin_accounts,\n preparer_mode=True,\n logger=logger,\n session=session,\n )\n requests_handled = sum(len(i) for i in ret)\n if not requests_handled:\n updated_msg = 'had nothing to do'\n else:\n candidate_paths, reqs_no_source, reqs_scheme_mismatch, reqs_only_tape_source, _ = ret\n updated_reqs, reqs_no_transfertool = prepare_transfers(candidate_paths, transfertools=transfertools, logger=logger, session=session)\n updated_msg = f'updated {len(updated_reqs)}/{bulk} requests'\n\n if reqs_no_transfertool:\n logger(logging.INFO, \"Ignoring request because of unsupported transfertool: %s\", reqs_no_transfertool)\n reqs_no_source.update(reqs_no_transfertool)\n if reqs_no_source:\n logger(logging.INFO, \"Marking requests as no-sources: %s\", reqs_no_source)\n set_requests_state_if_possible(reqs_no_source, RequestState.NO_SOURCES, logger=logger)\n if reqs_only_tape_source:\n logger(logging.INFO, \"Marking requests as only-tape-sources: %s\", reqs_only_tape_source)\n set_requests_state_if_possible(reqs_only_tape_source, RequestState.ONLY_TAPE_SOURCES, logger=logger)\n if reqs_scheme_mismatch:\n logger(logging.INFO, \"Marking requests as scheme-mismatch: %s\", reqs_scheme_mismatch)\n set_requests_state_if_possible(reqs_scheme_mismatch, RequestState.MISMATCH_SCHEME, logger=logger)\n except RucioException:\n logger(logging.ERROR, 'errored with a RucioException, retrying later', exc_info=True)\n updated_msg = 'errored'\n logger(logging.INFO, '%s, taking %.3f seconds' % (updated_msg, time() - start_time))\n\n must_sleep = False\n if requests_handled < bulk / 2:\n logger(logging.INFO, \"Only %s transfers, which is less than half of the bulk %s\", requests_handled, bulk)\n must_sleep = True\n return must_sleep\n", "path": "lib/rucio/daemons/conveyor/preparer.py"}]}
| 3,023 | 697 |
gh_patches_debug_9803
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-990
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rasterization Impossible for (At Least Some) `class_id_to_filter` Arguments
A setup like this:
```python
class_id_filter_dict = {
0: ['==', 'default', 'Background'],
1: ['==', 'default', 'Cloud'],
}
label_source = SemanticSegmentationLabelSourceConfig(
raster_source=RasterizedSourceConfig(
vector_source=GeoJSONVectorSourceConfig(
uri=hrefs[1],
class_id_to_filter=class_id_filter_dict,
default_class_id=0),
rasterizer_config=RasterizerConfig(background_class_id=0)
))
```
produces this error
```
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 248, in <module>
main()
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 240, in run_command
runner=runner)
File "/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py", line 217, in _run_command [94/1935]
command_fn()
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 107, in chip
chip_scene(s.build(class_cfg, self.tmp_dir), TRAIN)
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py", line 96, in chip_scene
labels = self.get_train_labels(window, scene)
File "/opt/src/rastervision_core/rastervision/core/rv_pipeline/semantic_segmentation.py", line 112, in get_train_labels
return scene.ground_truth_label_source.get_labels(window=window)
File "/opt/src/rastervision_core/rastervision/core/data/label_source/semantic_segmentation_label_source.py", line 98, in get_labels
raw_labels = self.raster_source.get_raw_chip(window)
File "/opt/src/rastervision_core/rastervision/core/data/raster_source/raster_source.py", line 101, in get_raw_chip
return self._get_chip(window)
File "/opt/src/rastervision_core/rastervision/core/data/raster_source/rasterized_source.py", line 107, in _get_chip
self.get_extent())
File "/opt/src/rastervision_core/rastervision/core/data/raster_source/rasterized_source.py", line 44, in geoms_to_raster
all_touched=all_touched)
File "/opt/conda/lib/python3.6/site-packages/rasterio/env.py", line 366, in wrapper
return f(*args, **kwds)
File "/opt/conda/lib/python3.6/site-packages/rasterio/features.py", line 280, in rasterize
if not validate_dtype(shape_values, valid_dtypes):
File "/opt/conda/lib/python3.6/site-packages/rasterio/dtypes.py", line 184, in validate_dtype
get_minimum_dtype(values) in valid_dtypes)
File "/opt/conda/lib/python3.6/site-packages/rasterio/dtypes.py", line 107, in get_minimum_dtype
min_value = values.min()
File "/opt/conda/lib/python3.6/site-packages/numpy/core/_methods.py", line 32, in _amin
return umr_minimum(a, axis, None, out, keepdims, initial)
TypeError: cannot perform reduce with flexible type
```
which is evidently due to the `class_id`s being strings rather than integers.
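For illustration, here is a minimal standalone sketch of the suspected failure mode, independent of Raster Vision (it assumes only `rasterio` and `numpy`; the square polygon and burn values are made up): a string burn value leaves `rasterio.features.rasterize` unable to pick a numeric dtype, while an integer value rasterizes fine.

```python
import numpy as np
from rasterio import features

# GeoJSON-like geometry; coordinates are arbitrary illustration values.
square = {'type': 'Polygon',
          'coordinates': [[(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]]}

# Burn value as a string: numpy builds a flexible (string) dtype array and
# rasterio's validate_dtype() fails with "cannot perform reduce with flexible type".
# features.rasterize([(square, '1')], out_shape=(20, 20))

# Burn value as an int: rasterizes without complaint.
mask = features.rasterize([(square, 1)], out_shape=(20, 20), fill=0)
print(np.unique(mask))  # [0 1]
```

In the setup above, the integer keys of `class_id_filter_dict` presumably come back as strings once the config has been serialized and re-parsed, so the `class_id` values handed to the rasterizer are strings.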
</issue>
<code>
[start of rastervision_core/rastervision/core/data/vector_source/class_inference.py]
1 import copy
2
3 from rastervision.core.data.vector_source.label_maker.filter import (
4 create_filter)
5
6
7 class ClassInference():
8 """Infers missing class_ids from GeoJSON features."""
9
10 def __init__(self,
11 default_class_id,
12 class_config=None,
13 class_id_to_filter=None):
14 self.class_config = class_config
15 self.class_id_to_filter = class_id_to_filter
16 self.default_class_id = default_class_id
17
18 if self.class_id_to_filter is not None:
19 self.class_id_to_filter = {}
20 for class_id, filter_exp in class_id_to_filter.items():
21 self.class_id_to_filter[class_id] = create_filter(filter_exp)
22
23 def infer_class_id(self, feature):
24 """Infer the class_id for a GeoJSON feature.
25
26 Args:
27 feature: (dict) GeoJSON feature
28
29 Rules:
30 1) If class_id is in feature['properties'], use it.
31 2) If class_name or label are in feature['properties'] and in class_config,
32 use corresponding class_id.
33 3) If class_id_to_filter is set and filter is true when applied to feature,
34 use corresponding class_id.
35 4) Otherwise, return the default_class_id
36 """
37 class_id = feature.get('properties', {}).get('class_id')
38 if class_id is not None:
39 return class_id
40
41 if self.class_config is not None:
42 class_name = feature.get('properties', {}).get('class_name')
43 if class_name in self.class_config.names:
44 return self.class_config.names.index(class_name)
45
46 label = feature.get('properties', {}).get('label')
47 if label in self.class_config.names:
48 return self.class_config.names.index(label)
49
50 if self.class_id_to_filter is not None:
51 for class_id, filter_fn in self.class_id_to_filter.items():
52 if filter_fn(feature):
53 return class_id
54
55 return self.default_class_id
56
57 def transform_geojson(self, geojson):
58 """Transform GeoJSON by appending class_ids and removing features with no class.
59
60 For each feature in geojson, the class_id is inferred and is set into
61 feature['properties']. If the class_id is None (because none of the rules apply
62 and the default_class_id is None), the feature is dropped.
63 """
64 new_features = []
65 for feature in geojson['features']:
66 class_id = self.infer_class_id(feature)
67 if class_id is not None:
68 feature = copy.deepcopy(feature)
69 properties = feature.get('properties', {})
70 properties['class_id'] = class_id
71 feature['properties'] = properties
72 new_features.append(feature)
73 new_geojson = {'type': 'FeatureCollection', 'features': new_features}
74 return new_geojson
75
[end of rastervision_core/rastervision/core/data/vector_source/class_inference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rastervision_core/rastervision/core/data/vector_source/class_inference.py b/rastervision_core/rastervision/core/data/vector_source/class_inference.py
--- a/rastervision_core/rastervision/core/data/vector_source/class_inference.py
+++ b/rastervision_core/rastervision/core/data/vector_source/class_inference.py
@@ -18,7 +18,8 @@
if self.class_id_to_filter is not None:
self.class_id_to_filter = {}
for class_id, filter_exp in class_id_to_filter.items():
- self.class_id_to_filter[class_id] = create_filter(filter_exp)
+ self.class_id_to_filter[int(class_id)] = create_filter(
+ filter_exp)
def infer_class_id(self, feature):
"""Infer the class_id for a GeoJSON feature.
|
{"golden_diff": "diff --git a/rastervision_core/rastervision/core/data/vector_source/class_inference.py b/rastervision_core/rastervision/core/data/vector_source/class_inference.py\n--- a/rastervision_core/rastervision/core/data/vector_source/class_inference.py\n+++ b/rastervision_core/rastervision/core/data/vector_source/class_inference.py\n@@ -18,7 +18,8 @@\n if self.class_id_to_filter is not None:\n self.class_id_to_filter = {}\n for class_id, filter_exp in class_id_to_filter.items():\n- self.class_id_to_filter[class_id] = create_filter(filter_exp)\n+ self.class_id_to_filter[int(class_id)] = create_filter(\n+ filter_exp)\n \n def infer_class_id(self, feature):\n \"\"\"Infer the class_id for a GeoJSON feature.\n", "issue": "Rasterization Impossible for (At Least Some) `class_id_to_filter` Arguments\nA setup like this:\r\n```python\r\n class_id_filter_dict = {\r\n 0: ['==', 'default', 'Background'],\r\n 1: ['==', 'default', 'Cloud'],\r\n }\r\n\r\n label_source = SemanticSegmentationLabelSourceConfig(\r\n raster_source=RasterizedSourceConfig(\r\n vector_source=GeoJSONVectorSourceConfig(\r\n uri=hrefs[1],\r\n class_id_to_filter=class_id_filter_dict,\r\n default_class_id=0),\r\n rasterizer_config=RasterizerConfig(background_class_id=0)\r\n ))\r\n```\r\n\r\nproduces this error\r\n```\r\nTraceback (most recent call last): \r\n File \"/opt/conda/lib/python3.6/runpy.py\", line 193, in _run_module_as_main \r\n \"__main__\", mod_spec) \r\n File \"/opt/conda/lib/python3.6/runpy.py\", line 85, in _run_code \r\n exec(code, run_globals) \r\n File \"/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py\", line 248, in <module> \r\n main() \r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 722, in __call__ \r\n return self.main(*args, **kwargs) \r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 697, in main \r\n rv = self.invoke(ctx) \r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 1066, in invoke \r\n return _process_result(sub_ctx.command.invoke(sub_ctx)) \r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 895, in invoke \r\n return ctx.invoke(self.callback, **ctx.params) \r\n File \"/opt/conda/lib/python3.6/site-packages/click/core.py\", line 535, in invoke \r\n return callback(*args, **kwargs) \r\n File \"/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py\", line 240, in run_command \r\n runner=runner)\r\n File \"/opt/src/rastervision_pipeline/rastervision/pipeline/cli.py\", line 217, in _run_command [94/1935]\r\n command_fn() \r\n File \"/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py\", line 107, in chip \r\n chip_scene(s.build(class_cfg, self.tmp_dir), TRAIN) \r\n File \"/opt/src/rastervision_core/rastervision/core/rv_pipeline/rv_pipeline.py\", line 96, in chip_scene \r\n labels = self.get_train_labels(window, scene) \r\n File \"/opt/src/rastervision_core/rastervision/core/rv_pipeline/semantic_segmentation.py\", line 112, in get_train_labels \r\n return scene.ground_truth_label_source.get_labels(window=window) \r\n File \"/opt/src/rastervision_core/rastervision/core/data/label_source/semantic_segmentation_label_source.py\", line 98, in get_labels \r\n raw_labels = self.raster_source.get_raw_chip(window) \r\n File \"/opt/src/rastervision_core/rastervision/core/data/raster_source/raster_source.py\", line 101, in get_raw_chip \r\n return self._get_chip(window) \r\n File \"/opt/src/rastervision_core/rastervision/core/data/raster_source/rasterized_source.py\", line 107, in 
_get_chip \r\n self.get_extent()) \r\n File \"/opt/src/rastervision_core/rastervision/core/data/raster_source/rasterized_source.py\", line 44, in geoms_to_raster\r\n all_touched=all_touched) \r\n File \"/opt/conda/lib/python3.6/site-packages/rasterio/env.py\", line 366, in wrapper\r\n return f(*args, **kwds) \r\n File \"/opt/conda/lib/python3.6/site-packages/rasterio/features.py\", line 280, in rasterize \r\n if not validate_dtype(shape_values, valid_dtypes): \r\n File \"/opt/conda/lib/python3.6/site-packages/rasterio/dtypes.py\", line 184, in validate_dtype\r\n get_minimum_dtype(values) in valid_dtypes) \r\n File \"/opt/conda/lib/python3.6/site-packages/rasterio/dtypes.py\", line 107, in get_minimum_dtype\r\n min_value = values.min() \r\n File \"/opt/conda/lib/python3.6/site-packages/numpy/core/_methods.py\", line 32, in _amin\r\n return umr_minimum(a, axis, None, out, keepdims, initial) \r\nTypeError: cannot perform reduce with flexible type\r\n```\r\nwhich is evidently due to the `class_id`s being strings rather than integers.\r\n\n", "before_files": [{"content": "import copy\n\nfrom rastervision.core.data.vector_source.label_maker.filter import (\n create_filter)\n\n\nclass ClassInference():\n \"\"\"Infers missing class_ids from GeoJSON features.\"\"\"\n\n def __init__(self,\n default_class_id,\n class_config=None,\n class_id_to_filter=None):\n self.class_config = class_config\n self.class_id_to_filter = class_id_to_filter\n self.default_class_id = default_class_id\n\n if self.class_id_to_filter is not None:\n self.class_id_to_filter = {}\n for class_id, filter_exp in class_id_to_filter.items():\n self.class_id_to_filter[class_id] = create_filter(filter_exp)\n\n def infer_class_id(self, feature):\n \"\"\"Infer the class_id for a GeoJSON feature.\n\n Args:\n feature: (dict) GeoJSON feature\n\n Rules:\n 1) If class_id is in feature['properties'], use it.\n 2) If class_name or label are in feature['properties'] and in class_config,\n use corresponding class_id.\n 3) If class_id_to_filter is set and filter is true when applied to feature,\n use corresponding class_id.\n 4) Otherwise, return the default_class_id\n \"\"\"\n class_id = feature.get('properties', {}).get('class_id')\n if class_id is not None:\n return class_id\n\n if self.class_config is not None:\n class_name = feature.get('properties', {}).get('class_name')\n if class_name in self.class_config.names:\n return self.class_config.names.index(class_name)\n\n label = feature.get('properties', {}).get('label')\n if label in self.class_config.names:\n return self.class_config.names.index(label)\n\n if self.class_id_to_filter is not None:\n for class_id, filter_fn in self.class_id_to_filter.items():\n if filter_fn(feature):\n return class_id\n\n return self.default_class_id\n\n def transform_geojson(self, geojson):\n \"\"\"Transform GeoJSON by appending class_ids and removing features with no class.\n\n For each feature in geojson, the class_id is inferred and is set into\n feature['properties']. 
If the class_id is None (because none of the rules apply\n and the default_class_id is None), the feature is dropped.\n \"\"\"\n new_features = []\n for feature in geojson['features']:\n class_id = self.infer_class_id(feature)\n if class_id is not None:\n feature = copy.deepcopy(feature)\n properties = feature.get('properties', {})\n properties['class_id'] = class_id\n feature['properties'] = properties\n new_features.append(feature)\n new_geojson = {'type': 'FeatureCollection', 'features': new_features}\n return new_geojson\n", "path": "rastervision_core/rastervision/core/data/vector_source/class_inference.py"}]}
| 2,450 | 184 |
gh_patches_debug_13362
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-660
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation improvements for Randomizable
In here: https://docs.monai.io/en/latest/transforms.html#randomizable
From the text description, it is not clear that `self.R` is a class variable rather than an instance variable.
It is also not clear that `self.R` is of type `np.random.RandomState`.
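For example, a minimal sketch of how a randomized transform is expected to interact with that state (the transform name and the offset logic are invented for illustration, and MONAI is assumed to be importable):

```python
import numpy as np
from monai.transforms.compose import Randomizable

class RandShiftExample(Randomizable):
    """Toy transform: shifts its input by a random offset drawn from self.R."""

    def randomize(self):
        # R starts out as a class-level np.random.RandomState shared by all
        # instances; set_random_state() rebinds it on this instance.
        self._offset = self.R.uniform(low=0.0, high=1.0)

    def __call__(self, img):
        self.randomize()
        return img + self._offset

t = RandShiftExample()
t.set_random_state(seed=0)                     # deterministic stream for this instance
print(isinstance(t.R, np.random.RandomState))  # True
print(t(np.zeros(3)))
```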
</issue>
<code>
[start of monai/transforms/compose.py]
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11 """
12 A collection of generic interfaces for MONAI transforms.
13 """
14
15 import warnings
16 from typing import Hashable, Optional, Tuple, Any
17 from abc import ABC, abstractmethod
18 import numpy as np
19
20 from monai.config.type_definitions import KeysCollection
21 from monai.utils.misc import ensure_tuple, get_seed
22 from .utils import apply_transform
23
24
25 class Transform(ABC):
26 """
27 An abstract class of a ``Transform``.
28 A transform is callable that processes ``data``.
29
30 It could be stateful and may modify ``data`` in place,
31 the implementation should be aware of:
32
33 #. thread safety when mutating its own states.
34 When used from a multi-process context, transform's instance variables are read-only.
35 #. ``data`` content unused by this transform may still be used in the
36 subsequent transforms in a composed transform.
37 #. storing too much information in ``data`` may not scale.
38
39 See Also
40
41 :py:class:`monai.transforms.Compose`
42 """
43
44 @abstractmethod
45 def __call__(self, data: Any):
46 """
47 ``data`` is an element which often comes from an iteration over an
48 iterable, such as :py:class:`torch.utils.data.Dataset`. This method should
49 return an updated version of ``data``.
50 To simplify the input validations, most of the transforms assume that
51
52 - ``data`` is a Numpy ndarray, PyTorch Tensor or string
53 - the data shape can be:
54
55 #. string data without shape, `LoadNifti` and `LoadPNG` transforms expect file paths
56 #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,
57 except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and
58 `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)
59 #. most of the post-processing transforms expect
60 ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``
61
62 - the channel dimension is not omitted even if number of channels is one
63
64 This method can optionally take additional arguments to help execute transformation operation.
65
66 Raises:
67 NotImplementedError: Subclass {self.__class__.__name__} must implement the compute method
68
69 """
70 raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement the compute method")
71
72
73 class Randomizable(ABC):
74 """
75 An interface for handling local numpy random state.
76 this is mainly for randomized data augmentation transforms.
77 """
78
79 R: np.random.RandomState = np.random.RandomState()
80
81 def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None):
82 """
83 Set the random state locally, to control the randomness, the derived
84 classes should use :py:attr:`self.R` instead of `np.random` to introduce random
85 factors.
86
87 Args:
88 seed: set the random state with an integer seed.
89 state (np.random.RandomState): set the random state with a `np.random.RandomState` object.
90
91 Returns:
92 a Randomizable instance.
93
94 Raises:
95 ValueError: `state` must be a `np.random.RandomState`, got {type(state)}
96
97 """
98 if seed is not None:
99 _seed = id(seed) if not isinstance(seed, int) else seed
100 self.R = np.random.RandomState(_seed)
101 return self
102
103 if state is not None:
104 if not isinstance(state, np.random.RandomState):
105 raise ValueError(f"`state` must be a `np.random.RandomState`, got {type(state)}")
106 self.R = state
107 return self
108
109 self.R = np.random.RandomState()
110 return self
111
112 @abstractmethod
113 def randomize(self):
114 """
115 Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.
116
117 all :py:attr:`self.R` calls happen here so that we have a better chance to
118 identify errors of sync the random state.
119
120 This method can optionally take additional arguments so that the random factors are generated based on
121 properties of the input data.
122
123 Raises:
124 NotImplementedError: Subclass {self.__class__.__name__} must implement the compute method
125
126 """
127 raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement the compute method")
128
129
130 class Compose(Randomizable):
131 """
132 ``Compose`` provides the ability to chain a series of calls together in a
133 sequence. Each transform in the sequence must take a single argument and
134 return a single value, so that the transforms can be called in a chain.
135
136 ``Compose`` can be used in two ways:
137
138 #. With a series of transforms that accept and return a single
139 ndarray / tensor / tensor-like parameter.
140 #. With a series of transforms that accept and return a dictionary that
141 contains one or more parameters. Such transforms must have pass-through
142 semantics; unused values in the dictionary must be copied to the return
143 dictionary. It is required that the dictionary is copied between input
144 and output of each transform.
145
146 If some transform generates a list batch of data in the transform chain,
147 every item in the list is still a dictionary, and all the following
148 transforms will apply to every item of the list, for example:
149
150 #. transformA normalizes the intensity of 'img' field in the dict data.
151 #. transformB crops out a list batch of images on 'img' and 'seg' field.
152 And constructs a list of dict data, other fields are copied::
153
154 { [{ {
155 'img': [1, 2], 'img': [1], 'img': [2],
156 'seg': [1, 2], 'seg': [1], 'seg': [2],
157 'extra': 123, --> 'extra': 123, 'extra': 123,
158 'shape': 'CHWD' 'shape': 'CHWD' 'shape': 'CHWD'
159 } }, }]
160
161 #. transformC then randomly rotates or flips 'img' and 'seg' fields of
162 every dictionary item in the list.
163
164 The composed transforms will be set the same global random seed if user called
165 `set_determinism()`.
166
167 When using the pass-through dictionary operation, you can make use of
168 :class:`monai.transforms.adaptors.adaptor` to wrap transforms that don't conform
169 to the requirements. This approach allows you to use transforms from
170 otherwise incompatible libraries with minimal additional work.
171
172 Note:
173
174 In many cases, Compose is not the best way to create pre-processing
175 pipelines. Pre-processing is often not a strictly sequential series of
176 operations, and much of the complexity arises when a not-sequential
177 set of functions must be called as if it were a sequence.
178
179 Example: images and labels
180 Images typically require some kind of normalisation that labels do not.
181 Both are then typically augmented through the use of random rotations,
182 flips, and deformations.
183 Compose can be used with a series of transforms that take a dictionary
184 that contains 'image' and 'label' entries. This might require wrapping
185 `torchvision` transforms before passing them to compose.
186 Alternatively, one can create a class with a `__call__` function that
187 calls your pre-processing functions taking into account that not all of
188 them are called on the labels.
189 """
190
191 def __init__(self, transforms=None) -> None:
192 if transforms is None:
193 transforms = []
194 if not isinstance(transforms, (list, tuple)):
195 raise ValueError("Parameters 'transforms' must be a list or tuple.")
196 self.transforms = transforms
197 self.set_random_state(seed=get_seed())
198
199 def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None):
200 for _transform in self.transforms:
201 if not isinstance(_transform, Randomizable):
202 continue
203 _transform.set_random_state(seed, state)
204
205 def randomize(self):
206 for _transform in self.transforms:
207 if not isinstance(_transform, Randomizable):
208 continue
209 try:
210 _transform.randomize()
211 except TypeError as type_error:
212 tfm_name: str = type(_transform).__name__
213 warnings.warn(
214 f'Transform "{tfm_name}" in Compose not randomized\n{tfm_name}.{type_error}.', RuntimeWarning
215 )
216
217 def __call__(self, input_):
218 for _transform in self.transforms:
219 input_ = apply_transform(_transform, input_)
220 return input_
221
222
223 class MapTransform(Transform):
224 """
225 A subclass of :py:class:`monai.transforms.Transform` with an assumption
226 that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``.
227
228 The ``keys`` parameter will be used to get and set the actual data
229 item to transform. That is, the callable of this transform should
230 follow the pattern:
231
232 .. code-block:: python
233
234 def __call__(self, data):
235 for key in self.keys:
236 if key in data:
237 # update output data with some_transform_function(data[key]).
238 else:
239 # do nothing or some exceptions handling.
240 return data
241
242 """
243
244 def __init__(self, keys: KeysCollection):
245 self.keys: Tuple[Any, ...] = ensure_tuple(keys)
246 if not self.keys:
247 raise ValueError("keys unspecified")
248 for key in self.keys:
249 if not isinstance(key, Hashable):
250 raise ValueError(f"keys should be a hashable or a sequence of hashables, got {type(key)}")
251
252 @abstractmethod
253 def __call__(self, data):
254 """
255 ``data`` often comes from an iteration over an iterable,
256 such as :py:class:`torch.utils.data.Dataset`.
257
258 To simplify the input validations, this method assumes:
259
260 - ``data`` is a Python dictionary
261 - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element
262 of ``self.keys``, the data shape can be:
263
264 #. string data without shape, `LoadNiftid` and `LoadPNGd` transforms expect file paths
265 #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,
266 except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and
267 `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)
268 #. most of the post-processing transforms expect
269 ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``
270
271 - the channel dimension is not omitted even if number of channels is one
272
273 returns:
274 An updated dictionary version of ``data`` by applying the transform.
275 """
276 raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement the compute method")
277
[end of monai/transforms/compose.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py
--- a/monai/transforms/compose.py
+++ b/monai/transforms/compose.py
@@ -72,8 +72,20 @@
class Randomizable(ABC):
"""
- An interface for handling local numpy random state.
- this is mainly for randomized data augmentation transforms.
+ An interface for handling random state locally, currently based on a class variable `R`,
+ which is an instance of `np.random.RandomState`.
+ This is mainly for randomized data augmentation transforms. For example::
+
+ class RandShiftIntensity(Randomizable):
+ def randomize():
+ self._offset = self.R.uniform(low=0, high=100)
+ def __call__(self, img):
+ self.randomize()
+ return img + self._offset
+
+ transform = RandShiftIntensity()
+ transform.set_random_state(seed=0)
+
"""
R: np.random.RandomState = np.random.RandomState()
|
{"golden_diff": "diff --git a/monai/transforms/compose.py b/monai/transforms/compose.py\n--- a/monai/transforms/compose.py\n+++ b/monai/transforms/compose.py\n@@ -72,8 +72,20 @@\n \n class Randomizable(ABC):\n \"\"\"\n- An interface for handling local numpy random state.\n- this is mainly for randomized data augmentation transforms.\n+ An interface for handling random state locally, currently based on a class variable `R`,\n+ which is an instance of `np.random.RandomState`.\n+ This is mainly for randomized data augmentation transforms. For example::\n+\n+ class RandShiftIntensity(Randomizable):\n+ def randomize():\n+ self._offset = self.R.uniform(low=0, high=100)\n+ def __call__(self, img):\n+ self.randomize()\n+ return img + self._offset\n+\n+ transform = RandShiftIntensity()\n+ transform.set_random_state(seed=0)\n+\n \"\"\"\n \n R: np.random.RandomState = np.random.RandomState()\n", "issue": "Documentation improvements for Randomizable\nIn here: https://docs.monai.io/en/latest/transforms.html#randomizable\r\n\r\nFrom the text description.\r\nI think it is not clear that this self.R is a class variable instead of an instance variable.\r\nAnd it is also not clear that self.R is of type np.random.RandomState\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of generic interfaces for MONAI transforms.\n\"\"\"\n\nimport warnings\nfrom typing import Hashable, Optional, Tuple, Any\nfrom abc import ABC, abstractmethod\nimport numpy as np\n\nfrom monai.config.type_definitions import KeysCollection\nfrom monai.utils.misc import ensure_tuple, get_seed\nfrom .utils import apply_transform\n\n\nclass Transform(ABC):\n \"\"\"\n An abstract class of a ``Transform``.\n A transform is callable that processes ``data``.\n\n It could be stateful and may modify ``data`` in place,\n the implementation should be aware of:\n\n #. thread safety when mutating its own states.\n When used from a multi-process context, transform's instance variables are read-only.\n #. ``data`` content unused by this transform may still be used in the\n subsequent transforms in a composed transform.\n #. storing too much information in ``data`` may not scale.\n\n See Also\n\n :py:class:`monai.transforms.Compose`\n \"\"\"\n\n @abstractmethod\n def __call__(self, data: Any):\n \"\"\"\n ``data`` is an element which often comes from an iteration over an\n iterable, such as :py:class:`torch.utils.data.Dataset`. This method should\n return an updated version of ``data``.\n To simplify the input validations, most of the transforms assume that\n\n - ``data`` is a Numpy ndarray, PyTorch Tensor or string\n - the data shape can be:\n\n #. string data without shape, `LoadNifti` and `LoadPNG` transforms expect file paths\n #. 
most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,\n except that `AddChannel` expects (spatial_dim_1[, spatial_dim_2, ...]) and\n `AsChannelFirst` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)\n #. most of the post-processing transforms expect\n ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``\n\n - the channel dimension is not omitted even if number of channels is one\n\n This method can optionally take additional arguments to help execute transformation operation.\n\n Raises:\n NotImplementedError: Subclass {self.__class__.__name__} must implement the compute method\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement the compute method\")\n\n\nclass Randomizable(ABC):\n \"\"\"\n An interface for handling local numpy random state.\n this is mainly for randomized data augmentation transforms.\n \"\"\"\n\n R: np.random.RandomState = np.random.RandomState()\n\n def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None):\n \"\"\"\n Set the random state locally, to control the randomness, the derived\n classes should use :py:attr:`self.R` instead of `np.random` to introduce random\n factors.\n\n Args:\n seed: set the random state with an integer seed.\n state (np.random.RandomState): set the random state with a `np.random.RandomState` object.\n\n Returns:\n a Randomizable instance.\n\n Raises:\n ValueError: `state` must be a `np.random.RandomState`, got {type(state)}\n\n \"\"\"\n if seed is not None:\n _seed = id(seed) if not isinstance(seed, int) else seed\n self.R = np.random.RandomState(_seed)\n return self\n\n if state is not None:\n if not isinstance(state, np.random.RandomState):\n raise ValueError(f\"`state` must be a `np.random.RandomState`, got {type(state)}\")\n self.R = state\n return self\n\n self.R = np.random.RandomState()\n return self\n\n @abstractmethod\n def randomize(self):\n \"\"\"\n Within this method, :py:attr:`self.R` should be used, instead of `np.random`, to introduce random factors.\n\n all :py:attr:`self.R` calls happen here so that we have a better chance to\n identify errors of sync the random state.\n\n This method can optionally take additional arguments so that the random factors are generated based on\n properties of the input data.\n\n Raises:\n NotImplementedError: Subclass {self.__class__.__name__} must implement the compute method\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement the compute method\")\n\n\nclass Compose(Randomizable):\n \"\"\"\n ``Compose`` provides the ability to chain a series of calls together in a\n sequence. Each transform in the sequence must take a single argument and\n return a single value, so that the transforms can be called in a chain.\n\n ``Compose`` can be used in two ways:\n\n #. With a series of transforms that accept and return a single\n ndarray / tensor / tensor-like parameter.\n #. With a series of transforms that accept and return a dictionary that\n contains one or more parameters. Such transforms must have pass-through\n semantics; unused values in the dictionary must be copied to the return\n dictionary. It is required that the dictionary is copied between input\n and output of each transform.\n\n If some transform generates a list batch of data in the transform chain,\n every item in the list is still a dictionary, and all the following\n transforms will apply to every item of the list, for example:\n\n #. 
transformA normalizes the intensity of 'img' field in the dict data.\n #. transformB crops out a list batch of images on 'img' and 'seg' field.\n And constructs a list of dict data, other fields are copied::\n\n { [{ {\n 'img': [1, 2], 'img': [1], 'img': [2],\n 'seg': [1, 2], 'seg': [1], 'seg': [2],\n 'extra': 123, --> 'extra': 123, 'extra': 123,\n 'shape': 'CHWD' 'shape': 'CHWD' 'shape': 'CHWD'\n } }, }]\n\n #. transformC then randomly rotates or flips 'img' and 'seg' fields of\n every dictionary item in the list.\n\n The composed transforms will be set the same global random seed if user called\n `set_determinism()`.\n\n When using the pass-through dictionary operation, you can make use of\n :class:`monai.transforms.adaptors.adaptor` to wrap transforms that don't conform\n to the requirements. This approach allows you to use transforms from\n otherwise incompatible libraries with minimal additional work.\n\n Note:\n\n In many cases, Compose is not the best way to create pre-processing\n pipelines. Pre-processing is often not a strictly sequential series of\n operations, and much of the complexity arises when a not-sequential\n set of functions must be called as if it were a sequence.\n\n Example: images and labels\n Images typically require some kind of normalisation that labels do not.\n Both are then typically augmented through the use of random rotations,\n flips, and deformations.\n Compose can be used with a series of transforms that take a dictionary\n that contains 'image' and 'label' entries. This might require wrapping\n `torchvision` transforms before passing them to compose.\n Alternatively, one can create a class with a `__call__` function that\n calls your pre-processing functions taking into account that not all of\n them are called on the labels.\n \"\"\"\n\n def __init__(self, transforms=None) -> None:\n if transforms is None:\n transforms = []\n if not isinstance(transforms, (list, tuple)):\n raise ValueError(\"Parameters 'transforms' must be a list or tuple.\")\n self.transforms = transforms\n self.set_random_state(seed=get_seed())\n\n def set_random_state(self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None):\n for _transform in self.transforms:\n if not isinstance(_transform, Randomizable):\n continue\n _transform.set_random_state(seed, state)\n\n def randomize(self):\n for _transform in self.transforms:\n if not isinstance(_transform, Randomizable):\n continue\n try:\n _transform.randomize()\n except TypeError as type_error:\n tfm_name: str = type(_transform).__name__\n warnings.warn(\n f'Transform \"{tfm_name}\" in Compose not randomized\\n{tfm_name}.{type_error}.', RuntimeWarning\n )\n\n def __call__(self, input_):\n for _transform in self.transforms:\n input_ = apply_transform(_transform, input_)\n return input_\n\n\nclass MapTransform(Transform):\n \"\"\"\n A subclass of :py:class:`monai.transforms.Transform` with an assumption\n that the ``data`` input of ``self.__call__`` is a MutableMapping such as ``dict``.\n\n The ``keys`` parameter will be used to get and set the actual data\n item to transform. That is, the callable of this transform should\n follow the pattern:\n\n .. code-block:: python\n\n def __call__(self, data):\n for key in self.keys:\n if key in data:\n # update output data with some_transform_function(data[key]).\n else:\n # do nothing or some exceptions handling.\n return data\n\n \"\"\"\n\n def __init__(self, keys: KeysCollection):\n self.keys: Tuple[Any, ...] 
= ensure_tuple(keys)\n if not self.keys:\n raise ValueError(\"keys unspecified\")\n for key in self.keys:\n if not isinstance(key, Hashable):\n raise ValueError(f\"keys should be a hashable or a sequence of hashables, got {type(key)}\")\n\n @abstractmethod\n def __call__(self, data):\n \"\"\"\n ``data`` often comes from an iteration over an iterable,\n such as :py:class:`torch.utils.data.Dataset`.\n\n To simplify the input validations, this method assumes:\n\n - ``data`` is a Python dictionary\n - ``data[key]`` is a Numpy ndarray, PyTorch Tensor or string, where ``key`` is an element\n of ``self.keys``, the data shape can be:\n\n #. string data without shape, `LoadNiftid` and `LoadPNGd` transforms expect file paths\n #. most of the pre-processing transforms expect: ``(num_channels, spatial_dim_1[, spatial_dim_2, ...])``,\n except that `AddChanneld` expects (spatial_dim_1[, spatial_dim_2, ...]) and\n `AsChannelFirstd` expects (spatial_dim_1[, spatial_dim_2, ...], num_channels)\n #. most of the post-processing transforms expect\n ``(batch_size, num_channels, spatial_dim_1[, spatial_dim_2, ...])``\n\n - the channel dimension is not omitted even if number of channels is one\n\n returns:\n An updated dictionary version of ``data`` by applying the transform.\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement the compute method\")\n", "path": "monai/transforms/compose.py"}]}
| 3,890 | 230 |
gh_patches_debug_43111
|
rasdani/github-patches
|
git_diff
|
cobbler__cobbler-2444
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PAM authentication failure
### Describe the bug
Enabling the authentication.pam module results in a `TypeError` exception and a failure to authenticate.
### To Reproduce
1. Change the authentication module to "authentication.pam" in modules.conf
2. Restart the cobbler service
3. Attempt to log in via the web UI
4. See error in cobbler.log
````
DEBUG | authn_pam: PAM service is login
INFO | Exception occured: <class 'ctypes.ArgumentError'>
INFO | Exception value: argument 1: <class 'TypeError'>: wrong type
INFO | Exception Info:
File "/usr/lib/python3.6/site-packages/cobbler/remote.py", line 3477, in _dispatch
return method_handle(*params)
File "/usr/lib/python3.6/site-packages/cobbler/remote.py", line 3233, in login
if self.__validate_user(login_user, login_password):
File "/usr/lib/python3.6/site-packages/cobbler/remote.py", line 3113, in __validate_user
return self.api.authenticate(input_user, input_password)
File "/usr/lib/python3.6/site-packages/cobbler/api.py", line 1599, in authenticate
rc = self.authn.authenticate(self, user, password)
File "/usr/lib/python3.6/site-packages/cobbler/modules/authentication/pam.py", line 141, in authenticate
retval = PAM_START(str(service), str(username), pointer(conv), pointer(handle))
````
### Expected behavior
<!--- A clear and concise description of what you expected to happen. -->
PAM authentication should succeed.
### Version
<!--- Paste output from `cobbler version` -->
````
Cobbler 3.1.2
source: ?, ?
build time: Sun May 31 02:32:34 2020
````
### Screenshots
<!--- If applicable, add screenshots to help explain your problem. -->
### Additional context
<!--- Add any other context about the problem here. -->
</issue>
<code>
[start of cobbler/modules/authentication/pam.py]
1 """
2 Authentication module that uses /etc/cobbler/auth.conf
3 Choice of authentication module is in /etc/cobbler/modules.conf
4
5 Copyright 2007-2009, Red Hat, Inc and Others
6 Michael DeHaan <michael.dehaan AT gmail>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301 USA
22
23 PAM python code based on the pam_python code created by Chris AtLee:
24 http://atlee.ca/software/pam/
25
26 #-----------------------------------------------
27 pam_python (c) 2007 Chris AtLee <[email protected]>
28 Licensed under the MIT license:
29 http://www.opensource.org/licenses/mit-license.php
30
31 PAM module for python
32
33 Provides an authenticate function that will allow the caller to authenticate
34 a user against the Pluggable Authentication Modules (PAM) on the system.
35
36 Implemented using ctypes, so no compilation is necessary.
37 """
38
39 from builtins import str
40 from builtins import range
41 from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
42 from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
43 from ctypes.util import find_library
44
45 LIBPAM = CDLL(find_library("pam"))
46 LIBC = CDLL(find_library("c"))
47
48 CALLOC = LIBC.calloc
49 CALLOC.restype = c_void_p
50 CALLOC.argtypes = [c_uint, c_uint]
51
52 STRDUP = LIBC.strdup
53 STRDUP.argstypes = [c_char_p]
54 STRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!
55
56 # Various constants
57 PAM_PROMPT_ECHO_OFF = 1
58 PAM_PROMPT_ECHO_ON = 2
59 PAM_ERROR_MSG = 3
60 PAM_TEXT_INFO = 4
61
62
63 def register():
64 """
65 The mandatory Cobbler module registration hook.
66 """
67 return "authn"
68
69
70 class PamHandle(Structure):
71 """wrapper class for pam_handle_t"""
72 _fields_ = [("handle", c_void_p)]
73
74 def __init__(self):
75 Structure.__init__(self)
76 self.handle = 0
77
78
79 class PamMessage(Structure):
80 """wrapper class for pam_message structure"""
81 _fields_ = [("msg_style", c_int), ("msg", c_char_p)]
82
83 def __repr__(self):
84 return "<PamMessage %i '%s'>" % (self.msg_style, self.msg)
85
86
87 class PamResponse(Structure):
88 """wrapper class for pam_response structure"""
89 _fields_ = [("resp", c_char_p), ("resp_retcode", c_int)]
90
91 def __repr__(self):
92 return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp)
93
94
95 CONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)
96
97
98 class PamConv(Structure):
99 """wrapper class for pam_conv structure"""
100 _fields_ = [("conv", CONV_FUNC), ("appdata_ptr", c_void_p)]
101
102
103 PAM_START = LIBPAM.pam_start
104 PAM_START.restype = c_int
105 PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]
106
107 PAM_AUTHENTICATE = LIBPAM.pam_authenticate
108 PAM_AUTHENTICATE.restype = c_int
109 PAM_AUTHENTICATE.argtypes = [PamHandle, c_int]
110
111
112 def authenticate(api_handle, username, password):
113 """
114 Returns True if the given username and password authenticate for the
115 given service. Returns False otherwise
116 """
117
118 @CONV_FUNC
119 def my_conv(n_messages, messages, p_response, app_data):
120 """Simple conversation function that responds to any
121 prompt where the echo is off with the supplied password"""
122 # Create an array of n_messages response objects
123 addr = CALLOC(n_messages, sizeof(PamResponse))
124 p_response[0] = cast(addr, POINTER(PamResponse))
125 for i in range(n_messages):
126 if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
127 pw_copy = STRDUP(str(password))
128 p_response.contents[i].resp = cast(pw_copy, c_char_p)
129 p_response.contents[i].resp_retcode = 0
130 return 0
131
132 try:
133 service = api_handle.settings().authn_pam_service
134 except:
135 service = 'login'
136
137 api_handle.logger.debug("authn_pam: PAM service is %s" % service)
138
139 handle = PamHandle()
140 conv = PamConv(my_conv, 0)
141 retval = PAM_START(service, username, pointer(conv), pointer(handle))
142
143 if retval != 0:
144 # TODO: This is not an authentication error, something
145 # has gone wrong starting up PAM
146 api_handle.logger.error("authn_pam: error initializing PAM library")
147 return False
148
149 retval = PAM_AUTHENTICATE(handle, 0)
150 return retval == 0
151
[end of cobbler/modules/authentication/pam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cobbler/modules/authentication/pam.py b/cobbler/modules/authentication/pam.py
--- a/cobbler/modules/authentication/pam.py
+++ b/cobbler/modules/authentication/pam.py
@@ -36,7 +36,6 @@
Implemented using ctypes, so no compilation is necessary.
"""
-from builtins import str
from builtins import range
from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof
from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int
@@ -68,7 +67,9 @@
class PamHandle(Structure):
- """wrapper class for pam_handle_t"""
+ """
+ wrapper class for pam_handle_t
+ """
_fields_ = [("handle", c_void_p)]
def __init__(self):
@@ -77,7 +78,9 @@
class PamMessage(Structure):
- """wrapper class for pam_message structure"""
+ """
+ wrapper class for pam_message structure
+ """
_fields_ = [("msg_style", c_int), ("msg", c_char_p)]
def __repr__(self):
@@ -85,7 +88,9 @@
class PamResponse(Structure):
- """wrapper class for pam_response structure"""
+ """
+ wrapper class for pam_response structure
+ """
_fields_ = [("resp", c_char_p), ("resp_retcode", c_int)]
def __repr__(self):
@@ -96,7 +101,9 @@
class PamConv(Structure):
- """wrapper class for pam_conv structure"""
+ """
+ wrapper class for pam_conv structure
+ """
_fields_ = [("conv", CONV_FUNC), ("appdata_ptr", c_void_p)]
@@ -111,20 +118,26 @@
def authenticate(api_handle, username, password):
"""
- Returns True if the given username and password authenticate for the
- given service. Returns False otherwise
+ :param api_handle: Used for resolving the the pam service name and getting the Logger.
+ :param username:The username to log in with.
+ :type username: str
+ :param password: The password to log in with.
+ :type password: str
+ :returns: True if the given username and password authenticate for the given service. Otherwise False
+ :rtype: bool
"""
@CONV_FUNC
def my_conv(n_messages, messages, p_response, app_data):
- """Simple conversation function that responds to any
- prompt where the echo is off with the supplied password"""
+ """
+ Simple conversation function that responds to any prompt where the echo is off with the supplied password
+ """
# Create an array of n_messages response objects
addr = CALLOC(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
- pw_copy = STRDUP(str(password))
+ pw_copy = STRDUP(password.encode())
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
@@ -138,11 +151,10 @@
handle = PamHandle()
conv = PamConv(my_conv, 0)
- retval = PAM_START(service, username, pointer(conv), pointer(handle))
+ retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))
if retval != 0:
- # TODO: This is not an authentication error, something
- # has gone wrong starting up PAM
+ # TODO: This is not an authentication error, something has gone wrong starting up PAM
api_handle.logger.error("authn_pam: error initializing PAM library")
return False
|
{"golden_diff": "diff --git a/cobbler/modules/authentication/pam.py b/cobbler/modules/authentication/pam.py\n--- a/cobbler/modules/authentication/pam.py\n+++ b/cobbler/modules/authentication/pam.py\n@@ -36,7 +36,6 @@\n Implemented using ctypes, so no compilation is necessary.\n \"\"\"\n \n-from builtins import str\n from builtins import range\n from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof\n from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int\n@@ -68,7 +67,9 @@\n \n \n class PamHandle(Structure):\n- \"\"\"wrapper class for pam_handle_t\"\"\"\n+ \"\"\"\n+ wrapper class for pam_handle_t\n+ \"\"\"\n _fields_ = [(\"handle\", c_void_p)]\n \n def __init__(self):\n@@ -77,7 +78,9 @@\n \n \n class PamMessage(Structure):\n- \"\"\"wrapper class for pam_message structure\"\"\"\n+ \"\"\"\n+ wrapper class for pam_message structure\n+ \"\"\"\n _fields_ = [(\"msg_style\", c_int), (\"msg\", c_char_p)]\n \n def __repr__(self):\n@@ -85,7 +88,9 @@\n \n \n class PamResponse(Structure):\n- \"\"\"wrapper class for pam_response structure\"\"\"\n+ \"\"\"\n+ wrapper class for pam_response structure\n+ \"\"\"\n _fields_ = [(\"resp\", c_char_p), (\"resp_retcode\", c_int)]\n \n def __repr__(self):\n@@ -96,7 +101,9 @@\n \n \n class PamConv(Structure):\n- \"\"\"wrapper class for pam_conv structure\"\"\"\n+ \"\"\"\n+ wrapper class for pam_conv structure\n+ \"\"\"\n _fields_ = [(\"conv\", CONV_FUNC), (\"appdata_ptr\", c_void_p)]\n \n \n@@ -111,20 +118,26 @@\n \n def authenticate(api_handle, username, password):\n \"\"\"\n- Returns True if the given username and password authenticate for the\n- given service. Returns False otherwise\n+ :param api_handle: Used for resolving the the pam service name and getting the Logger.\n+ :param username:The username to log in with.\n+ :type username: str\n+ :param password: The password to log in with.\n+ :type password: str\n+ :returns: True if the given username and password authenticate for the given service. Otherwise False\n+ :rtype: bool\n \"\"\"\n \n @CONV_FUNC\n def my_conv(n_messages, messages, p_response, app_data):\n- \"\"\"Simple conversation function that responds to any\n- prompt where the echo is off with the supplied password\"\"\"\n+ \"\"\"\n+ Simple conversation function that responds to any prompt where the echo is off with the supplied password\n+ \"\"\"\n # Create an array of n_messages response objects\n addr = CALLOC(n_messages, sizeof(PamResponse))\n p_response[0] = cast(addr, POINTER(PamResponse))\n for i in range(n_messages):\n if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:\n- pw_copy = STRDUP(str(password))\n+ pw_copy = STRDUP(password.encode())\n p_response.contents[i].resp = cast(pw_copy, c_char_p)\n p_response.contents[i].resp_retcode = 0\n return 0\n@@ -138,11 +151,10 @@\n \n handle = PamHandle()\n conv = PamConv(my_conv, 0)\n- retval = PAM_START(service, username, pointer(conv), pointer(handle))\n+ retval = PAM_START(service.encode(), username.encode(), pointer(conv), pointer(handle))\n \n if retval != 0:\n- # TODO: This is not an authentication error, something\n- # has gone wrong starting up PAM\n+ # TODO: This is not an authentication error, something has gone wrong starting up PAM\n api_handle.logger.error(\"authn_pam: error initializing PAM library\")\n return False\n", "issue": "PAM authentication failure\n### Describe the bug\r\n\r\n<!--- A clear and concise description of what the bug is. 
-->\r\n\r\n<!--- HINT: You can paste gist.github.com links for long logs or larger files -->\r\nEnabling authentication.pam module results in TypeError exception and failure to authenticate.\r\n\r\n### To Reproduce\r\n\r\n1. Change authentication module to \"authentication.pam\" in modules.com\r\n2. Restart cobbler service\r\n3. Attempt to login via web UI\r\n4. See error in cobbler.log\r\n\r\n````\r\nDEBUG | authn_pam: PAM service is login\r\nINFO | Exception occured: <class 'ctypes.ArgumentError'>\r\nINFO | Exception value: argument 1: <class 'TypeError'>: wrong type\r\nINFO | Exception Info:\r\n File \"/usr/lib/python3.6/site-packages/cobbler/remote.py\", line 3477, in _dispatch\r\n return method_handle(*params)\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/remote.py\", line 3233, in login\r\n if self.__validate_user(login_user, login_password):\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/remote.py\", line 3113, in __validate_user\r\n return self.api.authenticate(input_user, input_password)\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/api.py\", line 1599, in authenticate\r\n rc = self.authn.authenticate(self, user, password)\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/modules/authentication/pam.py\", line 141, in authenticate\r\n retval = PAM_START(str(service), str(username), pointer(conv), pointer(handle))\r\n\r\n````\r\n\r\n### Expected behavior\r\n\r\n<!--- A clear and concise description of what you expected to happen. -->\r\nPAM authentication should succeed.\r\n\r\n### Version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````\r\nCobbler 3.1.2\r\n source: ?, ?\r\n build time: Sun May 31 02:32:34 2020\r\n````\r\n\r\n### Screenshots\r\n\r\n<!--- If applicable, add screenshots to help explain your problem. -->\r\n\r\n### Additional context\r\n\r\n<!--- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "\"\"\"\nAuthentication module that uses /etc/cobbler/auth.conf\nChoice of authentication module is in /etc/cobbler/modules.conf\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\nPAM python code based on the pam_python code created by Chris AtLee:\nhttp://atlee.ca/software/pam/\n\n#-----------------------------------------------\npam_python (c) 2007 Chris AtLee <[email protected]>\nLicensed under the MIT license:\nhttp://www.opensource.org/licenses/mit-license.php\n\nPAM module for python\n\nProvides an authenticate function that will allow the caller to authenticate\na user against the Pluggable Authentication Modules (PAM) on the system.\n\nImplemented using ctypes, so no compilation is necessary.\n\"\"\"\n\nfrom builtins import str\nfrom builtins import range\nfrom ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof\nfrom ctypes import c_void_p, c_uint, c_char_p, c_char, c_int\nfrom ctypes.util import find_library\n\nLIBPAM = CDLL(find_library(\"pam\"))\nLIBC = CDLL(find_library(\"c\"))\n\nCALLOC = LIBC.calloc\nCALLOC.restype = c_void_p\nCALLOC.argtypes = [c_uint, c_uint]\n\nSTRDUP = LIBC.strdup\nSTRDUP.argstypes = [c_char_p]\nSTRDUP.restype = POINTER(c_char) # NOT c_char_p !!!!\n\n# Various constants\nPAM_PROMPT_ECHO_OFF = 1\nPAM_PROMPT_ECHO_ON = 2\nPAM_ERROR_MSG = 3\nPAM_TEXT_INFO = 4\n\n\ndef register():\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"authn\"\n\n\nclass PamHandle(Structure):\n \"\"\"wrapper class for pam_handle_t\"\"\"\n _fields_ = [(\"handle\", c_void_p)]\n\n def __init__(self):\n Structure.__init__(self)\n self.handle = 0\n\n\nclass PamMessage(Structure):\n \"\"\"wrapper class for pam_message structure\"\"\"\n _fields_ = [(\"msg_style\", c_int), (\"msg\", c_char_p)]\n\n def __repr__(self):\n return \"<PamMessage %i '%s'>\" % (self.msg_style, self.msg)\n\n\nclass PamResponse(Structure):\n \"\"\"wrapper class for pam_response structure\"\"\"\n _fields_ = [(\"resp\", c_char_p), (\"resp_retcode\", c_int)]\n\n def __repr__(self):\n return \"<PamResponse %i '%s'>\" % (self.resp_retcode, self.resp)\n\n\nCONV_FUNC = CFUNCTYPE(c_int, c_int, POINTER(POINTER(PamMessage)), POINTER(POINTER(PamResponse)), c_void_p)\n\n\nclass PamConv(Structure):\n \"\"\"wrapper class for pam_conv structure\"\"\"\n _fields_ = [(\"conv\", CONV_FUNC), (\"appdata_ptr\", c_void_p)]\n\n\nPAM_START = LIBPAM.pam_start\nPAM_START.restype = c_int\nPAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), POINTER(PamHandle)]\n\nPAM_AUTHENTICATE = LIBPAM.pam_authenticate\nPAM_AUTHENTICATE.restype = c_int\nPAM_AUTHENTICATE.argtypes = [PamHandle, c_int]\n\n\ndef authenticate(api_handle, username, password):\n \"\"\"\n Returns True if the given username and password authenticate for the\n given service. 
Returns False otherwise\n \"\"\"\n\n @CONV_FUNC\n def my_conv(n_messages, messages, p_response, app_data):\n \"\"\"Simple conversation function that responds to any\n prompt where the echo is off with the supplied password\"\"\"\n # Create an array of n_messages response objects\n addr = CALLOC(n_messages, sizeof(PamResponse))\n p_response[0] = cast(addr, POINTER(PamResponse))\n for i in range(n_messages):\n if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:\n pw_copy = STRDUP(str(password))\n p_response.contents[i].resp = cast(pw_copy, c_char_p)\n p_response.contents[i].resp_retcode = 0\n return 0\n\n try:\n service = api_handle.settings().authn_pam_service\n except:\n service = 'login'\n\n api_handle.logger.debug(\"authn_pam: PAM service is %s\" % service)\n\n handle = PamHandle()\n conv = PamConv(my_conv, 0)\n retval = PAM_START(service, username, pointer(conv), pointer(handle))\n\n if retval != 0:\n # TODO: This is not an authentication error, something\n # has gone wrong starting up PAM\n api_handle.logger.error(\"authn_pam: error initializing PAM library\")\n return False\n\n retval = PAM_AUTHENTICATE(handle, 0)\n return retval == 0\n", "path": "cobbler/modules/authentication/pam.py"}]}
| 2,614 | 875 |
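The failure in this record is a Python 3 bytes/str mismatch at the ctypes boundary: `PAM_START.argtypes` declares `c_char_p`, which only accepts `bytes`, so passing `str(service)` produces exactly the `ArgumentError: argument 1: <class 'TypeError'>: wrong type` shown in the log. A minimal sketch of that rule, assuming a Unix-like system where `find_library("c")` resolves to a loadable libc:

```python
# Sketch of the c_char_p rule behind the .encode() calls in the golden diff.
from ctypes import CDLL, c_char_p, c_size_t
from ctypes.util import find_library

libc = CDLL(find_library("c"))  # assumes libc is discoverable on this platform
libc.strlen.argtypes = [c_char_p]
libc.strlen.restype = c_size_t

print(libc.strlen(b"login"))          # bytes satisfy c_char_p -> prints 5
print(libc.strlen("login".encode()))  # equivalent; mirrors service.encode() in the fix
# libc.strlen("login")                # a plain str raises ctypes.ArgumentError:
#                                     # "argument 1: <class 'TypeError'>: wrong type"
```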
gh_patches_debug_6471
|
rasdani/github-patches
|
git_diff
|
nonebot__nonebot2-1757
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: `Adapter.bot_disconnect` 不应允许关闭其他适配器创建的 bot
**描述问题:**
当前的 `bot_disconnect` 只会根据 `bot.self_id` 关闭对应 bot。如果 `OneBot V12` 适配器调用 `bot_disconnect` 也能将 `OneBot V11` 适配器创建的 bot 移除。
**如何复现?**
<https://github.com/nonebot/adapter-onebot/pull/45>
<https://github.com/he0119/CoolQBot/issues/264>
**期望的结果**
如果关闭的 bot 不属于当前适配器,则跳过或者报错。
</issue>
<code>
[start of nonebot/internal/adapter/adapter.py]
1 import abc
2 from contextlib import asynccontextmanager
3 from typing import Any, Dict, AsyncGenerator
4
5 from nonebot.config import Config
6 from nonebot.internal.driver import (
7 Driver,
8 Request,
9 Response,
10 WebSocket,
11 ForwardDriver,
12 ReverseDriver,
13 HTTPServerSetup,
14 WebSocketServerSetup,
15 )
16
17 from .bot import Bot
18
19
20 class Adapter(abc.ABC):
21 """协议适配器基类。
22
23 通常,在 Adapter 中编写协议通信相关代码,如: 建立通信连接、处理接收与发送 data 等。
24
25 参数:
26 driver: {ref}`nonebot.drivers.Driver` 实例
27 kwargs: 其他由 {ref}`nonebot.drivers.Driver.register_adapter` 传入的额外参数
28 """
29
30 def __init__(self, driver: Driver, **kwargs: Any):
31 self.driver: Driver = driver
32 """{ref}`nonebot.drivers.Driver` 实例"""
33 self.bots: Dict[str, Bot] = {}
34 """本协议适配器已建立连接的 {ref}`nonebot.adapters.Bot` 实例"""
35
36 def __repr__(self) -> str:
37 return f"Adapter(name={self.get_name()!r})"
38
39 @classmethod
40 @abc.abstractmethod
41 def get_name(cls) -> str:
42 """当前协议适配器的名称"""
43 raise NotImplementedError
44
45 @property
46 def config(self) -> Config:
47 """全局 NoneBot 配置"""
48 return self.driver.config
49
50 def bot_connect(self, bot: Bot) -> None:
51 """告知 NoneBot 建立了一个新的 {ref}`nonebot.adapters.Bot` 连接。
52
53 当有新的 {ref}`nonebot.adapters.Bot` 实例连接建立成功时调用。
54
55 参数:
56 bot: {ref}`nonebot.adapters.Bot` 实例
57 """
58 self.driver._bot_connect(bot)
59 self.bots[bot.self_id] = bot
60
61 def bot_disconnect(self, bot: Bot) -> None:
62 """告知 NoneBot {ref}`nonebot.adapters.Bot` 连接已断开。
63
64 当有 {ref}`nonebot.adapters.Bot` 实例连接断开时调用。
65
66 参数:
67 bot: {ref}`nonebot.adapters.Bot` 实例
68 """
69 self.driver._bot_disconnect(bot)
70 self.bots.pop(bot.self_id, None)
71
72 def setup_http_server(self, setup: HTTPServerSetup):
73 """设置一个 HTTP 服务器路由配置"""
74 if not isinstance(self.driver, ReverseDriver):
75 raise TypeError("Current driver does not support http server")
76 self.driver.setup_http_server(setup)
77
78 def setup_websocket_server(self, setup: WebSocketServerSetup):
79 """设置一个 WebSocket 服务器路由配置"""
80 if not isinstance(self.driver, ReverseDriver):
81 raise TypeError("Current driver does not support websocket server")
82 self.driver.setup_websocket_server(setup)
83
84 async def request(self, setup: Request) -> Response:
85 """进行一个 HTTP 客户端请求"""
86 if not isinstance(self.driver, ForwardDriver):
87 raise TypeError("Current driver does not support http client")
88 return await self.driver.request(setup)
89
90 @asynccontextmanager
91 async def websocket(self, setup: Request) -> AsyncGenerator[WebSocket, None]:
92 """建立一个 WebSocket 客户端连接请求"""
93 if not isinstance(self.driver, ForwardDriver):
94 raise TypeError("Current driver does not support websocket client")
95 async with self.driver.websocket(setup) as ws:
96 yield ws
97
98 @abc.abstractmethod
99 async def _call_api(self, bot: Bot, api: str, **data: Any) -> Any:
100 """`Adapter` 实际调用 api 的逻辑实现函数,实现该方法以调用 api。
101
102 参数:
103 api: API 名称
104 data: API 数据
105 """
106 raise NotImplementedError
107
108
109 __autodoc__ = {"Adapter._call_api": True}
110
[end of nonebot/internal/adapter/adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nonebot/internal/adapter/adapter.py b/nonebot/internal/adapter/adapter.py
--- a/nonebot/internal/adapter/adapter.py
+++ b/nonebot/internal/adapter/adapter.py
@@ -66,8 +66,9 @@
参数:
bot: {ref}`nonebot.adapters.Bot` 实例
"""
+ if self.bots.pop(bot.self_id, None) is None:
+ raise RuntimeError(f"{bot} not found in adapter {self.get_name()}")
self.driver._bot_disconnect(bot)
- self.bots.pop(bot.self_id, None)
def setup_http_server(self, setup: HTTPServerSetup):
"""设置一个 HTTP 服务器路由配置"""
|
{"golden_diff": "diff --git a/nonebot/internal/adapter/adapter.py b/nonebot/internal/adapter/adapter.py\n--- a/nonebot/internal/adapter/adapter.py\n+++ b/nonebot/internal/adapter/adapter.py\n@@ -66,8 +66,9 @@\n \u53c2\u6570:\n bot: {ref}`nonebot.adapters.Bot` \u5b9e\u4f8b\n \"\"\"\n+ if self.bots.pop(bot.self_id, None) is None:\n+ raise RuntimeError(f\"{bot} not found in adapter {self.get_name()}\")\n self.driver._bot_disconnect(bot)\n- self.bots.pop(bot.self_id, None)\n \n def setup_http_server(self, setup: HTTPServerSetup):\n \"\"\"\u8bbe\u7f6e\u4e00\u4e2a HTTP \u670d\u52a1\u5668\u8def\u7531\u914d\u7f6e\"\"\"\n", "issue": "Bug: `Adapter.bot_disconnect` \u4e0d\u5e94\u5141\u8bb8\u5173\u95ed\u5176\u4ed6\u9002\u914d\u5668\u521b\u5efa\u7684 bot\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u5f53\u524d\u7684 `bot_disconnect` \u53ea\u4f1a\u6839\u636e `bot.self_id` \u5173\u95ed\u5bf9\u5e94 bot\u3002\u5982\u679c `OneBot V12` \u9002\u914d\u5668\u8c03\u7528 `bot_disconnect` \u4e5f\u80fd\u5c06 `OneBot V11` \u9002\u914d\u5668\u521b\u5efa\u7684 bot \u79fb\u9664\u3002\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n<https://github.com/nonebot/adapter-onebot/pull/45>\r\n<https://github.com/he0119/CoolQBot/issues/264>\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n\u5982\u679c\u5173\u95ed\u7684 bot \u4e0d\u5c5e\u4e8e\u5f53\u524d\u9002\u914d\u5668\uff0c\u5219\u8df3\u8fc7\u6216\u8005\u62a5\u9519\u3002\r\n\n", "before_files": [{"content": "import abc\nfrom contextlib import asynccontextmanager\nfrom typing import Any, Dict, AsyncGenerator\n\nfrom nonebot.config import Config\nfrom nonebot.internal.driver import (\n Driver,\n Request,\n Response,\n WebSocket,\n ForwardDriver,\n ReverseDriver,\n HTTPServerSetup,\n WebSocketServerSetup,\n)\n\nfrom .bot import Bot\n\n\nclass Adapter(abc.ABC):\n \"\"\"\u534f\u8bae\u9002\u914d\u5668\u57fa\u7c7b\u3002\n\n \u901a\u5e38\uff0c\u5728 Adapter \u4e2d\u7f16\u5199\u534f\u8bae\u901a\u4fe1\u76f8\u5173\u4ee3\u7801\uff0c\u5982: \u5efa\u7acb\u901a\u4fe1\u8fde\u63a5\u3001\u5904\u7406\u63a5\u6536\u4e0e\u53d1\u9001 data \u7b49\u3002\n\n \u53c2\u6570:\n driver: {ref}`nonebot.drivers.Driver` \u5b9e\u4f8b\n kwargs: \u5176\u4ed6\u7531 {ref}`nonebot.drivers.Driver.register_adapter` \u4f20\u5165\u7684\u989d\u5916\u53c2\u6570\n \"\"\"\n\n def __init__(self, driver: Driver, **kwargs: Any):\n self.driver: Driver = driver\n \"\"\"{ref}`nonebot.drivers.Driver` \u5b9e\u4f8b\"\"\"\n self.bots: Dict[str, Bot] = {}\n \"\"\"\u672c\u534f\u8bae\u9002\u914d\u5668\u5df2\u5efa\u7acb\u8fde\u63a5\u7684 {ref}`nonebot.adapters.Bot` \u5b9e\u4f8b\"\"\"\n\n def __repr__(self) -> str:\n return f\"Adapter(name={self.get_name()!r})\"\n\n @classmethod\n @abc.abstractmethod\n def get_name(cls) -> str:\n \"\"\"\u5f53\u524d\u534f\u8bae\u9002\u914d\u5668\u7684\u540d\u79f0\"\"\"\n raise NotImplementedError\n\n @property\n def config(self) -> Config:\n \"\"\"\u5168\u5c40 NoneBot \u914d\u7f6e\"\"\"\n return self.driver.config\n\n def bot_connect(self, bot: Bot) -> None:\n \"\"\"\u544a\u77e5 NoneBot \u5efa\u7acb\u4e86\u4e00\u4e2a\u65b0\u7684 {ref}`nonebot.adapters.Bot` \u8fde\u63a5\u3002\n\n \u5f53\u6709\u65b0\u7684 {ref}`nonebot.adapters.Bot` \u5b9e\u4f8b\u8fde\u63a5\u5efa\u7acb\u6210\u529f\u65f6\u8c03\u7528\u3002\n\n \u53c2\u6570:\n bot: {ref}`nonebot.adapters.Bot` \u5b9e\u4f8b\n \"\"\"\n self.driver._bot_connect(bot)\n self.bots[bot.self_id] = bot\n\n def bot_disconnect(self, bot: Bot) -> None:\n \"\"\"\u544a\u77e5 NoneBot {ref}`nonebot.adapters.Bot` \u8fde\u63a5\u5df2\u65ad\u5f00\u3002\n\n \u5f53\u6709 
{ref}`nonebot.adapters.Bot` \u5b9e\u4f8b\u8fde\u63a5\u65ad\u5f00\u65f6\u8c03\u7528\u3002\n\n \u53c2\u6570:\n bot: {ref}`nonebot.adapters.Bot` \u5b9e\u4f8b\n \"\"\"\n self.driver._bot_disconnect(bot)\n self.bots.pop(bot.self_id, None)\n\n def setup_http_server(self, setup: HTTPServerSetup):\n \"\"\"\u8bbe\u7f6e\u4e00\u4e2a HTTP \u670d\u52a1\u5668\u8def\u7531\u914d\u7f6e\"\"\"\n if not isinstance(self.driver, ReverseDriver):\n raise TypeError(\"Current driver does not support http server\")\n self.driver.setup_http_server(setup)\n\n def setup_websocket_server(self, setup: WebSocketServerSetup):\n \"\"\"\u8bbe\u7f6e\u4e00\u4e2a WebSocket \u670d\u52a1\u5668\u8def\u7531\u914d\u7f6e\"\"\"\n if not isinstance(self.driver, ReverseDriver):\n raise TypeError(\"Current driver does not support websocket server\")\n self.driver.setup_websocket_server(setup)\n\n async def request(self, setup: Request) -> Response:\n \"\"\"\u8fdb\u884c\u4e00\u4e2a HTTP \u5ba2\u6237\u7aef\u8bf7\u6c42\"\"\"\n if not isinstance(self.driver, ForwardDriver):\n raise TypeError(\"Current driver does not support http client\")\n return await self.driver.request(setup)\n\n @asynccontextmanager\n async def websocket(self, setup: Request) -> AsyncGenerator[WebSocket, None]:\n \"\"\"\u5efa\u7acb\u4e00\u4e2a WebSocket \u5ba2\u6237\u7aef\u8fde\u63a5\u8bf7\u6c42\"\"\"\n if not isinstance(self.driver, ForwardDriver):\n raise TypeError(\"Current driver does not support websocket client\")\n async with self.driver.websocket(setup) as ws:\n yield ws\n\n @abc.abstractmethod\n async def _call_api(self, bot: Bot, api: str, **data: Any) -> Any:\n \"\"\"`Adapter` \u5b9e\u9645\u8c03\u7528 api \u7684\u903b\u8f91\u5b9e\u73b0\u51fd\u6570\uff0c\u5b9e\u73b0\u8be5\u65b9\u6cd5\u4ee5\u8c03\u7528 api\u3002\n\n \u53c2\u6570:\n api: API \u540d\u79f0\n data: API \u6570\u636e\n \"\"\"\n raise NotImplementedError\n\n\n__autodoc__ = {\"Adapter._call_api\": True}\n", "path": "nonebot/internal/adapter/adapter.py"}]}
| 1,775 | 157 |
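The guard added by this golden diff amounts to checking `dict.pop`'s default return before the driver is notified, so an adapter can only remove bots it registered itself. A stand-in sketch with a hypothetical `Registry` class instead of nonebot's real `Adapter`/`Driver` types:

```python
# Stand-in sketch of the guarded disconnect pattern from the golden diff.
class Registry:
    """Hypothetical adapter-like registry; not nonebot's Adapter class."""

    def __init__(self, name):
        self.name = name
        self.bots = {}

    def bot_connect(self, self_id, bot):
        self.bots[self_id] = bot

    def bot_disconnect(self, self_id, bot):
        # pop() returns None when this registry never held the bot,
        # so another registry can no longer silently remove it
        if self.bots.pop(self_id, None) is None:
            raise RuntimeError(f"{bot} not found in adapter {self.name}")


v11, v12 = Registry("OneBot V11"), Registry("OneBot V12")
v11.bot_connect("12345", "bot-a")
try:
    v12.bot_disconnect("12345", "bot-a")
except RuntimeError as exc:
    print(exc)  # bot-a not found in adapter OneBot V12; v11 still holds the bot
```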
gh_patches_debug_39990
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-4756
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Encode package version as a shared object symbol
One of the more common issues we see with users is an `AttributeError` where one of our `Cryptograpy_HAS_*` can't be found. This is pretty much always an issue where Python has loaded a newer version of cryptography's Python code, but for some reason loads an older shared object (which does not have the symbol the Python code is looking for).
This is a bad error message for a serious environment problem, and it doesn't always happen immediately. We should encode the version of cryptography in the shared object we build and then raise an `ImportError` on import if the shared object version and cryptography's version do not match.
refs https://github.com/certbot/certbot/issues/5651
</issue>
<code>
[start of src/cryptography/hazmat/bindings/openssl/binding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import collections
8 import threading
9 import types
10 import warnings
11
12 from cryptography import utils
13 from cryptography.exceptions import InternalError
14 from cryptography.hazmat.bindings._openssl import ffi, lib
15 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
16
17 _OpenSSLErrorWithText = collections.namedtuple(
18 "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"]
19 )
20
21
22 class _OpenSSLError(object):
23 def __init__(self, code, lib, func, reason):
24 self._code = code
25 self._lib = lib
26 self._func = func
27 self._reason = reason
28
29 def _lib_reason_match(self, lib, reason):
30 return lib == self.lib and reason == self.reason
31
32 code = utils.read_only_property("_code")
33 lib = utils.read_only_property("_lib")
34 func = utils.read_only_property("_func")
35 reason = utils.read_only_property("_reason")
36
37
38 def _consume_errors(lib):
39 errors = []
40 while True:
41 code = lib.ERR_get_error()
42 if code == 0:
43 break
44
45 err_lib = lib.ERR_GET_LIB(code)
46 err_func = lib.ERR_GET_FUNC(code)
47 err_reason = lib.ERR_GET_REASON(code)
48
49 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))
50
51 return errors
52
53
54 def _openssl_assert(lib, ok):
55 if not ok:
56 errors = _consume_errors(lib)
57 errors_with_text = []
58 for err in errors:
59 buf = ffi.new("char[]", 256)
60 lib.ERR_error_string_n(err.code, buf, len(buf))
61 err_text_reason = ffi.string(buf)
62
63 errors_with_text.append(
64 _OpenSSLErrorWithText(
65 err.code, err.lib, err.func, err.reason, err_text_reason
66 )
67 )
68
69 raise InternalError(
70 "Unknown OpenSSL error. This error is commonly encountered when "
71 "another library is not cleaning up the OpenSSL error stack. If "
72 "you are using cryptography with another library that uses "
73 "OpenSSL try disabling it before reporting a bug. Otherwise "
74 "please file an issue at https://github.com/pyca/cryptography/"
75 "issues with information on how to reproduce "
76 "this. ({0!r})".format(errors_with_text),
77 errors_with_text
78 )
79
80
81 def build_conditional_library(lib, conditional_names):
82 conditional_lib = types.ModuleType("lib")
83 conditional_lib._original_lib = lib
84 excluded_names = set()
85 for condition, names_cb in conditional_names.items():
86 if not getattr(lib, condition):
87 excluded_names.update(names_cb())
88
89 for attr in dir(lib):
90 if attr not in excluded_names:
91 setattr(conditional_lib, attr, getattr(lib, attr))
92
93 return conditional_lib
94
95
96 class Binding(object):
97 """
98 OpenSSL API wrapper.
99 """
100 lib = None
101 ffi = ffi
102 _lib_loaded = False
103 _init_lock = threading.Lock()
104 _lock_init_lock = threading.Lock()
105
106 def __init__(self):
107 self._ensure_ffi_initialized()
108
109 @classmethod
110 def _register_osrandom_engine(cls):
111 # Clear any errors extant in the queue before we start. In many
112 # scenarios other things may be interacting with OpenSSL in the same
113 # process space and it has proven untenable to assume that they will
114 # reliably clear the error queue. Once we clear it here we will
115 # error on any subsequent unexpected item in the stack.
116 cls.lib.ERR_clear_error()
117 cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id
118 cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name
119 result = cls.lib.Cryptography_add_osrandom_engine()
120 _openssl_assert(cls.lib, result in (1, 2))
121
122 @classmethod
123 def _ensure_ffi_initialized(cls):
124 with cls._init_lock:
125 if not cls._lib_loaded:
126 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
127 cls._lib_loaded = True
128 # initialize the SSL library
129 cls.lib.SSL_library_init()
130 # adds all ciphers/digests for EVP
131 cls.lib.OpenSSL_add_all_algorithms()
132 # loads error strings for libcrypto and libssl functions
133 cls.lib.SSL_load_error_strings()
134 cls._register_osrandom_engine()
135
136 @classmethod
137 def init_static_locks(cls):
138 with cls._lock_init_lock:
139 cls._ensure_ffi_initialized()
140 # Use Python's implementation if available, importing _ssl triggers
141 # the setup for this.
142 __import__("_ssl")
143
144 if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or
145 cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):
146 return
147
148 # If nothing else has setup a locking callback already, we set up
149 # our own
150 res = lib.Cryptography_setup_ssl_threads()
151 _openssl_assert(cls.lib, res == 1)
152
153
154 def _verify_openssl_version(lib):
155 if (
156 lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and
157 not lib.CRYPTOGRAPHY_IS_LIBRESSL
158 ):
159 warnings.warn(
160 "OpenSSL version 1.0.1 is no longer supported by the OpenSSL "
161 "project, please upgrade. A future version of cryptography will "
162 "drop support for it.",
163 utils.CryptographyDeprecationWarning
164 )
165
166
167 # OpenSSL is not thread safe until the locks are initialized. We call this
168 # method in module scope so that it executes with the import lock. On
169 # Pythons < 3.4 this import lock is a global lock, which can prevent a race
170 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
171 # is per module so this approach will not work.
172 Binding.init_static_locks()
173
174 _verify_openssl_version(Binding.lib)
175
[end of src/cryptography/hazmat/bindings/openssl/binding.py]
[start of src/_cffi_src/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import sys
8 from distutils.ccompiler import new_compiler
9 from distutils.dist import Distribution
10
11 from cffi import FFI
12
13
14 def build_ffi_for_binding(module_name, module_prefix, modules, libraries=[],
15 extra_compile_args=[], extra_link_args=[]):
16 """
17 Modules listed in ``modules`` should have the following attributes:
18
19 * ``INCLUDES``: A string containing C includes.
20 * ``TYPES``: A string containing C declarations for types.
21 * ``FUNCTIONS``: A string containing C declarations for functions & macros.
22 * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this
23 can be used to do things like test for a define and provide an
24 alternate implementation based on that.
25 """
26 types = []
27 includes = []
28 functions = []
29 customizations = []
30 for name in modules:
31 __import__(module_prefix + name)
32 module = sys.modules[module_prefix + name]
33
34 types.append(module.TYPES)
35 functions.append(module.FUNCTIONS)
36 includes.append(module.INCLUDES)
37 customizations.append(module.CUSTOMIZATIONS)
38
39 verify_source = "\n".join(
40 includes +
41 customizations
42 )
43 ffi = build_ffi(
44 module_name,
45 cdef_source="\n".join(types + functions),
46 verify_source=verify_source,
47 libraries=libraries,
48 extra_compile_args=extra_compile_args,
49 extra_link_args=extra_link_args,
50 )
51
52 return ffi
53
54
55 def build_ffi(module_name, cdef_source, verify_source, libraries=[],
56 extra_compile_args=[], extra_link_args=[]):
57 ffi = FFI()
58 ffi.cdef(cdef_source)
59 ffi.set_source(
60 module_name,
61 verify_source,
62 libraries=libraries,
63 extra_compile_args=extra_compile_args,
64 extra_link_args=extra_link_args,
65 )
66 return ffi
67
68
69 def extra_link_args(compiler_type):
70 if compiler_type == 'msvc':
71 # Enable NX and ASLR for Windows builds on MSVC. These are enabled by
72 # default on Python 3.3+ but not on 2.x.
73 return ['/NXCOMPAT', '/DYNAMICBASE']
74 else:
75 return []
76
77
78 def compiler_type():
79 """
80 Gets the compiler type from distutils. On Windows with MSVC it will be
81 "msvc". On macOS and linux it is "unix".
82 """
83 dist = Distribution()
84 dist.parse_config_files()
85 cmd = dist.get_command_obj('build')
86 cmd.ensure_finalized()
87 compiler = new_compiler(compiler=cmd.compiler)
88 return compiler.compiler_type
89
[end of src/_cffi_src/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/_cffi_src/utils.py b/src/_cffi_src/utils.py
--- a/src/_cffi_src/utils.py
+++ b/src/_cffi_src/utils.py
@@ -4,6 +4,7 @@
from __future__ import absolute_import, division, print_function
+import os
import sys
from distutils.ccompiler import new_compiler
from distutils.dist import Distribution
@@ -11,6 +12,13 @@
from cffi import FFI
+# Load the cryptography __about__ to get the current package version
+base_src = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+about = {}
+with open(os.path.join(base_src, "cryptography", "__about__.py")) as f:
+ exec(f.read(), about)
+
+
def build_ffi_for_binding(module_name, module_prefix, modules, libraries=[],
extra_compile_args=[], extra_link_args=[]):
"""
@@ -55,6 +63,11 @@
def build_ffi(module_name, cdef_source, verify_source, libraries=[],
extra_compile_args=[], extra_link_args=[]):
ffi = FFI()
+ # Always add the CRYPTOGRAPHY_PACKAGE_VERSION to the shared object
+ cdef_source += "\nstatic const char *const CRYPTOGRAPHY_PACKAGE_VERSION;"
+ verify_source += '\n#define CRYPTOGRAPHY_PACKAGE_VERSION "{}"'.format(
+ about["__version__"]
+ )
ffi.cdef(cdef_source)
ffi.set_source(
module_name,
diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -9,6 +9,7 @@
import types
import warnings
+import cryptography
from cryptography import utils
from cryptography.exceptions import InternalError
from cryptography.hazmat.bindings._openssl import ffi, lib
@@ -164,6 +165,29 @@
)
+def _verify_package_version(version):
+ # Occasionally we run into situations where the version of the Python
+ # package does not match the version of the shared object that is loaded.
+ # This may occur in environments where multiple versions of cryptography
+ # are installed and available in the python path. To avoid errors cropping
+ # up later this code checks that the currently imported package and the
+ # shared object that were loaded have the same version and raise an
+ # ImportError if they do not
+ so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)
+ if version.encode("ascii") != so_package_version:
+ raise ImportError(
+ "The version of cryptography does not match the loaded "
+ "shared object. This can happen if you have multiple copies of "
+ "cryptography installed in your Python path. Please try creating "
+ "a new virtual environment to resolve this issue. "
+ "Loaded python version: {}, shared object version: {}".format(
+ version, so_package_version
+ )
+ )
+
+
+_verify_package_version(cryptography.__version__)
+
# OpenSSL is not thread safe until the locks are initialized. We call this
# method in module scope so that it executes with the import lock. On
# Pythons < 3.4 this import lock is a global lock, which can prevent a race
|
{"golden_diff": "diff --git a/src/_cffi_src/utils.py b/src/_cffi_src/utils.py\n--- a/src/_cffi_src/utils.py\n+++ b/src/_cffi_src/utils.py\n@@ -4,6 +4,7 @@\n \n from __future__ import absolute_import, division, print_function\n \n+import os\n import sys\n from distutils.ccompiler import new_compiler\n from distutils.dist import Distribution\n@@ -11,6 +12,13 @@\n from cffi import FFI\n \n \n+# Load the cryptography __about__ to get the current package version\n+base_src = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n+about = {}\n+with open(os.path.join(base_src, \"cryptography\", \"__about__.py\")) as f:\n+ exec(f.read(), about)\n+\n+\n def build_ffi_for_binding(module_name, module_prefix, modules, libraries=[],\n extra_compile_args=[], extra_link_args=[]):\n \"\"\"\n@@ -55,6 +63,11 @@\n def build_ffi(module_name, cdef_source, verify_source, libraries=[],\n extra_compile_args=[], extra_link_args=[]):\n ffi = FFI()\n+ # Always add the CRYPTOGRAPHY_PACKAGE_VERSION to the shared object\n+ cdef_source += \"\\nstatic const char *const CRYPTOGRAPHY_PACKAGE_VERSION;\"\n+ verify_source += '\\n#define CRYPTOGRAPHY_PACKAGE_VERSION \"{}\"'.format(\n+ about[\"__version__\"]\n+ )\n ffi.cdef(cdef_source)\n ffi.set_source(\n module_name,\ndiff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -9,6 +9,7 @@\n import types\n import warnings\n \n+import cryptography\n from cryptography import utils\n from cryptography.exceptions import InternalError\n from cryptography.hazmat.bindings._openssl import ffi, lib\n@@ -164,6 +165,29 @@\n )\n \n \n+def _verify_package_version(version):\n+ # Occasionally we run into situations where the version of the Python\n+ # package does not match the version of the shared object that is loaded.\n+ # This may occur in environments where multiple versions of cryptography\n+ # are installed and available in the python path. To avoid errors cropping\n+ # up later this code checks that the currently imported package and the\n+ # shared object that were loaded have the same version and raise an\n+ # ImportError if they do not\n+ so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)\n+ if version.encode(\"ascii\") != so_package_version:\n+ raise ImportError(\n+ \"The version of cryptography does not match the loaded \"\n+ \"shared object. This can happen if you have multiple copies of \"\n+ \"cryptography installed in your Python path. Please try creating \"\n+ \"a new virtual environment to resolve this issue. \"\n+ \"Loaded python version: {}, shared object version: {}\".format(\n+ version, so_package_version\n+ )\n+ )\n+\n+\n+_verify_package_version(cryptography.__version__)\n+\n # OpenSSL is not thread safe until the locks are initialized. We call this\n # method in module scope so that it executes with the import lock. On\n # Pythons < 3.4 this import lock is a global lock, which can prevent a race\n", "issue": "Encode package version as a shared object symbol\nOne of the more common issues we see with users is an `AttributeError` where one of our `Cryptograpy_HAS_*` can't be found. 
This is pretty much always an issue where Python has loaded a newer version of cryptography's Python code, but for some reason loads an older shared object (which does not have the symbol the Python code is looking for).\r\n\r\nThis is a bad error message for a serious environment problem, and it doesn't always happen immediately. We should encode the version of cryptography in the shared object we build and then raise an `ImportError` on import if the shared object version and cryptography's version do not match.\r\n\r\nrefs https://github.com/certbot/certbot/issues/5651\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport threading\nimport types\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. 
({0!r})\".format(errors_with_text),\n errors_with_text\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n cls._osrandom_engine_id = cls.lib.Cryptography_osrandom_engine_id\n cls._osrandom_engine_name = cls.lib.Cryptography_osrandom_engine_name\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS or\n cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102 and\n not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n warnings.warn(\n \"OpenSSL version 1.0.1 is no longer supported by the OpenSSL \"\n \"project, please upgrade. A future version of cryptography will \"\n \"drop support for it.\",\n utils.CryptographyDeprecationWarning\n )\n\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nfrom distutils.ccompiler import new_compiler\nfrom distutils.dist import Distribution\n\nfrom cffi import FFI\n\n\ndef build_ffi_for_binding(module_name, module_prefix, modules, libraries=[],\n extra_compile_args=[], extra_link_args=[]):\n \"\"\"\n Modules listed in ``modules`` should have the following attributes:\n\n * ``INCLUDES``: A string containing C includes.\n * ``TYPES``: A string containing C declarations for types.\n * ``FUNCTIONS``: A string containing C declarations for functions & macros.\n * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this\n can be used to do things like test for a define and provide an\n alternate implementation based on that.\n \"\"\"\n types = []\n includes = []\n functions = []\n customizations = []\n for name in modules:\n __import__(module_prefix + name)\n module = sys.modules[module_prefix + name]\n\n types.append(module.TYPES)\n functions.append(module.FUNCTIONS)\n includes.append(module.INCLUDES)\n customizations.append(module.CUSTOMIZATIONS)\n\n verify_source = \"\\n\".join(\n includes +\n customizations\n )\n ffi = build_ffi(\n module_name,\n cdef_source=\"\\n\".join(types + functions),\n verify_source=verify_source,\n libraries=libraries,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n )\n\n return ffi\n\n\ndef build_ffi(module_name, cdef_source, verify_source, libraries=[],\n extra_compile_args=[], extra_link_args=[]):\n ffi = FFI()\n ffi.cdef(cdef_source)\n ffi.set_source(\n module_name,\n verify_source,\n libraries=libraries,\n extra_compile_args=extra_compile_args,\n extra_link_args=extra_link_args,\n )\n return ffi\n\n\ndef extra_link_args(compiler_type):\n if compiler_type == 'msvc':\n # Enable NX and ASLR for Windows builds on MSVC. These are enabled by\n # default on Python 3.3+ but not on 2.x.\n return ['/NXCOMPAT', '/DYNAMICBASE']\n else:\n return []\n\n\ndef compiler_type():\n \"\"\"\n Gets the compiler type from distutils. On Windows with MSVC it will be\n \"msvc\". On macOS and linux it is \"unix\".\n \"\"\"\n dist = Distribution()\n dist.parse_config_files()\n cmd = dist.get_command_obj('build')\n cmd.ensure_finalized()\n compiler = new_compiler(compiler=cmd.compiler)\n return compiler.compiler_type\n", "path": "src/_cffi_src/utils.py"}]}
| 3,278 | 760 |
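The heart of the fix above is a single import-time comparison between a version string compiled into the extension module and `cryptography.__version__`, turning a confusing late `AttributeError` into an immediate `ImportError`. A stripped-down sketch, with a hypothetical stand-in in place of the real cffi `lib`/`ffi` objects:

```python
# Sketch of the import-time version check added in the golden diff.
class _FakeLib:
    # In the real patch this constant is baked into the shared object by cffi.
    CRYPTOGRAPHY_PACKAGE_VERSION = b"2.6.1"


def verify_package_version(python_version, lib=_FakeLib):
    so_version = lib.CRYPTOGRAPHY_PACKAGE_VERSION  # real code reads it via ffi.string()
    if python_version.encode("ascii") != so_version:
        raise ImportError(
            "package version {} does not match shared object version {}".format(
                python_version, so_version.decode("ascii")
            )
        )


verify_package_version("2.6.1")  # matches: import proceeds silently
try:
    verify_package_version("2.7")  # stale shared object: fail loudly at import time
except ImportError as exc:
    print(exc)
```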
gh_patches_debug_9201
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-6154
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PIL version check for enum change appears to break SIMD versions
### 🐛 Describe the bug
This change appears to break current Pillow-SIMD version #5898
```
if tuple(int(part) for part in PIL.__version__.split(".")) >= (9, 1):
File "/home/.../lib/python3.10/site-packages/torchvision/transforms/_pil_constants.py", line 7, in <genexpr>
if tuple(int(part) for part in PIL.__version__.split(".")) >= (9, 1):
ValueError: invalid literal for int() with base 10: 'post1'
```
Amusingly enough, I warned against this approach in a users PR in `timm` https://github.com/rwightman/pytorch-image-models/pull/1256
Would be nice to have it fixed before 1.12 is finalized, I just hit this trying out the RC
### Versions
PT 1.12 RC, TV 0.13.0
</issue>
<code>
[start of torchvision/transforms/_pil_constants.py]
1 import PIL
2 from PIL import Image
3
4 # See https://pillow.readthedocs.io/en/stable/releasenotes/9.1.0.html#deprecations
5 # TODO: Remove this file once PIL minimal version is >= 9.1
6
7 if tuple(int(part) for part in PIL.__version__.split(".")) >= (9, 1):
8 BICUBIC = Image.Resampling.BICUBIC
9 BILINEAR = Image.Resampling.BILINEAR
10 LINEAR = Image.Resampling.BILINEAR
11 NEAREST = Image.Resampling.NEAREST
12
13 AFFINE = Image.Transform.AFFINE
14 FLIP_LEFT_RIGHT = Image.Transpose.FLIP_LEFT_RIGHT
15 FLIP_TOP_BOTTOM = Image.Transpose.FLIP_TOP_BOTTOM
16 PERSPECTIVE = Image.Transform.PERSPECTIVE
17 else:
18 BICUBIC = Image.BICUBIC
19 BILINEAR = Image.BILINEAR
20 NEAREST = Image.NEAREST
21 LINEAR = Image.LINEAR
22
23 AFFINE = Image.AFFINE
24 FLIP_LEFT_RIGHT = Image.FLIP_LEFT_RIGHT
25 FLIP_TOP_BOTTOM = Image.FLIP_TOP_BOTTOM
26 PERSPECTIVE = Image.PERSPECTIVE
27
[end of torchvision/transforms/_pil_constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchvision/transforms/_pil_constants.py b/torchvision/transforms/_pil_constants.py
--- a/torchvision/transforms/_pil_constants.py
+++ b/torchvision/transforms/_pil_constants.py
@@ -1,10 +1,9 @@
-import PIL
from PIL import Image
# See https://pillow.readthedocs.io/en/stable/releasenotes/9.1.0.html#deprecations
# TODO: Remove this file once PIL minimal version is >= 9.1
-if tuple(int(part) for part in PIL.__version__.split(".")) >= (9, 1):
+if hasattr(Image, "Resampling"):
BICUBIC = Image.Resampling.BICUBIC
BILINEAR = Image.Resampling.BILINEAR
LINEAR = Image.Resampling.BILINEAR
|
{"golden_diff": "diff --git a/torchvision/transforms/_pil_constants.py b/torchvision/transforms/_pil_constants.py\n--- a/torchvision/transforms/_pil_constants.py\n+++ b/torchvision/transforms/_pil_constants.py\n@@ -1,10 +1,9 @@\n-import PIL\n from PIL import Image\n \n # See https://pillow.readthedocs.io/en/stable/releasenotes/9.1.0.html#deprecations\n # TODO: Remove this file once PIL minimal version is >= 9.1\n \n-if tuple(int(part) for part in PIL.__version__.split(\".\")) >= (9, 1):\n+if hasattr(Image, \"Resampling\"):\n BICUBIC = Image.Resampling.BICUBIC\n BILINEAR = Image.Resampling.BILINEAR\n LINEAR = Image.Resampling.BILINEAR\n", "issue": "PIL version check for enum change appears to break SIMD versions\n### \ud83d\udc1b Describe the bug\n\nThis change appears to break current Pillow-SIMD version #5898 \r\n\r\n```\r\n if tuple(int(part) for part in PIL.__version__.split(\".\")) >= (9, 1):\r\n File \"/home/.../lib/python3.10/site-packages/torchvision/transforms/_pil_constants.py\", line 7, in <genexpr>\r\n if tuple(int(part) for part in PIL.__version__.split(\".\")) >= (9, 1):\r\nValueError: invalid literal for int() with base 10: 'post1'\r\n```\r\n\r\nAmusingly enough, I warned against this approach in a users PR in `timm` https://github.com/rwightman/pytorch-image-models/pull/1256\r\n\r\nWould be nice to have it fixed before 1.12 is finalized, I just hit this trying out the RC\n\n### Versions\n\nPT 1.12 RC, TV 0.13.0\n", "before_files": [{"content": "import PIL\nfrom PIL import Image\n\n# See https://pillow.readthedocs.io/en/stable/releasenotes/9.1.0.html#deprecations\n# TODO: Remove this file once PIL minimal version is >= 9.1\n\nif tuple(int(part) for part in PIL.__version__.split(\".\")) >= (9, 1):\n BICUBIC = Image.Resampling.BICUBIC\n BILINEAR = Image.Resampling.BILINEAR\n LINEAR = Image.Resampling.BILINEAR\n NEAREST = Image.Resampling.NEAREST\n\n AFFINE = Image.Transform.AFFINE\n FLIP_LEFT_RIGHT = Image.Transpose.FLIP_LEFT_RIGHT\n FLIP_TOP_BOTTOM = Image.Transpose.FLIP_TOP_BOTTOM\n PERSPECTIVE = Image.Transform.PERSPECTIVE\nelse:\n BICUBIC = Image.BICUBIC\n BILINEAR = Image.BILINEAR\n NEAREST = Image.NEAREST\n LINEAR = Image.LINEAR\n\n AFFINE = Image.AFFINE\n FLIP_LEFT_RIGHT = Image.FLIP_LEFT_RIGHT\n FLIP_TOP_BOTTOM = Image.FLIP_TOP_BOTTOM\n PERSPECTIVE = Image.PERSPECTIVE\n", "path": "torchvision/transforms/_pil_constants.py"}]}
| 1,082 | 186 |
gh_patches_debug_16865
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-9228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updating Slack "Staff PRs" notifications to be more specific
<!-- IMPORTANT: Before posting, be sure to redact or remove sensitive data, such as passwords, secret keys, session cookies, etc. -->
When our daily slack bot runs to tell us of new staff PRs we want to ignore:
- `needs:submitter`
- `draft`
- `blocked`
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
<code>
[start of scripts/pr_slack_digest.py]
1 from datetime import datetime
2 import requests
3 import os
4
5
6 def send_slack_message(message: str):
7 response = requests.post(
8 'https://slack.com/api/chat.postMessage',
9 headers={
10 'Authorization': f"Bearer {os.environ.get('SLACK_TOKEN')}",
11 'Content-Type': 'application/json; charset=utf-8',
12 },
13 json={
14 'channel': '#team-abc-plus',
15 'text': message,
16 },
17 )
18 if response.status_code != 200:
19 print(f"Failed to send message to Slack. Status code: {response.status_code}")
20 else:
21 print("Message sent to Slack successfully!")
22 print(response.content)
23
24
25 if __name__ == "__main__":
26 GH_LOGIN_TO_SLACK = {
27 'cdrini': '<@cdrini>',
28 'jimchamp': '<@U01ARTHG9EV>',
29 'mekarpeles': '<@mek>',
30 'scottbarnes': '<@U03MNR6T7FH>',
31 }
32 LABEL_EMOJI = {
33 'Priority: 0': '🚨 ',
34 'Priority: 1': '❗️ ',
35 }
36 # apparently `author` acts like an OR in this API and only this API -_-
37 query = "repo:internetarchive/openlibrary is:open is:pr author:cdrini author:jimchamp author:mekarpeles author:scottbarnes -is:draft"
38 prs = requests.get(
39 "https://api.github.com/search/issues",
40 params={
41 "q": query,
42 },
43 ).json()["items"]
44
45 message = f"{len(prs)} open staff PRs:\n\n"
46 for pr in prs:
47 pr_url = pr['html_url']
48 pr_age_days = (
49 datetime.now() - datetime.strptime(pr['created_at'], '%Y-%m-%dT%H:%M:%SZ')
50 ).days
51 message += f"<{pr_url}|*#{pr['number']}* | {pr['title']}>\n"
52 message += ' | '.join(
53 [
54 f"by {pr['user']['login']} {pr_age_days} days ago",
55 f"Assigned: {GH_LOGIN_TO_SLACK[pr['assignee']['login']] if pr['assignee'] else '⚠️ None'}",
56 f"{', '.join(LABEL_EMOJI.get(label['name'], '') + label['name'] for label in pr['labels'])}\n\n",
57 ]
58 )
59
60 send_slack_message(message)
61
[end of scripts/pr_slack_digest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/pr_slack_digest.py b/scripts/pr_slack_digest.py
--- a/scripts/pr_slack_digest.py
+++ b/scripts/pr_slack_digest.py
@@ -33,8 +33,18 @@
'Priority: 0': '🚨 ',
'Priority: 1': '❗️ ',
}
+
+ INCLUDE_AUTHORS = ['mekarpeles', 'cdrini', 'scottbarnes', 'jimchamp']
+ EXCLUDE_LABELS = [
+ 'Needs: Submitter Input',
+ 'State: Blocked',
+ ]
+ query = 'repo:internetarchive/openlibrary is:open is:pr -is:draft'
# apparently `author` acts like an OR in this API and only this API -_-
- query = "repo:internetarchive/openlibrary is:open is:pr author:cdrini author:jimchamp author:mekarpeles author:scottbarnes -is:draft"
+ included_authors = " ".join([f"author:{author}" for author in INCLUDE_AUTHORS])
+ excluded_labels = " ".join([f'-label:"{label}"' for label in EXCLUDE_LABELS])
+ query = f'{query} {included_authors} {excluded_labels}'
+
prs = requests.get(
"https://api.github.com/search/issues",
params={
|
{"golden_diff": "diff --git a/scripts/pr_slack_digest.py b/scripts/pr_slack_digest.py\n--- a/scripts/pr_slack_digest.py\n+++ b/scripts/pr_slack_digest.py\n@@ -33,8 +33,18 @@\n 'Priority: 0': '\ud83d\udea8 ',\n 'Priority: 1': '\u2757\ufe0f ',\n }\n+\n+ INCLUDE_AUTHORS = ['mekarpeles', 'cdrini', 'scottbarnes', 'jimchamp']\n+ EXCLUDE_LABELS = [\n+ 'Needs: Submitter Input',\n+ 'State: Blocked',\n+ ]\n+ query = 'repo:internetarchive/openlibrary is:open is:pr -is:draft'\n # apparently `author` acts like an OR in this API and only this API -_-\n- query = \"repo:internetarchive/openlibrary is:open is:pr author:cdrini author:jimchamp author:mekarpeles author:scottbarnes -is:draft\"\n+ included_authors = \" \".join([f\"author:{author}\" for author in INCLUDE_AUTHORS])\n+ excluded_labels = \" \".join([f'-label:\"{label}\"' for label in EXCLUDE_LABELS])\n+ query = f'{query} {included_authors} {excluded_labels}'\n+\n prs = requests.get(\n \"https://api.github.com/search/issues\",\n params={\n", "issue": "Updating Slack \"Staff PRs\" notifications to be more specific\n<!-- IMPORTANT: Before posting, be sure to redact or remove sensitive data, such as passwords, secret keys, session cookies, etc. -->\r\n\r\nWhen our daily slack bot runs to tell us of new staff PRs we want to ignore: \r\n- `needs:submitter`\r\n- `draft`\r\n- `blocked`\r\n\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime\nimport requests\nimport os\n\n\ndef send_slack_message(message: str):\n response = requests.post(\n 'https://slack.com/api/chat.postMessage',\n headers={\n 'Authorization': f\"Bearer {os.environ.get('SLACK_TOKEN')}\",\n 'Content-Type': 'application/json; charset=utf-8',\n },\n json={\n 'channel': '#team-abc-plus',\n 'text': message,\n },\n )\n if response.status_code != 200:\n print(f\"Failed to send message to Slack. 
Status code: {response.status_code}\")\n else:\n print(\"Message sent to Slack successfully!\")\n print(response.content)\n\n\nif __name__ == \"__main__\":\n GH_LOGIN_TO_SLACK = {\n 'cdrini': '<@cdrini>',\n 'jimchamp': '<@U01ARTHG9EV>',\n 'mekarpeles': '<@mek>',\n 'scottbarnes': '<@U03MNR6T7FH>',\n }\n LABEL_EMOJI = {\n 'Priority: 0': '\ud83d\udea8 ',\n 'Priority: 1': '\u2757\ufe0f ',\n }\n # apparently `author` acts like an OR in this API and only this API -_-\n query = \"repo:internetarchive/openlibrary is:open is:pr author:cdrini author:jimchamp author:mekarpeles author:scottbarnes -is:draft\"\n prs = requests.get(\n \"https://api.github.com/search/issues\",\n params={\n \"q\": query,\n },\n ).json()[\"items\"]\n\n message = f\"{len(prs)} open staff PRs:\\n\\n\"\n for pr in prs:\n pr_url = pr['html_url']\n pr_age_days = (\n datetime.now() - datetime.strptime(pr['created_at'], '%Y-%m-%dT%H:%M:%SZ')\n ).days\n message += f\"<{pr_url}|*#{pr['number']}* | {pr['title']}>\\n\"\n message += ' | '.join(\n [\n f\"by {pr['user']['login']} {pr_age_days} days ago\",\n f\"Assigned: {GH_LOGIN_TO_SLACK[pr['assignee']['login']] if pr['assignee'] else '\u26a0\ufe0f None'}\",\n f\"{', '.join(LABEL_EMOJI.get(label['name'], '') + label['name'] for label in pr['labels'])}\\n\\n\",\n ]\n )\n\n send_slack_message(message)\n", "path": "scripts/pr_slack_digest.py"}]}
| 1,398 | 302 |
gh_patches_debug_9867
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-357
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception in default_literal_parser() when ValueNode has no "value" member
The [default_literal_parser() function](https://github.com/mirumee/ariadne/blob/master/ariadne/scalars.py#L90) in ariadne.scalars expects nodes to have a "value" member. However, this is not the case with, for example, `ObjectValueNode` or `ListValueNode`. This causes an exception when trying to pass such nodes.
My suggestion is to use `graphql.utilities.value_from_ast_untyped` instead:
```
return value_parser(value_from_ast_untyped(ast))
```
I'm happy to do a PR if you guys like this change.
</issue>
<code>
[start of ariadne/scalars.py]
1 from typing import Optional, cast
2
3 from graphql.language.ast import (
4 BooleanValueNode,
5 FloatValueNode,
6 IntValueNode,
7 StringValueNode,
8 )
9 from graphql.type import (
10 GraphQLNamedType,
11 GraphQLScalarLiteralParser,
12 GraphQLScalarSerializer,
13 GraphQLScalarType,
14 GraphQLScalarValueParser,
15 GraphQLSchema,
16 )
17
18 from .types import SchemaBindable
19
20
21 class ScalarType(SchemaBindable):
22 _serialize: Optional[GraphQLScalarSerializer]
23 _parse_value: Optional[GraphQLScalarValueParser]
24 _parse_literal: Optional[GraphQLScalarLiteralParser]
25
26 def __init__(
27 self,
28 name: str,
29 *,
30 serializer: GraphQLScalarSerializer = None,
31 value_parser: GraphQLScalarValueParser = None,
32 literal_parser: GraphQLScalarLiteralParser = None,
33 ) -> None:
34 self.name = name
35 self._serialize = serializer
36 self._parse_value = value_parser
37 self._parse_literal = literal_parser
38
39 def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:
40 self._serialize = f
41 return f
42
43 def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
44 self._parse_value = f
45 if not self._parse_literal:
46 self._parse_literal = create_default_literal_parser(f)
47 return f
48
49 def set_literal_parser(
50 self, f: GraphQLScalarLiteralParser
51 ) -> GraphQLScalarLiteralParser:
52 self._parse_literal = f
53 return f
54
55 # Alias above setters for consistent decorator API
56 serializer = set_serializer
57 value_parser = set_value_parser
58 literal_parser = set_literal_parser
59
60 def bind_to_schema(self, schema: GraphQLSchema) -> None:
61 graphql_type = schema.type_map.get(self.name)
62 self.validate_graphql_type(graphql_type)
63 graphql_type = cast(GraphQLScalarType, graphql_type)
64
65 if self._serialize:
66 # See mypy bug https://github.com/python/mypy/issues/2427
67 graphql_type.serialize = self._serialize # type: ignore
68 if self._parse_value:
69 graphql_type.parse_value = self._parse_value # type: ignore
70 if self._parse_literal:
71 graphql_type.parse_literal = self._parse_literal # type: ignore
72
73 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
74 if not graphql_type:
75 raise ValueError("Scalar %s is not defined in the schema" % self.name)
76 if not isinstance(graphql_type, GraphQLScalarType):
77 raise ValueError(
78 "%s is defined in the schema, but it is instance of %s (expected %s)"
79 % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
80 )
81
82
83 SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)
84
85
86 def create_default_literal_parser(
87 value_parser: GraphQLScalarValueParser,
88 ) -> GraphQLScalarLiteralParser:
89 def default_literal_parser(ast):
90 return value_parser(ast.value)
91
92 return default_literal_parser
93
[end of ariadne/scalars.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ariadne/scalars.py b/ariadne/scalars.py
--- a/ariadne/scalars.py
+++ b/ariadne/scalars.py
@@ -14,6 +14,7 @@
GraphQLScalarValueParser,
GraphQLSchema,
)
+from graphql.utilities import value_from_ast_untyped
from .types import SchemaBindable
@@ -87,6 +88,6 @@
value_parser: GraphQLScalarValueParser,
) -> GraphQLScalarLiteralParser:
def default_literal_parser(ast):
- return value_parser(ast.value)
+ return value_parser(value_from_ast_untyped(ast))
return default_literal_parser
|
{"golden_diff": "diff --git a/ariadne/scalars.py b/ariadne/scalars.py\n--- a/ariadne/scalars.py\n+++ b/ariadne/scalars.py\n@@ -14,6 +14,7 @@\n GraphQLScalarValueParser,\n GraphQLSchema,\n )\n+from graphql.utilities import value_from_ast_untyped\n \n from .types import SchemaBindable\n \n@@ -87,6 +88,6 @@\n value_parser: GraphQLScalarValueParser,\n ) -> GraphQLScalarLiteralParser:\n def default_literal_parser(ast):\n- return value_parser(ast.value)\n+ return value_parser(value_from_ast_untyped(ast))\n \n return default_literal_parser\n", "issue": "Exception in default_literal_parser() when ValueNode has no \"value\" member\nThe [default_literal_parser() function](https://github.com/mirumee/ariadne/blob/master/ariadne/scalars.py#L90) in ariadne.scalars expects nodes to have a \"value\" member. However, this is not the case with, for example, `ObjectValueNode` or `ListValueNode`. This causes an exception when trying to pass such nodes.\r\n\r\nMy suggestion is to use `graphql.utilities.value_from_ast_untyped` instead:\r\n```\r\nreturn value_parser(value_from_ast_untyped(ast))\r\n``` \r\n\r\nI'm happy to do a PR if you guys like this change.\n", "before_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.language.ast import (\n BooleanValueNode,\n FloatValueNode,\n IntValueNode,\n StringValueNode,\n)\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n if not self._parse_literal:\n self._parse_literal = create_default_literal_parser(f)\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if self._parse_value:\n graphql_type.parse_value = self._parse_value # type: ignore\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal # type: ignore\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected 
%s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n\n\nSCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n\n\ndef create_default_literal_parser(\n value_parser: GraphQLScalarValueParser,\n) -> GraphQLScalarLiteralParser:\n def default_literal_parser(ast):\n return value_parser(ast.value)\n\n return default_literal_parser\n", "path": "ariadne/scalars.py"}]}
| 1,527 | 143 |
gh_patches_debug_33234
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-1373
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip install modin[all] should choose what to install based on the OS
When a Windows user runs `pip install modin[all]` it will not work because Ray does not have any Windows releases. We should still support `pip install modin[all]` in Windows.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"]
8 ray_deps = ["ray==0.8.3"]
9
10 setup(
11 name="modin",
12 version=versioneer.get_version(),
13 cmdclass=versioneer.get_cmdclass(),
14 description="Modin: Make your pandas code run faster by changing one line of code.",
15 packages=find_packages(),
16 url="https://github.com/modin-project/modin",
17 long_description=long_description,
18 long_description_content_type="text/markdown",
19 install_requires=["pandas==1.0.3", "packaging"],
20 extras_require={
21 # can be installed by pip install modin[dask]
22 "dask": dask_deps,
23 "ray": ray_deps,
24 "all": dask_deps + ray_deps,
25 },
26 python_requires=">=3.5",
27 )
28
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,18 +1,60 @@
from setuptools import setup, find_packages
import versioneer
+import os
+from setuptools.dist import Distribution
+
+try:
+ from wheel.bdist_wheel import bdist_wheel
+
+ HAS_WHEEL = True
+except ImportError:
+ HAS_WHEEL = False
with open("README.md", "r") as fh:
long_description = fh.read()
+if HAS_WHEEL:
+
+ class ModinWheel(bdist_wheel):
+ def finalize_options(self):
+ bdist_wheel.finalize_options(self)
+ self.root_is_pure = False
+
+ def get_tag(self):
+ _, _, plat = bdist_wheel.get_tag(self)
+ py = "py3"
+ abi = "none"
+ return py, abi, plat
+
+
+class ModinDistribution(Distribution):
+ def __init__(self, *attrs):
+ Distribution.__init__(self, *attrs)
+ if HAS_WHEEL:
+ self.cmdclass["bdist_wheel"] = ModinWheel
+
+ def is_pure(self):
+ return False
+
+
dask_deps = ["dask>=2.1.0", "distributed>=2.3.2"]
ray_deps = ["ray==0.8.3"]
+if "SETUP_PLAT_NAME" in os.environ:
+ if "win" in os.environ["SETUP_PLAT_NAME"]:
+ all_deps = dask_deps
+ else:
+ all_deps = dask_deps + ray_deps
+else:
+ all_deps = dask_deps if os.name == "nt" else dask_deps + ray_deps
setup(
name="modin",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
+ distclass=ModinDistribution,
description="Modin: Make your pandas code run faster by changing one line of code.",
packages=find_packages(),
+ license="Apache 2",
url="https://github.com/modin-project/modin",
long_description=long_description,
long_description_content_type="text/markdown",
@@ -21,7 +63,7 @@
# can be installed by pip install modin[dask]
"dask": dask_deps,
"ray": ray_deps,
- "all": dask_deps + ray_deps,
+ "all": all_deps,
},
python_requires=">=3.5",
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,18 +1,60 @@\n from setuptools import setup, find_packages\n import versioneer\n+import os\n+from setuptools.dist import Distribution\n+\n+try:\n+ from wheel.bdist_wheel import bdist_wheel\n+\n+ HAS_WHEEL = True\n+except ImportError:\n+ HAS_WHEEL = False\n \n with open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n \n+if HAS_WHEEL:\n+\n+ class ModinWheel(bdist_wheel):\n+ def finalize_options(self):\n+ bdist_wheel.finalize_options(self)\n+ self.root_is_pure = False\n+\n+ def get_tag(self):\n+ _, _, plat = bdist_wheel.get_tag(self)\n+ py = \"py3\"\n+ abi = \"none\"\n+ return py, abi, plat\n+\n+\n+class ModinDistribution(Distribution):\n+ def __init__(self, *attrs):\n+ Distribution.__init__(self, *attrs)\n+ if HAS_WHEEL:\n+ self.cmdclass[\"bdist_wheel\"] = ModinWheel\n+\n+ def is_pure(self):\n+ return False\n+\n+\n dask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\n ray_deps = [\"ray==0.8.3\"]\n+if \"SETUP_PLAT_NAME\" in os.environ:\n+ if \"win\" in os.environ[\"SETUP_PLAT_NAME\"]:\n+ all_deps = dask_deps\n+ else:\n+ all_deps = dask_deps + ray_deps\n+else:\n+ all_deps = dask_deps if os.name == \"nt\" else dask_deps + ray_deps\n \n setup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n+ distclass=ModinDistribution,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n+ license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n@@ -21,7 +63,7 @@\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n- \"all\": dask_deps + ray_deps,\n+ \"all\": all_deps,\n },\n python_requires=\">=3.5\",\n )\n", "issue": "pip install modin[all] should choose what to install based on the OS\nWhen a Windows user runs `pip install modin[all]` it will not work because Ray does not have any Windows releases. We should still support `pip install modin[all]` in Windows.\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.1.0\", \"distributed>=2.3.2\"]\nray_deps = [\"ray==0.8.3\"]\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==1.0.3\", \"packaging\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"all\": dask_deps + ray_deps,\n },\n python_requires=\">=3.5\",\n)\n", "path": "setup.py"}]}
| 860 | 556 |
gh_patches_debug_7412
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1498
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
skimage.io.imread returns incorrect dimensions (according to docs)
See here: http://stackoverflow.com/questions/30088841
Importing a TIFF image using skimage.io.imread returns an array with shape (3,M,N) instead of (M,N,3). This breaks the flatten and as_grey features. The results are the same calling `imread` with `plugin='tifffile'`.
```
In [1]: im = skimage.io.imread('image.tif')
In [2]: im.shape
Out[2]: (3, 768, 1024)
In [3]: im = skimage.io.imread('image.tif', as_grey = True)
In [4]: im.shape
Out[4]: (3, 768)
In [5]: im = skimage.io.imread('image.tif', flatten = True)
In [6]: im.shape
Out[6]: (3, 768)
```
</issue>
<code>
[start of skimage/io/_io.py]
1 from io import BytesIO
2 import warnings
3
4 import numpy as np
5 import six
6
7 from ..io.manage_plugins import call_plugin
8 from ..color import rgb2grey
9 from .util import file_or_url_context
10 from ..exposure import is_low_contrast
11 from .._shared._warnings import all_warnings
12
13
14 __all__ = ['imread', 'imread_collection', 'imsave', 'imshow', 'show']
15
16
17 def imread(fname, as_grey=False, plugin=None, flatten=None,
18 **plugin_args):
19 """Load an image from file.
20
21 Parameters
22 ----------
23 fname : string
24 Image file name, e.g. ``test.jpg`` or URL.
25 as_grey : bool
26 If True, convert color images to grey-scale (32-bit floats).
27 Images that are already in grey-scale format are not converted.
28 plugin : str
29 Name of plugin to use (Python Imaging Library by default).
30
31 Other Parameters
32 ----------------
33 flatten : bool
34 Backward compatible keyword, superseded by `as_grey`.
35
36 Returns
37 -------
38 img_array : ndarray
39 The different colour bands/channels are stored in the
40 third dimension, such that a grey-image is MxN, an
41 RGB-image MxNx3 and an RGBA-image MxNx4.
42
43 Other parameters
44 ----------------
45 plugin_args : keywords
46 Passed to the given plugin.
47
48 """
49 # Backward compatibility
50 if flatten is not None:
51 as_grey = flatten
52
53 with file_or_url_context(fname) as fname:
54 img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
55
56 if as_grey and getattr(img, 'ndim', 0) >= 3:
57 img = rgb2grey(img)
58
59 return img
60
61
62 def imread_collection(load_pattern, conserve_memory=True,
63 plugin=None, **plugin_args):
64 """
65 Load a collection of images.
66
67 Parameters
68 ----------
69 load_pattern : str or list
70 List of objects to load. These are usually filenames, but may
71 vary depending on the currently active plugin. See the docstring
72 for ``ImageCollection`` for the default behaviour of this parameter.
73 conserve_memory : bool, optional
74 If True, never keep more than one in memory at a specific
75 time. Otherwise, images will be cached once they are loaded.
76
77 Returns
78 -------
79 ic : ImageCollection
80 Collection of images.
81
82 Other parameters
83 ----------------
84 plugin_args : keywords
85 Passed to the given plugin.
86
87 """
88 return call_plugin('imread_collection', load_pattern, conserve_memory,
89 plugin=plugin, **plugin_args)
90
91
92 def imsave(fname, arr, plugin=None, **plugin_args):
93 """Save an image to file.
94
95 Parameters
96 ----------
97 fname : str
98 Target filename.
99 arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)
100 Image data.
101 plugin : str
102 Name of plugin to use. By default, the different plugins are
103 tried (starting with the Python Imaging Library) until a suitable
104 candidate is found.
105
106 Other parameters
107 ----------------
108 plugin_args : keywords
109 Passed to the given plugin.
110
111 """
112 if is_low_contrast(arr):
113 warnings.warn('%s is a low contrast image' % fname)
114 return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)
115
116
117 def imshow(arr, plugin=None, **plugin_args):
118 """Display an image.
119
120 Parameters
121 ----------
122 arr : ndarray or str
123 Image data or name of image file.
124 plugin : str
125 Name of plugin to use. By default, the different plugins are
126 tried (starting with the Python Imaging Library) until a suitable
127 candidate is found.
128
129 Other parameters
130 ----------------
131 plugin_args : keywords
132 Passed to the given plugin.
133
134 """
135 if isinstance(arr, six.string_types):
136 arr = call_plugin('imread', arr, plugin=plugin)
137 return call_plugin('imshow', arr, plugin=plugin, **plugin_args)
138
139
140 def show():
141 '''Display pending images.
142
143 Launch the event loop of the current gui plugin, and display all
144 pending images, queued via `imshow`. This is required when using
145 `imshow` from non-interactive scripts.
146
147 A call to `show` will block execution of code until all windows
148 have been closed.
149
150 Examples
151 --------
152 >>> import skimage.io as io
153
154 >>> for i in range(4):
155 ... ax_im = io.imshow(np.random.rand(50, 50))
156 >>> io.show() # doctest: +SKIP
157
158 '''
159 return call_plugin('_app_show')
160
[end of skimage/io/_io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/io/_io.py b/skimage/io/_io.py
--- a/skimage/io/_io.py
+++ b/skimage/io/_io.py
@@ -53,8 +53,16 @@
with file_or_url_context(fname) as fname:
img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
- if as_grey and getattr(img, 'ndim', 0) >= 3:
- img = rgb2grey(img)
+ if not hasattr(img, 'ndim'):
+ return img
+
+ if img.ndim > 2:
+ if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):
+ img = np.swapaxes(img, -1, -3)
+ img = np.swapaxes(img, -2, -3)
+
+ if as_grey:
+ img = rgb2grey(img)
return img
|
{"golden_diff": "diff --git a/skimage/io/_io.py b/skimage/io/_io.py\n--- a/skimage/io/_io.py\n+++ b/skimage/io/_io.py\n@@ -53,8 +53,16 @@\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n \n- if as_grey and getattr(img, 'ndim', 0) >= 3:\n- img = rgb2grey(img)\n+ if not hasattr(img, 'ndim'):\n+ return img\n+\n+ if img.ndim > 2:\n+ if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):\n+ img = np.swapaxes(img, -1, -3)\n+ img = np.swapaxes(img, -2, -3)\n+\n+ if as_grey:\n+ img = rgb2grey(img)\n \n return img\n", "issue": "skimage.io.imread returns incorrect dimensions (according to docs)\nSee here: http://stackoverflow.com/questions/30088841\n\nImporting a TIFF image using skimage.io.imread returns an array with shape (3,M,N) instead of (M,N,3). This breaks the flatten and as_grey features. The results are the same calling `imread` with `plugin='tifffile'`.\n\n```\nIn [1]: im = skimage.io.imread('image.tif')\n\nIn [2]: im.shape\nOut[2]: (3, 768, 1024)\n\nIn [3]: im = skimage.io.imread('image.tif', as_grey = True)\n\nIn [4]: im.shape\nOut[4]: (3, 768)\n\nIn [5]: im = skimage.io.imread('image.tif', flatten = True)\n\nIn [6]: im.shape\nOut[6]: (3, 768)\n```\n\n", "before_files": [{"content": "from io import BytesIO\nimport warnings\n\nimport numpy as np\nimport six\n\nfrom ..io.manage_plugins import call_plugin\nfrom ..color import rgb2grey\nfrom .util import file_or_url_context\nfrom ..exposure import is_low_contrast\nfrom .._shared._warnings import all_warnings\n\n\n__all__ = ['imread', 'imread_collection', 'imsave', 'imshow', 'show']\n\n\ndef imread(fname, as_grey=False, plugin=None, flatten=None,\n **plugin_args):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.jpg`` or URL.\n as_grey : bool\n If True, convert color images to grey-scale (32-bit floats).\n Images that are already in grey-scale format are not converted.\n plugin : str\n Name of plugin to use (Python Imaging Library by default).\n\n Other Parameters\n ----------------\n flatten : bool\n Backward compatible keyword, superseded by `as_grey`.\n\n Returns\n -------\n img_array : ndarray\n The different colour bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n # Backward compatibility\n if flatten is not None:\n as_grey = flatten\n\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n\n if as_grey and getattr(img, 'ndim', 0) >= 3:\n img = rgb2grey(img)\n\n return img\n\n\ndef imread_collection(load_pattern, conserve_memory=True,\n plugin=None, **plugin_args):\n \"\"\"\n Load a collection of images.\n\n Parameters\n ----------\n load_pattern : str or list\n List of objects to load. These are usually filenames, but may\n vary depending on the currently active plugin. See the docstring\n for ``ImageCollection`` for the default behaviour of this parameter.\n conserve_memory : bool, optional\n If True, never keep more than one in memory at a specific\n time. 
Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n ic : ImageCollection\n Collection of images.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imread_collection', load_pattern, conserve_memory,\n plugin=plugin, **plugin_args)\n\n\ndef imsave(fname, arr, plugin=None, **plugin_args):\n \"\"\"Save an image to file.\n\n Parameters\n ----------\n fname : str\n Target filename.\n arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)\n Image data.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if is_low_contrast(arr):\n warnings.warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\n\n\ndef imshow(arr, plugin=None, **plugin_args):\n \"\"\"Display an image.\n\n Parameters\n ----------\n arr : ndarray or str\n Image data or name of image file.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if isinstance(arr, six.string_types):\n arr = call_plugin('imread', arr, plugin=plugin)\n return call_plugin('imshow', arr, plugin=plugin, **plugin_args)\n\n\ndef show():\n '''Display pending images.\n\n Launch the event loop of the current gui plugin, and display all\n pending images, queued via `imshow`. This is required when using\n `imshow` from non-interactive scripts.\n\n A call to `show` will block execution of code until all windows\n have been closed.\n\n Examples\n --------\n >>> import skimage.io as io\n\n >>> for i in range(4):\n ... ax_im = io.imshow(np.random.rand(50, 50))\n >>> io.show() # doctest: +SKIP\n\n '''\n return call_plugin('_app_show')\n", "path": "skimage/io/_io.py"}]}
| 2,150 | 221 |
gh_patches_debug_66813
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-6976
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
griddata docstring doesn't make any sense
The docstring for `griddata` refers to many different integers:
`D`, `n`, `ndim`, `M`:
```
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
```
The only one that is defined is `D`, so users have no idea what the others are.
Furthermore the example provided contradicts the docstring as xi is passed as a tuple but the docstring says it must be a ndarray.
</issue>
<code>
[start of scipy/interpolate/ndgriddata.py]
1 """
2 Convenience interface to N-D interpolation
3
4 .. versionadded:: 0.9
5
6 """
7 from __future__ import division, print_function, absolute_import
8
9 import numpy as np
10 from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
11 CloughTocher2DInterpolator, _ndim_coords_from_arrays
12 from scipy.spatial import cKDTree
13
14 __all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
15 'CloughTocher2DInterpolator']
16
17 #------------------------------------------------------------------------------
18 # Nearest-neighbour interpolation
19 #------------------------------------------------------------------------------
20
21
22 class NearestNDInterpolator(NDInterpolatorBase):
23 """
24 NearestNDInterpolator(points, values)
25
26 Nearest-neighbour interpolation in N dimensions.
27
28 .. versionadded:: 0.9
29
30 Methods
31 -------
32 __call__
33
34 Parameters
35 ----------
36 x : (Npoints, Ndims) ndarray of floats
37 Data point coordinates.
38 y : (Npoints,) ndarray of float or complex
39 Data values.
40 rescale : boolean, optional
41 Rescale points to unit cube before performing interpolation.
42 This is useful if some of the input dimensions have
43 incommensurable units and differ by many orders of magnitude.
44
45 .. versionadded:: 0.14.0
46 tree_options : dict, optional
47 Options passed to the underlying ``cKDTree``.
48
49 .. versionadded:: 0.17.0
50
51
52 Notes
53 -----
54 Uses ``scipy.spatial.cKDTree``
55
56 """
57
58 def __init__(self, x, y, rescale=False, tree_options=None):
59 NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
60 need_contiguous=False,
61 need_values=False)
62 if tree_options is None:
63 tree_options = dict()
64 self.tree = cKDTree(self.points, **tree_options)
65 self.values = y
66
67 def __call__(self, *args):
68 """
69 Evaluate interpolator at given points.
70
71 Parameters
72 ----------
73 xi : ndarray of float, shape (..., ndim)
74 Points where to interpolate data at.
75
76 """
77 xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
78 xi = self._check_call_shape(xi)
79 xi = self._scale_x(xi)
80 dist, i = self.tree.query(xi)
81 return self.values[i]
82
83
84 #------------------------------------------------------------------------------
85 # Convenience interface function
86 #------------------------------------------------------------------------------
87
88 def griddata(points, values, xi, method='linear', fill_value=np.nan,
89 rescale=False):
90 """
91 Interpolate unstructured D-dimensional data.
92
93 Parameters
94 ----------
95 points : ndarray of floats, shape (n, D)
96 Data point coordinates. Can either be an array of
97 shape (n, D), or a tuple of `ndim` arrays.
98 values : ndarray of float or complex, shape (n,)
99 Data values.
100 xi : ndarray of float, shape (M, D)
101 Points at which to interpolate data.
102 method : {'linear', 'nearest', 'cubic'}, optional
103 Method of interpolation. One of
104
105 ``nearest``
106 return the value at the data point closest to
107 the point of interpolation. See `NearestNDInterpolator` for
108 more details.
109
110 ``linear``
111 tesselate the input point set to n-dimensional
112 simplices, and interpolate linearly on each simplex. See
113 `LinearNDInterpolator` for more details.
114
115 ``cubic`` (1-D)
116 return the value determined from a cubic
117 spline.
118
119 ``cubic`` (2-D)
120 return the value determined from a
121 piecewise cubic, continuously differentiable (C1), and
122 approximately curvature-minimizing polynomial surface. See
123 `CloughTocher2DInterpolator` for more details.
124 fill_value : float, optional
125 Value used to fill in for requested points outside of the
126 convex hull of the input points. If not provided, then the
127 default is ``nan``. This option has no effect for the
128 'nearest' method.
129 rescale : bool, optional
130 Rescale points to unit cube before performing interpolation.
131 This is useful if some of the input dimensions have
132 incommensurable units and differ by many orders of magnitude.
133
134 .. versionadded:: 0.14.0
135
136 Notes
137 -----
138
139 .. versionadded:: 0.9
140
141 Examples
142 --------
143
144 Suppose we want to interpolate the 2-D function
145
146 >>> def func(x, y):
147 ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
148
149 on a grid in [0, 1]x[0, 1]
150
151 >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
152
153 but we only know its values at 1000 data points:
154
155 >>> points = np.random.rand(1000, 2)
156 >>> values = func(points[:,0], points[:,1])
157
158 This can be done with `griddata` -- below we try out all of the
159 interpolation methods:
160
161 >>> from scipy.interpolate import griddata
162 >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
163 >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
164 >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
165
166 One can see that the exact result is reproduced by all of the
167 methods to some degree, but for this smooth function the piecewise
168 cubic interpolant gives the best results:
169
170 >>> import matplotlib.pyplot as plt
171 >>> plt.subplot(221)
172 >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
173 >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
174 >>> plt.title('Original')
175 >>> plt.subplot(222)
176 >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
177 >>> plt.title('Nearest')
178 >>> plt.subplot(223)
179 >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
180 >>> plt.title('Linear')
181 >>> plt.subplot(224)
182 >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
183 >>> plt.title('Cubic')
184 >>> plt.gcf().set_size_inches(6, 6)
185 >>> plt.show()
186
187 """
188
189 points = _ndim_coords_from_arrays(points)
190
191 if points.ndim < 2:
192 ndim = points.ndim
193 else:
194 ndim = points.shape[-1]
195
196 if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
197 from .interpolate import interp1d
198 points = points.ravel()
199 if isinstance(xi, tuple):
200 if len(xi) != 1:
201 raise ValueError("invalid number of dimensions in xi")
202 xi, = xi
203 # Sort points/values together, necessary as input for interp1d
204 idx = np.argsort(points)
205 points = points[idx]
206 values = values[idx]
207 if method == 'nearest':
208 fill_value = 'extrapolate'
209 ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
210 fill_value=fill_value)
211 return ip(xi)
212 elif method == 'nearest':
213 ip = NearestNDInterpolator(points, values, rescale=rescale)
214 return ip(xi)
215 elif method == 'linear':
216 ip = LinearNDInterpolator(points, values, fill_value=fill_value,
217 rescale=rescale)
218 return ip(xi)
219 elif method == 'cubic' and ndim == 2:
220 ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
221 rescale=rescale)
222 return ip(xi)
223 else:
224 raise ValueError("Unknown interpolation method %r for "
225 "%d dimensional data" % (method, ndim))
226
[end of scipy/interpolate/ndgriddata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scipy/interpolate/ndgriddata.py b/scipy/interpolate/ndgriddata.py
--- a/scipy/interpolate/ndgriddata.py
+++ b/scipy/interpolate/ndgriddata.py
@@ -97,7 +97,7 @@
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
- xi : ndarray of float, shape (M, D)
+ xi : 2-D ndarray of float or tuple of 1-D array, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
|
{"golden_diff": "diff --git a/scipy/interpolate/ndgriddata.py b/scipy/interpolate/ndgriddata.py\n--- a/scipy/interpolate/ndgriddata.py\n+++ b/scipy/interpolate/ndgriddata.py\n@@ -97,7 +97,7 @@\n shape (n, D), or a tuple of `ndim` arrays.\n values : ndarray of float or complex, shape (n,)\n Data values.\n- xi : ndarray of float, shape (M, D)\n+ xi : 2-D ndarray of float or tuple of 1-D array, shape (M, D)\n Points at which to interpolate data.\n method : {'linear', 'nearest', 'cubic'}, optional\n Method of interpolation. One of\n", "issue": "griddata docstring doesn't make any sense\nThe docstring for `griddata` refers to many different integers:\n\n`D`, `n`, `ndim`, `M`:\n\n```\nInterpolate unstructured D-dimensional data.\n\nParameters\n----------\npoints : ndarray of floats, shape (n, D)\n Data point coordinates. Can either be an array of\n shape (n, D), or a tuple of `ndim` arrays.\nvalues : ndarray of float or complex, shape (n,)\n Data values.\nxi : ndarray of float, shape (M, D)\n Points at which to interpolate data.\n```\n\nThe only one that is defined is `D`, so users have no idea what the others are.\n\nFurthermore the example provided contradicts the docstring as xi is passed as a tuple but the docstring says it must be a ndarray.\n\n", "before_files": [{"content": "\"\"\"\nConvenience interface to N-D interpolation\n\n.. versionadded:: 0.9\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom .interpnd import LinearNDInterpolator, NDInterpolatorBase, \\\n CloughTocher2DInterpolator, _ndim_coords_from_arrays\nfrom scipy.spatial import cKDTree\n\n__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',\n 'CloughTocher2DInterpolator']\n\n#------------------------------------------------------------------------------\n# Nearest-neighbour interpolation\n#------------------------------------------------------------------------------\n\n\nclass NearestNDInterpolator(NDInterpolatorBase):\n \"\"\"\n NearestNDInterpolator(points, values)\n\n Nearest-neighbour interpolation in N dimensions.\n\n .. versionadded:: 0.9\n\n Methods\n -------\n __call__\n\n Parameters\n ----------\n x : (Npoints, Ndims) ndarray of floats\n Data point coordinates.\n y : (Npoints,) ndarray of float or complex\n Data values.\n rescale : boolean, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n tree_options : dict, optional\n Options passed to the underlying ``cKDTree``.\n\n .. 
versionadded:: 0.17.0\n\n\n Notes\n -----\n Uses ``scipy.spatial.cKDTree``\n\n \"\"\"\n\n def __init__(self, x, y, rescale=False, tree_options=None):\n NDInterpolatorBase.__init__(self, x, y, rescale=rescale,\n need_contiguous=False,\n need_values=False)\n if tree_options is None:\n tree_options = dict()\n self.tree = cKDTree(self.points, **tree_options)\n self.values = y\n\n def __call__(self, *args):\n \"\"\"\n Evaluate interpolator at given points.\n\n Parameters\n ----------\n xi : ndarray of float, shape (..., ndim)\n Points where to interpolate data at.\n\n \"\"\"\n xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])\n xi = self._check_call_shape(xi)\n xi = self._scale_x(xi)\n dist, i = self.tree.query(xi)\n return self.values[i]\n\n\n#------------------------------------------------------------------------------\n# Convenience interface function\n#------------------------------------------------------------------------------\n\ndef griddata(points, values, xi, method='linear', fill_value=np.nan,\n rescale=False):\n \"\"\"\n Interpolate unstructured D-dimensional data.\n\n Parameters\n ----------\n points : ndarray of floats, shape (n, D)\n Data point coordinates. Can either be an array of\n shape (n, D), or a tuple of `ndim` arrays.\n values : ndarray of float or complex, shape (n,)\n Data values.\n xi : ndarray of float, shape (M, D)\n Points at which to interpolate data.\n method : {'linear', 'nearest', 'cubic'}, optional\n Method of interpolation. One of\n\n ``nearest``\n return the value at the data point closest to\n the point of interpolation. See `NearestNDInterpolator` for\n more details.\n\n ``linear``\n tesselate the input point set to n-dimensional\n simplices, and interpolate linearly on each simplex. See\n `LinearNDInterpolator` for more details.\n\n ``cubic`` (1-D)\n return the value determined from a cubic\n spline.\n\n ``cubic`` (2-D)\n return the value determined from a\n piecewise cubic, continuously differentiable (C1), and\n approximately curvature-minimizing polynomial surface. See\n `CloughTocher2DInterpolator` for more details.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then the\n default is ``nan``. This option has no effect for the\n 'nearest' method.\n rescale : bool, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n\n Notes\n -----\n\n .. versionadded:: 0.9\n\n Examples\n --------\n\n Suppose we want to interpolate the 2-D function\n\n >>> def func(x, y):\n ... 
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2\n\n on a grid in [0, 1]x[0, 1]\n\n >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]\n\n but we only know its values at 1000 data points:\n\n >>> points = np.random.rand(1000, 2)\n >>> values = func(points[:,0], points[:,1])\n\n This can be done with `griddata` -- below we try out all of the\n interpolation methods:\n\n >>> from scipy.interpolate import griddata\n >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')\n >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')\n >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')\n\n One can see that the exact result is reproduced by all of the\n methods to some degree, but for this smooth function the piecewise\n cubic interpolant gives the best results:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.subplot(221)\n >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')\n >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)\n >>> plt.title('Original')\n >>> plt.subplot(222)\n >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Nearest')\n >>> plt.subplot(223)\n >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Linear')\n >>> plt.subplot(224)\n >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Cubic')\n >>> plt.gcf().set_size_inches(6, 6)\n >>> plt.show()\n\n \"\"\"\n\n points = _ndim_coords_from_arrays(points)\n\n if points.ndim < 2:\n ndim = points.ndim\n else:\n ndim = points.shape[-1]\n\n if ndim == 1 and method in ('nearest', 'linear', 'cubic'):\n from .interpolate import interp1d\n points = points.ravel()\n if isinstance(xi, tuple):\n if len(xi) != 1:\n raise ValueError(\"invalid number of dimensions in xi\")\n xi, = xi\n # Sort points/values together, necessary as input for interp1d\n idx = np.argsort(points)\n points = points[idx]\n values = values[idx]\n if method == 'nearest':\n fill_value = 'extrapolate'\n ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,\n fill_value=fill_value)\n return ip(xi)\n elif method == 'nearest':\n ip = NearestNDInterpolator(points, values, rescale=rescale)\n return ip(xi)\n elif method == 'linear':\n ip = LinearNDInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n elif method == 'cubic' and ndim == 2:\n ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n else:\n raise ValueError(\"Unknown interpolation method %r for \"\n \"%d dimensional data\" % (method, ndim))\n", "path": "scipy/interpolate/ndgriddata.py"}]}
| 3,098 | 167 |
gh_patches_debug_20597
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-1633
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when serving images through the URL generator
I posted a comment on https://github.com/torchbox/wagtail/issues/983 but probably better to open a new issue. Looks like the same problem to me though.
Hi guys, I think I'm having the same problem but when serving images using the URL generator. It does work if I'm logged-in in the site (cache not working) but doesn't when I'm not (cache full on).
Cheers,
Jordi
Internal Server Error: /images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/
Traceback (most recent call last):
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 204, in get_response
response = middleware_method(request, response)
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/middleware/cache.py", line 121, in process_response
self.cache.set(cache_key, response, timeout)
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/redis_cache/cache.py", line 239, in set
result = self._set(key, pickle.dumps(value), timeout, client, _add_only)
File "/var/www/buildability/venvs/buildability.co.nz/lib/python2.7/copy_reg.py", line 70, in _reduce_ex
raise TypeError, "can't pickle %s objects" % base.__name__
TypeError: can't pickle instancemethod objects
Request repr():
<WSGIRequest
path:/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/,
GET:<QueryDict: {}>,
POST:<QueryDict: {}>,
COOKIES:{'_ga': 'GA1.3.1219121887.1434427204',
'csrftoken': 'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs'},
META:{'CONTENT_LENGTH': '',
'CONTENT_TYPE': '',
u'CSRF_COOKIE': u'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',
'DOCUMENT_ROOT': '/usr/share/nginx/html',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,_/_;q=0.8',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_COOKIE': '_ga=GA1.3.1219121887.1434427204; csrftoken=GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',
'HTTP_HOST': 'www.buildability.co.nz',
'HTTP_UPGRADE_INSECURE_REQUESTS': '1',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',
'PATH_INFO': u'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/',
'QUERY_STRING': '',
'REMOTE_ADDR': '131.203.137.142',
'REMOTE_PORT': '51455',
'REQUEST_METHOD': 'GET',
'REQUEST_URI': '/images/2dMQIUOPwS5DlZuprp_E_WFdfhw%3D/47/width-75/',
u'SCRIPT_NAME': u'',
'SERVER_NAME': 'www.buildability.co.nz',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'UWSGI_SCHEME': 'http',
'uwsgi.core': 7,
'uwsgi.node': 'avinton',
'uwsgi.version': '1.9.17.1-debian',
'wsgi.errors': <open file 'wsgi_errors', mode 'w' at 0x7f0548a548a0>,
'wsgi.file_wrapper': <built-in function uwsgi_sendfile>,
'wsgi.input': <uwsgi._Input object at 0x7f0548a20a08>,
'wsgi.multiprocess': True,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0)}>
</issue>
<code>
[start of wagtail/wagtailimages/views/frontend.py]
1 from wsgiref.util import FileWrapper
2 import imghdr
3
4 from django.shortcuts import get_object_or_404
5 from django.http import HttpResponse
6 from django.core.exceptions import PermissionDenied
7
8 from wagtail.wagtailimages.models import get_image_model
9 from wagtail.wagtailimages.utils import verify_signature
10 from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
11
12
13 def serve(request, signature, image_id, filter_spec):
14 image = get_object_or_404(get_image_model(), id=image_id)
15
16 if not verify_signature(signature.encode(), image_id, filter_spec):
17 raise PermissionDenied
18
19 try:
20 rendition = image.get_rendition(filter_spec)
21 rendition.file.open('rb')
22 image_format = imghdr.what(rendition.file)
23 return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
24 except InvalidFilterSpecError:
25 return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
26
[end of wagtail/wagtailimages/views/frontend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/wagtailimages/views/frontend.py b/wagtail/wagtailimages/views/frontend.py
--- a/wagtail/wagtailimages/views/frontend.py
+++ b/wagtail/wagtailimages/views/frontend.py
@@ -2,7 +2,7 @@
import imghdr
from django.shortcuts import get_object_or_404
-from django.http import HttpResponse
+from django.http import HttpResponse, StreamingHttpResponse
from django.core.exceptions import PermissionDenied
from wagtail.wagtailimages.models import get_image_model
@@ -20,6 +20,6 @@
rendition = image.get_rendition(filter_spec)
rendition.file.open('rb')
image_format = imghdr.what(rendition.file)
- return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
+ return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
except InvalidFilterSpecError:
return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
|
{"golden_diff": "diff --git a/wagtail/wagtailimages/views/frontend.py b/wagtail/wagtailimages/views/frontend.py\n--- a/wagtail/wagtailimages/views/frontend.py\n+++ b/wagtail/wagtailimages/views/frontend.py\n@@ -2,7 +2,7 @@\n import imghdr\n \n from django.shortcuts import get_object_or_404\n-from django.http import HttpResponse\n+from django.http import HttpResponse, StreamingHttpResponse\n from django.core.exceptions import PermissionDenied\n \n from wagtail.wagtailimages.models import get_image_model\n@@ -20,6 +20,6 @@\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n- return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n+ return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "issue": "Error when serving images through the URL generator\nI posted a comment on https://github.com/torchbox/wagtail/issues/983 but probably better to open a new issue. Looks like the same problem to me though.\n\nHi guys, I think I'm having the same problem but when serving images using the URL generator. It does work if I'm logged-in in the site (cache not working) but doesn't when I'm not (cache full on).\n\nCheers,\nJordi\n\nInternal Server Error: /images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/\nTraceback (most recent call last):\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/core/handlers/base.py\", line 204, in get_response\n response = middleware_method(request, response)\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/middleware/cache.py\", line 121, in process_response\n self.cache.set(cache_key, response, timeout)\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/redis_cache/cache.py\", line 239, in set\n result = self._set(key, pickle.dumps(value), timeout, client, _add_only)\n File \"/var/www/buildability/venvs/buildability.co.nz/lib/python2.7/copy_reg.py\", line 70, in _reduce_ex\n raise TypeError, \"can't pickle %s objects\" % base.__name__\nTypeError: can't pickle instancemethod objects\n\nRequest repr(): \n<WSGIRequest\npath:/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/,\nGET:<QueryDict: {}>,\nPOST:<QueryDict: {}>,\nCOOKIES:{'_ga': 'GA1.3.1219121887.1434427204',\n 'csrftoken': 'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs'},\nMETA:{'CONTENT_LENGTH': '',\n 'CONTENT_TYPE': '',\n u'CSRF_COOKIE': u'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',\n 'DOCUMENT_ROOT': '/usr/share/nginx/html',\n 'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,_/_;q=0.8',\n 'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',\n 'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',\n 'HTTP_CACHE_CONTROL': 'max-age=0',\n 'HTTP_CONNECTION': 'keep-alive',\n 'HTTP_COOKIE': '_ga=GA1.3.1219121887.1434427204; csrftoken=GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',\n 'HTTP_HOST': 'www.buildability.co.nz',\n 'HTTP_UPGRADE_INSECURE_REQUESTS': '1',\n 'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',\n 'PATH_INFO': u'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '131.203.137.142',\n 'REMOTE_PORT': '51455',\n 'REQUEST_METHOD': 'GET',\n 'REQUEST_URI': 
'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw%3D/47/width-75/',\n u'SCRIPT_NAME': u'',\n 'SERVER_NAME': 'www.buildability.co.nz',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'UWSGI_SCHEME': 'http',\n 'uwsgi.core': 7,\n 'uwsgi.node': 'avinton',\n 'uwsgi.version': '1.9.17.1-debian',\n 'wsgi.errors': <open file 'wsgi_errors', mode 'w' at 0x7f0548a548a0>,\n 'wsgi.file_wrapper': <built-in function uwsgi_sendfile>,\n 'wsgi.input': <uwsgi._Input object at 0x7f0548a20a08>,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': True,\n 'wsgi.run_once': False,\n 'wsgi.url_scheme': 'http',\n 'wsgi.version': (1, 0)}>\n\n", "before_files": [{"content": "from wsgiref.util import FileWrapper\nimport imghdr\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import PermissionDenied\n\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtailimages.utils import verify_signature\nfrom wagtail.wagtailimages.exceptions import InvalidFilterSpecError\n\n\ndef serve(request, signature, image_id, filter_spec):\n image = get_object_or_404(get_image_model(), id=image_id)\n\n if not verify_signature(signature.encode(), image_id, filter_spec):\n raise PermissionDenied\n\n try:\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "path": "wagtail/wagtailimages/views/frontend.py"}]}
| 1,896 | 232 |
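The root cause in the record above is that Django's cache middleware pickles the whole `HttpResponse`, and a response whose body is a `FileWrapper` around an open file cannot be pickled; `StreamingHttpResponse` sidesteps this because streaming responses are never cached. A minimal sketch of the failure, using only the standard library (the middleware and wagtail view from the record are not needed to reproduce it, and the exact error text varies by Python version):

```python
import pickle
from wsgiref.util import FileWrapper

# An open file handle is not picklable, so neither is anything that holds one,
# such as the FileWrapper used as the response content in the traceback above.
wrapper = FileWrapper(open(__file__, "rb"))

try:
    pickle.dumps(wrapper)
except TypeError as exc:
    # Python 3 typically reports "cannot pickle '_io.BufferedReader' object";
    # the Python 2 traceback in the issue surfaces it as an instancemethod error.
    print("pickling failed:", exc)
```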
gh_patches_debug_20827
|
rasdani/github-patches
|
git_diff
|
shuup__shuup-742
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
System check to verify Parler sanity
Shuup should check that the Parler configuration is sane before starting.
@JsseL and @juhakujala puzzled over an unrelated exception (`'shuup.admin.modules.services.behavior_form_part.BehaviorFormSet object' has no attribute 'empty_form'`) for a while – turns out it was an `AttributeError` ([which, as we unfortunately know, are hidden within `@property`s](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/utils/form_group.py#L86-L100)) within `FormSet.empty_form` calls that happens due to `PARLER_DEFAULT_LANGUAGE_CODE` being undefined:
```
Traceback (most recent call last):
File "~/django/forms/formsets.py", line 187, in empty_form
empty_permitted=True,
File "~/shuup/admin/modules/services/behavior_form_part.py", line 49, in form
kwargs.setdefault("default_language", settings.PARLER_DEFAULT_LANGUAGE_CODE)
File "~/django/conf/__init__.py", line 49, in __getattr__
return getattr(self._wrapped, name)
AttributeError: 'Settings' object has no attribute 'PARLER_DEFAULT_LANGUAGE_CODE'
```
My suggestion is to add a simple system check in [ShuupCoreAppConfig.ready()](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/core/__init__.py#L11) that throws an exception if some of the Parler settings (`PARLER_DEFAULT_LANGUAGE_CODE` and `PARLER_LANGUAGES`) are unset -- or perhaps it could automatically derive them based on the Django `LANGUAGES` setting, as "sane defaults" go?
</issue>
<code>
[start of shuup/core/__init__.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
5 #
6 # This source code is licensed under the AGPLv3 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from shuup.apps import AppConfig
9
10
11 class ShuupCoreAppConfig(AppConfig):
12 name = "shuup.core"
13 verbose_name = "Shuup Core"
14 label = "shuup" # Use "shuup" as app_label instead of "core"
15 required_installed_apps = (
16 "django.contrib.auth",
17 "django.contrib.contenttypes",
18 "easy_thumbnails",
19 "filer",
20 )
21 provides = {
22 "api_populator": [
23 "shuup.core.api:populate_core_api"
24 ],
25 "pricing_module": [
26 "shuup.core.pricing.default_pricing:DefaultPricingModule"
27 ],
28 }
29
30
31 default_app_config = "shuup.core.ShuupCoreAppConfig"
32
[end of shuup/core/__init__.py]
[start of shuup/core/excs.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
5 #
6 # This source code is licensed under the AGPLv3 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from shuup.utils.excs import Problem
9
10
11 class ImmutabilityError(ValueError):
12 pass
13
14
15 class NoProductsToShipException(Exception):
16 pass
17
18
19 class NoPaymentToCreateException(Exception):
20 pass
21
22
23 class NoRefundToCreateException(Exception):
24 pass
25
26
27 class RefundExceedsAmountException(Exception):
28 pass
29
30
31 class InvalidRefundAmountException(Exception):
32 pass
33
34
35 class ProductNotOrderableProblem(Problem):
36 pass
37
38
39 class ProductNotVisibleProblem(Problem):
40 pass
41
42
43 class ImpossibleProductModeException(ValueError):
44 def __init__(self, message, code=None):
45 super(ImpossibleProductModeException, self).__init__(message)
46 self.code = code
47
[end of shuup/core/excs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shuup/core/__init__.py b/shuup/core/__init__.py
--- a/shuup/core/__init__.py
+++ b/shuup/core/__init__.py
@@ -6,6 +6,7 @@
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
+from shuup.core.excs import MissingSettingException
class ShuupCoreAppConfig(AppConfig):
@@ -27,5 +28,12 @@
],
}
+ def ready(self):
+ from django.conf import settings
+ if not getattr(settings, "PARLER_DEFAULT_LANGUAGE_CODE", None):
+ raise MissingSettingException("PARLER_DEFAULT_LANGUAGE_CODE must be set.")
+ if not getattr(settings, "PARLER_LANGUAGES", None):
+ raise MissingSettingException("PARLER_LANGUAGES must be set.")
+
default_app_config = "shuup.core.ShuupCoreAppConfig"
diff --git a/shuup/core/excs.py b/shuup/core/excs.py
--- a/shuup/core/excs.py
+++ b/shuup/core/excs.py
@@ -32,6 +32,10 @@
pass
+class MissingSettingException(Exception):
+ pass
+
+
class ProductNotOrderableProblem(Problem):
pass
|
{"golden_diff": "diff --git a/shuup/core/__init__.py b/shuup/core/__init__.py\n--- a/shuup/core/__init__.py\n+++ b/shuup/core/__init__.py\n@@ -6,6 +6,7 @@\n # This source code is licensed under the AGPLv3 license found in the\n # LICENSE file in the root directory of this source tree.\n from shuup.apps import AppConfig\n+from shuup.core.excs import MissingSettingException\n \n \n class ShuupCoreAppConfig(AppConfig):\n@@ -27,5 +28,12 @@\n ],\n }\n \n+ def ready(self):\n+ from django.conf import settings\n+ if not getattr(settings, \"PARLER_DEFAULT_LANGUAGE_CODE\", None):\n+ raise MissingSettingException(\"PARLER_DEFAULT_LANGUAGE_CODE must be set.\")\n+ if not getattr(settings, \"PARLER_LANGUAGES\", None):\n+ raise MissingSettingException(\"PARLER_LANGUAGES must be set.\")\n+\n \n default_app_config = \"shuup.core.ShuupCoreAppConfig\"\ndiff --git a/shuup/core/excs.py b/shuup/core/excs.py\n--- a/shuup/core/excs.py\n+++ b/shuup/core/excs.py\n@@ -32,6 +32,10 @@\n pass\n \n \n+class MissingSettingException(Exception):\n+ pass\n+\n+\n class ProductNotOrderableProblem(Problem):\n pass\n", "issue": "System check to verify Parler sanity\nShuup should check that the Parler configuration is sane before starting.\n\n@JsseL and @juhakujala puzzled over an unrelated exception (`'shuup.admin.modules.services.behavior_form_part.BehaviorFormSet object' has no attribute 'empty_form'`) for a while \u2013 turns out it was an `AttributeError` ([which, as we unfortunately know, are hidden within `@property`s](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/utils/form_group.py#L86-L100)) within `FormSet.empty_form` calls that happens due to `PARLER_DEFAULT_LANGUAGE_CODE` being undefined:\n\n```\nTraceback (most recent call last):\n File \"~/django/forms/formsets.py\", line 187, in empty_form\n empty_permitted=True,\n File \"~/shuup/admin/modules/services/behavior_form_part.py\", line 49, in form\n kwargs.setdefault(\"default_language\", settings.PARLER_DEFAULT_LANGUAGE_CODE)\n File \"~/django/conf/__init__.py\", line 49, in __getattr__\n return getattr(self._wrapped, name)\nAttributeError: 'Settings' object has no attribute 'PARLER_DEFAULT_LANGUAGE_CODE'\n```\n\nMy suggestion is to add a simple system check in [ShuupCoreAppConfig.ready()](https://github.com/shuup/shuup/blob/5584ebf912bae415fe367ea0c00ad4c5cff49244/shuup/core/__init__.py#L11) that throws an exception if some of the Parler settings (`PARLER_DEFAULT_LANGUAGE_CODE` and `PARLER_LANGUAGES`) are unset -- or perhaps it could automatically derive them based on the Django `LANGUAGES` setting, as \"sane defaults\" go?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2016, Shoop Ltd. 
All rights reserved.\n#\n# This source code is licensed under the AGPLv3 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom shuup.apps import AppConfig\n\n\nclass ShuupCoreAppConfig(AppConfig):\n name = \"shuup.core\"\n verbose_name = \"Shuup Core\"\n label = \"shuup\" # Use \"shuup\" as app_label instead of \"core\"\n required_installed_apps = (\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"easy_thumbnails\",\n \"filer\",\n )\n provides = {\n \"api_populator\": [\n \"shuup.core.api:populate_core_api\"\n ],\n \"pricing_module\": [\n \"shuup.core.pricing.default_pricing:DefaultPricingModule\"\n ],\n }\n\n\ndefault_app_config = \"shuup.core.ShuupCoreAppConfig\"\n", "path": "shuup/core/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.\n#\n# This source code is licensed under the AGPLv3 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom shuup.utils.excs import Problem\n\n\nclass ImmutabilityError(ValueError):\n pass\n\n\nclass NoProductsToShipException(Exception):\n pass\n\n\nclass NoPaymentToCreateException(Exception):\n pass\n\n\nclass NoRefundToCreateException(Exception):\n pass\n\n\nclass RefundExceedsAmountException(Exception):\n pass\n\n\nclass InvalidRefundAmountException(Exception):\n pass\n\n\nclass ProductNotOrderableProblem(Problem):\n pass\n\n\nclass ProductNotVisibleProblem(Problem):\n pass\n\n\nclass ImpossibleProductModeException(ValueError):\n def __init__(self, message, code=None):\n super(ImpossibleProductModeException, self).__init__(message)\n self.code = code\n", "path": "shuup/core/excs.py"}]}
| 1,601 | 311 |
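The merged check above simply raises when the Parler settings are missing; the issue also floats deriving them from Django's own language settings as "sane defaults". A rough sketch of that alternative, assuming django-parler's documented `PARLER_LANGUAGES` layout (the helper name and where it would be called from are hypothetical, not part of shuup):

```python
from django.conf import settings


def derive_parler_settings():
    """Build fallback Parler settings from Django's LANGUAGE_CODE / LANGUAGES."""
    default = getattr(settings, "LANGUAGE_CODE", "en").split("-")[0]
    codes = [code.split("-")[0] for code, _name in getattr(settings, "LANGUAGES", [])]
    codes = list(dict.fromkeys(codes)) or [default]  # de-duplicate, keep order
    return {
        "PARLER_DEFAULT_LANGUAGE_CODE": default,
        "PARLER_LANGUAGES": {
            None: tuple({"code": code} for code in codes),
            "default": {"fallbacks": [default], "hide_untranslated": False},
        },
    }
```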
gh_patches_debug_31300
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.aws-1555
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[6.0.0] wafv2_rule_group_info - Remove deprecated `state` argument
### Summary
The `state` argument does nothing and never has (likely a copy&paste mistake). Remove it for consistency
### Issue Type
Feature Idea
### Component Name
plugins/modules/wafv2_rule_group_info.py
### Additional Information
Originally deprecated as part of #1210
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/wafv2_rule_group_info.py]
1 #!/usr/bin/python
2 # Copyright: Ansible Project
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4 from __future__ import absolute_import, division, print_function
5 __metaclass__ = type
6
7
8 DOCUMENTATION = '''
9 ---
10 module: wafv2_rule_group_info
11 version_added: 1.5.0
12 author:
13 - "Markus Bergholz (@markuman)"
14 short_description: wafv2_web_acl_info
15 description:
16 - Get informations about existing wafv2 rule groups.
17 options:
18 state:
19 description:
20 - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01.
21 required: false
22 type: str
23 name:
24 description:
25 - The name of the rule group.
26 required: true
27 type: str
28 scope:
29 description:
30 - Scope of wafv2 rule group.
31 required: true
32 choices: ["CLOUDFRONT","REGIONAL"]
33 type: str
34
35 extends_documentation_fragment:
36 - amazon.aws.aws
37 - amazon.aws.ec2
38 - amazon.aws.boto3
39
40 '''
41
42 EXAMPLES = '''
43 - name: rule group info
44 community.aws.wafv2_rule_group_info:
45 name: test02
46 state: present
47 scope: REGIONAL
48 '''
49
50 RETURN = """
51 arn:
52 description: Rule group arn
53 sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7
54 type: str
55 returned: Always, as long as the web acl exists
56 description:
57 description: Description of the rule group
58 sample: Some rule group description
59 returned: Always, as long as the web acl exists
60 type: str
61 capacity:
62 description: Current capacity of the rule group
63 sample: 500
64 returned: Always, as long as the rule group exists
65 type: int
66 name:
67 description: Rule group name
68 sample: test02
69 returned: Always, as long as the rule group exists
70 type: str
71 rules:
72 description: Current rules of the rule group
73 returned: Always, as long as the rule group exists
74 type: list
75 sample:
76 - action:
77 allow: {}
78 name: eins
79 priority: 1
80 statement:
81 ip_set_reference_statement:
82 arn: arn:aws:wafv2:eu-central-1:111111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a
83 visibility_config:
84 cloud_watch_metrics_enabled: True
85 metric_name: fsd
86 sampled_requests_enabled: True
87 visibility_config:
88 description: Visibility config of the rule group
89 returned: Always, as long as the rule group exists
90 type: dict
91 sample:
92 cloud_watch_metrics_enabled: True
93 metric_name: blub
94 sampled_requests_enabled: False
95 """
96
97 try:
98 from botocore.exceptions import ClientError, BotoCoreError
99 except ImportError:
100 pass # caught by AnsibleAWSModule
101
102 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
103 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
104 from ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups
105 from ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags
106
107
108 def get_rule_group(wafv2, name, scope, id, fail_json_aws):
109 try:
110 response = wafv2.get_rule_group(
111 Name=name,
112 Scope=scope,
113 Id=id
114 )
115 except (BotoCoreError, ClientError) as e:
116 fail_json_aws(e, msg="Failed to get wafv2 rule group.")
117 return response
118
119
120 def main():
121 arg_spec = dict(
122 state=dict(type='str', required=False),
123 name=dict(type='str', required=True),
124 scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
125 )
126
127 module = AnsibleAWSModule(
128 argument_spec=arg_spec,
129 supports_check_mode=True
130 )
131
132 state = module.params.get("state")
133 name = module.params.get("name")
134 scope = module.params.get("scope")
135
136 wafv2 = module.client('wafv2')
137
138 if state:
139 module.deprecate(
140 'The state parameter does nothing, has been deprecated, and will be removed in a future release.',
141 version='6.0.0', collection_name='community.aws')
142
143 # check if rule group exists
144 response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws)
145 id = None
146 retval = {}
147
148 for item in response.get('RuleGroups'):
149 if item.get('Name') == name:
150 id = item.get('Id')
151 arn = item.get('ARN')
152
153 existing_group = None
154 if id:
155 existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws)
156 retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup'))
157 tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)
158 retval['tags'] = tags or {}
159
160 module.exit_json(**retval)
161
162
163 if __name__ == '__main__':
164 main()
165
[end of plugins/modules/wafv2_rule_group_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/modules/wafv2_rule_group_info.py b/plugins/modules/wafv2_rule_group_info.py
--- a/plugins/modules/wafv2_rule_group_info.py
+++ b/plugins/modules/wafv2_rule_group_info.py
@@ -15,11 +15,6 @@
description:
- Get informations about existing wafv2 rule groups.
options:
- state:
- description:
- - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01.
- required: false
- type: str
name:
description:
- The name of the rule group.
@@ -43,7 +38,6 @@
- name: rule group info
community.aws.wafv2_rule_group_info:
name: test02
- state: present
scope: REGIONAL
'''
@@ -119,7 +113,6 @@
def main():
arg_spec = dict(
- state=dict(type='str', required=False),
name=dict(type='str', required=True),
scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])
)
@@ -129,17 +122,11 @@
supports_check_mode=True
)
- state = module.params.get("state")
name = module.params.get("name")
scope = module.params.get("scope")
wafv2 = module.client('wafv2')
- if state:
- module.deprecate(
- 'The state parameter does nothing, has been deprecated, and will be removed in a future release.',
- version='6.0.0', collection_name='community.aws')
-
# check if rule group exists
response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws)
id = None
|
{"golden_diff": "diff --git a/plugins/modules/wafv2_rule_group_info.py b/plugins/modules/wafv2_rule_group_info.py\n--- a/plugins/modules/wafv2_rule_group_info.py\n+++ b/plugins/modules/wafv2_rule_group_info.py\n@@ -15,11 +15,6 @@\n description:\n - Get informations about existing wafv2 rule groups.\n options:\n- state:\n- description:\n- - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01.\n- required: false\n- type: str\n name:\n description:\n - The name of the rule group.\n@@ -43,7 +38,6 @@\n - name: rule group info\n community.aws.wafv2_rule_group_info:\n name: test02\n- state: present\n scope: REGIONAL\n '''\n \n@@ -119,7 +113,6 @@\n \n def main():\n arg_spec = dict(\n- state=dict(type='str', required=False),\n name=dict(type='str', required=True),\n scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])\n )\n@@ -129,17 +122,11 @@\n supports_check_mode=True\n )\n \n- state = module.params.get(\"state\")\n name = module.params.get(\"name\")\n scope = module.params.get(\"scope\")\n \n wafv2 = module.client('wafv2')\n \n- if state:\n- module.deprecate(\n- 'The state parameter does nothing, has been deprecated, and will be removed in a future release.',\n- version='6.0.0', collection_name='community.aws')\n-\n # check if rule group exists\n response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws)\n id = None\n", "issue": "[6.0.0] wafv2_rule_group_info - Remove deprecated `state` argument \n### Summary\n\nThe `state` argument does nothing and never has (likely a copy&paste mistake). Remove it for consistency\n\n### Issue Type\n\nFeature Idea\n\n### Component Name\n\nplugins/modules/wafv2_rule_group_info.py\n\n### Additional Information\n\nOriginally deprecated as part of #1210\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: wafv2_rule_group_info\nversion_added: 1.5.0\nauthor:\n - \"Markus Bergholz (@markuman)\"\nshort_description: wafv2_web_acl_info\ndescription:\n - Get informations about existing wafv2 rule groups.\noptions:\n state:\n description:\n - This option does nothing, has been deprecated, and will be removed in a release after 2022-12-01.\n required: false\n type: str\n name:\n description:\n - The name of the rule group.\n required: true\n type: str\n scope:\n description:\n - Scope of wafv2 rule group.\n required: true\n choices: [\"CLOUDFRONT\",\"REGIONAL\"]\n type: str\n\nextends_documentation_fragment:\n - amazon.aws.aws\n - amazon.aws.ec2\n - amazon.aws.boto3\n\n'''\n\nEXAMPLES = '''\n- name: rule group info\n community.aws.wafv2_rule_group_info:\n name: test02\n state: present\n scope: REGIONAL\n'''\n\nRETURN = \"\"\"\narn:\n description: Rule group arn\n sample: arn:aws:wafv2:eu-central-1:11111111:regional/rulegroup/test02/6e90c01a-e4eb-43e5-b6aa-b1604cedf7d7\n type: str\n returned: Always, as long as the web acl exists\ndescription:\n description: Description of the rule group\n sample: Some rule group description\n returned: Always, as long as the web acl exists\n type: str\ncapacity:\n description: Current capacity of the rule group\n sample: 500\n returned: Always, as long as the rule group exists\n type: int\nname:\n description: Rule group name\n sample: 
test02\n returned: Always, as long as the rule group exists\n type: str\nrules:\n description: Current rules of the rule group\n returned: Always, as long as the rule group exists\n type: list\n sample:\n - action:\n allow: {}\n name: eins\n priority: 1\n statement:\n ip_set_reference_statement:\n arn: arn:aws:wafv2:eu-central-1:111111111:regional/ipset/test02/b6978915-c67b-4d1c-8832-2b1bb452143a\n visibility_config:\n cloud_watch_metrics_enabled: True\n metric_name: fsd\n sampled_requests_enabled: True\nvisibility_config:\n description: Visibility config of the rule group\n returned: Always, as long as the rule group exists\n type: dict\n sample:\n cloud_watch_metrics_enabled: True\n metric_name: blub\n sampled_requests_enabled: False\n\"\"\"\n\ntry:\n from botocore.exceptions import ClientError, BotoCoreError\nexcept ImportError:\n pass # caught by AnsibleAWSModule\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict\nfrom ansible_collections.community.aws.plugins.module_utils.wafv2 import wafv2_list_rule_groups\nfrom ansible_collections.community.aws.plugins.module_utils.wafv2 import describe_wafv2_tags\n\n\ndef get_rule_group(wafv2, name, scope, id, fail_json_aws):\n try:\n response = wafv2.get_rule_group(\n Name=name,\n Scope=scope,\n Id=id\n )\n except (BotoCoreError, ClientError) as e:\n fail_json_aws(e, msg=\"Failed to get wafv2 rule group.\")\n return response\n\n\ndef main():\n arg_spec = dict(\n state=dict(type='str', required=False),\n name=dict(type='str', required=True),\n scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL'])\n )\n\n module = AnsibleAWSModule(\n argument_spec=arg_spec,\n supports_check_mode=True\n )\n\n state = module.params.get(\"state\")\n name = module.params.get(\"name\")\n scope = module.params.get(\"scope\")\n\n wafv2 = module.client('wafv2')\n\n if state:\n module.deprecate(\n 'The state parameter does nothing, has been deprecated, and will be removed in a future release.',\n version='6.0.0', collection_name='community.aws')\n\n # check if rule group exists\n response = wafv2_list_rule_groups(wafv2, scope, module.fail_json_aws)\n id = None\n retval = {}\n\n for item in response.get('RuleGroups'):\n if item.get('Name') == name:\n id = item.get('Id')\n arn = item.get('ARN')\n\n existing_group = None\n if id:\n existing_group = get_rule_group(wafv2, name, scope, id, module.fail_json_aws)\n retval = camel_dict_to_snake_dict(existing_group.get('RuleGroup'))\n tags = describe_wafv2_tags(wafv2, arn, module.fail_json_aws)\n retval['tags'] = tags or {}\n\n module.exit_json(**retval)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/wafv2_rule_group_info.py"}]}
| 2,317 | 424 |
gh_patches_debug_29881
|
rasdani/github-patches
|
git_diff
|
e2nIEE__pandapower-880
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing dependencies: xlswriter, xlrd, cryptography
Hi,
I am currently following the instructions for the installation of the development version, as shown here: https://www.pandapower.org/start/#develop
I have a brand new virtual environment on Python 3.8.3 (Windows 10, 64 bits), and the tests failed because of the following missing dependencies:
> Edit: Same result on Python 3.7.8.
1. xlsxwriter: `FAILED pandapower\test\api\test_file_io.py::test_excel[1] - ModuleNotFoundError: No module named 'xlsxwriter'`
2. xlrd: `FAILED pandapower\test\api\test_file_io.py::test_excel[1] - ImportError: Missing optional dependency 'xlrd'. Install xlrd >= 1.0.0 for Excel support Use pip or conda to install xlrd.`
3. cryptography: `FAILED pandapower\test\api\test_file_io.py::test_encrypted_json[1] - ModuleNotFoundError: No module named 'cryptography'`
The permanent solution would most likely be to add those to setup.py and mention them in the documentation, but you might want to check if you should restrict the version.
P.S.: The tests still ended up failing, but that's a seperate issue (see issue #876 ).
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6 from setuptools import setup, find_packages
7 import re
8
9 with open('README.rst', 'rb') as f:
10 install = f.read().decode('utf-8')
11
12 with open('CHANGELOG.rst', 'rb') as f:
13 changelog = f.read().decode('utf-8')
14
15 classifiers = [
16 'Development Status :: 5 - Production/Stable',
17 'Environment :: Console',
18 'Intended Audience :: Developers',
19 'Intended Audience :: Education',
20 'Intended Audience :: Science/Research',
21 'License :: OSI Approved :: BSD License',
22 'Natural Language :: English',
23 'Operating System :: OS Independent',
24 'Programming Language :: Python',
25 'Programming Language :: Python :: 3']
26
27 with open('.travis.yml', 'rb') as f:
28 lines = f.read().decode('utf-8')
29 for version in re.findall('python: 3.[0-9]', lines):
30 classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])
31
32 long_description = '\n\n'.join((install, changelog))
33
34 setup(
35 name='pandapower',
36 version='2.3.0',
37 author='Leon Thurner, Alexander Scheidler',
38 author_email='[email protected], [email protected]',
39 description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
40 long_description=long_description,
41 long_description_content_type='text/x-rst',
42 url='http://www.pandapower.org',
43 license='BSD',
44 install_requires=["pandas>=0.17",
45 "networkx",
46 "scipy",
47 "numpy>=0.11",
48 "packaging"],
49 extras_require={":python_version<'3.0'": ["future"],
50 "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
51 "plotting": ["plotly", "matplotlib", "python-igraph"],
52 "test": ["pytest", "pytest-xdist"]},
53 packages=find_packages(),
54 include_package_data=True,
55 classifiers=classifiers
56 )
57
[end of setup.py]
[start of pandapower/__init__.py]
1 __version__ = "2.3.0"
2
3 import os
4 pp_dir = os.path.dirname(os.path.realpath(__file__))
5
6 from pandapower.auxiliary import *
7 from pandapower.convert_format import *
8 from pandapower.create import *
9 from pandapower.diagnostic import *
10 from pandapower.file_io import *
11 from pandapower.run import *
12 from pandapower.runpm import *
13 from pandapower.std_types import *
14 from pandapower.toolbox import *
15 from pandapower.powerflow import *
16 from pandapower.opf import *
17 from pandapower.optimal_powerflow import OPFNotConverged
18 from pandapower.pf.runpp_3ph import runpp_3ph
19 import pandas as pd
20 pd.options.mode.chained_assignment = None # default='warn'
21
[end of pandapower/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pandapower/__init__.py b/pandapower/__init__.py
--- a/pandapower/__init__.py
+++ b/pandapower/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.3.0"
+__version__ = "2.3.1"
import os
pp_dir = os.path.dirname(os.path.realpath(__file__))
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,10 +33,10 @@
setup(
name='pandapower',
- version='2.3.0',
+ version='2.3.1',
author='Leon Thurner, Alexander Scheidler',
author_email='[email protected], [email protected]',
- description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
+ description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',
long_description=long_description,
long_description_content_type='text/x-rst',
url='http://www.pandapower.org',
@@ -45,11 +45,14 @@
"networkx",
"scipy",
"numpy>=0.11",
- "packaging"],
- extras_require={":python_version<'3.0'": ["future"],
- "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
- "plotting": ["plotly", "matplotlib", "python-igraph"],
- "test": ["pytest", "pytest-xdist"]},
+ "packaging",
+ "xlsxwriter",
+ "xlrd",
+ "cryptography"],
+ extras_require={
+ "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
+ "plotting": ["plotly", "matplotlib", "python-igraph"],
+ "test": ["pytest", "pytest-xdist"]},
packages=find_packages(),
include_package_data=True,
classifiers=classifiers
|
{"golden_diff": "diff --git a/pandapower/__init__.py b/pandapower/__init__.py\n--- a/pandapower/__init__.py\n+++ b/pandapower/__init__.py\n@@ -1,4 +1,4 @@\n-__version__ = \"2.3.0\"\n+__version__ = \"2.3.1\"\n \n import os\n pp_dir = os.path.dirname(os.path.realpath(__file__))\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,10 +33,10 @@\n \n setup(\n name='pandapower',\n- version='2.3.0',\n+ version='2.3.1',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n- description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',\n+ description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',\n long_description=long_description,\n \tlong_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n@@ -45,11 +45,14 @@\n \"networkx\",\n \"scipy\",\n \"numpy>=0.11\",\n- \"packaging\"],\n- extras_require={\":python_version<'3.0'\": [\"future\"],\n- \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n- \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n- \"test\": [\"pytest\", \"pytest-xdist\"]},\n+ \"packaging\",\n+\t\t\t\t\t \"xlsxwriter\",\n+\t\t\t\t\t \"xlrd\",\n+\t\t\t\t\t \"cryptography\"],\n+ extras_require={\n+\t\t\"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n+\t\t\"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n+\t\t\"test\": [\"pytest\", \"pytest-xdist\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n", "issue": "Missing dependencies: xlswriter, xlrd, cryptography\nHi,\r\n\r\nI am currently following the instructions for the installation of the development version, as shown here: https://www.pandapower.org/start/#develop\r\n\r\nI have a brand new virtual environment on Python 3.8.3 (Windows 10, 64 bits), and the tests failed because of the following missing dependencies:\r\n\r\n> Edit: Same result on Python 3.7.8.\r\n\r\n1. xlsxwriter: `FAILED pandapower\\test\\api\\test_file_io.py::test_excel[1] - ModuleNotFoundError: No module named 'xlsxwriter'`\r\n2. xlrd: `FAILED pandapower\\test\\api\\test_file_io.py::test_excel[1] - ImportError: Missing optional dependency 'xlrd'. Install xlrd >= 1.0.0 for Excel support Use pip or conda to install xlrd.`\r\n3. cryptography: `FAILED pandapower\\test\\api\\test_file_io.py::test_encrypted_json[1] - ModuleNotFoundError: No module named 'cryptography'`\r\n\r\nThe permanent solution would most likely be to add those to setup.py and mention them in the documentation, but you might want to check if you should restrict the version.\r\n\r\nP.S.: The tests still ended up failing, but that's a seperate issue (see issue #876 ).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. 
All rights reserved.\n\nfrom setuptools import setup, find_packages\nimport re\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\n\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3']\n\nwith open('.travis.yml', 'rb') as f:\n lines = f.read().decode('utf-8')\n for version in re.findall('python: 3.[0-9]', lines):\n classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='2.3.0',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',\n long_description=long_description,\n\tlong_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=0.17\",\n \"networkx\",\n \"scipy\",\n \"numpy>=0.11\",\n \"packaging\"],\n extras_require={\":python_version<'3.0'\": [\"future\"],\n \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n \"test\": [\"pytest\", \"pytest-xdist\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n)\n", "path": "setup.py"}, {"content": "__version__ = \"2.3.0\"\n\nimport os\npp_dir = os.path.dirname(os.path.realpath(__file__))\n\nfrom pandapower.auxiliary import *\nfrom pandapower.convert_format import *\nfrom pandapower.create import *\nfrom pandapower.diagnostic import *\nfrom pandapower.file_io import *\nfrom pandapower.run import *\nfrom pandapower.runpm import *\nfrom pandapower.std_types import *\nfrom pandapower.toolbox import *\nfrom pandapower.powerflow import *\nfrom pandapower.opf import *\nfrom pandapower.optimal_powerflow import OPFNotConverged\nfrom pandapower.pf.runpp_3ph import runpp_3ph\nimport pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\n", "path": "pandapower/__init__.py"}]}
| 1,670 | 489 |
gh_patches_debug_19563
|
rasdani/github-patches
|
git_diff
|
Flexget__Flexget-1345
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError with Form Login Plugin
### Expected behaviour:
Task runs without generating error.
### Actual behaviour:
Task runs and generates the following error
```
TypeError: must be unicode, not str
```
### Steps to reproduce:
- Step 1: Install latest version of Flexget using virtualenv
- Step 2: pip install mechanize
- Step 3: Create config.yml
- Step 4: flexget --test execute
#### Config:
```
tasks:
test task:
form:
url: http://example.com/login.php
username: email address
password: password
html:
url: http://example.com/
```
#### Log:
Crash:
```
2016-08-16 11:40 DEBUG manager test task Traceback:
Traceback (most recent call last):
File "/home/username/flexget/local/lib/python2.7/site-packages/flexget/task.py", line 444, in __run_plugin
return method(*args, **kwargs)
File "/home/username/flexget/local/lib/python2.7/site-packages/flexget/event.py", line 23, in __call__
return self.func(*args, **kwargs)
File "/home/username/flexget/local/lib/python2.7/site-packages/flexget/plugins/plugin_formlogin.py", line 73, in on_task_start
f.write(br.response().get_data())
TypeError: must be unicode, not str
2016-08-16 11:40 WARNING task test task Aborting task (plugin: form)
2016-08-16 11:40 DEBUG task_queue task test task aborted: TaskAbort(reason=BUG: Unhandled error in plugin form: must be unicode, not str, silent=False)
```
Full log.
```
http://pastebin.com/yBRqhYjR
```
### Additional information:
- Flexget Version: 2.2.20
- Python Version: 2.7.9
- Installation method: Virtualenv
- OS and version: Debian 8
</issue>
<code>
[start of flexget/plugins/plugin_formlogin.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import os
6 import socket
7
8 from flexget import plugin
9 from flexget.event import event
10
11 log = logging.getLogger('formlogin')
12
13
14 class FormLogin(object):
15 """
16 Login on form
17 """
18
19 schema = {
20 'type': 'object',
21 'properties': {
22 'url': {'type': 'string', 'format': 'url'},
23 'username': {'type': 'string'},
24 'password': {'type': 'string'},
25 'userfield': {'type': 'string'},
26 'passfield': {'type': 'string'}
27 },
28 'required': ['url', 'username', 'password'],
29 'additionalProperties': False
30 }
31
32 def on_task_start(self, task, config):
33 try:
34 from mechanize import Browser
35 except ImportError:
36 raise plugin.PluginError('mechanize required (python module), please install it.', log)
37
38 userfield = config.get('userfield', 'username')
39 passfield = config.get('passfield', 'password')
40
41 url = config['url']
42 username = config['username']
43 password = config['password']
44
45 br = Browser()
46 br.set_handle_robots(False)
47 try:
48 br.open(url)
49 except Exception:
50 # TODO: improve error handling
51 raise plugin.PluginError('Unable to post login form', log)
52
53 # br.set_debug_redirects(True)
54 # br.set_debug_responses(True)
55 # br.set_debug_http(True)
56
57 try:
58 for form in br.forms():
59 loginform = form
60
61 try:
62 loginform[userfield] = username
63 loginform[passfield] = password
64 break
65 except Exception:
66 pass
67 else:
68 received = os.path.join(task.manager.config_base, 'received')
69 if not os.path.isdir(received):
70 os.mkdir(received)
71 filename = os.path.join(received, '%s.formlogin.html' % task.name)
72 with open(filename, 'w') as f:
73 f.write(br.response().get_data())
74 log.critical('I have saved the login page content to %s for you to view' % filename)
75 raise plugin.PluginError('Unable to find login fields', log)
76 except socket.timeout:
77 raise plugin.PluginError('Timed out on url %s' % url)
78
79 br.form = loginform
80
81 br.submit()
82
83 cookiejar = br._ua_handlers["_cookies"].cookiejar
84
85 # Add cookiejar to our requests session
86 task.requests.add_cookiejar(cookiejar)
87
88
89 @event('plugin.register')
90 def register_plugin():
91 plugin.register(FormLogin, 'form', api_ver=2)
92
[end of flexget/plugins/plugin_formlogin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/flexget/plugins/plugin_formlogin.py b/flexget/plugins/plugin_formlogin.py
--- a/flexget/plugins/plugin_formlogin.py
+++ b/flexget/plugins/plugin_formlogin.py
@@ -2,6 +2,7 @@
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
+import io
import os
import socket
@@ -69,7 +70,7 @@
if not os.path.isdir(received):
os.mkdir(received)
filename = os.path.join(received, '%s.formlogin.html' % task.name)
- with open(filename, 'w') as f:
+ with io.open(filename, 'wb') as f:
f.write(br.response().get_data())
log.critical('I have saved the login page content to %s for you to view' % filename)
raise plugin.PluginError('Unable to find login fields', log)
|
{"golden_diff": "diff --git a/flexget/plugins/plugin_formlogin.py b/flexget/plugins/plugin_formlogin.py\n--- a/flexget/plugins/plugin_formlogin.py\n+++ b/flexget/plugins/plugin_formlogin.py\n@@ -2,6 +2,7 @@\n from builtins import * # pylint: disable=unused-import, redefined-builtin\n \n import logging\n+import io\n import os\n import socket\n \n@@ -69,7 +70,7 @@\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n- with open(filename, 'w') as f:\n+ with io.open(filename, 'wb') as f:\n f.write(br.response().get_data())\n log.critical('I have saved the login page content to %s for you to view' % filename)\n raise plugin.PluginError('Unable to find login fields', log)\n", "issue": "TypeError with Form Login Plugin\n### Expected behaviour:\n\nTask runs without generating error.\n### Actual behaviour:\n\nTask runs and generates the following error\n\n```\nTypeError: must be unicode, not str\n```\n### Steps to reproduce:\n- Step 1: Install latest version of Flexget using virtualenv\n- Step 2: pip install mechanize\n- Step 3: Create config.yml\n- Step 4: flexget --test execute\n#### Config:\n\n```\ntasks:\n test task:\n form:\n url: http://example.com/login.php\n username: email address\n password: password\n html:\n url: http://example.com/\n```\n#### Log:\n\nCrash:\n\n```\n2016-08-16 11:40 DEBUG manager test task Traceback:\nTraceback (most recent call last):\n File \"/home/username/flexget/local/lib/python2.7/site-packages/flexget/task.py\", line 444, in __run_plugin\n return method(*args, **kwargs)\n File \"/home/username/flexget/local/lib/python2.7/site-packages/flexget/event.py\", line 23, in __call__\n return self.func(*args, **kwargs)\n File \"/home/username/flexget/local/lib/python2.7/site-packages/flexget/plugins/plugin_formlogin.py\", line 73, in on_task_start\n f.write(br.response().get_data())\nTypeError: must be unicode, not str\n2016-08-16 11:40 WARNING task test task Aborting task (plugin: form)\n2016-08-16 11:40 DEBUG task_queue task test task aborted: TaskAbort(reason=BUG: Unhandled error in plugin form: must be unicode, not str, silent=False)\n```\n\nFull log.\n\n```\nhttp://pastebin.com/yBRqhYjR\n```\n### Additional information:\n- Flexget Version: 2.2.20\n- Python Version: 2.7.9\n- Installation method: Virtualenv\n- OS and version: Debian 8\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport os\nimport socket\n\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('formlogin')\n\n\nclass FormLogin(object):\n \"\"\"\n Login on form\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {'type': 'string', 'format': 'url'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'userfield': {'type': 'string'},\n 'passfield': {'type': 'string'}\n },\n 'required': ['url', 'username', 'password'],\n 'additionalProperties': False\n }\n\n def on_task_start(self, task, config):\n try:\n from mechanize import Browser\n except ImportError:\n raise plugin.PluginError('mechanize required (python module), please install it.', log)\n\n userfield = config.get('userfield', 'username')\n passfield = config.get('passfield', 'password')\n\n url = config['url']\n username = config['username']\n password = config['password']\n\n br = Browser()\n br.set_handle_robots(False)\n try:\n br.open(url)\n except 
Exception:\n # TODO: improve error handling\n raise plugin.PluginError('Unable to post login form', log)\n\n # br.set_debug_redirects(True)\n # br.set_debug_responses(True)\n # br.set_debug_http(True)\n\n try:\n for form in br.forms():\n loginform = form\n\n try:\n loginform[userfield] = username\n loginform[passfield] = password\n break\n except Exception:\n pass\n else:\n received = os.path.join(task.manager.config_base, 'received')\n if not os.path.isdir(received):\n os.mkdir(received)\n filename = os.path.join(received, '%s.formlogin.html' % task.name)\n with open(filename, 'w') as f:\n f.write(br.response().get_data())\n log.critical('I have saved the login page content to %s for you to view' % filename)\n raise plugin.PluginError('Unable to find login fields', log)\n except socket.timeout:\n raise plugin.PluginError('Timed out on url %s' % url)\n\n br.form = loginform\n\n br.submit()\n\n cookiejar = br._ua_handlers[\"_cookies\"].cookiejar\n\n # Add cookiejar to our requests session\n task.requests.add_cookiejar(cookiejar)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FormLogin, 'form', api_ver=2)\n", "path": "flexget/plugins/plugin_formlogin.py"}]}
| 1,782 | 203 |
gh_patches_debug_32338
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-42472
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update GitHub integration to support generic issue types (ticketing feature)
Update the GitHub integration so that it displays the `IssueEvidence` for a generic issue type. See https://github.com/getsentry/sentry/pull/41041 for how it was done for performance issues as it'll be similar.
</issue>
<code>
[start of src/sentry/integrations/github/issues.py]
1 from __future__ import annotations
2
3 from typing import Any, Mapping, Sequence
4
5 from django.urls import reverse
6
7 from sentry.eventstore.models import Event
8 from sentry.integrations.mixins.issues import MAX_CHAR, IssueBasicMixin
9 from sentry.models import ExternalIssue, Group, User
10 from sentry.shared_integrations.exceptions import ApiError, IntegrationError
11 from sentry.types.issues import GroupCategory
12 from sentry.utils.http import absolute_uri
13 from sentry.utils.strings import truncatechars
14
15
16 class GitHubIssueBasic(IssueBasicMixin): # type: ignore
17 def make_external_key(self, data: Mapping[str, Any]) -> str:
18 return "{}#{}".format(data["repo"], data["key"])
19
20 def get_issue_url(self, key: str) -> str:
21 domain_name, user = self.model.metadata["domain_name"].split("/")
22 repo, issue_id = key.split("#")
23 return f"https://{domain_name}/{repo}/issues/{issue_id}"
24
25 def get_performance_issue_body(self, event: Event) -> str:
26 (
27 transaction_name,
28 parent_span,
29 num_repeating_spans,
30 repeating_spans,
31 ) = self.get_performance_issue_description_data(event)
32
33 body = "| | |\n"
34 body += "| ------------- | --------------- |\n"
35 body += f"| **Transaction Name** | {truncatechars(transaction_name, MAX_CHAR)} |\n"
36 body += f"| **Parent Span** | {truncatechars(parent_span, MAX_CHAR)} |\n"
37 body += f"| **Repeating Spans ({num_repeating_spans})** | {truncatechars(repeating_spans, MAX_CHAR)} |"
38 return body
39
40 def get_group_description(self, group: Group, event: Event, **kwargs: Any) -> str:
41 output = self.get_group_link(group, **kwargs)
42
43 if group.issue_category == GroupCategory.PERFORMANCE:
44 body = self.get_performance_issue_body(event)
45 output.extend([body])
46
47 else:
48 body = self.get_group_body(group, event)
49 if body:
50 output.extend(["", "```", body, "```"])
51 return "\n".join(output)
52
53 def after_link_issue(self, external_issue: ExternalIssue, **kwargs: Any) -> None:
54 data = kwargs["data"]
55 client = self.get_client()
56
57 repo, issue_num = external_issue.key.split("#")
58 if not repo:
59 raise IntegrationError("repo must be provided")
60
61 if not issue_num:
62 raise IntegrationError("issue number must be provided")
63
64 comment = data.get("comment")
65 if comment:
66 try:
67 client.create_comment(repo=repo, issue_id=issue_num, data={"body": comment})
68 except ApiError as e:
69 raise IntegrationError(self.message_from_error(e))
70
71 def get_persisted_default_config_fields(self) -> Sequence[str]:
72 return ["repo"]
73
74 def create_default_repo_choice(self, default_repo: str) -> tuple[str, str]:
75 return default_repo, default_repo.split("/")[1]
76
77 def get_create_issue_config(
78 self, group: Group, user: User, **kwargs: Any
79 ) -> Sequence[Mapping[str, Any]]:
80 kwargs["link_referrer"] = "github_integration"
81 fields = super().get_create_issue_config(group, user, **kwargs)
82 default_repo, repo_choices = self.get_repository_choices(group, **kwargs)
83
84 assignees = self.get_allowed_assignees(default_repo) if default_repo else []
85
86 org = group.organization
87 autocomplete_url = reverse(
88 "sentry-extensions-github-search", args=[org.slug, self.model.id]
89 )
90
91 return [
92 {
93 "name": "repo",
94 "label": "GitHub Repository",
95 "type": "select",
96 "default": default_repo,
97 "choices": repo_choices,
98 "url": autocomplete_url,
99 "updatesForm": True,
100 "required": True,
101 },
102 *fields,
103 {
104 "name": "assignee",
105 "label": "Assignee",
106 "default": "",
107 "type": "select",
108 "required": False,
109 "choices": assignees,
110 },
111 ]
112
113 def create_issue(self, data: Mapping[str, Any], **kwargs: Any) -> Mapping[str, Any]:
114 client = self.get_client()
115
116 repo = data.get("repo")
117
118 if not repo:
119 raise IntegrationError("repo kwarg must be provided")
120
121 try:
122 issue = client.create_issue(
123 repo=repo,
124 data={
125 "title": data["title"],
126 "body": data["description"],
127 "assignee": data.get("assignee"),
128 },
129 )
130 except ApiError as e:
131 raise IntegrationError(self.message_from_error(e))
132
133 return {
134 "key": issue["number"],
135 "title": issue["title"],
136 "description": issue["body"],
137 "url": issue["html_url"],
138 "repo": repo,
139 }
140
141 def get_link_issue_config(self, group: Group, **kwargs: Any) -> Sequence[Mapping[str, Any]]:
142 default_repo, repo_choices = self.get_repository_choices(group, **kwargs)
143
144 org = group.organization
145 autocomplete_url = reverse(
146 "sentry-extensions-github-search", args=[org.slug, self.model.id]
147 )
148
149 return [
150 {
151 "name": "repo",
152 "label": "GitHub Repository",
153 "type": "select",
154 "default": default_repo,
155 "choices": repo_choices,
156 "url": autocomplete_url,
157 "required": True,
158 "updatesForm": True,
159 },
160 {
161 "name": "externalIssue",
162 "label": "Issue",
163 "default": "",
164 "choices": [],
165 "type": "select",
166 "url": autocomplete_url,
167 "required": True,
168 },
169 {
170 "name": "comment",
171 "label": "Comment",
172 "default": "Sentry issue: [{issue_id}]({url})".format(
173 url=absolute_uri(
174 group.get_absolute_url(params={"referrer": "github_integration"})
175 ),
176 issue_id=group.qualified_short_id,
177 ),
178 "type": "textarea",
179 "required": False,
180 "autosize": True,
181 "help": "Leave blank if you don't want to add a comment to the GitHub issue.",
182 },
183 ]
184
185 def get_issue(self, issue_id: str, **kwargs: Any) -> Mapping[str, Any]:
186 data = kwargs["data"]
187 repo = data.get("repo")
188 issue_num = data.get("externalIssue")
189 client = self.get_client()
190
191 if not repo:
192 raise IntegrationError("repo must be provided")
193
194 if not issue_num:
195 raise IntegrationError("issue must be provided")
196
197 try:
198 issue = client.get_issue(repo, issue_num)
199 except ApiError as e:
200 raise IntegrationError(self.message_from_error(e))
201
202 return {
203 "key": issue["number"],
204 "title": issue["title"],
205 "description": issue["body"],
206 "url": issue["html_url"],
207 "repo": repo,
208 }
209
210 def get_allowed_assignees(self, repo: str) -> Sequence[tuple[str, str]]:
211 client = self.get_client()
212 try:
213 response = client.get_assignees(repo)
214 except Exception as e:
215 raise self.raise_error(e)
216
217 users = tuple((u["login"], u["login"]) for u in response)
218
219 return (("", "Unassigned"),) + users
220
221 def get_repo_issues(self, repo: str) -> Sequence[tuple[str, str]]:
222 client = self.get_client()
223 try:
224 response = client.get_issues(repo)
225 except Exception as e:
226 raise self.raise_error(e)
227
228 issues = tuple((i["number"], "#{} {}".format(i["number"], i["title"])) for i in response)
229
230 return issues
231
[end of src/sentry/integrations/github/issues.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/integrations/github/issues.py b/src/sentry/integrations/github/issues.py
--- a/src/sentry/integrations/github/issues.py
+++ b/src/sentry/integrations/github/issues.py
@@ -1,10 +1,11 @@
from __future__ import annotations
+from operator import attrgetter
from typing import Any, Mapping, Sequence
from django.urls import reverse
-from sentry.eventstore.models import Event
+from sentry.eventstore.models import Event, GroupEvent
from sentry.integrations.mixins.issues import MAX_CHAR, IssueBasicMixin
from sentry.models import ExternalIssue, Group, User
from sentry.shared_integrations.exceptions import ApiError, IntegrationError
@@ -37,13 +38,25 @@
body += f"| **Repeating Spans ({num_repeating_spans})** | {truncatechars(repeating_spans, MAX_CHAR)} |"
return body
- def get_group_description(self, group: Group, event: Event, **kwargs: Any) -> str:
+ def get_generic_issue_body(self, event: GroupEvent) -> str:
+ body = "| | |\n"
+ body += "| ------------- | --------------- |\n"
+ for evidence in sorted(
+ event.occurrence.evidence_display, key=attrgetter("important"), reverse=True
+ ):
+ body += f"| **{evidence.name}** | {truncatechars(evidence.value, MAX_CHAR)} |\n"
+
+ return body[:-2]
+
+ def get_group_description(self, group: Group, event: Event | GroupEvent, **kwargs: Any) -> str:
output = self.get_group_link(group, **kwargs)
if group.issue_category == GroupCategory.PERFORMANCE:
body = self.get_performance_issue_body(event)
output.extend([body])
-
+ elif isinstance(event, GroupEvent) and event.occurrence is not None:
+ body = self.get_generic_issue_body(event)
+ output.extend([body])
else:
body = self.get_group_body(group, event)
if body:
|
{"golden_diff": "diff --git a/src/sentry/integrations/github/issues.py b/src/sentry/integrations/github/issues.py\n--- a/src/sentry/integrations/github/issues.py\n+++ b/src/sentry/integrations/github/issues.py\n@@ -1,10 +1,11 @@\n from __future__ import annotations\n \n+from operator import attrgetter\n from typing import Any, Mapping, Sequence\n \n from django.urls import reverse\n \n-from sentry.eventstore.models import Event\n+from sentry.eventstore.models import Event, GroupEvent\n from sentry.integrations.mixins.issues import MAX_CHAR, IssueBasicMixin\n from sentry.models import ExternalIssue, Group, User\n from sentry.shared_integrations.exceptions import ApiError, IntegrationError\n@@ -37,13 +38,25 @@\n body += f\"| **Repeating Spans ({num_repeating_spans})** | {truncatechars(repeating_spans, MAX_CHAR)} |\"\n return body\n \n- def get_group_description(self, group: Group, event: Event, **kwargs: Any) -> str:\n+ def get_generic_issue_body(self, event: GroupEvent) -> str:\n+ body = \"| | |\\n\"\n+ body += \"| ------------- | --------------- |\\n\"\n+ for evidence in sorted(\n+ event.occurrence.evidence_display, key=attrgetter(\"important\"), reverse=True\n+ ):\n+ body += f\"| **{evidence.name}** | {truncatechars(evidence.value, MAX_CHAR)} |\\n\"\n+\n+ return body[:-2]\n+\n+ def get_group_description(self, group: Group, event: Event | GroupEvent, **kwargs: Any) -> str:\n output = self.get_group_link(group, **kwargs)\n \n if group.issue_category == GroupCategory.PERFORMANCE:\n body = self.get_performance_issue_body(event)\n output.extend([body])\n-\n+ elif isinstance(event, GroupEvent) and event.occurrence is not None:\n+ body = self.get_generic_issue_body(event)\n+ output.extend([body])\n else:\n body = self.get_group_body(group, event)\n if body:\n", "issue": "Update GitHub integration to support generic issue types (ticketing feature)\nUpdate the GitHub integration so that it displays the `IssueEvidence` for a generic issue type. 
See https://github.com/getsentry/sentry/pull/41041 for how it was done for performance issues as it'll be similar.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Mapping, Sequence\n\nfrom django.urls import reverse\n\nfrom sentry.eventstore.models import Event\nfrom sentry.integrations.mixins.issues import MAX_CHAR, IssueBasicMixin\nfrom sentry.models import ExternalIssue, Group, User\nfrom sentry.shared_integrations.exceptions import ApiError, IntegrationError\nfrom sentry.types.issues import GroupCategory\nfrom sentry.utils.http import absolute_uri\nfrom sentry.utils.strings import truncatechars\n\n\nclass GitHubIssueBasic(IssueBasicMixin): # type: ignore\n def make_external_key(self, data: Mapping[str, Any]) -> str:\n return \"{}#{}\".format(data[\"repo\"], data[\"key\"])\n\n def get_issue_url(self, key: str) -> str:\n domain_name, user = self.model.metadata[\"domain_name\"].split(\"/\")\n repo, issue_id = key.split(\"#\")\n return f\"https://{domain_name}/{repo}/issues/{issue_id}\"\n\n def get_performance_issue_body(self, event: Event) -> str:\n (\n transaction_name,\n parent_span,\n num_repeating_spans,\n repeating_spans,\n ) = self.get_performance_issue_description_data(event)\n\n body = \"| | |\\n\"\n body += \"| ------------- | --------------- |\\n\"\n body += f\"| **Transaction Name** | {truncatechars(transaction_name, MAX_CHAR)} |\\n\"\n body += f\"| **Parent Span** | {truncatechars(parent_span, MAX_CHAR)} |\\n\"\n body += f\"| **Repeating Spans ({num_repeating_spans})** | {truncatechars(repeating_spans, MAX_CHAR)} |\"\n return body\n\n def get_group_description(self, group: Group, event: Event, **kwargs: Any) -> str:\n output = self.get_group_link(group, **kwargs)\n\n if group.issue_category == GroupCategory.PERFORMANCE:\n body = self.get_performance_issue_body(event)\n output.extend([body])\n\n else:\n body = self.get_group_body(group, event)\n if body:\n output.extend([\"\", \"```\", body, \"```\"])\n return \"\\n\".join(output)\n\n def after_link_issue(self, external_issue: ExternalIssue, **kwargs: Any) -> None:\n data = kwargs[\"data\"]\n client = self.get_client()\n\n repo, issue_num = external_issue.key.split(\"#\")\n if not repo:\n raise IntegrationError(\"repo must be provided\")\n\n if not issue_num:\n raise IntegrationError(\"issue number must be provided\")\n\n comment = data.get(\"comment\")\n if comment:\n try:\n client.create_comment(repo=repo, issue_id=issue_num, data={\"body\": comment})\n except ApiError as e:\n raise IntegrationError(self.message_from_error(e))\n\n def get_persisted_default_config_fields(self) -> Sequence[str]:\n return [\"repo\"]\n\n def create_default_repo_choice(self, default_repo: str) -> tuple[str, str]:\n return default_repo, default_repo.split(\"/\")[1]\n\n def get_create_issue_config(\n self, group: Group, user: User, **kwargs: Any\n ) -> Sequence[Mapping[str, Any]]:\n kwargs[\"link_referrer\"] = \"github_integration\"\n fields = super().get_create_issue_config(group, user, **kwargs)\n default_repo, repo_choices = self.get_repository_choices(group, **kwargs)\n\n assignees = self.get_allowed_assignees(default_repo) if default_repo else []\n\n org = group.organization\n autocomplete_url = reverse(\n \"sentry-extensions-github-search\", args=[org.slug, self.model.id]\n )\n\n return [\n {\n \"name\": \"repo\",\n \"label\": \"GitHub Repository\",\n \"type\": \"select\",\n \"default\": default_repo,\n \"choices\": repo_choices,\n \"url\": autocomplete_url,\n \"updatesForm\": True,\n 
\"required\": True,\n },\n *fields,\n {\n \"name\": \"assignee\",\n \"label\": \"Assignee\",\n \"default\": \"\",\n \"type\": \"select\",\n \"required\": False,\n \"choices\": assignees,\n },\n ]\n\n def create_issue(self, data: Mapping[str, Any], **kwargs: Any) -> Mapping[str, Any]:\n client = self.get_client()\n\n repo = data.get(\"repo\")\n\n if not repo:\n raise IntegrationError(\"repo kwarg must be provided\")\n\n try:\n issue = client.create_issue(\n repo=repo,\n data={\n \"title\": data[\"title\"],\n \"body\": data[\"description\"],\n \"assignee\": data.get(\"assignee\"),\n },\n )\n except ApiError as e:\n raise IntegrationError(self.message_from_error(e))\n\n return {\n \"key\": issue[\"number\"],\n \"title\": issue[\"title\"],\n \"description\": issue[\"body\"],\n \"url\": issue[\"html_url\"],\n \"repo\": repo,\n }\n\n def get_link_issue_config(self, group: Group, **kwargs: Any) -> Sequence[Mapping[str, Any]]:\n default_repo, repo_choices = self.get_repository_choices(group, **kwargs)\n\n org = group.organization\n autocomplete_url = reverse(\n \"sentry-extensions-github-search\", args=[org.slug, self.model.id]\n )\n\n return [\n {\n \"name\": \"repo\",\n \"label\": \"GitHub Repository\",\n \"type\": \"select\",\n \"default\": default_repo,\n \"choices\": repo_choices,\n \"url\": autocomplete_url,\n \"required\": True,\n \"updatesForm\": True,\n },\n {\n \"name\": \"externalIssue\",\n \"label\": \"Issue\",\n \"default\": \"\",\n \"choices\": [],\n \"type\": \"select\",\n \"url\": autocomplete_url,\n \"required\": True,\n },\n {\n \"name\": \"comment\",\n \"label\": \"Comment\",\n \"default\": \"Sentry issue: [{issue_id}]({url})\".format(\n url=absolute_uri(\n group.get_absolute_url(params={\"referrer\": \"github_integration\"})\n ),\n issue_id=group.qualified_short_id,\n ),\n \"type\": \"textarea\",\n \"required\": False,\n \"autosize\": True,\n \"help\": \"Leave blank if you don't want to add a comment to the GitHub issue.\",\n },\n ]\n\n def get_issue(self, issue_id: str, **kwargs: Any) -> Mapping[str, Any]:\n data = kwargs[\"data\"]\n repo = data.get(\"repo\")\n issue_num = data.get(\"externalIssue\")\n client = self.get_client()\n\n if not repo:\n raise IntegrationError(\"repo must be provided\")\n\n if not issue_num:\n raise IntegrationError(\"issue must be provided\")\n\n try:\n issue = client.get_issue(repo, issue_num)\n except ApiError as e:\n raise IntegrationError(self.message_from_error(e))\n\n return {\n \"key\": issue[\"number\"],\n \"title\": issue[\"title\"],\n \"description\": issue[\"body\"],\n \"url\": issue[\"html_url\"],\n \"repo\": repo,\n }\n\n def get_allowed_assignees(self, repo: str) -> Sequence[tuple[str, str]]:\n client = self.get_client()\n try:\n response = client.get_assignees(repo)\n except Exception as e:\n raise self.raise_error(e)\n\n users = tuple((u[\"login\"], u[\"login\"]) for u in response)\n\n return ((\"\", \"Unassigned\"),) + users\n\n def get_repo_issues(self, repo: str) -> Sequence[tuple[str, str]]:\n client = self.get_client()\n try:\n response = client.get_issues(repo)\n except Exception as e:\n raise self.raise_error(e)\n\n issues = tuple((i[\"number\"], \"#{} {}\".format(i[\"number\"], i[\"title\"])) for i in response)\n\n return issues\n", "path": "src/sentry/integrations/github/issues.py"}]}
| 2,896 | 458 |
gh_patches_debug_3564
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-2369
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SystemError: Parent module 'setuptools' not loaded, cannot perform relative import with setuptools 50
After upgrading setuptools to 50.0 today, the environment fails to locate the entry points as it could not import distutils
```
$ python --version
Python 3.5.1
$ python -c "import distutils"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "<frozen importlib._bootstrap>", line 969, in _find_and_load
File "<frozen importlib._bootstrap>", line 958, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 666, in _load_unlocked
File "<frozen importlib._bootstrap>", line 577, in module_from_spec
File "/home/gchan/tmp/setuptools-python-3.5/lib/python3.5/site-packages/_distutils_hack/__init__.py", line 82, in create_module
return importlib.import_module('._distutils', 'setuptools')
File "/home/gchan/tmp/setuptools-python-3.5/lib64/python3.5/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 981, in _gcd_import
File "<frozen importlib._bootstrap>", line 931, in _sanity_check
SystemError: Parent module 'setuptools' not loaded, cannot perform relative import
```
The issue could not be found in the python 3.8 environment.
</issue>
<code>
[start of _distutils_hack/__init__.py]
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 def warn_distutils_present():
12 if 'distutils' not in sys.modules:
13 return
14 if is_pypy and sys.version_info < (3, 7):
15 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
16 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
17 return
18 warnings.warn(
19 "Distutils was imported before Setuptools, but importing Setuptools "
20 "also replaces the `distutils` module in `sys.modules`. This may lead "
21 "to undesirable behaviors or errors. To avoid these issues, avoid "
22 "using distutils directly, ensure that setuptools is installed in the "
23 "traditional way (e.g. not an editable install), and/or make sure that "
24 "setuptools is always imported before distutils.")
25
26
27 def clear_distutils():
28 if 'distutils' not in sys.modules:
29 return
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
32 for name in mods:
33 del sys.modules[name]
34
35
36 def enabled():
37 """
38 Allow selection of distutils by environment variable.
39 """
40 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
41 return which == 'local'
42
43
44 def ensure_local_distutils():
45 clear_distutils()
46 distutils = importlib.import_module('setuptools._distutils')
47 distutils.__name__ = 'distutils'
48 sys.modules['distutils'] = distutils
49
50 # sanity check that submodules load as expected
51 core = importlib.import_module('distutils.core')
52 assert '_distutils' in core.__file__, core.__file__
53
54
55 def do_override():
56 """
57 Ensure that the local copy of distutils is preferred over stdlib.
58
59 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
60 for more motivation.
61 """
62 if enabled():
63 warn_distutils_present()
64 ensure_local_distutils()
65
66
67 class DistutilsMetaFinder:
68 def find_spec(self, fullname, path, target=None):
69 if path is not None:
70 return
71
72 method_name = 'spec_for_{fullname}'.format(**locals())
73 method = getattr(self, method_name, lambda: None)
74 return method()
75
76 def spec_for_distutils(self):
77 import importlib.abc
78 import importlib.util
79
80 class DistutilsLoader(importlib.abc.Loader):
81
82 def create_module(self, spec):
83 return importlib.import_module('._distutils', 'setuptools')
84
85 def exec_module(self, module):
86 pass
87
88 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
89
90 def spec_for_pip(self):
91 """
92 Ensure stdlib distutils when running under pip.
93 See pypa/pip#8761 for rationale.
94 """
95 clear_distutils()
96 self.spec_for_distutils = lambda: None
97
98
99 DISTUTILS_FINDER = DistutilsMetaFinder()
100
101
102 def add_shim():
103 sys.meta_path.insert(0, DISTUTILS_FINDER)
104
105
106 def remove_shim():
107 try:
108 sys.meta_path.remove(DISTUTILS_FINDER)
109 except ValueError:
110 pass
111
[end of _distutils_hack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -80,7 +80,7 @@
class DistutilsLoader(importlib.abc.Loader):
def create_module(self, spec):
- return importlib.import_module('._distutils', 'setuptools')
+ return importlib.import_module('setuptools._distutils')
def exec_module(self, module):
pass
|
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -80,7 +80,7 @@\n class DistutilsLoader(importlib.abc.Loader):\n \n def create_module(self, spec):\n- return importlib.import_module('._distutils', 'setuptools')\n+ return importlib.import_module('setuptools._distutils')\n \n def exec_module(self, module):\n pass\n", "issue": "SystemError: Parent module 'setuptools' not loaded, cannot perform relative import with setuptools 50\nAfter upgrading setuptools to 50.0 today, the environment fails to locate the entry points as it could not import distutils\r\n\r\n```\r\n$ python --version\r\nPython 3.5.1\r\n$ python -c \"import distutils\"\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"<frozen importlib._bootstrap>\", line 969, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 958, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 666, in _load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 577, in module_from_spec\r\n File \"/home/gchan/tmp/setuptools-python-3.5/lib/python3.5/site-packages/_distutils_hack/__init__.py\", line 82, in create_module\r\n return importlib.import_module('._distutils', 'setuptools')\r\n File \"/home/gchan/tmp/setuptools-python-3.5/lib64/python3.5/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 981, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 931, in _sanity_check\r\nSystemError: Parent module 'setuptools' not loaded, cannot perform relative import\r\n```\r\n\r\nThe issue could not be found in the python 3.8 environment. \n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure that \"\n \"setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('._distutils', 'setuptools')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]}
| 1,941 | 122 |
gh_patches_debug_26474
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-2241
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Weird behavior of `CosineSimilarityMetric` when used with tensors of shape [d]
## 🐛 Bug?
According to the documentation, `CosineSimilarityMetric` requires tensors of shape `[N, d]`, with `N` the batch size and `d` the dimension of the vectors.
Using it with vectors of shape `[d]` does not raise any error, and the call to `compute` behaves weirdly and gives confusing results. I'm not sure whether this is the expected behavior, a bug, or if this usage is simply unintended (and then maybe an error could have been raised).
### To Reproduce
```python
>>> from torchmetrics import CosineSimilarity
>>> from torch import tensor
>>>
>>> cosine_similarity = CosineSimilarity(reduction="mean")
>>> a = tensor([1., 1., 1.])
>>> b = tensor([100., 100., 100.])
>>> cosine_similarity(a, b)
tensor(1.) # a and b have the same direction, so this is normal.
>>> cosine_similarity(b, a)
tensor(1.) # same for b and a.
>>> cosine_similarity.compute()
tensor(0.0200) # I would expect this to be 1 too (the average of the 2 previous calls).
```
The obtained result (0.02) is actually the cosine similarity between `[1, 1, 1, 100, 100, 100]` and `[100, 100, 100, 1, 1, 1]`. I would have expected to get instead the average between the cosine similarity of `[1, 1, 1]` and `[100, 100, 100]` and the cosine similarity of `[100, 100, 100]` and `[1, 1, 1]`, which is 1.
If instead we use it as the documentation says, with tensors of shape `[N, d]`, we get different results:
```python
>>> from torchmetrics import CosineSimilarity
>>> from torch import tensor
>>>
>>> cosine_similarity = CosineSimilarity(reduction="mean")
>>> a = tensor([[1., 1., 1.]]) # tensor of shape [1, 3] instead of [3]
>>> b = tensor([[100., 100., 100.]]) # tensor of shape [1, 3] instead of [3]
>>> cosine_similarity(a, b)
tensor(1.)
>>> cosine_similarity(b, a)
tensor(1.)
>>> cosine_similarity.compute()
tensor(1.) # 1 instead of 0.02
```
Environment:
- TorchMetrics 1.2.0
- Python 3.10.10
- torch 2.1.1
- Ubuntu 20.04.6 LTS
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of src/torchmetrics/functional/regression/cosine_similarity.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional, Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.utilities.checks import _check_same_shape
20
21
22 def _cosine_similarity_update(
23 preds: Tensor,
24 target: Tensor,
25 ) -> Tuple[Tensor, Tensor]:
26 """Update and returns variables required to compute Cosine Similarity. Checks for same shape of input tensors.
27
28 Args:
29 preds: Predicted tensor
30 target: Ground truth tensor
31
32 """
33 _check_same_shape(preds, target)
34 preds = preds.float()
35 target = target.float()
36
37 return preds, target
38
39
40 def _cosine_similarity_compute(preds: Tensor, target: Tensor, reduction: Optional[str] = "sum") -> Tensor:
41 """Compute Cosine Similarity.
42
43 Args:
44 preds: Predicted tensor
45 target: Ground truth tensor
46 reduction:
47 The method of reducing along the batch dimension using sum, mean or taking the individual scores
48
49 Example:
50 >>> target = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]])
51 >>> preds = torch.tensor([[1, 2, 3, 4], [-1, -2, -3, -4]])
52 >>> preds, target = _cosine_similarity_update(preds, target)
53 >>> _cosine_similarity_compute(preds, target, 'none')
54 tensor([ 1.0000, -1.0000])
55
56 """
57 dot_product = (preds * target).sum(dim=-1)
58 preds_norm = preds.norm(dim=-1)
59 target_norm = target.norm(dim=-1)
60 similarity = dot_product / (preds_norm * target_norm)
61 reduction_mapping = {
62 "sum": torch.sum,
63 "mean": torch.mean,
64 "none": lambda x: x,
65 None: lambda x: x,
66 }
67 return reduction_mapping[reduction](similarity) # type: ignore[operator]
68
69
70 def cosine_similarity(preds: Tensor, target: Tensor, reduction: Optional[str] = "sum") -> Tensor:
71 r"""Compute the `Cosine Similarity`_.
72
73 .. math::
74 cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} =
75 \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}}
76
77 where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.
78
79 Args:
80 preds: Predicted tensor with shape ``(N,d)``
81 target: Ground truth tensor with shape ``(N,d)``
82 reduction:
83 The method of reducing along the batch dimension using sum, mean or taking the individual scores
84
85 Example:
86 >>> from torchmetrics.functional.regression import cosine_similarity
87 >>> target = torch.tensor([[1, 2, 3, 4],
88 ... [1, 2, 3, 4]])
89 >>> preds = torch.tensor([[1, 2, 3, 4],
90 ... [-1, -2, -3, -4]])
91 >>> cosine_similarity(preds, target, 'none')
92 tensor([ 1.0000, -1.0000])
93
94 """
95 preds, target = _cosine_similarity_update(preds, target)
96 return _cosine_similarity_compute(preds, target, reduction)
97
[end of src/torchmetrics/functional/regression/cosine_similarity.py]
[start of src/torchmetrics/regression/cosine_similarity.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, List, Optional, Sequence, Union
15
16 from torch import Tensor
17 from typing_extensions import Literal
18
19 from torchmetrics.functional.regression.cosine_similarity import _cosine_similarity_compute, _cosine_similarity_update
20 from torchmetrics.metric import Metric
21 from torchmetrics.utilities.data import dim_zero_cat
22 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
23 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
24
25 if not _MATPLOTLIB_AVAILABLE:
26 __doctest_skip__ = ["CosineSimilarity.plot"]
27
28
29 class CosineSimilarity(Metric):
30 r"""Compute the `Cosine Similarity`_.
31
32 .. math::
33 cos_{sim}(x,y) = \frac{x \cdot y}{||x|| \cdot ||y||} =
34 \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2}\sqrt{\sum_{i=1}^n y_i^2}}
35
36 where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.
37
38 As input to ``forward`` and ``update`` the metric accepts the following input:
39
40 - ``preds`` (:class:`~torch.Tensor`): Predicted float tensor with shape ``(N,d)``
41 - ``target`` (:class:`~torch.Tensor`): Ground truth float tensor with shape ``(N,d)``
42
43 As output of ``forward`` and ``compute`` the metric returns the following output:
44
45 - ``cosine_similarity`` (:class:`~torch.Tensor`): A float tensor with the cosine similarity
46
47 Args:
48 reduction: how to reduce over the batch dimension using 'sum', 'mean' or 'none' (taking the individual scores)
49 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
50
51 Example:
52 >>> from torch import tensor
53 >>> from torchmetrics.regression import CosineSimilarity
54 >>> target = tensor([[0, 1], [1, 1]])
55 >>> preds = tensor([[0, 1], [0, 1]])
56 >>> cosine_similarity = CosineSimilarity(reduction = 'mean')
57 >>> cosine_similarity(preds, target)
58 tensor(0.8536)
59
60 """
61 is_differentiable: bool = True
62 higher_is_better: bool = True
63 full_state_update: bool = False
64 plot_lower_bound: float = 0.0
65 plot_upper_bound: float = 1.0
66
67 preds: List[Tensor]
68 target: List[Tensor]
69
70 def __init__(
71 self,
72 reduction: Literal["mean", "sum", "none", None] = "sum",
73 **kwargs: Any,
74 ) -> None:
75 super().__init__(**kwargs)
76 allowed_reduction = ("sum", "mean", "none", None)
77 if reduction not in allowed_reduction:
78 raise ValueError(f"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}")
79 self.reduction = reduction
80
81 self.add_state("preds", [], dist_reduce_fx="cat")
82 self.add_state("target", [], dist_reduce_fx="cat")
83
84 def update(self, preds: Tensor, target: Tensor) -> None:
85 """Update metric states with predictions and targets."""
86 preds, target = _cosine_similarity_update(preds, target)
87
88 self.preds.append(preds)
89 self.target.append(target)
90
91 def compute(self) -> Tensor:
92 """Compute metric."""
93 preds = dim_zero_cat(self.preds)
94 target = dim_zero_cat(self.target)
95 return _cosine_similarity_compute(preds, target, self.reduction)
96
97 def plot(
98 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
99 ) -> _PLOT_OUT_TYPE:
100 """Plot a single or multiple values from the metric.
101
102 Args:
103 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
104 If no value is provided, will automatically call `metric.compute` and plot that result.
105 ax: An matplotlib axis object. If provided will add plot to that axis
106
107 Returns:
108 Figure and Axes object
109
110 Raises:
111 ModuleNotFoundError:
112 If `matplotlib` is not installed
113
114 .. plot::
115 :scale: 75
116
117 >>> from torch import randn
118 >>> # Example plotting a single value
119 >>> from torchmetrics.regression import CosineSimilarity
120 >>> metric = CosineSimilarity()
121 >>> metric.update(randn(10,), randn(10,))
122 >>> fig_, ax_ = metric.plot()
123
124 .. plot::
125 :scale: 75
126
127 >>> from torch import randn
128 >>> # Example plotting multiple values
129 >>> from torchmetrics.regression import CosineSimilarity
130 >>> metric = CosineSimilarity()
131 >>> values = []
132 >>> for _ in range(10):
133 ... values.append(metric(randn(10,), randn(10,)))
134 >>> fig, ax = metric.plot(values)
135
136 """
137 return self._plot(val, ax)
138
[end of src/torchmetrics/regression/cosine_similarity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/functional/regression/cosine_similarity.py b/src/torchmetrics/functional/regression/cosine_similarity.py
--- a/src/torchmetrics/functional/regression/cosine_similarity.py
+++ b/src/torchmetrics/functional/regression/cosine_similarity.py
@@ -31,6 +31,11 @@
"""
_check_same_shape(preds, target)
+ if preds.ndim != 2:
+ raise ValueError(
+ "Expected input to cosine similarity to be 2D tensors of shape `[N,D]` where `N` is the number of samples"
+ f" and `D` is the number of dimensions, but got tensor of shape {preds.shape}"
+ )
preds = preds.float()
target = target.float()
diff --git a/src/torchmetrics/regression/cosine_similarity.py b/src/torchmetrics/regression/cosine_similarity.py
--- a/src/torchmetrics/regression/cosine_similarity.py
+++ b/src/torchmetrics/regression/cosine_similarity.py
@@ -118,7 +118,7 @@
>>> # Example plotting a single value
>>> from torchmetrics.regression import CosineSimilarity
>>> metric = CosineSimilarity()
- >>> metric.update(randn(10,), randn(10,))
+ >>> metric.update(randn(10,2), randn(10,2))
>>> fig_, ax_ = metric.plot()
.. plot::
@@ -130,7 +130,7 @@
>>> metric = CosineSimilarity()
>>> values = []
>>> for _ in range(10):
- ... values.append(metric(randn(10,), randn(10,)))
+ ... values.append(metric(randn(10,2), randn(10,2)))
>>> fig, ax = metric.plot(values)
"""
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/cosine_similarity.py b/src/torchmetrics/functional/regression/cosine_similarity.py\n--- a/src/torchmetrics/functional/regression/cosine_similarity.py\n+++ b/src/torchmetrics/functional/regression/cosine_similarity.py\n@@ -31,6 +31,11 @@\n \n \"\"\"\n _check_same_shape(preds, target)\n+ if preds.ndim != 2:\n+ raise ValueError(\n+ \"Expected input to cosine similarity to be 2D tensors of shape `[N,D]` where `N` is the number of samples\"\n+ f\" and `D` is the number of dimensions, but got tensor of shape {preds.shape}\"\n+ )\n preds = preds.float()\n target = target.float()\n \ndiff --git a/src/torchmetrics/regression/cosine_similarity.py b/src/torchmetrics/regression/cosine_similarity.py\n--- a/src/torchmetrics/regression/cosine_similarity.py\n+++ b/src/torchmetrics/regression/cosine_similarity.py\n@@ -118,7 +118,7 @@\n >>> # Example plotting a single value\n >>> from torchmetrics.regression import CosineSimilarity\n >>> metric = CosineSimilarity()\n- >>> metric.update(randn(10,), randn(10,))\n+ >>> metric.update(randn(10,2), randn(10,2))\n >>> fig_, ax_ = metric.plot()\n \n .. plot::\n@@ -130,7 +130,7 @@\n >>> metric = CosineSimilarity()\n >>> values = []\n >>> for _ in range(10):\n- ... values.append(metric(randn(10,), randn(10,)))\n+ ... values.append(metric(randn(10,2), randn(10,2)))\n >>> fig, ax = metric.plot(values)\n \n \"\"\"\n", "issue": "Weird behavior of `CosineSimilarityMetric` when used with tensors of shape [d]\n## \ud83d\udc1b Bug?\r\n\r\nAccording to the documentation, `CosineSimilarityMetric` requires tensors of shape `[N, d]`, with `N` the batch size and `d` the dimension of the vectors.\r\n\r\nUsing it with vectors of shape `[d]` does not raise any error, and the call to `compute` behaves weirdly and gives confusing results. I'm not sure whether this is the expected behavior, a bug, or if this usage is simply unintended (and then maybe an error could have been raised).\r\n\r\n### To Reproduce\r\n\r\n```python\r\n>>> from torchmetrics import CosineSimilarity\r\n>>> from torch import tensor\r\n>>> \r\n>>> cosine_similarity = CosineSimilarity(reduction=\"mean\")\r\n>>> a = tensor([1., 1., 1.])\r\n>>> b = tensor([100., 100., 100.])\r\n>>> cosine_similarity(a, b)\r\ntensor(1.) # a and b have the same direction, so this is normal.\r\n>>> cosine_similarity(b, a) \r\ntensor(1.) # same for b and a.\r\n>>> cosine_similarity.compute()\r\ntensor(0.0200) # I would expect this to be 1 too (the average of the 2 previous calls).\r\n```\r\nThe obtained result (0.02) is actually the cosine similarity between `[1, 1, 1, 100, 100, 100]` and `[100, 100, 100, 1, 1, 1]`. I would have expected to get instead the average between the cosine similarity of `[1, 1, 1]` and `[100, 100, 100]` and the cosine similarity of `[100, 100, 100]` and `[1, 1, 1]`, which is 1.\r\n\r\nIf instead we use it as the documentation says, with tensors of shape `[N, d]`, we get different results:\r\n```python\r\n>>> from torchmetrics import CosineSimilarity\r\n>>> from torch import tensor\r\n>>> \r\n>>> cosine_similarity = CosineSimilarity(reduction=\"mean\")\r\n>>> a = tensor([[1., 1., 1.]]) # tensor of shape [1, 3] instead of [3]\r\n>>> b = tensor([[100., 100., 100.]]) # tensor of shape [1, 3] instead of [3]\r\n>>> cosine_similarity(a, b)\r\ntensor(1.)\r\n>>> cosine_similarity(b, a)\r\ntensor(1.)\r\n>>> cosine_similarity.compute()\r\ntensor(1.) 
# 1 instead of 0.02\r\n```\r\n\r\nEnvironment:\r\n- TorchMetrics 1.2.0\r\n- Python 3.10.10\r\n- torch 2.1.1\r\n- Ubuntu 20.04.6 LTS\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _cosine_similarity_update(\n preds: Tensor,\n target: Tensor,\n) -> Tuple[Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Cosine Similarity. Checks for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n\n \"\"\"\n _check_same_shape(preds, target)\n preds = preds.float()\n target = target.float()\n\n return preds, target\n\n\ndef _cosine_similarity_compute(preds: Tensor, target: Tensor, reduction: Optional[str] = \"sum\") -> Tensor:\n \"\"\"Compute Cosine Similarity.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n reduction:\n The method of reducing along the batch dimension using sum, mean or taking the individual scores\n\n Example:\n >>> target = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]])\n >>> preds = torch.tensor([[1, 2, 3, 4], [-1, -2, -3, -4]])\n >>> preds, target = _cosine_similarity_update(preds, target)\n >>> _cosine_similarity_compute(preds, target, 'none')\n tensor([ 1.0000, -1.0000])\n\n \"\"\"\n dot_product = (preds * target).sum(dim=-1)\n preds_norm = preds.norm(dim=-1)\n target_norm = target.norm(dim=-1)\n similarity = dot_product / (preds_norm * target_norm)\n reduction_mapping = {\n \"sum\": torch.sum,\n \"mean\": torch.mean,\n \"none\": lambda x: x,\n None: lambda x: x,\n }\n return reduction_mapping[reduction](similarity) # type: ignore[operator]\n\n\ndef cosine_similarity(preds: Tensor, target: Tensor, reduction: Optional[str] = \"sum\") -> Tensor:\n r\"\"\"Compute the `Cosine Similarity`_.\n\n .. math::\n cos_{sim}(x,y) = \\frac{x \\cdot y}{||x|| \\cdot ||y||} =\n \\frac{\\sum_{i=1}^n x_i y_i}{\\sqrt{\\sum_{i=1}^n x_i^2}\\sqrt{\\sum_{i=1}^n y_i^2}}\n\n where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.\n\n Args:\n preds: Predicted tensor with shape ``(N,d)``\n target: Ground truth tensor with shape ``(N,d)``\n reduction:\n The method of reducing along the batch dimension using sum, mean or taking the individual scores\n\n Example:\n >>> from torchmetrics.functional.regression import cosine_similarity\n >>> target = torch.tensor([[1, 2, 3, 4],\n ... [1, 2, 3, 4]])\n >>> preds = torch.tensor([[1, 2, 3, 4],\n ... 
[-1, -2, -3, -4]])\n >>> cosine_similarity(preds, target, 'none')\n tensor([ 1.0000, -1.0000])\n\n \"\"\"\n preds, target = _cosine_similarity_update(preds, target)\n return _cosine_similarity_compute(preds, target, reduction)\n", "path": "src/torchmetrics/functional/regression/cosine_similarity.py"}, {"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Optional, Sequence, Union\n\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.regression.cosine_similarity import _cosine_similarity_compute, _cosine_similarity_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.data import dim_zero_cat\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"CosineSimilarity.plot\"]\n\n\nclass CosineSimilarity(Metric):\n r\"\"\"Compute the `Cosine Similarity`_.\n\n .. math::\n cos_{sim}(x,y) = \\frac{x \\cdot y}{||x|| \\cdot ||y||} =\n \\frac{\\sum_{i=1}^n x_i y_i}{\\sqrt{\\sum_{i=1}^n x_i^2}\\sqrt{\\sum_{i=1}^n y_i^2}}\n\n where :math:`y` is a tensor of target values, and :math:`x` is a tensor of predictions.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): Predicted float tensor with shape ``(N,d)``\n - ``target`` (:class:`~torch.Tensor`): Ground truth float tensor with shape ``(N,d)``\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``cosine_similarity`` (:class:`~torch.Tensor`): A float tensor with the cosine similarity\n\n Args:\n reduction: how to reduce over the batch dimension using 'sum', 'mean' or 'none' (taking the individual scores)\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> from torch import tensor\n >>> from torchmetrics.regression import CosineSimilarity\n >>> target = tensor([[0, 1], [1, 1]])\n >>> preds = tensor([[0, 1], [0, 1]])\n >>> cosine_similarity = CosineSimilarity(reduction = 'mean')\n >>> cosine_similarity(preds, target)\n tensor(0.8536)\n\n \"\"\"\n is_differentiable: bool = True\n higher_is_better: bool = True\n full_state_update: bool = False\n plot_lower_bound: float = 0.0\n plot_upper_bound: float = 1.0\n\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = \"sum\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n allowed_reduction = (\"sum\", \"mean\", \"none\", None)\n if reduction not in allowed_reduction:\n raise ValueError(f\"Expected argument `reduction` to be one of {allowed_reduction} but got {reduction}\")\n self.reduction = reduction\n\n self.add_state(\"preds\", [], dist_reduce_fx=\"cat\")\n self.add_state(\"target\", [], dist_reduce_fx=\"cat\")\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update metric states 
with predictions and targets.\"\"\"\n preds, target = _cosine_similarity_update(preds, target)\n\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"Compute metric.\"\"\"\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n return _cosine_similarity_compute(preds, target, self.reduction)\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> from torch import randn\n >>> # Example plotting a single value\n >>> from torchmetrics.regression import CosineSimilarity\n >>> metric = CosineSimilarity()\n >>> metric.update(randn(10,), randn(10,))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> from torch import randn\n >>> # Example plotting multiple values\n >>> from torchmetrics.regression import CosineSimilarity\n >>> metric = CosineSimilarity()\n >>> values = []\n >>> for _ in range(10):\n ... values.append(metric(randn(10,), randn(10,)))\n >>> fig, ax = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/regression/cosine_similarity.py"}]}
| 3,898 | 423 |
gh_patches_debug_23989
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-3526
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Saleor eCommerce, Mirumee Software in pdf
### What I'm trying to achieve
Generate the PDF with my own company name instead of "Saleor eCommerce, Mirumee Software".
### Steps to reproduce the problem
Create an order, fulfill it, and generate the PDF.
### What I expected to happen
The PDF should show my own e-commerce company's name.
### Screenshots
<!-- If applicable, add screenshots to help explain your problem. -->
<img width="1207" alt="schermafbeelding 2018-12-22 om 13 35 35" src="https://user-images.githubusercontent.com/7673074/50374426-8b5d1780-05ee-11e9-81cf-f778935a403b.png">
**System information**
Operating system:
Browser:
</issue>
<code>
[start of saleor/dashboard/order/utils.py]
1 from django.conf import settings
2 from django.contrib.sites.shortcuts import get_current_site
3 from django.template.loader import get_template
4
5 from ...checkout import AddressType
6 from ...checkout.utils import _get_products_voucher_discount
7 from ...core.utils.taxes import ZERO_MONEY
8 from ...discount import VoucherType
9 from ...discount.utils import (
10 get_shipping_voucher_discount, get_value_voucher_discount)
11 from ...product.utils import decrease_stock
12
13 INVOICE_TEMPLATE = 'dashboard/order/pdf/invoice.html'
14 PACKING_SLIP_TEMPLATE = 'dashboard/order/pdf/packing_slip.html'
15
16
17 def get_statics_absolute_url(request):
18 site = get_current_site(request)
19 absolute_url = '%(protocol)s://%(domain)s%(static_url)s' % {
20 'protocol': 'https' if request.is_secure() else 'http',
21 'domain': site.domain,
22 'static_url': settings.STATIC_URL}
23 return absolute_url
24
25
26 def _create_pdf(rendered_template, absolute_url):
27 from weasyprint import HTML
28 pdf_file = (HTML(string=rendered_template, base_url=absolute_url)
29 .write_pdf())
30 return pdf_file
31
32
33 def create_invoice_pdf(order, absolute_url):
34 ctx = {'order': order}
35 rendered_template = get_template(INVOICE_TEMPLATE).render(ctx)
36 pdf_file = _create_pdf(rendered_template, absolute_url)
37 return pdf_file, order
38
39
40 def create_packing_slip_pdf(order, fulfillment, absolute_url):
41 ctx = {'order': order, 'fulfillment': fulfillment}
42 rendered_template = get_template(PACKING_SLIP_TEMPLATE).render(ctx)
43 pdf_file = _create_pdf(rendered_template, absolute_url)
44 return pdf_file, order
45
46
47 def fulfill_order_line(order_line, quantity):
48 """Fulfill order line with given quantity."""
49 if order_line.variant and order_line.variant.track_inventory:
50 decrease_stock(order_line.variant, quantity)
51 order_line.quantity_fulfilled += quantity
52 order_line.save(update_fields=['quantity_fulfilled'])
53
54
55 def update_order_with_user_addresses(order):
56 """Update addresses in an order based on a user assigned to an order."""
57 if order.shipping_address:
58 order.shipping_address.delete()
59 order.shipping_address = None
60
61 if order.billing_address:
62 order.billing_address.delete()
63 order.billing_address = None
64
65 if order.user:
66 order.billing_address = (
67 order.user.default_billing_address.get_copy()
68 if order.user.default_billing_address else None)
69 order.shipping_address = (
70 order.user.default_shipping_address.get_copy()
71 if order.user.default_shipping_address else None)
72
73 order.save(update_fields=['billing_address', 'shipping_address'])
74
75
76 def get_voucher_discount_for_order(order):
77 """Calculate discount value depending on voucher and discount types.
78
79 Raise NotApplicable if voucher of given type cannot be applied.
80 """
81 if not order.voucher:
82 return ZERO_MONEY
83 if order.voucher.type == VoucherType.VALUE:
84 return get_value_voucher_discount(
85 order.voucher, order.get_subtotal())
86 if order.voucher.type == VoucherType.SHIPPING:
87 return get_shipping_voucher_discount(
88 order.voucher, order.get_subtotal(), order.shipping_price)
89 if order.voucher.type in (
90 VoucherType.PRODUCT, VoucherType.COLLECTION, VoucherType.CATEGORY):
91 return _get_products_voucher_discount(order, order.voucher)
92 raise NotImplementedError('Unknown discount type')
93
94
95 def save_address_in_order(order, address, address_type):
96 """Save new address of a given address type in an order.
97
98 If the other type of address is empty, copy it.
99 """
100 if address_type == AddressType.SHIPPING:
101 order.shipping_address = address
102 if not order.billing_address:
103 order.billing_address = address.get_copy()
104 else:
105 order.billing_address = address
106 if not order.shipping_address:
107 order.shipping_address = address.get_copy()
108 order.save(update_fields=['billing_address', 'shipping_address'])
109
110
111 def addresses_are_equal(address_1, address_2):
112 return address_1 and address_2 and address_1 == address_2
113
114
115 def remove_customer_from_order(order):
116 """Remove related customer and user email from order.
117
118 If billing and shipping addresses are set to related customer's default
119 addresses and were not edited, remove them as well.
120 """
121 customer = order.user
122 order.user = None
123 order.user_email = ''
124 order.save()
125
126 if customer:
127 equal_billing_addresses = addresses_are_equal(
128 order.billing_address, customer.default_billing_address)
129 if equal_billing_addresses:
130 order.billing_address.delete()
131 order.billing_address = None
132
133 equal_shipping_addresses = addresses_are_equal(
134 order.shipping_address, customer.default_shipping_address)
135 if equal_shipping_addresses:
136 order.shipping_address.delete()
137 order.shipping_address = None
138
139 if equal_billing_addresses or equal_shipping_addresses:
140 order.save()
141
[end of saleor/dashboard/order/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/dashboard/order/utils.py b/saleor/dashboard/order/utils.py
--- a/saleor/dashboard/order/utils.py
+++ b/saleor/dashboard/order/utils.py
@@ -1,4 +1,5 @@
from django.conf import settings
+from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import get_template
@@ -31,14 +32,19 @@
def create_invoice_pdf(order, absolute_url):
- ctx = {'order': order}
+ ctx = {
+ 'order': order,
+ 'site': Site.objects.get_current()}
rendered_template = get_template(INVOICE_TEMPLATE).render(ctx)
pdf_file = _create_pdf(rendered_template, absolute_url)
return pdf_file, order
def create_packing_slip_pdf(order, fulfillment, absolute_url):
- ctx = {'order': order, 'fulfillment': fulfillment}
+ ctx = {
+ 'order': order,
+ 'fulfillment': fulfillment,
+ 'site': Site.objects.get_current()}
rendered_template = get_template(PACKING_SLIP_TEMPLATE).render(ctx)
pdf_file = _create_pdf(rendered_template, absolute_url)
return pdf_file, order
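
For context only - a minimal sketch, assuming Django's `sites` framework (it is not part of the reference patch above), of why passing `site` into the PDF context addresses the branding problem: once the templates read `site.name`, renaming the shop becomes a data change rather than a code change.

```python
# Hypothetical one-off (e.g. in a data migration or a `manage.py shell` session);
# the shop name used here is an assumption, not taken from the issue.
from django.contrib.sites.models import Site

site = Site.objects.get_current()
site.name = "My Brand New Ecommerce Company"
site.save()
# Invoices and packing slips rendered with the {'site': site} context above
# will now carry this name instead of the hard-coded default.
```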
|
{"golden_diff": "diff --git a/saleor/dashboard/order/utils.py b/saleor/dashboard/order/utils.py\n--- a/saleor/dashboard/order/utils.py\n+++ b/saleor/dashboard/order/utils.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.contrib.sites.models import Site\n from django.contrib.sites.shortcuts import get_current_site\n from django.template.loader import get_template\n \n@@ -31,14 +32,19 @@\n \n \n def create_invoice_pdf(order, absolute_url):\n- ctx = {'order': order}\n+ ctx = {\n+ 'order': order,\n+ 'site': Site.objects.get_current()}\n rendered_template = get_template(INVOICE_TEMPLATE).render(ctx)\n pdf_file = _create_pdf(rendered_template, absolute_url)\n return pdf_file, order\n \n \n def create_packing_slip_pdf(order, fulfillment, absolute_url):\n- ctx = {'order': order, 'fulfillment': fulfillment}\n+ ctx = {\n+ 'order': order,\n+ 'fulfillment': fulfillment,\n+ 'site': Site.objects.get_current()}\n rendered_template = get_template(PACKING_SLIP_TEMPLATE).render(ctx)\n pdf_file = _create_pdf(rendered_template, absolute_url)\n return pdf_file, order\n", "issue": "Saleor eCommerce, Mirumee Software in pdf\n### What I'm trying to achieve\r\nMake pdf with my custom name instead of Saleor eCommerce, Mirumee Software\r\n\r\n### Steps to reproduce the problem\r\nmake order and fullfill and make pdf\r\n### What I expected to happen\r\nMy brand new ecommerce company\r\n\r\n### Screenshots\r\n<!-- If applicable, add screenshots to help explain your problem. -->\r\n<img width=\"1207\" alt=\"schermafbeelding 2018-12-22 om 13 35 35\" src=\"https://user-images.githubusercontent.com/7673074/50374426-8b5d1780-05ee-11e9-81cf-f778935a403b.png\">\r\n\r\n**System information**\r\nOperating system: \r\nBrowser:\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.template.loader import get_template\n\nfrom ...checkout import AddressType\nfrom ...checkout.utils import _get_products_voucher_discount\nfrom ...core.utils.taxes import ZERO_MONEY\nfrom ...discount import VoucherType\nfrom ...discount.utils import (\n get_shipping_voucher_discount, get_value_voucher_discount)\nfrom ...product.utils import decrease_stock\n\nINVOICE_TEMPLATE = 'dashboard/order/pdf/invoice.html'\nPACKING_SLIP_TEMPLATE = 'dashboard/order/pdf/packing_slip.html'\n\n\ndef get_statics_absolute_url(request):\n site = get_current_site(request)\n absolute_url = '%(protocol)s://%(domain)s%(static_url)s' % {\n 'protocol': 'https' if request.is_secure() else 'http',\n 'domain': site.domain,\n 'static_url': settings.STATIC_URL}\n return absolute_url\n\n\ndef _create_pdf(rendered_template, absolute_url):\n from weasyprint import HTML\n pdf_file = (HTML(string=rendered_template, base_url=absolute_url)\n .write_pdf())\n return pdf_file\n\n\ndef create_invoice_pdf(order, absolute_url):\n ctx = {'order': order}\n rendered_template = get_template(INVOICE_TEMPLATE).render(ctx)\n pdf_file = _create_pdf(rendered_template, absolute_url)\n return pdf_file, order\n\n\ndef create_packing_slip_pdf(order, fulfillment, absolute_url):\n ctx = {'order': order, 'fulfillment': fulfillment}\n rendered_template = get_template(PACKING_SLIP_TEMPLATE).render(ctx)\n pdf_file = _create_pdf(rendered_template, absolute_url)\n return pdf_file, order\n\n\ndef fulfill_order_line(order_line, quantity):\n \"\"\"Fulfill order line with given quantity.\"\"\"\n if order_line.variant and order_line.variant.track_inventory:\n decrease_stock(order_line.variant, quantity)\n 
order_line.quantity_fulfilled += quantity\n order_line.save(update_fields=['quantity_fulfilled'])\n\n\ndef update_order_with_user_addresses(order):\n \"\"\"Update addresses in an order based on a user assigned to an order.\"\"\"\n if order.shipping_address:\n order.shipping_address.delete()\n order.shipping_address = None\n\n if order.billing_address:\n order.billing_address.delete()\n order.billing_address = None\n\n if order.user:\n order.billing_address = (\n order.user.default_billing_address.get_copy()\n if order.user.default_billing_address else None)\n order.shipping_address = (\n order.user.default_shipping_address.get_copy()\n if order.user.default_shipping_address else None)\n\n order.save(update_fields=['billing_address', 'shipping_address'])\n\n\ndef get_voucher_discount_for_order(order):\n \"\"\"Calculate discount value depending on voucher and discount types.\n\n Raise NotApplicable if voucher of given type cannot be applied.\n \"\"\"\n if not order.voucher:\n return ZERO_MONEY\n if order.voucher.type == VoucherType.VALUE:\n return get_value_voucher_discount(\n order.voucher, order.get_subtotal())\n if order.voucher.type == VoucherType.SHIPPING:\n return get_shipping_voucher_discount(\n order.voucher, order.get_subtotal(), order.shipping_price)\n if order.voucher.type in (\n VoucherType.PRODUCT, VoucherType.COLLECTION, VoucherType.CATEGORY):\n return _get_products_voucher_discount(order, order.voucher)\n raise NotImplementedError('Unknown discount type')\n\n\ndef save_address_in_order(order, address, address_type):\n \"\"\"Save new address of a given address type in an order.\n\n If the other type of address is empty, copy it.\n \"\"\"\n if address_type == AddressType.SHIPPING:\n order.shipping_address = address\n if not order.billing_address:\n order.billing_address = address.get_copy()\n else:\n order.billing_address = address\n if not order.shipping_address:\n order.shipping_address = address.get_copy()\n order.save(update_fields=['billing_address', 'shipping_address'])\n\n\ndef addresses_are_equal(address_1, address_2):\n return address_1 and address_2 and address_1 == address_2\n\n\ndef remove_customer_from_order(order):\n \"\"\"Remove related customer and user email from order.\n\n If billing and shipping addresses are set to related customer's default\n addresses and were not edited, remove them as well.\n \"\"\"\n customer = order.user\n order.user = None\n order.user_email = ''\n order.save()\n\n if customer:\n equal_billing_addresses = addresses_are_equal(\n order.billing_address, customer.default_billing_address)\n if equal_billing_addresses:\n order.billing_address.delete()\n order.billing_address = None\n\n equal_shipping_addresses = addresses_are_equal(\n order.shipping_address, customer.default_shipping_address)\n if equal_shipping_addresses:\n order.shipping_address.delete()\n order.shipping_address = None\n\n if equal_billing_addresses or equal_shipping_addresses:\n order.save()\n", "path": "saleor/dashboard/order/utils.py"}]}
| 2,110 | 273 |
gh_patches_debug_13700
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-3100
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Webmail is not exempt from rate limiting
## Environment & Version
- `docker compose version`: _Docker Compose version 2.23.3_
- Version: mailu `2.0.34`
## Description
When trying to open my webmail (Roundcube), the browser just showed a _too many redirects_ error - all of the redirects pointed to the webmailer's `sso.php`.
Debugging this I found the following to be the reason:
> front-1 | [info] 15#15: *153382 client login failed: "Temporary authentication failure (rate-limit)" while in http auth state, client: **172.29.0.3**, server: 0.0.0.0:10143, login: "[email protected]"
Where `172.29.0.3` is the IP of the webmail container.
As far as I could debug this, everything else was working fine: `sso.php` could correctly get valid credentials provided by `front` via HTTP headers, but trying to use them would fail since the webmail container was rate limited. The failed login would then redirect again to `sso.php` - in a loop...
## Replication Steps
Unfortunately I have no idea how the webmail container's IP could end up on the rate limited list...
The webmail container should only ever try to log in with credentials provided by `front` via HTTP headers - which should therefore always be valid.
## Observed behaviour
The webmailer was blocked by rate limiting, preventing it from successfully authenticating; its logins failed, trapping the browser in an infinite redirection loop.
## Expected behaviour
The webmailer should not be blocked by rate limiting since the credentials are passed from an already valid login via SSO anyway.
## Possible solutions
1. prevent the webmailer from hitting the rate limits: this requires some more debugging, since I don't know how it could end up rate limited in the first place - every login it tries should be successful...
2. exempt the webmail container from rate limits: this is the workaround I am using now - I adapted my `docker-compose.yml` to give the webmailer's network a known subnet and exempted that subnet from rate limits:
~~~diff
--- a/mailu.env
+++ b/.mailu.env
@@ -38,6 +38,10 @@ AUTH_RATELIMIT_IP=60/hour
# Authentication rate limit per user (regardless of the source-IP)
AUTH_RATELIMIT_USER=100/day
+# exempt webmail container from rate limiting
+WEBMAIL_SUBNET=172.29.0.0/24
+AUTH_RATELIMIT_EXEMPTION=$WEBMAIL_SUBNET
+
# Opt-out of statistics, replace with "True" to opt out
DISABLE_STATISTICS=False
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -184,3 +184,7 @@ networks:
webmail:
driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: "$WEBMAIL_SUBNET"
~~~
</issue>
<code>
[start of core/admin/mailu/internal/views/auth.py]
1 from mailu import models, utils
2 from mailu.internal import internal, nginx
3 from flask import current_app as app
4
5 import flask
6 import flask_login
7 import base64
8 import sqlalchemy.exc
9 import urllib
10
11 @internal.route("/auth/email")
12 def nginx_authentication():
13 """ Main authentication endpoint for Nginx email server
14 """
15 client_ip = flask.request.headers["Client-Ip"]
16 headers = flask.request.headers
17 is_port_25 = headers["Auth-Port"] == '25'
18 if is_port_25 and headers['Auth-Method'] != 'none':
19 response = flask.Response()
20 response.headers['Auth-Status'] = 'AUTH not supported'
21 response.headers['Auth-Error-Code'] = '502 5.5.1'
22 utils.limiter.rate_limit_ip(client_ip)
23 return response
24 is_from_webmail = headers['Auth-Port'] in ['10143', '10025']
25 is_app_token = utils.is_app_token(headers.get('Auth-Pass',''))
26 if not is_from_webmail and not is_port_25 and not is_app_token and utils.limiter.should_rate_limit_ip(client_ip):
27 status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
28 response = flask.Response()
29 response.headers['Auth-Status'] = status
30 response.headers['Auth-Error-Code'] = code
31 return response
32 raw_password = urllib.parse.unquote(headers['Auth-Pass']) if 'Auth-Pass' in headers else ''
33 headers = nginx.handle_authentication(flask.request.headers)
34 response = flask.Response()
35 for key, value in headers.items():
36 response.headers[key] = str(value)
37 is_valid_user = False
38 username = response.headers.get('Auth-User', None)
39 if response.headers.get("Auth-User-Exists") == "True":
40 if not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):
41 # FIXME could be done before handle_authentication()
42 status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
43 response = flask.Response()
44 response.headers['Auth-Status'] = status
45 response.headers['Auth-Error-Code'] = code
46 return response
47 is_valid_user = True
48 if headers.get("Auth-Status") == "OK":
49 # successful email delivery isn't enough to warrant an exemption
50 if not is_port_25:
51 utils.limiter.exempt_ip_from_ratelimits(client_ip)
52 elif is_valid_user:
53 password = None
54 try:
55 password = raw_password.encode("iso8859-1").decode("utf8")
56 except:
57 app.logger.warn(f'Received undecodable password for {username} from nginx: {raw_password!r}')
58 utils.limiter.rate_limit_user(username, client_ip, password=None)
59 else:
60 utils.limiter.rate_limit_user(username, client_ip, password=password)
61 elif not is_from_webmail:
62 utils.limiter.rate_limit_ip(client_ip, username)
63 return response
64
65 @internal.route("/auth/admin")
66 def admin_authentication():
67 """ Fails if the user is not an authenticated admin.
68 """
69 if (not flask_login.current_user.is_anonymous
70 and flask_login.current_user.global_admin
71 and flask_login.current_user.enabled):
72 return ""
73 return flask.abort(403)
74
75 @internal.route("/auth/user")
76 def user_authentication():
77 """ Fails if the user is not authenticated.
78 """
79 if (not flask_login.current_user.is_anonymous
80 and flask_login.current_user.enabled):
81 response = flask.Response()
82 email = flask_login.current_user.get_id()
83 response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, email, "")
84 response.headers["X-User-Token"] = utils.gen_temp_token(email, flask.session)
85 return response
86 return flask.abort(403)
87
88
89 @internal.route("/auth/basic")
90 def basic_authentication():
91 """ Tries to authenticate using the Authorization header.
92 """
93 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
94 if utils.limiter.should_rate_limit_ip(client_ip):
95 response = flask.Response(status=401)
96 response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit from one source exceeded"'
97 response.headers['Retry-After'] = '60'
98 return response
99 authorization = flask.request.headers.get("Authorization")
100 if authorization and authorization.startswith("Basic "):
101 encoded = authorization.replace("Basic ", "")
102 user_email, password = base64.b64decode(encoded).split(b":", 1)
103 user_email = user_email.decode("utf8")
104 if utils.limiter.should_rate_limit_user(user_email, client_ip):
105 response = flask.Response(status=401)
106 response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit for this username exceeded"'
107 response.headers['Retry-After'] = '60'
108 return response
109 try:
110 user = models.User.query.get(user_email) if '@' in user_email else None
111 except sqlalchemy.exc.StatementError as exc:
112 exc = str(exc).split('\n', 1)[0]
113 app.logger.warn(f'Invalid user {user_email!r}: {exc}')
114 else:
115 if user is not None and nginx.check_credentials(user, password.decode('utf-8'), client_ip, "web", flask.request.headers.get('X-Real-Port', None), user_email):
116 response = flask.Response()
117 response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, user.email, "")
118 utils.limiter.exempt_ip_from_ratelimits(client_ip)
119 return response
120 # We failed check_credentials
121 utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip, user_email)
122 response = flask.Response(status=401)
123 response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"'
124 return response
125
[end of core/admin/mailu/internal/views/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -37,7 +37,7 @@
is_valid_user = False
username = response.headers.get('Auth-User', None)
if response.headers.get("Auth-User-Exists") == "True":
- if not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):
+ if not is_from_webmail and not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):
# FIXME could be done before handle_authentication()
status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
response = flask.Response()
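
To make the exemption explicit, here is a tiny self-contained sketch (illustrative only, not Mailu code) of the corrected guard: requests coming from the webmail ports skip the per-user rate-limit check, while every other client is still limited.

```python
# Variable names mirror the patched condition above.
def should_rate_limit(is_from_webmail: bool, is_app_token: bool, over_limit: bool) -> bool:
    return not is_from_webmail and not is_app_token and over_limit

assert should_rate_limit(is_from_webmail=True, is_app_token=False, over_limit=True) is False
assert should_rate_limit(is_from_webmail=False, is_app_token=False, over_limit=True) is True
```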
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py\n--- a/core/admin/mailu/internal/views/auth.py\n+++ b/core/admin/mailu/internal/views/auth.py\n@@ -37,7 +37,7 @@\n is_valid_user = False\n username = response.headers.get('Auth-User', None)\n if response.headers.get(\"Auth-User-Exists\") == \"True\":\n- if not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):\n+ if not is_from_webmail and not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):\n # FIXME could be done before handle_authentication()\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n", "issue": "Webmail is not exempt from rate limiting\n## Environment & Version\r\n\r\n- `docker compose version`: _Docker Compose version 2.23.3_\r\n- Version: mailu `2.0.34`\r\n\r\n## Description\r\nTrying to open my webmail (roundcube) the browser just showed an error due to _too many redirects_ - which were to `sso.php` of the webmailer.\r\n\r\nDebugging this I found the following to be the reason:\r\n> front-1 | [info] 15#15: *153382 client login failed: \"Temporary authentication failure (rate-limit)\" while in http auth state, client: **172.29.0.3**, server: 0.0.0.0:10143, login: \"[email protected]\"\r\n\r\nWhere `172.29.0.3` is the IP of the webmail container.\r\n\r\nAs far as I could debug this everything else was working fine, `sso.php` could correctly get valid credentials provided by `front` via HTTP headers but trying to use them would fail since the webmail container was rate limited. The failed login would the redirect again to `sso.php` - in a loop...\r\n\r\n## Replication Steps\r\nUnfortunately I have no idea how the webmail container's IP could end up on the rate limited list...\r\nThe webmail container should only ever try to login with credentials provided by `front` via HTTP headers - which then should always be valid\r\n\r\n## Observed behaviour\r\nWebmailer was blocked by rate limiting, preventing it from successfully authenticate, causing its login in to fail and damning the browser into an infinite redirection loop.\r\n\r\n## Expected behaviour\r\nThe webmailer should not be blocked by rate limiting since the credentials are passed from an already valid login via SSO anyway.\r\n\r\n## Possible solutions\r\n1. prevent the webmailer from hitting the rate limits: this requires some more debugging,since I don't know how it could end up rate limited in the first place since every login it tries should be successful...\r\n2. 
exempt the webmail container from rate limits: this is the workaround I used now by adapting my `docker-compose.yml` config to give the network used by the webmailer a known subnet and exempting it from rate limits:\r\n~~~diff\r\n--- a/mailu.env\r\n+++ b/.mailu.env\r\n@@ -38,6 +38,10 @@ AUTH_RATELIMIT_IP=60/hour\r\n # Authentication rate limit per user (regardless of the source-IP)\r\n AUTH_RATELIMIT_USER=100/day\r\n \r\n+# exempt webmail container from rate limiting\r\n+WEBMAIL_SUBNET=172.29.0.0/24\r\n+AUTH_RATELIMIT_EXEMPTION=$WEBMAIL_SUBNET\r\n+\r\n # Opt-out of statistics, replace with \"True\" to opt out\r\n DISABLE_STATISTICS=False\r\n\r\n--- a/docker-compose.yml\r\n+++ b/docker-compose.yml\r\n@@ -184,3 +184,7 @@ networks:\r\n webmail:\r\n driver: bridge\r\n+ ipam:\r\n+ driver: default\r\n+ config:\r\n+ - subnet: \"$WEBMAIL_SUBNET\"\r\n~~~\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\nimport sqlalchemy.exc\nimport urllib\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for Nginx email server\n \"\"\"\n client_ip = flask.request.headers[\"Client-Ip\"]\n headers = flask.request.headers\n is_port_25 = headers[\"Auth-Port\"] == '25'\n if is_port_25 and headers['Auth-Method'] != 'none':\n response = flask.Response()\n response.headers['Auth-Status'] = 'AUTH not supported'\n response.headers['Auth-Error-Code'] = '502 5.5.1'\n utils.limiter.rate_limit_ip(client_ip)\n return response\n is_from_webmail = headers['Auth-Port'] in ['10143', '10025']\n is_app_token = utils.is_app_token(headers.get('Auth-Pass',''))\n if not is_from_webmail and not is_port_25 and not is_app_token and utils.limiter.should_rate_limit_ip(client_ip):\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n return response\n raw_password = urllib.parse.unquote(headers['Auth-Pass']) if 'Auth-Pass' in headers else ''\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n is_valid_user = False\n username = response.headers.get('Auth-User', None)\n if response.headers.get(\"Auth-User-Exists\") == \"True\":\n if not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):\n # FIXME could be done before handle_authentication()\n status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')\n response = flask.Response()\n response.headers['Auth-Status'] = status\n response.headers['Auth-Error-Code'] = code\n return response\n is_valid_user = True\n if headers.get(\"Auth-Status\") == \"OK\":\n # successful email delivery isn't enough to warrant an exemption\n if not is_port_25:\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n elif is_valid_user:\n password = None\n try:\n password = raw_password.encode(\"iso8859-1\").decode(\"utf8\")\n except:\n app.logger.warn(f'Received undecodable password for {username} from nginx: {raw_password!r}')\n utils.limiter.rate_limit_user(username, client_ip, password=None)\n else:\n utils.limiter.rate_limit_user(username, client_ip, password=password)\n elif not is_from_webmail:\n utils.limiter.rate_limit_ip(client_ip, username)\n return response\n\[email 
protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\[email protected](\"/auth/user\")\ndef user_authentication():\n \"\"\" Fails if the user is not authenticated.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.enabled):\n response = flask.Response()\n email = flask_login.current_user.get_id()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, email, \"\")\n response.headers[\"X-User-Token\"] = utils.gen_temp_token(email, flask.session)\n return response\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n if utils.limiter.should_rate_limit_ip(client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit from one source exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user_email = user_email.decode(\"utf8\")\n if utils.limiter.should_rate_limit_user(user_email, client_ip):\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Authentication rate limit for this username exceeded\"'\n response.headers['Retry-After'] = '60'\n return response\n try:\n user = models.User.query.get(user_email) if '@' in user_email else None\n except sqlalchemy.exc.StatementError as exc:\n exc = str(exc).split('\\n', 1)[0]\n app.logger.warn(f'Invalid user {user_email!r}: {exc}')\n else:\n if user is not None and nginx.check_credentials(user, password.decode('utf-8'), client_ip, \"web\", flask.request.headers.get('X-Real-Port', None), user_email):\n response = flask.Response()\n response.headers[\"X-User\"] = models.IdnaEmail.process_bind_param(flask_login, user.email, \"\")\n utils.limiter.exempt_ip_from_ratelimits(client_ip)\n return response\n # We failed check_credentials\n utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip, user_email)\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}]}
| 2,788 | 176 |
gh_patches_debug_31088
|
rasdani/github-patches
|
git_diff
|
shapiromatron__hawc-505
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dosing regime dose groups hotfix
We had a reported data corruption issue where a user edited content in a dosing regime and then after saving, we found multiple endpoint-groups with the same endpoint group id, which shouldn't be possible.
After investigation, we found it was an error in the signal which keeps dose-groups and endpoint-groups synced. If there were multiple representations of dose-groups, for example 5 dose-groups and 2 units, then hawc would create 10 endpoint-groups instead of 5. Further, it would create these even for endpoints where data is not extracted.
Here we fix this issue and write a few tests.
</issue>
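
To make the arithmetic in the report concrete, a throwaway sketch of the invariant the fix restores (plain Python; the names are illustrative rather than HAWC code): the number of endpoint-groups must track the number of dose-groups, independent of how many dose units exist.

```python
n_dose_groups, n_dose_units = 5, 2       # numbers quoted in the report above
expected = n_dose_groups                 # one endpoint-group per dose-group
buggy = n_dose_groups * n_dose_units     # what the broken signal produced
assert (expected, buggy) == (5, 10)
```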
<code>
[start of hawc/apps/animal/admin.py]
1 from django.contrib import admin
2
3 from . import models
4
5
6 @admin.register(models.Experiment)
7 class ExperimentAdmin(admin.ModelAdmin):
8 list_display = (
9 "id",
10 "study",
11 "name",
12 "type",
13 "has_multiple_generations",
14 "chemical",
15 "cas",
16 "created",
17 )
18 list_filter = ("type", "has_multiple_generations", "chemical", "study__assessment")
19 search_fields = (
20 "study__short_citation",
21 "name",
22 )
23
24
25 @admin.register(models.AnimalGroup)
26 class AnimalGroupAdmin(admin.ModelAdmin):
27 list_display = (
28 "id",
29 "experiment",
30 "name",
31 "species",
32 "strain",
33 "sex",
34 "created",
35 )
36 list_filter = ("species", "strain", "sex", "experiment__study__assessment_id")
37 search_fields = ("name",)
38
39
40 @admin.register(models.DosingRegime)
41 class DosingRegimeAdmin(admin.ModelAdmin):
42 list_display = (
43 "id",
44 "dosed_animals",
45 "route_of_exposure",
46 "duration_exposure",
47 "num_dose_groups",
48 "created",
49 )
50 list_filter = (
51 "route_of_exposure",
52 "num_dose_groups",
53 "dosed_animals__experiment__study__assessment_id",
54 )
55 list_select_related = ("dosed_animals",)
56 search_fields = ("dosed_animals__name",)
57
58
59 @admin.register(models.Endpoint)
60 class EndpointAdmin(admin.ModelAdmin):
61 list_display = (
62 "id",
63 "assessment_id",
64 "animal_group",
65 "name",
66 "system",
67 "organ",
68 "effect",
69 "data_extracted",
70 "created",
71 )
72 list_filter = ("system", "organ", "data_extracted", "assessment_id")
73 search_fields = ("name",)
74
[end of hawc/apps/animal/admin.py]
[start of hawc/services/utils/git.py]
1 import subprocess
2 from datetime import datetime
3
4 from pydantic import BaseModel
5
6
7 class Commit(BaseModel):
8 sha: str
9 dt: datetime
10
11 @classmethod
12 def current(cls, cwd: str = ".") -> "Commit":
13 """Return information on the last commit at the repository path desired.
14
15 Returns:
16 A Commit instance
17 """
18 cmd = "git log -1 --format=%H"
19 sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]
20 cmd = "git show -s --format=%ct"
21 dt = datetime.fromtimestamp(
22 int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())
23 )
24 return cls(sha=sha, dt=dt)
25
[end of hawc/services/utils/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hawc/apps/animal/admin.py b/hawc/apps/animal/admin.py
--- a/hawc/apps/animal/admin.py
+++ b/hawc/apps/animal/admin.py
@@ -35,6 +35,13 @@
)
list_filter = ("species", "strain", "sex", "experiment__study__assessment_id")
search_fields = ("name",)
+ raw_id_fields = ("experiment", "species", "strain", "dosing_regime", "siblings")
+
+
+class DoseGroupInline(admin.TabularInline):
+ model = models.DoseGroup
+ raw_id_fields = ("dose_units",)
+ extra = 0
@admin.register(models.DosingRegime)
@@ -54,6 +61,12 @@
)
list_select_related = ("dosed_animals",)
search_fields = ("dosed_animals__name",)
+ inlines = (DoseGroupInline,)
+
+
+class EndpointGroupInline(admin.TabularInline):
+ model = models.EndpointGroup
+ extra = 0
@admin.register(models.Endpoint)
@@ -71,3 +84,13 @@
)
list_filter = ("system", "organ", "data_extracted", "assessment_id")
search_fields = ("name",)
+ raw_id_fields = (
+ "assessment",
+ "animal_group",
+ "system_term",
+ "organ_term",
+ "effect_term",
+ "effect_subtype_term",
+ "name_term",
+ )
+ inlines = (EndpointGroupInline,)
diff --git a/hawc/services/utils/git.py b/hawc/services/utils/git.py
--- a/hawc/services/utils/git.py
+++ b/hawc/services/utils/git.py
@@ -16,7 +16,7 @@
A Commit instance
"""
cmd = "git log -1 --format=%H"
- sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]
+ sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:8]
cmd = "git show -s --format=%ct"
dt = datetime.fromtimestamp(
int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())
|
{"golden_diff": "diff --git a/hawc/apps/animal/admin.py b/hawc/apps/animal/admin.py\n--- a/hawc/apps/animal/admin.py\n+++ b/hawc/apps/animal/admin.py\n@@ -35,6 +35,13 @@\n )\n list_filter = (\"species\", \"strain\", \"sex\", \"experiment__study__assessment_id\")\n search_fields = (\"name\",)\n+ raw_id_fields = (\"experiment\", \"species\", \"strain\", \"dosing_regime\", \"siblings\")\n+\n+\n+class DoseGroupInline(admin.TabularInline):\n+ model = models.DoseGroup\n+ raw_id_fields = (\"dose_units\",)\n+ extra = 0\n \n \n @admin.register(models.DosingRegime)\n@@ -54,6 +61,12 @@\n )\n list_select_related = (\"dosed_animals\",)\n search_fields = (\"dosed_animals__name\",)\n+ inlines = (DoseGroupInline,)\n+\n+\n+class EndpointGroupInline(admin.TabularInline):\n+ model = models.EndpointGroup\n+ extra = 0\n \n \n @admin.register(models.Endpoint)\n@@ -71,3 +84,13 @@\n )\n list_filter = (\"system\", \"organ\", \"data_extracted\", \"assessment_id\")\n search_fields = (\"name\",)\n+ raw_id_fields = (\n+ \"assessment\",\n+ \"animal_group\",\n+ \"system_term\",\n+ \"organ_term\",\n+ \"effect_term\",\n+ \"effect_subtype_term\",\n+ \"name_term\",\n+ )\n+ inlines = (EndpointGroupInline,)\ndiff --git a/hawc/services/utils/git.py b/hawc/services/utils/git.py\n--- a/hawc/services/utils/git.py\n+++ b/hawc/services/utils/git.py\n@@ -16,7 +16,7 @@\n A Commit instance\n \"\"\"\n cmd = \"git log -1 --format=%H\"\n- sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]\n+ sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:8]\n cmd = \"git show -s --format=%ct\"\n dt = datetime.fromtimestamp(\n int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())\n", "issue": "dosing regime dose groups hotfix\nWe had a reported data corruption issue where a user edited content in a dosing regime and then after saving, we found multiple endpoint-groups with the same endpoint group id, which shouldn't be possible.\r\n\r\nAfter investigation, we found it was an error in the signal which keeps dose-groups and endpoint-groups synced. If there were multiple representation of dose-groups, for example 5 dose-groups and 2 units, then hawc would create 10 endpoint-groups instead of 5. Further, it would create these even for endpoints where data is not extracted.\r\n\r\nHere we fix this issue and write a few tests.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom . 
import models\n\n\[email protected](models.Experiment)\nclass ExperimentAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"study\",\n \"name\",\n \"type\",\n \"has_multiple_generations\",\n \"chemical\",\n \"cas\",\n \"created\",\n )\n list_filter = (\"type\", \"has_multiple_generations\", \"chemical\", \"study__assessment\")\n search_fields = (\n \"study__short_citation\",\n \"name\",\n )\n\n\[email protected](models.AnimalGroup)\nclass AnimalGroupAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"experiment\",\n \"name\",\n \"species\",\n \"strain\",\n \"sex\",\n \"created\",\n )\n list_filter = (\"species\", \"strain\", \"sex\", \"experiment__study__assessment_id\")\n search_fields = (\"name\",)\n\n\[email protected](models.DosingRegime)\nclass DosingRegimeAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"dosed_animals\",\n \"route_of_exposure\",\n \"duration_exposure\",\n \"num_dose_groups\",\n \"created\",\n )\n list_filter = (\n \"route_of_exposure\",\n \"num_dose_groups\",\n \"dosed_animals__experiment__study__assessment_id\",\n )\n list_select_related = (\"dosed_animals\",)\n search_fields = (\"dosed_animals__name\",)\n\n\[email protected](models.Endpoint)\nclass EndpointAdmin(admin.ModelAdmin):\n list_display = (\n \"id\",\n \"assessment_id\",\n \"animal_group\",\n \"name\",\n \"system\",\n \"organ\",\n \"effect\",\n \"data_extracted\",\n \"created\",\n )\n list_filter = (\"system\", \"organ\", \"data_extracted\", \"assessment_id\")\n search_fields = (\"name\",)\n", "path": "hawc/apps/animal/admin.py"}, {"content": "import subprocess\nfrom datetime import datetime\n\nfrom pydantic import BaseModel\n\n\nclass Commit(BaseModel):\n sha: str\n dt: datetime\n\n @classmethod\n def current(cls, cwd: str = \".\") -> \"Commit\":\n \"\"\"Return information on the last commit at the repository path desired.\n\n Returns:\n A Commit instance\n \"\"\"\n cmd = \"git log -1 --format=%H\"\n sha = subprocess.check_output(cmd.split(), cwd=cwd).decode().strip()[:12]\n cmd = \"git show -s --format=%ct\"\n dt = datetime.fromtimestamp(\n int(subprocess.check_output(cmd.split(), cwd=cwd).decode().strip())\n )\n return cls(sha=sha, dt=dt)\n", "path": "hawc/services/utils/git.py"}]}
| 1,436 | 504 |
gh_patches_debug_3278
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-7294
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certbot's Apache plugin doesn't work on Scientific Linux
See https://community.letsencrypt.org/t/noinstallationerror-cannot-find-apache-executable-apache2ctl/97980.
This should be fixable by adding an override in https://github.com/certbot/certbot/blob/master/certbot-apache/certbot_apache/entrypoint.py#L17.
</issue>
<code>
[start of certbot-apache/certbot_apache/entrypoint.py]
1 """ Entry point for Apache Plugin """
2 # Pylint does not like disutils.version when running inside a venv.
3 # See: https://github.com/PyCQA/pylint/issues/73
4 from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
5
6 from certbot import util
7
8 from certbot_apache import configurator
9 from certbot_apache import override_arch
10 from certbot_apache import override_fedora
11 from certbot_apache import override_darwin
12 from certbot_apache import override_debian
13 from certbot_apache import override_centos
14 from certbot_apache import override_gentoo
15 from certbot_apache import override_suse
16
17 OVERRIDE_CLASSES = {
18 "arch": override_arch.ArchConfigurator,
19 "darwin": override_darwin.DarwinConfigurator,
20 "debian": override_debian.DebianConfigurator,
21 "ubuntu": override_debian.DebianConfigurator,
22 "centos": override_centos.CentOSConfigurator,
23 "centos linux": override_centos.CentOSConfigurator,
24 "fedora_old": override_centos.CentOSConfigurator,
25 "fedora": override_fedora.FedoraConfigurator,
26 "ol": override_centos.CentOSConfigurator,
27 "red hat enterprise linux server": override_centos.CentOSConfigurator,
28 "rhel": override_centos.CentOSConfigurator,
29 "amazon": override_centos.CentOSConfigurator,
30 "gentoo": override_gentoo.GentooConfigurator,
31 "gentoo base system": override_gentoo.GentooConfigurator,
32 "opensuse": override_suse.OpenSUSEConfigurator,
33 "suse": override_suse.OpenSUSEConfigurator,
34 }
35
36
37 def get_configurator():
38 """ Get correct configurator class based on the OS fingerprint """
39 os_name, os_version = util.get_os_info()
40 os_name = os_name.lower()
41 override_class = None
42
43 # Special case for older Fedora versions
44 if os_name == 'fedora' and LooseVersion(os_version) < LooseVersion('29'):
45 os_name = 'fedora_old'
46
47 try:
48 override_class = OVERRIDE_CLASSES[os_name]
49 except KeyError:
50 # OS not found in the list
51 os_like = util.get_systemd_os_like()
52 if os_like:
53 for os_name in os_like:
54 if os_name in OVERRIDE_CLASSES.keys():
55 override_class = OVERRIDE_CLASSES[os_name]
56 if not override_class:
57 # No override class found, return the generic configurator
58 override_class = configurator.ApacheConfigurator
59 return override_class
60
61
62 ENTRYPOINT = get_configurator()
63
[end of certbot-apache/certbot_apache/entrypoint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/certbot-apache/certbot_apache/entrypoint.py b/certbot-apache/certbot_apache/entrypoint.py
--- a/certbot-apache/certbot_apache/entrypoint.py
+++ b/certbot-apache/certbot_apache/entrypoint.py
@@ -31,6 +31,8 @@
"gentoo base system": override_gentoo.GentooConfigurator,
"opensuse": override_suse.OpenSUSEConfigurator,
"suse": override_suse.OpenSUSEConfigurator,
+ "scientific": override_centos.CentOSConfigurator,
+ "scientific linux": override_centos.CentOSConfigurator,
}
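
A quick sanity check of the new mapping (assuming Scientific Linux reports an OS ID along the lines of "scientific" or "scientific linux" - the exact string is an assumption): with the two entries added above, the lookup resolves to the CentOS-style configurator instead of falling back to the generic one.

```python
# Hypothetical check; import paths follow the module shown above.
from certbot_apache import override_centos
from certbot_apache.entrypoint import OVERRIDE_CLASSES

assert OVERRIDE_CLASSES["scientific"] is override_centos.CentOSConfigurator
assert OVERRIDE_CLASSES["scientific linux"] is override_centos.CentOSConfigurator
```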
|
{"golden_diff": "diff --git a/certbot-apache/certbot_apache/entrypoint.py b/certbot-apache/certbot_apache/entrypoint.py\n--- a/certbot-apache/certbot_apache/entrypoint.py\n+++ b/certbot-apache/certbot_apache/entrypoint.py\n@@ -31,6 +31,8 @@\n \"gentoo base system\": override_gentoo.GentooConfigurator,\n \"opensuse\": override_suse.OpenSUSEConfigurator,\n \"suse\": override_suse.OpenSUSEConfigurator,\n+ \"scientific\": override_centos.CentOSConfigurator,\n+ \"scientific linux\": override_centos.CentOSConfigurator,\n }\n", "issue": "Certbot's Apache plugin doesn't work on Scientific Linux\nSee https://community.letsencrypt.org/t/noinstallationerror-cannot-find-apache-executable-apache2ctl/97980.\r\n\r\nThis should be fixable by adding an override in https://github.com/certbot/certbot/blob/master/certbot-apache/certbot_apache/entrypoint.py#L17.\n", "before_files": [{"content": "\"\"\" Entry point for Apache Plugin \"\"\"\n# Pylint does not like disutils.version when running inside a venv.\n# See: https://github.com/PyCQA/pylint/issues/73\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\n\nfrom certbot import util\n\nfrom certbot_apache import configurator\nfrom certbot_apache import override_arch\nfrom certbot_apache import override_fedora\nfrom certbot_apache import override_darwin\nfrom certbot_apache import override_debian\nfrom certbot_apache import override_centos\nfrom certbot_apache import override_gentoo\nfrom certbot_apache import override_suse\n\nOVERRIDE_CLASSES = {\n \"arch\": override_arch.ArchConfigurator,\n \"darwin\": override_darwin.DarwinConfigurator,\n \"debian\": override_debian.DebianConfigurator,\n \"ubuntu\": override_debian.DebianConfigurator,\n \"centos\": override_centos.CentOSConfigurator,\n \"centos linux\": override_centos.CentOSConfigurator,\n \"fedora_old\": override_centos.CentOSConfigurator,\n \"fedora\": override_fedora.FedoraConfigurator,\n \"ol\": override_centos.CentOSConfigurator,\n \"red hat enterprise linux server\": override_centos.CentOSConfigurator,\n \"rhel\": override_centos.CentOSConfigurator,\n \"amazon\": override_centos.CentOSConfigurator,\n \"gentoo\": override_gentoo.GentooConfigurator,\n \"gentoo base system\": override_gentoo.GentooConfigurator,\n \"opensuse\": override_suse.OpenSUSEConfigurator,\n \"suse\": override_suse.OpenSUSEConfigurator,\n}\n\n\ndef get_configurator():\n \"\"\" Get correct configurator class based on the OS fingerprint \"\"\"\n os_name, os_version = util.get_os_info()\n os_name = os_name.lower()\n override_class = None\n\n # Special case for older Fedora versions\n if os_name == 'fedora' and LooseVersion(os_version) < LooseVersion('29'):\n os_name = 'fedora_old'\n\n try:\n override_class = OVERRIDE_CLASSES[os_name]\n except KeyError:\n # OS not found in the list\n os_like = util.get_systemd_os_like()\n if os_like:\n for os_name in os_like:\n if os_name in OVERRIDE_CLASSES.keys():\n override_class = OVERRIDE_CLASSES[os_name]\n if not override_class:\n # No override class found, return the generic configurator\n override_class = configurator.ApacheConfigurator\n return override_class\n\n\nENTRYPOINT = get_configurator()\n", "path": "certbot-apache/certbot_apache/entrypoint.py"}]}
| 1,339 | 155 |
gh_patches_debug_33938
|
rasdani/github-patches
|
git_diff
|
lightly-ai__lightly-1294
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error Using val_online_cls_top1
print(f"max {metric}: {max(metric_callback.val_metrics[metric])}")
KeyError: 'val_online_cls_top1'
</issue>
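
One plausible reading of the error (an assumption - the issue only shows the failing `print`): the `val_online_cls_top1` key is only populated if the validation logging call succeeds, and Lightning's logging API has no `batch_size_per_device` parameter, only `batch_size`, which is what the patch further below switches to. A minimal check, assuming a recent `pytorch_lightning` release:

```python
from inspect import signature

from pytorch_lightning import LightningModule

params = signature(LightningModule.log_dict).parameters
assert "batch_size" in params
assert "batch_size_per_device" not in params
```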
<code>
[start of benchmarks/imagenet/resnet50/swav.py]
1 import math
2 from typing import List, Tuple
3
4 import torch
5 from pytorch_lightning import LightningModule
6 from torch import Tensor
7 from torch.nn import Identity, ModuleList
8 from torch.nn import functional as F
9 from torchvision.models import resnet50
10
11 from lightly.loss.memory_bank import MemoryBankModule
12 from lightly.loss.swav_loss import SwaVLoss
13 from lightly.models.modules import SwaVProjectionHead, SwaVPrototypes
14 from lightly.models.utils import get_weight_decay_parameters
15 from lightly.transforms import SwaVTransform
16 from lightly.utils.benchmarking import OnlineLinearClassifier
17 from lightly.utils.lars import LARS
18 from lightly.utils.scheduler import CosineWarmupScheduler
19
20 CROP_COUNTS: Tuple[int, int] = (2, 6)
21
22
23 class SwAV(LightningModule):
24 def __init__(self, batch_size_per_device: int, num_classes: int) -> None:
25 super().__init__()
26 self.save_hyperparameters()
27 self.batch_size_per_device = batch_size_per_device
28
29 resnet = resnet50()
30 resnet.fc = Identity() # Ignore classification head
31 self.backbone = resnet
32 self.projection_head = SwaVProjectionHead()
33 self.prototypes = SwaVPrototypes(n_steps_frozen_prototypes=1)
34 self.criterion = SwaVLoss(sinkhorn_gather_distributed=True)
35 self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)
36
37 # Use a queue for small batch sizes (<= 256).
38 self.start_queue_at_epoch = 15
39 self.n_batches_in_queue = 15
40 self.queues = ModuleList(
41 [
42 MemoryBankModule(
43 size=self.n_batches_in_queue * self.batch_size_per_device
44 )
45 for _ in range(CROP_COUNTS[0])
46 ]
47 )
48
49 def forward(self, x: Tensor) -> Tensor:
50 return self.backbone(x)
51
52 def project(self, x: Tensor) -> Tensor:
53 x = self.projection_head(x)
54 return F.normalize(x, dim=1, p=2)
55
56 def training_step(
57 self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int
58 ) -> Tensor:
59 # Normalize the prototypes so they are on the unit sphere.
60 self.prototypes.normalize()
61
62 # The dataloader returns a list of image crops where the
63 # first few items are high resolution crops and the rest are low
64 # resolution crops.
65 multi_crops, targets = batch[0], batch[1]
66
67 # Forward pass through backbone and projection head.
68 multi_crop_features = [
69 self.forward(crops).flatten(start_dim=1) for crops in multi_crops
70 ]
71 multi_crop_projections = [
72 self.project(features) for features in multi_crop_features
73 ]
74
75 # Get the queue projections and logits.
76 queue_crop_logits = None
77 with torch.no_grad():
78 if self.current_epoch >= self.start_queue_at_epoch:
79 # Start filling the queue.
80 queue_crop_projections = _update_queue(
81 projections=multi_crop_projections[: CROP_COUNTS[0]],
82 queues=self.queues,
83 )
84 if batch_idx > self.n_batches_in_queue:
85 # The queue is filled, so we can start using it.
86 queue_crop_logits = [
87 self.prototypes(projections, step=self.current_epoch)
88 for projections in queue_crop_projections
89 ]
90
91 # Get the rest of the multi-crop logits.
92 multi_crop_logits = [
93 self.prototypes(projections, step=self.current_epoch)
94 for projections in multi_crop_projections
95 ]
96
97 # Calculate the SwAV loss.
98 loss = self.criterion(
99 high_resolution_outputs=multi_crop_logits[: CROP_COUNTS[0]],
100 low_resolution_outputs=multi_crop_logits[CROP_COUNTS[0] :],
101 queue_outputs=queue_crop_logits,
102 )
103 self.log(
104 "train_loss",
105 loss,
106 prog_bar=True,
107 sync_dist=True,
108 batch_size_per_device=len(targets),
109 )
110
111 # Calculate the classification loss.
112 cls_loss, cls_log = self.online_classifier.training_step(
113 (multi_crop_features[0].detach(), targets), batch_idx
114 )
115 self.log_dict(cls_log, sync_dist=True, batch_size_per_device=len(targets))
116 return loss + cls_loss
117
118 def validation_step(
119 self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int
120 ) -> Tensor:
121 images, targets = batch[0], batch[1]
122 features = self.forward(images).flatten(start_dim=1)
123 cls_loss, cls_log = self.online_classifier.validation_step(
124 (features.detach(), targets), batch_idx
125 )
126 self.log_dict(
127 cls_log, prog_bar=True, sync_dist=True, batch_size_per_device=len(targets)
128 )
129 return cls_loss
130
131 def configure_optimizers(self):
132 # Don't use weight decay for batch norm, bias parameters, and classification
133 # head to improve performance.
134 params, params_no_weight_decay = get_weight_decay_parameters(
135 [self.backbone, self.projection_head, self.prototypes]
136 )
137 optimizer = LARS(
138 [
139 {"name": "swav", "params": params},
140 {
141 "name": "swav_no_weight_decay",
142 "params": params_no_weight_decay,
143 "weight_decay": 0.0,
144 },
145 {
146 "name": "online_classifier",
147 "params": self.online_classifier.parameters(),
148 "weight_decay": 0.0,
149 },
150 ],
151 # Smaller learning rate for smaller batches: lr=0.6 for batch_size=256
152 # scaled linearly by batch size to lr=4.8 for batch_size=2048.
153 # See Appendix A.1. and A.6. in SwAV paper https://arxiv.org/pdf/2006.09882.pdf
154 lr=0.6 * (self.batch_size_per_device * self.trainer.world_size) / 256,
155 momentum=0.9,
156 weight_decay=1e-6,
157 )
158 scheduler = {
159 "scheduler": CosineWarmupScheduler(
160 optimizer=optimizer,
161 warmup_epochs=(
162 self.trainer.estimated_stepping_batches
163 / self.trainer.max_epochs
164 * 10
165 ),
166 max_epochs=self.trainer.estimated_stepping_batches,
167 end_value=0.0006
168 * (self.batch_size_per_device * self.trainer.world_size)
169 / 256,
170 ),
171 "interval": "step",
172 }
173 return [optimizer], [scheduler]
174
175
176 transform = SwaVTransform(crop_counts=CROP_COUNTS)
177
178
179 @torch.no_grad()
180 def _update_queue(
181 projections: List[Tensor],
182 queues: ModuleList,
183 ):
184 """Adds the high resolution projections to the queues and returns the queues."""
185
186 if len(projections) != len(queues):
187 raise ValueError(
188 f"The number of queues ({len(queues)}) should be equal to the number of high "
189 f"resolution inputs ({len(projections)})."
190 )
191
192 # Get the queue projections
193 queue_projections = []
194 for i in range(len(queues)):
195 _, projections = queues[i](projections[i], update=True)
196 # Queue projections are in (num_ftrs X queue_length) shape, while the high res
197 # projections are in (batch_size_per_device X num_ftrs). Swap the axes for interoperability.
198 projections = torch.permute(projections, (1, 0))
199 queue_projections.append(projections)
200
201 return queue_projections
202
[end of benchmarks/imagenet/resnet50/swav.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benchmarks/imagenet/resnet50/swav.py b/benchmarks/imagenet/resnet50/swav.py
--- a/benchmarks/imagenet/resnet50/swav.py
+++ b/benchmarks/imagenet/resnet50/swav.py
@@ -105,14 +105,14 @@
loss,
prog_bar=True,
sync_dist=True,
- batch_size_per_device=len(targets),
+ batch_size=len(targets),
)
# Calculate the classification loss.
cls_loss, cls_log = self.online_classifier.training_step(
(multi_crop_features[0].detach(), targets), batch_idx
)
- self.log_dict(cls_log, sync_dist=True, batch_size_per_device=len(targets))
+ self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))
return loss + cls_loss
def validation_step(
@@ -123,9 +123,7 @@
cls_loss, cls_log = self.online_classifier.validation_step(
(features.detach(), targets), batch_idx
)
- self.log_dict(
- cls_log, prog_bar=True, sync_dist=True, batch_size_per_device=len(targets)
- )
+ self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))
return cls_loss
def configure_optimizers(self):
@@ -192,10 +190,10 @@
# Get the queue projections
queue_projections = []
for i in range(len(queues)):
- _, projections = queues[i](projections[i], update=True)
+ _, queue_proj = queues[i](projections[i], update=True)
# Queue projections are in (num_ftrs X queue_length) shape, while the high res
# projections are in (batch_size_per_device X num_ftrs). Swap the axes for interoperability.
- projections = torch.permute(projections, (1, 0))
- queue_projections.append(projections)
+ queue_proj = torch.permute(queue_proj, (1, 0))
+ queue_projections.append(queue_proj)
return queue_projections
|
{"golden_diff": "diff --git a/benchmarks/imagenet/resnet50/swav.py b/benchmarks/imagenet/resnet50/swav.py\n--- a/benchmarks/imagenet/resnet50/swav.py\n+++ b/benchmarks/imagenet/resnet50/swav.py\n@@ -105,14 +105,14 @@\n loss,\n prog_bar=True,\n sync_dist=True,\n- batch_size_per_device=len(targets),\n+ batch_size=len(targets),\n )\n \n # Calculate the classification loss.\n cls_loss, cls_log = self.online_classifier.training_step(\n (multi_crop_features[0].detach(), targets), batch_idx\n )\n- self.log_dict(cls_log, sync_dist=True, batch_size_per_device=len(targets))\n+ self.log_dict(cls_log, sync_dist=True, batch_size=len(targets))\n return loss + cls_loss\n \n def validation_step(\n@@ -123,9 +123,7 @@\n cls_loss, cls_log = self.online_classifier.validation_step(\n (features.detach(), targets), batch_idx\n )\n- self.log_dict(\n- cls_log, prog_bar=True, sync_dist=True, batch_size_per_device=len(targets)\n- )\n+ self.log_dict(cls_log, prog_bar=True, sync_dist=True, batch_size=len(targets))\n return cls_loss\n \n def configure_optimizers(self):\n@@ -192,10 +190,10 @@\n # Get the queue projections\n queue_projections = []\n for i in range(len(queues)):\n- _, projections = queues[i](projections[i], update=True)\n+ _, queue_proj = queues[i](projections[i], update=True)\n # Queue projections are in (num_ftrs X queue_length) shape, while the high res\n # projections are in (batch_size_per_device X num_ftrs). Swap the axes for interoperability.\n- projections = torch.permute(projections, (1, 0))\n- queue_projections.append(projections)\n+ queue_proj = torch.permute(queue_proj, (1, 0))\n+ queue_projections.append(queue_proj)\n \n return queue_projections\n", "issue": "Error Using val_online_cls_top1\n print(f\"max {metric}: {max(metric_callback.val_metrics[metric])}\")\r\nKeyError: 'val_online_cls_top1'\n", "before_files": [{"content": "import math\nfrom typing import List, Tuple\n\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch import Tensor\nfrom torch.nn import Identity, ModuleList\nfrom torch.nn import functional as F\nfrom torchvision.models import resnet50\n\nfrom lightly.loss.memory_bank import MemoryBankModule\nfrom lightly.loss.swav_loss import SwaVLoss\nfrom lightly.models.modules import SwaVProjectionHead, SwaVPrototypes\nfrom lightly.models.utils import get_weight_decay_parameters\nfrom lightly.transforms import SwaVTransform\nfrom lightly.utils.benchmarking import OnlineLinearClassifier\nfrom lightly.utils.lars import LARS\nfrom lightly.utils.scheduler import CosineWarmupScheduler\n\nCROP_COUNTS: Tuple[int, int] = (2, 6)\n\n\nclass SwAV(LightningModule):\n def __init__(self, batch_size_per_device: int, num_classes: int) -> None:\n super().__init__()\n self.save_hyperparameters()\n self.batch_size_per_device = batch_size_per_device\n\n resnet = resnet50()\n resnet.fc = Identity() # Ignore classification head\n self.backbone = resnet\n self.projection_head = SwaVProjectionHead()\n self.prototypes = SwaVPrototypes(n_steps_frozen_prototypes=1)\n self.criterion = SwaVLoss(sinkhorn_gather_distributed=True)\n self.online_classifier = OnlineLinearClassifier(num_classes=num_classes)\n\n # Use a queue for small batch sizes (<= 256).\n self.start_queue_at_epoch = 15\n self.n_batches_in_queue = 15\n self.queues = ModuleList(\n [\n MemoryBankModule(\n size=self.n_batches_in_queue * self.batch_size_per_device\n )\n for _ in range(CROP_COUNTS[0])\n ]\n )\n\n def forward(self, x: Tensor) -> Tensor:\n return self.backbone(x)\n\n def project(self, x: Tensor) -> Tensor:\n x = 
self.projection_head(x)\n return F.normalize(x, dim=1, p=2)\n\n def training_step(\n self, batch: Tuple[List[Tensor], Tensor, List[str]], batch_idx: int\n ) -> Tensor:\n # Normalize the prototypes so they are on the unit sphere.\n self.prototypes.normalize()\n\n # The dataloader returns a list of image crops where the\n # first few items are high resolution crops and the rest are low\n # resolution crops.\n multi_crops, targets = batch[0], batch[1]\n\n # Forward pass through backbone and projection head.\n multi_crop_features = [\n self.forward(crops).flatten(start_dim=1) for crops in multi_crops\n ]\n multi_crop_projections = [\n self.project(features) for features in multi_crop_features\n ]\n\n # Get the queue projections and logits.\n queue_crop_logits = None\n with torch.no_grad():\n if self.current_epoch >= self.start_queue_at_epoch:\n # Start filling the queue.\n queue_crop_projections = _update_queue(\n projections=multi_crop_projections[: CROP_COUNTS[0]],\n queues=self.queues,\n )\n if batch_idx > self.n_batches_in_queue:\n # The queue is filled, so we can start using it.\n queue_crop_logits = [\n self.prototypes(projections, step=self.current_epoch)\n for projections in queue_crop_projections\n ]\n\n # Get the rest of the multi-crop logits.\n multi_crop_logits = [\n self.prototypes(projections, step=self.current_epoch)\n for projections in multi_crop_projections\n ]\n\n # Calculate the SwAV loss.\n loss = self.criterion(\n high_resolution_outputs=multi_crop_logits[: CROP_COUNTS[0]],\n low_resolution_outputs=multi_crop_logits[CROP_COUNTS[0] :],\n queue_outputs=queue_crop_logits,\n )\n self.log(\n \"train_loss\",\n loss,\n prog_bar=True,\n sync_dist=True,\n batch_size_per_device=len(targets),\n )\n\n # Calculate the classification loss.\n cls_loss, cls_log = self.online_classifier.training_step(\n (multi_crop_features[0].detach(), targets), batch_idx\n )\n self.log_dict(cls_log, sync_dist=True, batch_size_per_device=len(targets))\n return loss + cls_loss\n\n def validation_step(\n self, batch: Tuple[Tensor, Tensor, List[str]], batch_idx: int\n ) -> Tensor:\n images, targets = batch[0], batch[1]\n features = self.forward(images).flatten(start_dim=1)\n cls_loss, cls_log = self.online_classifier.validation_step(\n (features.detach(), targets), batch_idx\n )\n self.log_dict(\n cls_log, prog_bar=True, sync_dist=True, batch_size_per_device=len(targets)\n )\n return cls_loss\n\n def configure_optimizers(self):\n # Don't use weight decay for batch norm, bias parameters, and classification\n # head to improve performance.\n params, params_no_weight_decay = get_weight_decay_parameters(\n [self.backbone, self.projection_head, self.prototypes]\n )\n optimizer = LARS(\n [\n {\"name\": \"swav\", \"params\": params},\n {\n \"name\": \"swav_no_weight_decay\",\n \"params\": params_no_weight_decay,\n \"weight_decay\": 0.0,\n },\n {\n \"name\": \"online_classifier\",\n \"params\": self.online_classifier.parameters(),\n \"weight_decay\": 0.0,\n },\n ],\n # Smaller learning rate for smaller batches: lr=0.6 for batch_size=256\n # scaled linearly by batch size to lr=4.8 for batch_size=2048.\n # See Appendix A.1. and A.6. 
in SwAV paper https://arxiv.org/pdf/2006.09882.pdf\n lr=0.6 * (self.batch_size_per_device * self.trainer.world_size) / 256,\n momentum=0.9,\n weight_decay=1e-6,\n )\n scheduler = {\n \"scheduler\": CosineWarmupScheduler(\n optimizer=optimizer,\n warmup_epochs=(\n self.trainer.estimated_stepping_batches\n / self.trainer.max_epochs\n * 10\n ),\n max_epochs=self.trainer.estimated_stepping_batches,\n end_value=0.0006\n * (self.batch_size_per_device * self.trainer.world_size)\n / 256,\n ),\n \"interval\": \"step\",\n }\n return [optimizer], [scheduler]\n\n\ntransform = SwaVTransform(crop_counts=CROP_COUNTS)\n\n\[email protected]_grad()\ndef _update_queue(\n projections: List[Tensor],\n queues: ModuleList,\n):\n \"\"\"Adds the high resolution projections to the queues and returns the queues.\"\"\"\n\n if len(projections) != len(queues):\n raise ValueError(\n f\"The number of queues ({len(queues)}) should be equal to the number of high \"\n f\"resolution inputs ({len(projections)}).\"\n )\n\n # Get the queue projections\n queue_projections = []\n for i in range(len(queues)):\n _, projections = queues[i](projections[i], update=True)\n # Queue projections are in (num_ftrs X queue_length) shape, while the high res\n # projections are in (batch_size_per_device X num_ftrs). Swap the axes for interoperability.\n projections = torch.permute(projections, (1, 0))\n queue_projections.append(projections)\n\n return queue_projections\n", "path": "benchmarks/imagenet/resnet50/swav.py"}]}
| 2,738 | 469 |
gh_patches_debug_6071
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-3247
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possible bug in poutine/trace_messenger.py, function identify_dense_edges
Hi,
I found some strange code pattern in function identify_dense_edges, in file [poutine/trace_messenger.py](https://github.com/pyro-ppl/pyro/blob/dev/pyro/poutine/trace_messenger.py#L21).
I notice two sequential instances of:
`if site_is_subsample(node):
continue`
The first one appears at line 17 (in the main loop of the function) and the second one at line 21 (in the nested loop).
It seems the second condition will never be reached; if it is, the first one would be selected, going to the next iteration of the main loop.
Should the second not be as follows ?
`if site_is_subsample(past_node):
continue`
(this would be consistent with considering only pairs of non subsample nodes).
Thanks,
Regards,
Xavier
</issue>
<code>
[start of pyro/poutine/trace_messenger.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import sys
5
6 from .messenger import Messenger
7 from .trace_struct import Trace
8 from .util import site_is_subsample
9
10
11 def identify_dense_edges(trace):
12 """
13 Modifies a trace in-place by adding all edges based on the
14 `cond_indep_stack` information stored at each site.
15 """
16 for name, node in trace.nodes.items():
17 if site_is_subsample(node):
18 continue
19 if node["type"] == "sample":
20 for past_name, past_node in trace.nodes.items():
21 if site_is_subsample(node):
22 continue
23 if past_node["type"] == "sample":
24 if past_name == name:
25 break
26 past_node_independent = False
27 for query, target in zip(
28 node["cond_indep_stack"], past_node["cond_indep_stack"]
29 ):
30 if (
31 query.name == target.name
32 and query.counter != target.counter
33 ):
34 past_node_independent = True
35 break
36 if not past_node_independent:
37 trace.add_edge(past_name, name)
38
39
40 class TraceMessenger(Messenger):
41 """
42 Return a handler that records the inputs and outputs of primitive calls
43 and their dependencies.
44
45 Consider the following Pyro program:
46
47 >>> def model(x):
48 ... s = pyro.param("s", torch.tensor(0.5))
49 ... z = pyro.sample("z", dist.Normal(x, s))
50 ... return z ** 2
51
52 We can record its execution using ``trace``
53 and use the resulting data structure to compute the log-joint probability
54 of all of the sample sites in the execution or extract all parameters.
55
56 >>> trace = pyro.poutine.trace(model).get_trace(0.0)
57 >>> logp = trace.log_prob_sum()
58 >>> params = [trace.nodes[name]["value"].unconstrained() for name in trace.param_nodes]
59
60 :param fn: a stochastic function (callable containing Pyro primitive calls)
61 :param graph_type: string that specifies the kind of graph to construct
62 :param param_only: if true, only records params and not samples
63 :returns: stochastic function decorated with a :class:`~pyro.poutine.trace_messenger.TraceMessenger`
64 """
65
66 def __init__(self, graph_type=None, param_only=None):
67 """
68 :param string graph_type: string that specifies the type of graph
69 to construct (currently only "flat" or "dense" supported)
70 :param param_only: boolean that specifies whether to record sample sites
71 """
72 super().__init__()
73 if graph_type is None:
74 graph_type = "flat"
75 if param_only is None:
76 param_only = False
77 assert graph_type in ("flat", "dense")
78 self.graph_type = graph_type
79 self.param_only = param_only
80 self.trace = Trace(graph_type=self.graph_type)
81
82 def __enter__(self):
83 self.trace = Trace(graph_type=self.graph_type)
84 return super().__enter__()
85
86 def __exit__(self, *args, **kwargs):
87 """
88 Adds appropriate edges based on cond_indep_stack information
89 upon exiting the context.
90 """
91 if self.param_only:
92 for node in list(self.trace.nodes.values()):
93 if node["type"] != "param":
94 self.trace.remove_node(node["name"])
95 if self.graph_type == "dense":
96 identify_dense_edges(self.trace)
97 return super().__exit__(*args, **kwargs)
98
99 def __call__(self, fn):
100 """
101 TODO docs
102 """
103 return TraceHandler(self, fn)
104
105 def get_trace(self):
106 """
107 :returns: data structure
108 :rtype: pyro.poutine.Trace
109
110 Helper method for a very common use case.
111 Returns a shallow copy of ``self.trace``.
112 """
113 return self.trace.copy()
114
115 def _reset(self):
116 tr = Trace(graph_type=self.graph_type)
117 if "_INPUT" in self.trace.nodes:
118 tr.add_node(
119 "_INPUT",
120 name="_INPUT",
121 type="input",
122 args=self.trace.nodes["_INPUT"]["args"],
123 kwargs=self.trace.nodes["_INPUT"]["kwargs"],
124 )
125 self.trace = tr
126 super()._reset()
127
128 def _pyro_post_sample(self, msg):
129 if self.param_only:
130 return
131 if msg["infer"].get("_do_not_trace"):
132 assert msg["infer"].get("is_auxiliary")
133 assert not msg["is_observed"]
134 return
135 self.trace.add_node(msg["name"], **msg.copy())
136
137 def _pyro_post_param(self, msg):
138 self.trace.add_node(msg["name"], **msg.copy())
139
140
141 class TraceHandler:
142 """
143 Execution trace poutine.
144
145 A TraceHandler records the input and output to every Pyro primitive
146 and stores them as a site in a Trace().
147 This should, in theory, be sufficient information for every inference algorithm
148 (along with the implicit computational graph in the Variables?)
149
150 We can also use this for visualization.
151 """
152
153 def __init__(self, msngr, fn):
154 self.fn = fn
155 self.msngr = msngr
156
157 def __call__(self, *args, **kwargs):
158 """
159 Runs the stochastic function stored in this poutine,
160 with additional side effects.
161
162 Resets self.trace to an empty trace,
163 installs itself on the global execution stack,
164 runs self.fn with the given arguments,
165 uninstalls itself from the global execution stack,
166 stores the arguments and return value of the function in special sites,
167 and returns self.fn's return value
168 """
169 with self.msngr:
170 self.msngr.trace.add_node(
171 "_INPUT", name="_INPUT", type="args", args=args, kwargs=kwargs
172 )
173 try:
174 ret = self.fn(*args, **kwargs)
175 except (ValueError, RuntimeError) as e:
176 exc_type, exc_value, traceback = sys.exc_info()
177 shapes = self.msngr.trace.format_shapes()
178 exc = exc_type("{}\n{}".format(exc_value, shapes))
179 exc = exc.with_traceback(traceback)
180 raise exc from e
181 self.msngr.trace.add_node(
182 "_RETURN", name="_RETURN", type="return", value=ret
183 )
184 return ret
185
186 @property
187 def trace(self):
188 return self.msngr.trace
189
190 def get_trace(self, *args, **kwargs):
191 """
192 :returns: data structure
193 :rtype: pyro.poutine.Trace
194
195 Helper method for a very common use case.
196 Calls this poutine and returns its trace instead of the function's return value.
197 """
198 self(*args, **kwargs)
199 return self.msngr.get_trace()
200
[end of pyro/poutine/trace_messenger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/poutine/trace_messenger.py b/pyro/poutine/trace_messenger.py
--- a/pyro/poutine/trace_messenger.py
+++ b/pyro/poutine/trace_messenger.py
@@ -18,7 +18,7 @@
continue
if node["type"] == "sample":
for past_name, past_node in trace.nodes.items():
- if site_is_subsample(node):
+ if site_is_subsample(past_node):
continue
if past_node["type"] == "sample":
if past_name == name:
|
{"golden_diff": "diff --git a/pyro/poutine/trace_messenger.py b/pyro/poutine/trace_messenger.py\n--- a/pyro/poutine/trace_messenger.py\n+++ b/pyro/poutine/trace_messenger.py\n@@ -18,7 +18,7 @@\n continue\n if node[\"type\"] == \"sample\":\n for past_name, past_node in trace.nodes.items():\n- if site_is_subsample(node):\n+ if site_is_subsample(past_node):\n continue\n if past_node[\"type\"] == \"sample\":\n if past_name == name:\n", "issue": "Possible bug in poutine/trace_messenger.py, function identify_dense_edges\nHi,\r\n\r\nI found some strange code pattern in function identify_dense_edges, in file [poutine/trace_messenger.py](https://github.com/pyro-ppl/pyro/blob/dev/pyro/poutine/trace_messenger.py#L21).\r\nI notice two sequential instances of:\r\n`if site_is_subsample(node):\r\n continue`\r\nThe first one appears at line 17 (in the main loop of the function) and the second one at line 21 (in the nested loop).\r\nIt seems the second condition will never be reached; if it is, the first one would be selected, going to the next iteration of the main loop.\r\n\r\nShould the second not be as follows ?\r\n`if site_is_subsample(past_node):\r\n continue`\r\n\r\n(this would be consistent with considering only pairs of non subsample nodes).\r\n\r\nThanks,\r\n\r\nRegards,\r\n\r\nXavier\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\n\nfrom .messenger import Messenger\nfrom .trace_struct import Trace\nfrom .util import site_is_subsample\n\n\ndef identify_dense_edges(trace):\n \"\"\"\n Modifies a trace in-place by adding all edges based on the\n `cond_indep_stack` information stored at each site.\n \"\"\"\n for name, node in trace.nodes.items():\n if site_is_subsample(node):\n continue\n if node[\"type\"] == \"sample\":\n for past_name, past_node in trace.nodes.items():\n if site_is_subsample(node):\n continue\n if past_node[\"type\"] == \"sample\":\n if past_name == name:\n break\n past_node_independent = False\n for query, target in zip(\n node[\"cond_indep_stack\"], past_node[\"cond_indep_stack\"]\n ):\n if (\n query.name == target.name\n and query.counter != target.counter\n ):\n past_node_independent = True\n break\n if not past_node_independent:\n trace.add_edge(past_name, name)\n\n\nclass TraceMessenger(Messenger):\n \"\"\"\n Return a handler that records the inputs and outputs of primitive calls\n and their dependencies.\n\n Consider the following Pyro program:\n\n >>> def model(x):\n ... s = pyro.param(\"s\", torch.tensor(0.5))\n ... z = pyro.sample(\"z\", dist.Normal(x, s))\n ... 
return z ** 2\n\n We can record its execution using ``trace``\n and use the resulting data structure to compute the log-joint probability\n of all of the sample sites in the execution or extract all parameters.\n\n >>> trace = pyro.poutine.trace(model).get_trace(0.0)\n >>> logp = trace.log_prob_sum()\n >>> params = [trace.nodes[name][\"value\"].unconstrained() for name in trace.param_nodes]\n\n :param fn: a stochastic function (callable containing Pyro primitive calls)\n :param graph_type: string that specifies the kind of graph to construct\n :param param_only: if true, only records params and not samples\n :returns: stochastic function decorated with a :class:`~pyro.poutine.trace_messenger.TraceMessenger`\n \"\"\"\n\n def __init__(self, graph_type=None, param_only=None):\n \"\"\"\n :param string graph_type: string that specifies the type of graph\n to construct (currently only \"flat\" or \"dense\" supported)\n :param param_only: boolean that specifies whether to record sample sites\n \"\"\"\n super().__init__()\n if graph_type is None:\n graph_type = \"flat\"\n if param_only is None:\n param_only = False\n assert graph_type in (\"flat\", \"dense\")\n self.graph_type = graph_type\n self.param_only = param_only\n self.trace = Trace(graph_type=self.graph_type)\n\n def __enter__(self):\n self.trace = Trace(graph_type=self.graph_type)\n return super().__enter__()\n\n def __exit__(self, *args, **kwargs):\n \"\"\"\n Adds appropriate edges based on cond_indep_stack information\n upon exiting the context.\n \"\"\"\n if self.param_only:\n for node in list(self.trace.nodes.values()):\n if node[\"type\"] != \"param\":\n self.trace.remove_node(node[\"name\"])\n if self.graph_type == \"dense\":\n identify_dense_edges(self.trace)\n return super().__exit__(*args, **kwargs)\n\n def __call__(self, fn):\n \"\"\"\n TODO docs\n \"\"\"\n return TraceHandler(self, fn)\n\n def get_trace(self):\n \"\"\"\n :returns: data structure\n :rtype: pyro.poutine.Trace\n\n Helper method for a very common use case.\n Returns a shallow copy of ``self.trace``.\n \"\"\"\n return self.trace.copy()\n\n def _reset(self):\n tr = Trace(graph_type=self.graph_type)\n if \"_INPUT\" in self.trace.nodes:\n tr.add_node(\n \"_INPUT\",\n name=\"_INPUT\",\n type=\"input\",\n args=self.trace.nodes[\"_INPUT\"][\"args\"],\n kwargs=self.trace.nodes[\"_INPUT\"][\"kwargs\"],\n )\n self.trace = tr\n super()._reset()\n\n def _pyro_post_sample(self, msg):\n if self.param_only:\n return\n if msg[\"infer\"].get(\"_do_not_trace\"):\n assert msg[\"infer\"].get(\"is_auxiliary\")\n assert not msg[\"is_observed\"]\n return\n self.trace.add_node(msg[\"name\"], **msg.copy())\n\n def _pyro_post_param(self, msg):\n self.trace.add_node(msg[\"name\"], **msg.copy())\n\n\nclass TraceHandler:\n \"\"\"\n Execution trace poutine.\n\n A TraceHandler records the input and output to every Pyro primitive\n and stores them as a site in a Trace().\n This should, in theory, be sufficient information for every inference algorithm\n (along with the implicit computational graph in the Variables?)\n\n We can also use this for visualization.\n \"\"\"\n\n def __init__(self, msngr, fn):\n self.fn = fn\n self.msngr = msngr\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Runs the stochastic function stored in this poutine,\n with additional side effects.\n\n Resets self.trace to an empty trace,\n installs itself on the global execution stack,\n runs self.fn with the given arguments,\n uninstalls itself from the global execution stack,\n stores the arguments and return value of 
the function in special sites,\n and returns self.fn's return value\n \"\"\"\n with self.msngr:\n self.msngr.trace.add_node(\n \"_INPUT\", name=\"_INPUT\", type=\"args\", args=args, kwargs=kwargs\n )\n try:\n ret = self.fn(*args, **kwargs)\n except (ValueError, RuntimeError) as e:\n exc_type, exc_value, traceback = sys.exc_info()\n shapes = self.msngr.trace.format_shapes()\n exc = exc_type(\"{}\\n{}\".format(exc_value, shapes))\n exc = exc.with_traceback(traceback)\n raise exc from e\n self.msngr.trace.add_node(\n \"_RETURN\", name=\"_RETURN\", type=\"return\", value=ret\n )\n return ret\n\n @property\n def trace(self):\n return self.msngr.trace\n\n def get_trace(self, *args, **kwargs):\n \"\"\"\n :returns: data structure\n :rtype: pyro.poutine.Trace\n\n Helper method for a very common use case.\n Calls this poutine and returns its trace instead of the function's return value.\n \"\"\"\n self(*args, **kwargs)\n return self.msngr.get_trace()\n", "path": "pyro/poutine/trace_messenger.py"}]}
| 2,706 | 126 |
gh_patches_debug_12287
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1493
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive E1029 on API GW Stage Variables with dot syntax
*cfn-lint version: 0.29.6*
In #511 you fixed a false positive when referencing API GW Stage Variables via the syntax `${stageVariables['variableName']}`, but they can also be referenced using dot syntax: `${stageVariables.variableName}`.
```
Request:
Type: AWS::ApiGateway::Method
Properties:
Integration:
Type: HTTP_PROXY
IntegrationHttpMethod: !Ref HttpMethod
ConnectionType: VPC_LINK
ConnectionId: "${stageVariables.VPCLink}" <--- E1029
```
</issue>
<code>
[start of src/cfnlint/rules/functions/SubNeeded.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 import six
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9
10
11 class SubNeeded(CloudFormationLintRule):
12 """Check if a substitution string exists without a substitution function"""
13 id = 'E1029'
14 shortdesc = 'Sub is required if a variable is used in a string'
15 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
16 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
17 tags = ['functions', 'sub']
18
19 # Free-form text properties to exclude from this rule
20 # content is part of AWS::CloudFormation::Init
21 # RequestMappingTemplate is because of issue #1485
22 excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
23 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
24 'RequestMappingTemplate']
25 api_excludes = ['Uri', 'Body']
26
27 # IAM Policy has special variables that don't require !Sub, Check for these
28 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
29 # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
30 # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
31 # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
32 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
33 resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
34 '${aws:TokenIssueTime}', '${aws:principaltype}',
35 '${aws:SecureTransport}', '${aws:SourceIp}',
36 '${aws:UserAgent}', '${aws:userid}',
37 '${aws:username}', '${ec2:SourceInstanceARN}',
38 '${iot:Connection.Thing.ThingName}',
39 '${iot:Connection.Thing.ThingTypeName}',
40 '${iot:Connection.Thing.IsAttached}',
41 '${iot:ClientId}', '${transfer:HomeBucket}',
42 '${transfer:HomeDirectory}', '${transfer:HomeFolder}',
43 '${transfer:UserName}', '${redshift:DbUser}',
44 '${cognito-identity.amazonaws.com:aud}',
45 '${cognito-identity.amazonaws.com:sub}',
46 '${cognito-identity.amazonaws.com:amr}']
47
48 # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
49 condition_excludes = [
50 '${redshift:DbUser}',
51 ]
52
53 def __init__(self):
54 """Init"""
55 super(SubNeeded, self).__init__()
56 self.config_definition = {
57 'custom_excludes': {
58 'default': '',
59 'type': 'string'
60 }
61 }
62 self.configure()
63 self.subParameterRegex = re.compile(r'(\$\{[A-Za-z0-9_:\.]+\})')
64
65 def _match_values(self, cfnelem, path):
66 """Recursively search for values matching the searchRegex"""
67 values = []
68 if isinstance(cfnelem, dict):
69 for key in cfnelem:
70 pathprop = path[:]
71 pathprop.append(key)
72 values.extend(self._match_values(cfnelem[key], pathprop))
73 elif isinstance(cfnelem, list):
74 for index, item in enumerate(cfnelem):
75 pathprop = path[:]
76 pathprop.append(index)
77 values.extend(self._match_values(item, pathprop))
78 else:
79 # Leaf node
80 if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):
81 for variable in re.findall(self.subParameterRegex, cfnelem):
82 values.append(path + [variable])
83
84 return values
85
86 def match_values(self, cfn):
87 """
88 Search for values in all parts of the templates that match the searchRegex
89 """
90 results = []
91 results.extend(self._match_values(cfn.template, []))
92 # Globals are removed during a transform. They need to be checked manually
93 results.extend(self._match_values(cfn.template.get('Globals', {}), []))
94 return results
95
96 def _api_exceptions(self, value):
97 """ Key value exceptions """
98 parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
99 return re.match(parameter_search, value)
100
101 def _variable_custom_excluded(self, value):
102 """ User-defined exceptions for variables, anywhere in the file """
103 custom_excludes = self.config['custom_excludes']
104 if custom_excludes:
105 custom_search = re.compile(custom_excludes)
106 return re.match(custom_search, value)
107 return False
108
109 def match(self, cfn):
110 """Basic Rule Matching"""
111
112 matches = []
113
114 # Get a list of paths to every leaf node string containing at least one ${parameter}
115 parameter_string_paths = self.match_values(cfn)
116 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
117 for parameter_string_path in parameter_string_paths:
118 if parameter_string_path[0] in ['Parameters']:
119 continue
120 # Exclude the special IAM variables
121 variable = parameter_string_path[-1]
122
123 if 'Resource' in parameter_string_path:
124 if variable in self.resource_excludes:
125 continue
126 if 'NotResource' in parameter_string_path:
127 if variable in self.resource_excludes:
128 continue
129 if 'Condition' in parameter_string_path:
130 if variable in self.condition_excludes:
131 continue
132
133 # Exclude variables that match custom exclude filters, if configured
134 # (for third-party tools that pre-process templates before uploading them to AWS)
135 if self._variable_custom_excluded(variable):
136 continue
137
138 # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
139 if variable.startswith('${!'):
140 continue
141
142 found_sub = False
143 # Does the path contain an 'Fn::Sub'?
144 for step in parameter_string_path:
145 if step in self.api_excludes:
146 if self._api_exceptions(parameter_string_path[-1]):
147 found_sub = True
148 elif step == 'Fn::Sub' or step in self.excludes:
149 found_sub = True
150
151 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
152 if not found_sub:
153 # Remove the last item (the variable) to prevent multiple errors on 1 line errors
154 path = parameter_string_path[:-1]
155 message = 'Found an embedded parameter "{}" outside of an "Fn::Sub" at {}'.format(
156 variable, '/'.join(map(str, path)))
157 matches.append(RuleMatch(path, message))
158
159 return matches
160
[end of src/cfnlint/rules/functions/SubNeeded.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py
--- a/src/cfnlint/rules/functions/SubNeeded.py
+++ b/src/cfnlint/rules/functions/SubNeeded.py
@@ -22,7 +22,7 @@
excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
'RequestMappingTemplate']
- api_excludes = ['Uri', 'Body']
+ api_excludes = ['Uri', 'Body', 'ConnectionId']
# IAM Policy has special variables that don't require !Sub, Check for these
# https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -22,7 +22,7 @@\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n 'RequestMappingTemplate']\n- api_excludes = ['Uri', 'Body']\n+ api_excludes = ['Uri', 'Body', 'ConnectionId']\n \n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n", "issue": "False positive E1029 on API GW Stage Variables with dot syntax\n*cfn-lint version: 0.29.6*\r\n\r\nIn #511 you fixed a false positive when referencing API GW Stage Variables via the syntax `${stageVariables['variableName']}`, but they can also be referenced using dot syntax: `${stageVariables.variableName}`.\r\n\r\n```\r\n Request:\r\n Type: AWS::ApiGateway::Method\r\n Properties:\r\n Integration:\r\n Type: HTTP_PROXY\r\n IntegrationHttpMethod: !Ref HttpMethod\r\n ConnectionType: VPC_LINK\r\n ConnectionId: \"${stageVariables.VPCLink}\" <--- E1029\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n # content is part of AWS::CloudFormation::Init\n # RequestMappingTemplate is because of issue #1485\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n 'RequestMappingTemplate']\n api_excludes = ['Uri', 'Body']\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # 
https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(cfn.template, []))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n \"\"\"Basic Rule Matching\"\"\"\n\n matches = []\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n if parameter_string_path[0] in ['Parameters']:\n continue\n # Exclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'NotResource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(variable):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to 
prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n variable, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]}
| 2,566 | 168 |
gh_patches_debug_4216
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-4055
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_validator method do not work
Hello!
I have a problem with get_validator component.
Here’s my code:
```
batch_request = BatchRequest(
datasource_name="redshift_",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="daily_chargeback_table_v1", # this is the name of the table you want to retrieve
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
```
I get this exception:
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-67-16f90e0aa558> in <module>
8 )
9 validator = context.get_validator(
---> 10 batch_request=batch_request, expectation_suite_name="test_suite"
11 )
12 print(validator.head())
.
.
.
~/anaconda3/lib/python3.7/site-packages/great_expectations/execution_engine/sqlalchemy_execution_engine.py in _build_selectable_from_batch_spec(self, batch_spec)
979 )
980 .where(
--> 981 sa.and_(
982 split_clause,
983 sampler_fn(**batch_spec["sampling_kwargs"]),
TypeError: table() got an unexpected keyword argument 'schema'
```
My Datasource configuration like:
```
name: redshift_
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
credentials:
host: redshift_host
port: '5443'
username: username
password: password
database: dbname
query:
sslmode: prefer
drivername: postgresql+psycopg2
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
name: whole_table
```
My environment:
MacOS
python 3.7.4
great_expectations 0.13.34
I will be grateful for any help.
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages, setup
2
3 import versioneer
4
5 # Parse requirements.txt
6 with open("requirements.txt") as f:
7 required = f.read().splitlines()
8
9 # try:
10 # import pypandoc
11 # long_description = pypandoc.convert_file('README.md', 'rst')
12 # except (IOError, ImportError):
13 long_description = "Always know what to expect from your data. (See https://github.com/great-expectations/great_expectations for full description)."
14
15 config = {
16 "description": "Always know what to expect from your data.",
17 "author": "The Great Expectations Team",
18 "url": "https://github.com/great-expectations/great_expectations",
19 "author_email": "[email protected]",
20 "version": versioneer.get_version(),
21 "cmdclass": versioneer.get_cmdclass(),
22 "install_requires": required,
23 "extras_require": {
24 "spark": ["pyspark>=2.3.2"],
25 "sqlalchemy": ["sqlalchemy>=1.3.16"],
26 "airflow": ["apache-airflow[s3]>=1.9.0", "boto3>=1.7.3"],
27 "gcp": [
28 "google-cloud>=0.34.0",
29 "google-cloud-storage>=1.28.0",
30 "google-cloud-secret-manager>=1.0.0",
31 "pybigquery==0.4.15",
32 ],
33 "redshift": ["psycopg2>=2.8"],
34 "s3": ["boto3>=1.14"],
35 "aws_secrets": ["boto3>=1.8.7"],
36 "azure_secrets": ["azure-identity>=1.0.0", "azure-keyvault-secrets>=4.0.0"],
37 "snowflake": ["snowflake-sqlalchemy>=1.2"],
38 },
39 "packages": find_packages(exclude=["contrib*", "docs*", "tests*", "examples*"]),
40 "entry_points": {
41 "console_scripts": ["great_expectations=great_expectations.cli:main"]
42 },
43 "name": "great_expectations",
44 "long_description": long_description,
45 "license": "Apache-2.0",
46 "keywords": "data science testing pipeline data quality dataquality validation datavalidation",
47 "include_package_data": True,
48 "classifiers": [
49 "Development Status :: 4 - Beta",
50 "Intended Audience :: Developers",
51 "Intended Audience :: Science/Research",
52 "Intended Audience :: Other Audience",
53 "Topic :: Scientific/Engineering",
54 "Topic :: Software Development",
55 "Topic :: Software Development :: Testing",
56 "License :: OSI Approved :: Apache Software License",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.6",
59 "Programming Language :: Python :: 3.7",
60 "Programming Language :: Python :: 3.8",
61 "Programming Language :: Python :: 3.9",
62 ],
63 }
64
65 setup(**config)
66
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
"install_requires": required,
"extras_require": {
"spark": ["pyspark>=2.3.2"],
- "sqlalchemy": ["sqlalchemy>=1.3.16"],
+ "sqlalchemy": ["sqlalchemy>=1.3.18"],
"airflow": ["apache-airflow[s3]>=1.9.0", "boto3>=1.7.3"],
"gcp": [
"google-cloud>=0.34.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \"install_requires\": required,\n \"extras_require\": {\n \"spark\": [\"pyspark>=2.3.2\"],\n- \"sqlalchemy\": [\"sqlalchemy>=1.3.16\"],\n+ \"sqlalchemy\": [\"sqlalchemy>=1.3.18\"],\n \"airflow\": [\"apache-airflow[s3]>=1.9.0\", \"boto3>=1.7.3\"],\n \"gcp\": [\n \"google-cloud>=0.34.0\",\n", "issue": "get_validator method do not work\nHello!\r\nI have a problem with get_validator component.\r\n\r\nHere\u2019s my code:\r\n```\r\nbatch_request = BatchRequest(\r\n datasource_name=\"redshift_\",\r\n data_connector_name=\"default_inferred_data_connector_name\",\r\n data_asset_name=\"daily_chargeback_table_v1\", # this is the name of the table you want to retrieve\r\n)\r\ncontext.create_expectation_suite(\r\n expectation_suite_name=\"test_suite\", overwrite_existing=True\r\n)\r\nvalidator = context.get_validator(\r\n batch_request=batch_request, expectation_suite_name=\"test_suite\"\r\n)\r\nprint(validator.head())\r\n```\r\n\r\nI get this exception:\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-67-16f90e0aa558> in <module>\r\n 8 )\r\n 9 validator = context.get_validator(\r\n---> 10 batch_request=batch_request, expectation_suite_name=\"test_suite\"\r\n 11 )\r\n 12 print(validator.head())\r\n.\r\n.\r\n.\r\n\r\n~/anaconda3/lib/python3.7/site-packages/great_expectations/execution_engine/sqlalchemy_execution_engine.py in _build_selectable_from_batch_spec(self, batch_spec)\r\n 979 )\r\n 980 .where(\r\n--> 981 sa.and_(\r\n 982 split_clause,\r\n 983 sampler_fn(**batch_spec[\"sampling_kwargs\"]),\r\nTypeError: table() got an unexpected keyword argument 'schema'\r\n```\r\n\r\nMy Datasource configuration like:\r\n```\r\nname: redshift_\r\nclass_name: Datasource\r\nexecution_engine:\r\n class_name: SqlAlchemyExecutionEngine\r\n credentials:\r\n host: redshift_host\r\n port: '5443'\r\n username: username\r\n password: password\r\n database: dbname\r\n query:\r\n sslmode: prefer\r\n drivername: postgresql+psycopg2\r\ndata_connectors:\r\n default_runtime_data_connector_name:\r\n class_name: RuntimeDataConnector\r\n batch_identifiers:\r\n - default_identifier_name\r\n default_inferred_data_connector_name:\r\n class_name: InferredAssetSqlDataConnector\r\n name: whole_table\r\n```\r\n\r\nMy environment:\r\nMacOS\r\npython 3.7.4\r\ngreat_expectations 0.13.34\r\n\r\nI will be grateful for any help.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nimport versioneer\n\n# Parse requirements.txt\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\n# try:\n# import pypandoc\n# long_description = pypandoc.convert_file('README.md', 'rst')\n# except (IOError, ImportError):\nlong_description = \"Always know what to expect from your data. 
(See https://github.com/great-expectations/great_expectations for full description).\"\n\nconfig = {\n \"description\": \"Always know what to expect from your data.\",\n \"author\": \"The Great Expectations Team\",\n \"url\": \"https://github.com/great-expectations/great_expectations\",\n \"author_email\": \"[email protected]\",\n \"version\": versioneer.get_version(),\n \"cmdclass\": versioneer.get_cmdclass(),\n \"install_requires\": required,\n \"extras_require\": {\n \"spark\": [\"pyspark>=2.3.2\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.3.16\"],\n \"airflow\": [\"apache-airflow[s3]>=1.9.0\", \"boto3>=1.7.3\"],\n \"gcp\": [\n \"google-cloud>=0.34.0\",\n \"google-cloud-storage>=1.28.0\",\n \"google-cloud-secret-manager>=1.0.0\",\n \"pybigquery==0.4.15\",\n ],\n \"redshift\": [\"psycopg2>=2.8\"],\n \"s3\": [\"boto3>=1.14\"],\n \"aws_secrets\": [\"boto3>=1.8.7\"],\n \"azure_secrets\": [\"azure-identity>=1.0.0\", \"azure-keyvault-secrets>=4.0.0\"],\n \"snowflake\": [\"snowflake-sqlalchemy>=1.2\"],\n },\n \"packages\": find_packages(exclude=[\"contrib*\", \"docs*\", \"tests*\", \"examples*\"]),\n \"entry_points\": {\n \"console_scripts\": [\"great_expectations=great_expectations.cli:main\"]\n },\n \"name\": \"great_expectations\",\n \"long_description\": long_description,\n \"license\": \"Apache-2.0\",\n \"keywords\": \"data science testing pipeline data quality dataquality validation datavalidation\",\n \"include_package_data\": True,\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Other Audience\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Testing\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n}\n\nsetup(**config)\n", "path": "setup.py"}]}
| 1,806 | 141 |
gh_patches_debug_30197
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-764
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Modules missing EXAMPLES
The following modules are missing EXAMPLES strings:
- ~~packaging/language/cpanm.py~~
- system/open_iscsi.py
</issue>
<code>
[start of packaging/language/cpanm.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2012, Franck Cuny <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 #
21
22 DOCUMENTATION = '''
23 ---
24 module: cpanm
25 short_description: Manages Perl library dependencies.
26 description:
27 - Manage Perl library dependencies.
28 version_added: "1.6"
29 options:
30 name:
31 description:
32 - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
33 required: false
34 default: null
35 aliases: ["pkg"]
36 from_path:
37 description:
38 - The local directory from where to install
39 required: false
40 default: null
41 notest:
42 description:
43 - Do not run unit tests
44 required: false
45 default: false
46 locallib:
47 description:
48 - Specify the install base to install modules
49 required: false
50 default: false
51 mirror:
52 description:
53 - Specifies the base URL for the CPAN mirror to use
54 required: false
55 default: false
56 mirror_only:
57 description:
58 - Use the mirror's index file instead of the CPAN Meta DB
59 required: false
60 default: false
61 examples:
62 - code: "cpanm: name=Dancer"
63 description: Install I(Dancer) perl package.
64 - code: "cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz"
65 description: Install version 0.99_05 of the I(Plack) perl package.
66 - code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib"
67 description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)"
68 - code: "cpanm: from_path=/srv/webapps/my_app/src/"
69 description: Install perl dependencies from local directory.
70 - code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib"
71 description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib).
72 - code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/"
73 description: Install I(Dancer) perl package from a specific mirror
74 notes:
75 - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
76 author: "Franck Cuny (@franckcuny)"
77 '''
78
79 def _is_package_installed(module, name, locallib, cpanm):
80 cmd = ""
81 if locallib:
82 os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
83 cmd = "%s perl -M%s -e '1'" % (cmd, name)
84 res, stdout, stderr = module.run_command(cmd, check_rc=False)
85 if res == 0:
86 return True
87 else:
88 return False
89
90 def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm):
91 # this code should use "%s" like everything else and just return early but not fixing all of it now.
92 # don't copy stuff like this
93 if from_path:
94 cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path)
95 else:
96 cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name)
97
98 if notest is True:
99 cmd = "{cmd} -n".format(cmd=cmd)
100
101 if locallib is not None:
102 cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib)
103
104 if mirror is not None:
105 cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror)
106
107 if mirror_only is True:
108 cmd = "{cmd} --mirror-only".format(cmd=cmd)
109
110 return cmd
111
112
113 def main():
114 arg_spec = dict(
115 name=dict(default=None, required=False, aliases=['pkg']),
116 from_path=dict(default=None, required=False),
117 notest=dict(default=False, type='bool'),
118 locallib=dict(default=None, required=False),
119 mirror=dict(default=None, required=False),
120 mirror_only=dict(default=False, type='bool'),
121 )
122
123 module = AnsibleModule(
124 argument_spec=arg_spec,
125 required_one_of=[['name', 'from_path']],
126 )
127
128 cpanm = module.get_bin_path('cpanm', True)
129 name = module.params['name']
130 from_path = module.params['from_path']
131 notest = module.boolean(module.params.get('notest', False))
132 locallib = module.params['locallib']
133 mirror = module.params['mirror']
134 mirror_only = module.params['mirror_only']
135
136 changed = False
137
138 installed = _is_package_installed(module, name, locallib, cpanm)
139
140 if not installed:
141 out_cpanm = err_cpanm = ''
142 cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm)
143
144 rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
145
146 if rc_cpanm != 0:
147 module.fail_json(msg=err_cpanm, cmd=cmd)
148
149 if err_cpanm and 'is up to date' not in err_cpanm:
150 changed = True
151
152 module.exit_json(changed=changed, binary=cpanm, name=name)
153
154 # import module snippets
155 from ansible.module_utils.basic import *
156
157 main()
158
[end of packaging/language/cpanm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py
--- a/packaging/language/cpanm.py
+++ b/packaging/language/cpanm.py
@@ -58,24 +58,31 @@
- Use the mirror's index file instead of the CPAN Meta DB
required: false
default: false
-examples:
- - code: "cpanm: name=Dancer"
- description: Install I(Dancer) perl package.
- - code: "cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz"
- description: Install version 0.99_05 of the I(Plack) perl package.
- - code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib"
- description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)"
- - code: "cpanm: from_path=/srv/webapps/my_app/src/"
- description: Install perl dependencies from local directory.
- - code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib"
- description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib).
- - code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/"
- description: Install I(Dancer) perl package from a specific mirror
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
author: "Franck Cuny (@franckcuny)"
'''
+EXAMPLES = '''
+# install Dancer perl package
+- cpanm: name=Dancer
+
+# install version 0.99_05 of the Plack perl package
+- cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz
+
+# install Dancer into the specified locallib
+- cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib
+
+# install perl dependencies from local directory
+- cpanm: from_path=/srv/webapps/my_app/src/
+
+# install Dancer perl package without running the unit tests in indicated locallib
+- cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib
+
+# install Dancer perl package from a specific mirror
+- cpanm: name=Dancer mirror=http://cpan.cpantesters.org/
+'''
+
def _is_package_installed(module, name, locallib, cpanm):
cmd = ""
if locallib:
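
The golden diff above replaces the non-standard `examples:` entries inside DOCUMENTATION with a module-level EXAMPLES string. As an illustration of the check the issue implies (finding modules that still lack such a string), a minimal sketch follows; the `ast`-based scan and the scanned directory names are assumptions for illustration, not code from the Ansible repository.

```
import ast
import pathlib


def modules_missing_examples(root):
    """Return module paths under ``root`` without a top-level EXAMPLES assignment."""
    missing = []
    for path in sorted(pathlib.Path(root).rglob("*.py")):
        try:
            tree = ast.parse(path.read_text(), filename=str(path))
        except SyntaxError:
            # Some legacy modules may be Python 2 only; skip rather than guess.
            continue
        has_examples = any(
            isinstance(node, ast.Assign)
            and any(getattr(target, "id", None) == "EXAMPLES" for target in node.targets)
            for node in tree.body
        )
        if not has_examples:
            missing.append(str(path))
    return missing


if __name__ == "__main__":
    # Directory names are assumptions; the issue mentions packaging/ and system/ trees.
    for name in modules_missing_examples("packaging") + modules_missing_examples("system"):
        print(name)
```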
|
{"golden_diff": "diff --git a/packaging/language/cpanm.py b/packaging/language/cpanm.py\n--- a/packaging/language/cpanm.py\n+++ b/packaging/language/cpanm.py\n@@ -58,24 +58,31 @@\n - Use the mirror's index file instead of the CPAN Meta DB\n required: false\n default: false\n-examples:\n- - code: \"cpanm: name=Dancer\"\n- description: Install I(Dancer) perl package.\n- - code: \"cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz\"\n- description: Install version 0.99_05 of the I(Plack) perl package.\n- - code: \"cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib\"\n- description: \"Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)\"\n- - code: \"cpanm: from_path=/srv/webapps/my_app/src/\"\n- description: Install perl dependencies from local directory.\n- - code: \"cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib\"\n- description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib).\n- - code: \"cpanm: name=Dancer mirror=http://cpan.cpantesters.org/\"\n- description: Install I(Dancer) perl package from a specific mirror\n notes:\n - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.\n author: \"Franck Cuny (@franckcuny)\"\n '''\n \n+EXAMPLES = '''\n+# install Dancer perl package\n+- cpanm: name=Dancer\n+\n+# install version 0.99_05 of the Plack perl package\n+- cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz\n+\n+# install Dancer into the specified locallib\n+- cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib\n+\n+# install perl dependencies from local directory\n+- cpanm: from_path=/srv/webapps/my_app/src/\n+\n+# install Dancer perl package without running the unit tests in indicated locallib\n+- cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib\n+\n+# install Dancer perl package from a specific mirror\n+- cpanm: name=Dancer mirror=http://cpan.cpantesters.org/\n+'''\n+\n def _is_package_installed(module, name, locallib, cpanm):\n cmd = \"\"\n if locallib:\n", "issue": "Modules missing EXAMPLES\nThe following modules are missing EXAMPLES strings:\n- ~~packaging/language/cpanm.py~~\n- system/open_iscsi.py\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Franck Cuny <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: cpanm\nshort_description: Manages Perl library dependencies.\ndescription:\n - Manage Perl library dependencies.\nversion_added: \"1.6\"\noptions:\n name:\n description:\n - The name of the Perl library to install. You may use the \"full distribution path\", e.g. 
MIYAGAWA/Plack-0.99_05.tar.gz\n required: false\n default: null\n aliases: [\"pkg\"]\n from_path:\n description:\n - The local directory from where to install\n required: false\n default: null\n notest:\n description:\n - Do not run unit tests\n required: false\n default: false\n locallib:\n description:\n - Specify the install base to install modules\n required: false\n default: false\n mirror:\n description:\n - Specifies the base URL for the CPAN mirror to use\n required: false\n default: false\n mirror_only:\n description:\n - Use the mirror's index file instead of the CPAN Meta DB\n required: false\n default: false\nexamples:\n - code: \"cpanm: name=Dancer\"\n description: Install I(Dancer) perl package.\n - code: \"cpanm: name=MIYAGAWA/Plack-0.99_05.tar.gz\"\n description: Install version 0.99_05 of the I(Plack) perl package.\n - code: \"cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib\"\n description: \"Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)\"\n - code: \"cpanm: from_path=/srv/webapps/my_app/src/\"\n description: Install perl dependencies from local directory.\n - code: \"cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib\"\n description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib).\n - code: \"cpanm: name=Dancer mirror=http://cpan.cpantesters.org/\"\n description: Install I(Dancer) perl package from a specific mirror\nnotes:\n - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.\nauthor: \"Franck Cuny (@franckcuny)\"\n'''\n\ndef _is_package_installed(module, name, locallib, cpanm):\n cmd = \"\"\n if locallib:\n os.environ[\"PERL5LIB\"] = \"%s/lib/perl5\" % locallib\n cmd = \"%s perl -M%s -e '1'\" % (cmd, name)\n res, stdout, stderr = module.run_command(cmd, check_rc=False)\n if res == 0:\n return True\n else: \n return False\n\ndef _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, cpanm):\n # this code should use \"%s\" like everything else and just return early but not fixing all of it now.\n # don't copy stuff like this\n if from_path:\n cmd = \"{cpanm} {path}\".format(cpanm=cpanm, path=from_path)\n else:\n cmd = \"{cpanm} {name}\".format(cpanm=cpanm, name=name)\n\n if notest is True:\n cmd = \"{cmd} -n\".format(cmd=cmd)\n\n if locallib is not None:\n cmd = \"{cmd} -l {locallib}\".format(cmd=cmd, locallib=locallib)\n\n if mirror is not None:\n cmd = \"{cmd} --mirror {mirror}\".format(cmd=cmd, mirror=mirror)\n\n if mirror_only is True:\n cmd = \"{cmd} --mirror-only\".format(cmd=cmd)\n\n return cmd\n\n\ndef main():\n arg_spec = dict(\n name=dict(default=None, required=False, aliases=['pkg']),\n from_path=dict(default=None, required=False),\n notest=dict(default=False, type='bool'),\n locallib=dict(default=None, required=False),\n mirror=dict(default=None, required=False),\n mirror_only=dict(default=False, type='bool'),\n )\n\n module = AnsibleModule(\n argument_spec=arg_spec,\n required_one_of=[['name', 'from_path']],\n )\n\n cpanm = module.get_bin_path('cpanm', True)\n name = module.params['name']\n from_path = module.params['from_path']\n notest = module.boolean(module.params.get('notest', False))\n locallib = module.params['locallib']\n mirror = module.params['mirror']\n mirror_only = module.params['mirror_only']\n\n changed = False\n\n installed = _is_package_installed(module, name, locallib, cpanm)\n\n if not installed:\n out_cpanm = err_cpanm = ''\n cmd = _build_cmd_line(name, from_path, 
notest, locallib, mirror, mirror_only, cpanm)\n\n rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)\n\n if rc_cpanm != 0:\n module.fail_json(msg=err_cpanm, cmd=cmd)\n\n if err_cpanm and 'is up to date' not in err_cpanm:\n changed = True\n\n module.exit_json(changed=changed, binary=cpanm, name=name)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\n\nmain()\n", "path": "packaging/language/cpanm.py"}]}
| 2,386 | 619 |
gh_patches_debug_4163
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-1665
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RawImageFileAdmin has incorrect relation defined
</issue>
<code>
[start of app/grandchallenge/cases/admin.py]
1 import re
2 from functools import update_wrapper
3
4 from django.contrib import admin
5 from django.contrib.admin.utils import unquote
6 from django.core.exceptions import PermissionDenied
7 from django.http import Http404, HttpResponse
8 from django.urls import path
9 from django.utils.html import format_html
10 from guardian.admin import GuardedModelAdmin
11
12 from grandchallenge.cases.models import (
13 Image,
14 ImageFile,
15 RawImageFile,
16 RawImageUploadSession,
17 )
18 from grandchallenge.jqfileupload.widgets.uploader import StagedAjaxFile
19 from grandchallenge.subdomains.utils import reverse
20
21
22 class ImageFileInline(admin.StackedInline):
23 model = ImageFile
24 extra = 0
25
26
27 class ImageAdmin(GuardedModelAdmin):
28 search_fields = (
29 "pk",
30 "name",
31 "study__name",
32 "modality__modality",
33 "color_space",
34 "eye_choice",
35 "field_of_view",
36 "stereoscopic_choice",
37 "study__patient__name",
38 )
39 list_filter = (
40 "modality",
41 "color_space",
42 "eye_choice",
43 "field_of_view",
44 "stereoscopic_choice",
45 "archive__slug",
46 )
47 inlines = [ImageFileInline]
48 readonly_fields = ("origin",)
49
50
51 class ImageInline(admin.StackedInline):
52 model = Image
53 extra = 0
54
55
56 class MhdOrRawFilter(admin.SimpleListFilter):
57 """Allow filtering on mhd or raw/zraw files."""
58
59 title = "MHD or RAW file"
60 parameter_name = "mhd_or_raw"
61
62 def lookups(self, request, model_admin):
63 return (("mhd", "MHD file"), ("raw", "RAW/ZRAW file"))
64
65 def queryset(self, request, queryset):
66 if self.value() == "mhd":
67 return queryset.filter(file__endswith=".mhd")
68 if self.value() == "raw":
69 return queryset.filter(file__endswith="raw")
70
71
72 class ImageFileAdmin(GuardedModelAdmin):
73 search_fields = ("pk", "file", "image__name")
74 list_filter = (MhdOrRawFilter,)
75 readonly_fields = ("image",)
76
77
78 class RawImageUploadSessionAdmin(GuardedModelAdmin):
79 ordering = ("-created",)
80 list_display = (
81 "pk",
82 "created",
83 "creator",
84 "status",
85 "error_message",
86 )
87 readonly_fields = (
88 "creator",
89 "status",
90 )
91 list_filter = ("status",)
92 search_fields = (
93 "creator__username",
94 "pk",
95 "error_message",
96 )
97
98
99 class DownloadableFilter(admin.SimpleListFilter):
100 """Allow filtering on downloadable files."""
101
102 title = "Downloadable"
103 parameter_name = "downloadable"
104
105 def lookups(self, request, model_admin):
106 return (("yes", "Yes"),)
107
108 def queryset(self, request, queryset):
109 if self.value() == "yes":
110 return queryset.filter(staged_file_id__isnull=False)
111 return queryset
112
113
114 class RawImageFileAdmin(GuardedModelAdmin):
115 list_filter = (DownloadableFilter,)
116 list_display = ("filename", "upload_session", "download")
117 list_select_related = ("upload_session__archive",)
118 readonly_fields = (
119 "download",
120 "upload_session",
121 )
122 search_fields = ("upload_session__pk", "filename")
123
124 def download(self, instance):
125 if not instance.staged_file_id:
126 return
127 return format_html(
128 f'<a class="button" href={reverse(f"admin:{self.model._meta.app_label}_{self.model._meta.model_name}_download", kwargs={"object_id": instance.pk})}>Download</a>'
129 )
130
131 def download_view(self, request, object_id, **kwargs):
132 obj = self.get_object(request, unquote(object_id), None)
133 if not self.has_view_or_change_permission(request, obj):
134 raise PermissionDenied
135
136 try:
137 saf = StagedAjaxFile(obj.staged_file_id).open()
138 response = HttpResponse(
139 saf.read(), content_type="application/dicom"
140 )
141 response[
142 "Content-Disposition"
143 ] = f'attachment; filename="{obj.filename}"'
144 return response
145 except Exception:
146 raise Http404("File not found")
147
148 def get_urls(self):
149 def wrap(view):
150 def wrapper(*args, **kwargs):
151 return self.admin_site.admin_view(view)(*args, **kwargs)
152
153 wrapper.model_admin = self
154 return update_wrapper(wrapper, view)
155
156 urls = super().get_urls()
157
158 download_url = path(
159 "<path:object_id>/download/",
160 wrap(self.download_view),
161 name=f"{self.model._meta.app_label}_{self.model._meta.model_name}_download",
162 )
163 # Currently the last url in ModelAdmin's get-urls is this:
164 # # For backwards compatibility (was the change url before 1.9)
165 # path('<path:object_id>/', wrap(RedirectView.as_view(
166 # pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
167 # ))),
168 # This would also match <path:object_id>/download/ and is only there for
169 # old django versions, which we do not use. Replace it if it is there.
170 # Otherwise just append the download_url to the list.
171 if urls[-1].pattern.regex == re.compile("^(?P<object_id>.+)/$"):
172 urls[-1] = download_url
173 else:
174 urls.append(download_url)
175
176 return urls
177
178
179 admin.site.register(Image, ImageAdmin)
180 admin.site.register(ImageFile, ImageFileAdmin)
181 admin.site.register(RawImageUploadSession, RawImageUploadSessionAdmin)
182 admin.site.register(RawImageFile, RawImageFileAdmin)
183
[end of app/grandchallenge/cases/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/grandchallenge/cases/admin.py b/app/grandchallenge/cases/admin.py
--- a/app/grandchallenge/cases/admin.py
+++ b/app/grandchallenge/cases/admin.py
@@ -114,7 +114,6 @@
class RawImageFileAdmin(GuardedModelAdmin):
list_filter = (DownloadableFilter,)
list_display = ("filename", "upload_session", "download")
- list_select_related = ("upload_session__archive",)
readonly_fields = (
"download",
"upload_session",
|
{"golden_diff": "diff --git a/app/grandchallenge/cases/admin.py b/app/grandchallenge/cases/admin.py\n--- a/app/grandchallenge/cases/admin.py\n+++ b/app/grandchallenge/cases/admin.py\n@@ -114,7 +114,6 @@\n class RawImageFileAdmin(GuardedModelAdmin):\n list_filter = (DownloadableFilter,)\n list_display = (\"filename\", \"upload_session\", \"download\")\n- list_select_related = (\"upload_session__archive\",)\n readonly_fields = (\n \"download\",\n \"upload_session\",\n", "issue": "RawImageFileAdmin has incorrect relation defined\n\n", "before_files": [{"content": "import re\nfrom functools import update_wrapper\n\nfrom django.contrib import admin\nfrom django.contrib.admin.utils import unquote\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, HttpResponse\nfrom django.urls import path\nfrom django.utils.html import format_html\nfrom guardian.admin import GuardedModelAdmin\n\nfrom grandchallenge.cases.models import (\n Image,\n ImageFile,\n RawImageFile,\n RawImageUploadSession,\n)\nfrom grandchallenge.jqfileupload.widgets.uploader import StagedAjaxFile\nfrom grandchallenge.subdomains.utils import reverse\n\n\nclass ImageFileInline(admin.StackedInline):\n model = ImageFile\n extra = 0\n\n\nclass ImageAdmin(GuardedModelAdmin):\n search_fields = (\n \"pk\",\n \"name\",\n \"study__name\",\n \"modality__modality\",\n \"color_space\",\n \"eye_choice\",\n \"field_of_view\",\n \"stereoscopic_choice\",\n \"study__patient__name\",\n )\n list_filter = (\n \"modality\",\n \"color_space\",\n \"eye_choice\",\n \"field_of_view\",\n \"stereoscopic_choice\",\n \"archive__slug\",\n )\n inlines = [ImageFileInline]\n readonly_fields = (\"origin\",)\n\n\nclass ImageInline(admin.StackedInline):\n model = Image\n extra = 0\n\n\nclass MhdOrRawFilter(admin.SimpleListFilter):\n \"\"\"Allow filtering on mhd or raw/zraw files.\"\"\"\n\n title = \"MHD or RAW file\"\n parameter_name = \"mhd_or_raw\"\n\n def lookups(self, request, model_admin):\n return ((\"mhd\", \"MHD file\"), (\"raw\", \"RAW/ZRAW file\"))\n\n def queryset(self, request, queryset):\n if self.value() == \"mhd\":\n return queryset.filter(file__endswith=\".mhd\")\n if self.value() == \"raw\":\n return queryset.filter(file__endswith=\"raw\")\n\n\nclass ImageFileAdmin(GuardedModelAdmin):\n search_fields = (\"pk\", \"file\", \"image__name\")\n list_filter = (MhdOrRawFilter,)\n readonly_fields = (\"image\",)\n\n\nclass RawImageUploadSessionAdmin(GuardedModelAdmin):\n ordering = (\"-created\",)\n list_display = (\n \"pk\",\n \"created\",\n \"creator\",\n \"status\",\n \"error_message\",\n )\n readonly_fields = (\n \"creator\",\n \"status\",\n )\n list_filter = (\"status\",)\n search_fields = (\n \"creator__username\",\n \"pk\",\n \"error_message\",\n )\n\n\nclass DownloadableFilter(admin.SimpleListFilter):\n \"\"\"Allow filtering on downloadable files.\"\"\"\n\n title = \"Downloadable\"\n parameter_name = \"downloadable\"\n\n def lookups(self, request, model_admin):\n return ((\"yes\", \"Yes\"),)\n\n def queryset(self, request, queryset):\n if self.value() == \"yes\":\n return queryset.filter(staged_file_id__isnull=False)\n return queryset\n\n\nclass RawImageFileAdmin(GuardedModelAdmin):\n list_filter = (DownloadableFilter,)\n list_display = (\"filename\", \"upload_session\", \"download\")\n list_select_related = (\"upload_session__archive\",)\n readonly_fields = (\n \"download\",\n \"upload_session\",\n )\n search_fields = (\"upload_session__pk\", \"filename\")\n\n def download(self, instance):\n if not 
instance.staged_file_id:\n return\n return format_html(\n f'<a class=\"button\" href={reverse(f\"admin:{self.model._meta.app_label}_{self.model._meta.model_name}_download\", kwargs={\"object_id\": instance.pk})}>Download</a>'\n )\n\n def download_view(self, request, object_id, **kwargs):\n obj = self.get_object(request, unquote(object_id), None)\n if not self.has_view_or_change_permission(request, obj):\n raise PermissionDenied\n\n try:\n saf = StagedAjaxFile(obj.staged_file_id).open()\n response = HttpResponse(\n saf.read(), content_type=\"application/dicom\"\n )\n response[\n \"Content-Disposition\"\n ] = f'attachment; filename=\"{obj.filename}\"'\n return response\n except Exception:\n raise Http404(\"File not found\")\n\n def get_urls(self):\n def wrap(view):\n def wrapper(*args, **kwargs):\n return self.admin_site.admin_view(view)(*args, **kwargs)\n\n wrapper.model_admin = self\n return update_wrapper(wrapper, view)\n\n urls = super().get_urls()\n\n download_url = path(\n \"<path:object_id>/download/\",\n wrap(self.download_view),\n name=f\"{self.model._meta.app_label}_{self.model._meta.model_name}_download\",\n )\n # Currently the last url in ModelAdmin's get-urls is this:\n # # For backwards compatibility (was the change url before 1.9)\n # path('<path:object_id>/', wrap(RedirectView.as_view(\n # pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)\n # ))),\n # This would also match <path:object_id>/download/ and is only there for\n # old django versions, which we do not use. Replace it if it is there.\n # Otherwise just append the download_url to the list.\n if urls[-1].pattern.regex == re.compile(\"^(?P<object_id>.+)/$\"):\n urls[-1] = download_url\n else:\n urls.append(download_url)\n\n return urls\n\n\nadmin.site.register(Image, ImageAdmin)\nadmin.site.register(ImageFile, ImageFileAdmin)\nadmin.site.register(RawImageUploadSession, RawImageUploadSessionAdmin)\nadmin.site.register(RawImageFile, RawImageFileAdmin)\n", "path": "app/grandchallenge/cases/admin.py"}]}
| 2,246 | 119 |
gh_patches_debug_25838
|
rasdani/github-patches
|
git_diff
|
web2py__web2py-1502
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
webclient : cookie may contain several '='
Which leads to a ValueError: too many values to unpack.
A PR to correct the problem will follow
</issue>
<code>
[start of gluon/contrib/webclient.py]
1 """
2 Developed by Massimo Di Pierro
3 Released under the web2py license (LGPL)
4
5 It an interface on top of urllib2 which simplifies scripting of http requests
6 mostly for testing purposes
7
8 - customizable
9 - supports basic auth
10 - supports cookies
11 - supports session cookies (tested with web2py sessions)
12 - detects broken session
13 - detects web2py form postbacks and handles formname and formkey
14 - detects web2py tickets
15
16 Some examples at the bottom.
17 """
18 from __future__ import print_function
19 from gluon._compat import urllib2, cookielib, iteritems, to_native, urlencode, to_bytes
20 import re
21 import time
22
23
24 DEFAULT_HEADERS = {
25 'user-agent': 'Mozilla/4.0', # some servers are picky
26 'accept-language': 'en',
27 }
28
29 FORM_REGEX = re.compile('(\<input name\="_formkey" type\="hidden" value\="(?P<formkey>.+?)" \/\>)?\<input name\="_formname" type\="hidden" value\="(?P<formname>.+?)" \/\>')
30
31 SESSION_REGEX = 'session_id_(?P<name>.+)'
32
33
34 class WebClient(object):
35
36 def __init__(self,
37 app='',
38 postbacks=True,
39 default_headers=DEFAULT_HEADERS,
40 session_regex=SESSION_REGEX):
41 self.app = app
42 self.postbacks = postbacks
43 self.forms = {}
44 self.history = []
45 self.cookies = {}
46 self.default_headers = default_headers
47 self.sessions = {}
48 self.session_regex = session_regex and re.compile(session_regex)
49
50 def get(self, url, cookies=None, headers=None, auth=None):
51 return self.post(url, data=None, cookies=cookies,
52 headers=headers, method='GET')
53
54 def post(self, url, data=None, cookies=None,
55 headers=None, auth=None, method='auto'):
56 self.url = self.app + url
57
58 # if this POST form requires a postback do it
59 if data and '_formname' in data and self.postbacks and \
60 self.history and self.history[-1][1] != self.url:
61 # to bypass the web2py CSRF need to get formkey
62 # before submitting the form
63 self.get(url, cookies=cookies, headers=headers, auth=auth)
64
65 # unless cookies are specified, recycle cookies
66 if cookies is None:
67 cookies = self.cookies
68 cookies = cookies or {}
69 headers = headers or {}
70
71 cj = cookielib.CookieJar()
72 args = [
73 urllib2.HTTPCookieProcessor(cj),
74 urllib2.HTTPHandler(debuglevel=0)
75 ]
76 # if required do basic auth
77 if auth:
78 auth_handler = urllib2.HTTPBasicAuthHandler()
79 auth_handler.add_password(**auth)
80 args.append(auth_handler)
81
82 opener = urllib2.build_opener(*args)
83
84 # copy headers from dict to list of key,value
85 headers_list = []
86 for key, value in iteritems(self.default_headers):
87 if not key in headers:
88 headers[key] = value
89 for key, value in iteritems(headers):
90 if isinstance(value, (list, tuple)):
91 for v in value:
92 headers_list.append((key, v))
93 else:
94 headers_list.append((key, value))
95
96 # move cookies to headers
97 for key, value in iteritems(cookies):
98 headers_list.append(('Cookie', '%s=%s' % (key, value)))
99
100 # add headers to request
101 for key, value in headers_list:
102 opener.addheaders.append((key, str(value)))
103
104 # assume everything is ok and make http request
105 error = None
106 try:
107 if isinstance(data, str):
108 self.method = 'POST' if method=='auto' else method
109 elif isinstance(data, dict):
110 self.method = 'POST' if method=='auto' else method
111 # if there is only one form, set _formname automatically
112 if not '_formname' in data and len(self.forms) == 1:
113 data['_formname'] = self.forms.keys()[0]
114
115 # if there is no formkey but it is known, set it
116 if '_formname' in data and not '_formkey' in data and \
117 data['_formname'] in self.forms:
118 data['_formkey'] = self.forms[data['_formname']]
119
120 # time the POST request
121 data = urlencode(data, doseq=True)
122 else:
123 self.method = 'GET' if method=='auto' else method
124 data = None
125 t0 = time.time()
126 self.response = opener.open(self.url, to_bytes(data))
127 self.time = time.time() - t0
128 except urllib2.HTTPError as er:
129 error = er
130 # catch HTTP errors
131 self.time = time.time() - t0
132 self.response = er
133
134 if hasattr(self.response, 'getcode'):
135 self.status = self.response.getcode()
136 else:#python2.5
137 self.status = None
138
139 self.text = to_native(self.response.read())
140 # In PY3 self.response.headers are case sensitive
141 self.headers = dict()
142 for h in self.response.headers:
143 self.headers[h.lower()] = self.response.headers[h]
144
145 # treat web2py tickets as special types of errors
146 if error is not None:
147 if 'web2py_error' in self.headers:
148 raise RuntimeError(self.headers['web2py_error'])
149 else:
150 raise error
151
152 # parse headers into cookies
153 self.cookies = {}
154 if 'set-cookie' in self.headers:
155 for item in self.headers['set-cookie'].split(','):
156 key, value = item[:item.find(';')].split('=')
157 self.cookies[key.strip()] = value.strip()
158
159 # check is a new session id has been issued, symptom of broken session
160 if self.session_regex is not None:
161 for cookie, value in iteritems(self.cookies):
162 match = self.session_regex.match(cookie)
163 if match:
164 name = match.group('name')
165 if name in self.sessions and self.sessions[name] != value:
166 print(RuntimeError('Changed session ID %s' % name))
167 self.sessions[name] = value
168
169 # find all forms and formkeys in page
170 self.forms = {}
171 for match in FORM_REGEX.finditer(to_native(self.text)):
172 self.forms[match.group('formname')] = match.group('formkey')
173
174 # log this request
175 self.history.append((self.method, self.url, self.status, self.time))
176
177
178 def test_web2py_registration_and_login():
179 # from gluon.contrib.webclient import WebClient
180 # start a web2py instance for testing
181
182 client = WebClient('http://127.0.0.1:8000/welcome/default/')
183 client.get('index')
184
185 # register
186 data = dict(first_name='Homer',
187 last_name='Simpson',
188 email='[email protected]',
189 password='test',
190 password_two='test',
191 _formname='register')
192 client.post('user/register', data=data)
193
194 # logout
195 client.get('user/logout')
196
197 # login
198 data = dict(email='[email protected]',
199 password='test',
200 _formname='login')
201 client.post('user/login', data=data)
202
203 # check registration and login were successful
204 client.get('user/profile')
205 assert 'Welcome Homer' in client.text
206
207 # print some variables
208 print('\nsessions:\n', client.sessions)
209 print('\nheaders:\n', client.headers)
210 print('\ncookies:\n', client.cookies)
211 print('\nforms:\n', client.forms)
212 print()
213 for method, url, status, t in client.history:
214 print(method, url, status, t)
215
216 if __name__ == '__main__':
217 test_web2py_registration_and_login()
218
[end of gluon/contrib/webclient.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py
--- a/gluon/contrib/webclient.py
+++ b/gluon/contrib/webclient.py
@@ -46,6 +46,17 @@
self.default_headers = default_headers
self.sessions = {}
self.session_regex = session_regex and re.compile(session_regex)
+ self.headers = {}
+
+ def _parse_headers_in_cookies(self):
+ self.cookies = {}
+ if 'set-cookie' in self.headers:
+ for item in self.headers['set-cookie'].split(','):
+ cookie = item[:item.find(';')]
+ pos = cookie.find('=')
+ key = cookie[:pos]
+ value = cookie[pos+1:]
+ self.cookies[key.strip()] = value.strip()
def get(self, url, cookies=None, headers=None, auth=None):
return self.post(url, data=None, cookies=cookies,
@@ -149,12 +160,7 @@
else:
raise error
- # parse headers into cookies
- self.cookies = {}
- if 'set-cookie' in self.headers:
- for item in self.headers['set-cookie'].split(','):
- key, value = item[:item.find(';')].split('=')
- self.cookies[key.strip()] = value.strip()
+ self._parse_headers_in_cookies()
# check is a new session id has been issued, symptom of broken session
if self.session_regex is not None:
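
To see why the original one-liner fails and what the patched `_parse_headers_in_cookies` helper does differently, here is a minimal sketch; only the first-`=` split is taken from the diff, and the sample cookie string is a made-up assumption.

```
# A made-up cookie whose value contains '=' characters (e.g. base64 padding).
cookie = 'session_id_welcome=dG9rZW4hIQ=='

# Pre-patch parsing -- raises "ValueError: too many values to unpack",
# because split('=') yields four pieces for this string:
#   key, value = cookie.split('=')

# Patched parsing: split only at the first '=' so the value stays intact.
pos = cookie.find('=')
key, value = cookie[:pos], cookie[pos + 1:]
assert (key, value) == ('session_id_welcome', 'dG9rZW4hIQ==')
```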
|
{"golden_diff": "diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py\n--- a/gluon/contrib/webclient.py\n+++ b/gluon/contrib/webclient.py\n@@ -46,6 +46,17 @@\n self.default_headers = default_headers\n self.sessions = {}\n self.session_regex = session_regex and re.compile(session_regex)\n+ self.headers = {}\n+\n+ def _parse_headers_in_cookies(self):\n+ self.cookies = {}\n+ if 'set-cookie' in self.headers:\n+ for item in self.headers['set-cookie'].split(','):\n+ cookie = item[:item.find(';')]\n+ pos = cookie.find('=')\n+ key = cookie[:pos]\n+ value = cookie[pos+1:]\n+ self.cookies[key.strip()] = value.strip()\n \n def get(self, url, cookies=None, headers=None, auth=None):\n return self.post(url, data=None, cookies=cookies,\n@@ -149,12 +160,7 @@\n else:\n raise error\n \n- # parse headers into cookies\n- self.cookies = {}\n- if 'set-cookie' in self.headers:\n- for item in self.headers['set-cookie'].split(','):\n- key, value = item[:item.find(';')].split('=')\n- self.cookies[key.strip()] = value.strip()\n+ self._parse_headers_in_cookies()\n \n # check is a new session id has been issued, symptom of broken session\n if self.session_regex is not None:\n", "issue": "webclient : cookie may contains several '=' \nWhich leads to a ValueError: too many values to unpack.\n\nA PR to correct the problem will follow\n\nwebclient : cookie may contains several '=' \nWhich leads to a ValueError: too many values to unpack.\n\nA PR to correct the problem will follow\n\n", "before_files": [{"content": "\"\"\"\nDeveloped by Massimo Di Pierro\nReleased under the web2py license (LGPL)\n\nIt an interface on top of urllib2 which simplifies scripting of http requests\nmostly for testing purposes\n\n- customizable\n- supports basic auth\n- supports cookies\n- supports session cookies (tested with web2py sessions)\n- detects broken session\n- detects web2py form postbacks and handles formname and formkey\n- detects web2py tickets\n\nSome examples at the bottom.\n\"\"\"\nfrom __future__ import print_function\nfrom gluon._compat import urllib2, cookielib, iteritems, to_native, urlencode, to_bytes\nimport re\nimport time\n\n\nDEFAULT_HEADERS = {\n 'user-agent': 'Mozilla/4.0', # some servers are picky\n 'accept-language': 'en',\n}\n\nFORM_REGEX = re.compile('(\\<input name\\=\"_formkey\" type\\=\"hidden\" value\\=\"(?P<formkey>.+?)\" \\/\\>)?\\<input name\\=\"_formname\" type\\=\"hidden\" value\\=\"(?P<formname>.+?)\" \\/\\>')\n\nSESSION_REGEX = 'session_id_(?P<name>.+)'\n\n\nclass WebClient(object):\n\n def __init__(self,\n app='',\n postbacks=True,\n default_headers=DEFAULT_HEADERS,\n session_regex=SESSION_REGEX):\n self.app = app\n self.postbacks = postbacks\n self.forms = {}\n self.history = []\n self.cookies = {}\n self.default_headers = default_headers\n self.sessions = {}\n self.session_regex = session_regex and re.compile(session_regex)\n\n def get(self, url, cookies=None, headers=None, auth=None):\n return self.post(url, data=None, cookies=cookies,\n headers=headers, method='GET')\n\n def post(self, url, data=None, cookies=None,\n headers=None, auth=None, method='auto'):\n self.url = self.app + url\n\n # if this POST form requires a postback do it\n if data and '_formname' in data and self.postbacks and \\\n self.history and self.history[-1][1] != self.url:\n # to bypass the web2py CSRF need to get formkey\n # before submitting the form\n self.get(url, cookies=cookies, headers=headers, auth=auth)\n\n # unless cookies are specified, recycle cookies\n if cookies is None:\n cookies = self.cookies\n 
cookies = cookies or {}\n headers = headers or {}\n\n cj = cookielib.CookieJar()\n args = [\n urllib2.HTTPCookieProcessor(cj),\n urllib2.HTTPHandler(debuglevel=0)\n ]\n # if required do basic auth\n if auth:\n auth_handler = urllib2.HTTPBasicAuthHandler()\n auth_handler.add_password(**auth)\n args.append(auth_handler)\n\n opener = urllib2.build_opener(*args)\n\n # copy headers from dict to list of key,value\n headers_list = []\n for key, value in iteritems(self.default_headers):\n if not key in headers:\n headers[key] = value\n for key, value in iteritems(headers):\n if isinstance(value, (list, tuple)):\n for v in value:\n headers_list.append((key, v))\n else:\n headers_list.append((key, value))\n\n # move cookies to headers\n for key, value in iteritems(cookies):\n headers_list.append(('Cookie', '%s=%s' % (key, value)))\n\n # add headers to request\n for key, value in headers_list:\n opener.addheaders.append((key, str(value)))\n\n # assume everything is ok and make http request\n error = None\n try:\n if isinstance(data, str):\n self.method = 'POST' if method=='auto' else method\n elif isinstance(data, dict):\n self.method = 'POST' if method=='auto' else method\n # if there is only one form, set _formname automatically\n if not '_formname' in data and len(self.forms) == 1:\n data['_formname'] = self.forms.keys()[0]\n\n # if there is no formkey but it is known, set it\n if '_formname' in data and not '_formkey' in data and \\\n data['_formname'] in self.forms:\n data['_formkey'] = self.forms[data['_formname']]\n\n # time the POST request\n data = urlencode(data, doseq=True)\n else:\n self.method = 'GET' if method=='auto' else method\n data = None\n t0 = time.time()\n self.response = opener.open(self.url, to_bytes(data))\n self.time = time.time() - t0\n except urllib2.HTTPError as er:\n error = er\n # catch HTTP errors\n self.time = time.time() - t0\n self.response = er\n\n if hasattr(self.response, 'getcode'):\n self.status = self.response.getcode()\n else:#python2.5\n self.status = None\n\n self.text = to_native(self.response.read())\n # In PY3 self.response.headers are case sensitive\n self.headers = dict()\n for h in self.response.headers:\n self.headers[h.lower()] = self.response.headers[h]\n\n # treat web2py tickets as special types of errors\n if error is not None:\n if 'web2py_error' in self.headers:\n raise RuntimeError(self.headers['web2py_error'])\n else:\n raise error\n\n # parse headers into cookies\n self.cookies = {}\n if 'set-cookie' in self.headers:\n for item in self.headers['set-cookie'].split(','):\n key, value = item[:item.find(';')].split('=')\n self.cookies[key.strip()] = value.strip()\n\n # check is a new session id has been issued, symptom of broken session\n if self.session_regex is not None:\n for cookie, value in iteritems(self.cookies):\n match = self.session_regex.match(cookie)\n if match:\n name = match.group('name')\n if name in self.sessions and self.sessions[name] != value:\n print(RuntimeError('Changed session ID %s' % name))\n self.sessions[name] = value\n\n # find all forms and formkeys in page\n self.forms = {}\n for match in FORM_REGEX.finditer(to_native(self.text)):\n self.forms[match.group('formname')] = match.group('formkey')\n\n # log this request\n self.history.append((self.method, self.url, self.status, self.time))\n\n\ndef test_web2py_registration_and_login():\n # from gluon.contrib.webclient import WebClient\n # start a web2py instance for testing\n\n client = WebClient('http://127.0.0.1:8000/welcome/default/')\n client.get('index')\n\n # 
register\n data = dict(first_name='Homer',\n last_name='Simpson',\n email='[email protected]',\n password='test',\n password_two='test',\n _formname='register')\n client.post('user/register', data=data)\n\n # logout\n client.get('user/logout')\n\n # login\n data = dict(email='[email protected]',\n password='test',\n _formname='login')\n client.post('user/login', data=data)\n\n # check registration and login were successful\n client.get('user/profile')\n assert 'Welcome Homer' in client.text\n\n # print some variables\n print('\\nsessions:\\n', client.sessions)\n print('\\nheaders:\\n', client.headers)\n print('\\ncookies:\\n', client.cookies)\n print('\\nforms:\\n', client.forms)\n print()\n for method, url, status, t in client.history:\n print(method, url, status, t)\n\nif __name__ == '__main__':\n test_web2py_registration_and_login()\n", "path": "gluon/contrib/webclient.py"}]}
| 2,859 | 331 |
gh_patches_debug_37443
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-3793
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't add a user in UserGroupsControlpanelView to a Group with pas.plugin.ldap with plone.many_users=True
### What I did:
- add plone group `testgroup1` via /@@usergroup-groupprefs
- set registry entries for `plone.many_users` and `plone.many_groups` to `True`
- add a Folder
- go to the folder and call the sharing view /myfolder/@@sharing
- add the group `testgroup1` with reader, editor roles
- click on the group to add some users `/@@usergroup-groupmembership?groupname=testgroup1`
- search a user
- select a user (should be a LDAP User) and save
### What I expect to happen:
- the user is added to the group
### What actually happened:
- endless waiting for a response after clicking the "Add" button
### What version of Plone/ Addons I am using:
- Plone 6.0.4
- pas.plugin.ldap 1.8.2
- node.ext.ldap 1.2
- node 1.2.1
- LDAP Backend user objects > 10000
### Some investigations
- in the ZMI acl_users -> source_groups the action is possible without errors
- with loglevel DEBUG i see thousands of queries to the LDAP Backend
- the task runs for many minutes
- it's a limitation of [pas.plugin.ldap](https://github.com/collective/pas.plugins.ldap#limitations-and-future-optimizations)
the [GroupMembershipControlPanel](https://github.com/plone/Products.CMFPlone/blob/a5b48c0c24e6eebbe01aa2874eaaa9aa3d49f155/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py#L65) cleans the search string in the request and passes it to the membership search in UsersGroupsControlPanelView.
An empty search string amounts to an intention to "list all users", which violates the option `many_users = True`. The search with an empty search string should not be performed.
With the following patch, all is fine ;-)
```
if searchGroups:
if not self.many_groups or bool(searchString):
groupResults = searchView.merge(chain(*[searchView.searchGroups(**{field: searchString}) for field in ['id', 'title']]), 'groupid')
groupResults = [gtool.getGroupById(g['id']) for g in groupResults if g['id'] not in ignore]
groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))
if searchUsers:
if not self.many_users or bool(searchString):
userResults = searchView.merge(chain(*[searchView.searchUsers(**{field: searchString}) for field in ['login', 'fullname', 'email']]), 'userid')
userResults = [mtool.getMemberById(u['id']) for u in userResults if u['id'] not in ignore]
userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')
```
</issue>
<code>
[start of Products/CMFPlone/controlpanel/browser/usergroups.py]
1 from AccessControl import getSecurityManager
2 from Acquisition import aq_inner
3 from itertools import chain
4 from plone.autoform.form import AutoExtensibleForm
5 from plone.base import PloneMessageFactory as _
6 from plone.base.interfaces import ISecuritySchema
7 from plone.base.interfaces import IUserGroupsSettingsSchema
8 from plone.z3cform import layout
9 from Products.CMFCore.permissions import ManagePortal
10 from Products.CMFCore.utils import getToolByName
11 from Products.CMFPlone.utils import normalizeString
12 from Products.Five.browser import BrowserView
13 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
14 from z3c.form import button
15 from z3c.form import form
16 from zope.component import getAdapter
17 from zope.component import getMultiAdapter
18 from ZTUtils import make_query
19
20
21 class UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):
22 schema = IUserGroupsSettingsSchema
23 id = "usergroupsettings-control-panel"
24 label = _("Users and Groups")
25 form_name = _("User/Groups settings")
26 control_panel_view = "usergroups-controlpanel"
27
28 @button.buttonAndHandler(_("label_save", default="Save"), name="save")
29 def handleApply(self, action):
30 super().handleApply(self, action)
31
32 def updateActions(self):
33 super().updateActions()
34 if self.actions and "save" in self.actions:
35 self.actions["save"].addClass("btn-primary")
36
37
38 class ControlPanelFormWrapper(layout.FormWrapper):
39 """Use this form as the plone.z3cform layout wrapper to get the control
40 panel layout.
41 """
42
43 index = ViewPageTemplateFile("controlpanel_usergroups_layout.pt")
44
45
46 UserGroupsSettingsPanelView = layout.wrap_form(
47 UserGroupsSettingsControlPanel, ControlPanelFormWrapper
48 )
49
50
51 class UsersGroupsControlPanelView(BrowserView):
52 @property
53 def portal_roles(self):
54 pmemb = getToolByName(aq_inner(self.context), "portal_membership")
55 return [r for r in pmemb.getPortalRoles() if r != "Owner"]
56
57 @property
58 def many_users(self):
59 return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users
60
61 @property
62 def many_groups(self):
63 return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups
64
65 @property
66 def email_as_username(self):
67 return getAdapter(
68 aq_inner(self.context), ISecuritySchema
69 ).get_use_email_as_login()
70
71 def makeQuery(self, **kw):
72 return make_query(**kw)
73
74 def membershipSearch(
75 self, searchString="", searchUsers=True, searchGroups=True, ignore=[]
76 ):
77 """Search for users and/or groups, returning actual member and group items
78 Replaces the now-deprecated prefs_user_groups_search.py script"""
79 groupResults = userResults = []
80
81 gtool = getToolByName(self, "portal_groups")
82 mtool = getToolByName(self, "portal_membership")
83
84 searchView = getMultiAdapter(
85 (aq_inner(self.context), self.request), name="pas_search"
86 )
87
88 if searchGroups:
89 groupResults = searchView.merge(
90 chain(
91 *[
92 searchView.searchGroups(**{field: searchString})
93 for field in ["id", "title"]
94 ]
95 ),
96 "groupid",
97 )
98 groupResults = [
99 gtool.getGroupById(g["id"])
100 for g in groupResults
101 if g["id"] not in ignore
102 ]
103 groupResults.sort(
104 key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
105 )
106
107 if searchUsers:
108 userResults = searchView.merge(
109 chain(
110 *[
111 searchView.searchUsers(**{field: searchString})
112 for field in ["login", "fullname", "email"]
113 ]
114 ),
115 "userid",
116 )
117 userResults = [
118 mtool.getMemberById(u["id"])
119 for u in userResults
120 if u["id"] not in ignore
121 ]
122 userResults.sort(
123 key=lambda x: x is not None
124 and x.getProperty("fullname") is not None
125 and normalizeString(x.getProperty("fullname"))
126 or ""
127 )
128
129 return groupResults + userResults
130
131 def atoi(self, s):
132 try:
133 return int(s)
134 except ValueError:
135 return 0
136
137 @property
138 def is_zope_manager(self):
139 return getSecurityManager().checkPermission(ManagePortal, self.context)
140
141 # The next two class methods implement the following truth table:
142 #
143 # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT
144 # False False False Lists unavailable
145 # False False True Show all
146 # False True False Show matching
147 # False True True Show matching
148 # True False False Too many to list
149 # True False True Lists unavailable
150 # True True False Show matching
151 # True True True Show matching
152
153 # TODO: Maybe have these methods return a text message (instead of a bool)
154 # corresponding to the actual result, e.g. "Too many to list", "Lists
155 # unavailable"
156
157 @property
158 def show_group_listing_warning(self):
159 if not self.searchString:
160 acl = getToolByName(self, "acl_users")
161 if acl.canListAllGroups():
162 if self.many_groups:
163 return True
164 return False
165
166 @property
167 def show_users_listing_warning(self):
168 if not self.searchString:
169 acl = getToolByName(self, "acl_users")
170 # XXX Huh? Is canListAllUsers broken?
171 if not acl.canListAllUsers():
172 if self.many_users:
173 return True
174 return False
175
[end of Products/CMFPlone/controlpanel/browser/usergroups.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Products/CMFPlone/controlpanel/browser/usergroups.py b/Products/CMFPlone/controlpanel/browser/usergroups.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups.py
@@ -86,45 +86,50 @@
)
if searchGroups:
- groupResults = searchView.merge(
- chain(
- *[
- searchView.searchGroups(**{field: searchString})
- for field in ["id", "title"]
- ]
- ),
- "groupid",
- )
- groupResults = [
- gtool.getGroupById(g["id"])
- for g in groupResults
- if g["id"] not in ignore
- ]
- groupResults.sort(
- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
- )
+ # Only search for all ('') if the many_users flag is not set.
+ if not (self.many_groups) or bool(self.searchString):
+ groupResults = searchView.merge(
+ chain(
+ *[
+ searchView.searchGroups(**{field: searchString})
+ for field in ["id", "title"]
+ ]
+ ),
+ "groupid",
+ )
+ groupResults = [
+ gtool.getGroupById(g["id"])
+ for g in groupResults
+ if g["id"] not in ignore
+ ]
+ groupResults.sort(
+ key=lambda x: x is not None
+ and normalizeString(x.getGroupTitleOrName())
+ )
if searchUsers:
- userResults = searchView.merge(
- chain(
- *[
- searchView.searchUsers(**{field: searchString})
- for field in ["login", "fullname", "email"]
- ]
- ),
- "userid",
- )
- userResults = [
- mtool.getMemberById(u["id"])
- for u in userResults
- if u["id"] not in ignore
- ]
- userResults.sort(
- key=lambda x: x is not None
- and x.getProperty("fullname") is not None
- and normalizeString(x.getProperty("fullname"))
- or ""
- )
+ # Only search for all ('') if the many_users flag is not set.
+ if not (self.many_users) or bool(self.searchString):
+ userResults = searchView.merge(
+ chain(
+ *[
+ searchView.searchUsers(**{field: searchString})
+ for field in ["login", "fullname", "email"]
+ ]
+ ),
+ "userid",
+ )
+ userResults = [
+ mtool.getMemberById(u["id"])
+ for u in userResults
+ if u["id"] not in ignore
+ ]
+ userResults.sort(
+ key=lambda x: x is not None
+ and x.getProperty("fullname") is not None
+ and normalizeString(x.getProperty("fullname"))
+ or ""
+ )
return groupResults + userResults
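
The guard added in both branches, `not (self.many_users) or bool(self.searchString)` and its group counterpart, reads as: only perform an unrestricted (empty-string) search when listing all entries is allowed. A tiny, self-contained sketch of that condition with assumed flag and search values:

```
def should_search(many, search_string):
    """Mirror of the added guard: unrestricted searches only when listing is allowed."""
    return not many or bool(search_string)


assert should_search(False, "") is True       # few users/groups: listing all is fine
assert should_search(False, "smith") is True
assert should_search(True, "smith") is True   # many, but a real search term was given
assert should_search(True, "") is False       # many + empty search: skip the backend query
```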
|
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups.py b/Products/CMFPlone/controlpanel/browser/usergroups.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups.py\n@@ -86,45 +86,50 @@\n )\n \n if searchGroups:\n- groupResults = searchView.merge(\n- chain(\n- *[\n- searchView.searchGroups(**{field: searchString})\n- for field in [\"id\", \"title\"]\n- ]\n- ),\n- \"groupid\",\n- )\n- groupResults = [\n- gtool.getGroupById(g[\"id\"])\n- for g in groupResults\n- if g[\"id\"] not in ignore\n- ]\n- groupResults.sort(\n- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n- )\n+ # Only search for all ('') if the many_users flag is not set.\n+ if not (self.many_groups) or bool(self.searchString):\n+ groupResults = searchView.merge(\n+ chain(\n+ *[\n+ searchView.searchGroups(**{field: searchString})\n+ for field in [\"id\", \"title\"]\n+ ]\n+ ),\n+ \"groupid\",\n+ )\n+ groupResults = [\n+ gtool.getGroupById(g[\"id\"])\n+ for g in groupResults\n+ if g[\"id\"] not in ignore\n+ ]\n+ groupResults.sort(\n+ key=lambda x: x is not None\n+ and normalizeString(x.getGroupTitleOrName())\n+ )\n \n if searchUsers:\n- userResults = searchView.merge(\n- chain(\n- *[\n- searchView.searchUsers(**{field: searchString})\n- for field in [\"login\", \"fullname\", \"email\"]\n- ]\n- ),\n- \"userid\",\n- )\n- userResults = [\n- mtool.getMemberById(u[\"id\"])\n- for u in userResults\n- if u[\"id\"] not in ignore\n- ]\n- userResults.sort(\n- key=lambda x: x is not None\n- and x.getProperty(\"fullname\") is not None\n- and normalizeString(x.getProperty(\"fullname\"))\n- or \"\"\n- )\n+ # Only search for all ('') if the many_users flag is not set.\n+ if not (self.many_users) or bool(self.searchString):\n+ userResults = searchView.merge(\n+ chain(\n+ *[\n+ searchView.searchUsers(**{field: searchString})\n+ for field in [\"login\", \"fullname\", \"email\"]\n+ ]\n+ ),\n+ \"userid\",\n+ )\n+ userResults = [\n+ mtool.getMemberById(u[\"id\"])\n+ for u in userResults\n+ if u[\"id\"] not in ignore\n+ ]\n+ userResults.sort(\n+ key=lambda x: x is not None\n+ and x.getProperty(\"fullname\") is not None\n+ and normalizeString(x.getProperty(\"fullname\"))\n+ or \"\"\n+ )\n \n return groupResults + userResults\n", "issue": "Can't add an user in UserGroupsControlpanelView to a Group with pas.plugin.ldap with plone.many_users=True\n### What I did:\r\n\r\n- add plone group `testgroup1` via /@@usergroup-groupprefs\r\n- set registry entries for `plone.many_users` and `plone.many_groups` to `True`\r\n- add a Folder\r\n- go to the folder and call the sharing view /myfolder/@@sharing\r\n- add the group `testgroup1` with reader, editor roles\r\n- click on the group to add some users `/@@usergroup-groupmembership?groupname=testgroup1`\r\n- search a user\r\n- select a user (should be a LDAP User) and save\r\n\r\n### What I expect to happen:\r\n- the user is added to the group\r\n\r\n### What actually happened:\r\n- endless waiting for response after click on \"Add\" Button\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\n- Plone 6.0.4\r\n- pas.plugin.ldap 1.8.2\r\n- node.ext.ldap 1.2\r\n- node 1.2.1\r\n- LDAP Backend user objects > 10000\r\n\r\n### Some investigations\r\n\r\n- in the ZMI acl_users -> source_groups the action is possible without errors\r\n- with loglevel DEBUG i see thousands of queries to the LDAP Backend\r\n- the task run many minutes\r\n- its a limitation of 
[pas.plugin.ldap](https://github.com/collective/pas.plugins.ldap#limitations-and-future-optimizations)\r\n \r\n\r\nthe [GroupMembershipControlPanel](https://github.com/plone/Products.CMFPlone/blob/a5b48c0c24e6eebbe01aa2874eaaa9aa3d49f155/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py#L65) clean the searchstring in the request and pass it to the membership search in UsersGroupsControlPanelView.\r\n\r\nAn empty searchstring is like a intention \"list all users\" and a violation against the option `many_users = True`. The search with empty searchstring should not be performed.\r\n\r\nWith the following patch, all is fine ;-)\r\n\r\n```\r\nif searchGroups:\r\n if not self.many_groups or bool(searchString):\r\n groupResults = searchView.merge(chain(*[searchView.searchGroups(**{field: searchString}) for field in ['id', 'title']]), 'groupid')\r\n groupResults = [gtool.getGroupById(g['id']) for g in groupResults if g['id'] not in ignore]\r\n groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))\r\n \r\n if searchUsers:\r\n if not self.many_users or bool(searchString):\r\n userResults = searchView.merge(chain(*[searchView.searchUsers(**{field: searchString}) for field in ['login', 'fullname', 'email']]), 'userid')\r\n userResults = [mtool.getMemberById(u['id']) for u in userResults if u['id'] not in ignore]\r\n userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')\r\n```\r\n\n", "before_files": [{"content": "from AccessControl import getSecurityManager\nfrom Acquisition import aq_inner\nfrom itertools import chain\nfrom plone.autoform.form import AutoExtensibleForm\nfrom plone.base import PloneMessageFactory as _\nfrom plone.base.interfaces import ISecuritySchema\nfrom plone.base.interfaces import IUserGroupsSettingsSchema\nfrom plone.z3cform import layout\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import normalizeString\nfrom Products.Five.browser import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom z3c.form import button\nfrom z3c.form import form\nfrom zope.component import getAdapter\nfrom zope.component import getMultiAdapter\nfrom ZTUtils import make_query\n\n\nclass UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):\n schema = IUserGroupsSettingsSchema\n id = \"usergroupsettings-control-panel\"\n label = _(\"Users and Groups\")\n form_name = _(\"User/Groups settings\")\n control_panel_view = \"usergroups-controlpanel\"\n\n @button.buttonAndHandler(_(\"label_save\", default=\"Save\"), name=\"save\")\n def handleApply(self, action):\n super().handleApply(self, action)\n\n def updateActions(self):\n super().updateActions()\n if self.actions and \"save\" in self.actions:\n self.actions[\"save\"].addClass(\"btn-primary\")\n\n\nclass ControlPanelFormWrapper(layout.FormWrapper):\n \"\"\"Use this form as the plone.z3cform layout wrapper to get the control\n panel layout.\n \"\"\"\n\n index = ViewPageTemplateFile(\"controlpanel_usergroups_layout.pt\")\n\n\nUserGroupsSettingsPanelView = layout.wrap_form(\n UserGroupsSettingsControlPanel, ControlPanelFormWrapper\n)\n\n\nclass UsersGroupsControlPanelView(BrowserView):\n @property\n def portal_roles(self):\n pmemb = getToolByName(aq_inner(self.context), \"portal_membership\")\n return [r for r in pmemb.getPortalRoles() if r != \"Owner\"]\n\n @property\n def 
many_users(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users\n\n @property\n def many_groups(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups\n\n @property\n def email_as_username(self):\n return getAdapter(\n aq_inner(self.context), ISecuritySchema\n ).get_use_email_as_login()\n\n def makeQuery(self, **kw):\n return make_query(**kw)\n\n def membershipSearch(\n self, searchString=\"\", searchUsers=True, searchGroups=True, ignore=[]\n ):\n \"\"\"Search for users and/or groups, returning actual member and group items\n Replaces the now-deprecated prefs_user_groups_search.py script\"\"\"\n groupResults = userResults = []\n\n gtool = getToolByName(self, \"portal_groups\")\n mtool = getToolByName(self, \"portal_membership\")\n\n searchView = getMultiAdapter(\n (aq_inner(self.context), self.request), name=\"pas_search\"\n )\n\n if searchGroups:\n groupResults = searchView.merge(\n chain(\n *[\n searchView.searchGroups(**{field: searchString})\n for field in [\"id\", \"title\"]\n ]\n ),\n \"groupid\",\n )\n groupResults = [\n gtool.getGroupById(g[\"id\"])\n for g in groupResults\n if g[\"id\"] not in ignore\n ]\n groupResults.sort(\n key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n )\n\n if searchUsers:\n userResults = searchView.merge(\n chain(\n *[\n searchView.searchUsers(**{field: searchString})\n for field in [\"login\", \"fullname\", \"email\"]\n ]\n ),\n \"userid\",\n )\n userResults = [\n mtool.getMemberById(u[\"id\"])\n for u in userResults\n if u[\"id\"] not in ignore\n ]\n userResults.sort(\n key=lambda x: x is not None\n and x.getProperty(\"fullname\") is not None\n and normalizeString(x.getProperty(\"fullname\"))\n or \"\"\n )\n\n return groupResults + userResults\n\n def atoi(self, s):\n try:\n return int(s)\n except ValueError:\n return 0\n\n @property\n def is_zope_manager(self):\n return getSecurityManager().checkPermission(ManagePortal, self.context)\n\n # The next two class methods implement the following truth table:\n #\n # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT\n # False False False Lists unavailable\n # False False True Show all\n # False True False Show matching\n # False True True Show matching\n # True False False Too many to list\n # True False True Lists unavailable\n # True True False Show matching\n # True True True Show matching\n\n # TODO: Maybe have these methods return a text message (instead of a bool)\n # corresponding to the actual result, e.g. \"Too many to list\", \"Lists\n # unavailable\"\n\n @property\n def show_group_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n if acl.canListAllGroups():\n if self.many_groups:\n return True\n return False\n\n @property\n def show_users_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n # XXX Huh? Is canListAllUsers broken?\n if not acl.canListAllUsers():\n if self.many_users:\n return True\n return False\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups.py"}]}
| 2,917 | 700 |
gh_patches_debug_19353
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1230
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue with Cache in children endpoint
**This issue needs further investigation/testing**. For Malte the cache is currently disabled and the performance is acceptable. The priority of this issue depends on the performance of the migrated Integreat CMS.
### Describe the Bug
When calling random pages we sometimes get the following stack trace:
```
Feb 21 22:53:01 ERROR django.request - Internal Server Error: /leakarlsruhefelsstrasse/de/wp-json/extensions/v3/children/
Traceback (most recent call last):
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/decorators.py", line 178, in wrap
return func(request, *args, **kwargs)
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/decorators.py", line 124, in wrap
return function(request, *args, **kwargs)
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/v3/pages.py", line 205, in children
result.append(transform_page(public_translation))
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/v3/pages.py", line 22, in transform_page
parent_page = page_translation.page.cached_parent
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/django/utils/functional.py", line 48, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/cms/models/abstract_tree_node.py", line 210, in cached_parent
return self.get_cached_ancestors()[-1]
IndexError: list index out of range
```
This trace persists until the cache is flushed.
### Steps to Reproduce
Happens randomly. Visit pages until it happens.
For the time being we could use a simple filter instead of a cache operation in https://github.com/digitalfabrik/integreat-cms/blob/develop/integreat_cms/api/v3/pages.py#L231
</issue>
<code>
[start of integreat_cms/api/v3/pages.py]
1 """
2 This module includes functions related to the pages API endpoint.
3 """
4 from django.conf import settings
5 from django.http import JsonResponse, Http404
6 from django.shortcuts import get_object_or_404
7
8 from ...cms.models import Page
9 from ..decorators import json_response, matomo_tracking
10
11
12 def transform_page(page_translation):
13 """
14 Function to create a dict from a single page_translation Object.
15
16 :param page_translation: single page translation object
17 :type page_translation: ~integreat_cms.cms.models.pages.page_translation.PageTranslation
18
19 :return: data necessary for API
20 :rtype: dict
21 """
22 parent_page = page_translation.page.cached_parent
23 if parent_page:
24 parent_absolute_url = parent_page.get_public_translation(
25 page_translation.language.slug
26 ).get_absolute_url()
27 parent = {
28 "id": parent_page.id,
29 "url": settings.BASE_URL + parent_absolute_url,
30 "path": parent_absolute_url,
31 }
32 else:
33 parent = {
34 "id": 0,
35 "url": None,
36 "path": None,
37 }
38 absolute_url = page_translation.get_absolute_url()
39 return {
40 "id": page_translation.id,
41 "url": settings.BASE_URL + absolute_url,
42 "path": absolute_url,
43 "title": page_translation.title,
44 "modified_gmt": page_translation.combined_last_updated,
45 "excerpt": page_translation.content,
46 "content": page_translation.combined_text,
47 "parent": parent,
48 "order": page_translation.page.lft, # use left edge indicator of mptt model for order
49 "available_languages": page_translation.available_languages,
50 "thumbnail": page_translation.page.icon.url
51 if page_translation.page.icon
52 else None,
53 "hash": None,
54 }
55
56
57 @matomo_tracking
58 @json_response
59 # pylint: disable=unused-argument
60 def pages(request, region_slug, language_slug):
61 """
62 Function to iterate through all non-archived pages of a region and return them as JSON.
63
64 :param request: Django request
65 :type request: ~django.http.HttpRequest
66 :param region_slug: slug of a region
67 :type region_slug: str
68 :param language_slug: language slug
69 :type language_slug: str
70
71 :return: JSON object according to APIv3 pages endpoint definition
72 :rtype: ~django.http.JsonResponse
73 """
74 region = request.region
75 result = []
76 # The preliminary filter for explicitly_archived=False is not strictly required, but reduces the number of entries
77 # requested from the database
78 for page in region.pages.filter(explicitly_archived=False).cache_tree(
79 archived=False
80 )[0]:
81 page_translation = page.get_public_translation(language_slug)
82 if page_translation:
83 result.append(transform_page(page_translation))
84 return JsonResponse(
85 result, safe=False
86 ) # Turn off Safe-Mode to allow serializing arrays
87
88
89 def get_single_page(request, language_slug):
90 """
91 Helper function returning the desired page or a 404 if the
92 requested page does not exist.
93
94 :param request: The request that has been sent to the Django server
95 :type request: ~django.http.HttpRequest
96
97 :param language_slug: Code to identify the desired language
98 :type language_slug: str
99
100 :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.
101
102 :raises RuntimeError: If neither the id nor the url parameter is given
103
104 :return: the requested page
105 :rtype: ~integreat_cms.cms.models.pages.page.Page
106 """
107 region = request.region
108
109 if request.GET.get("id"):
110 page = get_object_or_404(region.pages, id=request.GET.get("id"))
111
112 elif request.GET.get("url"):
113 # Strip leading and trailing slashes to avoid ambiguous urls
114 url = request.GET.get("url").strip("/")
115 # The last path component of the url is the page translation slug
116 page_translation_slug = url.split("/")[-1]
117 # Get page by filtering for translation slug and translation language slug
118 filtered_pages = region.pages.filter(
119 translations__slug=page_translation_slug,
120 translations__language__slug=language_slug,
121 ).distinct()
122
123 if len(filtered_pages) != 1:
124 raise Http404("No matching page translation found for url.")
125 page = filtered_pages[0]
126
127 else:
128 raise RuntimeError("Either the id or the url parameter is required.")
129
130 return page
131
132
133 @matomo_tracking
134 @json_response
135 # pylint: disable=unused-argument
136 def single_page(request, region_slug, language_slug):
137 """
138 View function returning the desired page as a JSON or a 404 if the
139 requested page does not exist.
140
141 :param request: The request that has been sent to the Django server
142 :type request: ~django.http.HttpRequest
143
144 :param region_slug: Slug defining the region
145 :type region_slug: str
146
147 :param language_slug: Code to identify the desired language
148 :type language_slug: str
149
150 :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.
151
152 :return: JSON with the requested page and a HTTP status 200.
153 :rtype: ~django.http.JsonResponse
154 """
155 try:
156 page = get_single_page(request, language_slug)
157 except RuntimeError as e:
158 return JsonResponse({"error": str(e)}, status=400)
159 # Get most recent public revision of the page
160 page_translation = page.get_public_translation(language_slug)
161 if page_translation:
162 return JsonResponse(transform_page(page_translation), safe=False)
163
164 raise Http404("No Page matches the given url or id.")
165
166
167 @matomo_tracking
168 @json_response
169 # pylint: disable=unused-argument
170 def children(request, region_slug, language_slug):
171 """
172 Retrieves all children for a single page
173
174 :param request: The request that has been sent to the Django server
175 :type request: ~django.http.HttpRequest
176
177 :param region_slug: Slug defining the region
178 :type region_slug: str
179
180 :param language_slug: Code to identify the desired language
181 :type language_slug: str
182
183 :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.
184
185 :return: JSON with the requested page descendants
186 :rtype: ~django.http.JsonResponse
187 """
188 depth = int(request.GET.get("depth", 1))
189 try:
190 # try to get a single ancestor page based on the requests query string
191 root_pages = [get_single_page(request, language_slug)]
192 except RuntimeError:
193 # if neither id nor url is set then get all root pages
194 root_pages = Page.get_root_pages(region_slug)
195 # simulate a virtual root node for WP compatibility
196 # so that depth = 1 returns only those pages without parents (immediate children of this virtual root page)
197 # like in wordpress depth = 0 will return no results in this case
198 depth = depth - 1
199 result = []
200 for root in root_pages:
201 descendants = root.get_tree_max_depth(max_depth=depth)
202 for descendant in descendants:
203 public_translation = descendant.get_public_translation(language_slug)
204 if public_translation:
205 result.append(transform_page(public_translation))
206 return JsonResponse(result, safe=False)
207
208
209 @json_response
210 # pylint: disable=unused-argument
211 def parents(request, region_slug, language_slug):
212 """
213 Retrieves all ancestors (parent and all nodes up to the root node) of a page
214
215 :param request: The request that has been sent to the Django server
216 :type request: ~django.http.HttpRequest
217
218 :param region_slug: Slug defining the region
219 :type region_slug: str
220
221 :param language_slug: Code to identify the desired language
222 :type language_slug: str
223
224 :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.
225
226 :return: JSON with the requested page ancestors
227 :rtype: ~django.http.JsonResponse
228 """
229 current_page = get_single_page(request, language_slug)
230 result = []
231 for ancestor in current_page.get_cached_ancestors(include_self=False):
232 public_translation = ancestor.get_public_translation(language_slug)
233 if not public_translation:
234 raise Http404("No Page matches the given url or id.")
235 result.append(transform_page(public_translation))
236 return JsonResponse(result, safe=False)
237
[end of integreat_cms/api/v3/pages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/integreat_cms/api/v3/pages.py b/integreat_cms/api/v3/pages.py
--- a/integreat_cms/api/v3/pages.py
+++ b/integreat_cms/api/v3/pages.py
@@ -19,7 +19,7 @@
:return: data necessary for API
:rtype: dict
"""
- parent_page = page_translation.page.cached_parent
+ parent_page = page_translation.page.parent
if parent_page:
parent_absolute_url = parent_page.get_public_translation(
page_translation.language.slug
@@ -228,7 +228,7 @@
"""
current_page = get_single_page(request, language_slug)
result = []
- for ancestor in current_page.get_cached_ancestors(include_self=False):
+ for ancestor in current_page.get_ancestors():
public_translation = ancestor.get_public_translation(language_slug)
if not public_translation:
raise Http404("No Page matches the given url or id.")
|
{"golden_diff": "diff --git a/integreat_cms/api/v3/pages.py b/integreat_cms/api/v3/pages.py\n--- a/integreat_cms/api/v3/pages.py\n+++ b/integreat_cms/api/v3/pages.py\n@@ -19,7 +19,7 @@\n :return: data necessary for API\n :rtype: dict\n \"\"\"\n- parent_page = page_translation.page.cached_parent\n+ parent_page = page_translation.page.parent\n if parent_page:\n parent_absolute_url = parent_page.get_public_translation(\n page_translation.language.slug\n@@ -228,7 +228,7 @@\n \"\"\"\n current_page = get_single_page(request, language_slug)\n result = []\n- for ancestor in current_page.get_cached_ancestors(include_self=False):\n+ for ancestor in current_page.get_ancestors():\n public_translation = ancestor.get_public_translation(language_slug)\n if not public_translation:\n raise Http404(\"No Page matches the given url or id.\")\n", "issue": "Issue with Cache in children endpoint\n**This issue needs further investigation/testing**. For Malte the cache is currently disabled and the performance is acceptable. The priority of this issue depends on the performance of the migrated Integreat CMS.\r\n\r\n### Describe the Bug\r\nWhen calling random pages we sometimes get the following stack trace:\r\n```\r\nFeb 21 22:53:01 ERROR django.request - Internal Server Error: /leakarlsruhefelsstrasse/de/wp-json/extensions/v3/children/\r\nTraceback (most recent call last):\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/decorators.py\", line 178, in wrap\r\n return func(request, *args, **kwargs)\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/decorators.py\", line 124, in wrap\r\n return function(request, *args, **kwargs)\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/v3/pages.py\", line 205, in children\r\n result.append(transform_page(public_translation))\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/api/v3/pages.py\", line 22, in transform_page\r\n parent_page = page_translation.page.cached_parent\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/django/utils/functional.py\", line 48, in __get__\r\n res = instance.__dict__[self.name] = self.func(instance)\r\n File \"/opt/integreat-cms/.venv/lib/python3.9/site-packages/integreat_cms/cms/models/abstract_tree_node.py\", line 210, in cached_parent\r\n return self.get_cached_ancestors()[-1]\r\nIndexError: list index out of range\r\n```\r\nThis trace persists until the cache is flushed.\r\n\r\n### Steps to Reproduce\r\n\r\nHappens randomly. 
Visit pages until it happens.\r\n\r\nFor the time being we could use a simple filter instead of a cache operation in https://github.com/digitalfabrik/integreat-cms/blob/develop/integreat_cms/api/v3/pages.py#L231\n", "before_files": [{"content": "\"\"\"\nThis module includes functions related to the pages API endpoint.\n\"\"\"\nfrom django.conf import settings\nfrom django.http import JsonResponse, Http404\nfrom django.shortcuts import get_object_or_404\n\nfrom ...cms.models import Page\nfrom ..decorators import json_response, matomo_tracking\n\n\ndef transform_page(page_translation):\n \"\"\"\n Function to create a dict from a single page_translation Object.\n\n :param page_translation: single page translation object\n :type page_translation: ~integreat_cms.cms.models.pages.page_translation.PageTranslation\n\n :return: data necessary for API\n :rtype: dict\n \"\"\"\n parent_page = page_translation.page.cached_parent\n if parent_page:\n parent_absolute_url = parent_page.get_public_translation(\n page_translation.language.slug\n ).get_absolute_url()\n parent = {\n \"id\": parent_page.id,\n \"url\": settings.BASE_URL + parent_absolute_url,\n \"path\": parent_absolute_url,\n }\n else:\n parent = {\n \"id\": 0,\n \"url\": None,\n \"path\": None,\n }\n absolute_url = page_translation.get_absolute_url()\n return {\n \"id\": page_translation.id,\n \"url\": settings.BASE_URL + absolute_url,\n \"path\": absolute_url,\n \"title\": page_translation.title,\n \"modified_gmt\": page_translation.combined_last_updated,\n \"excerpt\": page_translation.content,\n \"content\": page_translation.combined_text,\n \"parent\": parent,\n \"order\": page_translation.page.lft, # use left edge indicator of mptt model for order\n \"available_languages\": page_translation.available_languages,\n \"thumbnail\": page_translation.page.icon.url\n if page_translation.page.icon\n else None,\n \"hash\": None,\n }\n\n\n@matomo_tracking\n@json_response\n# pylint: disable=unused-argument\ndef pages(request, region_slug, language_slug):\n \"\"\"\n Function to iterate through all non-archived pages of a region and return them as JSON.\n\n :param request: Django request\n :type request: ~django.http.HttpRequest\n :param region_slug: slug of a region\n :type region_slug: str\n :param language_slug: language slug\n :type language_slug: str\n\n :return: JSON object according to APIv3 pages endpoint definition\n :rtype: ~django.http.JsonResponse\n \"\"\"\n region = request.region\n result = []\n # The preliminary filter for explicitly_archived=False is not strictly required, but reduces the number of entries\n # requested from the database\n for page in region.pages.filter(explicitly_archived=False).cache_tree(\n archived=False\n )[0]:\n page_translation = page.get_public_translation(language_slug)\n if page_translation:\n result.append(transform_page(page_translation))\n return JsonResponse(\n result, safe=False\n ) # Turn off Safe-Mode to allow serializing arrays\n\n\ndef get_single_page(request, language_slug):\n \"\"\"\n Helper function returning the desired page or a 404 if the\n requested page does not exist.\n\n :param request: The request that has been sent to the Django server\n :type request: ~django.http.HttpRequest\n\n :param language_slug: Code to identify the desired language\n :type language_slug: str\n\n :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.\n\n :raises RuntimeError: If neither the id nor the url parameter is given\n\n :return: the requested 
page\n :rtype: ~integreat_cms.cms.models.pages.page.Page\n \"\"\"\n region = request.region\n\n if request.GET.get(\"id\"):\n page = get_object_or_404(region.pages, id=request.GET.get(\"id\"))\n\n elif request.GET.get(\"url\"):\n # Strip leading and trailing slashes to avoid ambiguous urls\n url = request.GET.get(\"url\").strip(\"/\")\n # The last path component of the url is the page translation slug\n page_translation_slug = url.split(\"/\")[-1]\n # Get page by filtering for translation slug and translation language slug\n filtered_pages = region.pages.filter(\n translations__slug=page_translation_slug,\n translations__language__slug=language_slug,\n ).distinct()\n\n if len(filtered_pages) != 1:\n raise Http404(\"No matching page translation found for url.\")\n page = filtered_pages[0]\n\n else:\n raise RuntimeError(\"Either the id or the url parameter is required.\")\n\n return page\n\n\n@matomo_tracking\n@json_response\n# pylint: disable=unused-argument\ndef single_page(request, region_slug, language_slug):\n \"\"\"\n View function returning the desired page as a JSON or a 404 if the\n requested page does not exist.\n\n :param request: The request that has been sent to the Django server\n :type request: ~django.http.HttpRequest\n\n :param region_slug: Slug defining the region\n :type region_slug: str\n\n :param language_slug: Code to identify the desired language\n :type language_slug: str\n\n :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.\n\n :return: JSON with the requested page and a HTTP status 200.\n :rtype: ~django.http.JsonResponse\n \"\"\"\n try:\n page = get_single_page(request, language_slug)\n except RuntimeError as e:\n return JsonResponse({\"error\": str(e)}, status=400)\n # Get most recent public revision of the page\n page_translation = page.get_public_translation(language_slug)\n if page_translation:\n return JsonResponse(transform_page(page_translation), safe=False)\n\n raise Http404(\"No Page matches the given url or id.\")\n\n\n@matomo_tracking\n@json_response\n# pylint: disable=unused-argument\ndef children(request, region_slug, language_slug):\n \"\"\"\n Retrieves all children for a single page\n\n :param request: The request that has been sent to the Django server\n :type request: ~django.http.HttpRequest\n\n :param region_slug: Slug defining the region\n :type region_slug: str\n\n :param language_slug: Code to identify the desired language\n :type language_slug: str\n\n :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.\n\n :return: JSON with the requested page descendants\n :rtype: ~django.http.JsonResponse\n \"\"\"\n depth = int(request.GET.get(\"depth\", 1))\n try:\n # try to get a single ancestor page based on the requests query string\n root_pages = [get_single_page(request, language_slug)]\n except RuntimeError:\n # if neither id nor url is set then get all root pages\n root_pages = Page.get_root_pages(region_slug)\n # simulate a virtual root node for WP compatibility\n # so that depth = 1 returns only those pages without parents (immediate children of this virtual root page)\n # like in wordpress depth = 0 will return no results in this case\n depth = depth - 1\n result = []\n for root in root_pages:\n descendants = root.get_tree_max_depth(max_depth=depth)\n for descendant in descendants:\n public_translation = descendant.get_public_translation(language_slug)\n if public_translation:\n 
result.append(transform_page(public_translation))\n return JsonResponse(result, safe=False)\n\n\n@json_response\n# pylint: disable=unused-argument\ndef parents(request, region_slug, language_slug):\n \"\"\"\n Retrieves all ancestors (parent and all nodes up to the root node) of a page\n\n :param request: The request that has been sent to the Django server\n :type request: ~django.http.HttpRequest\n\n :param region_slug: Slug defining the region\n :type region_slug: str\n\n :param language_slug: Code to identify the desired language\n :type language_slug: str\n\n :raises ~django.http.Http404: HTTP status 404 if the request is malformed or no page with the given id or url exists.\n\n :return: JSON with the requested page ancestors\n :rtype: ~django.http.JsonResponse\n \"\"\"\n current_page = get_single_page(request, language_slug)\n result = []\n for ancestor in current_page.get_cached_ancestors(include_self=False):\n public_translation = ancestor.get_public_translation(language_slug)\n if not public_translation:\n raise Http404(\"No Page matches the given url or id.\")\n result.append(transform_page(public_translation))\n return JsonResponse(result, safe=False)\n", "path": "integreat_cms/api/v3/pages.py"}]}
| 3,642 | 220 |
gh_patches_debug_2981
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-573
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws ec2 replace-network-acl-entry --protocol ?
How can I specify a protocol? When I specify --protocol tcp or --protocol udp, the command fails:
A client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'tcp' for IP protocol. Unknown protocol.
A client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'udp' for IP protocol. Unknown protocol.
The command create-network-acl-entry accepts --protocol tcp or --protocol udp.
</issue>
<code>
[start of awscli/customizations/ec2protocolarg.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization allows the user to specify the values "tcp", "udp",
15 or "icmp" as values for the --protocol parameter. The actual Protocol
16 parameter of the operation accepts only integer protocol numbers.
17 """
18
19 def _fix_args(operation, endpoint, params, **kwargs):
20 if 'protocol' in params:
21 if params['protocol'] == 'tcp':
22 params['protocol'] = '6'
23 elif params['protocol'] == 'udp':
24 params['protocol'] = '17'
25 elif params['protocol'] == 'icmp':
26 params['protocol'] = '1'
27 elif params['protocol'] == 'all':
28 params['protocol'] = '-1'
29
30
31 def register_protocol_args(cli):
32 ('before-parameter-build.ec2.RunInstances', _fix_args),
33 cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',
34 _fix_args)
35
36
[end of awscli/customizations/ec2protocolarg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/customizations/ec2protocolarg.py b/awscli/customizations/ec2protocolarg.py
--- a/awscli/customizations/ec2protocolarg.py
+++ b/awscli/customizations/ec2protocolarg.py
@@ -29,7 +29,8 @@
def register_protocol_args(cli):
- ('before-parameter-build.ec2.RunInstances', _fix_args),
cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',
_fix_args)
+ cli.register('before-parameter-build.ec2.ReplaceNetworkAclEntry',
+ _fix_args)
|
{"golden_diff": "diff --git a/awscli/customizations/ec2protocolarg.py b/awscli/customizations/ec2protocolarg.py\n--- a/awscli/customizations/ec2protocolarg.py\n+++ b/awscli/customizations/ec2protocolarg.py\n@@ -29,7 +29,8 @@\n \n \n def register_protocol_args(cli):\n- ('before-parameter-build.ec2.RunInstances', _fix_args),\n cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',\n _fix_args)\n+ cli.register('before-parameter-build.ec2.ReplaceNetworkAclEntry',\n+ _fix_args)\n", "issue": "aws ec2 replace-network-acl-entry --protocol ?\nHow can I specify a protocol? When I specify --protocol tcp or --protocol udp, the command fails:\n\nA client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'tcp' for IP protocol. Unknown protocol.\n\nA client error (InvalidParameterValue) occurred when calling the ReplaceNetworkAclEntry operation: Invalid value 'udp' for IP protocol. Unknown protocol.\n\nThe command create-network-acl-entry accepts --protocol tcp or --protocol udp.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization allows the user to specify the values \"tcp\", \"udp\",\nor \"icmp\" as values for the --protocol parameter. The actual Protocol\nparameter of the operation accepts only integer protocol numbers.\n\"\"\"\n\ndef _fix_args(operation, endpoint, params, **kwargs):\n if 'protocol' in params:\n if params['protocol'] == 'tcp':\n params['protocol'] = '6'\n elif params['protocol'] == 'udp':\n params['protocol'] = '17'\n elif params['protocol'] == 'icmp':\n params['protocol'] = '1'\n elif params['protocol'] == 'all':\n params['protocol'] = '-1'\n\n\ndef register_protocol_args(cli):\n ('before-parameter-build.ec2.RunInstances', _fix_args),\n cli.register('before-parameter-build.ec2.CreateNetworkAclEntry',\n _fix_args)\n \n", "path": "awscli/customizations/ec2protocolarg.py"}]}
| 1,037 | 124 |
gh_patches_debug_15624
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-1960
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Current HEAD zsh-completion.py needs option_aliases update
**Small description**
`option_aliases` was removed in ce497f444db6d2f3fc0b8309f5ca839196c33c8b but is still referred to in the zsh completion code.
https://github.com/saulpw/visidata/blob/34808745232e798b0f25e893bb444fc9f3c034eb/dev/zsh-completion.py#L11C41-L11C41
I think the script needs a slight rejig to use the (present) `vd` import instead.
I wonder whether this can be included in future CI?
**Expected result**
The command succeeds.
**Actual result**
```
> /build/visidata-src
> Traceback (most recent call last):
> File "/build/visidata-src/dev/zsh-completion.py", line 11, in <module>
> from visidata.main import option_aliases
> ImportError: cannot import name 'option_aliases' from 'visidata.main' (/build/visidata-src/visidata/main.py)
```
**Steps to reproduce**
```
python dev/zsh-completion.py
```
**Additional context**
~~Please include the version of VisiData and Python.~~
https://github.com/saulpw/visidata/tree/34808745232e798b0f25e893bb444fc9f3c034eb but I listed the commit above that causes the breakage — I suspect this is a two minute fix for somebody familiar with the codebase, though not me. I can help with extending CI, though it might just be a case of adding
```yaml
- name: Ensure VisiData can create completions
run: python dev/zsh-completion.py
```
(I guess you might want to run a linter, instead.)
</issue>
<code>
[start of dev/zsh-completion.py]
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3
4 import os
5 from os.path import dirname as dirn
6 import sys
7 import re
8
9 sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
10 from visidata import vd
11 from visidata.main import option_aliases
12
13 ZSH_COMPLETION_FILE = "_visidata"
14 ZSH_COMPLETION_TEMPLATE = "dev/zsh-completion.in"
15 pat_class = re.compile("'(.*)'")
16 pat_select = re.compile("^\([^)]*\)")
17
18
19 def generate_completion(opt):
20 prefix = "--" + opt.name
21 shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]
22 if len(shortnames):
23 if len(shortnames[0]) == 1:
24 shortname = "-" + shortnames[0]
25 else:
26 shortname = "--" + shortnames[0]
27 prefix = "{" + f"{shortname},{prefix}" + "}"
28 if isinstance(opt.value, bool):
29 completion = ""
30 else:
31 completion = ":" + pat_class.findall(str(opt.value.__class__))[0]
32 if opt.name in ["play", "output", "visidata_dir", "config"]:
33 completion += ":_files"
34 elif opt.name in ["plugins_url", "motd_url"]:
35 completion += ":_urls"
36 helpstr = opt.helpstr
37 selections = pat_select.findall(helpstr)
38 if len(selections):
39 completion += f":{selections[0].replace('/', ' ')}"
40 # TODO: use `zstyle ':completion:*' extra-verbose true`
41 # to control the display of default value
42 helpstr = helpstr + f" (default: {opt.value})"
43 helpstr = helpstr.replace("[", "\\[").replace("]", "\\]")
44 return f"{prefix}'[{helpstr}]{completion}'"
45
46
47 flags = [generate_completion(vd._options[opt]["default"]) for opt in vd._options]
48
49 with open(ZSH_COMPLETION_TEMPLATE) as f:
50 template = f.read()
51
52 template = template.replace("{{flags}}", " \\\n ".join(flags))
53
54 with open(ZSH_COMPLETION_FILE, "w") as f:
55 f.write(template)
56
[end of dev/zsh-completion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py
--- a/dev/zsh-completion.py
+++ b/dev/zsh-completion.py
@@ -8,7 +8,6 @@
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
from visidata import vd
-from visidata.main import option_aliases
ZSH_COMPLETION_FILE = "_visidata"
ZSH_COMPLETION_TEMPLATE = "dev/zsh-completion.in"
@@ -18,7 +17,9 @@
def generate_completion(opt):
prefix = "--" + opt.name
- shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]
+ shortnames = [
+ key for key, value in vd.option_aliases.items() if value[0] == opt.name
+ ]
if len(shortnames):
if len(shortnames[0]) == 1:
shortname = "-" + shortnames[0]
|
{"golden_diff": "diff --git a/dev/zsh-completion.py b/dev/zsh-completion.py\n--- a/dev/zsh-completion.py\n+++ b/dev/zsh-completion.py\n@@ -8,7 +8,6 @@\n \n sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\n from visidata import vd\n-from visidata.main import option_aliases\n \n ZSH_COMPLETION_FILE = \"_visidata\"\n ZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\n@@ -18,7 +17,9 @@\n \n def generate_completion(opt):\n prefix = \"--\" + opt.name\n- shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n+ shortnames = [\n+ key for key, value in vd.option_aliases.items() if value[0] == opt.name\n+ ]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n", "issue": "Current HEAD zsh-completion.py needs option_aliases update\n**Small description**\r\n\r\n`option_aliases` was removed in ce497f444db6d2f3fc0b8309f5ca839196c33c8b but is still referred to in the zsh completion code.\r\n\r\nhttps://github.com/saulpw/visidata/blob/34808745232e798b0f25e893bb444fc9f3c034eb/dev/zsh-completion.py#L11C41-L11C41\r\n\r\nI think the script needs a slight rejig to use the (present) `vd` import instead.\r\n\r\nI wonder whether this can be included in future CI?\r\n\r\n**Expected result**\r\n\r\nThe command succeeds.\r\n\r\n**Actual result**\r\n\r\n```\r\n > /build/visidata-src\r\n > Traceback (most recent call last):\r\n > File \"/build/visidata-src/dev/zsh-completion.py\", line 11, in <module>\r\n > from visidata.main import option_aliases\r\n > ImportError: cannot import name 'option_aliases' from 'visidata.main' (/build/visidata-src/visidata/main.py)\r\n```\r\n\r\n**Steps to reproduce**\r\n\r\n```\r\npython dev/zsh-completion.py\r\n```\r\n\r\n**Additional context**\r\n~~Please include the version of VisiData and Python.~~\r\n\r\nhttps://github.com/saulpw/visidata/tree/34808745232e798b0f25e893bb444fc9f3c034eb but I listed the commit above that causes the breakage \u2014 I suspect this is a two minute fix for somebody familiar with the codebase, though not me. 
I can help with extending CI, though it might just be a case of adding\r\n\r\n```yaml\r\n - name: Ensure VisiData can create completions\r\n run: python dev/zsh-completion.py\r\n```\r\n\r\n(I guess you might want to run a linter, instead.)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\n\nimport os\nfrom os.path import dirname as dirn\nimport sys\nimport re\n\nsys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))\nfrom visidata import vd\nfrom visidata.main import option_aliases\n\nZSH_COMPLETION_FILE = \"_visidata\"\nZSH_COMPLETION_TEMPLATE = \"dev/zsh-completion.in\"\npat_class = re.compile(\"'(.*)'\")\npat_select = re.compile(\"^\\([^)]*\\)\")\n\n\ndef generate_completion(opt):\n prefix = \"--\" + opt.name\n shortnames = [key for key, value in option_aliases.items() if value[0] == opt.name]\n if len(shortnames):\n if len(shortnames[0]) == 1:\n shortname = \"-\" + shortnames[0]\n else:\n shortname = \"--\" + shortnames[0]\n prefix = \"{\" + f\"{shortname},{prefix}\" + \"}\"\n if isinstance(opt.value, bool):\n completion = \"\"\n else:\n completion = \":\" + pat_class.findall(str(opt.value.__class__))[0]\n if opt.name in [\"play\", \"output\", \"visidata_dir\", \"config\"]:\n completion += \":_files\"\n elif opt.name in [\"plugins_url\", \"motd_url\"]:\n completion += \":_urls\"\n helpstr = opt.helpstr\n selections = pat_select.findall(helpstr)\n if len(selections):\n completion += f\":{selections[0].replace('/', ' ')}\"\n # TODO: use `zstyle ':completion:*' extra-verbose true`\n # to control the display of default value\n helpstr = helpstr + f\" (default: {opt.value})\"\n helpstr = helpstr.replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\")\n return f\"{prefix}'[{helpstr}]{completion}'\"\n\n\nflags = [generate_completion(vd._options[opt][\"default\"]) for opt in vd._options]\n\nwith open(ZSH_COMPLETION_TEMPLATE) as f:\n template = f.read()\n\ntemplate = template.replace(\"{{flags}}\", \" \\\\\\n \".join(flags))\n\nwith open(ZSH_COMPLETION_FILE, \"w\") as f:\n f.write(template)\n", "path": "dev/zsh-completion.py"}]}
| 1,579 | 217 |
gh_patches_debug_30510
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-2160
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
translate: mangle is a bit useless due to rate limit
<!-- Before reporting a bug, please search both open *and closed* issues to
see if it has already been reported. If you can, try to reproduce the problem
on an unmodified copy of the `master` branch first, as sometimes bugs are found
and fixed without a report. If the problem is unreported and persists in
`master`, please help us fix it quickly by filling out as much of this
information as you can. Thanks! -->
### Description
Mangle ends up just saying 'None' a lot after using it more than once or twice within a minute or so due to what seems to be a rate limit
### Reproduction steps
1. Start a basic, stock sopel up
2. Use mangle a few times within a minute or so
3. It returns None
### Expected behavior
It should either back off and respond once it eventually succeeds, or error out nicely. @rate (I think) might be useful here
### Logs
```
If applicable, add logs to help us figure out what's happening. Raw logs are
super helpful! Logs are usually found in ~/.sopel/logs, depending on your
configuration.
```
### Environment
- Sopel `.version`: latest on PyPI
- Sopel installed via: pip
- Python version: 3.7
- Operating system: Debian Buster
- IRCd `/version`: freenode's
- Relevant plugins: mangle
### Notes
https://cloud.google.com/translate/quotas
</issue>
<code>
[start of sopel/modules/translate.py]
1 """
2 translate.py - Sopel Translation Plugin
3 Copyright 2008, Sean B. Palmer, inamidst.com
4 Copyright 2013-2014, Elad Alfassa <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import generator_stop
10
11 import json
12 import logging
13 import random
14
15 import requests
16
17 from sopel import plugin, tools
18 from sopel.tools import web
19
20
21 LOGGER = logging.getLogger(__name__)
22 PLUGIN_OUTPUT_PREFIX = '[translate] '
23
24
25 def setup(bot):
26 if 'mangle_lines' not in bot.memory:
27 bot.memory['mangle_lines'] = tools.SopelIdentifierMemory()
28
29
30 def shutdown(bot):
31 try:
32 del bot.memory['mangle_lines']
33 except KeyError:
34 pass
35
36
37 def translate(text, in_lang='auto', out_lang='en'):
38 raw = False
39 if str(out_lang).endswith('-raw'):
40 out_lang = out_lang[:-4]
41 raw = True
42
43 headers = {
44 'User-Agent': 'Mozilla/5.0' +
45 '(X11; U; Linux i686)' +
46 'Gecko/20071127 Firefox/2.0.0.11'
47 }
48
49 query = {
50 "client": "gtx",
51 "sl": in_lang,
52 "tl": out_lang,
53 "dt": "t",
54 "q": text,
55 }
56 url = "https://translate.googleapis.com/translate_a/single"
57 result = requests.get(url, params=query, timeout=40, headers=headers).text
58
59 if result == '[,,""]':
60 return None, in_lang
61
62 while ',,' in result:
63 result = result.replace(',,', ',null,')
64 result = result.replace('[,', '[null,')
65
66 try:
67 data = json.loads(result)
68 except ValueError:
69 LOGGER.error(
70 'Error parsing JSON response from translate API (%s to %s: "%s")',
71 in_lang, out_lang, text)
72 return None, None
73
74 if raw:
75 return str(data), 'en-raw'
76
77 try:
78 language = data[2] # -2][0][0]
79 except IndexError:
80 language = '?'
81
82 return ''.join(x[0] for x in data[0]), language
83
84
85 @plugin.rule(r'$nickname[,:]\s+(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?["“](.+?)["”]\? *$')
86 @plugin.example('$nickname: "mon chien"? or $nickname: fr "mon chien"?')
87 @plugin.priority('low')
88 @plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
89 def tr(bot, trigger):
90 """Translates a phrase, with an optional language hint."""
91 in_lang, out_lang, phrase = trigger.groups()
92
93 if (len(phrase) > 350) and (not trigger.admin):
94 bot.reply('Phrase must be under 350 characters.')
95 return
96
97 if phrase.strip() == '':
98 bot.reply('You need to specify a string for me to translate!')
99 return
100
101 in_lang = in_lang or 'auto'
102 out_lang = out_lang or 'en'
103
104 if in_lang == out_lang:
105 bot.reply('Language guessing failed, so try suggesting one!')
106 return
107
108 try:
109 msg, in_lang = translate(phrase, in_lang, out_lang)
110 except requests.Timeout:
111 bot.reply("Translation service unavailable (timeout).")
112 LOGGER.error(
113 'Translate API error (%s to %s: "%s"): timeout.',
114 in_lang, out_lang, phrase)
115 return
116 except requests.RequestException as http_error:
117 bot.reply("Translation request failed.")
118 LOGGER.exception(
119 'Translate API error (%s to %s: "%s"): %s.',
120 in_lang, out_lang, phrase, http_error)
121 return
122
123 if not in_lang:
124 bot.reply("Translation failed, probably because of a rate-limit.")
125 return
126
127 if not msg:
128 bot.reply(
129 'The %s to %s translation failed; are you sure you specified '
130 'valid language abbreviations?' % (in_lang, out_lang)
131 )
132 return
133
134 msg = web.decode(msg)
135 msg = '"%s" (%s to %s, translate.google.com)' % (msg, in_lang, out_lang)
136 bot.say(msg)
137
138
139 @plugin.command('translate', 'tr')
140 @plugin.example('.tr :en :fr my dog',
141 '"mon chien" (en to fr, translate.google.com)',
142 online=True, vcr=True)
143 @plugin.example('.tr מחשב',
144 '"computer" (iw to en, translate.google.com)',
145 online=True, vcr=True)
146 @plugin.example('.tr mon chien',
147 '"my dog" (fr to en, translate.google.com)',
148 online=True, vcr=True)
149 @plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
150 def tr2(bot, trigger):
151 """Translates a phrase, with an optional language hint."""
152 command = trigger.group(2)
153
154 if not command:
155 bot.reply('You did not give me anything to translate.')
156 return
157
158 def langcode(p):
159 return p.startswith(':') and (2 < len(p) < 10) and p[1:].isalpha()
160
161 args = ['auto', 'en']
162
163 for i in range(2):
164 if ' ' not in command:
165 break
166 prefix, cmd = command.split(' ', 1)
167 if langcode(prefix):
168 args[i] = prefix[1:]
169 command = cmd
170
171 phrase = command
172 if (len(phrase) > 350) and (not trigger.admin):
173 bot.reply('Phrase must be under 350 characters.')
174 return
175
176 if phrase.strip() == '':
177 bot.reply('You need to specify a string for me to translate!')
178 return
179
180 src, dest = args
181
182 if src == dest:
183 bot.reply('Language guessing failed, so try suggesting one!')
184 return
185
186 try:
187 msg, src = translate(phrase, src, dest)
188 except requests.Timeout:
189 bot.reply("Translation service unavailable (timeout).")
190 LOGGER.error(
191 'Translate API error (%s to %s: "%s"): timeout.',
192 src, dest, phrase)
193 return
194 except requests.RequestException as http_error:
195 bot.reply("Translation request failed.")
196 LOGGER.exception(
197 'Translate API error (%s to %s: "%s"): %s.',
198 src, dest, phrase, http_error)
199 return
200
201 if not src:
202 return bot.say("Translation failed, probably because of a rate-limit.")
203
204 if not msg:
205 bot.reply(
206 'The %s to %s translation failed; '
207 'are you sure you specified valid language abbreviations?'
208 % (src, dest))
209 return
210
211 msg = web.decode(msg) # msg.replace(''', "'")
212 msg = '"%s" (%s to %s, translate.google.com)' % (msg, src, dest)
213
214 bot.say(msg)
215
216
217 def get_random_lang(long_list, short_list):
218 random_index = random.randint(0, len(long_list) - 1)
219 random_lang = long_list[random_index]
220 if random_lang not in short_list:
221 short_list.append(random_lang)
222 else:
223 return get_random_lang(long_list, short_list)
224 return short_list
225
226
227 @plugin.command('mangle', 'mangle2')
228 @plugin.output_prefix(PLUGIN_OUTPUT_PREFIX)
229 def mangle(bot, trigger):
230 """Repeatedly translate the input until it makes absolutely no sense."""
231 long_lang_list = ['fr', 'de', 'es', 'it', 'no', 'he', 'la', 'ja', 'cy', 'ar', 'yi', 'zh', 'nl', 'ru', 'fi', 'hi', 'af', 'jw', 'mr', 'ceb', 'cs', 'ga', 'sv', 'eo', 'el', 'ms', 'lv']
232 lang_list = []
233 for __ in range(0, 8):
234 lang_list = get_random_lang(long_lang_list, lang_list)
235 random.shuffle(lang_list)
236 if trigger.group(2) is None:
237 try:
238 phrase = (bot.memory['mangle_lines'][trigger.sender], '')
239 except KeyError:
240 bot.reply("What do you want me to mangle?")
241 return
242 else:
243 phrase = (trigger.group(2).strip(), '')
244 if phrase[0] == '':
245 bot.reply("What do you want me to mangle?")
246 return
247 for lang in lang_list:
248 backup = phrase
249 try:
250 phrase = translate(phrase[0], 'en', lang)
251 except Exception: # TODO: Be specific
252 phrase = False
253 if not phrase:
254 phrase = backup
255 break
256
257 try:
258 phrase = translate(phrase[0], lang, 'en')
259 except Exception: # TODO: Be specific
260 phrase = backup
261 continue
262
263 if not phrase:
264 phrase = backup
265 break
266
267 bot.say(phrase[0])
268
269
270 @plugin.rule('(.*)')
271 @plugin.priority('low')
272 @plugin.unblockable
273 def collect_mangle_lines(bot, trigger):
274 bot.memory['mangle_lines'][trigger.sender] = "%s said '%s'" % (
275 trigger.nick,
276 trigger.group(0).strip(),
277 )
278
[end of sopel/modules/translate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/translate.py b/sopel/modules/translate.py
--- a/sopel/modules/translate.py
+++ b/sopel/modules/translate.py
@@ -230,9 +230,11 @@
"""Repeatedly translate the input until it makes absolutely no sense."""
long_lang_list = ['fr', 'de', 'es', 'it', 'no', 'he', 'la', 'ja', 'cy', 'ar', 'yi', 'zh', 'nl', 'ru', 'fi', 'hi', 'af', 'jw', 'mr', 'ceb', 'cs', 'ga', 'sv', 'eo', 'el', 'ms', 'lv']
lang_list = []
+
for __ in range(0, 8):
lang_list = get_random_lang(long_lang_list, lang_list)
random.shuffle(lang_list)
+
if trigger.group(2) is None:
try:
phrase = (bot.memory['mangle_lines'][trigger.sender], '')
@@ -241,9 +243,11 @@
return
else:
phrase = (trigger.group(2).strip(), '')
+
if phrase[0] == '':
bot.reply("What do you want me to mangle?")
return
+
for lang in lang_list:
backup = phrase
try:
@@ -264,6 +268,12 @@
phrase = backup
break
+ if phrase[0] is None:
+ # translate() returns (None, None) if an error happens,
+ # usually because the bot has exceeded a rate limit
+ bot.reply("Translation rate limit reached. Try again later.")
+ return
+
bot.say(phrase[0])
|
{"golden_diff": "diff --git a/sopel/modules/translate.py b/sopel/modules/translate.py\n--- a/sopel/modules/translate.py\n+++ b/sopel/modules/translate.py\n@@ -230,9 +230,11 @@\n \"\"\"Repeatedly translate the input until it makes absolutely no sense.\"\"\"\n long_lang_list = ['fr', 'de', 'es', 'it', 'no', 'he', 'la', 'ja', 'cy', 'ar', 'yi', 'zh', 'nl', 'ru', 'fi', 'hi', 'af', 'jw', 'mr', 'ceb', 'cs', 'ga', 'sv', 'eo', 'el', 'ms', 'lv']\n lang_list = []\n+\n for __ in range(0, 8):\n lang_list = get_random_lang(long_lang_list, lang_list)\n random.shuffle(lang_list)\n+\n if trigger.group(2) is None:\n try:\n phrase = (bot.memory['mangle_lines'][trigger.sender], '')\n@@ -241,9 +243,11 @@\n return\n else:\n phrase = (trigger.group(2).strip(), '')\n+\n if phrase[0] == '':\n bot.reply(\"What do you want me to mangle?\")\n return\n+\n for lang in lang_list:\n backup = phrase\n try:\n@@ -264,6 +268,12 @@\n phrase = backup\n break\n \n+ if phrase[0] is None:\n+ # translate() returns (None, None) if an error happens,\n+ # usually because the bot has exceeded a rate limit\n+ bot.reply(\"Translation rate limit reached. Try again later.\")\n+ return\n+\n bot.say(phrase[0])\n", "issue": "translate: mangle is a bit useless due to rate limit\n<!-- Before reporting a bug, please search both open *and closed* issues to\r\nsee if it has already been reported. If you can, try to reproduce the problem\r\non an unmodified copy of the `master` branch first, as sometimes bugs are found\r\nand fixed without a report. If the problem is unreported and persists in\r\n`master`, please help us fix it quickly by filling out as much of this\r\ninformation as you can. Thanks! -->\r\n\r\n### Description\r\nMangle ends up just saying 'None' a lot after using it more than once or twice within a minute or so due to what seems to be a rate limit\r\n\r\n### Reproduction steps\r\n1. Start a basic, stock sopel up\r\n2. Use mangle a few times within a minute or so\r\n3. It returns None\r\n\r\n### Expected behavior\r\nIt either back off and respond when it is successful or error nicely. @rate (I think) might be useful here\r\n\r\n### Logs\r\n```\r\nIf applicable, add logs to help us figure out what's happening. Raw logs are\r\nsuper helpful! Logs are usually found in ~/.sopel/logs, depending on your\r\nconfiguration.\r\n```\r\n\r\n### Environment\r\n- Sopel `.version`: lastest on PyPi\r\n- Sopel installed via: pip \r\n- Python version: 3.7\r\n- Operating system: Debian Buster\r\n- IRCd `/version`: freenode's\r\n- Relevant plugins: mangle\r\n\r\n### Notes\r\nhttps://cloud.google.com/translate/quotas\r\n\n", "before_files": [{"content": "\"\"\"\ntranslate.py - Sopel Translation Plugin\nCopyright 2008, Sean B. 
Palmer, inamidst.com\nCopyright 2013-2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nimport json\nimport logging\nimport random\n\nimport requests\n\nfrom sopel import plugin, tools\nfrom sopel.tools import web\n\n\nLOGGER = logging.getLogger(__name__)\nPLUGIN_OUTPUT_PREFIX = '[translate] '\n\n\ndef setup(bot):\n if 'mangle_lines' not in bot.memory:\n bot.memory['mangle_lines'] = tools.SopelIdentifierMemory()\n\n\ndef shutdown(bot):\n try:\n del bot.memory['mangle_lines']\n except KeyError:\n pass\n\n\ndef translate(text, in_lang='auto', out_lang='en'):\n raw = False\n if str(out_lang).endswith('-raw'):\n out_lang = out_lang[:-4]\n raw = True\n\n headers = {\n 'User-Agent': 'Mozilla/5.0' +\n '(X11; U; Linux i686)' +\n 'Gecko/20071127 Firefox/2.0.0.11'\n }\n\n query = {\n \"client\": \"gtx\",\n \"sl\": in_lang,\n \"tl\": out_lang,\n \"dt\": \"t\",\n \"q\": text,\n }\n url = \"https://translate.googleapis.com/translate_a/single\"\n result = requests.get(url, params=query, timeout=40, headers=headers).text\n\n if result == '[,,\"\"]':\n return None, in_lang\n\n while ',,' in result:\n result = result.replace(',,', ',null,')\n result = result.replace('[,', '[null,')\n\n try:\n data = json.loads(result)\n except ValueError:\n LOGGER.error(\n 'Error parsing JSON response from translate API (%s to %s: \"%s\")',\n in_lang, out_lang, text)\n return None, None\n\n if raw:\n return str(data), 'en-raw'\n\n try:\n language = data[2] # -2][0][0]\n except IndexError:\n language = '?'\n\n return ''.join(x[0] for x in data[0]), language\n\n\[email protected](r'$nickname[,:]\\s+(?:([a-z]{2}) +)?(?:([a-z]{2}|en-raw) +)?[\"\u201c](.+?)[\"\u201d]\\? *$')\[email protected]('$nickname: \"mon chien\"? or $nickname: fr \"mon chien\"?')\[email protected]('low')\[email protected]_prefix(PLUGIN_OUTPUT_PREFIX)\ndef tr(bot, trigger):\n \"\"\"Translates a phrase, with an optional language hint.\"\"\"\n in_lang, out_lang, phrase = trigger.groups()\n\n if (len(phrase) > 350) and (not trigger.admin):\n bot.reply('Phrase must be under 350 characters.')\n return\n\n if phrase.strip() == '':\n bot.reply('You need to specify a string for me to translate!')\n return\n\n in_lang = in_lang or 'auto'\n out_lang = out_lang or 'en'\n\n if in_lang == out_lang:\n bot.reply('Language guessing failed, so try suggesting one!')\n return\n\n try:\n msg, in_lang = translate(phrase, in_lang, out_lang)\n except requests.Timeout:\n bot.reply(\"Translation service unavailable (timeout).\")\n LOGGER.error(\n 'Translate API error (%s to %s: \"%s\"): timeout.',\n in_lang, out_lang, phrase)\n return\n except requests.RequestException as http_error:\n bot.reply(\"Translation request failed.\")\n LOGGER.exception(\n 'Translate API error (%s to %s: \"%s\"): %s.',\n in_lang, out_lang, phrase, http_error)\n return\n\n if not in_lang:\n bot.reply(\"Translation failed, probably because of a rate-limit.\")\n return\n\n if not msg:\n bot.reply(\n 'The %s to %s translation failed; are you sure you specified '\n 'valid language abbreviations?' 
% (in_lang, out_lang)\n )\n return\n\n msg = web.decode(msg)\n msg = '\"%s\" (%s to %s, translate.google.com)' % (msg, in_lang, out_lang)\n bot.say(msg)\n\n\[email protected]('translate', 'tr')\[email protected]('.tr :en :fr my dog',\n '\"mon chien\" (en to fr, translate.google.com)',\n online=True, vcr=True)\[email protected]('.tr \u05de\u05d7\u05e9\u05d1',\n '\"computer\" (iw to en, translate.google.com)',\n online=True, vcr=True)\[email protected]('.tr mon chien',\n '\"my dog\" (fr to en, translate.google.com)',\n online=True, vcr=True)\[email protected]_prefix(PLUGIN_OUTPUT_PREFIX)\ndef tr2(bot, trigger):\n \"\"\"Translates a phrase, with an optional language hint.\"\"\"\n command = trigger.group(2)\n\n if not command:\n bot.reply('You did not give me anything to translate.')\n return\n\n def langcode(p):\n return p.startswith(':') and (2 < len(p) < 10) and p[1:].isalpha()\n\n args = ['auto', 'en']\n\n for i in range(2):\n if ' ' not in command:\n break\n prefix, cmd = command.split(' ', 1)\n if langcode(prefix):\n args[i] = prefix[1:]\n command = cmd\n\n phrase = command\n if (len(phrase) > 350) and (not trigger.admin):\n bot.reply('Phrase must be under 350 characters.')\n return\n\n if phrase.strip() == '':\n bot.reply('You need to specify a string for me to translate!')\n return\n\n src, dest = args\n\n if src == dest:\n bot.reply('Language guessing failed, so try suggesting one!')\n return\n\n try:\n msg, src = translate(phrase, src, dest)\n except requests.Timeout:\n bot.reply(\"Translation service unavailable (timeout).\")\n LOGGER.error(\n 'Translate API error (%s to %s: \"%s\"): timeout.',\n src, dest, phrase)\n return\n except requests.RequestException as http_error:\n bot.reply(\"Translation request failed.\")\n LOGGER.exception(\n 'Translate API error (%s to %s: \"%s\"): %s.',\n src, dest, phrase, http_error)\n return\n\n if not src:\n return bot.say(\"Translation failed, probably because of a rate-limit.\")\n\n if not msg:\n bot.reply(\n 'The %s to %s translation failed; '\n 'are you sure you specified valid language abbreviations?'\n % (src, dest))\n return\n\n msg = web.decode(msg) # msg.replace(''', \"'\")\n msg = '\"%s\" (%s to %s, translate.google.com)' % (msg, src, dest)\n\n bot.say(msg)\n\n\ndef get_random_lang(long_list, short_list):\n random_index = random.randint(0, len(long_list) - 1)\n random_lang = long_list[random_index]\n if random_lang not in short_list:\n short_list.append(random_lang)\n else:\n return get_random_lang(long_list, short_list)\n return short_list\n\n\[email protected]('mangle', 'mangle2')\[email protected]_prefix(PLUGIN_OUTPUT_PREFIX)\ndef mangle(bot, trigger):\n \"\"\"Repeatedly translate the input until it makes absolutely no sense.\"\"\"\n long_lang_list = ['fr', 'de', 'es', 'it', 'no', 'he', 'la', 'ja', 'cy', 'ar', 'yi', 'zh', 'nl', 'ru', 'fi', 'hi', 'af', 'jw', 'mr', 'ceb', 'cs', 'ga', 'sv', 'eo', 'el', 'ms', 'lv']\n lang_list = []\n for __ in range(0, 8):\n lang_list = get_random_lang(long_lang_list, lang_list)\n random.shuffle(lang_list)\n if trigger.group(2) is None:\n try:\n phrase = (bot.memory['mangle_lines'][trigger.sender], '')\n except KeyError:\n bot.reply(\"What do you want me to mangle?\")\n return\n else:\n phrase = (trigger.group(2).strip(), '')\n if phrase[0] == '':\n bot.reply(\"What do you want me to mangle?\")\n return\n for lang in lang_list:\n backup = phrase\n try:\n phrase = translate(phrase[0], 'en', lang)\n except Exception: # TODO: Be specific\n phrase = False\n if not phrase:\n phrase = backup\n break\n\n try:\n 
phrase = translate(phrase[0], lang, 'en')\n except Exception: # TODO: Be specific\n phrase = backup\n continue\n\n if not phrase:\n phrase = backup\n break\n\n bot.say(phrase[0])\n\n\[email protected]('(.*)')\[email protected]('low')\[email protected]\ndef collect_mangle_lines(bot, trigger):\n bot.memory['mangle_lines'][trigger.sender] = \"%s said '%s'\" % (\n trigger.nick,\n trigger.group(0).strip(),\n )\n", "path": "sopel/modules/translate.py"}]}
| 3,678 | 385 |
gh_patches_debug_37423
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2380
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement temporality conversion
Blocked by #2327
Temporality conversion is specified in the [design doc](https://docs.google.com/document/d/1FOmpGCiZAWTd6a3vcEHBnm1LKp7HAKuD4BBN3YCkymc/edit#heading=h.ulghopzfw7ou). For this issue:
- Create a component to handle temporality conversion and store the previous collection interval's cumulative point value when necessary. For example, there is no need to store the previous cumulative for async instruments and cumulative export temporality.
- Write the temporality conversion algorithm. Use the export format added for #2327 as the input and output of the algorithm.
</issue>
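
A minimal sketch of the delta-to-cumulative rule the issue describes, using hypothetical names (`_SumPoint`, `to_cumulative`) rather than the SDK's actual classes; the real `Sum` and `AggregationTemporality` types appear in the code listing below.

```python
from dataclasses import dataclass


@dataclass
class _SumPoint:
    start_time_unix_nano: int
    time_unix_nano: int
    value: float


def to_cumulative(previous: _SumPoint, delta: _SumPoint) -> _SumPoint:
    # Cumulative output keeps the start time stored from the previous
    # collection interval and folds the new delta into the running total.
    return _SumPoint(
        start_time_unix_nano=previous.start_time_unix_nano,
        time_unix_nano=delta.time_unix_nano,
        value=previous.value + delta.value,
    )
```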
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from abc import ABC, abstractmethod
16 from bisect import bisect_left
17 from logging import getLogger
18 from math import inf
19 from threading import Lock
20 from typing import Generic, List, Optional, Sequence, TypeVar
21
22 from opentelemetry.sdk._metrics.measurement import Measurement
23 from opentelemetry.sdk._metrics.point import (
24 AggregationTemporality,
25 Gauge,
26 Histogram,
27 PointT,
28 Sum,
29 )
30 from opentelemetry.util._time import _time_ns
31
32 _PointVarT = TypeVar("_PointVarT", bound=PointT)
33
34 _logger = getLogger(__name__)
35
36
37 class _InstrumentMonotonicityAwareAggregation:
38 def __init__(self, instrument_is_monotonic: bool):
39 self._instrument_is_monotonic = instrument_is_monotonic
40 super().__init__()
41
42
43 class Aggregation(ABC, Generic[_PointVarT]):
44 def __init__(self):
45 self._lock = Lock()
46
47 @abstractmethod
48 def aggregate(self, measurement: Measurement) -> None:
49 pass
50
51 @abstractmethod
52 def collect(self) -> Optional[_PointVarT]:
53 pass
54
55
56 class SynchronousSumAggregation(
57 _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]
58 ):
59 def __init__(self, instrument_is_monotonic: bool):
60 super().__init__(instrument_is_monotonic)
61 self._value = 0
62 self._start_time_unix_nano = _time_ns()
63
64 def aggregate(self, measurement: Measurement) -> None:
65 with self._lock:
66 self._value = self._value + measurement.value
67
68 def collect(self) -> Optional[Sum]:
69 """
70 Atomically return a point for the current value of the metric and
71 reset the aggregation value.
72 """
73 now = _time_ns()
74
75 with self._lock:
76 value = self._value
77 start_time_unix_nano = self._start_time_unix_nano
78
79 self._value = 0
80 self._start_time_unix_nano = now + 1
81
82 return Sum(
83 aggregation_temporality=AggregationTemporality.DELTA,
84 is_monotonic=self._instrument_is_monotonic,
85 start_time_unix_nano=start_time_unix_nano,
86 time_unix_nano=now,
87 value=value,
88 )
89
90
91 class AsynchronousSumAggregation(
92 _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]
93 ):
94 def __init__(self, instrument_is_monotonic: bool):
95 super().__init__(instrument_is_monotonic)
96 self._value = None
97 self._start_time_unix_nano = _time_ns()
98
99 def aggregate(self, measurement: Measurement) -> None:
100 with self._lock:
101 self._value = measurement.value
102
103 def collect(self) -> Optional[Sum]:
104 """
105 Atomically return a point for the current value of the metric.
106 """
107 if self._value is None:
108 return None
109
110 return Sum(
111 start_time_unix_nano=self._start_time_unix_nano,
112 time_unix_nano=_time_ns(),
113 value=self._value,
114 aggregation_temporality=AggregationTemporality.CUMULATIVE,
115 is_monotonic=self._instrument_is_monotonic,
116 )
117
118
119 class LastValueAggregation(Aggregation[Gauge]):
120 def __init__(self):
121 super().__init__()
122 self._value = None
123
124 def aggregate(self, measurement: Measurement):
125 with self._lock:
126 self._value = measurement.value
127
128 def collect(self) -> Optional[Gauge]:
129 """
130 Atomically return a point for the current value of the metric.
131 """
132 if self._value is None:
133 return None
134
135 return Gauge(
136 time_unix_nano=_time_ns(),
137 value=self._value,
138 )
139
140
141 class ExplicitBucketHistogramAggregation(Aggregation[Histogram]):
142 def __init__(
143 self,
144 boundaries: Sequence[float] = (
145 0.0,
146 5.0,
147 10.0,
148 25.0,
149 50.0,
150 75.0,
151 100.0,
152 250.0,
153 500.0,
154 1000.0,
155 ),
156 record_min_max: bool = True,
157 ):
158 super().__init__()
159 self._boundaries = tuple(boundaries)
160 self._bucket_counts = self._get_empty_bucket_counts()
161 self._min = inf
162 self._max = -inf
163 self._sum = 0
164 self._record_min_max = record_min_max
165 self._start_time_unix_nano = _time_ns()
166
167 def _get_empty_bucket_counts(self) -> List[int]:
168 return [0] * (len(self._boundaries) + 1)
169
170 def aggregate(self, measurement: Measurement) -> None:
171
172 value = measurement.value
173
174 if self._record_min_max:
175 self._min = min(self._min, value)
176 self._max = max(self._max, value)
177
178 self._sum += value
179
180 self._bucket_counts[bisect_left(self._boundaries, value)] += 1
181
182 def collect(self) -> Optional[Histogram]:
183 """
184 Atomically return a point for the current value of the metric.
185 """
186 now = _time_ns()
187
188 with self._lock:
189 value = self._bucket_counts
190 start_time_unix_nano = self._start_time_unix_nano
191
192 self._bucket_counts = self._get_empty_bucket_counts()
193 self._start_time_unix_nano = now + 1
194
195 return Histogram(
196 start_time_unix_nano=start_time_unix_nano,
197 time_unix_nano=now,
198 bucket_counts=tuple(value),
199 explicit_bounds=self._boundaries,
200 aggregation_temporality=AggregationTemporality.DELTA,
201 sum=self._sum,
202 )
203
[end of opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py
@@ -14,6 +14,7 @@
from abc import ABC, abstractmethod
from bisect import bisect_left
+from dataclasses import replace
from logging import getLogger
from math import inf
from threading import Lock
@@ -200,3 +201,80 @@
aggregation_temporality=AggregationTemporality.DELTA,
sum=self._sum,
)
+
+
+def _convert_aggregation_temporality(
+ previous_point: Optional[_PointVarT],
+ current_point: _PointVarT,
+ aggregation_temporality: AggregationTemporality,
+) -> _PointVarT:
+ """Converts `current_point` to the requested `aggregation_temporality`
+ given the `previous_point`.
+
+ `previous_point` must have `CUMULATIVE` temporality. `current_point` may
+ have `DELTA` or `CUMULATIVE` temporality.
+
+ The output point will have temporality `aggregation_temporality`. Since
+ `GAUGE` points have no temporality, they are returned unchanged.
+ """
+
+ current_point_type = type(current_point)
+
+ if current_point_type is Gauge:
+ return current_point
+
+ if previous_point is not None and type(previous_point) is not type(
+ current_point
+ ):
+ _logger.warning(
+ "convert_aggregation_temporality called with mismatched "
+ "point types: %s and %s",
+ type(previous_point),
+ current_point_type,
+ )
+
+ return current_point
+
+ if current_point_type is Sum:
+ if previous_point is None:
+ # Output CUMULATIVE for a synchronous instrument
+ # There is no previous value, return the delta point as a
+ # cumulative
+ return replace(
+ current_point, aggregation_temporality=aggregation_temporality
+ )
+ if previous_point.aggregation_temporality is not (
+ AggregationTemporality.CUMULATIVE
+ ):
+ raise Exception(
+ "previous_point aggregation temporality must be CUMULATIVE"
+ )
+
+ if current_point.aggregation_temporality is aggregation_temporality:
+ # Output DELTA for a synchronous instrument
+ # Output CUMULATIVE for an asynchronous instrument
+ return current_point
+
+ if aggregation_temporality is AggregationTemporality.DELTA:
+ # Output temporality DELTA for an asynchronous instrument
+ value = current_point.value - previous_point.value
+ output_start_time_unix_nano = previous_point.time_unix_nano
+
+ else:
+ # Output CUMULATIVE for a synchronous instrument
+ value = current_point.value + previous_point.value
+ output_start_time_unix_nano = previous_point.start_time_unix_nano
+
+ is_monotonic = (
+ previous_point.is_monotonic and current_point.is_monotonic
+ )
+
+ return Sum(
+ start_time_unix_nano=output_start_time_unix_nano,
+ time_unix_nano=current_point.time_unix_nano,
+ value=value,
+ aggregation_temporality=aggregation_temporality,
+ is_monotonic=is_monotonic,
+ )
+
+ return None
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py\n@@ -14,6 +14,7 @@\n \n from abc import ABC, abstractmethod\n from bisect import bisect_left\n+from dataclasses import replace\n from logging import getLogger\n from math import inf\n from threading import Lock\n@@ -200,3 +201,80 @@\n aggregation_temporality=AggregationTemporality.DELTA,\n sum=self._sum,\n )\n+\n+\n+def _convert_aggregation_temporality(\n+ previous_point: Optional[_PointVarT],\n+ current_point: _PointVarT,\n+ aggregation_temporality: AggregationTemporality,\n+) -> _PointVarT:\n+ \"\"\"Converts `current_point` to the requested `aggregation_temporality`\n+ given the `previous_point`.\n+\n+ `previous_point` must have `CUMULATIVE` temporality. `current_point` may\n+ have `DELTA` or `CUMULATIVE` temporality.\n+\n+ The output point will have temporality `aggregation_temporality`. Since\n+ `GAUGE` points have no temporality, they are returned unchanged.\n+ \"\"\"\n+\n+ current_point_type = type(current_point)\n+\n+ if current_point_type is Gauge:\n+ return current_point\n+\n+ if previous_point is not None and type(previous_point) is not type(\n+ current_point\n+ ):\n+ _logger.warning(\n+ \"convert_aggregation_temporality called with mismatched \"\n+ \"point types: %s and %s\",\n+ type(previous_point),\n+ current_point_type,\n+ )\n+\n+ return current_point\n+\n+ if current_point_type is Sum:\n+ if previous_point is None:\n+ # Output CUMULATIVE for a synchronous instrument\n+ # There is no previous value, return the delta point as a\n+ # cumulative\n+ return replace(\n+ current_point, aggregation_temporality=aggregation_temporality\n+ )\n+ if previous_point.aggregation_temporality is not (\n+ AggregationTemporality.CUMULATIVE\n+ ):\n+ raise Exception(\n+ \"previous_point aggregation temporality must be CUMULATIVE\"\n+ )\n+\n+ if current_point.aggregation_temporality is aggregation_temporality:\n+ # Output DELTA for a synchronous instrument\n+ # Output CUMULATIVE for an asynchronous instrument\n+ return current_point\n+\n+ if aggregation_temporality is AggregationTemporality.DELTA:\n+ # Output temporality DELTA for an asynchronous instrument\n+ value = current_point.value - previous_point.value\n+ output_start_time_unix_nano = previous_point.time_unix_nano\n+\n+ else:\n+ # Output CUMULATIVE for a synchronous instrument\n+ value = current_point.value + previous_point.value\n+ output_start_time_unix_nano = previous_point.start_time_unix_nano\n+\n+ is_monotonic = (\n+ previous_point.is_monotonic and current_point.is_monotonic\n+ )\n+\n+ return Sum(\n+ start_time_unix_nano=output_start_time_unix_nano,\n+ time_unix_nano=current_point.time_unix_nano,\n+ value=value,\n+ aggregation_temporality=aggregation_temporality,\n+ is_monotonic=is_monotonic,\n+ )\n+\n+ return None\n", "issue": "Implement temporality conversion\nBlocked by #2327\r\n\r\nTemporality conversion is specified in the [design doc](https://docs.google.com/document/d/1FOmpGCiZAWTd6a3vcEHBnm1LKp7HAKuD4BBN3YCkymc/edit#heading=h.ulghopzfw7ou). For this issue:\r\n\r\n- Create a component to handle temporality conversion and storing of previous collection interval's cumulative point value when necessary. 
For example, there is no need to store previous cumulative for async instruments and cumulative export temporality.\r\n- Write temporality conversion algorithm. Use the export format added for #2327 as the input and output of the algorithm\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom bisect import bisect_left\nfrom logging import getLogger\nfrom math import inf\nfrom threading import Lock\nfrom typing import Generic, List, Optional, Sequence, TypeVar\n\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import (\n AggregationTemporality,\n Gauge,\n Histogram,\n PointT,\n Sum,\n)\nfrom opentelemetry.util._time import _time_ns\n\n_PointVarT = TypeVar(\"_PointVarT\", bound=PointT)\n\n_logger = getLogger(__name__)\n\n\nclass _InstrumentMonotonicityAwareAggregation:\n def __init__(self, instrument_is_monotonic: bool):\n self._instrument_is_monotonic = instrument_is_monotonic\n super().__init__()\n\n\nclass Aggregation(ABC, Generic[_PointVarT]):\n def __init__(self):\n self._lock = Lock()\n\n @abstractmethod\n def aggregate(self, measurement: Measurement) -> None:\n pass\n\n @abstractmethod\n def collect(self) -> Optional[_PointVarT]:\n pass\n\n\nclass SynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = 0\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = self._value + measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric and\n reset the aggregation value.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._value\n start_time_unix_nano = self._start_time_unix_nano\n\n self._value = 0\n self._start_time_unix_nano = now + 1\n\n return Sum(\n aggregation_temporality=AggregationTemporality.DELTA,\n is_monotonic=self._instrument_is_monotonic,\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n value=value,\n )\n\n\nclass AsynchronousSumAggregation(\n _InstrumentMonotonicityAwareAggregation, Aggregation[Sum]\n):\n def __init__(self, instrument_is_monotonic: bool):\n super().__init__(instrument_is_monotonic)\n self._value = None\n self._start_time_unix_nano = _time_ns()\n\n def aggregate(self, measurement: Measurement) -> None:\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Sum]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Sum(\n start_time_unix_nano=self._start_time_unix_nano,\n time_unix_nano=_time_ns(),\n value=self._value,\n aggregation_temporality=AggregationTemporality.CUMULATIVE,\n is_monotonic=self._instrument_is_monotonic,\n )\n\n\nclass 
LastValueAggregation(Aggregation[Gauge]):\n def __init__(self):\n super().__init__()\n self._value = None\n\n def aggregate(self, measurement: Measurement):\n with self._lock:\n self._value = measurement.value\n\n def collect(self) -> Optional[Gauge]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n if self._value is None:\n return None\n\n return Gauge(\n time_unix_nano=_time_ns(),\n value=self._value,\n )\n\n\nclass ExplicitBucketHistogramAggregation(Aggregation[Histogram]):\n def __init__(\n self,\n boundaries: Sequence[float] = (\n 0.0,\n 5.0,\n 10.0,\n 25.0,\n 50.0,\n 75.0,\n 100.0,\n 250.0,\n 500.0,\n 1000.0,\n ),\n record_min_max: bool = True,\n ):\n super().__init__()\n self._boundaries = tuple(boundaries)\n self._bucket_counts = self._get_empty_bucket_counts()\n self._min = inf\n self._max = -inf\n self._sum = 0\n self._record_min_max = record_min_max\n self._start_time_unix_nano = _time_ns()\n\n def _get_empty_bucket_counts(self) -> List[int]:\n return [0] * (len(self._boundaries) + 1)\n\n def aggregate(self, measurement: Measurement) -> None:\n\n value = measurement.value\n\n if self._record_min_max:\n self._min = min(self._min, value)\n self._max = max(self._max, value)\n\n self._sum += value\n\n self._bucket_counts[bisect_left(self._boundaries, value)] += 1\n\n def collect(self) -> Optional[Histogram]:\n \"\"\"\n Atomically return a point for the current value of the metric.\n \"\"\"\n now = _time_ns()\n\n with self._lock:\n value = self._bucket_counts\n start_time_unix_nano = self._start_time_unix_nano\n\n self._bucket_counts = self._get_empty_bucket_counts()\n self._start_time_unix_nano = now + 1\n\n return Histogram(\n start_time_unix_nano=start_time_unix_nano,\n time_unix_nano=now,\n bucket_counts=tuple(value),\n explicit_bounds=self._boundaries,\n aggregation_temporality=AggregationTemporality.DELTA,\n sum=self._sum,\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/aggregation.py"}]}
| 2,618 | 803 |
gh_patches_debug_10350
|
rasdani/github-patches
|
git_diff
|
Rapptz__discord.py-4024
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Accessing a flag property on the class raises AttributeError
Due to this:
https://github.com/Rapptz/discord.py/blob/e473f3c775b26a4bb54b18eb6240757306f4f5c1/discord/flags.py#L38
This code:
```py
print(discord.Permissions.manage_messages)
```
Raises this error:
```pytb
File ".venv/lib/python3.8/site-packages/discord/flags.py", line 38, in __get__
return instance._has_flag(self.flag)
AttributeError: 'NoneType' object has no attribute '_has_flag'
```
Trying to access a descriptor on a class should display a meaningful repr.
</issue>
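
For context, the conventional way to make a data descriptor usable on both the class and its instances is sketched below; the names here are illustrative and not taken from discord.py itself.

```python
class example_flag_value:
    def __init__(self, flag):
        self.flag = flag

    def __get__(self, instance, owner):
        # Accessed on the class (instance is None): return the descriptor
        # itself so class-level access has a meaningful object to repr.
        if instance is None:
            return self
        return instance._has_flag(self.flag)

    def __repr__(self):
        return '<example_flag_value flag=%r>' % self.flag
```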
<code>
[start of discord/flags.py]
1 # -*- coding: utf-8 -*-
2
3 """
4 The MIT License (MIT)
5
6 Copyright (c) 2015-2020 Rapptz
7
8 Permission is hereby granted, free of charge, to any person obtaining a
9 copy of this software and associated documentation files (the "Software"),
10 to deal in the Software without restriction, including without limitation
11 the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 and/or sell copies of the Software, and to permit persons to whom the
13 Software is furnished to do so, subject to the following conditions:
14
15 The above copyright notice and this permission notice shall be included in
16 all copies or substantial portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 DEALINGS IN THE SOFTWARE.
25 """
26
27 __all__ = (
28 'SystemChannelFlags',
29 'MessageFlags',
30 )
31
32 class flag_value:
33 def __init__(self, func):
34 self.flag = func(None)
35 self.__doc__ = func.__doc__
36
37 def __get__(self, instance, owner):
38 return instance._has_flag(self.flag)
39
40 def __set__(self, instance, value):
41 instance._set_flag(self.flag, value)
42
43 def fill_with_flags(*, inverted=False):
44 def decorator(cls):
45 cls.VALID_FLAGS = {
46 name: value.flag
47 for name, value in cls.__dict__.items()
48 if isinstance(value, flag_value)
49 }
50
51 if inverted:
52 max_bits = max(cls.VALID_FLAGS.values()).bit_length()
53 cls.DEFAULT_VALUE = -1 + (2 ** max_bits)
54 else:
55 cls.DEFAULT_VALUE = 0
56
57 return cls
58 return decorator
59
60 # n.b. flags must inherit from this and use the decorator above
61 class BaseFlags:
62 __slots__ = ('value',)
63
64 def __init__(self, **kwargs):
65 self.value = self.DEFAULT_VALUE
66 for key, value in kwargs.items():
67 if key not in self.VALID_FLAGS:
68 raise TypeError('%r is not a valid flag name.' % key)
69 setattr(self, key, value)
70
71 @classmethod
72 def _from_value(cls, value):
73 self = cls.__new__(cls)
74 self.value = value
75 return self
76
77 def __eq__(self, other):
78 return isinstance(other, self.__class__) and self.value == other.value
79
80 def __ne__(self, other):
81 return not self.__eq__(other)
82
83 def __hash__(self):
84 return hash(self.value)
85
86 def __repr__(self):
87 return '<%s value=%s>' % (self.__class__.__name__, self.value)
88
89 def __iter__(self):
90 for name, value in self.__class__.__dict__.items():
91 if isinstance(value, flag_value):
92 yield (name, self._has_flag(value.flag))
93
94 def _has_flag(self, o):
95 return (self.value & o) == o
96
97 def _set_flag(self, o, toggle):
98 if toggle is True:
99 self.value |= o
100 elif toggle is False:
101 self.value &= ~o
102 else:
103 raise TypeError('Value to set for %s must be a bool.' % self.__class__.__name__)
104
105 @fill_with_flags(inverted=True)
106 class SystemChannelFlags(BaseFlags):
107 r"""Wraps up a Discord system channel flag value.
108
109 Similar to :class:`Permissions`\, the properties provided are two way.
110 You can set and retrieve individual bits using the properties as if they
111 were regular bools. This allows you to edit the system flags easily.
112
113 To construct an object you can pass keyword arguments denoting the flags
114 to enable or disable.
115
116 .. container:: operations
117
118 .. describe:: x == y
119
120 Checks if two flags are equal.
121 .. describe:: x != y
122
123 Checks if two flags are not equal.
124 .. describe:: hash(x)
125
126 Return the flag's hash.
127 .. describe:: iter(x)
128
129 Returns an iterator of ``(name, value)`` pairs. This allows it
130 to be, for example, constructed as a dict or a list of pairs.
131
132 Attributes
133 -----------
134 value: :class:`int`
135 The raw value. This value is a bit array field of a 53-bit integer
136 representing the currently available flags. You should query
137 flags via the properties rather than using this raw value.
138 """
139 __slots__ = ()
140
141 # For some reason the flags for system channels are "inverted"
142 # ergo, if they're set then it means "suppress" (off in the GUI toggle)
143 # Since this is counter-intuitive from an API perspective and annoying
144 # these will be inverted automatically
145
146 def _has_flag(self, o):
147 return (self.value & o) != o
148
149 def _set_flag(self, o, toggle):
150 if toggle is True:
151 self.value &= ~o
152 elif toggle is False:
153 self.value |= o
154 else:
155 raise TypeError('Value to set for SystemChannelFlags must be a bool.')
156
157 @flag_value
158 def join_notifications(self):
159 """:class:`bool`: Returns ``True`` if the system channel is used for member join notifications."""
160 return 1
161
162 @flag_value
163 def premium_subscriptions(self):
164 """:class:`bool`: Returns ``True`` if the system channel is used for Nitro boosting notifications."""
165 return 2
166
167
168 @fill_with_flags()
169 class MessageFlags(BaseFlags):
170 r"""Wraps up a Discord Message flag value.
171
172 See :class:`SystemChannelFlags`.
173
174 .. container:: operations
175
176 .. describe:: x == y
177
178 Checks if two flags are equal.
179 .. describe:: x != y
180
181 Checks if two flags are not equal.
182 .. describe:: hash(x)
183
184 Return the flag's hash.
185 .. describe:: iter(x)
186
187 Returns an iterator of ``(name, value)`` pairs. This allows it
188 to be, for example, constructed as a dict or a list of pairs.
189
190 .. versionadded:: 1.3
191
192 Attributes
193 -----------
194 value: :class:`int`
195 The raw value. This value is a bit array field of a 53-bit integer
196 representing the currently available flags. You should query
197 flags via the properties rather than using this raw value.
198 """
199 __slots__ = ()
200
201 @flag_value
202 def crossposted(self):
203 """:class:`bool`: Returns ``True`` if the message is the original crossposted message."""
204 return 1
205
206 @flag_value
207 def is_crossposted(self):
208 """:class:`bool`: Returns ``True`` if the message was crossposted from another channel."""
209 return 2
210
211 @flag_value
212 def suppress_embeds(self):
213 """:class:`bool`: Returns ``True`` if the message's embeds have been suppressed."""
214 return 4
215
216 @flag_value
217 def source_message_deleted(self):
218 """:class:`bool`: Returns ``True`` if the source message for this crosspost has been deleted."""
219 return 8
220
221 @flag_value
222 def urgent(self):
223 """:class:`bool`: Returns ``True`` if the source message is an urgent message.
224
225 An urgent message is one sent by Discord Trust and Safety.
226 """
227 return 16
228
[end of discord/flags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/discord/flags.py b/discord/flags.py
--- a/discord/flags.py
+++ b/discord/flags.py
@@ -35,11 +35,16 @@
self.__doc__ = func.__doc__
def __get__(self, instance, owner):
+ if instance is None:
+ return self
return instance._has_flag(self.flag)
def __set__(self, instance, value):
instance._set_flag(self.flag, value)
+ def __repr__(self):
+ return '<flag_value flag={.flag!r}>'.format(self)
+
def fill_with_flags(*, inverted=False):
def decorator(cls):
cls.VALID_FLAGS = {
|
{"golden_diff": "diff --git a/discord/flags.py b/discord/flags.py\n--- a/discord/flags.py\n+++ b/discord/flags.py\n@@ -35,11 +35,16 @@\n self.__doc__ = func.__doc__\n \n def __get__(self, instance, owner):\n+ if instance is None:\n+ return self\n return instance._has_flag(self.flag)\n \n def __set__(self, instance, value):\n instance._set_flag(self.flag, value)\n \n+ def __repr__(self):\n+ return '<flag_value flag={.flag!r}>'.format(self)\n+\n def fill_with_flags(*, inverted=False):\n def decorator(cls):\n cls.VALID_FLAGS = {\n", "issue": "Accessing a flag property on the class raises AttributeError\nDue to this:\r\n\r\nhttps://github.com/Rapptz/discord.py/blob/e473f3c775b26a4bb54b18eb6240757306f4f5c1/discord/flags.py#L38\r\n\r\nThis code:\r\n\r\n```py\r\nprint(discord.Permissions.manage_messages)\r\n```\r\n\r\nRaises this error:\r\n```pytb\r\n File \".venv/lib/python3.8/site-packages/discord/flags.py\", line 38, in __get__\r\n return instance._has_flag(self.flag)\r\nAttributeError: 'NoneType' object has no attribute '_has_flag'\r\n```\r\n\r\nTrying to access a descriptor on a class should display a meaningful repr.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2020 Rapptz\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\n__all__ = (\n 'SystemChannelFlags',\n 'MessageFlags',\n)\n\nclass flag_value:\n def __init__(self, func):\n self.flag = func(None)\n self.__doc__ = func.__doc__\n\n def __get__(self, instance, owner):\n return instance._has_flag(self.flag)\n\n def __set__(self, instance, value):\n instance._set_flag(self.flag, value)\n\ndef fill_with_flags(*, inverted=False):\n def decorator(cls):\n cls.VALID_FLAGS = {\n name: value.flag\n for name, value in cls.__dict__.items()\n if isinstance(value, flag_value)\n }\n\n if inverted:\n max_bits = max(cls.VALID_FLAGS.values()).bit_length()\n cls.DEFAULT_VALUE = -1 + (2 ** max_bits)\n else:\n cls.DEFAULT_VALUE = 0\n\n return cls\n return decorator\n\n# n.b. flags must inherit from this and use the decorator above\nclass BaseFlags:\n __slots__ = ('value',)\n\n def __init__(self, **kwargs):\n self.value = self.DEFAULT_VALUE\n for key, value in kwargs.items():\n if key not in self.VALID_FLAGS:\n raise TypeError('%r is not a valid flag name.' 
% key)\n setattr(self, key, value)\n\n @classmethod\n def _from_value(cls, value):\n self = cls.__new__(cls)\n self.value = value\n return self\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.value == other.value\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.value)\n\n def __repr__(self):\n return '<%s value=%s>' % (self.__class__.__name__, self.value)\n\n def __iter__(self):\n for name, value in self.__class__.__dict__.items():\n if isinstance(value, flag_value):\n yield (name, self._has_flag(value.flag))\n\n def _has_flag(self, o):\n return (self.value & o) == o\n\n def _set_flag(self, o, toggle):\n if toggle is True:\n self.value |= o\n elif toggle is False:\n self.value &= ~o\n else:\n raise TypeError('Value to set for %s must be a bool.' % self.__class__.__name__)\n\n@fill_with_flags(inverted=True)\nclass SystemChannelFlags(BaseFlags):\n r\"\"\"Wraps up a Discord system channel flag value.\n\n Similar to :class:`Permissions`\\, the properties provided are two way.\n You can set and retrieve individual bits using the properties as if they\n were regular bools. This allows you to edit the system flags easily.\n\n To construct an object you can pass keyword arguments denoting the flags\n to enable or disable.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two flags are equal.\n .. describe:: x != y\n\n Checks if two flags are not equal.\n .. describe:: hash(x)\n\n Return the flag's hash.\n .. describe:: iter(x)\n\n Returns an iterator of ``(name, value)`` pairs. This allows it\n to be, for example, constructed as a dict or a list of pairs.\n\n Attributes\n -----------\n value: :class:`int`\n The raw value. This value is a bit array field of a 53-bit integer\n representing the currently available flags. You should query\n flags via the properties rather than using this raw value.\n \"\"\"\n __slots__ = ()\n\n # For some reason the flags for system channels are \"inverted\"\n # ergo, if they're set then it means \"suppress\" (off in the GUI toggle)\n # Since this is counter-intuitive from an API perspective and annoying\n # these will be inverted automatically\n\n def _has_flag(self, o):\n return (self.value & o) != o\n\n def _set_flag(self, o, toggle):\n if toggle is True:\n self.value &= ~o\n elif toggle is False:\n self.value |= o\n else:\n raise TypeError('Value to set for SystemChannelFlags must be a bool.')\n\n @flag_value\n def join_notifications(self):\n \"\"\":class:`bool`: Returns ``True`` if the system channel is used for member join notifications.\"\"\"\n return 1\n\n @flag_value\n def premium_subscriptions(self):\n \"\"\":class:`bool`: Returns ``True`` if the system channel is used for Nitro boosting notifications.\"\"\"\n return 2\n\n\n@fill_with_flags()\nclass MessageFlags(BaseFlags):\n r\"\"\"Wraps up a Discord Message flag value.\n\n See :class:`SystemChannelFlags`.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two flags are equal.\n .. describe:: x != y\n\n Checks if two flags are not equal.\n .. describe:: hash(x)\n\n Return the flag's hash.\n .. describe:: iter(x)\n\n Returns an iterator of ``(name, value)`` pairs. This allows it\n to be, for example, constructed as a dict or a list of pairs.\n\n .. versionadded:: 1.3\n\n Attributes\n -----------\n value: :class:`int`\n The raw value. This value is a bit array field of a 53-bit integer\n representing the currently available flags. 
You should query\n flags via the properties rather than using this raw value.\n \"\"\"\n __slots__ = ()\n\n @flag_value\n def crossposted(self):\n \"\"\":class:`bool`: Returns ``True`` if the message is the original crossposted message.\"\"\"\n return 1\n\n @flag_value\n def is_crossposted(self):\n \"\"\":class:`bool`: Returns ``True`` if the message was crossposted from another channel.\"\"\"\n return 2\n\n @flag_value\n def suppress_embeds(self):\n \"\"\":class:`bool`: Returns ``True`` if the message's embeds have been suppressed.\"\"\"\n return 4\n\n @flag_value\n def source_message_deleted(self):\n \"\"\":class:`bool`: Returns ``True`` if the source message for this crosspost has been deleted.\"\"\"\n return 8\n\n @flag_value\n def urgent(self):\n \"\"\":class:`bool`: Returns ``True`` if the source message is an urgent message.\n\n An urgent message is one sent by Discord Trust and Safety.\n \"\"\"\n return 16\n", "path": "discord/flags.py"}]}
| 2,950 | 159 |
gh_patches_debug_16691
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-2029
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Application Insights daily data cap cannot be set to fractional number via CLI
## Describe the bug
The daily data cap in Application Insights can be set to a non-integer number (e.g. 0.1). The `az monitor app-insights component billing update` command only allows integer numbers.
When using `az monitor app-insights component billing show`, the data volume cap is returned as a non-integer number.
The `billing update` command should be updated to accept fractional numbers to match the behavior of the portal.
**Command Name**
`az monitor app-insights component billing update`
`Extension Name: application-insights. Version: 0.1.8.`
**Errors:**
```
> az monitor app-insights component billing update --app $appName --resource-group $resourceGroup --cap 0.1
az monitor app-insights component billing update: error: argument --cap: invalid int value: '0.1'
```
## To Reproduce:
- `az monitor app-insights component create --app $appName --resource-group $resourceGroup`
- `az monitor app-insights component billing update --app $appName --resource-group $resourceGroup --cap 0.1`
## Expected Behavior
The `--cap` value should accept non-integer numbers.
## Environment Summary
```
Windows-10-10.0.18362-SP0
Python 3.6.6
azure-cli 2.2.0 *
Extensions:
application-insights 0.1.8
```
<!--Please don't remove this:-->
<!--auto-generated-->
</issue>
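
The error text in the report is argparse's standard type-coercion message (azure-cli builds on argparse). A small standalone sketch — plain argparse, not the actual azure-cli parser — shows the difference between `type=int` and `type=float`:

```python
import argparse

parser = argparse.ArgumentParser()
# With type=int this would fail: "argument --cap: invalid int value: '0.1'"
parser.add_argument('--cap', type=float, help='Daily data volume cap in GB.')

print(parser.parse_args(['--cap', '0.1']).cap)  # prints 0.1
```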
<code>
[start of src/application-insights/azext_applicationinsights/_params.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 # pylint: disable=line-too-long, too-many-statements
7 from azure.cli.core.commands.parameters import get_datetime_type, get_location_type, tags_type, get_three_state_flag, get_enum_type
8 from azure.cli.command_modules.monitor.actions import get_period_type
9 from ._validators import validate_applications, validate_storage_account_name_or_id, validate_log_analytic_workspace_name_or_id
10
11
12 def load_arguments(self, _):
13 with self.argument_context('monitor app-insights') as c:
14 c.argument('application', options_list=['--app', '-a'], id_part='name', help='GUID, app name, or fully-qualified Azure resource name of Application Insights component. The application GUID may be acquired from the API Access menu item on any Application Insights resource in the Azure portal. If using an application name, please specify resource group.')
15
16 with self.argument_context('monitor app-insights component create') as c:
17 c.argument('location', arg_type=get_location_type(self.cli_ctx))
18 c.argument('application-type', options_list=['application-type', '--type', '-t'], help="Type of application being monitored. Possible values include: 'web', 'other'. Default value: 'web' .")
19 c.argument('kind', options_list=['--kind', '-k'], help='The kind of application that this component refers to, used to customize UI. This value is a freeform string, values should typically be one of the following: web, ios, other, store, java, phone.')
20 c.argument('tags', tags_type)
21
22 with self.argument_context('monitor app-insights component update') as c:
23 c.argument('location', arg_type=get_location_type(self.cli_ctx))
24 c.argument('application-type', options_list=['application-type', '--type', '-t'], help="Type of application being monitored. Possible values include: 'web', 'other'. Default value: 'web' .")
25 c.argument('kind', options_list=['--kind', '-k'], help='The kind of application that this component refers to, used to customize UI. This value is a freeform string, values should typically be one of the following: web, ios, other, store, java, phone.')
26
27 with self.argument_context('monitor app-insights component') as c:
28 c.argument('workspace_resource_id', options_list=['--workspace'], validator=validate_log_analytic_workspace_name_or_id,
29 help='Name or resource ID of a log analytics workspace')
30 c.argument('retention_in_days', options_list=['--retention-time'], help='Retention in days for Application Insights. The value can be one of the following values: 30,60,90,120,180,270,365,550,730. It can be set only when Application Insights is not connected to a Log Analytics workspace.')
31 from .vendored_sdks.mgmt_applicationinsights.models import PublicNetworkAccessType
32 c.argument('public_network_access_for_ingestion', options_list=['--ingestion-access'], help='The public network access type for accessing Application Insights ingestion.',
33 arg_type=get_enum_type(PublicNetworkAccessType))
34 c.argument('public_network_access_for_query', options_list=['--query-access'], help='The public network access type for accessing Application Insights query.',
35 arg_type=get_enum_type(PublicNetworkAccessType))
36
37 with self.argument_context('monitor app-insights component update-tags') as c:
38 c.argument('tags', tags_type)
39
40 with self.argument_context('monitor app-insights component billing') as c:
41 c.argument('stop_sending_notification_when_hitting_cap', options_list=['-s', '--stop'], arg_type=get_three_state_flag(),
42 help='Do not send a notification email when the daily data volume cap is met.')
43 c.argument('cap', type=int, help='Daily data volume cap in GB.')
44
45 with self.argument_context('monitor app-insights api-key create') as c:
46 c.argument('api_key', help='The name of the API key to create.')
47 c.argument('read_properties', nargs='+', options_list=['--read-properties'])
48 c.argument('write_properties', nargs='+')
49
50 with self.argument_context('monitor app-insights api-key show') as c:
51 c.argument('api_key', help='The name of the API key to fetch.')
52
53 with self.argument_context('monitor app-insights metrics show') as c:
54 c.argument('metric', options_list=['--metrics', '-m'], help='The metric to retrieve. May be either a standard AI metric or an application-specific custom metric.')
55 c.argument('aggregation', nargs='*', help='The aggregation to use when computing the metric values. To retrieve more than one aggregation at a time, separate them with a comma. If no aggregation is specified, then the default aggregation for the metric is used.')
56 c.argument('interval', arg_group='Time', type=get_period_type())
57 c.argument('orderby', help='The aggregation function and direction to sort the segments by. This value is only valid when segment is specified.')
58 c.argument('segment', help='The name of the dimension to segment the metric values by. This dimension must be applicable to the metric you are retrieving. To segment by more than one dimension at a time, separate them with a comma (,). In this case, the metric data will be segmented in the order the dimensions are listed in the parameter.')
59 c.argument('top', help='The number of segments to return. This value is only valid when segment is specified.')
60 c.argument('filter_arg', options_list=['--filter'], help=' An expression used to filter the results. This value should be a valid OData filter expression where the keys of each clause should be applicable dimensions for the metric you are retrieving.')
61 c.argument('start_time', arg_type=get_datetime_type(help='Start-time of time range for which to retrieve data.'))
62 c.argument('end_time', arg_type=get_datetime_type(help='End of time range for current operation. Defaults to the current time.'))
63 c.argument('offset', help='Filter results based on UTC hour offset.', type=get_period_type(as_timedelta=True))
64
65 with self.argument_context('monitor app-insights events show') as c:
66 c.argument('event_type', options_list=['--type'], help='The type of events to retrieve.')
67 c.argument('event', options_list=['--event'], help='GUID of the event to retrieve. This could be obtained by first listing and filtering events, then selecting an event of interest.')
68 c.argument('start_time', arg_type=get_datetime_type(help='Start-time of time range for which to retrieve data.'))
69 c.argument('end_time', arg_type=get_datetime_type(help='End of time range for current operation. Defaults to the current time.'))
70 c.argument('offset', help='Filter results based on UTC hour offset.', type=get_period_type(as_timedelta=True))
71
72 with self.argument_context('monitor app-insights query') as c:
73 c.argument('application', validator=validate_applications, options_list=['--apps', '-a'], nargs='+', id_part='name', help='GUID, app name, or fully-qualified Azure resource name of Application Insights component. The application GUID may be acquired from the API Access menu item on any Application Insights resource in the Azure portal. If using an application name, please specify resource group.')
74 c.argument('analytics_query', help='Query to execute over Application Insights data.')
75 c.argument('start_time', arg_type=get_datetime_type(help='Start-time of time range for which to retrieve data.'))
76 c.argument('end_time', arg_type=get_datetime_type(help='End of time range for current operation. Defaults to the current time.'))
77 c.argument('offset', help='Filter results based on UTC hour offset.', type=get_period_type(as_timedelta=True))
78
79 with self.argument_context('monitor app-insights component linked-storage') as c:
80 c.argument('storage_account_id', options_list=['--storage-account', '-s'], validator=validate_storage_account_name_or_id,
81 help='Name or ID of a linked storage account.')
82
[end of src/application-insights/azext_applicationinsights/_params.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/application-insights/azext_applicationinsights/_params.py b/src/application-insights/azext_applicationinsights/_params.py
--- a/src/application-insights/azext_applicationinsights/_params.py
+++ b/src/application-insights/azext_applicationinsights/_params.py
@@ -40,7 +40,7 @@
with self.argument_context('monitor app-insights component billing') as c:
c.argument('stop_sending_notification_when_hitting_cap', options_list=['-s', '--stop'], arg_type=get_three_state_flag(),
help='Do not send a notification email when the daily data volume cap is met.')
- c.argument('cap', type=int, help='Daily data volume cap in GB.')
+ c.argument('cap', type=float, help='Daily data volume cap in GB.')
with self.argument_context('monitor app-insights api-key create') as c:
c.argument('api_key', help='The name of the API key to create.')
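The `--cap` change in the hunk above only swaps the parser callable. A minimal, framework-independent illustration (plain `argparse`, hypothetical script) of why `type=int` rejects fractional caps such as `0.1` while `type=float` accepts them:

```python
import argparse

parser = argparse.ArgumentParser()
# With type=int, "0.1" fails: "argument --cap: invalid int value: '0.1'"
# parser.add_argument('--cap', type=int)
parser.add_argument('--cap', type=float)  # accepts fractional daily caps

args = parser.parse_args(['--cap', '0.1'])
print(args.cap)  # 0.1
```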
|
{"golden_diff": "diff --git a/src/application-insights/azext_applicationinsights/_params.py b/src/application-insights/azext_applicationinsights/_params.py\n--- a/src/application-insights/azext_applicationinsights/_params.py\n+++ b/src/application-insights/azext_applicationinsights/_params.py\n@@ -40,7 +40,7 @@\n with self.argument_context('monitor app-insights component billing') as c:\n c.argument('stop_sending_notification_when_hitting_cap', options_list=['-s', '--stop'], arg_type=get_three_state_flag(),\n help='Do not send a notification email when the daily data volume cap is met.')\n- c.argument('cap', type=int, help='Daily data volume cap in GB.')\n+ c.argument('cap', type=float, help='Daily data volume cap in GB.')\n \n with self.argument_context('monitor app-insights api-key create') as c:\n c.argument('api_key', help='The name of the API key to create.')\n", "issue": "Application Insights daily data cap cannot be set to fractional number via CLI\n## Describe the bug\r\n\r\nThe daily data cap in Application Insights can be set to a non-integer number (eg. 0.1). The `az monitor app-insights component billing update` command only allows integer numbers. \r\n\r\nWhen using `az monitor app-insights component billing show`, the data volume cap is returned as a non-integer number. \r\n\r\nThe `billing update` command should be updated to accept fractional numbers to match the behavior of the portal.\r\n\r\n**Command Name**\r\n`az monitor app-insights component billing update`\r\n`Extension Name: application-insights. Version: 0.1.8.`\r\n\r\n**Errors:**\r\n\r\n```\r\n> az monitor app-insights component billing update --app $appName --resource-group $resourceGroup --cap 0.1\r\naz monitor app-insights component billing update: error: argument --cap: invalid int value: '0.1'\r\n```\r\n\r\n## To Reproduce:\r\n\r\n- `az monitor app-insights component create --app $appName --resource-group $resourceGroup`\r\n- `az monitor app-insights component billing update --app $appName --resource-group $resourceGroup --cap 0.1`\r\n\r\n## Expected Behavior\r\n\r\nThe `--cap` value should accept non-integer numbers.\r\n\r\n## Environment Summary\r\n```\r\nWindows-10-10.0.18362-SP0\r\nPython 3.6.6\r\n\r\nazure-cli 2.2.0 *\r\n\r\nExtensions:\r\napplication-insights 0.1.8\r\n\r\n```\r\n\r\n<!--Please don't remove this:-->\r\n<!--auto-generated-->\r\n\r\n\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long, too-many-statements\nfrom azure.cli.core.commands.parameters import get_datetime_type, get_location_type, tags_type, get_three_state_flag, get_enum_type\nfrom azure.cli.command_modules.monitor.actions import get_period_type\nfrom ._validators import validate_applications, validate_storage_account_name_or_id, validate_log_analytic_workspace_name_or_id\n\n\ndef load_arguments(self, _):\n with self.argument_context('monitor app-insights') as c:\n c.argument('application', options_list=['--app', '-a'], id_part='name', help='GUID, app name, or fully-qualified Azure resource name of Application Insights component. The application GUID may be acquired from the API Access menu item on any Application Insights resource in the Azure portal. 
If using an application name, please specify resource group.')\n\n with self.argument_context('monitor app-insights component create') as c:\n c.argument('location', arg_type=get_location_type(self.cli_ctx))\n c.argument('application-type', options_list=['application-type', '--type', '-t'], help=\"Type of application being monitored. Possible values include: 'web', 'other'. Default value: 'web' .\")\n c.argument('kind', options_list=['--kind', '-k'], help='The kind of application that this component refers to, used to customize UI. This value is a freeform string, values should typically be one of the following: web, ios, other, store, java, phone.')\n c.argument('tags', tags_type)\n\n with self.argument_context('monitor app-insights component update') as c:\n c.argument('location', arg_type=get_location_type(self.cli_ctx))\n c.argument('application-type', options_list=['application-type', '--type', '-t'], help=\"Type of application being monitored. Possible values include: 'web', 'other'. Default value: 'web' .\")\n c.argument('kind', options_list=['--kind', '-k'], help='The kind of application that this component refers to, used to customize UI. This value is a freeform string, values should typically be one of the following: web, ios, other, store, java, phone.')\n\n with self.argument_context('monitor app-insights component') as c:\n c.argument('workspace_resource_id', options_list=['--workspace'], validator=validate_log_analytic_workspace_name_or_id,\n help='Name or resource ID of a log analytics workspace')\n c.argument('retention_in_days', options_list=['--retention-time'], help='Retention in days for Application Insights. The value can be one of the following values: 30,60,90,120,180,270,365,550,730. It can be set only when Application Insights is not connected to a Log Analytics workspace.')\n from .vendored_sdks.mgmt_applicationinsights.models import PublicNetworkAccessType\n c.argument('public_network_access_for_ingestion', options_list=['--ingestion-access'], help='The public network access type for accessing Application Insights ingestion.',\n arg_type=get_enum_type(PublicNetworkAccessType))\n c.argument('public_network_access_for_query', options_list=['--query-access'], help='The public network access type for accessing Application Insights query.',\n arg_type=get_enum_type(PublicNetworkAccessType))\n\n with self.argument_context('monitor app-insights component update-tags') as c:\n c.argument('tags', tags_type)\n\n with self.argument_context('monitor app-insights component billing') as c:\n c.argument('stop_sending_notification_when_hitting_cap', options_list=['-s', '--stop'], arg_type=get_three_state_flag(),\n help='Do not send a notification email when the daily data volume cap is met.')\n c.argument('cap', type=int, help='Daily data volume cap in GB.')\n\n with self.argument_context('monitor app-insights api-key create') as c:\n c.argument('api_key', help='The name of the API key to create.')\n c.argument('read_properties', nargs='+', options_list=['--read-properties'])\n c.argument('write_properties', nargs='+')\n\n with self.argument_context('monitor app-insights api-key show') as c:\n c.argument('api_key', help='The name of the API key to fetch.')\n\n with self.argument_context('monitor app-insights metrics show') as c:\n c.argument('metric', options_list=['--metrics', '-m'], help='The metric to retrieve. 
May be either a standard AI metric or an application-specific custom metric.')\n c.argument('aggregation', nargs='*', help='The aggregation to use when computing the metric values. To retrieve more than one aggregation at a time, separate them with a comma. If no aggregation is specified, then the default aggregation for the metric is used.')\n c.argument('interval', arg_group='Time', type=get_period_type())\n c.argument('orderby', help='The aggregation function and direction to sort the segments by. This value is only valid when segment is specified.')\n c.argument('segment', help='The name of the dimension to segment the metric values by. This dimension must be applicable to the metric you are retrieving. To segment by more than one dimension at a time, separate them with a comma (,). In this case, the metric data will be segmented in the order the dimensions are listed in the parameter.')\n c.argument('top', help='The number of segments to return. This value is only valid when segment is specified.')\n c.argument('filter_arg', options_list=['--filter'], help=' An expression used to filter the results. This value should be a valid OData filter expression where the keys of each clause should be applicable dimensions for the metric you are retrieving.')\n c.argument('start_time', arg_type=get_datetime_type(help='Start-time of time range for which to retrieve data.'))\n c.argument('end_time', arg_type=get_datetime_type(help='End of time range for current operation. Defaults to the current time.'))\n c.argument('offset', help='Filter results based on UTC hour offset.', type=get_period_type(as_timedelta=True))\n\n with self.argument_context('monitor app-insights events show') as c:\n c.argument('event_type', options_list=['--type'], help='The type of events to retrieve.')\n c.argument('event', options_list=['--event'], help='GUID of the event to retrieve. This could be obtained by first listing and filtering events, then selecting an event of interest.')\n c.argument('start_time', arg_type=get_datetime_type(help='Start-time of time range for which to retrieve data.'))\n c.argument('end_time', arg_type=get_datetime_type(help='End of time range for current operation. Defaults to the current time.'))\n c.argument('offset', help='Filter results based on UTC hour offset.', type=get_period_type(as_timedelta=True))\n\n with self.argument_context('monitor app-insights query') as c:\n c.argument('application', validator=validate_applications, options_list=['--apps', '-a'], nargs='+', id_part='name', help='GUID, app name, or fully-qualified Azure resource name of Application Insights component. The application GUID may be acquired from the API Access menu item on any Application Insights resource in the Azure portal. If using an application name, please specify resource group.')\n c.argument('analytics_query', help='Query to execute over Application Insights data.')\n c.argument('start_time', arg_type=get_datetime_type(help='Start-time of time range for which to retrieve data.'))\n c.argument('end_time', arg_type=get_datetime_type(help='End of time range for current operation. 
Defaults to the current time.'))\n c.argument('offset', help='Filter results based on UTC hour offset.', type=get_period_type(as_timedelta=True))\n\n with self.argument_context('monitor app-insights component linked-storage') as c:\n c.argument('storage_account_id', options_list=['--storage-account', '-s'], validator=validate_storage_account_name_or_id,\n help='Name or ID of a linked storage account.')\n", "path": "src/application-insights/azext_applicationinsights/_params.py"}]}
| 2,786 | 213 |
gh_patches_debug_23981
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1227
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exception in accounts plugin authentication: Unicode-objects must be encoded before hashing
Running a fresh Docker image built from master (36dbcc0) with PostgreSQL storage.
After creating an account with:
```
echo '{"data": {"password": "PASSWD"}}' | http PUT http://localhost:8889/v1/accounts/eric
```
Running
```
http GET http://localhost:8889/v1/ --auth eric:coucou3333
```
triggers the following exception:
```
Unicode-objects must be encoded before hashing
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/pyramid/tweens.py", line 22, in excview_tween
response = handler(request)
File "/usr/local/lib/python3.5/dist-packages/pyramid_tm/__init__.py", line 119, in tm_tween
reraise(*exc_info)
File "/usr/local/lib/python3.5/dist-packages/pyramid_tm/compat.py", line 15, in reraise
raise value
File "/usr/local/lib/python3.5/dist-packages/pyramid_tm/__init__.py", line 98, in tm_tween
response = handler(request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/router.py", line 155, in handle_request
view_name
File "/usr/local/lib/python3.5/dist-packages/pyramid/view.py", line 612, in _call_view
response = view_callable(context, request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/config/views.py", line 181, in __call__
return view(context, request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py", line 389, in attr_view
return view(context, request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py", line 367, in predicate_wrapper
return view(context, request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py", line 438, in rendered_view
result = view(context, request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py", line 147, in _requestonly_view
response = view(request)
File "/usr/local/lib/python3.5/dist-packages/cornice/service.py", line 493, in wrapper
response = view_(request)
File "/code/kinto/core/views/hello.py", line 45, in get_hello
if Authenticated in request.effective_principals:
File "/usr/local/lib/python3.5/dist-packages/pyramid/security.py", line 375, in effective_principals
return policy.effective_principals(self)
File "/usr/local/lib/python3.5/dist-packages/pyramid_multiauth/__init__.py", line 119, in effective_principals
userid = policy.authenticated_userid(request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/authentication.py", line 92, in authenticated_userid
callback_ok = self.callback(userid, request)
File "/usr/local/lib/python3.5/dist-packages/pyramid/authentication.py", line 1123, in callback
return self.check(username, password, request)
File "/code/kinto/plugins/accounts/authentication.py", line 18, in account_check
if hashed == bcrypt.hashpw(pwd_str, hashed):
File "/usr/local/lib/python3.5/dist-packages/bcrypt/__init__.py", line 62, in hashpw
raise TypeError("Unicode-objects must be encoded before hashing")
```
It is fixed by encoding the value received from the session at [this line in authentication.py](https://github.com/Kinto/kinto/blob/master/kinto/plugins/accounts/authentication.py#L16):
```
hashed = existing['password'].encode(encoding='utf8')
```
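For illustration, a minimal standalone reproduction of the failure and of the proposed fix, assuming only the `bcrypt` package and a made-up password:

```python
import bcrypt

stored = bcrypt.hashpw(b"PASSWD", bcrypt.gensalt()).decode("utf-8")  # hash comes back from storage as str
candidate = "PASSWD"

# Passing the str hash straight to hashpw raises:
#   TypeError: Unicode-objects must be encoded before hashing
# bcrypt.hashpw(candidate.encode("utf-8"), stored)

hashed = stored.encode(encoding="utf8")  # the fix: encode the stored hash first
assert hashed == bcrypt.hashpw(candidate.encode("utf-8"), hashed)
```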
</issue>
<code>
[start of kinto/plugins/accounts/authentication.py]
1 import bcrypt
2 from pyramid import authentication as base_auth
3
4 from kinto.core.storage import exceptions as storage_exceptions
5
6
7 def account_check(username, password, request):
8 parent_id = username
9 try:
10 existing = request.registry.storage.get(parent_id=parent_id,
11 collection_id='account',
12 object_id=username)
13 except storage_exceptions.RecordNotFoundError:
14 return None
15
16 hashed = existing['password']
17 pwd_str = password.encode(encoding='utf-8')
18 if hashed == bcrypt.hashpw(pwd_str, hashed):
19 return True # Match! Return anything but None.
20
21
22 class AccountsAuthenticationPolicy(base_auth.BasicAuthAuthenticationPolicy):
23 """Accounts authentication policy.
24
25 It will check that the credentials exist in the account resource.
26 """
27 def __init__(self, *args, **kwargs):
28 super().__init__(account_check, *args, **kwargs)
29
30 def effective_principals(self, request):
31 # Bypass default Pyramid construction of principals because
32 # Pyramid multiauth already adds userid, Authenticated and Everyone
33 # principals.
34 return []
35
[end of kinto/plugins/accounts/authentication.py]
[start of kinto/plugins/accounts/views.py]
1 import bcrypt
2 import colander
3 from pyramid import httpexceptions
4 from pyramid.decorator import reify
5 from pyramid.security import Authenticated, Everyone
6 from pyramid.settings import aslist
7
8 from kinto.views import NameGenerator
9 from kinto.core import resource
10 from kinto.core.errors import raise_invalid, http_error
11
12
13 class AccountSchema(resource.ResourceSchema):
14 password = colander.SchemaNode(colander.String())
15
16
17 @resource.register()
18 class Account(resource.ShareableResource):
19
20 schema = AccountSchema
21
22 def __init__(self, request, context):
23 # Store if current user is administrator (before accessing get_parent_id())
24 allowed_from_settings = request.registry.settings.get('account_write_principals', [])
25 context.is_administrator = len(set(aslist(allowed_from_settings)) &
26 set(request.prefixed_principals)) > 0
27 # Shortcut to check if current is anonymous (before get_parent_id()).
28 context.is_anonymous = Authenticated not in request.effective_principals
29
30 super().__init__(request, context)
31
32 # Overwrite the current principal set by ShareableResource.
33 if self.model.current_principal == Everyone:
34 # Creation is anonymous, but author with write perm is this:
35 # XXX: only works if policy name is account in settings.
36 self.model.current_principal = 'account:{}'.format(self.model.parent_id)
37
38 @reify
39 def id_generator(self):
40 # This generator is used for ID validation.
41 return NameGenerator()
42
43 def get_parent_id(self, request):
44 # The whole challenge here is that we want to isolate what
45 # authenticated users can list, but give access to everything to
46 # administrators.
47 # Plus when anonymous create accounts, we have to set their parent id
48 # to the same value they would obtain when authenticated.
49 if self.context.is_administrator:
50 if self.context.on_collection:
51 # Admin see all accounts.
52 return '*'
53 else:
54 # No pattern matching for admin on single record.
55 return request.matchdict['id']
56
57 if not self.context.is_anonymous:
58 # Authenticated users see their own account only.
59 return request.selected_userid
60
61 # Anonymous creation with PUT.
62 if 'id' in request.matchdict:
63 return request.matchdict['id']
64
65 try:
66 # Anonymous creation with POST.
67 return request.json['data']['id']
68 except (ValueError, KeyError):
69 # Bad POST data.
70 if request.method.lower() == 'post':
71 error_details = {
72 'name': 'data.id',
73 'description': 'data.id in body: Required'
74 }
75 raise_invalid(request, **error_details)
76 # Anonymous GET
77 error_msg = 'Cannot read accounts.'
78 raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)
79
80 def collection_post(self):
81 result = super(Account, self).collection_post()
82 if self.context.is_anonymous and self.request.response.status_code == 200:
83 error_details = {
84 'message': 'Account ID %r already exists' % result['data']['id']
85 }
86 raise http_error(httpexceptions.HTTPForbidden(), **error_details)
87 return result
88
89 def process_record(self, new, old=None):
90 new = super(Account, self).process_record(new, old)
91
92 # Store password safely in database.
93 pwd_str = new["password"].encode(encoding='utf-8')
94 new["password"] = bcrypt.hashpw(pwd_str, bcrypt.gensalt())
95
96 # Administrators can reach other accounts and anonymous have no
97 # selected_userid. So do not try to enforce.
98 if self.context.is_administrator or self.context.is_anonymous:
99 return new
100
101 # Otherwise, we force the id to match the authenticated username.
102 if new[self.model.id_field] != self.request.selected_userid:
103 error_details = {
104 'name': 'data.id',
105 'description': 'Username and account ID do not match.',
106 }
107 raise_invalid(self.request, **error_details)
108
109 return new
110
[end of kinto/plugins/accounts/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/plugins/accounts/authentication.py b/kinto/plugins/accounts/authentication.py
--- a/kinto/plugins/accounts/authentication.py
+++ b/kinto/plugins/accounts/authentication.py
@@ -13,7 +13,7 @@
except storage_exceptions.RecordNotFoundError:
return None
- hashed = existing['password']
+ hashed = existing['password'].encode(encoding='utf-8')
pwd_str = password.encode(encoding='utf-8')
if hashed == bcrypt.hashpw(pwd_str, hashed):
return True # Match! Return anything but None.
diff --git a/kinto/plugins/accounts/views.py b/kinto/plugins/accounts/views.py
--- a/kinto/plugins/accounts/views.py
+++ b/kinto/plugins/accounts/views.py
@@ -89,9 +89,11 @@
def process_record(self, new, old=None):
new = super(Account, self).process_record(new, old)
- # Store password safely in database.
+ # Store password safely in database as str
+ # (bcrypt.hashpw returns base64 bytes).
pwd_str = new["password"].encode(encoding='utf-8')
- new["password"] = bcrypt.hashpw(pwd_str, bcrypt.gensalt())
+ hashed = bcrypt.hashpw(pwd_str, bcrypt.gensalt())
+ new["password"] = hashed.decode(encoding='utf-8')
# Administrators can reach other accounts and anonymous have no
# selected_userid. So do not try to enforce.
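Since `bcrypt.hashpw` returns bytes, the patch decodes before persisting and encodes again before comparing. A small standalone sketch of that round trip, using a made-up password and `bcrypt.checkpw` as the comparison helper:

```python
import bcrypt

# write path: hash to bytes, persist a plain str
stored = bcrypt.hashpw("PASSWD".encode("utf-8"), bcrypt.gensalt()).decode("utf-8")

# read path: encode the stored str back to bytes before checking
print(bcrypt.checkpw("PASSWD".encode("utf-8"), stored.encode("utf-8")))  # True
print(bcrypt.checkpw("wrong".encode("utf-8"), stored.encode("utf-8")))   # False
```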
|
{"golden_diff": "diff --git a/kinto/plugins/accounts/authentication.py b/kinto/plugins/accounts/authentication.py\n--- a/kinto/plugins/accounts/authentication.py\n+++ b/kinto/plugins/accounts/authentication.py\n@@ -13,7 +13,7 @@\n except storage_exceptions.RecordNotFoundError:\n return None\n \n- hashed = existing['password']\n+ hashed = existing['password'].encode(encoding='utf-8')\n pwd_str = password.encode(encoding='utf-8')\n if hashed == bcrypt.hashpw(pwd_str, hashed):\n return True # Match! Return anything but None.\ndiff --git a/kinto/plugins/accounts/views.py b/kinto/plugins/accounts/views.py\n--- a/kinto/plugins/accounts/views.py\n+++ b/kinto/plugins/accounts/views.py\n@@ -89,9 +89,11 @@\n def process_record(self, new, old=None):\n new = super(Account, self).process_record(new, old)\n \n- # Store password safely in database.\n+ # Store password safely in database as str\n+ # (bcrypt.hashpw returns base64 bytes).\n pwd_str = new[\"password\"].encode(encoding='utf-8')\n- new[\"password\"] = bcrypt.hashpw(pwd_str, bcrypt.gensalt())\n+ hashed = bcrypt.hashpw(pwd_str, bcrypt.gensalt())\n+ new[\"password\"] = hashed.decode(encoding='utf-8')\n \n # Administrators can reach other accounts and anonymous have no\n # selected_userid. So do not try to enforce.\n", "issue": "exception in accounts plugin authentication: Unicode-objects must be encoded before hashing\nRunning a fresh docker image built from master (36dbcc0) with postgresql storage\r\n\r\nAfter:\r\n```\r\necho '{\"data\": {\"password\": \"PASSWD\"}}' | http PUT http://localhost:8889/v1/accounts/eric\r\n```\r\nRunning\r\n```\r\nhttp GET http://localhost:8889/v1/ --auth eric:coucou3333\r\n```\r\nTriggers following exception :\r\n```\r\nUnicode-objects must be encoded before hashing \r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/tweens.py\", line 22, in excview_tween\r\n response = handler(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_tm/__init__.py\", line 119, in tm_tween\r\n reraise(*exc_info)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_tm/compat.py\", line 15, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_tm/__init__.py\", line 98, in tm_tween\r\n response = handler(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/router.py\", line 155, in handle_request\r\n view_name\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/view.py\", line 612, in _call_view\r\n response = view_callable(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 389, in attr_view\r\n return view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 367, in predicate_wrapper\r\n return view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 438, in rendered_view\r\n result = view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/cornice/service.py\", line 493, in wrapper\r\n response = view_(request)\r\n File \"/code/kinto/core/views/hello.py\", line 45, in get_hello\r\n if Authenticated in request.effective_principals:\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/security.py\", 
line 375, in effective_principals\r\n return policy.effective_principals(self)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_multiauth/__init__.py\", line 119, in effective_principals\r\n userid = policy.authenticated_userid(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/authentication.py\", line 92, in authenticated_userid\r\n callback_ok = self.callback(userid, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/authentication.py\", line 1123, in callback\r\n return self.check(username, password, request)\r\n File \"/code/kinto/plugins/accounts/authentication.py\", line 18, in account_check\r\n if hashed == bcrypt.hashpw(pwd_str, hashed):\r\n File \"/usr/local/lib/python3.5/dist-packages/bcrypt/__init__.py\", line 62, in hashpw\r\n raise TypeError(\"Unicode-objects must be encoded before hashing\")\r\n```\r\n\r\nIt is fixed by encoding the value received from the session at [this line in authentication.py](https://github.com/Kinto/kinto/blob/master/kinto/plugins/accounts/authentication.py#L16):\r\n```\r\n hashed = existing['password'].encode(encoding='utf8')\r\n```\r\n\nexception in accounts plugin authentication: Unicode-objects must be encoded before hashing\nRunning a fresh docker image built from master (36dbcc0) with postgresql storage\r\n\r\nAfter:\r\n```\r\necho '{\"data\": {\"password\": \"PASSWD\"}}' | http PUT http://localhost:8889/v1/accounts/eric\r\n```\r\nRunning\r\n```\r\nhttp GET http://localhost:8889/v1/ --auth eric:coucou3333\r\n```\r\nTriggers following exception :\r\n```\r\nUnicode-objects must be encoded before hashing \r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/tweens.py\", line 22, in excview_tween\r\n response = handler(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_tm/__init__.py\", line 119, in tm_tween\r\n reraise(*exc_info)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_tm/compat.py\", line 15, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid_tm/__init__.py\", line 98, in tm_tween\r\n response = handler(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/router.py\", line 155, in handle_request\r\n view_name\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/view.py\", line 612, in _call_view\r\n response = view_callable(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 389, in attr_view\r\n return view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 367, in predicate_wrapper\r\n return view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 438, in rendered_view\r\n result = view(context, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/cornice/service.py\", line 493, in wrapper\r\n response = view_(request)\r\n File \"/code/kinto/core/views/hello.py\", line 45, in get_hello\r\n if Authenticated in request.effective_principals:\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/security.py\", line 375, in effective_principals\r\n return policy.effective_principals(self)\r\n File 
\"/usr/local/lib/python3.5/dist-packages/pyramid_multiauth/__init__.py\", line 119, in effective_principals\r\n userid = policy.authenticated_userid(request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/authentication.py\", line 92, in authenticated_userid\r\n callback_ok = self.callback(userid, request)\r\n File \"/usr/local/lib/python3.5/dist-packages/pyramid/authentication.py\", line 1123, in callback\r\n return self.check(username, password, request)\r\n File \"/code/kinto/plugins/accounts/authentication.py\", line 18, in account_check\r\n if hashed == bcrypt.hashpw(pwd_str, hashed):\r\n File \"/usr/local/lib/python3.5/dist-packages/bcrypt/__init__.py\", line 62, in hashpw\r\n raise TypeError(\"Unicode-objects must be encoded before hashing\")\r\n```\r\n\r\nIt is fixed by encoding the value received from the session at [this line in authentication.py](https://github.com/Kinto/kinto/blob/master/kinto/plugins/accounts/authentication.py#L16):\r\n```\r\n hashed = existing['password'].encode(encoding='utf8')\r\n```\r\n\n", "before_files": [{"content": "import bcrypt\nfrom pyramid import authentication as base_auth\n\nfrom kinto.core.storage import exceptions as storage_exceptions\n\n\ndef account_check(username, password, request):\n parent_id = username\n try:\n existing = request.registry.storage.get(parent_id=parent_id,\n collection_id='account',\n object_id=username)\n except storage_exceptions.RecordNotFoundError:\n return None\n\n hashed = existing['password']\n pwd_str = password.encode(encoding='utf-8')\n if hashed == bcrypt.hashpw(pwd_str, hashed):\n return True # Match! Return anything but None.\n\n\nclass AccountsAuthenticationPolicy(base_auth.BasicAuthAuthenticationPolicy):\n \"\"\"Accounts authentication policy.\n\n It will check that the credentials exist in the account resource.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(account_check, *args, **kwargs)\n\n def effective_principals(self, request):\n # Bypass default Pyramid construction of principals because\n # Pyramid multiauth already adds userid, Authenticated and Everyone\n # principals.\n return []\n", "path": "kinto/plugins/accounts/authentication.py"}, {"content": "import bcrypt\nimport colander\nfrom pyramid import httpexceptions\nfrom pyramid.decorator import reify\nfrom pyramid.security import Authenticated, Everyone\nfrom pyramid.settings import aslist\n\nfrom kinto.views import NameGenerator\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid, http_error\n\n\nclass AccountSchema(resource.ResourceSchema):\n password = colander.SchemaNode(colander.String())\n\n\[email protected]()\nclass Account(resource.ShareableResource):\n\n schema = AccountSchema\n\n def __init__(self, request, context):\n # Store if current user is administrator (before accessing get_parent_id())\n allowed_from_settings = request.registry.settings.get('account_write_principals', [])\n context.is_administrator = len(set(aslist(allowed_from_settings)) &\n set(request.prefixed_principals)) > 0\n # Shortcut to check if current is anonymous (before get_parent_id()).\n context.is_anonymous = Authenticated not in request.effective_principals\n\n super().__init__(request, context)\n\n # Overwrite the current principal set by ShareableResource.\n if self.model.current_principal == Everyone:\n # Creation is anonymous, but author with write perm is this:\n # XXX: only works if policy name is account in settings.\n self.model.current_principal = 'account:{}'.format(self.model.parent_id)\n\n 
@reify\n def id_generator(self):\n # This generator is used for ID validation.\n return NameGenerator()\n\n def get_parent_id(self, request):\n # The whole challenge here is that we want to isolate what\n # authenticated users can list, but give access to everything to\n # administrators.\n # Plus when anonymous create accounts, we have to set their parent id\n # to the same value they would obtain when authenticated.\n if self.context.is_administrator:\n if self.context.on_collection:\n # Admin see all accounts.\n return '*'\n else:\n # No pattern matching for admin on single record.\n return request.matchdict['id']\n\n if not self.context.is_anonymous:\n # Authenticated users see their own account only.\n return request.selected_userid\n\n # Anonymous creation with PUT.\n if 'id' in request.matchdict:\n return request.matchdict['id']\n\n try:\n # Anonymous creation with POST.\n return request.json['data']['id']\n except (ValueError, KeyError):\n # Bad POST data.\n if request.method.lower() == 'post':\n error_details = {\n 'name': 'data.id',\n 'description': 'data.id in body: Required'\n }\n raise_invalid(request, **error_details)\n # Anonymous GET\n error_msg = 'Cannot read accounts.'\n raise http_error(httpexceptions.HTTPUnauthorized(), error=error_msg)\n\n def collection_post(self):\n result = super(Account, self).collection_post()\n if self.context.is_anonymous and self.request.response.status_code == 200:\n error_details = {\n 'message': 'Account ID %r already exists' % result['data']['id']\n }\n raise http_error(httpexceptions.HTTPForbidden(), **error_details)\n return result\n\n def process_record(self, new, old=None):\n new = super(Account, self).process_record(new, old)\n\n # Store password safely in database.\n pwd_str = new[\"password\"].encode(encoding='utf-8')\n new[\"password\"] = bcrypt.hashpw(pwd_str, bcrypt.gensalt())\n\n # Administrators can reach other accounts and anonymous have no\n # selected_userid. So do not try to enforce.\n if self.context.is_administrator or self.context.is_anonymous:\n return new\n\n # Otherwise, we force the id to match the authenticated username.\n if new[self.model.id_field] != self.request.selected_userid:\n error_details = {\n 'name': 'data.id',\n 'description': 'Username and account ID do not match.',\n }\n raise_invalid(self.request, **error_details)\n\n return new\n", "path": "kinto/plugins/accounts/views.py"}]}
| 3,668 | 315 |
gh_patches_debug_8215
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1605
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Editing an author deletes author data not in form
**Describe the bug**
Editing an author saves the form data as an update. `isni` and `viaf_id` are not part of the form, so these values are deleted when the author details are edited.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'author page'
2. Click on 'Edit'
3. Make a change
4. Save
5. Most users will not see any change because these values are not displayed; however, they have been removed.
**Expected behavior**
Only values that have changed should change. ISNI and VIAF data should be retained on edits.
**Instance**
Development site but it presumably would be replicated on any production server.
**Additional context**
I only noticed this because I am working on a PR that will display a link to the ISNI record, and it disappears whenever I edit an author.
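For reference, a sketch of the likely mechanism under standard Django `ModelForm` semantics; the two `Meta` variants below are illustrative only (the listed field names are examples, not the project's actual form definitions):

```python
from django import forms
from bookwyrm import models

# With `exclude`, every remaining Author column (including isni and viaf_id)
# is still a form field; an edit template that never renders those inputs
# posts them back empty, and form.save() overwrites the stored values.
class AuthorFormByExclusion(forms.ModelForm):
    class Meta:
        model = models.Author
        exclude = ["remote_id", "created_date", "updated_date"]

# With an explicit whitelist, unlisted columns are not form fields at all,
# so saving the form leaves isni and viaf_id untouched.
class AuthorFormByWhitelist(forms.ModelForm):
    class Meta:
        model = models.Author
        fields = ["name", "aliases", "bio"]
```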
</issue>
<code>
[start of bookwyrm/forms.py]
1 """ using django model forms """
2 import datetime
3 from collections import defaultdict
4
5 from django import forms
6 from django.forms import ModelForm, PasswordInput, widgets, ChoiceField
7 from django.forms.widgets import Textarea
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10
11 from bookwyrm import models
12
13
14 class CustomForm(ModelForm):
15 """add css classes to the forms"""
16
17 def __init__(self, *args, **kwargs):
18 css_classes = defaultdict(lambda: "")
19 css_classes["text"] = "input"
20 css_classes["password"] = "input"
21 css_classes["email"] = "input"
22 css_classes["number"] = "input"
23 css_classes["checkbox"] = "checkbox"
24 css_classes["textarea"] = "textarea"
25 # pylint: disable=super-with-arguments
26 super(CustomForm, self).__init__(*args, **kwargs)
27 for visible in self.visible_fields():
28 if hasattr(visible.field.widget, "input_type"):
29 input_type = visible.field.widget.input_type
30 if isinstance(visible.field.widget, Textarea):
31 input_type = "textarea"
32 visible.field.widget.attrs["rows"] = 5
33 visible.field.widget.attrs["class"] = css_classes[input_type]
34
35
36 # pylint: disable=missing-class-docstring
37 class LoginForm(CustomForm):
38 class Meta:
39 model = models.User
40 fields = ["localname", "password"]
41 help_texts = {f: None for f in fields}
42 widgets = {
43 "password": PasswordInput(),
44 }
45
46
47 class RegisterForm(CustomForm):
48 class Meta:
49 model = models.User
50 fields = ["localname", "email", "password"]
51 help_texts = {f: None for f in fields}
52 widgets = {"password": PasswordInput()}
53
54
55 class RatingForm(CustomForm):
56 class Meta:
57 model = models.ReviewRating
58 fields = ["user", "book", "rating", "privacy"]
59
60
61 class ReviewForm(CustomForm):
62 class Meta:
63 model = models.Review
64 fields = [
65 "user",
66 "book",
67 "name",
68 "content",
69 "rating",
70 "content_warning",
71 "sensitive",
72 "privacy",
73 ]
74
75
76 class CommentForm(CustomForm):
77 class Meta:
78 model = models.Comment
79 fields = [
80 "user",
81 "book",
82 "content",
83 "content_warning",
84 "sensitive",
85 "privacy",
86 "progress",
87 "progress_mode",
88 "reading_status",
89 ]
90
91
92 class QuotationForm(CustomForm):
93 class Meta:
94 model = models.Quotation
95 fields = [
96 "user",
97 "book",
98 "quote",
99 "content",
100 "content_warning",
101 "sensitive",
102 "privacy",
103 "position",
104 "position_mode",
105 ]
106
107
108 class ReplyForm(CustomForm):
109 class Meta:
110 model = models.Status
111 fields = [
112 "user",
113 "content",
114 "content_warning",
115 "sensitive",
116 "reply_parent",
117 "privacy",
118 ]
119
120
121 class StatusForm(CustomForm):
122 class Meta:
123 model = models.Status
124 fields = ["user", "content", "content_warning", "sensitive", "privacy"]
125
126
127 class DirectForm(CustomForm):
128 class Meta:
129 model = models.Status
130 fields = ["user", "content", "content_warning", "sensitive", "privacy"]
131
132
133 class EditUserForm(CustomForm):
134 class Meta:
135 model = models.User
136 fields = [
137 "avatar",
138 "name",
139 "email",
140 "summary",
141 "show_goal",
142 "show_suggested_users",
143 "manually_approves_followers",
144 "default_post_privacy",
145 "discoverable",
146 "preferred_timezone",
147 "preferred_language",
148 ]
149 help_texts = {f: None for f in fields}
150
151
152 class LimitedEditUserForm(CustomForm):
153 class Meta:
154 model = models.User
155 fields = [
156 "avatar",
157 "name",
158 "summary",
159 "manually_approves_followers",
160 "discoverable",
161 ]
162 help_texts = {f: None for f in fields}
163
164
165 class DeleteUserForm(CustomForm):
166 class Meta:
167 model = models.User
168 fields = ["password"]
169
170
171 class UserGroupForm(CustomForm):
172 class Meta:
173 model = models.User
174 fields = ["groups"]
175
176
177 class CoverForm(CustomForm):
178 class Meta:
179 model = models.Book
180 fields = ["cover"]
181 help_texts = {f: None for f in fields}
182
183
184 class EditionForm(CustomForm):
185 class Meta:
186 model = models.Edition
187 exclude = [
188 "remote_id",
189 "origin_id",
190 "created_date",
191 "updated_date",
192 "edition_rank",
193 "authors",
194 "parent_work",
195 "shelves",
196 "connector",
197 "search_vector",
198 ]
199
200
201 class AuthorForm(CustomForm):
202 class Meta:
203 model = models.Author
204 exclude = [
205 "remote_id",
206 "origin_id",
207 "created_date",
208 "updated_date",
209 "search_vector",
210 ]
211
212
213 class ImportForm(forms.Form):
214 csv_file = forms.FileField()
215
216
217 class ExpiryWidget(widgets.Select):
218 def value_from_datadict(self, data, files, name):
219 """human-readable exiration time buckets"""
220 selected_string = super().value_from_datadict(data, files, name)
221
222 if selected_string == "day":
223 interval = datetime.timedelta(days=1)
224 elif selected_string == "week":
225 interval = datetime.timedelta(days=7)
226 elif selected_string == "month":
227 interval = datetime.timedelta(days=31) # Close enough?
228 elif selected_string == "forever":
229 return None
230 else:
231 return selected_string # This will raise
232
233 return timezone.now() + interval
234
235
236 class InviteRequestForm(CustomForm):
237 def clean(self):
238 """make sure the email isn't in use by a registered user"""
239 cleaned_data = super().clean()
240 email = cleaned_data.get("email")
241 if email and models.User.objects.filter(email=email).exists():
242 self.add_error("email", _("A user with this email already exists."))
243
244 class Meta:
245 model = models.InviteRequest
246 fields = ["email"]
247
248
249 class CreateInviteForm(CustomForm):
250 class Meta:
251 model = models.SiteInvite
252 exclude = ["code", "user", "times_used", "invitees"]
253 widgets = {
254 "expiry": ExpiryWidget(
255 choices=[
256 ("day", _("One Day")),
257 ("week", _("One Week")),
258 ("month", _("One Month")),
259 ("forever", _("Does Not Expire")),
260 ]
261 ),
262 "use_limit": widgets.Select(
263 choices=[(i, _(f"{i} uses")) for i in [1, 5, 10, 25, 50, 100]]
264 + [(None, _("Unlimited"))]
265 ),
266 }
267
268
269 class ShelfForm(CustomForm):
270 class Meta:
271 model = models.Shelf
272 fields = ["user", "name", "privacy", "description"]
273
274
275 class GoalForm(CustomForm):
276 class Meta:
277 model = models.AnnualGoal
278 fields = ["user", "year", "goal", "privacy"]
279
280
281 class SiteForm(CustomForm):
282 class Meta:
283 model = models.SiteSettings
284 exclude = []
285
286
287 class AnnouncementForm(CustomForm):
288 class Meta:
289 model = models.Announcement
290 exclude = ["remote_id"]
291
292
293 class ListForm(CustomForm):
294 class Meta:
295 model = models.List
296 fields = ["user", "name", "description", "curation", "privacy", "group"]
297
298
299 class GroupForm(CustomForm):
300 class Meta:
301 model = models.Group
302 fields = ["user", "privacy", "name", "description"]
303
304
305 class ReportForm(CustomForm):
306 class Meta:
307 model = models.Report
308 fields = ["user", "reporter", "statuses", "note"]
309
310
311 class EmailBlocklistForm(CustomForm):
312 class Meta:
313 model = models.EmailBlocklist
314 fields = ["domain"]
315
316
317 class IPBlocklistForm(CustomForm):
318 class Meta:
319 model = models.IPBlocklist
320 fields = ["address"]
321
322
323 class ServerForm(CustomForm):
324 class Meta:
325 model = models.FederatedServer
326 exclude = ["remote_id"]
327
328
329 class SortListForm(forms.Form):
330 sort_by = ChoiceField(
331 choices=(
332 ("order", _("List Order")),
333 ("title", _("Book Title")),
334 ("rating", _("Rating")),
335 ),
336 label=_("Sort By"),
337 )
338 direction = ChoiceField(
339 choices=(
340 ("ascending", _("Ascending")),
341 ("descending", _("Descending")),
342 ),
343 )
344
[end of bookwyrm/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py
--- a/bookwyrm/forms.py
+++ b/bookwyrm/forms.py
@@ -201,12 +201,18 @@
class AuthorForm(CustomForm):
class Meta:
model = models.Author
- exclude = [
- "remote_id",
- "origin_id",
- "created_date",
- "updated_date",
- "search_vector",
+ fields = [
+ "last_edited_by",
+ "name",
+ "aliases",
+ "bio",
+ "wikipedia_link",
+ "born",
+ "died",
+ "openlibrary_key",
+ "inventaire_id",
+ "librarything_key",
+ "goodreads_key",
]
|
{"golden_diff": "diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py\n--- a/bookwyrm/forms.py\n+++ b/bookwyrm/forms.py\n@@ -201,12 +201,18 @@\n class AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n- exclude = [\n- \"remote_id\",\n- \"origin_id\",\n- \"created_date\",\n- \"updated_date\",\n- \"search_vector\",\n+ fields = [\n+ \"last_edited_by\",\n+ \"name\",\n+ \"aliases\",\n+ \"bio\",\n+ \"wikipedia_link\",\n+ \"born\",\n+ \"died\",\n+ \"openlibrary_key\",\n+ \"inventaire_id\",\n+ \"librarything_key\",\n+ \"goodreads_key\",\n ]\n", "issue": "Editing an author deletes author data not in form\n**Describe the bug**\r\nEditing an author saves the form data as an update. `isni` and `viaf_id` are not part of the form, so these values are deleted when the author details are edited.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'author page'\r\n2. Click on 'Edit'\r\n3. Make a change\r\n4. Save\r\n5. Most users will not see any change because these values are not displayed, however they have been removed.\r\n\r\n**Expected behavior**\r\nOnly values that have changed should change. ISNI and VIAF data should be retained on edits.\r\n\r\n**Instance**\r\nDevelopment site but it presumably would be replicated on any production server.\r\n\r\n**Additional context**\r\nI only noticed this because I am working on a PR that will display a link to the ISNI record, and it disappears whenever I edit an author.\r\n\r\n\r\n\nEditing an author deletes author data not in form\n**Describe the bug**\r\nEditing an author saves the form data as an update. `isni` and `viaf_id` are not part of the form, so these values are deleted when the author details are edited.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'author page'\r\n2. Click on 'Edit'\r\n3. Make a change\r\n4. Save\r\n5. Most users will not see any change because these values are not displayed, however they have been removed.\r\n\r\n**Expected behavior**\r\nOnly values that have changed should change. 
ISNI and VIAF data should be retained on edits.\r\n\r\n**Instance**\r\nDevelopment site but it presumably would be replicated on any production server.\r\n\r\n**Additional context**\r\nI only noticed this because I am working on a PR that will display a link to the ISNI record, and it disappears whenever I edit an author.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nimport datetime\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.forms import ModelForm, PasswordInput, widgets, ChoiceField\nfrom django.forms.widgets import Textarea\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\n\n\nclass CustomForm(ModelForm):\n \"\"\"add css classes to the forms\"\"\"\n\n def __init__(self, *args, **kwargs):\n css_classes = defaultdict(lambda: \"\")\n css_classes[\"text\"] = \"input\"\n css_classes[\"password\"] = \"input\"\n css_classes[\"email\"] = \"input\"\n css_classes[\"number\"] = \"input\"\n css_classes[\"checkbox\"] = \"checkbox\"\n css_classes[\"textarea\"] = \"textarea\"\n # pylint: disable=super-with-arguments\n super(CustomForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if hasattr(visible.field.widget, \"input_type\"):\n input_type = visible.field.widget.input_type\n if isinstance(visible.field.widget, Textarea):\n input_type = \"textarea\"\n visible.field.widget.attrs[\"rows\"] = 5\n visible.field.widget.attrs[\"class\"] = css_classes[input_type]\n\n\n# pylint: disable=missing-class-docstring\nclass LoginForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"password\": PasswordInput(),\n }\n\n\nclass RegisterForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"email\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\"password\": PasswordInput()}\n\n\nclass RatingForm(CustomForm):\n class Meta:\n model = models.ReviewRating\n fields = [\"user\", \"book\", \"rating\", \"privacy\"]\n\n\nclass ReviewForm(CustomForm):\n class Meta:\n model = models.Review\n fields = [\n \"user\",\n \"book\",\n \"name\",\n \"content\",\n \"rating\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass CommentForm(CustomForm):\n class Meta:\n model = models.Comment\n fields = [\n \"user\",\n \"book\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"progress\",\n \"progress_mode\",\n \"reading_status\",\n ]\n\n\nclass QuotationForm(CustomForm):\n class Meta:\n model = models.Quotation\n fields = [\n \"user\",\n \"book\",\n \"quote\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"position\",\n \"position_mode\",\n ]\n\n\nclass ReplyForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\n \"user\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"reply_parent\",\n \"privacy\",\n ]\n\n\nclass StatusForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass DirectForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass EditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"email\",\n \"summary\",\n \"show_goal\",\n \"show_suggested_users\",\n 
\"manually_approves_followers\",\n \"default_post_privacy\",\n \"discoverable\",\n \"preferred_timezone\",\n \"preferred_language\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass LimitedEditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"summary\",\n \"manually_approves_followers\",\n \"discoverable\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass DeleteUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"password\"]\n\n\nclass UserGroupForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"groups\"]\n\n\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"edition_rank\",\n \"authors\",\n \"parent_work\",\n \"shelves\",\n \"connector\",\n \"search_vector\",\n ]\n\n\nclass AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"search_vector\",\n ]\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ExpiryWidget(widgets.Select):\n def value_from_datadict(self, data, files, name):\n \"\"\"human-readable exiration time buckets\"\"\"\n selected_string = super().value_from_datadict(data, files, name)\n\n if selected_string == \"day\":\n interval = datetime.timedelta(days=1)\n elif selected_string == \"week\":\n interval = datetime.timedelta(days=7)\n elif selected_string == \"month\":\n interval = datetime.timedelta(days=31) # Close enough?\n elif selected_string == \"forever\":\n return None\n else:\n return selected_string # This will raise\n\n return timezone.now() + interval\n\n\nclass InviteRequestForm(CustomForm):\n def clean(self):\n \"\"\"make sure the email isn't in use by a registered user\"\"\"\n cleaned_data = super().clean()\n email = cleaned_data.get(\"email\")\n if email and models.User.objects.filter(email=email).exists():\n self.add_error(\"email\", _(\"A user with this email already exists.\"))\n\n class Meta:\n model = models.InviteRequest\n fields = [\"email\"]\n\n\nclass CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n exclude = [\"code\", \"user\", \"times_used\", \"invitees\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n (\"day\", _(\"One Day\")),\n (\"week\", _(\"One Week\")),\n (\"month\", _(\"One Month\")),\n (\"forever\", _(\"Does Not Expire\")),\n ]\n ),\n \"use_limit\": widgets.Select(\n choices=[(i, _(f\"{i} uses\")) for i in [1, 5, 10, 25, 50, 100]]\n + [(None, _(\"Unlimited\"))]\n ),\n }\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\", \"description\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass SiteForm(CustomForm):\n class Meta:\n model = models.SiteSettings\n exclude = []\n\n\nclass AnnouncementForm(CustomForm):\n class Meta:\n model = models.Announcement\n exclude = [\"remote_id\"]\n\n\nclass ListForm(CustomForm):\n class Meta:\n model = models.List\n fields = [\"user\", \"name\", \"description\", \"curation\", \"privacy\", \"group\"]\n\n\nclass GroupForm(CustomForm):\n class Meta:\n model = models.Group\n fields = [\"user\", \"privacy\", \"name\", \"description\"]\n\n\nclass 
ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"statuses\", \"note\"]\n\n\nclass EmailBlocklistForm(CustomForm):\n class Meta:\n model = models.EmailBlocklist\n fields = [\"domain\"]\n\n\nclass IPBlocklistForm(CustomForm):\n class Meta:\n model = models.IPBlocklist\n fields = [\"address\"]\n\n\nclass ServerForm(CustomForm):\n class Meta:\n model = models.FederatedServer\n exclude = [\"remote_id\"]\n\n\nclass SortListForm(forms.Form):\n sort_by = ChoiceField(\n choices=(\n (\"order\", _(\"List Order\")),\n (\"title\", _(\"Book Title\")),\n (\"rating\", _(\"Rating\")),\n ),\n label=_(\"Sort By\"),\n )\n direction = ChoiceField(\n choices=(\n (\"ascending\", _(\"Ascending\")),\n (\"descending\", _(\"Descending\")),\n ),\n )\n", "path": "bookwyrm/forms.py"}]}
| 3,763 | 177 |
gh_patches_debug_3995
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5433
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend TextInput with `placeholder`
`placeholder` and `pattern` are nice features in HTML to give the user info about the field and to validate the user-input.
Add `placeholder` and `pattern` to `TextInput` in python, extend `text_input.coffee` and the `text_input_template.eco`
</issue>
<code>
[start of bokeh/models/widgets/inputs.py]
1 """ Various kinds of input widgets and form controls.
2
3 """
4 from __future__ import absolute_import
5
6 from ...core.properties import abstract
7 from ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Tuple, Either, Instance
8 from ..callbacks import Callback
9 from .widget import Widget
10 from ...core.enums import SliderCallbackPolicy
11
12 @abstract
13 class InputWidget(Widget):
14 """ Abstract base class for input widgets. `InputWidget`` is not
15 generally useful to instantiate on its own.
16
17 """
18
19 title = String(default="", help="""
20 Widget's label.
21 """)
22
23 @classmethod
24 def coerce_value(cls, val):
25 prop_obj = cls.lookup('value')
26 if isinstance(prop_obj, Float):
27 return float(val)
28 elif isinstance(prop_obj, Int):
29 return int(val)
30 elif isinstance(prop_obj, String):
31 return str(val)
32 else:
33 return val
34
35 class TextInput(InputWidget):
36 """ Single-line input widget. """
37
38 value = String(default="", help="""
39 Initial or entered text value.
40 """)
41
42 callback = Instance(Callback, help="""
43 A callback to run in the browser whenever the user unfocuses the TextInput
44 widget by hitting Enter or clicking outside of the text box area.
45 """)
46
47
48 class AutocompleteInput(TextInput):
49 """ Single-line input widget with auto-completion. """
50
51 completions = List(String, help="""
52 A list of completion strings. This will be used to guide the
53 user upon typing the beginning of a desired value.
54 """)
55
56
57 class Select(InputWidget):
58 """ Single-select widget.
59
60 """
61
62 options = List(Either(String, Tuple(String, String)), help="""
63 Available selection options. Options may be provided either as a list of
64 possible string values, or as a list of tuples, each of the form
65 ``(value, label)``. In the latter case, the visible widget text for each
66 value will be corresponding given label.
67 """)
68
69 value = String(default="", help="""
70 Initial or selected value.
71 """)
72
73 callback = Instance(Callback, help="""
74 A callback to run in the browser whenever the current Select dropdown
75 value changes.
76 """)
77
78 class MultiSelect(InputWidget):
79 """ Multi-select widget.
80
81 """
82
83 options = List(Either(String, Tuple(String, String)), help="""
84 Available selection options. Options may be provided either as a list of
85 possible string values, or as a list of tuples, each of the form
86 ``(value, label)``. In the latter case, the visible widget text for each
87 value will be corresponding given label.
88 """)
89
90 value = List(String, help="""
91 Initial or selected values.
92 """)
93
94 callback = Instance(Callback, help="""
95 A callback to run in the browser whenever the current dropdown value
96 changes.
97 """)
98
99 class Slider(InputWidget):
100 """ Slider-based number selection widget.
101
102 """
103
104 value = Float(default=0.5, help="""
105 Initial or selected value.
106 """)
107
108 start = Float(default=0, help="""
109 The minimum allowable value.
110 """)
111
112 end = Float(default=1, help="""
113 The maximum allowable value.
114 """)
115
116 step = Float(default=0.1, help="""
117 The step between consecutive values.
118 """)
119
120 orientation = Enum("horizontal", "vertical", help="""
121 Orient the slider either horizontally (default) or vertically.
122 """)
123
124 callback = Instance(Callback, help="""
125 A callback to run in the browser whenever the current Slider value changes.
126 """)
127
128 callback_throttle = Float(default=200, help="""
129 Number of microseconds to pause between callback calls as the slider is moved.
130 """)
131
132 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help="""
133 When the callback is initiated. This parameter can take on only one of three options:
134
135 "continuous": the callback will be executed immediately for each movement of the slider
136 "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds.
137 "mouseup": the callback will be executed only once when the slider is released.
138
139 The `mouseup` policy is intended for scenarios in which the callback is expensive in time.
140 """)
141
142 class RangeSlider(InputWidget):
143 """ Range-slider based range selection widget
144
145 """
146
147 range = Tuple(Float, Float, default=(0.1, 0.9), help="""
148 Initial or selected range.
149 """)
150
151 start = Float(default=0, help="""
152 The minimum allowable value.
153 """)
154
155 end = Float(default=1, help="""
156 The maximum allowable value.
157 """)
158
159 step = Float(default=0.1, help="""
160 The step between consecutive values.
161 """)
162
163 orientation = Enum("horizontal", "vertical", help="""
164 Orient the slider either horizontally (default) or vertically.
165 """)
166
167 callback = Instance(Callback, help="""
168 A callback to run in the browser whenever the current Slider value changes.
169 """)
170
171 callback_throttle = Float(default=200, help="""
172 Number of microseconds to pause between callback calls as the slider is moved.
173 """)
174
175 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help="""
176 When the callback is initiated. This parameter can take on only one of three options:
177
178 "continuous": the callback will be executed immediately for each movement of the slider
179 "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds.
180 "mouseup": the callback will be executed only once when the slider is released.
181
182 The `mouseup` policy is intended for scenarios in which the callback is expensive in time.
183 """)
184
185
186 class DateRangeSlider(InputWidget):
187 """ Slider-based date range selection widget.
188
189 """
190
191 value = Tuple(Date, Date, help="""
192 The initial or selected date range.
193 """)
194
195 bounds = Tuple(Date, Date, help="""
196 The earliest and latest allowable dates.
197 """)
198
199 range = Tuple(RelativeDelta, RelativeDelta, help="""
200 [TDB]
201 """)
202
203 step = RelativeDelta(help="""
204 The step between consecutive dates.
205 """)
206
207 # formatter = Either(String, Function(Date))
208 # scales = DateRangeSliderScales ... # first, next, stop, label, format
209
210 enabled = Bool(True, help="""
211 Enable or disable this widget.
212 """)
213
214 arrows = Bool(True, help="""
215 Whether to show clickable arrows on both ends of the slider.
216 """)
217
218 value_labels = Enum("show", "hide", "change", help="""
219 Show or hide value labels on both sides of the slider.
220 """)
221
222 wheel_mode = Enum("scroll", "zoom", default=None, help="""
223 Whether mouse zoom should scroll or zoom selected range (or
224 do nothing).
225 """)
226
227 callback = Instance(Callback, help="""
228 A callback to run in the browser whenever either slider's value changes.
229 """)
230
231 class DatePicker(InputWidget):
232 """ Calendar-based date picker widget.
233
234 """
235
236 value = Date(help="""
237 The initial or picked date.
238 """)
239
240 min_date = Date(default=None, help="""
241 Optional earliest allowable date.
242 """)
243
244 max_date = Date(default=None, help="""
245 Optional latest allowable date.
246 """)
247
248 callback = Instance(Callback, help="""
249 A callback to run in the browser whenever the current date value changes.
250 """)
251
[end of bokeh/models/widgets/inputs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bokeh/models/widgets/inputs.py b/bokeh/models/widgets/inputs.py
--- a/bokeh/models/widgets/inputs.py
+++ b/bokeh/models/widgets/inputs.py
@@ -43,6 +43,10 @@
A callback to run in the browser whenever the user unfocuses the TextInput
widget by hitting Enter or clicking outside of the text box area.
""")
+
+ placeholder = String(default="", help="""
+ Placeholder for empty input field
+ """)
class AutocompleteInput(TextInput):
|
{"golden_diff": "diff --git a/bokeh/models/widgets/inputs.py b/bokeh/models/widgets/inputs.py\n--- a/bokeh/models/widgets/inputs.py\n+++ b/bokeh/models/widgets/inputs.py\n@@ -43,6 +43,10 @@\n A callback to run in the browser whenever the user unfocuses the TextInput\n widget by hitting Enter or clicking outside of the text box area.\n \"\"\")\n+ \n+ placeholder = String(default=\"\", help=\"\"\"\n+ Placeholder for empty input field\n+ \"\"\")\n \n \n class AutocompleteInput(TextInput):\n", "issue": "Extend TextInput with `placeholder` \n`placeholder` and `pattern` are nice features in HTML to give the user info about the field and to validate the user-input.\n\nAdd `placeholder` and `pattern` to `TextInput` in python, extend `text_input.coffee` and the `text_input_template.eco` \n\n", "before_files": [{"content": "\"\"\" Various kinds of input widgets and form controls.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.properties import abstract\nfrom ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Tuple, Either, Instance\nfrom ..callbacks import Callback\nfrom .widget import Widget\nfrom ...core.enums import SliderCallbackPolicy\n\n@abstract\nclass InputWidget(Widget):\n \"\"\" Abstract base class for input widgets. `InputWidget`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n title = String(default=\"\", help=\"\"\"\n Widget's label.\n \"\"\")\n\n @classmethod\n def coerce_value(cls, val):\n prop_obj = cls.lookup('value')\n if isinstance(prop_obj, Float):\n return float(val)\n elif isinstance(prop_obj, Int):\n return int(val)\n elif isinstance(prop_obj, String):\n return str(val)\n else:\n return val\n\nclass TextInput(InputWidget):\n \"\"\" Single-line input widget. \"\"\"\n\n value = String(default=\"\", help=\"\"\"\n Initial or entered text value.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the user unfocuses the TextInput\n widget by hitting Enter or clicking outside of the text box area.\n \"\"\")\n\n\nclass AutocompleteInput(TextInput):\n \"\"\" Single-line input widget with auto-completion. \"\"\"\n\n completions = List(String, help=\"\"\"\n A list of completion strings. This will be used to guide the\n user upon typing the beginning of a desired value.\n \"\"\")\n\n\nclass Select(InputWidget):\n \"\"\" Single-select widget.\n\n \"\"\"\n\n options = List(Either(String, Tuple(String, String)), help=\"\"\"\n Available selection options. Options may be provided either as a list of\n possible string values, or as a list of tuples, each of the form\n ``(value, label)``. In the latter case, the visible widget text for each\n value will be corresponding given label.\n \"\"\")\n\n value = String(default=\"\", help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Select dropdown\n value changes.\n \"\"\")\n\nclass MultiSelect(InputWidget):\n \"\"\" Multi-select widget.\n\n \"\"\"\n\n options = List(Either(String, Tuple(String, String)), help=\"\"\"\n Available selection options. Options may be provided either as a list of\n possible string values, or as a list of tuples, each of the form\n ``(value, label)``. 
In the latter case, the visible widget text for each\n value will be corresponding given label.\n \"\"\")\n\n value = List(String, help=\"\"\"\n Initial or selected values.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current dropdown value\n changes.\n \"\"\")\n\nclass Slider(InputWidget):\n \"\"\" Slider-based number selection widget.\n\n \"\"\"\n\n value = Float(default=0.5, help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n start = Float(default=0, help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(default=1, help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=0.1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n orientation = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of microseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. This parameter can take on only one of three options:\n\n \"continuous\": the callback will be executed immediately for each movement of the slider\n \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n \"mouseup\": the callback will be executed only once when the slider is released.\n\n The `mouseup` policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\nclass RangeSlider(InputWidget):\n \"\"\" Range-slider based range selection widget\n\n \"\"\"\n\n range = Tuple(Float, Float, default=(0.1, 0.9), help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Float(default=0, help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(default=1, help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=0.1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n orientation = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of microseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. 
This parameter can take on only one of three options:\n\n \"continuous\": the callback will be executed immediately for each movement of the slider\n \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n \"mouseup\": the callback will be executed only once when the slider is released.\n\n The `mouseup` policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\n\nclass DateRangeSlider(InputWidget):\n \"\"\" Slider-based date range selection widget.\n\n \"\"\"\n\n value = Tuple(Date, Date, help=\"\"\"\n The initial or selected date range.\n \"\"\")\n\n bounds = Tuple(Date, Date, help=\"\"\"\n The earliest and latest allowable dates.\n \"\"\")\n\n range = Tuple(RelativeDelta, RelativeDelta, help=\"\"\"\n [TDB]\n \"\"\")\n\n step = RelativeDelta(help=\"\"\"\n The step between consecutive dates.\n \"\"\")\n\n # formatter = Either(String, Function(Date))\n # scales = DateRangeSliderScales ... # first, next, stop, label, format\n\n enabled = Bool(True, help=\"\"\"\n Enable or disable this widget.\n \"\"\")\n\n arrows = Bool(True, help=\"\"\"\n Whether to show clickable arrows on both ends of the slider.\n \"\"\")\n\n value_labels = Enum(\"show\", \"hide\", \"change\", help=\"\"\"\n Show or hide value labels on both sides of the slider.\n \"\"\")\n\n wheel_mode = Enum(\"scroll\", \"zoom\", default=None, help=\"\"\"\n Whether mouse zoom should scroll or zoom selected range (or\n do nothing).\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever either slider's value changes.\n \"\"\")\n\nclass DatePicker(InputWidget):\n \"\"\" Calendar-based date picker widget.\n\n \"\"\"\n\n value = Date(help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Date(default=None, help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Date(default=None, help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current date value changes.\n \"\"\")\n", "path": "bokeh/models/widgets/inputs.py"}]}
| 2,916 | 123 |
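The patch above only adds the `placeholder` property on the Python side. A minimal usage sketch, assuming a bokeh build that already contains the patched `TextInput` (the surrounding `curdoc`/widget imports are standard bokeh API, not part of the patch):

```python
# Sketch: using the new `placeholder` hint text on an otherwise empty field.
from bokeh.io import curdoc
from bokeh.models.widgets import TextInput

name_input = TextInput(title="Name", placeholder="Enter your name")

curdoc().add_root(name_input)
```

Note the `pattern` attribute mentioned in the issue is not covered by this diff; only `placeholder` is introduced.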
gh_patches_debug_2236
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-1017
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multipart request headers do not work properly for values of empty string
Continuing the discussion from https://github.com/sigmavirus24/requests-toolbelt/issues/162, attempting to create a `RequestField` which is then made multipart via `make_multipart` does not work properly if the filename given is an empty string.
urllib3 test code:
```
from urllib3.fields import RequestField
field = RequestField(name="somename", data="somedata", filename="")
field.make_multipart(content_type="application/octet-stream")
print(field.headers)
```
Expected output:
```
{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name="somename"; filename=""'}
```
Actual output:
```
{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name="somename"'}
```
##
</issue>
<code>
[start of urllib3/fields.py]
1 from __future__ import absolute_import
2 import email.utils
3 import mimetypes
4
5 from .packages import six
6
7
8 def guess_content_type(filename, default='application/octet-stream'):
9 """
10 Guess the "Content-Type" of a file.
11
12 :param filename:
13 The filename to guess the "Content-Type" of using :mod:`mimetypes`.
14 :param default:
15 If no "Content-Type" can be guessed, default to `default`.
16 """
17 if filename:
18 return mimetypes.guess_type(filename)[0] or default
19 return default
20
21
22 def format_header_param(name, value):
23 """
24 Helper function to format and quote a single header parameter.
25
26 Particularly useful for header parameters which might contain
27 non-ASCII values, like file names. This follows RFC 2231, as
28 suggested by RFC 2388 Section 4.4.
29
30 :param name:
31 The name of the parameter, a string expected to be ASCII only.
32 :param value:
33 The value of the parameter, provided as a unicode string.
34 """
35 if not any(ch in value for ch in '"\\\r\n'):
36 result = '%s="%s"' % (name, value)
37 try:
38 result.encode('ascii')
39 except (UnicodeEncodeError, UnicodeDecodeError):
40 pass
41 else:
42 return result
43 if not six.PY3 and isinstance(value, six.text_type): # Python 2:
44 value = value.encode('utf-8')
45 value = email.utils.encode_rfc2231(value, 'utf-8')
46 value = '%s*=%s' % (name, value)
47 return value
48
49
50 class RequestField(object):
51 """
52 A data container for request body parameters.
53
54 :param name:
55 The name of this request field.
56 :param data:
57 The data/value body.
58 :param filename:
59 An optional filename of the request field.
60 :param headers:
61 An optional dict-like object of headers to initially use for the field.
62 """
63 def __init__(self, name, data, filename=None, headers=None):
64 self._name = name
65 self._filename = filename
66 self.data = data
67 self.headers = {}
68 if headers:
69 self.headers = dict(headers)
70
71 @classmethod
72 def from_tuples(cls, fieldname, value):
73 """
74 A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
75
76 Supports constructing :class:`~urllib3.fields.RequestField` from
77 parameter of key/value strings AND key/filetuple. A filetuple is a
78 (filename, data, MIME type) tuple where the MIME type is optional.
79 For example::
80
81 'foo': 'bar',
82 'fakefile': ('foofile.txt', 'contents of foofile'),
83 'realfile': ('barfile.txt', open('realfile').read()),
84 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
85 'nonamefile': 'contents of nonamefile field',
86
87 Field names and filenames must be unicode.
88 """
89 if isinstance(value, tuple):
90 if len(value) == 3:
91 filename, data, content_type = value
92 else:
93 filename, data = value
94 content_type = guess_content_type(filename)
95 else:
96 filename = None
97 content_type = None
98 data = value
99
100 request_param = cls(fieldname, data, filename=filename)
101 request_param.make_multipart(content_type=content_type)
102
103 return request_param
104
105 def _render_part(self, name, value):
106 """
107 Overridable helper function to format a single header parameter.
108
109 :param name:
110 The name of the parameter, a string expected to be ASCII only.
111 :param value:
112 The value of the parameter, provided as a unicode string.
113 """
114 return format_header_param(name, value)
115
116 def _render_parts(self, header_parts):
117 """
118 Helper function to format and quote a single header.
119
120 Useful for single headers that are composed of multiple items. E.g.,
121 'Content-Disposition' fields.
122
123 :param header_parts:
124 A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
125 as `k1="v1"; k2="v2"; ...`.
126 """
127 parts = []
128 iterable = header_parts
129 if isinstance(header_parts, dict):
130 iterable = header_parts.items()
131
132 for name, value in iterable:
133 if value:
134 parts.append(self._render_part(name, value))
135
136 return '; '.join(parts)
137
138 def render_headers(self):
139 """
140 Renders the headers for this request field.
141 """
142 lines = []
143
144 sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
145 for sort_key in sort_keys:
146 if self.headers.get(sort_key, False):
147 lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
148
149 for header_name, header_value in self.headers.items():
150 if header_name not in sort_keys:
151 if header_value:
152 lines.append('%s: %s' % (header_name, header_value))
153
154 lines.append('\r\n')
155 return '\r\n'.join(lines)
156
157 def make_multipart(self, content_disposition=None, content_type=None,
158 content_location=None):
159 """
160 Makes this request field into a multipart request field.
161
162 This method overrides "Content-Disposition", "Content-Type" and
163 "Content-Location" headers to the request parameter.
164
165 :param content_type:
166 The 'Content-Type' of the request body.
167 :param content_location:
168 The 'Content-Location' of the request body.
169
170 """
171 self.headers['Content-Disposition'] = content_disposition or 'form-data'
172 self.headers['Content-Disposition'] += '; '.join([
173 '', self._render_parts(
174 (('name', self._name), ('filename', self._filename))
175 )
176 ])
177 self.headers['Content-Type'] = content_type
178 self.headers['Content-Location'] = content_location
179
[end of urllib3/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/urllib3/fields.py b/urllib3/fields.py
--- a/urllib3/fields.py
+++ b/urllib3/fields.py
@@ -130,7 +130,7 @@
iterable = header_parts.items()
for name, value in iterable:
- if value:
+ if value is not None:
parts.append(self._render_part(name, value))
return '; '.join(parts)
|
{"golden_diff": "diff --git a/urllib3/fields.py b/urllib3/fields.py\n--- a/urllib3/fields.py\n+++ b/urllib3/fields.py\n@@ -130,7 +130,7 @@\n iterable = header_parts.items()\n \n for name, value in iterable:\n- if value:\n+ if value is not None:\n parts.append(self._render_part(name, value))\n \n return '; '.join(parts)\n", "issue": "Multipart request headers do not work properly for values of empty string\nContinuing the discussion from https://github.com/sigmavirus24/requests-toolbelt/issues/162, attempting to create a `RequestField` which is then made multipart via `make_multipart` does not work properly if the filename given is an empty string.\n\nurllib3 test code:\n\n```\nfrom urllib3.fields import RequestField\nfield = RequestField(name=\"somename\", data=\"somedata\", filename=\"\")\nfield.make_multipart(content_type=\"application/octet-stream\")\nprint(field.headers)\n```\n\nExpected output:\n\n```\n{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name=\"somename\"; filename=\"\"'}\n```\n\nActual output:\n\n```\n{'Content-Type': 'application/octet-stream', 'Content-Location': None, 'Content-Disposition': 'form-data; name=\"somename\"'}\n```\n## \n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport email.utils\nimport mimetypes\n\nfrom .packages import six\n\n\ndef guess_content_type(filename, default='application/octet-stream'):\n \"\"\"\n Guess the \"Content-Type\" of a file.\n\n :param filename:\n The filename to guess the \"Content-Type\" of using :mod:`mimetypes`.\n :param default:\n If no \"Content-Type\" can be guessed, default to `default`.\n \"\"\"\n if filename:\n return mimetypes.guess_type(filename)[0] or default\n return default\n\n\ndef format_header_param(name, value):\n \"\"\"\n Helper function to format and quote a single header parameter.\n\n Particularly useful for header parameters which might contain\n non-ASCII values, like file names. This follows RFC 2231, as\n suggested by RFC 2388 Section 4.4.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n if not any(ch in value for ch in '\"\\\\\\r\\n'):\n result = '%s=\"%s\"' % (name, value)\n try:\n result.encode('ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n pass\n else:\n return result\n if not six.PY3 and isinstance(value, six.text_type): # Python 2:\n value = value.encode('utf-8')\n value = email.utils.encode_rfc2231(value, 'utf-8')\n value = '%s*=%s' % (name, value)\n return value\n\n\nclass RequestField(object):\n \"\"\"\n A data container for request body parameters.\n\n :param name:\n The name of this request field.\n :param data:\n The data/value body.\n :param filename:\n An optional filename of the request field.\n :param headers:\n An optional dict-like object of headers to initially use for the field.\n \"\"\"\n def __init__(self, name, data, filename=None, headers=None):\n self._name = name\n self._filename = filename\n self.data = data\n self.headers = {}\n if headers:\n self.headers = dict(headers)\n\n @classmethod\n def from_tuples(cls, fieldname, value):\n \"\"\"\n A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.\n\n Supports constructing :class:`~urllib3.fields.RequestField` from\n parameter of key/value strings AND key/filetuple. 
A filetuple is a\n (filename, data, MIME type) tuple where the MIME type is optional.\n For example::\n\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n\n Field names and filenames must be unicode.\n \"\"\"\n if isinstance(value, tuple):\n if len(value) == 3:\n filename, data, content_type = value\n else:\n filename, data = value\n content_type = guess_content_type(filename)\n else:\n filename = None\n content_type = None\n data = value\n\n request_param = cls(fieldname, data, filename=filename)\n request_param.make_multipart(content_type=content_type)\n\n return request_param\n\n def _render_part(self, name, value):\n \"\"\"\n Overridable helper function to format a single header parameter.\n\n :param name:\n The name of the parameter, a string expected to be ASCII only.\n :param value:\n The value of the parameter, provided as a unicode string.\n \"\"\"\n return format_header_param(name, value)\n\n def _render_parts(self, header_parts):\n \"\"\"\n Helper function to format and quote a single header.\n\n Useful for single headers that are composed of multiple items. E.g.,\n 'Content-Disposition' fields.\n\n :param header_parts:\n A sequence of (k, v) typles or a :class:`dict` of (k, v) to format\n as `k1=\"v1\"; k2=\"v2\"; ...`.\n \"\"\"\n parts = []\n iterable = header_parts\n if isinstance(header_parts, dict):\n iterable = header_parts.items()\n\n for name, value in iterable:\n if value:\n parts.append(self._render_part(name, value))\n\n return '; '.join(parts)\n\n def render_headers(self):\n \"\"\"\n Renders the headers for this request field.\n \"\"\"\n lines = []\n\n sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']\n for sort_key in sort_keys:\n if self.headers.get(sort_key, False):\n lines.append('%s: %s' % (sort_key, self.headers[sort_key]))\n\n for header_name, header_value in self.headers.items():\n if header_name not in sort_keys:\n if header_value:\n lines.append('%s: %s' % (header_name, header_value))\n\n lines.append('\\r\\n')\n return '\\r\\n'.join(lines)\n\n def make_multipart(self, content_disposition=None, content_type=None,\n content_location=None):\n \"\"\"\n Makes this request field into a multipart request field.\n\n This method overrides \"Content-Disposition\", \"Content-Type\" and\n \"Content-Location\" headers to the request parameter.\n\n :param content_type:\n The 'Content-Type' of the request body.\n :param content_location:\n The 'Content-Location' of the request body.\n\n \"\"\"\n self.headers['Content-Disposition'] = content_disposition or 'form-data'\n self.headers['Content-Disposition'] += '; '.join([\n '', self._render_parts(\n (('name', self._name), ('filename', self._filename))\n )\n ])\n self.headers['Content-Type'] = content_type\n self.headers['Content-Location'] = content_location\n", "path": "urllib3/fields.py"}]}
| 2,483 | 102 |
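The root cause above is plain Python truthiness: `if value:` skips both `None` and the empty string, so `filename=""` never reaches `_render_part`. A minimal sketch of the distinction, using only stock Python (the tuple below is illustrative, not the urllib3 code):

```python
# Why `if value:` drops an empty filename: None and "" are both falsy,
# but only None should be skipped when rendering header parts.
parts = (("name", "somename"), ("filename", ""))

kept_truthy = [k for k, v in parts if v]                  # drops filename=""
kept_not_none = [k for k, v in parts if v is not None]    # keeps it

print(kept_truthy)    # ['name']
print(kept_not_none)  # ['name', 'filename']
```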
gh_patches_debug_19256
|
rasdani/github-patches
|
git_diff
|
e2nIEE__pandapower-275
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation warning for asmatrix
The usage of numpy asmatrix raises Deprecation Warnings in numpy 1.15.4:
PendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.
There are 5 occurences in pandapower/pf/dSbus_dV_pypower.py.
</issue>
<code>
[start of pandapower/pf/dSbus_dV_pypower.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright 1996-2015 PSERC. All rights reserved.
4 # Use of this source code is governed by a BSD-style
5 # license that can be found in the LICENSE file.
6
7 # Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
8 # and Energy System Technology (IEE), Kassel. All rights reserved.
9
10
11
12 """Computes partial derivatives of power injection w.r.t. voltage.
13 """
14
15 from numpy import conj, diag, asmatrix, asarray, zeros
16 from scipy.sparse import issparse, csr_matrix as sparse
17
18
19 def dSbus_dV(Ybus, V):
20 """Computes partial derivatives of power injection w.r.t. voltage.
21 """
22
23 if issparse(Ybus):
24 return dSbus_dV_sparse(Ybus, V)
25 else:
26 return dSbus_dV_dense(Ybus, V)
27
28
29 def dSbus_dV_sparse(Ybus, V):
30 Ibus = Ybus * V
31 ib = range(len(V))
32 diagV = sparse((V, (ib, ib)))
33 diagIbus = sparse((Ibus, (ib, ib)))
34 diagVnorm = sparse((V / abs(V), (ib, ib)))
35 dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
36 dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
37 return dS_dVm, dS_dVa
38
39
40 def dSbus_dV_dense(Ybus, V):
41 # standard code from Pypower (slower than above)
42 Ibus = Ybus * asmatrix(V).T
43
44 diagV = asmatrix(diag(V))
45 diagIbus = asmatrix(diag(asarray(Ibus).flatten()))
46 diagVnorm = asmatrix(diag(V / abs(V)))
47
48 dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
49 dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
50 return dS_dVm, dS_dVa
51
[end of pandapower/pf/dSbus_dV_pypower.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pandapower/pf/dSbus_dV_pypower.py b/pandapower/pf/dSbus_dV_pypower.py
--- a/pandapower/pf/dSbus_dV_pypower.py
+++ b/pandapower/pf/dSbus_dV_pypower.py
@@ -12,7 +12,7 @@
"""Computes partial derivatives of power injection w.r.t. voltage.
"""
-from numpy import conj, diag, asmatrix, asarray, zeros
+from numpy import conj, diag, asarray
from scipy.sparse import issparse, csr_matrix as sparse
@@ -39,11 +39,11 @@
def dSbus_dV_dense(Ybus, V):
# standard code from Pypower (slower than above)
- Ibus = Ybus * asmatrix(V).T
+ Ibus = Ybus * asarray(V).T
- diagV = asmatrix(diag(V))
- diagIbus = asmatrix(diag(asarray(Ibus).flatten()))
- diagVnorm = asmatrix(diag(V / abs(V)))
+ diagV = asarray(diag(V))
+ diagIbus = asarray(diag(asarray(Ibus).flatten()))
+ diagVnorm = asarray(diag(V / abs(V)))
dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
|
{"golden_diff": "diff --git a/pandapower/pf/dSbus_dV_pypower.py b/pandapower/pf/dSbus_dV_pypower.py\n--- a/pandapower/pf/dSbus_dV_pypower.py\n+++ b/pandapower/pf/dSbus_dV_pypower.py\n@@ -12,7 +12,7 @@\n \"\"\"Computes partial derivatives of power injection w.r.t. voltage.\n \"\"\"\n \n-from numpy import conj, diag, asmatrix, asarray, zeros\n+from numpy import conj, diag, asarray\n from scipy.sparse import issparse, csr_matrix as sparse\n \n \n@@ -39,11 +39,11 @@\n \n def dSbus_dV_dense(Ybus, V):\n # standard code from Pypower (slower than above)\n- Ibus = Ybus * asmatrix(V).T\n+ Ibus = Ybus * asarray(V).T\n \n- diagV = asmatrix(diag(V))\n- diagIbus = asmatrix(diag(asarray(Ibus).flatten()))\n- diagVnorm = asmatrix(diag(V / abs(V)))\n+ diagV = asarray(diag(V))\n+ diagIbus = asarray(diag(asarray(Ibus).flatten()))\n+ diagVnorm = asarray(diag(V / abs(V)))\n \n dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm\n dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)\n", "issue": "Deprecation warning for asmatrix\nThe usage of numpy asmatrix raises Deprecation Warnings in numpy 1.15.4:\r\nPendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.\r\nThere are 5 occurences in pandapower/pf/dSbus_dV_pypower.py.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\n\n\"\"\"Computes partial derivatives of power injection w.r.t. voltage.\n\"\"\"\n\nfrom numpy import conj, diag, asmatrix, asarray, zeros\nfrom scipy.sparse import issparse, csr_matrix as sparse\n\n\ndef dSbus_dV(Ybus, V):\n \"\"\"Computes partial derivatives of power injection w.r.t. voltage.\n \"\"\"\n\n if issparse(Ybus):\n return dSbus_dV_sparse(Ybus, V)\n else:\n return dSbus_dV_dense(Ybus, V)\n\n\ndef dSbus_dV_sparse(Ybus, V):\n Ibus = Ybus * V\n ib = range(len(V))\n diagV = sparse((V, (ib, ib)))\n diagIbus = sparse((Ibus, (ib, ib)))\n diagVnorm = sparse((V / abs(V), (ib, ib)))\n dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm\n dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)\n return dS_dVm, dS_dVa\n\n\ndef dSbus_dV_dense(Ybus, V):\n # standard code from Pypower (slower than above)\n Ibus = Ybus * asmatrix(V).T\n\n diagV = asmatrix(diag(V))\n diagIbus = asmatrix(diag(asarray(Ibus).flatten()))\n diagVnorm = asmatrix(diag(V / abs(V)))\n\n dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm\n dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)\n return dS_dVm, dS_dVa\n", "path": "pandapower/pf/dSbus_dV_pypower.py"}]}
| 1,253 | 349 |
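A small sketch of the `np.matrix` → `ndarray` migration behind this change (hypothetical arrays, not the pandapower code). One caveat worth keeping in mind when porting: `*` on `np.matrix` is a matrix product, while on `ndarray` it is elementwise, so matrix products have to be written with `@` or `.dot`:

```python
import numpy as np

V = np.array([1.0 + 0.1j, 0.98 - 0.05j])
Y = np.array([[2.0, -1.0], [-1.0, 2.0]], dtype=complex)

old_style = np.asmatrix(Y) * np.asmatrix(V).T   # emits the PendingDeprecationWarning
new_style = Y @ V                               # plain ndarray, no warning

print(np.allclose(np.asarray(old_style).ravel(), new_style))  # True
```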
gh_patches_debug_25881
|
rasdani/github-patches
|
git_diff
|
translate__pootle-4060
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
merge_user should also remove the old user
Currently `merge_user` does not actually remove the old user. You need to run `purge_user` following `merge_user` to completely remove the user. This is dangerous and error prone, especially on older instances that have a large number of users.
</issue>
<code>
[start of pootle/apps/accounts/management/commands/merge_user.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import accounts
11
12 from . import UserCommand
13
14
15 class Command(UserCommand):
16 args = "user other_user"
17 help = "Merge user to other_user"
18
19 def handle(self, *args, **kwargs):
20 super(Command, self).handle(*args, **kwargs)
21 accounts.utils.UserMerger(self.get_user(username=args[0]),
22 self.get_user(username=args[1])).merge()
23
[end of pootle/apps/accounts/management/commands/merge_user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/accounts/management/commands/merge_user.py b/pootle/apps/accounts/management/commands/merge_user.py
--- a/pootle/apps/accounts/management/commands/merge_user.py
+++ b/pootle/apps/accounts/management/commands/merge_user.py
@@ -7,6 +7,8 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+from optparse import make_option
+
import accounts
from . import UserCommand
@@ -15,8 +17,22 @@
class Command(UserCommand):
args = "user other_user"
help = "Merge user to other_user"
+ shared_option_list = (
+ make_option("--no-delete",
+ dest='delete',
+ action="store_false",
+ default=True,
+ help="Don't delete user after merging."),
+ )
+ option_list = UserCommand.option_list + shared_option_list
def handle(self, *args, **kwargs):
super(Command, self).handle(*args, **kwargs)
- accounts.utils.UserMerger(self.get_user(username=args[0]),
+ src_user = self.get_user(username=args[0])
+ accounts.utils.UserMerger(src_user,
self.get_user(username=args[1])).merge()
+
+ if kwargs.get("delete"):
+ self.stdout.write("Deleting user: %s...\n" % src_user.username)
+ src_user.delete()
+ self.stdout.write("User deleted: %s\n" % src_user.username)
|
{"golden_diff": "diff --git a/pootle/apps/accounts/management/commands/merge_user.py b/pootle/apps/accounts/management/commands/merge_user.py\n--- a/pootle/apps/accounts/management/commands/merge_user.py\n+++ b/pootle/apps/accounts/management/commands/merge_user.py\n@@ -7,6 +7,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from optparse import make_option\n+\n import accounts\n \n from . import UserCommand\n@@ -15,8 +17,22 @@\n class Command(UserCommand):\n args = \"user other_user\"\n help = \"Merge user to other_user\"\n+ shared_option_list = (\n+ make_option(\"--no-delete\",\n+ dest='delete',\n+ action=\"store_false\",\n+ default=True,\n+ help=\"Don't delete user after merging.\"),\n+ )\n+ option_list = UserCommand.option_list + shared_option_list\n \n def handle(self, *args, **kwargs):\n super(Command, self).handle(*args, **kwargs)\n- accounts.utils.UserMerger(self.get_user(username=args[0]),\n+ src_user = self.get_user(username=args[0])\n+ accounts.utils.UserMerger(src_user,\n self.get_user(username=args[1])).merge()\n+\n+ if kwargs.get(\"delete\"):\n+ self.stdout.write(\"Deleting user: %s...\\n\" % src_user.username)\n+ src_user.delete()\n+ self.stdout.write(\"User deleted: %s\\n\" % src_user.username)\n", "issue": "merge_user should also remove the old user\nCurrently `merge_user` does not actually remove the old user. You need to run `purge_user` following `merge_user` to completely remove the user. This is dangerous and error prone, especially on older instances that have a large number of users.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport accounts\n\nfrom . import UserCommand\n\n\nclass Command(UserCommand):\n args = \"user other_user\"\n help = \"Merge user to other_user\"\n\n def handle(self, *args, **kwargs):\n super(Command, self).handle(*args, **kwargs)\n accounts.utils.UserMerger(self.get_user(username=args[0]),\n self.get_user(username=args[1])).merge()\n", "path": "pootle/apps/accounts/management/commands/merge_user.py"}]}
| 813 | 343 |
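The fix relies on the usual opt-out boolean flag pattern with `optparse`. A generic sketch of that pattern on its own (parser and option names here are illustrative, not the pootle command):

```python
from optparse import OptionParser, make_option

option_list = [
    make_option("--no-delete", dest="delete", action="store_false",
                default=True, help="Don't delete the source user after merging."),
]

parser = OptionParser(option_list=option_list)
opts, args = parser.parse_args(["--no-delete"])
print(opts.delete)  # False; defaults to True when the flag is absent
```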
gh_patches_debug_41748
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-352
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support pushing docker image built by elasticdl client.
to run docker images in cloud environment, we need add step for pushing docker image to registry provided by cloud.
</issue>
<code>
[start of elasticdl/client/client.py]
1 import argparse
2 import os
3 import inspect
4 import tempfile
5 import time
6 import getpass
7 import sys
8 from string import Template
9 import docker
10 import yaml
11 from kubernetes.client.apis import core_v1_api
12 from kubernetes import config
13
14
15 def _m_file_in_docker(model_file):
16 return "/model/" + os.path.basename(model_file)
17
18 def _build_docker_image(
19 m_file, timestamp, image_base="elasticdl:dev"
20 ):
21 DOCKER_TEMPLATE = """
22 FROM {}
23 COPY {} {}
24 """
25
26 with tempfile.NamedTemporaryFile(mode="w+", delete=False) as df:
27 df.write(DOCKER_TEMPLATE.format(image_base, m_file, _m_file_in_docker(m_file)))
28
29 client = docker.APIClient(base_url="unix://var/run/docker.sock")
30 for line in client.build(
31 dockerfile=df.name, path=".", rm=True, tag="elasticdl:dev_" + str(timestamp)
32 ):
33 print(str(line, encoding="utf-8"))
34
35 # TODO: upload docker image to docker hub.
36
37 def _gen_master_def(model_file, argv, timestamp):
38 master_yaml = """
39 apiVersion: v1
40 kind: Pod
41 metadata:
42 name: elasticdl-master-{timestamp}
43 labels:
44 purpose: test-command
45 spec:
46 containers:
47 - name: elasticdl-master-{timestamp}
48 image: elasticdl:dev_{timestamp}
49 command: ["python"]
50 args: [
51 "-m", "elasticdl.master.main",
52 "--worker_image", "elasticdl:dev_{timestamp}",
53 "--model_file", "{m_file}"
54 ]
55 imagePullPolicy: Never
56 env:
57 - name: MY_POD_IP
58 valueFrom:
59 fieldRef:
60 fieldPath: status.podIP
61 restartPolicy: Never
62 """ .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)
63
64 master_def = yaml.safe_load(master_yaml)
65
66 # Build master arguments
67 master_def['spec']['containers'][0]['args'].extend(argv)
68 return master_def
69
70 def _submit(model_file, argv, timestamp):
71 master_def = _gen_master_def(model_file, argv, timestamp)
72 config.load_kube_config()
73 api = core_v1_api.CoreV1Api()
74 resp = api.create_namespaced_pod(body=master_def, namespace="default")
75 print("Master launched. status='%s'" % str(resp.status))
76
77 def main():
78 parser = argparse.ArgumentParser(description="ElasticDL Client")
79 # Rewrite model_file argument and pass all other arguments to master.
80 parser.add_argument("--model_file", help="Path to Model file", required=True)
81 args, argv = parser.parse_known_args()
82
83 timestamp = int(round(time.time() * 1000))
84 _build_docker_image(args.model_file, timestamp)
85 _submit(args.model_file, argv, timestamp)
86
87
88 if __name__ == "__main__":
89 main()
90
[end of elasticdl/client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py
--- a/elasticdl/client/client.py
+++ b/elasticdl/client/client.py
@@ -16,7 +16,8 @@
return "/model/" + os.path.basename(model_file)
def _build_docker_image(
- m_file, timestamp, image_base="elasticdl:dev"
+ m_file, image_name, image_base="elasticdl:dev",
+ repository=None
):
DOCKER_TEMPLATE = """
FROM {}
@@ -28,13 +29,15 @@
client = docker.APIClient(base_url="unix://var/run/docker.sock")
for line in client.build(
- dockerfile=df.name, path=".", rm=True, tag="elasticdl:dev_" + str(timestamp)
+ dockerfile=df.name, path=".", rm=True, tag=image_name
):
print(str(line, encoding="utf-8"))
- # TODO: upload docker image to docker hub.
+ if repository != None:
+ for line in client.push(image_name, stream=True, decode=True):
+ print(line)
-def _gen_master_def(model_file, argv, timestamp):
+def _gen_master_def(image_name, model_file, argv, timestamp):
master_yaml = """
apiVersion: v1
kind: Pod
@@ -45,21 +48,21 @@
spec:
containers:
- name: elasticdl-master-{timestamp}
- image: elasticdl:dev_{timestamp}
+ image: {image_name}
command: ["python"]
args: [
"-m", "elasticdl.master.main",
- "--worker_image", "elasticdl:dev_{timestamp}",
+ "--worker_image", {image_name},
"--model_file", "{m_file}"
]
- imagePullPolicy: Never
+ imagePullPolicy: IfNotPresent
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
restartPolicy: Never
-""" .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)
+""" .format(m_file=_m_file_in_docker(model_file), image_name=image_name, timestamp=timestamp)
master_def = yaml.safe_load(master_yaml)
@@ -67,8 +70,8 @@
master_def['spec']['containers'][0]['args'].extend(argv)
return master_def
-def _submit(model_file, argv, timestamp):
- master_def = _gen_master_def(model_file, argv, timestamp)
+def _submit(image_name, model_file, argv, timestamp):
+ master_def = _gen_master_def(image_name, model_file, argv, timestamp)
config.load_kube_config()
api = core_v1_api.CoreV1Api()
resp = api.create_namespaced_pod(body=master_def, namespace="default")
@@ -78,11 +81,15 @@
parser = argparse.ArgumentParser(description="ElasticDL Client")
# Rewrite model_file argument and pass all other arguments to master.
parser.add_argument("--model_file", help="Path to Model file", required=True)
+ parser.add_argument("--image-base", help="Base image containing elasticdl runtime environment.")
+ parser.add_argument("--repository", help="The repository to push docker image to.")
args, argv = parser.parse_known_args()
- timestamp = int(round(time.time() * 1000))
- _build_docker_image(args.model_file, timestamp)
- _submit(args.model_file, argv, timestamp)
+ timestamp = str(int(round(time.time() * 1000)))
+ image_name = args.image_base + '_' + timestamp
+ _build_docker_image(args.model_file, image_name, image_base=args.image_base,
+ repository=args.repository)
+ _submit(image_name, args.model_file, argv, timestamp)
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/elasticdl/client/client.py b/elasticdl/client/client.py\n--- a/elasticdl/client/client.py\n+++ b/elasticdl/client/client.py\n@@ -16,7 +16,8 @@\n return \"/model/\" + os.path.basename(model_file)\n \n def _build_docker_image(\n- m_file, timestamp, image_base=\"elasticdl:dev\"\n+ m_file, image_name, image_base=\"elasticdl:dev\",\n+ repository=None\n ):\n DOCKER_TEMPLATE = \"\"\"\n FROM {}\n@@ -28,13 +29,15 @@\n \n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n for line in client.build(\n- dockerfile=df.name, path=\".\", rm=True, tag=\"elasticdl:dev_\" + str(timestamp)\n+ dockerfile=df.name, path=\".\", rm=True, tag=image_name\n ):\n print(str(line, encoding=\"utf-8\"))\n \n- # TODO: upload docker image to docker hub.\n+ if repository != None:\n+ for line in client.push(image_name, stream=True, decode=True):\n+ print(line)\n \n-def _gen_master_def(model_file, argv, timestamp):\n+def _gen_master_def(image_name, model_file, argv, timestamp):\n master_yaml = \"\"\"\n apiVersion: v1\n kind: Pod\n@@ -45,21 +48,21 @@\n spec:\n containers:\n - name: elasticdl-master-{timestamp}\n- image: elasticdl:dev_{timestamp}\n+ image: {image_name}\n command: [\"python\"]\n args: [\n \"-m\", \"elasticdl.master.main\",\n- \"--worker_image\", \"elasticdl:dev_{timestamp}\",\n+ \"--worker_image\", {image_name},\n \"--model_file\", \"{m_file}\"\n ]\n- imagePullPolicy: Never\n+ imagePullPolicy: IfNotPresent \n env:\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n restartPolicy: Never\n-\"\"\" .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)\n+\"\"\" .format(m_file=_m_file_in_docker(model_file), image_name=image_name, timestamp=timestamp)\n \n master_def = yaml.safe_load(master_yaml)\n \n@@ -67,8 +70,8 @@\n master_def['spec']['containers'][0]['args'].extend(argv)\n return master_def\n \n-def _submit(model_file, argv, timestamp):\n- master_def = _gen_master_def(model_file, argv, timestamp)\n+def _submit(image_name, model_file, argv, timestamp):\n+ master_def = _gen_master_def(image_name, model_file, argv, timestamp)\n config.load_kube_config()\n api = core_v1_api.CoreV1Api()\n resp = api.create_namespaced_pod(body=master_def, namespace=\"default\")\n@@ -78,11 +81,15 @@\n parser = argparse.ArgumentParser(description=\"ElasticDL Client\")\n # Rewrite model_file argument and pass all other arguments to master.\n parser.add_argument(\"--model_file\", help=\"Path to Model file\", required=True)\n+ parser.add_argument(\"--image-base\", help=\"Base image containing elasticdl runtime environment.\")\n+ parser.add_argument(\"--repository\", help=\"The repository to push docker image to.\")\n args, argv = parser.parse_known_args()\n \n- timestamp = int(round(time.time() * 1000))\n- _build_docker_image(args.model_file, timestamp)\n- _submit(args.model_file, argv, timestamp) \n+ timestamp = str(int(round(time.time() * 1000)))\n+ image_name = args.image_base + '_' + timestamp \n+ _build_docker_image(args.model_file, image_name, image_base=args.image_base,\n+ repository=args.repository)\n+ _submit(image_name, args.model_file, argv, timestamp)\n \n \n if __name__ == \"__main__\":\n", "issue": "Support pushing docker image built by elasticdl client.\nto run docker images in cloud environment, we need add step for pushing docker image to registry provided by cloud.\n", "before_files": [{"content": "import argparse\nimport os\nimport inspect\nimport tempfile\nimport time\nimport getpass\nimport sys\nfrom string import Template\nimport 
docker\nimport yaml\nfrom kubernetes.client.apis import core_v1_api\nfrom kubernetes import config\n\n\ndef _m_file_in_docker(model_file):\n return \"/model/\" + os.path.basename(model_file)\n\ndef _build_docker_image(\n m_file, timestamp, image_base=\"elasticdl:dev\"\n):\n DOCKER_TEMPLATE = \"\"\"\nFROM {}\nCOPY {} {}\n\"\"\"\n\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as df:\n df.write(DOCKER_TEMPLATE.format(image_base, m_file, _m_file_in_docker(m_file)))\n\n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n for line in client.build(\n dockerfile=df.name, path=\".\", rm=True, tag=\"elasticdl:dev_\" + str(timestamp)\n ):\n print(str(line, encoding=\"utf-8\"))\n\n # TODO: upload docker image to docker hub.\n\ndef _gen_master_def(model_file, argv, timestamp):\n master_yaml = \"\"\"\napiVersion: v1\nkind: Pod\nmetadata:\n name: elasticdl-master-{timestamp}\n labels:\n purpose: test-command\nspec:\n containers:\n - name: elasticdl-master-{timestamp}\n image: elasticdl:dev_{timestamp}\n command: [\"python\"]\n args: [\n \"-m\", \"elasticdl.master.main\",\n \"--worker_image\", \"elasticdl:dev_{timestamp}\",\n \"--model_file\", \"{m_file}\"\n ]\n imagePullPolicy: Never\n env:\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n restartPolicy: Never\n\"\"\" .format(m_file=_m_file_in_docker(model_file), timestamp=timestamp)\n\n master_def = yaml.safe_load(master_yaml)\n\n # Build master arguments\n master_def['spec']['containers'][0]['args'].extend(argv)\n return master_def\n\ndef _submit(model_file, argv, timestamp):\n master_def = _gen_master_def(model_file, argv, timestamp)\n config.load_kube_config()\n api = core_v1_api.CoreV1Api()\n resp = api.create_namespaced_pod(body=master_def, namespace=\"default\")\n print(\"Master launched. status='%s'\" % str(resp.status))\n\ndef main():\n parser = argparse.ArgumentParser(description=\"ElasticDL Client\")\n # Rewrite model_file argument and pass all other arguments to master.\n parser.add_argument(\"--model_file\", help=\"Path to Model file\", required=True)\n args, argv = parser.parse_known_args()\n\n timestamp = int(round(time.time() * 1000))\n _build_docker_image(args.model_file, timestamp)\n _submit(args.model_file, argv, timestamp) \n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/client/client.py"}]}
| 1,363 | 856 |
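The elasticdl record above adds an optional `repository` argument and streams the output of `client.push(...)` when it is set. A minimal standalone sketch of that push step, assuming a local Docker daemon socket and an already-built image tag; the image and repository names below are illustrative, not taken from the record:

```python
import docker

def push_image(image_name, repository=None):
    # Talk to the local Docker daemon over its Unix socket, as the patched client does.
    client = docker.APIClient(base_url="unix://var/run/docker.sock")
    if repository is not None:
        # stream=True + decode=True yields one decoded JSON status dict per progress line.
        for line in client.push(image_name, stream=True, decode=True):
            print(line)

# Hypothetical usage: the tag must already exist locally and include the target
# registry/namespace for the push to land anywhere useful.
# push_image("registry.example.com/elasticdl:dev_1555000000000",
#            repository="registry.example.com")
```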
gh_patches_debug_13363
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-757
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Visa now supports 19 digits credit card length
reference:
https://www.freeformatter.com/credit-card-number-generator-validator.html
</issue>
<code>
[start of faker/providers/credit_card/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from collections import OrderedDict
4
5 from .. import BaseProvider
6
7
8 class CreditCard(object):
9
10 def __init__(
11 self,
12 name,
13 prefixes,
14 length=16,
15 security_code='CVC',
16 security_code_length=3):
17 self.name = name
18 self.prefixes = prefixes
19 self.length = length
20 self.security_code = security_code
21 self.security_code_length = security_code_length
22
23
24 class Provider(BaseProvider):
25
26 # Prefixes from:
27 # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29
28 # * https://www.regular-expressions.info/creditcard.html
29 # * https://creditcardjs.com/credit-card-type-detection
30 prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',
31 '6304', '6759', '6761', '6762', '6763', '0604', '6390']
32 prefix_mastercard = ['51', '52', '53', '54', '55', '222%']
33 prefix_visa = ['4']
34 prefix_amex = ['34', '37']
35 prefix_discover = ['6011', '65']
36 prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']
37 prefix_jcb16 = ['35']
38 prefix_jcb15 = ['2131', '1800']
39
40 credit_card_types = OrderedDict((
41 ('maestro', CreditCard('Maestro',
42 prefix_maestro, 12, security_code='CVV')),
43 ('mastercard', CreditCard('Mastercard',
44 prefix_mastercard, 16, security_code='CVV')),
45 ('visa16', CreditCard('VISA 16 digit', prefix_visa)),
46 ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),
47 ('amex', CreditCard('American Express', prefix_amex,
48 15, security_code='CID', security_code_length=4)),
49 ('discover', CreditCard('Discover', prefix_discover)),
50 ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),
51 ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),
52 ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),
53 ))
54 credit_card_types['visa'] = credit_card_types['visa16']
55 credit_card_types['jcb'] = credit_card_types['jcb16']
56
57 luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8,
58 '5': 1, '6': 3, '7': 5, '8': 7, '9': 9}
59
60 def credit_card_provider(self, card_type=None):
61 """ Returns the provider's name of the credit card. """
62 if card_type is None:
63 card_type = self.random_element(self.credit_card_types.keys())
64 return self._credit_card_type(card_type).name
65
66 def credit_card_number(self, card_type=None):
67 """ Returns a valid credit card number. """
68 card = self._credit_card_type(card_type)
69 prefix = self.random_element(card.prefixes)
70 number = self._generate_number(self.numerify(prefix), card.length)
71 return number
72
73 def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):
74 expire_date = self.generator.date_time_between(start, end)
75 return expire_date.strftime(date_format)
76
77 def credit_card_full(self, card_type=None):
78 card = self._credit_card_type(card_type)
79
80 tpl = ('{provider}\n'
81 '{owner}\n'
82 '{number} {expire_date}\n'
83 '{security}: {security_nb}\n')
84
85 tpl = tpl.format(provider=card.name,
86 owner=self.generator.parse(
87 "{{first_name}} {{last_name}}"),
88 number=self.credit_card_number(card),
89 expire_date=self.credit_card_expire(),
90 security=card.security_code,
91 security_nb=self.credit_card_security_code(card))
92
93 return self.generator.parse(tpl)
94
95 def credit_card_security_code(self, card_type=None):
96 """ Returns a security code string. """
97 sec_len = self._credit_card_type(card_type).security_code_length
98 return self.numerify('#' * sec_len)
99
100 def _credit_card_type(self, card_type=None):
101 """ Returns a random credit card type instance. """
102 if card_type is None:
103 card_type = self.random_element(self.credit_card_types.keys())
104 elif isinstance(card_type, CreditCard):
105 return card_type
106 return self.credit_card_types[card_type]
107
108 def _generate_number(self, prefix, length):
109 """
110 'prefix' is the start of the CC number as a string, any number of digits.
111 'length' is the length of the CC number to generate. Typically 13 or 16
112 """
113 number = prefix
114 # Generate random char digits
115 number += '#' * (length - len(prefix) - 1)
116 number = self.numerify(number)
117 reverse = number[::-1]
118 # Calculate sum
119 tot = 0
120 pos = 0
121 while pos < length - 1:
122 tot += Provider.luhn_lookup[reverse[pos]]
123 if pos != (length - 2):
124 tot += int(reverse[pos + 1])
125 pos += 2
126 # Calculate check digit
127 check_digit = (10 - (tot % 10)) % 10
128 number += str(check_digit)
129 return number
130
[end of faker/providers/credit_card/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/providers/credit_card/__init__.py b/faker/providers/credit_card/__init__.py
--- a/faker/providers/credit_card/__init__.py
+++ b/faker/providers/credit_card/__init__.py
@@ -44,6 +44,7 @@
prefix_mastercard, 16, security_code='CVV')),
('visa16', CreditCard('VISA 16 digit', prefix_visa)),
('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),
+ ('visa19', CreditCard('VISA 19 digit', prefix_visa, 19)),
('amex', CreditCard('American Express', prefix_amex,
15, security_code='CID', security_code_length=4)),
('discover', CreditCard('Discover', prefix_discover)),
|
{"golden_diff": "diff --git a/faker/providers/credit_card/__init__.py b/faker/providers/credit_card/__init__.py\n--- a/faker/providers/credit_card/__init__.py\n+++ b/faker/providers/credit_card/__init__.py\n@@ -44,6 +44,7 @@\n prefix_mastercard, 16, security_code='CVV')),\n ('visa16', CreditCard('VISA 16 digit', prefix_visa)),\n ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),\n+ ('visa19', CreditCard('VISA 19 digit', prefix_visa, 19)),\n ('amex', CreditCard('American Express', prefix_amex,\n 15, security_code='CID', security_code_length=4)),\n ('discover', CreditCard('Discover', prefix_discover)),\n", "issue": "Visa now supports 19 digits credit card length\nreference: \r\nhttps://www.freeformatter.com/credit-card-number-generator-validator.html \n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\n\n\nclass CreditCard(object):\n\n def __init__(\n self,\n name,\n prefixes,\n length=16,\n security_code='CVC',\n security_code_length=3):\n self.name = name\n self.prefixes = prefixes\n self.length = length\n self.security_code = security_code\n self.security_code_length = security_code_length\n\n\nclass Provider(BaseProvider):\n\n # Prefixes from:\n # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29\n # * https://www.regular-expressions.info/creditcard.html\n # * https://creditcardjs.com/credit-card-type-detection\n prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',\n '6304', '6759', '6761', '6762', '6763', '0604', '6390']\n prefix_mastercard = ['51', '52', '53', '54', '55', '222%']\n prefix_visa = ['4']\n prefix_amex = ['34', '37']\n prefix_discover = ['6011', '65']\n prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']\n prefix_jcb16 = ['35']\n prefix_jcb15 = ['2131', '1800']\n\n credit_card_types = OrderedDict((\n ('maestro', CreditCard('Maestro',\n prefix_maestro, 12, security_code='CVV')),\n ('mastercard', CreditCard('Mastercard',\n prefix_mastercard, 16, security_code='CVV')),\n ('visa16', CreditCard('VISA 16 digit', prefix_visa)),\n ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),\n ('amex', CreditCard('American Express', prefix_amex,\n 15, security_code='CID', security_code_length=4)),\n ('discover', CreditCard('Discover', prefix_discover)),\n ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),\n ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),\n ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),\n ))\n credit_card_types['visa'] = credit_card_types['visa16']\n credit_card_types['jcb'] = credit_card_types['jcb16']\n\n luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8,\n '5': 1, '6': 3, '7': 5, '8': 7, '9': 9}\n\n def credit_card_provider(self, card_type=None):\n \"\"\" Returns the provider's name of the credit card. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n return self._credit_card_type(card_type).name\n\n def credit_card_number(self, card_type=None):\n \"\"\" Returns a valid credit card number. 
\"\"\"\n card = self._credit_card_type(card_type)\n prefix = self.random_element(card.prefixes)\n number = self._generate_number(self.numerify(prefix), card.length)\n return number\n\n def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):\n expire_date = self.generator.date_time_between(start, end)\n return expire_date.strftime(date_format)\n\n def credit_card_full(self, card_type=None):\n card = self._credit_card_type(card_type)\n\n tpl = ('{provider}\\n'\n '{owner}\\n'\n '{number} {expire_date}\\n'\n '{security}: {security_nb}\\n')\n\n tpl = tpl.format(provider=card.name,\n owner=self.generator.parse(\n \"{{first_name}} {{last_name}}\"),\n number=self.credit_card_number(card),\n expire_date=self.credit_card_expire(),\n security=card.security_code,\n security_nb=self.credit_card_security_code(card))\n\n return self.generator.parse(tpl)\n\n def credit_card_security_code(self, card_type=None):\n \"\"\" Returns a security code string. \"\"\"\n sec_len = self._credit_card_type(card_type).security_code_length\n return self.numerify('#' * sec_len)\n\n def _credit_card_type(self, card_type=None):\n \"\"\" Returns a random credit card type instance. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n elif isinstance(card_type, CreditCard):\n return card_type\n return self.credit_card_types[card_type]\n\n def _generate_number(self, prefix, length):\n \"\"\"\n 'prefix' is the start of the CC number as a string, any number of digits.\n 'length' is the length of the CC number to generate. Typically 13 or 16\n \"\"\"\n number = prefix\n # Generate random char digits\n number += '#' * (length - len(prefix) - 1)\n number = self.numerify(number)\n reverse = number[::-1]\n # Calculate sum\n tot = 0\n pos = 0\n while pos < length - 1:\n tot += Provider.luhn_lookup[reverse[pos]]\n if pos != (length - 2):\n tot += int(reverse[pos + 1])\n pos += 2\n # Calculate check digit\n check_digit = (10 - (tot % 10)) % 10\n number += str(check_digit)\n return number\n", "path": "faker/providers/credit_card/__init__.py"}]}
| 2,193 | 192 |
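The faker record above registers a 19-digit Visa card type, so `credit_card_number(card_type="visa19")` becomes available once the patch is applied. A small sketch for sanity-checking the generated numbers; the `luhn_valid` helper below is an illustrative re-implementation of the checksum, not part of faker:

```python
from faker import Faker

def luhn_valid(number: str) -> bool:
    """Return True if a numeric card string passes the Luhn checksum."""
    total = 0
    for i, ch in enumerate(reversed(number)):
        digit = int(ch)
        if i % 2 == 1:        # double every second digit from the right
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0

fake = Faker()
number = fake.credit_card_number(card_type="visa19")  # available after the patch
assert len(number) == 19 and number.startswith("4") and luhn_valid(number)
```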
gh_patches_debug_15372
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-1269
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pwndbg.gdblib.arch.current is wrong if executed in a pwntools gdbscript command
TL;DR: This should not fail, but does so, because the `patch` command fetches `pwndbg.gdblib.arch.current` which for some reason is wrong/not updated.
```py
from pwn import *
gdbscript = '''
tbreak main
patch $rip 'xor rax, rax'
continue
'''
p = gdb.debug('/bin/ls', gdbscript=gdbscript)
p.interactive()
```
Stacktrace:
```
Traceback (most recent call last):
File "/home/dc/tools/pwndbg/pwndbg/commands/__init__.py", line 145, in __call__
return self.function(*args, **kwargs)
File "/home/dc/tools/pwndbg/pwndbg/commands/__init__.py", line 216, in _OnlyWhenRunning
return function(*a, **kw)
File "/home/dc/tools/pwndbg/pwndbg/commands/patch.py", line 25, in patch
new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)
File "/home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py", line 1444, in setter
raise AttributeError("Invalid arch/bits combination: %s/%s" % (arch, bits))
AttributeError: Invalid arch/bits combination: i386/64
If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues
(Please don't forget to search if it hasn't been reported before)
To generate the report and open a browser, you may run `bugreport --run-browser`
PS: Pull requests are welcome
> /home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py(1444)setter()
-> raise AttributeError("Invalid arch/bits combination: %s/%s" % (arch, bits))
(Pdb) p arch, bits
('i386', 64)
(Pdb)
```
</issue>
<code>
[start of pwndbg/commands/patch.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import argparse
4
5 from pwnlib.asm import asm
6 from pwnlib.asm import disasm
7
8 import pwndbg.color.message as message
9 import pwndbg.commands
10 import pwndbg.gdblib.memory
11 import pwndbg.lib.memoize
12
13 # Keep old patches made so we can revert them
14 patches = {}
15
16
17 parser = argparse.ArgumentParser(description="Patches given instruction with given code or bytes")
18 parser.add_argument("address", type=int, help="The address to patch")
19 parser.add_argument("ins", type=str, help="instruction[s]")
20
21
22 @pwndbg.commands.ArgparsedCommand(parser)
23 @pwndbg.commands.OnlyWhenRunning
24 def patch(address, ins):
25 new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)
26
27 old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))
28
29 patches[address] = (old_mem, new_mem)
30
31 pwndbg.gdblib.memory.write(address, new_mem)
32
33 pwndbg.lib.memoize.reset()
34
35
36 parser2 = argparse.ArgumentParser(description="Revert patch at given address")
37 parser2.add_argument("address", type=int, help="Address to revert patch on")
38
39
40 @pwndbg.commands.ArgparsedCommand(parser2)
41 @pwndbg.commands.OnlyWhenRunning
42 def patch_revert(address):
43 if not patches:
44 print(message.notice("No patches to revert"))
45 return
46
47 if address == -1:
48 for addr, (old, _new) in patches.items():
49 pwndbg.gdblib.memory.write(addr, old)
50 print(message.notice("Reverted patch at %#x" % addr))
51 patches.clear()
52 else:
53 old, _new = patches[address]
54 pwndbg.gdblib.memory.write(address, old)
55
56 pwndbg.lib.memoize.reset()
57
58
59 parser3 = argparse.ArgumentParser(description="List all patches")
60
61
62 @pwndbg.commands.ArgparsedCommand(parser3)
63 @pwndbg.commands.OnlyWhenRunning
64 def patch_list():
65 if not patches:
66 print(message.hint("No patches to list"))
67 return
68
69 print(message.hint("Patches:"))
70 for addr, (old, new) in patches.items():
71 old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)
72 new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)
73
74 print(
75 message.hint("Patch at"),
76 message.warning("%#x:" % addr),
77 message.hint("from"),
78 message.warning(old_insns.replace("\n", "; ")),
79 message.hint("to"),
80 message.warning(new_insns.replace("\n", "; ")),
81 )
82
[end of pwndbg/commands/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/commands/patch.py b/pwndbg/commands/patch.py
--- a/pwndbg/commands/patch.py
+++ b/pwndbg/commands/patch.py
@@ -22,7 +22,7 @@
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWhenRunning
def patch(address, ins):
- new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)
+ new_mem = asm(ins)
old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))
@@ -68,8 +68,8 @@
print(message.hint("Patches:"))
for addr, (old, new) in patches.items():
- old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)
- new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)
+ old_insns = disasm(old)
+ new_insns = disasm(new)
print(
message.hint("Patch at"),
|
{"golden_diff": "diff --git a/pwndbg/commands/patch.py b/pwndbg/commands/patch.py\n--- a/pwndbg/commands/patch.py\n+++ b/pwndbg/commands/patch.py\n@@ -22,7 +22,7 @@\n @pwndbg.commands.ArgparsedCommand(parser)\n @pwndbg.commands.OnlyWhenRunning\n def patch(address, ins):\n- new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)\n+ new_mem = asm(ins)\n \n old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))\n \n@@ -68,8 +68,8 @@\n \n print(message.hint(\"Patches:\"))\n for addr, (old, new) in patches.items():\n- old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)\n- new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)\n+ old_insns = disasm(old)\n+ new_insns = disasm(new)\n \n print(\n message.hint(\"Patch at\"),\n", "issue": "pwndbg.gdblib.arch.current is wrong if executed in a pwntools gdbscript command\nTL;DR: This should not fail, but does so, because the `patch` command fetches `pwndbg.gdblib.arch.current` which for some reason is wrong/not updated.\r\n\r\n\r\n```py\r\nfrom pwn import *\r\n\r\ngdbscript = '''\r\ntbreak main\r\npatch $rip 'xor rax, rax'\r\ncontinue\r\n'''\r\n\r\np = gdb.debug('/bin/ls', gdbscript=gdbscript)\r\n\r\np.interactive()\r\n```\r\n\r\nStacktrace:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/dc/tools/pwndbg/pwndbg/commands/__init__.py\", line 145, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/dc/tools/pwndbg/pwndbg/commands/__init__.py\", line 216, in _OnlyWhenRunning\r\n return function(*a, **kw)\r\n File \"/home/dc/tools/pwndbg/pwndbg/commands/patch.py\", line 25, in patch\r\n new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)\r\n File \"/home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py\", line 1444, in setter\r\n raise AttributeError(\"Invalid arch/bits combination: %s/%s\" % (arch, bits))\r\nAttributeError: Invalid arch/bits combination: i386/64\r\n\r\nIf that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\r\n(Please don't forget to search if it hasn't been reported before)\r\nTo generate the report and open a browser, you may run `bugreport --run-browser`\r\nPS: Pull requests are welcome\r\n> /home/dc/.virtualenvs/pwn/lib/python3.8/site-packages/pwnlib/context/__init__.py(1444)setter()\r\n-> raise AttributeError(\"Invalid arch/bits combination: %s/%s\" % (arch, bits))\r\n(Pdb) p arch, bits\r\n('i386', 64)\r\n(Pdb) \r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport argparse\n\nfrom pwnlib.asm import asm\nfrom pwnlib.asm import disasm\n\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.gdblib.memory\nimport pwndbg.lib.memoize\n\n# Keep old patches made so we can revert them\npatches = {}\n\n\nparser = argparse.ArgumentParser(description=\"Patches given instruction with given code or bytes\")\nparser.add_argument(\"address\", type=int, help=\"The address to patch\")\nparser.add_argument(\"ins\", type=str, help=\"instruction[s]\")\n\n\[email protected](parser)\[email protected]\ndef patch(address, ins):\n new_mem = asm(ins, arch=pwndbg.gdblib.arch.current)\n\n old_mem = pwndbg.gdblib.memory.read(address, len(new_mem))\n\n patches[address] = (old_mem, new_mem)\n\n pwndbg.gdblib.memory.write(address, new_mem)\n\n pwndbg.lib.memoize.reset()\n\n\nparser2 = argparse.ArgumentParser(description=\"Revert patch at given address\")\nparser2.add_argument(\"address\", type=int, help=\"Address to revert patch on\")\n\n\[email protected](parser2)\[email protected]\ndef 
patch_revert(address):\n if not patches:\n print(message.notice(\"No patches to revert\"))\n return\n\n if address == -1:\n for addr, (old, _new) in patches.items():\n pwndbg.gdblib.memory.write(addr, old)\n print(message.notice(\"Reverted patch at %#x\" % addr))\n patches.clear()\n else:\n old, _new = patches[address]\n pwndbg.gdblib.memory.write(address, old)\n\n pwndbg.lib.memoize.reset()\n\n\nparser3 = argparse.ArgumentParser(description=\"List all patches\")\n\n\[email protected](parser3)\[email protected]\ndef patch_list():\n if not patches:\n print(message.hint(\"No patches to list\"))\n return\n\n print(message.hint(\"Patches:\"))\n for addr, (old, new) in patches.items():\n old_insns = disasm(old, arch=pwndbg.gdblib.arch.current)\n new_insns = disasm(new, arch=pwndbg.gdblib.arch.current)\n\n print(\n message.hint(\"Patch at\"),\n message.warning(\"%#x:\" % addr),\n message.hint(\"from\"),\n message.warning(old_insns.replace(\"\\n\", \"; \")),\n message.hint(\"to\"),\n message.warning(new_insns.replace(\"\\n\", \"; \")),\n )\n", "path": "pwndbg/commands/patch.py"}]}
| 1,758 | 236 |
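The pwndbg record above drops the explicit `arch=` argument so that `pwnlib.asm.asm` uses pwntools' own context instead of a stale `pwndbg.gdblib.arch.current` value. A minimal sketch of the same call outside GDB, assuming pwntools and a matching binutils assembler are installed:

```python
from pwnlib.context import context
from pwnlib.asm import asm

context.clear(arch="amd64")      # let the pwntools context pick arch/bits consistently
shellcode = asm("xor rax, rax")  # assembles to b"\x48\x31\xc0"
print(shellcode.hex())           # 4831c0
```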
gh_patches_debug_27937
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-5332
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lutris can not launch a game through a .desktop file
Hello, first of all thank you for the amazing work you have done in the linux gaming scene.
When Lutris imports a game from the local source (a desktop file in .local/share/applications), it can not be launched.
I am using ULWGL for launching the game through the desktop file.
[Desktop Entry]
Type=Application
Categories=Game
Name=Grim Dawn
Icon=/usr/share/icons/Papirus-Dark/16x16/places/folder-games.svg
Path=/HDD/GranAlmacen/Juegos/Grim Dawn
Exec=env GAMEID="" WINEPREFIX="/HDD/GranAlmacen/Juegos/Grim Dawn/wine" ulwgl-run-cli "x64/Grim Dawn.exe"
Cheers !
</issue>
<code>
[start of lutris/services/xdg.py]
1 """XDG applications service"""
2 import json
3 import os
4 import re
5 import shlex
6 import subprocess
7 from gettext import gettext as _
8
9 from gi.repository import Gio
10
11 from lutris import settings
12 from lutris.database.games import get_games_where
13 from lutris.services.base import BaseService
14 from lutris.services.service_game import ServiceGame
15 from lutris.services.service_media import ServiceMedia
16 from lutris.util import system
17 from lutris.util.log import logger
18 from lutris.util.strings import slugify
19
20
21 def get_appid(app):
22 """Get the appid for the game"""
23 try:
24 return os.path.splitext(app.get_id())[0]
25 except UnicodeDecodeError:
26 logger.exception(
27 "Failed to read ID for app %s (non UTF-8 encoding). Reverting to executable name.",
28 app,
29 )
30 return app.get_executable()
31
32
33 class XDGMedia(ServiceMedia):
34 service = "xdg"
35 source = "local"
36 size = (64, 64)
37 dest_path = os.path.join(settings.CACHE_DIR, "xdg/icons")
38 file_patterns = ["%s.png"]
39
40
41 class XDGService(BaseService):
42 id = "xdg"
43 name = _("Local")
44 icon = "linux"
45 runner = "linux"
46 online = False
47 local = True
48 medias = {"icon": XDGMedia}
49
50 ignored_games = ("lutris",)
51 ignored_executables = ("lutris", "steam")
52 ignored_categories = ("Emulator", "Development", "Utility")
53
54 @classmethod
55 def iter_xdg_games(cls):
56 """Iterates through XDG games only"""
57 for app in Gio.AppInfo.get_all():
58 if cls._is_importable(app):
59 yield app
60
61 @property
62 def lutris_games(self):
63 """Iterates through Lutris games imported from XDG"""
64 for game in get_games_where(runner=XDGGame.runner, installer_slug=XDGGame.installer_slug, installed=1):
65 yield game
66
67 @classmethod
68 def _is_importable(cls, app):
69 """Returns whether a XDG game is importable to Lutris"""
70 appid = get_appid(app)
71 executable = app.get_executable() or ""
72 if any(
73 [
74 app.get_nodisplay() or app.get_is_hidden(), # App is hidden
75 not executable, # Check app has an executable
76 appid.startswith("net.lutris"), # Skip lutris created shortcuts
77 appid.lower() in map(str.lower, cls.ignored_games), # game blacklisted
78 executable.lower() in cls.ignored_executables, # exe blacklisted
79 ]
80 ):
81 return False
82
83 # must be in Game category
84 categories = app.get_categories() or ""
85 categories = list(filter(None, categories.lower().split(";")))
86 if "game" not in categories:
87 return False
88
89 # contains a blacklisted category
90 ignored_categories = set(c.casefold() for c in cls.ignored_categories)
91 if any(c for c in categories if c in ignored_categories):
92 return False
93 return True
94
95 def match_games(self):
96 """XDG games aren't on the lutris website"""
97 return
98
99 def load(self):
100 """Return the list of games stored in the XDG menu."""
101 xdg_games = [XDGGame.new_from_xdg_app(app) for app in self.iter_xdg_games()]
102 for game in xdg_games:
103 game.save()
104 return xdg_games
105
106 def generate_installer(self, db_game):
107 details = json.loads(db_game["details"])
108 return {
109 "name": db_game["name"],
110 "version": "XDG",
111 "slug": db_game["slug"],
112 "game_slug": self.get_installed_slug(db_game),
113 "runner": self.get_installed_runner_name(db_game),
114 "script": {
115 "game": {
116 "exe": details["exe"],
117 "args": details["args"],
118 },
119 "system": {"disable_runtime": True},
120 },
121 }
122
123 def get_installed_runner_name(self, db_game):
124 return self.runner
125
126 def get_game_directory(self, installer):
127 """Pull install location from installer"""
128 return os.path.dirname(installer["script"]["game"]["exe"])
129
130
131 class XDGGame(ServiceGame):
132 """XDG game (Linux game with a desktop launcher)"""
133
134 service = "xdg"
135 runner = "linux"
136 installer_slug = "desktopapp"
137
138 @staticmethod
139 def get_app_icon(xdg_app):
140 """Return the name of the icon for an XDG app if one if set"""
141 icon = xdg_app.get_icon()
142 if not icon:
143 return ""
144 return icon.to_string()
145
146 @classmethod
147 def new_from_xdg_app(cls, xdg_app):
148 """Create a service game from a XDG entry"""
149 service_game = cls()
150 service_game.name = xdg_app.get_display_name()
151 service_game.icon = cls.get_app_icon(xdg_app)
152 service_game.appid = get_appid(xdg_app)
153 service_game.slug = cls.get_slug(xdg_app)
154 exe, args = cls.get_command_args(xdg_app)
155 service_game.details = json.dumps(
156 {
157 "exe": exe,
158 "args": args,
159 }
160 )
161 return service_game
162
163 @staticmethod
164 def get_command_args(app):
165 """Return a tuple with absolute command path and an argument string"""
166 command = shlex.split(app.get_commandline())
167 # remove %U etc. and change %% to % in arguments
168 args = list(map(lambda arg: re.sub("%[^%]", "", arg).replace("%%", "%"), command[1:]))
169 exe = command[0]
170 if not exe.startswith("/"):
171 exe = system.find_executable(exe)
172 return exe, subprocess.list2cmdline(args)
173
174 @staticmethod
175 def get_slug(xdg_app):
176 """Get the slug from the game name"""
177 return slugify(xdg_app.get_display_name()) or slugify(get_appid(xdg_app))
178
[end of lutris/services/xdg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/services/xdg.py b/lutris/services/xdg.py
--- a/lutris/services/xdg.py
+++ b/lutris/services/xdg.py
@@ -115,6 +115,7 @@
"game": {
"exe": details["exe"],
"args": details["args"],
+ "working_dir": details["path"],
},
"system": {"disable_runtime": True},
},
@@ -152,14 +153,27 @@
service_game.appid = get_appid(xdg_app)
service_game.slug = cls.get_slug(xdg_app)
exe, args = cls.get_command_args(xdg_app)
+ path = cls.get_desktop_entry_path(xdg_app)
service_game.details = json.dumps(
{
"exe": exe,
"args": args,
+ "path": path,
}
)
return service_game
+ @staticmethod
+ def get_desktop_entry_path(xdg_app):
+ """Retrieve the Path variable from the .desktop file"""
+ desktop_entry = xdg_app.get_filename()
+ with open(desktop_entry, 'r') as f:
+ contents = f.read()
+ match = re.search(r'^Path=(.*)$', contents, re.MULTILINE)
+ if match:
+ return match.group(1)
+ return None
+
@staticmethod
def get_command_args(app):
"""Return a tuple with absolute command path and an argument string"""
|
{"golden_diff": "diff --git a/lutris/services/xdg.py b/lutris/services/xdg.py\n--- a/lutris/services/xdg.py\n+++ b/lutris/services/xdg.py\n@@ -115,6 +115,7 @@\n \"game\": {\n \"exe\": details[\"exe\"],\n \"args\": details[\"args\"],\n+ \"working_dir\": details[\"path\"],\n },\n \"system\": {\"disable_runtime\": True},\n },\n@@ -152,14 +153,27 @@\n service_game.appid = get_appid(xdg_app)\n service_game.slug = cls.get_slug(xdg_app)\n exe, args = cls.get_command_args(xdg_app)\n+ path = cls.get_desktop_entry_path(xdg_app)\n service_game.details = json.dumps(\n {\n \"exe\": exe,\n \"args\": args,\n+ \"path\": path,\n }\n )\n return service_game\n \n+ @staticmethod\n+ def get_desktop_entry_path(xdg_app):\n+ \"\"\"Retrieve the Path variable from the .desktop file\"\"\"\n+ desktop_entry = xdg_app.get_filename()\n+ with open(desktop_entry, 'r') as f:\n+ contents = f.read()\n+ match = re.search(r'^Path=(.*)$', contents, re.MULTILINE)\n+ if match:\n+ return match.group(1)\n+ return None\n+\n @staticmethod\n def get_command_args(app):\n \"\"\"Return a tuple with absolute command path and an argument string\"\"\"\n", "issue": "Lutris can not launch a game through a .desktop file\nHello, first of all thank you for the amazing work you have done in the linux gaming scene.\r\n\r\nWhen Lutris imports a game from the local source (a desktop file in .local/share/applications), it can not be launched.\r\nI am using ULWGL for launching the game through the desktop file.\r\n\r\n[Desktop Entry]\r\nType=Application\r\nCategories=Game\r\nName=Grim Dawn\r\nIcon=/usr/share/icons/Papirus-Dark/16x16/places/folder-games.svg\r\nPath=/HDD/GranAlmacen/Juegos/Grim Dawn\r\nExec=env GAMEID=\"\" WINEPREFIX=\"/HDD/GranAlmacen/Juegos/Grim Dawn/wine\" ulwgl-run-cli \"x64/Grim Dawn.exe\"\r\n\r\nCheers !\n", "before_files": [{"content": "\"\"\"XDG applications service\"\"\"\nimport json\nimport os\nimport re\nimport shlex\nimport subprocess\nfrom gettext import gettext as _\n\nfrom gi.repository import Gio\n\nfrom lutris import settings\nfrom lutris.database.games import get_games_where\nfrom lutris.services.base import BaseService\nfrom lutris.services.service_game import ServiceGame\nfrom lutris.services.service_media import ServiceMedia\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.util.strings import slugify\n\n\ndef get_appid(app):\n \"\"\"Get the appid for the game\"\"\"\n try:\n return os.path.splitext(app.get_id())[0]\n except UnicodeDecodeError:\n logger.exception(\n \"Failed to read ID for app %s (non UTF-8 encoding). 
Reverting to executable name.\",\n app,\n )\n return app.get_executable()\n\n\nclass XDGMedia(ServiceMedia):\n service = \"xdg\"\n source = \"local\"\n size = (64, 64)\n dest_path = os.path.join(settings.CACHE_DIR, \"xdg/icons\")\n file_patterns = [\"%s.png\"]\n\n\nclass XDGService(BaseService):\n id = \"xdg\"\n name = _(\"Local\")\n icon = \"linux\"\n runner = \"linux\"\n online = False\n local = True\n medias = {\"icon\": XDGMedia}\n\n ignored_games = (\"lutris\",)\n ignored_executables = (\"lutris\", \"steam\")\n ignored_categories = (\"Emulator\", \"Development\", \"Utility\")\n\n @classmethod\n def iter_xdg_games(cls):\n \"\"\"Iterates through XDG games only\"\"\"\n for app in Gio.AppInfo.get_all():\n if cls._is_importable(app):\n yield app\n\n @property\n def lutris_games(self):\n \"\"\"Iterates through Lutris games imported from XDG\"\"\"\n for game in get_games_where(runner=XDGGame.runner, installer_slug=XDGGame.installer_slug, installed=1):\n yield game\n\n @classmethod\n def _is_importable(cls, app):\n \"\"\"Returns whether a XDG game is importable to Lutris\"\"\"\n appid = get_appid(app)\n executable = app.get_executable() or \"\"\n if any(\n [\n app.get_nodisplay() or app.get_is_hidden(), # App is hidden\n not executable, # Check app has an executable\n appid.startswith(\"net.lutris\"), # Skip lutris created shortcuts\n appid.lower() in map(str.lower, cls.ignored_games), # game blacklisted\n executable.lower() in cls.ignored_executables, # exe blacklisted\n ]\n ):\n return False\n\n # must be in Game category\n categories = app.get_categories() or \"\"\n categories = list(filter(None, categories.lower().split(\";\")))\n if \"game\" not in categories:\n return False\n\n # contains a blacklisted category\n ignored_categories = set(c.casefold() for c in cls.ignored_categories)\n if any(c for c in categories if c in ignored_categories):\n return False\n return True\n\n def match_games(self):\n \"\"\"XDG games aren't on the lutris website\"\"\"\n return\n\n def load(self):\n \"\"\"Return the list of games stored in the XDG menu.\"\"\"\n xdg_games = [XDGGame.new_from_xdg_app(app) for app in self.iter_xdg_games()]\n for game in xdg_games:\n game.save()\n return xdg_games\n\n def generate_installer(self, db_game):\n details = json.loads(db_game[\"details\"])\n return {\n \"name\": db_game[\"name\"],\n \"version\": \"XDG\",\n \"slug\": db_game[\"slug\"],\n \"game_slug\": self.get_installed_slug(db_game),\n \"runner\": self.get_installed_runner_name(db_game),\n \"script\": {\n \"game\": {\n \"exe\": details[\"exe\"],\n \"args\": details[\"args\"],\n },\n \"system\": {\"disable_runtime\": True},\n },\n }\n\n def get_installed_runner_name(self, db_game):\n return self.runner\n\n def get_game_directory(self, installer):\n \"\"\"Pull install location from installer\"\"\"\n return os.path.dirname(installer[\"script\"][\"game\"][\"exe\"])\n\n\nclass XDGGame(ServiceGame):\n \"\"\"XDG game (Linux game with a desktop launcher)\"\"\"\n\n service = \"xdg\"\n runner = \"linux\"\n installer_slug = \"desktopapp\"\n\n @staticmethod\n def get_app_icon(xdg_app):\n \"\"\"Return the name of the icon for an XDG app if one if set\"\"\"\n icon = xdg_app.get_icon()\n if not icon:\n return \"\"\n return icon.to_string()\n\n @classmethod\n def new_from_xdg_app(cls, xdg_app):\n \"\"\"Create a service game from a XDG entry\"\"\"\n service_game = cls()\n service_game.name = xdg_app.get_display_name()\n service_game.icon = cls.get_app_icon(xdg_app)\n service_game.appid = get_appid(xdg_app)\n service_game.slug = 
cls.get_slug(xdg_app)\n exe, args = cls.get_command_args(xdg_app)\n service_game.details = json.dumps(\n {\n \"exe\": exe,\n \"args\": args,\n }\n )\n return service_game\n\n @staticmethod\n def get_command_args(app):\n \"\"\"Return a tuple with absolute command path and an argument string\"\"\"\n command = shlex.split(app.get_commandline())\n # remove %U etc. and change %% to % in arguments\n args = list(map(lambda arg: re.sub(\"%[^%]\", \"\", arg).replace(\"%%\", \"%\"), command[1:]))\n exe = command[0]\n if not exe.startswith(\"/\"):\n exe = system.find_executable(exe)\n return exe, subprocess.list2cmdline(args)\n\n @staticmethod\n def get_slug(xdg_app):\n \"\"\"Get the slug from the game name\"\"\"\n return slugify(xdg_app.get_display_name()) or slugify(get_appid(xdg_app))\n", "path": "lutris/services/xdg.py"}]}
| 2,456 | 333 |
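The lutris record above stores the `Path=` key of the .desktop entry and feeds it to the generated installer as the game's working directory. The helper from the diff, rewritten as a standalone function for experimentation, assuming a plain, non-localised `[Desktop Entry]` section; the file path in the comment is only an example:

```python
import re
from typing import Optional

def get_desktop_entry_path(desktop_file: str) -> Optional[str]:
    """Return the Path= working directory declared in a .desktop file, if any."""
    with open(desktop_file, "r") as f:
        contents = f.read()
    match = re.search(r"^Path=(.*)$", contents, re.MULTILINE)
    return match.group(1) if match else None

# e.g. get_desktop_entry_path("/home/user/.local/share/applications/grim-dawn.desktop")
# would return "/HDD/GranAlmacen/Juegos/Grim Dawn" for the entry quoted in the issue.
```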
gh_patches_debug_10386
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-8684
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-3138] [Bug] Error: Invalid value for '--warn-error-options'
### Is this a new bug in dbt-core?
- [X] I believe this is a new bug in dbt-core
- [X] I have searched the existing issues, and I could not find an existing issue for this bug
### Current Behavior
When installing `dbt-core` into an environment where an older version of click is installed, `dbt-core` will throw a vague error `Error: Invalid value for '--warn-error-options': Cannot load YAML from type <class 'dbt.helper_types.WarnErrorOptions'>`. It seems `dbt-core` is pinning `click<9` which allows it to use older versions of click that it seems no longer compatible with. I think `dbt-core` should be pinning a minimum version requirement of click that plays well with the library.
### Expected Behavior
No error.
### Steps To Reproduce
Install `click==8.0.0`
Install the latest `dbt-core`
Attempt to use the dbt cli w/ a command `dbt ls` or something
### Relevant log output
```shell
Error: Invalid value for '--warn-error-options': Cannot load YAML from type <class 'dbt.helper_types.WarnErrorOptions'>
```
### Environment
```markdown
- OS:
- Python: 3.9.1
- dbt-core: 1.6.3
pip freeze output
agate==1.7.1
asn1crypto==1.5.1
attrs==23.1.0
Babel==2.12.1
certifi==2023.7.22
cffi==1.15.1
charset-normalizer==3.2.0
click==8.0.0
colorama==0.4.6
cryptography==41.0.4
dbt-core==1.6.3
dbt-extractor==0.4.1
dbt-semantic-interfaces==0.2.0
filelock==3.12.4
hologram==0.0.16
idna==3.4
importlib-metadata==6.8.0
isodate==0.6.1
jaraco.classes==3.3.0
Jinja2==3.1.2
jsonschema==3.2.0
keyring==24.2.0
leather==0.3.4
Logbook==1.5.3
MarkupSafe==2.1.3
mashumaro==3.8.1
minimal-snowplow-tracker==0.0.2
more-itertools==8.14.0
msgpack==1.0.5
networkx==3.1
oscrypto==1.3.0
packaging==23.1
parsedatetime==2.6
pathspec==0.11.2
platformdirs==3.8.1
protobuf==4.24.3
pycparser==2.21
pycryptodomex==3.19.0
pydantic==1.10.12
PyJWT==2.8.0
pyOpenSSL==23.2.0
pyrsistent==0.19.3
python-dateutil==2.8.2
python-slugify==8.0.1
pytimeparse==1.1.8
pytz==2023.3.post1
PyYAML==6.0.1
requests==2.31.0
six==1.16.0
snowflake-connector-python==3.2.0
sortedcontainers==2.4.0
sqlparse==0.4.4
text-unidecode==1.3
tomlkit==0.12.1
typing_extensions==4.8.0
urllib3==1.26.16
zipp==3.17.0
```
### Which database adapter are you using with dbt?
snowflake
### Additional Context
_No response_
</issue>
<code>
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 8):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.8 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.7.0b2"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.cli.main:cli"],
47 },
48 install_requires=[
49 # ----
50 # dbt-core uses these packages deeply, throughout the codebase, and there have been breaking changes in past patch releases (even though these are major-version-one).
51 # Pin to the patch or minor version, and bump in each new minor version of dbt-core.
52 "agate~=1.7.0",
53 "Jinja2~=3.1.2",
54 "mashumaro[msgpack]~=3.9",
55 # ----
56 # Legacy: This package has not been updated since 2019, and it is unused in dbt's logging system (since v1.0)
57 # The dependency here will be removed along with the removal of 'legacy logging', in a future release of dbt-core
58 "logbook>=1.5,<1.6",
59 # ----
60 # dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility
61 # with major versions in each new minor version of dbt-core.
62 "click<9",
63 "networkx>=2.3,<4",
64 # ----
65 # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)
66 # and check compatibility / bump in each new minor version of dbt-core.
67 "colorama>=0.3.9,<0.5",
68 "pathspec>=0.9,<0.12",
69 "isodate>=0.6,<0.7",
70 # ----
71 "sqlparse>=0.2.3,<0.5",
72 # ----
73 # These are major-version-0 packages also maintained by dbt-labs. Accept patches.
74 "dbt-extractor~=0.5.0",
75 "minimal-snowplow-tracker~=0.0.2",
76 # DSI is under active development, so we're pinning to specific dev versions for now.
77 "dbt-semantic-interfaces~=0.2.0",
78 # ----
79 # Expect compatibility with all new versions of these packages, so lower bounds only.
80 "jsonschema>=3.0",
81 "packaging>20.9",
82 "protobuf>=4.0.0",
83 "pytz>=2015.7",
84 "pyyaml>=6.0",
85 "typing-extensions>=3.7.4",
86 # ----
87 # Match snowflake-connector-python, to ensure compatibility in dbt-snowflake
88 "cffi>=1.9,<2.0.0",
89 "idna>=2.5,<4",
90 "requests<3.0.0",
91 "urllib3~=1.0",
92 # ----
93 ],
94 zip_safe=False,
95 classifiers=[
96 "Development Status :: 5 - Production/Stable",
97 "License :: OSI Approved :: Apache Software License",
98 "Operating System :: Microsoft :: Windows",
99 "Operating System :: MacOS :: MacOS X",
100 "Operating System :: POSIX :: Linux",
101 "Programming Language :: Python :: 3.8",
102 "Programming Language :: Python :: 3.9",
103 "Programming Language :: Python :: 3.10",
104 "Programming Language :: Python :: 3.11",
105 ],
106 python_requires=">=3.8",
107 )
108
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -59,7 +59,7 @@
# ----
# dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility
# with major versions in each new minor version of dbt-core.
- "click<9",
+ "click>=8.0.2,<9",
"networkx>=2.3,<4",
# ----
# These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)
|
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -59,7 +59,7 @@\n # ----\n # dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility\n # with major versions in each new minor version of dbt-core.\n- \"click<9\",\n+ \"click>=8.0.2,<9\",\n \"networkx>=2.3,<4\",\n # ----\n # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)\n", "issue": "[CT-3138] [Bug] Error: Invalid value for '--warn-error-options'\n### Is this a new bug in dbt-core?\n\n- [X] I believe this is a new bug in dbt-core\n- [X] I have searched the existing issues, and I could not find an existing issue for this bug\n\n### Current Behavior\n\nWhen installing `dbt-core` into an environment where an older version of click is installed, `dbt-core` will throw a vague error `Error: Invalid value for '--warn-error-options': Cannot load YAML from type <class 'dbt.helper_types.WarnErrorOptions'>`. It seems `dbt-core` is pinning `click<9` which allows it to use older versions of click that it seems no longer compatible with. I think `dbt-core` should be pinning a minimum version requirement of click that plays well with the library.\n\n### Expected Behavior\n\nNo error.\n\n### Steps To Reproduce\n\nInstall `click==8.0.0`\r\nInstall the latest `dbt-core`\r\nAttempt to use the dbt cli w/ a command `dbt ls` or something\n\n### Relevant log output\n\n```shell\nError: Invalid value for '--warn-error-options': Cannot load YAML from type <class 'dbt.helper_types.WarnErrorOptions'>\n```\n\n\n### Environment\n\n```markdown\n- OS: \r\n- Python: 3.9.1\r\n- dbt-core: 1.6.3\r\n\r\npip freeze output\r\nagate==1.7.1\r\nasn1crypto==1.5.1\r\nattrs==23.1.0\r\nBabel==2.12.1\r\ncertifi==2023.7.22\r\ncffi==1.15.1\r\ncharset-normalizer==3.2.0\r\nclick==8.0.0\r\ncolorama==0.4.6\r\ncryptography==41.0.4\r\ndbt-core==1.6.3\r\ndbt-extractor==0.4.1\r\ndbt-semantic-interfaces==0.2.0\r\nfilelock==3.12.4\r\nhologram==0.0.16\r\nidna==3.4\r\nimportlib-metadata==6.8.0\r\nisodate==0.6.1\r\njaraco.classes==3.3.0\r\nJinja2==3.1.2\r\njsonschema==3.2.0\r\nkeyring==24.2.0\r\nleather==0.3.4\r\nLogbook==1.5.3\r\nMarkupSafe==2.1.3\r\nmashumaro==3.8.1\r\nminimal-snowplow-tracker==0.0.2\r\nmore-itertools==8.14.0\r\nmsgpack==1.0.5\r\nnetworkx==3.1\r\noscrypto==1.3.0\r\npackaging==23.1\r\nparsedatetime==2.6\r\npathspec==0.11.2\r\nplatformdirs==3.8.1\r\nprotobuf==4.24.3\r\npycparser==2.21\r\npycryptodomex==3.19.0\r\npydantic==1.10.12\r\nPyJWT==2.8.0\r\npyOpenSSL==23.2.0\r\npyrsistent==0.19.3\r\npython-dateutil==2.8.2\r\npython-slugify==8.0.1\r\npytimeparse==1.1.8\r\npytz==2023.3.post1\r\nPyYAML==6.0.1\r\nrequests==2.31.0\r\nsix==1.16.0\r\nsnowflake-connector-python==3.2.0\r\nsortedcontainers==2.4.0\r\nsqlparse==0.4.4\r\ntext-unidecode==1.3\r\ntomlkit==0.12.1\r\ntyping_extensions==4.8.0\r\nurllib3==1.26.16\r\nzipp==3.17.0\n```\n\n\n### Which database adapter are you using with dbt?\n\nsnowflake\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 8):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.8 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade 
setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.7.0b2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n # ----\n # dbt-core uses these packages deeply, throughout the codebase, and there have been breaking changes in past patch releases (even though these are major-version-one).\n # Pin to the patch or minor version, and bump in each new minor version of dbt-core.\n \"agate~=1.7.0\",\n \"Jinja2~=3.1.2\",\n \"mashumaro[msgpack]~=3.9\",\n # ----\n # Legacy: This package has not been updated since 2019, and it is unused in dbt's logging system (since v1.0)\n # The dependency here will be removed along with the removal of 'legacy logging', in a future release of dbt-core\n \"logbook>=1.5,<1.6\",\n # ----\n # dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility\n # with major versions in each new minor version of dbt-core.\n \"click<9\",\n \"networkx>=2.3,<4\",\n # ----\n # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)\n # and check compatibility / bump in each new minor version of dbt-core.\n \"colorama>=0.3.9,<0.5\",\n \"pathspec>=0.9,<0.12\",\n \"isodate>=0.6,<0.7\",\n # ----\n \"sqlparse>=0.2.3,<0.5\",\n # ----\n # These are major-version-0 packages also maintained by dbt-labs. Accept patches.\n \"dbt-extractor~=0.5.0\",\n \"minimal-snowplow-tracker~=0.0.2\",\n # DSI is under active development, so we're pinning to specific dev versions for now.\n \"dbt-semantic-interfaces~=0.2.0\",\n # ----\n # Expect compatibility with all new versions of these packages, so lower bounds only.\n \"jsonschema>=3.0\",\n \"packaging>20.9\",\n \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n \"pyyaml>=6.0\",\n \"typing-extensions>=3.7.4\",\n # ----\n # Match snowflake-connector-python, to ensure compatibility in dbt-snowflake\n \"cffi>=1.9,<2.0.0\",\n \"idna>=2.5,<4\",\n \"requests<3.0.0\",\n \"urllib3~=1.0\",\n # ----\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.8\",\n)\n", "path": "core/setup.py"}]}
| 2,690 | 136 |
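The dbt record above tightens the click pin to `>=8.0.2,<9`, which rules out the click 8.0.0 install from the reported environment. A small runtime guard that mirrors the new floor when debugging such an environment; this check is illustrative and not something dbt-core itself performs:

```python
import click
from packaging.version import Version

installed = Version(click.__version__)
if installed < Version("8.0.2"):
    raise RuntimeError(
        f"click {installed} is below the 8.0.2 floor pinned by dbt-core; "
        "upgrade with: pip install --upgrade 'click>=8.0.2,<9'"
    )
```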
gh_patches_debug_7928
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-1650
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Program doesn't correctly change visually when submitted
**Describe the bug**
Currently when we submit a program the user receives no feedback if their submission was successful. This issue might occur due to #1628 where we re-write the programs page structure to prevent re-loading to update user information. The program is correctly submitted in the back-end but not on the front-end (yet). With the additional issue that now the submitting achievements are not correctly shown to the user.
**Expected behavior**
When submitting a program the program box on the programs page should change visually and show the submission achievement if received.
</issue>
<code>
[start of website/achievements.py]
1 from website import database
2 from hedyweb import AchievementTranslations
3 from website.auth import requires_login
4 from flask import request, jsonify, session
5 import hedy
6
7
8 class Achievements:
9
10 def __init__(self):
11 self.DATABASE = database.Database()
12 self.TRANSLATIONS = AchievementTranslations()
13
14 def routes(self, app, database):
15 global DATABASE
16 DATABASE = database
17
18 @app.route('/achievements', methods=['POST'])
19 @requires_login
20 def push_new_achievement(user):
21 body = request.json
22 if "achievement" in body:
23 if not session['achieved']:
24 self.initialize_user_data(user['username'])
25 if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']):
26 return jsonify({"achievements": self.verify_pushed_achievement(user.get('username'), body['achievement'])})
27 return jsonify({})
28
29 def increase_count(self, category):
30 if category == "run":
31 session['run_programs'] += 1
32 elif category == "saved":
33 session['saved_programs'] += 1
34 elif category == "submitted":
35 session['submitted_programs'] += 1
36
37 def initialize_user_data(self, username):
38 achievements_data = self.DATABASE.progress_by_username(username)
39 session['new_achieved'] = []
40 session['new_commands'] = []
41 session['previous_code'] = None
42 session['identical_consecutive_errors'] = 0
43 session['consecutive_errors'] = 0
44 if not achievements_data:
45 achievements_data = {}
46 if 'achieved' in achievements_data:
47 session['achieved'] = achievements_data['achieved']
48 else:
49 session['achieved'] = []
50 if 'commands' in achievements_data:
51 session['commands'] = achievements_data['commands']
52 else:
53 session['commands'] = []
54 if 'run_programs' in achievements_data:
55 session['run_programs'] = achievements_data['run_programs']
56 else:
57 session['run_programs'] = 0
58 if 'saved_programs' in achievements_data:
59 session['saved_programs'] = achievements_data['saved_programs']
60 else:
61 session['saved_programs'] = 0
62 if 'submitted_programs' in achievements_data:
63 session['submitted_programs'] = achievements_data['submitted_programs']
64 else:
65 session['submitted_programs'] = 0
66
67 def add_single_achievement(self, username, achievement):
68 if not session['achieved']:
69 self.initialize_user_data(username)
70 if achievement not in session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']):
71 return self.verify_pushed_achievement(username, achievement)
72 else:
73 return None
74
75 def verify_run_achievements(self, username, code=None, level=None, response=None):
76 if not session['achieved']:
77 self.initialize_user_data(username)
78 self.check_programs_run()
79 if code and level:
80 self.check_code_achievements(code, level)
81 if code and response:
82 self.check_response_achievements(code, response)
83
84 if len(session['commands']) > 0:
85 for command in session['new_commands']:
86 session['commands'].append(command)
87 self.DATABASE.add_commands_to_username(username, session['commands'])
88
89 if len(session['new_achieved']) > 0:
90 self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
91 for achievement in session['new_achieved']:
92 session['achieved'].append(achievement)
93 return True
94 return False
95
96 def verify_save_achievements(self, username, adventure=None):
97 if not session['achieved']:
98 self.initialize_user_data(username)
99 self.check_programs_saved()
100 if adventure and 'adventure_is_worthwhile' not in session['achieved']:
101 session['new_achieved'].append("adventure_is_worthwhile")
102 if len(session['new_achieved']) > 0:
103 self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
104 for achievement in session['new_achieved']:
105 session['achieved'].append(achievement)
106 return True
107 return False
108
109 def verify_submit_achievements(self, username):
110 if not session['achieved']:
111 self.initialize_user_data(username)
112 self.check_programs_submitted(session['submitted_programs'])
113
114 if len(session['new_achieved']) > 0:
115 self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
116 for achievement in session['new_achieved']:
117 session['achieved'].append(achievement)
118 return True
119 return False
120
121 def verify_pushed_achievement(self, username, achievement):
122 session['new_achieved'] = [achievement]
123 self.DATABASE.add_achievement_to_username(username, achievement)
124 session['achieved'].append(achievement)
125 return self.get_earned_achievements()
126
127 def get_earned_achievements(self):
128 translations = self.TRANSLATIONS.get_translations(session['lang'])
129 translated_achievements = []
130 for achievement in session['new_achieved']:
131 translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']])
132 session['new_achieved'] = [] #Once we get earned achievements -> empty the array with "waiting" ones
133 session['new_commands'] = []
134 return translated_achievements
135
136 def check_programs_run(self):
137 if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1:
138 session['new_achieved'].append("getting_started_I")
139 if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10:
140 session['new_achieved'].append("getting_started_II")
141 if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50:
142 session['new_achieved'].append("getting_started_III")
143 if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200:
144 session['new_achieved'].append("getting_started_IV")
145 if 'getting_started_V' not in session['achieved'] and session['run_programs'] >= 500:
146 session['new_achieved'].append("getting_started_V")
147
148 def check_programs_saved(self):
149 if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1:
150 session['new_achieved'].append("one_to_remember_I")
151 if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5:
152 session['new_achieved'].append("one_to_remember_II")
153 if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10:
154 session['new_achieved'].append("one_to_remember_III")
155 if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25:
156 session['new_achieved'].append("one_to_remember_IV")
157 if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50:
158 session['new_achieved'].append("one_to_remember_V")
159
160 def check_programs_submitted(self):
161 if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1:
162 session['new_achieved'].append("deadline_daredevil_I")
163 if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3:
164 session['new_achieved'].append("deadline_daredevil_II")
165 if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10:
166 session['new_achieved'].append("deadline_daredevil_III")
167
168 def check_code_achievements(self, code, level):
169 commands_in_code = hedy.all_commands(code, level, session['lang'])
170 if 'trying_is_key' not in session['achieved']:
171 for command in set(commands_in_code):
172 if command not in session['commands']:
173 session['new_commands'].append(command)
174 if set(session['commands']) == set(hedy.commands_per_level.get(hedy.HEDY_MAX_LEVEL)):
175 session['new_achieved'].append("trying_is_key")
176 if 'did_you_say_please' not in session['achieved'] and "ask" in hedy.all_commands(code, level, session['lang']):
177 session['new_achieved'].append("did_you_say_please")
178 if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count("ask") >= 5:
179 session['new_achieved'].append("talk-talk-talk")
180 if 'hedy_honor' not in session['achieved'] and "Hedy" in code:
181 session['new_achieved'].append("hedy_honor")
182 if 'hedy-ious' not in session['achieved']:
183 all_print_arguments = hedy.all_print_arguments(code, level, session['lang'])
184 for argument in all_print_arguments:
185 if all_print_arguments.count(argument) >= 10:
186 session['new_achieved'].append("hedy-ious")
187 break
188
189
190 def check_response_achievements(self, code, response):
191 if 'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']:
192 session['new_achieved'].append("ninja_turtle")
193 if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']:
194 session['new_achieved'].append("watch_out")
195 if 'Error' in response and response['Error']:
196 session['consecutive_errors'] += 1
197 if session['previous_code'] == code:
198 if session['identical_consecutive_errors'] == 0:
199 session['identical_consecutive_errors'] += 2 #We have to count the first one too!
200 session['identical_consecutive_errors'] += 1
201 if session['identical_consecutive_errors'] >= 3:
202 if 'programming_panic' not in session['achieved']:
203 session['new_achieved'].append("programming_panic")
204 session['previous_code'] = code
205 else:
206 if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1:
207 session['new_achieved'].append("programming_protagonist")
208 session['consecutive_errors'] = 0
209 session['identical_consecutive_errors'] = 0
210
211
212
213
[end of website/achievements.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/achievements.py b/website/achievements.py
--- a/website/achievements.py
+++ b/website/achievements.py
@@ -109,7 +109,7 @@
def verify_submit_achievements(self, username):
if not session['achieved']:
self.initialize_user_data(username)
- self.check_programs_submitted(session['submitted_programs'])
+ self.check_programs_submitted()
if len(session['new_achieved']) > 0:
self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
|
{"golden_diff": "diff --git a/website/achievements.py b/website/achievements.py\n--- a/website/achievements.py\n+++ b/website/achievements.py\n@@ -109,7 +109,7 @@\n def verify_submit_achievements(self, username):\n if not session['achieved']:\n self.initialize_user_data(username)\n- self.check_programs_submitted(session['submitted_programs'])\n+ self.check_programs_submitted()\n \n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n", "issue": "[BUG] Program doesn't correctly change visually when submitted\n**Describe the bug**\r\nCurrently when we submit a program the user receives no feedback if their submission was successful. This issue might occur due to #1628 where we re-write the programs page structure to prevent re-loading to update user information. The program is correctly submitted in the back-end but not on the front-end (yet). With the additional issue that now the submitting achievements are not correctly shown to the user.\r\n\r\n**Expected behavior**\r\nWhen submitting a program the program box on the programs page should change visually and show the submission achievement if received.\r\n\n", "before_files": [{"content": "from website import database\nfrom hedyweb import AchievementTranslations\nfrom website.auth import requires_login\nfrom flask import request, jsonify, session\nimport hedy\n\n\nclass Achievements:\n\n def __init__(self):\n self.DATABASE = database.Database()\n self.TRANSLATIONS = AchievementTranslations()\n\n def routes(self, app, database):\n global DATABASE\n DATABASE = database\n\n @app.route('/achievements', methods=['POST'])\n @requires_login\n def push_new_achievement(user):\n body = request.json\n if \"achievement\" in body:\n if not session['achieved']:\n self.initialize_user_data(user['username'])\n if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']):\n return jsonify({\"achievements\": self.verify_pushed_achievement(user.get('username'), body['achievement'])})\n return jsonify({})\n\n def increase_count(self, category):\n if category == \"run\":\n session['run_programs'] += 1\n elif category == \"saved\":\n session['saved_programs'] += 1\n elif category == \"submitted\":\n session['submitted_programs'] += 1\n\n def initialize_user_data(self, username):\n achievements_data = self.DATABASE.progress_by_username(username)\n session['new_achieved'] = []\n session['new_commands'] = []\n session['previous_code'] = None\n session['identical_consecutive_errors'] = 0\n session['consecutive_errors'] = 0\n if not achievements_data:\n achievements_data = {}\n if 'achieved' in achievements_data:\n session['achieved'] = achievements_data['achieved']\n else:\n session['achieved'] = []\n if 'commands' in achievements_data:\n session['commands'] = achievements_data['commands']\n else:\n session['commands'] = []\n if 'run_programs' in achievements_data:\n session['run_programs'] = achievements_data['run_programs']\n else:\n session['run_programs'] = 0\n if 'saved_programs' in achievements_data:\n session['saved_programs'] = achievements_data['saved_programs']\n else:\n session['saved_programs'] = 0\n if 'submitted_programs' in achievements_data:\n session['submitted_programs'] = achievements_data['submitted_programs']\n else:\n session['submitted_programs'] = 0\n\n def add_single_achievement(self, username, achievement):\n if not session['achieved']:\n self.initialize_user_data(username)\n if achievement not in 
session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']):\n return self.verify_pushed_achievement(username, achievement)\n else:\n return None\n\n def verify_run_achievements(self, username, code=None, level=None, response=None):\n if not session['achieved']:\n self.initialize_user_data(username)\n self.check_programs_run()\n if code and level:\n self.check_code_achievements(code, level)\n if code and response:\n self.check_response_achievements(code, response)\n\n if len(session['commands']) > 0:\n for command in session['new_commands']:\n session['commands'].append(command)\n self.DATABASE.add_commands_to_username(username, session['commands'])\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_save_achievements(self, username, adventure=None):\n if not session['achieved']:\n self.initialize_user_data(username)\n self.check_programs_saved()\n if adventure and 'adventure_is_worthwhile' not in session['achieved']:\n session['new_achieved'].append(\"adventure_is_worthwhile\")\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_submit_achievements(self, username):\n if not session['achieved']:\n self.initialize_user_data(username)\n self.check_programs_submitted(session['submitted_programs'])\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_pushed_achievement(self, username, achievement):\n session['new_achieved'] = [achievement]\n self.DATABASE.add_achievement_to_username(username, achievement)\n session['achieved'].append(achievement)\n return self.get_earned_achievements()\n\n def get_earned_achievements(self):\n translations = self.TRANSLATIONS.get_translations(session['lang'])\n translated_achievements = []\n for achievement in session['new_achieved']:\n translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']])\n session['new_achieved'] = [] #Once we get earned achievements -> empty the array with \"waiting\" ones\n session['new_commands'] = []\n return translated_achievements\n\n def check_programs_run(self):\n if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1:\n session['new_achieved'].append(\"getting_started_I\")\n if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10:\n session['new_achieved'].append(\"getting_started_II\")\n if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50:\n session['new_achieved'].append(\"getting_started_III\")\n if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200:\n session['new_achieved'].append(\"getting_started_IV\")\n if 'getting_started_V' not in session['achieved'] and session['run_programs'] >= 500:\n session['new_achieved'].append(\"getting_started_V\")\n\n def check_programs_saved(self):\n if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1:\n 
session['new_achieved'].append(\"one_to_remember_I\")\n if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5:\n session['new_achieved'].append(\"one_to_remember_II\")\n if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10:\n session['new_achieved'].append(\"one_to_remember_III\")\n if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25:\n session['new_achieved'].append(\"one_to_remember_IV\")\n if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50:\n session['new_achieved'].append(\"one_to_remember_V\")\n\n def check_programs_submitted(self):\n if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1:\n session['new_achieved'].append(\"deadline_daredevil_I\")\n if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3:\n session['new_achieved'].append(\"deadline_daredevil_II\")\n if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10:\n session['new_achieved'].append(\"deadline_daredevil_III\")\n\n def check_code_achievements(self, code, level):\n commands_in_code = hedy.all_commands(code, level, session['lang'])\n if 'trying_is_key' not in session['achieved']:\n for command in set(commands_in_code):\n if command not in session['commands']:\n session['new_commands'].append(command)\n if set(session['commands']) == set(hedy.commands_per_level.get(hedy.HEDY_MAX_LEVEL)):\n session['new_achieved'].append(\"trying_is_key\")\n if 'did_you_say_please' not in session['achieved'] and \"ask\" in hedy.all_commands(code, level, session['lang']):\n session['new_achieved'].append(\"did_you_say_please\")\n if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count(\"ask\") >= 5:\n session['new_achieved'].append(\"talk-talk-talk\")\n if 'hedy_honor' not in session['achieved'] and \"Hedy\" in code:\n session['new_achieved'].append(\"hedy_honor\")\n if 'hedy-ious' not in session['achieved']:\n all_print_arguments = hedy.all_print_arguments(code, level, session['lang'])\n for argument in all_print_arguments:\n if all_print_arguments.count(argument) >= 10:\n session['new_achieved'].append(\"hedy-ious\")\n break\n\n\n def check_response_achievements(self, code, response):\n if 'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']:\n session['new_achieved'].append(\"ninja_turtle\")\n if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']:\n session['new_achieved'].append(\"watch_out\")\n if 'Error' in response and response['Error']:\n session['consecutive_errors'] += 1\n if session['previous_code'] == code:\n if session['identical_consecutive_errors'] == 0:\n session['identical_consecutive_errors'] += 2 #We have to count the first one too!\n session['identical_consecutive_errors'] += 1\n if session['identical_consecutive_errors'] >= 3:\n if 'programming_panic' not in session['achieved']:\n session['new_achieved'].append(\"programming_panic\")\n session['previous_code'] = code\n else:\n if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1:\n session['new_achieved'].append(\"programming_protagonist\")\n session['consecutive_errors'] = 0\n session['identical_consecutive_errors'] = 0\n\n\n\n", "path": "website/achievements.py"}]}
| 3,590 | 133 |
gh_patches_debug_15569
|
rasdani/github-patches
|
git_diff
|
lightly-ai__lightly-215
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ignore lightly outputs when creating a dataset
When working on a folder of images, e.g.
```
dataset/
L image_1.jpg
L image_2.jpg
L image_3.jpg
```
the following commands will not work when in the dataset directory:
```
lightly-embed input_dir=./
lightly-train input_dir=./
lightly-magic input_dir=./
```
This is because the command-line tool creates a directory `lightly_outputs` where logs and results are stored. However, when creating the `LightlyDataset`, this directory will be interpreted as a subfolder with images in it which leads to an error. We can handle this by ignoring the `lightly_outputs` directory.
</issue>
<code>
[start of lightly/data/_helpers.py]
1 """ Helper Functions """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import os
7 from torchvision import datasets
8
9 from lightly.data._image import DatasetFolder
10
11 try:
12 from lightly.data._video import VideoDataset
13 VIDEO_DATASET_AVAILABLE = True
14 except Exception as e:
15 VIDEO_DATASET_AVAILABLE = False
16 VIDEO_DATASET_ERRORMSG = e
17
18
19 IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',
20 '.pgm', '.tif', '.tiff', '.webp')
21
22 VIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')
23
24
25 def _contains_videos(root: str, extensions: tuple):
26 """Checks whether directory contains video files.
27
28 Args:
29 root: Root directory path.
30
31 Returns:
32 True if root contains subdirectories else false.
33 """
34 list_dir = os.listdir(root)
35 is_video = \
36 [f.lower().endswith(extensions) for f in list_dir]
37 return any(is_video)
38
39
40 def _contains_subdirs(root: str):
41 """Checks whether directory contains subdirectories.
42
43 Args:
44 root: Root directory path.
45
46 Returns:
47 True if root contains subdirectories else false.
48
49 """
50 list_dir = os.listdir(root)
51 is_dir = \
52 [os.path.isdir(os.path.join(root, f)) for f in list_dir]
53 return any(is_dir)
54
55
56 def _load_dataset_from_folder(root: str, transform):
57 """Initializes dataset from folder.
58
59 Args:
60 root: (str) Root directory path
61 transform: (torchvision.transforms.Compose) image transformations
62
63 Returns:
64 Dataset consisting of images in the root directory.
65
66 """
67
68 # if there is a video in the input directory but we do not have
69 # the right dependencies, raise a ValueError
70 contains_videos = _contains_videos(root, VIDEO_EXTENSIONS)
71 if contains_videos and not VIDEO_DATASET_AVAILABLE:
72 raise ValueError(f'The input directory {root} contains videos '
73 'but the VideoDataset is not available. \n'
74 'Make sure you have installed the right '
75 'dependencies. The error from the imported '
76 f'module was: {VIDEO_DATASET_ERRORMSG}')
77
78 if contains_videos:
79 # root contains videos -> create a video dataset
80 dataset = VideoDataset(root,
81 extensions=VIDEO_EXTENSIONS,
82 transform=transform)
83 elif _contains_subdirs(root):
84 # root contains subdirectories -> create an image folder dataset
85 dataset = datasets.ImageFolder(root,
86 transform=transform)
87 else:
88 # root contains plain images -> create a folder dataset
89 dataset = DatasetFolder(root,
90 extensions=IMG_EXTENSIONS,
91 transform=transform)
92
93 return dataset
94
95
96 def _load_dataset(input_dir: str,
97 transform=None):
98 """Initializes dataset from torchvision or from folder.
99
100 Args:
101 root: (str) Directory where dataset is stored
102 name: (str) Name of the dataset (e.g. cifar10, cifar100)
103 train: (bool) Use the training set
104 download: (bool) Download the dataset
105 transform: (torchvision.transforms.Compose) image transformations
106 from_folder: (str) Path to directory holding the images to load.
107
108 Returns:
109 A torchvision dataset
110
111 Raises:
112 ValueError: If the specified dataset doesn't exist
113
114 """
115
116 if not os.path.exists(input_dir):
117 raise ValueError(f'The input directory {input_dir} does not exist!')
118
119 return _load_dataset_from_folder(input_dir, transform)
120
[end of lightly/data/_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lightly/data/_helpers.py b/lightly/data/_helpers.py
--- a/lightly/data/_helpers.py
+++ b/lightly/data/_helpers.py
@@ -37,6 +37,19 @@
return any(is_video)
+def _is_lightly_output_dir(dirname: str):
+ """Checks whether the directory is a lightly_output directory.
+
+ Args:
+ dirname: Directory to check.
+
+ Returns:
+ True if dirname is "lightly_outputs" else false.
+
+ """
+ return 'lightly_outputs' in dirname
+
+
def _contains_subdirs(root: str):
"""Checks whether directory contains subdirectories.
@@ -48,6 +61,7 @@
"""
list_dir = os.listdir(root)
+ list_dir = list(filter(lambda x: not _is_lightly_output_dir(x), list_dir))
is_dir = \
[os.path.isdir(os.path.join(root, f)) for f in list_dir]
return any(is_dir)
|
{"golden_diff": "diff --git a/lightly/data/_helpers.py b/lightly/data/_helpers.py\n--- a/lightly/data/_helpers.py\n+++ b/lightly/data/_helpers.py\n@@ -37,6 +37,19 @@\n return any(is_video)\n \n \n+def _is_lightly_output_dir(dirname: str):\n+ \"\"\"Checks whether the directory is a lightly_output directory.\n+\n+ Args:\n+ dirname: Directory to check.\n+\n+ Returns:\n+ True if dirname is \"lightly_outputs\" else false.\n+\n+ \"\"\"\n+ return 'lightly_outputs' in dirname\n+\n+\n def _contains_subdirs(root: str):\n \"\"\"Checks whether directory contains subdirectories.\n \n@@ -48,6 +61,7 @@\n \n \"\"\"\n list_dir = os.listdir(root)\n+ list_dir = list(filter(lambda x: not _is_lightly_output_dir(x), list_dir))\n is_dir = \\\n [os.path.isdir(os.path.join(root, f)) for f in list_dir]\n return any(is_dir)\n", "issue": "Ignore lightly outputs when creating a dataset\nWhen working on a folder of images, e.g.\r\n```\r\ndataset/\r\nL image_1.jpg\r\nL image_2.jpg\r\nL image_3.jpg\r\n```\r\nthe following commands will not work when in the dataset directory:\r\n```\r\nlightly-embed input_dir=./\r\nlightly-train input_dir=./\r\nlightly-magic input_dir=./\r\n```\r\n\r\nThis is because the command-line tool creates a directory `lightly_outputs` where logs and results are stored. However, when creating the `LightlyDataset`, this directory will be interpreted as a subfolder with images in it which leads to an error. We can handle this by ignoring the `lightly_outputs` directory.\n", "before_files": [{"content": "\"\"\" Helper Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport os\nfrom torchvision import datasets\n\nfrom lightly.data._image import DatasetFolder\n\ntry:\n from lightly.data._video import VideoDataset\n VIDEO_DATASET_AVAILABLE = True\nexcept Exception as e:\n VIDEO_DATASET_AVAILABLE = False\n VIDEO_DATASET_ERRORMSG = e\n\n\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',\n '.pgm', '.tif', '.tiff', '.webp')\n\nVIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')\n\n\ndef _contains_videos(root: str, extensions: tuple):\n \"\"\"Checks whether directory contains video files.\n\n Args:\n root: Root directory path.\n\n Returns:\n True if root contains subdirectories else false.\n \"\"\"\n list_dir = os.listdir(root)\n is_video = \\\n [f.lower().endswith(extensions) for f in list_dir]\n return any(is_video)\n\n\ndef _contains_subdirs(root: str):\n \"\"\"Checks whether directory contains subdirectories.\n\n Args:\n root: Root directory path.\n\n Returns:\n True if root contains subdirectories else false.\n\n \"\"\"\n list_dir = os.listdir(root)\n is_dir = \\\n [os.path.isdir(os.path.join(root, f)) for f in list_dir]\n return any(is_dir)\n\n\ndef _load_dataset_from_folder(root: str, transform):\n \"\"\"Initializes dataset from folder.\n\n Args:\n root: (str) Root directory path\n transform: (torchvision.transforms.Compose) image transformations\n\n Returns:\n Dataset consisting of images in the root directory.\n\n \"\"\"\n\n # if there is a video in the input directory but we do not have\n # the right dependencies, raise a ValueError\n contains_videos = _contains_videos(root, VIDEO_EXTENSIONS)\n if contains_videos and not VIDEO_DATASET_AVAILABLE:\n raise ValueError(f'The input directory {root} contains videos '\n 'but the VideoDataset is not available. \\n'\n 'Make sure you have installed the right '\n 'dependencies. 
The error from the imported '\n f'module was: {VIDEO_DATASET_ERRORMSG}')\n\n if contains_videos:\n # root contains videos -> create a video dataset\n dataset = VideoDataset(root,\n extensions=VIDEO_EXTENSIONS,\n transform=transform)\n elif _contains_subdirs(root):\n # root contains subdirectories -> create an image folder dataset\n dataset = datasets.ImageFolder(root,\n transform=transform)\n else:\n # root contains plain images -> create a folder dataset\n dataset = DatasetFolder(root,\n extensions=IMG_EXTENSIONS,\n transform=transform)\n\n return dataset\n\n\ndef _load_dataset(input_dir: str,\n transform=None):\n \"\"\"Initializes dataset from torchvision or from folder.\n\n Args:\n root: (str) Directory where dataset is stored\n name: (str) Name of the dataset (e.g. cifar10, cifar100)\n train: (bool) Use the training set\n download: (bool) Download the dataset\n transform: (torchvision.transforms.Compose) image transformations\n from_folder: (str) Path to directory holding the images to load.\n\n Returns:\n A torchvision dataset\n\n Raises:\n ValueError: If the specified dataset doesn't exist\n\n \"\"\"\n\n if not os.path.exists(input_dir):\n raise ValueError(f'The input directory {input_dir} does not exist!')\n\n return _load_dataset_from_folder(input_dir, transform)\n", "path": "lightly/data/_helpers.py"}]}
| 1,708 | 221 |
gh_patches_debug_15140
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-1381
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Plugging it in area is not accessible on dev server
This link should work: https://cs-unplugged-dev.appspot.com/plugging-it-in/
</issue>
<code>
[start of csunplugged/config/urls.py]
1 """URL configuration for the Django system.
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/dev/topics/http/urls/
5 """
6
7 from django.conf import settings
8 from django.urls import include, path
9 from django.conf.urls.i18n import i18n_patterns
10 from django.contrib import admin
11
12 urlpatterns = i18n_patterns(
13 path('', include('general.urls', namespace='general')),
14 path('topics/', include('topics.urls', namespace='topics')),
15 path('resources/', include('resources.urls', namespace='resources')),
16 path('at-home/', include('at_home.urls', namespace='at_home')),
17 )
18
19 urlpatterns += [
20 path('', include('classic.urls')),
21 path('en/search/', include('search.urls', namespace='search')),
22 path('admin/', admin.site.urls),
23 ]
24
25 if not settings.DJANGO_PRODUCTION:
26 urlpatterns += [
27 path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),
28 ]
29
30 if settings.DEBUG: # pragma: no cover
31 import debug_toolbar
32 urlpatterns += [
33 path('__debug__/', include(debug_toolbar.urls)),
34 ]
35 urlpatterns += i18n_patterns(
36 path('__dev__/', include('dev.urls', namespace='dev')),
37 )
38 # These patterns allows these error pages to be debugged during development.
39 from django.views import defaults
40 urlpatterns += [
41 path('400/', defaults.bad_request, kwargs={'exception': Exception('Bad request')}),
42 path('403/', defaults.permission_denied, kwargs={'exception': Exception('Permissin denied')}),
43 path('404/', defaults.page_not_found, kwargs={'exception': Exception('Page not found')}),
44 path('500/', defaults.server_error),
45 ]
46
[end of csunplugged/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py
--- a/csunplugged/config/urls.py
+++ b/csunplugged/config/urls.py
@@ -8,6 +8,8 @@
from django.urls import include, path
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
+import environ
+env = environ.Env()
urlpatterns = i18n_patterns(
path('', include('general.urls', namespace='general')),
@@ -22,7 +24,7 @@
path('admin/', admin.site.urls),
]
-if not settings.DJANGO_PRODUCTION:
+if not env("DEPLOYMENT", default=None) == "prod":
urlpatterns += [
path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),
]
|
{"golden_diff": "diff --git a/csunplugged/config/urls.py b/csunplugged/config/urls.py\n--- a/csunplugged/config/urls.py\n+++ b/csunplugged/config/urls.py\n@@ -8,6 +8,8 @@\n from django.urls import include, path\n from django.conf.urls.i18n import i18n_patterns\n from django.contrib import admin\n+import environ\n+env = environ.Env()\n \n urlpatterns = i18n_patterns(\n path('', include('general.urls', namespace='general')),\n@@ -22,7 +24,7 @@\n path('admin/', admin.site.urls),\n ]\n \n-if not settings.DJANGO_PRODUCTION:\n+if not env(\"DEPLOYMENT\", default=None) == \"prod\":\n urlpatterns += [\n path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),\n ]\n", "issue": "Plugging it in area is not accessible on dev server\nThis link should work: https://cs-unplugged-dev.appspot.com/plugging-it-in/\n", "before_files": [{"content": "\"\"\"URL configuration for the Django system.\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/dev/topics/http/urls/\n\"\"\"\n\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\n\nurlpatterns = i18n_patterns(\n path('', include('general.urls', namespace='general')),\n path('topics/', include('topics.urls', namespace='topics')),\n path('resources/', include('resources.urls', namespace='resources')),\n path('at-home/', include('at_home.urls', namespace='at_home')),\n)\n\nurlpatterns += [\n path('', include('classic.urls')),\n path('en/search/', include('search.urls', namespace='search')),\n path('admin/', admin.site.urls),\n]\n\nif not settings.DJANGO_PRODUCTION:\n urlpatterns += [\n path('plugging-it-in/', include('plugging_it_in.urls', namespace='plugging_it_in')),\n ]\n\nif settings.DEBUG: # pragma: no cover\n import debug_toolbar\n urlpatterns += [\n path('__debug__/', include(debug_toolbar.urls)),\n ]\n urlpatterns += i18n_patterns(\n path('__dev__/', include('dev.urls', namespace='dev')),\n )\n # These patterns allows these error pages to be debugged during development.\n from django.views import defaults\n urlpatterns += [\n path('400/', defaults.bad_request, kwargs={'exception': Exception('Bad request')}),\n path('403/', defaults.permission_denied, kwargs={'exception': Exception('Permissin denied')}),\n path('404/', defaults.page_not_found, kwargs={'exception': Exception('Page not found')}),\n path('500/', defaults.server_error),\n ]\n", "path": "csunplugged/config/urls.py"}]}
| 1,040 | 191 |