problem_id (stringlengths 18-22) | source (stringclasses 1) | task_type (stringclasses 1) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_24574
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-13621
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
median
The PyTorch front end median function implemented in #13212 does not follow the correct behaviour. PyTorch's median returns a named tuple with the median values and their indices when an axis is provided to reduce along. The incorrect implementation has been reverted in #13480
https://pytorch.org/docs/stable/generated/torch.median.html
</issue>
<code>
[start of ivy/functional/frontends/torch/reduction_ops.py]
1 import ivy
2 from ivy.func_wrapper import with_unsupported_dtypes
3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
4 from collections import namedtuple
5
6
7 @to_ivy_arrays_and_back
8 def dist(input, other, p=2):
9 return ivy.vector_norm(ivy.subtract(input, other), ord=p)
10
11
12 @to_ivy_arrays_and_back
13 def argmax(input, dim=None, keepdim=False):
14 return ivy.argmax(input, axis=dim, keepdims=keepdim)
15
16
17 @to_ivy_arrays_and_back
18 def argmin(input, dim=None, keepdim=False):
19 return ivy.argmin(input, axis=dim, keepdims=keepdim).astype(ivy.int64)
20
21
22 @to_ivy_arrays_and_back
23 def amax(input, dim=None, keepdim=False, *, out=None):
24 return ivy.max(input, axis=dim, keepdims=keepdim, out=out)
25
26
27 @to_ivy_arrays_and_back
28 def amin(input, dim=None, keepdim=False, *, out=None):
29 return ivy.min(input, axis=dim, keepdims=keepdim, out=out)
30
31
32 @to_ivy_arrays_and_back
33 def all(input, dim=None, keepdim=False, *, out=None):
34 input_dtype = ivy.as_ivy_dtype(input.dtype)
35 ret = ivy.all(input, axis=dim, keepdims=keepdim, out=out)
36 if ivy.is_uint_dtype(input_dtype):
37 ret = ivy.astype(ret, input_dtype, out=out)
38 return ret
39
40
41 @to_ivy_arrays_and_back
42 def any(input, dim=None, keepdim=False, *, out=None):
43 input_dtype = ivy.as_ivy_dtype(input.dtype)
44 ret = ivy.any(input, axis=dim, keepdims=keepdim, out=out)
45 if ivy.is_uint_dtype(input_dtype):
46 ret = ivy.astype(ret, input_dtype, out=out)
47 return ret
48
49
50 @to_ivy_arrays_and_back
51 def sum(input, dim=None, keepdim=False, *, out=None):
52 return ivy.sum(input, axis=dim, keepdims=keepdim, out=out)
53
54
55 @to_ivy_arrays_and_back
56 def mean(input, dim, keepdim=False, *, out=None):
57 return ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
58
59
60 @to_ivy_arrays_and_back
61 def nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None):
62 return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)
63
64
65 @to_ivy_arrays_and_back
66 def std(input, dim, unbiased, keepdim=False, *, out=None):
67 return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
68
69
70 @to_ivy_arrays_and_back
71 @with_unsupported_dtypes(
72 {
73 "1.11.0 and below": (
74 "float16",
75 "bfloat16",
76 )
77 },
78 "torch",
79 )
80 # TODO: the original torch.prod places * right before `dtype`
81 def prod(input, dim, *, keepdim=False, dtype=None):
82 if not dtype:
83 if "int" in input.dtype:
84 dtype = ivy.int64
85 elif "float" in input.dtype:
86 dtype = ivy.float32
87 return ivy.prod(input, axis=dim, dtype=dtype, keepdims=keepdim)
88
89
90 @to_ivy_arrays_and_back
91 def var(input, dim, unbiased, keepdim=False, *, out=None):
92 return ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
93
94
95 @to_ivy_arrays_and_back
96 def min(input, dim=None, keepdim=False, *, out=None):
97 if dim is None:
98 return ivy.min(input, axis=dim, keepdims=keepdim, out=out)
99 elif out is not None:
100 ivy.min(input, axis=dim, keepdims=keepdim, out=out[0])
101 ivy.argmin(input, axis=dim, keepdims=keepdim, out=out[1])
102 return out
103 else:
104 min_tuple = namedtuple("min", ["values", "indices"])
105 return min_tuple(
106 ivy.min(input, axis=dim, keepdims=keepdim),
107 ivy.argmin(input, axis=dim, keepdims=keepdim),
108 )
109
110
111 @to_ivy_arrays_and_back
112 def max(input, dim=None, keepdim=False, *, out=None):
113 if dim is None:
114 return ivy.max(input, axis=dim, keepdims=keepdim, out=out)
115 elif out is not None:
116 ivy.max(input, axis=dim, keepdims=keepdim, out=out[0])
117 ivy.argmax(input, axis=dim, keepdims=keepdim, out=out[1])
118 return out
119 else:
120 max_tuple = namedtuple("max", ["values", "indices"])
121 return max_tuple(
122 ivy.max(input, axis=dim, keepdims=keepdim),
123 ivy.argmax(input, axis=dim, keepdims=keepdim),
124 )
125
126
127 @to_ivy_arrays_and_back
128 def moveaxis(input, source, destination):
129 return ivy.moveaxis(input, source, destination)
130
131
132 @to_ivy_arrays_and_back
133 def std_mean(input, dim, unbiased, keepdim=False, *, out=None):
134 temp_std = ivy.std(
135 input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out
136 )
137 temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
138 return temp_std, temp_mean
139
140
141 @to_ivy_arrays_and_back
142 def var_mean(input, dim, unbiased, keepdim=False, *, out=None):
143 temp_var = ivy.var(
144 input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out
145 )
146 temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)
147 return (temp_var, temp_mean)
148
149
150 @to_ivy_arrays_and_back
151 def aminmax(input, *, dim=None, keepdim=False, out=None):
152 minmax_tuple = namedtuple("minmax", ["min", "max"])
153 return minmax_tuple(
154 ivy.min(input, axis=dim, keepdims=keepdim, out=out),
155 ivy.max(input, axis=dim, keepdims=keepdim, out=out),
156 )
157
158
159 aminmax.unsupported_dtypes = {
160 "torch": ("float16", "bfloat16"),
161 "numpy": ("float16", "bfloat16"),
162 "jax": ("float16", "bfloat16"),
163 "tensorflow": ("float16", "bfloat16"),
164 }
165
166
167 @to_ivy_arrays_and_back
168 def quantile(input, q, dim=None, keepdim=False, *, interpolation="linear", out=None):
169 return ivy.quantile(
170 input, q, axis=dim, keepdims=keepdim, interpolation=interpolation, out=out
171 )
172
173
174 quantile.unsupported_dtypes = {
175 "torch": ("float16", "bfloat16"),
176 "numpy": ("float16", "bfloat16"),
177 "jax": ("float16", "bfloat16"),
178 "tensorflow": ("float16", "bfloat16"),
179 }
180
181
182 @to_ivy_arrays_and_back
183 def count_nonzero(input, dim=None):
184 return ivy.count_nonzero(input, axis=dim).astype(ivy.int64)
185
[end of ivy/functional/frontends/torch/reduction_ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/torch/reduction_ops.py b/ivy/functional/frontends/torch/reduction_ops.py
--- a/ivy/functional/frontends/torch/reduction_ops.py
+++ b/ivy/functional/frontends/torch/reduction_ops.py
@@ -62,6 +62,34 @@
return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)
+@to_ivy_arrays_and_back
+def median(input, dim=None, keepdim=False, *, out=None):
+ if dim is None:
+ input = ivy.reshape(input, (-1,))
+ sorted_input = ivy.sort(input)
+ return sorted_input[(sorted_input.shape[0] - 1) // 2]
+
+ median_tuple = namedtuple("median", ["values", "indices"])
+
+ if input.ndim == 0:
+ result = median_tuple(input, ivy.array(0))
+ else:
+ sorted_indices = ivy.argsort(input, axis=dim)
+ median_indices = ivy.gather(sorted_indices, (sorted_indices.shape[dim] - 1) // 2, axis=dim)
+ median_values = ivy.take_along_axis(input, ivy.expand_dims(median_indices, axis=dim), dim).squeeze(dim)
+
+ if keepdim:
+ median_values = ivy.expand_dims(median_values, axis=dim)
+ median_indices = ivy.expand_dims(median_indices, axis=dim)
+
+ result = median_tuple(median_values, median_indices)
+ if out is not None:
+ ivy.inplace_update(out[0], result.values)
+ ivy.inplace_update(out[1], result.indices)
+ return out
+ return result
+
+
@to_ivy_arrays_and_back
def std(input, dim, unbiased, keepdim=False, *, out=None):
return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/reduction_ops.py b/ivy/functional/frontends/torch/reduction_ops.py\n--- a/ivy/functional/frontends/torch/reduction_ops.py\n+++ b/ivy/functional/frontends/torch/reduction_ops.py\n@@ -62,6 +62,34 @@\n return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)\r\n \r\n \r\n+@to_ivy_arrays_and_back\r\n+def median(input, dim=None, keepdim=False, *, out=None):\r\n+ if dim is None:\r\n+ input = ivy.reshape(input, (-1,))\r\n+ sorted_input = ivy.sort(input)\r\n+ return sorted_input[(sorted_input.shape[0] - 1) // 2]\r\n+\r\n+ median_tuple = namedtuple(\"median\", [\"values\", \"indices\"])\r\n+\r\n+ if input.ndim == 0:\r\n+ result = median_tuple(input, ivy.array(0))\r\n+ else:\r\n+ sorted_indices = ivy.argsort(input, axis=dim)\r\n+ median_indices = ivy.gather(sorted_indices, (sorted_indices.shape[dim] - 1) // 2, axis=dim)\r\n+ median_values = ivy.take_along_axis(input, ivy.expand_dims(median_indices, axis=dim), dim).squeeze(dim)\r\n+\r\n+ if keepdim:\r\n+ median_values = ivy.expand_dims(median_values, axis=dim)\r\n+ median_indices = ivy.expand_dims(median_indices, axis=dim)\r\n+\r\n+ result = median_tuple(median_values, median_indices)\r\n+ if out is not None:\r\n+ ivy.inplace_update(out[0], result.values)\r\n+ ivy.inplace_update(out[1], result.indices)\r\n+ return out\r\n+ return result\r\n+\r\n+\r\n @to_ivy_arrays_and_back\r\n def std(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\n", "issue": "median\nThe PyTorch front end median function implemented in #13212 does not follow the correct behaviour. PyTorch's median returns a named tuple with the median and their indices when a axis is provided to reduce along. 
The incorrect implementation has been reverted in #13480 \r\n\r\nhttps://pytorch.org/docs/stable/generated/torch.median.html\n", "before_files": [{"content": "import ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\r\nfrom collections import namedtuple\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef dist(input, other, p=2):\r\n return ivy.vector_norm(ivy.subtract(input, other), ord=p)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argmax(input, dim=None, keepdim=False):\r\n return ivy.argmax(input, axis=dim, keepdims=keepdim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef argmin(input, dim=None, keepdim=False):\r\n return ivy.argmin(input, axis=dim, keepdims=keepdim).astype(ivy.int64)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef amax(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.max(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef amin(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.min(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef all(input, dim=None, keepdim=False, *, out=None):\r\n input_dtype = ivy.as_ivy_dtype(input.dtype)\r\n ret = ivy.all(input, axis=dim, keepdims=keepdim, out=out)\r\n if ivy.is_uint_dtype(input_dtype):\r\n ret = ivy.astype(ret, input_dtype, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef any(input, dim=None, keepdim=False, *, out=None):\r\n input_dtype = ivy.as_ivy_dtype(input.dtype)\r\n ret = ivy.any(input, axis=dim, keepdims=keepdim, out=out)\r\n if ivy.is_uint_dtype(input_dtype):\r\n ret = ivy.astype(ret, input_dtype, out=out)\r\n return ret\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef sum(input, dim=None, keepdim=False, *, out=None):\r\n return ivy.sum(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef mean(input, dim, keepdim=False, *, out=None):\r\n return ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None):\r\n return ivy.nanmean(input, axis=dim, keepdims=keepdim, dtype=dtype, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef std(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.std(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\n@with_unsupported_dtypes(\r\n {\r\n \"1.11.0 and below\": (\r\n \"float16\",\r\n \"bfloat16\",\r\n )\r\n },\r\n \"torch\",\r\n)\r\n# TODO: the original torch.prod places * right before `dtype`\r\ndef prod(input, dim, *, keepdim=False, dtype=None):\r\n if not dtype:\r\n if \"int\" in input.dtype:\r\n dtype = ivy.int64\r\n elif \"float\" in input.dtype:\r\n dtype = ivy.float32\r\n return ivy.prod(input, axis=dim, dtype=dtype, keepdims=keepdim)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef var(input, dim, unbiased, keepdim=False, *, out=None):\r\n return ivy.var(input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef min(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n return ivy.min(input, axis=dim, keepdims=keepdim, out=out)\r\n elif out is not None:\r\n ivy.min(input, axis=dim, keepdims=keepdim, out=out[0])\r\n ivy.argmin(input, axis=dim, keepdims=keepdim, out=out[1])\r\n return out\r\n else:\r\n min_tuple = namedtuple(\"min\", [\"values\", \"indices\"])\r\n return min_tuple(\r\n ivy.min(input, axis=dim, keepdims=keepdim),\r\n ivy.argmin(input, axis=dim, 
keepdims=keepdim),\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef max(input, dim=None, keepdim=False, *, out=None):\r\n if dim is None:\r\n return ivy.max(input, axis=dim, keepdims=keepdim, out=out)\r\n elif out is not None:\r\n ivy.max(input, axis=dim, keepdims=keepdim, out=out[0])\r\n ivy.argmax(input, axis=dim, keepdims=keepdim, out=out[1])\r\n return out\r\n else:\r\n max_tuple = namedtuple(\"max\", [\"values\", \"indices\"])\r\n return max_tuple(\r\n ivy.max(input, axis=dim, keepdims=keepdim),\r\n ivy.argmax(input, axis=dim, keepdims=keepdim),\r\n )\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef moveaxis(input, source, destination):\r\n return ivy.moveaxis(input, source, destination)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef std_mean(input, dim, unbiased, keepdim=False, *, out=None):\r\n temp_std = ivy.std(\r\n input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out\r\n )\r\n temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n return temp_std, temp_mean\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef var_mean(input, dim, unbiased, keepdim=False, *, out=None):\r\n temp_var = ivy.var(\r\n input, axis=dim, correction=int(unbiased), keepdims=keepdim, out=out\r\n )\r\n temp_mean = ivy.mean(input, axis=dim, keepdims=keepdim, out=out)\r\n return (temp_var, temp_mean)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef aminmax(input, *, dim=None, keepdim=False, out=None):\r\n minmax_tuple = namedtuple(\"minmax\", [\"min\", \"max\"])\r\n return minmax_tuple(\r\n ivy.min(input, axis=dim, keepdims=keepdim, out=out),\r\n ivy.max(input, axis=dim, keepdims=keepdim, out=out),\r\n )\r\n\r\n\r\naminmax.unsupported_dtypes = {\r\n \"torch\": (\"float16\", \"bfloat16\"),\r\n \"numpy\": (\"float16\", \"bfloat16\"),\r\n \"jax\": (\"float16\", \"bfloat16\"),\r\n \"tensorflow\": (\"float16\", \"bfloat16\"),\r\n}\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef quantile(input, q, dim=None, keepdim=False, *, interpolation=\"linear\", out=None):\r\n return ivy.quantile(\r\n input, q, axis=dim, keepdims=keepdim, interpolation=interpolation, out=out\r\n )\r\n\r\n\r\nquantile.unsupported_dtypes = {\r\n \"torch\": (\"float16\", \"bfloat16\"),\r\n \"numpy\": (\"float16\", \"bfloat16\"),\r\n \"jax\": (\"float16\", \"bfloat16\"),\r\n \"tensorflow\": (\"float16\", \"bfloat16\"),\r\n}\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef count_nonzero(input, dim=None):\r\n return ivy.count_nonzero(input, axis=dim).astype(ivy.int64)\r\n", "path": "ivy/functional/frontends/torch/reduction_ops.py"}]}
| 2,753 | 450 |
gh_patches_debug_582
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-777
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 1.6.12
On the docket:
+ [x] PythonInterpreter: support python binary names with single letter suffixes #769
+ [x] Pex should support some form of verifiably reproducible resolve. #772
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.11'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.11'
+__version__ = '1.6.12'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.11'\n+__version__ = '1.6.12'\n", "issue": "Release 1.6.12\nOn the docket:\r\n+ [x] PythonInterpreter: support python binary names with single letter suffixes #769\r\n+ [x] Pex should support some form of verifiably reproducible resolve. #772\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.11'\n", "path": "pex/version.py"}]}
| 640 | 96 |
gh_patches_debug_27895
|
rasdani/github-patches
|
git_diff
|
lk-geimfari__mimesis-350
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace requirements.txt with Pipfile
Pipenv (which works with Pipfile) is the officially recommended Python packaging tool from Python.org. We should use it instead of `requirements.txt`.
</issue>
<code>
[start of setup.py]
1 """
2 .. image:: https://raw.githubusercontent.com/lk-geimfari/mimesis/master/media/logo-large-nodescr.png
3
4 Mimesis
5 -------
6
7 **Mimesis** is a fast and easy to use the library for Python programming
8 language, which helps generate mock data for a variety of purposes in a
9 variety of languages. This data can be particularly useful during software
10 development and testing. For example, it could be used to populate a
11 testing database for a web application with user information such as
12 email addresses, usernames, first names, last names, etc.
13
14 Documentation
15 -------------
16
17 Mimesis is very simple to use, and the below examples should help you
18 get started. Complete documentation for Mimesis is available on `Read
19 the Docs`_.
20
21 .. _Read the Docs: http://mimesis.readthedocs.io/
22
23 Installation
24 ------------
25
26 To install mimesis, simply use pip (or `pipenv <http://pipenv.org/>`_):
27
28 .. code:: bash
29
30 ➜ ~ pip install mimesis
31
32 Getting started
33 ---------------
34
35 As we said above, this library is really easy to use. A simple usage
36 example is given below:
37
38 .. code:: python
39
40 >>> from mimesis import Personal
41 >>> from mimesis.enums import Gender
42 >>> person = Personal('en')
43
44 >>> person.full_name(gender=Gender.FEMALE)
45 'Antonetta Garrison'
46
47 >>> person.occupation()
48 'Backend Developer'
49
50 >>> for template in ('U_d', 'U-d', 'l_d', 'l-d'):
51 ... person.username(template=template)
52
53 'Adders_1893'
54 'Abdel-1888'
55 'constructor_1884'
56 'chegre-2051'
57
58 Locales
59 -------
60
61 You can specify a locale when creating providers and they will return data that is appropriate for
62 the language or country associated with that locale. `Mimesis` currently includes support
63 for `33 different locales <http://mimesis.readthedocs.io/locales.html>`_.
64
65 Data Providers
66 --------------
67
68 List of supported data providers available `here <http://mimesis.readthedocs.io/providers.html>`_
69
70 """
71
72 import json
73 import os
74 import re
75 import sys
76 from distutils.core import setup
77 from os.path import abspath, dirname, exists, getsize, join, relpath, splitext
78 from shutil import rmtree
79
80 from setuptools import Command
81 from setuptools.command.test import test as TestCommand
82
83 VERSION_MINOR_MAX = 10
84 VERSION_MICRO_MAX = 10
85
86 here = abspath(dirname(__file__))
87
88 try:
89 with open('dev_requirements.txt') as f:
90 tests_requirements = f.read().splitlines()
91 except FileNotFoundError:
92 tests_requirements = []
93
94 about = {}
95 # Get meta-data from __version__.py
96 with open(join(here, 'mimesis', '__version__.py')) as f:
97 exec(f.read(), about)
98
99
100 class BaseCommand(Command):
101 description = ''
102 user_options = []
103
104 def initialize_options(self):
105 pass
106
107 def finalize_options(self):
108 pass
109
110 def run(self):
111 pass
112
113
114 class Upload(BaseCommand):
115 """Support setup.py upload."""
116
117 def run(self):
118 try:
119 rmtree(os.path.join(here, 'dist'))
120 except OSError:
121 pass
122
123 os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
124 os.system('twine upload dist/*')
125 sys.exit()
126
127
128 class PyTest(TestCommand):
129 """Custom command for running test using setup.py test"""
130
131 user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
132
133 def initialize_options(self):
134 TestCommand.initialize_options(self)
135 self.pytest_args = []
136
137 def finalize_options(self):
138 TestCommand.finalize_options(self)
139 self.test_args = []
140 self.test_suite = True
141
142 def run_tests(self):
143 import pytest
144 errno = pytest.main(self.pytest_args)
145 exit(errno)
146
147
148 class Minimizer(BaseCommand):
149 """Minify content of all json file for all locales.
150 """
151
152 def initialize_options(self):
153 """Find all files of all locales.
154 """
155
156 self.paths = []
157 self.data_path = '/mimesis/data'
158 self.separators = (',', ':')
159 self.data_dir = here + self.data_path
160 self.before_total = 0
161 self.after_total = 0
162
163 for root, _, files in os.walk(self.data_dir):
164 for file in sorted(files):
165 if splitext(file)[1] == '.json':
166 self.paths.append(join(
167 relpath(root, self.data_dir),
168 file
169 ))
170
171 @staticmethod
172 def size_of(num):
173 for unit in ['B', 'KB', 'MB']:
174 if abs(num) < 1024.0:
175 return "%3.1f%s" % (num, unit)
176 num = num / 1024.0
177 return "%.1f" % num
178
179 def minify(self, file_path):
180 size_before = getsize(file_path)
181 self.before_total += size_before
182 size_before = self.size_of(size_before)
183
184 with open(file_path, 'r', 1) as f:
185 json_text = json.loads(f.read())
186 minimized = json.dumps(
187 json_text, separators=self.separators, ensure_ascii=False)
188
189 if len(file_path) > 0:
190 output_path = abspath(file_path)
191 abs_path = abspath(dirname(output_path))
192
193 if not exists(abs_path):
194 os.makedirs(abs_path)
195
196 with open(output_path, 'w+', 1) as f:
197 f.write(minimized)
198
199 size_after = getsize(file_path)
200 self.after_total += size_after
201 size_after = self.size_of(size_after)
202
203 json_file = '/'.join(file_path.split('/')[-2:])
204
205 template = "\033[34m{}\033[0m : " \
206 "\033[92mminimized\033[0m : " \
207 "\033[33m{}\033[0m -> \033[92m{}\033[0m".format(
208 json_file,
209 size_before,
210 size_after,
211 )
212
213 print(template)
214
215 def run(self):
216 """Start json minimizer and exit when all json
217 files was minimized.
218 """
219 for rel_path in sorted(self.paths):
220 file_path = join(self.data_dir, rel_path)
221 self.minify(file_path)
222
223 after = self.size_of(self.after_total)
224 before = self.size_of(self.before_total)
225 saved = self.size_of(self.before_total - self.after_total)
226
227 template = '\nTotal: ' \
228 '\033[92m{}\033[0m -> \033[92m{}\033[0m. ' \
229 'Compressed: \033[92m{}\033[0m\n'.format(before, after, saved)
230
231 print(template)
232
233
234 class Version(BaseCommand):
235 """Custom command for versioning"""
236
237 def initialize_options(self):
238 self.current = about['__version__']
239 print('Previous version: '
240 '\033[33m{}\033[0m.\n'.format(self.current))
241
242 @staticmethod
243 def automatically(version):
244 """Automatically increment version string.
245
246 :param version: Current version.
247 :return: Next version.
248 """
249 major, minor, micro = [
250 int(i) for i in version.split('.')
251 ]
252
253 if VERSION_MICRO_MAX > micro:
254 micro += 1
255 elif VERSION_MICRO_MAX == micro:
256 micro = 0
257 minor += 1
258 elif VERSION_MINOR_MAX > minor:
259 minor += 1
260 elif VERSION_MINOR_MAX == minor:
261 micro, minor = 0, 0
262 major += 1
263 if VERSION_MINOR_MAX < minor:
264 minor, micro = 0, 0
265 major += 1
266
267 return '.'.join([str(i) for i
268 in (major, minor, micro)])
269
270 def rewrite(self, version=None):
271 if not version:
272 version = self.current
273
274 with open(join(here, 'mimesis', '__version__.py'), 'r+') as f:
275 version_str = '__version__ = \'{}\''.format(version)
276 regexp = r'__version__ = .*'
277
278 meta = re.sub(regexp, version_str, f.read())
279 f.seek(0)
280 f.write(meta)
281 f.truncate()
282
283 print('Updated! Current version is: '
284 '\033[34m{}\033[0m.\n'.format(version))
285
286 exit()
287
288 def run(self):
289 response = input('Are you sure? (yes/no): ')
290 if response.lower() in ('yes', 'y'):
291 self.rewrite(
292 self.automatically(
293 self.current,
294 ),
295 )
296
297
298 setup(
299 name=about['__title__'],
300 version=about['__version__'],
301 description=about['__description__'],
302 long_description=__doc__,
303 author=about['__author__'],
304 author_email=about['__author_email__'],
305 url=about['__url__'],
306 license=about['__license__'],
307 packages=[
308 'mimesis',
309 'mimesis.data',
310 'mimesis.data.int',
311 'mimesis.builtins',
312 'mimesis.providers',
313 ],
314 keywords=[
315 'fake',
316 'mock',
317 'data',
318 'populate',
319 'database',
320 'testing',
321 'generate',
322 'mimesis',
323 'dummy',
324 ],
325 package_data={
326 'mimesis': [
327 'data/*/*',
328 ],
329 },
330 exclude_package_data={
331 'mimesis': [
332 # It's for development.
333 'data/locale_template/*'
334 ],
335 },
336 data_files=[
337 ('', ['LICENSE']),
338 ],
339 classifiers=[
340 'License :: OSI Approved :: MIT License',
341 'Intended Audience :: Developers',
342 'Programming Language :: Python :: 3.5',
343 'Programming Language :: Python :: 3.6',
344 'Programming Language :: Python :: Implementation :: CPython',
345 'Operating System :: OS Independent',
346 'Topic :: Software Development',
347 'Topic :: Software Development :: Testing',
348 ],
349 tests_require=tests_requirements,
350 cmdclass={
351 'test': PyTest,
352 'version': Version,
353 'minify': Minimizer,
354 'upload': Upload,
355 },
356 )
357
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,12 +85,6 @@
here = abspath(dirname(__file__))
-try:
- with open('dev_requirements.txt') as f:
- tests_requirements = f.read().splitlines()
-except FileNotFoundError:
- tests_requirements = []
-
about = {}
# Get meta-data from __version__.py
with open(join(here, 'mimesis', '__version__.py')) as f:
@@ -125,26 +119,6 @@
sys.exit()
-class PyTest(TestCommand):
- """Custom command for running test using setup.py test"""
-
- user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
-
- def initialize_options(self):
- TestCommand.initialize_options(self)
- self.pytest_args = []
-
- def finalize_options(self):
- TestCommand.finalize_options(self)
- self.test_args = []
- self.test_suite = True
-
- def run_tests(self):
- import pytest
- errno = pytest.main(self.pytest_args)
- exit(errno)
-
-
class Minimizer(BaseCommand):
"""Minify content of all json file for all locales.
"""
@@ -346,9 +320,7 @@
'Topic :: Software Development',
'Topic :: Software Development :: Testing',
],
- tests_require=tests_requirements,
cmdclass={
- 'test': PyTest,
'version': Version,
'minify': Minimizer,
'upload': Upload,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,12 +85,6 @@\n \n here = abspath(dirname(__file__))\n \n-try:\n- with open('dev_requirements.txt') as f:\n- tests_requirements = f.read().splitlines()\n-except FileNotFoundError:\n- tests_requirements = []\n-\n about = {}\n # Get meta-data from __version__.py\n with open(join(here, 'mimesis', '__version__.py')) as f:\n@@ -125,26 +119,6 @@\n sys.exit()\n \n \n-class PyTest(TestCommand):\n- \"\"\"Custom command for running test using setup.py test\"\"\"\n-\n- user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]\n-\n- def initialize_options(self):\n- TestCommand.initialize_options(self)\n- self.pytest_args = []\n-\n- def finalize_options(self):\n- TestCommand.finalize_options(self)\n- self.test_args = []\n- self.test_suite = True\n-\n- def run_tests(self):\n- import pytest\n- errno = pytest.main(self.pytest_args)\n- exit(errno)\n-\n-\n class Minimizer(BaseCommand):\n \"\"\"Minify content of all json file for all locales.\n \"\"\"\n@@ -346,9 +320,7 @@\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Testing',\n ],\n- tests_require=tests_requirements,\n cmdclass={\n- 'test': PyTest,\n 'version': Version,\n 'minify': Minimizer,\n 'upload': Upload,\n", "issue": "Replace requirements.txt with Pipfile\nPipenv (which works with Pipfile) is officially recommended Python packaging tool from Python.org. We should use it instead `requirements.txt `.\r\n \n", "before_files": [{"content": "\"\"\"\n.. image:: https://raw.githubusercontent.com/lk-geimfari/mimesis/master/media/logo-large-nodescr.png\n\nMimesis\n-------\n\n**Mimesis** is a fast and easy to use the library for Python programming\nlanguage, which helps generate mock data for a variety of purposes in a\nvariety of languages. This data can be particularly useful during software\ndevelopment and testing. For example, it could be used to populate a\ntesting database for a web application with user information such as\nemail addresses, usernames, first names, last names, etc.\n\nDocumentation\n-------------\n\nMimesis is very simple to use, and the below examples should help you\nget started. Complete documentation for Mimesis is available on `Read\nthe Docs`_.\n\n.. _Read the Docs: http://mimesis.readthedocs.io/\n\nInstallation\n------------\n\nTo install mimesis, simply use pip (or `pipenv <http://pipenv.org/>`_):\n\n.. code:: bash\n\n \u279c ~ pip install mimesis\n\nGetting started\n---------------\n\nAs we said above, this library is really easy to use. A simple usage\nexample is given below:\n\n.. code:: python\n\n >>> from mimesis import Personal\n >>> from mimesis.enums import Gender\n >>> person = Personal('en')\n\n >>> person.full_name(gender=Gender.FEMALE)\n 'Antonetta Garrison'\n\n >>> person.occupation()\n 'Backend Developer'\n\n >>> for template in ('U_d', 'U-d', 'l_d', 'l-d'):\n ... person.username(template=template)\n\n 'Adders_1893'\n 'Abdel-1888'\n 'constructor_1884'\n 'chegre-2051'\n\nLocales\n-------\n\nYou can specify a locale when creating providers and they will return data that is appropriate for\nthe language or country associated with that locale. 
`Mimesis` currently includes support\nfor `33 different locales <http://mimesis.readthedocs.io/locales.html>`_.\n\nData Providers\n--------------\n\nList of supported data providers available `here <http://mimesis.readthedocs.io/providers.html>`_\n\n\"\"\"\n\nimport json\nimport os\nimport re\nimport sys\nfrom distutils.core import setup\nfrom os.path import abspath, dirname, exists, getsize, join, relpath, splitext\nfrom shutil import rmtree\n\nfrom setuptools import Command\nfrom setuptools.command.test import test as TestCommand\n\nVERSION_MINOR_MAX = 10\nVERSION_MICRO_MAX = 10\n\nhere = abspath(dirname(__file__))\n\ntry:\n with open('dev_requirements.txt') as f:\n tests_requirements = f.read().splitlines()\nexcept FileNotFoundError:\n tests_requirements = []\n\nabout = {}\n# Get meta-data from __version__.py\nwith open(join(here, 'mimesis', '__version__.py')) as f:\n exec(f.read(), about)\n\n\nclass BaseCommand(Command):\n description = ''\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n pass\n\n\nclass Upload(BaseCommand):\n \"\"\"Support setup.py upload.\"\"\"\n\n def run(self):\n try:\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))\n os.system('twine upload dist/*')\n sys.exit()\n\n\nclass PyTest(TestCommand):\n \"\"\"Custom command for running test using setup.py test\"\"\"\n\n user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.pytest_args)\n exit(errno)\n\n\nclass Minimizer(BaseCommand):\n \"\"\"Minify content of all json file for all locales.\n \"\"\"\n\n def initialize_options(self):\n \"\"\"Find all files of all locales.\n \"\"\"\n\n self.paths = []\n self.data_path = '/mimesis/data'\n self.separators = (',', ':')\n self.data_dir = here + self.data_path\n self.before_total = 0\n self.after_total = 0\n\n for root, _, files in os.walk(self.data_dir):\n for file in sorted(files):\n if splitext(file)[1] == '.json':\n self.paths.append(join(\n relpath(root, self.data_dir),\n file\n ))\n\n @staticmethod\n def size_of(num):\n for unit in ['B', 'KB', 'MB']:\n if abs(num) < 1024.0:\n return \"%3.1f%s\" % (num, unit)\n num = num / 1024.0\n return \"%.1f\" % num\n\n def minify(self, file_path):\n size_before = getsize(file_path)\n self.before_total += size_before\n size_before = self.size_of(size_before)\n\n with open(file_path, 'r', 1) as f:\n json_text = json.loads(f.read())\n minimized = json.dumps(\n json_text, separators=self.separators, ensure_ascii=False)\n\n if len(file_path) > 0:\n output_path = abspath(file_path)\n abs_path = abspath(dirname(output_path))\n\n if not exists(abs_path):\n os.makedirs(abs_path)\n\n with open(output_path, 'w+', 1) as f:\n f.write(minimized)\n\n size_after = getsize(file_path)\n self.after_total += size_after\n size_after = self.size_of(size_after)\n\n json_file = '/'.join(file_path.split('/')[-2:])\n\n template = \"\\033[34m{}\\033[0m : \" \\\n \"\\033[92mminimized\\033[0m : \" \\\n \"\\033[33m{}\\033[0m -> \\033[92m{}\\033[0m\".format(\n json_file,\n size_before,\n size_after,\n )\n\n print(template)\n\n def run(self):\n \"\"\"Start json minimizer and exit when all json\n files was 
minimized.\n \"\"\"\n for rel_path in sorted(self.paths):\n file_path = join(self.data_dir, rel_path)\n self.minify(file_path)\n\n after = self.size_of(self.after_total)\n before = self.size_of(self.before_total)\n saved = self.size_of(self.before_total - self.after_total)\n\n template = '\\nTotal: ' \\\n '\\033[92m{}\\033[0m -> \\033[92m{}\\033[0m. ' \\\n 'Compressed: \\033[92m{}\\033[0m\\n'.format(before, after, saved)\n\n print(template)\n\n\nclass Version(BaseCommand):\n \"\"\"Custom command for versioning\"\"\"\n\n def initialize_options(self):\n self.current = about['__version__']\n print('Previous version: '\n '\\033[33m{}\\033[0m.\\n'.format(self.current))\n\n @staticmethod\n def automatically(version):\n \"\"\"Automatically increment version string.\n\n :param version: Current version.\n :return: Next version.\n \"\"\"\n major, minor, micro = [\n int(i) for i in version.split('.')\n ]\n\n if VERSION_MICRO_MAX > micro:\n micro += 1\n elif VERSION_MICRO_MAX == micro:\n micro = 0\n minor += 1\n elif VERSION_MINOR_MAX > minor:\n minor += 1\n elif VERSION_MINOR_MAX == minor:\n micro, minor = 0, 0\n major += 1\n if VERSION_MINOR_MAX < minor:\n minor, micro = 0, 0\n major += 1\n\n return '.'.join([str(i) for i\n in (major, minor, micro)])\n\n def rewrite(self, version=None):\n if not version:\n version = self.current\n\n with open(join(here, 'mimesis', '__version__.py'), 'r+') as f:\n version_str = '__version__ = \\'{}\\''.format(version)\n regexp = r'__version__ = .*'\n\n meta = re.sub(regexp, version_str, f.read())\n f.seek(0)\n f.write(meta)\n f.truncate()\n\n print('Updated! Current version is: '\n '\\033[34m{}\\033[0m.\\n'.format(version))\n\n exit()\n\n def run(self):\n response = input('Are you sure? (yes/no): ')\n if response.lower() in ('yes', 'y'):\n self.rewrite(\n self.automatically(\n self.current,\n ),\n )\n\n\nsetup(\n name=about['__title__'],\n version=about['__version__'],\n description=about['__description__'],\n long_description=__doc__,\n author=about['__author__'],\n author_email=about['__author_email__'],\n url=about['__url__'],\n license=about['__license__'],\n packages=[\n 'mimesis',\n 'mimesis.data',\n 'mimesis.data.int',\n 'mimesis.builtins',\n 'mimesis.providers',\n ],\n keywords=[\n 'fake',\n 'mock',\n 'data',\n 'populate',\n 'database',\n 'testing',\n 'generate',\n 'mimesis',\n 'dummy',\n ],\n package_data={\n 'mimesis': [\n 'data/*/*',\n ],\n },\n exclude_package_data={\n 'mimesis': [\n # It's for development.\n 'data/locale_template/*'\n ],\n },\n data_files=[\n ('', ['LICENSE']),\n ],\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Testing',\n ],\n tests_require=tests_requirements,\n cmdclass={\n 'test': PyTest,\n 'version': Version,\n 'minify': Minimizer,\n 'upload': Upload,\n },\n)\n", "path": "setup.py"}]}
| 3,869 | 353 |
gh_patches_debug_23322
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-9034
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
allow multiple stations for the same location
<!--
(Thanks for sending a pull request! Please make sure you click the link above to view the contribution guidelines, then fill out the blanks below.)
-->
<!-- Add the issue number that is fixed by this PR (In the form Fixes #123) -->
Fixes #8958
#### Short description of what this resolves:
- fix issue to allow multiple stations for the same location
#### Changes proposed in this pull request:
- allow multiple stations for the same location
#### Checklist
- [x] I have read the [Contribution & Best practices Guide](https://blog.fossasia.org/open-source-developer-guide-and-best-practices-at-fossasia) and my PR follows them.
- [x] My branch is up-to-date with the Upstream `development` branch.
- [ ] The unit tests pass locally with my changes <!-- use `nosetests tests/` to run all the tests -->
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] I have added necessary documentation (if appropriate)
<!-- If an existing function does not have a docstring, please add one -->
- [ ] All the functions created/modified in this PR contain relevant docstrings.
</issue>
<code>
[start of app/api/station.py]
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from flask_rest_jsonapi.exceptions import ObjectNotFound
3
4 from app.api.helpers.db import safe_query_kwargs
5 from app.api.helpers.permission_manager import has_access
6 from app.api.helpers.permissions import jwt_required
7 from app.api.helpers.utilities import require_relationship
8 from app.api.schema.station import StationSchema
9 from app.models import db
10 from app.models.event import Event
11 from app.models.microlocation import Microlocation
12 from app.models.station import Station
13
14
15 class StationList(ResourceList):
16 """Create and List Station"""
17
18 def query(self, view_kwargs):
19 """
20 query method for different view_kwargs
21 :param view_kwargs:
22 :return:
23 """
24 query_ = self.session.query(Station)
25 if view_kwargs.get('event_id'):
26 event = safe_query_kwargs(Event, view_kwargs, 'event_id')
27 query_ = query_.filter_by(event_id=event.id)
28
29 elif view_kwargs.get('microlocation_id'):
30 event = safe_query_kwargs(Microlocation, view_kwargs, 'microlocation_id')
31 query_ = query_.filter_by(microlocation_id=event.id)
32
33 return query_
34
35 view_kwargs = True
36 schema = StationSchema
37 data_layer = {
38 'session': db.session,
39 'model': Station,
40 'methods': {'query': query},
41 }
42
43
44 class StationDetail(ResourceDetail):
45 """Station detail by id"""
46
47 @staticmethod
48 def before_patch(args, kwargs, data):
49 """
50 before patch method
51 :param args:
52 :param kwargs:
53 :param data:
54 :return:
55 """
56 require_relationship(['event'], data)
57 if not has_access('is_coorganizer', event_id=data['event']):
58 raise ObjectNotFound(
59 {'parameter': 'event'},
60 f"Event: {data['event']} not found {args} {kwargs}",
61 )
62
63 if data.get('microlocation'):
64 require_relationship(['microlocation'], data)
65 else:
66 if data['station_type'] in ('check in', 'check out', 'daily'):
67 raise ObjectNotFound(
68 {'parameter': 'microlocation'},
69 "Microlocation: microlocation_id is missing from your request.",
70 )
71
72 schema = StationSchema
73 data_layer = {
74 'session': db.session,
75 'model': Station,
76 }
77
78
79 class StationRelationship(ResourceRelationship):
80 """Station Relationship (Required)"""
81
82 decorators = (jwt_required,)
83 methods = ['GET', 'PATCH']
84 schema = StationSchema
85 data_layer = {'session': db.session, 'model': Station}
86
87
88 class StationListPost(ResourceList):
89 """Create and List Station"""
90
91 @staticmethod
92 def before_post(args, kwargs, data):
93 """
94 method to check for required relationship with event and microlocation
95 :param data:
96 :param args:
97 :param kwargs:
98 :return:
99 """
100 require_relationship(['event'], data)
101 if not has_access('is_coorganizer', event_id=data['event']):
102 raise ObjectNotFound(
103 {'parameter': 'event'},
104 f"Event: {data['event']} not found {args} {kwargs}",
105 )
106
107 if data.get('microlocation'):
108 require_relationship(['microlocation'], data)
109 else:
110 if data['station_type'] in ('check in', 'check out', 'daily'):
111 raise ObjectNotFound(
112 {'parameter': 'microlocation'},
113 "Microlocation: missing from your request.",
114 )
115
116 schema = StationSchema
117 methods = [
118 'POST',
119 ]
120 data_layer = {
121 'session': db.session,
122 'model': Station,
123 }
124
[end of app/api/station.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/station.py b/app/api/station.py
--- a/app/api/station.py
+++ b/app/api/station.py
@@ -63,7 +63,7 @@
if data.get('microlocation'):
require_relationship(['microlocation'], data)
else:
- if data['station_type'] in ('check in', 'check out', 'daily'):
+ if data['station_type'] in ('check in', 'check out'):
raise ObjectNotFound(
{'parameter': 'microlocation'},
"Microlocation: microlocation_id is missing from your request.",
@@ -107,7 +107,7 @@
if data.get('microlocation'):
require_relationship(['microlocation'], data)
else:
- if data['station_type'] in ('check in', 'check out', 'daily'):
+ if data['station_type'] in ('check in', 'check out'):
raise ObjectNotFound(
{'parameter': 'microlocation'},
"Microlocation: missing from your request.",
|
{"golden_diff": "diff --git a/app/api/station.py b/app/api/station.py\n--- a/app/api/station.py\n+++ b/app/api/station.py\n@@ -63,7 +63,7 @@\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n- if data['station_type'] in ('check in', 'check out', 'daily'):\n+ if data['station_type'] in ('check in', 'check out'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: microlocation_id is missing from your request.\",\n@@ -107,7 +107,7 @@\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n- if data['station_type'] in ('check in', 'check out', 'daily'):\n+ if data['station_type'] in ('check in', 'check out'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: missing from your request.\",\n", "issue": "allow mutiple station for same location\n<!--\r\n(Thanks for sending a pull request! Please make sure you click the link above to view the contribution guidelines, then fill out the blanks below.)\r\n-->\r\n<!-- Add the issue number that is fixed by this PR (In the form Fixes #123) -->\r\n\r\nFixes #8958 \r\n\r\n#### Short description of what this resolves:\r\n- fix issue to allow mutiple station for same location\r\n\r\n#### Changes proposed in this pull request:\r\n\r\n- allow mutiple station for same location\r\n\r\n#### Checklist\r\n\r\n- [x] I have read the [Contribution & Best practices Guide](https://blog.fossasia.org/open-source-developer-guide-and-best-practices-at-fossasia) and my PR follows them.\r\n- [x] My branch is up-to-date with the Upstream `development` branch.\r\n- [ ] The unit tests pass locally with my changes <!-- use `nosetests tests/` to run all the tests -->\r\n- [ ] I have added tests that prove my fix is effective or that my feature works\r\n- [ ] I have added necessary documentation (if appropriate)\r\n<!-- If an existing function does not have a docstring, please add one -->\r\n- [ ] All the functions created/modified in this PR contain relevant docstrings.\r\n\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.db import safe_query_kwargs\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.station import StationSchema\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.microlocation import Microlocation\nfrom app.models.station import Station\n\n\nclass StationList(ResourceList):\n \"\"\"Create and List Station\"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Station)\n if view_kwargs.get('event_id'):\n event = safe_query_kwargs(Event, view_kwargs, 'event_id')\n query_ = query_.filter_by(event_id=event.id)\n\n elif view_kwargs.get('microlocation_id'):\n event = safe_query_kwargs(Microlocation, view_kwargs, 'microlocation_id')\n query_ = query_.filter_by(microlocation_id=event.id)\n\n return query_\n\n view_kwargs = True\n schema = StationSchema\n data_layer = {\n 'session': db.session,\n 'model': Station,\n 'methods': {'query': query},\n }\n\n\nclass StationDetail(ResourceDetail):\n \"\"\"Station detail by id\"\"\"\n\n @staticmethod\n def before_patch(args, kwargs, data):\n \"\"\"\n before patch method\n :param args:\n 
:param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound(\n {'parameter': 'event'},\n f\"Event: {data['event']} not found {args} {kwargs}\",\n )\n\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n if data['station_type'] in ('check in', 'check out', 'daily'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: microlocation_id is missing from your request.\",\n )\n\n schema = StationSchema\n data_layer = {\n 'session': db.session,\n 'model': Station,\n }\n\n\nclass StationRelationship(ResourceRelationship):\n \"\"\"Station Relationship (Required)\"\"\"\n\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = StationSchema\n data_layer = {'session': db.session, 'model': Station}\n\n\nclass StationListPost(ResourceList):\n \"\"\"Create and List Station\"\"\"\n\n @staticmethod\n def before_post(args, kwargs, data):\n \"\"\"\n method to check for required relationship with event and microlocation\n :param data:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound(\n {'parameter': 'event'},\n f\"Event: {data['event']} not found {args} {kwargs}\",\n )\n\n if data.get('microlocation'):\n require_relationship(['microlocation'], data)\n else:\n if data['station_type'] in ('check in', 'check out', 'daily'):\n raise ObjectNotFound(\n {'parameter': 'microlocation'},\n \"Microlocation: missing from your request.\",\n )\n\n schema = StationSchema\n methods = [\n 'POST',\n ]\n data_layer = {\n 'session': db.session,\n 'model': Station,\n }\n", "path": "app/api/station.py"}]}
| 1,841 | 222 |
gh_patches_debug_26321
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-7910
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug 1314290: Update to Django 1.8.16
Django 1.8.15 -> 1.8.16 - Security release
https://www.djangoproject.com/weblog/2016/nov/01/security-releases/
</issue>
<code>
[start of kuma/api/v1/search/__init__.py]
1 from django import http
2 from django.conf import settings
3 from django.utils.cache import patch_cache_control
4 from elasticsearch import exceptions
5 from elasticsearch_dsl import Q, query, Search
6 from redo import retrying
7
8 from kuma.api.v1.decorators import allow_CORS_GET
9
10 from .forms import SearchForm
11
12 # This is the number of seconds to be put into the Cache-Control max-age header
13 # if the search is successful.
14 # We can increase the number as we feel more and more comfortable with how
15 # the `/api/v1/search` works.
16 SEARCH_CACHE_CONTROL_MAX_AGE = 60 * 60 * 12
17
18
19 class JsonResponse(http.JsonResponse):
20 """The only reason this exists is so that other Django views can call
21 views that return instances of this and then get to the data before it
22 gets JSON serialized.
23 This is something that rest_framework's JsonResponse supports.
24 Ultimately, the only view that cares is the (old) Kuma search view page
25 that calls the view function here in this file. Now it can do something like:
26
27 response = kuma.api.v1.search.search(request)
28 found = response.data
29
30 """
31
32 def __init__(self, data, *args, **kwargs):
33 self.data = data
34 super().__init__(data, *args, **kwargs)
35
36
37 def legacy(request, locale=None):
38 raise NotImplementedError("work harder")
39
40
41 @allow_CORS_GET
42 def search(request, locale=None):
43 initial = {"size": 10, "page": 1, "archive": SearchForm.ARCHIVE_CHOICES[0]}
44 if locale:
45 initial["locale"] = locale
46 form = SearchForm(request.GET, initial=initial)
47 if not form.is_valid():
48 return JsonResponse({"errors": form.errors.get_json_data()}, status=400)
49
50 locales = form.cleaned_data["locale"] or [settings.LANGUAGE_CODE]
51 assert isinstance(locales, list)
52
53 params = {
54 "locales": [x.lower() for x in locales],
55 "archive": form.cleaned_data["archive"],
56 "query": form.cleaned_data["q"],
57 "size": form.cleaned_data["size"],
58 "page": form.cleaned_data["page"],
59 "sort": form.cleaned_data["sort"],
60 # The `slug` is always stored, as a Keyword index, in lowercase.
61 "slug_prefixes": [x.lower() for x in form.cleaned_data["slug_prefix"]],
62 }
63
64 # By default, assume that we will try to make suggestions.
65 make_suggestions = True
66 if len(params["query"]) > 100 or max(len(x) for x in params["query"].split()) > 30:
67 # For example, if it's a really long query, or a specific word is just too
68 # long, you can get those tricky
69 # TransportError(500, 'search_phase_execution_exception', 'Term too complex:
70 # errors which are hard to prevent against.
71 make_suggestions = False
72
73 results = _find(
74 params,
75 make_suggestions=make_suggestions,
76 )
77 response = JsonResponse(results)
78
79 # The reason for caching is that most of the time, the searches people make
80 # are short and often stand a high chance of being reused by other users
81 # in the CDN.
82 # The worst that can happen is that we fill up the CDN with cached responses
83 # that end up being stored there and never reused by another user.
84 # We could consider only bothering with this based on looking at the parameters.
85 # For example, if someone made a search with "complex parameters" we could skip
86 # cache-control because it'll just be a waste to store it (CDN and client).
87 # The reason for not using a "shared" cache-control, i.e. `s-max-age` is
88 # because the cache-control seconds we intend to set are appropriate for both
89 # the client and the CDN. If the value set is 3600 seconds, that means that
90 # clients might potentially re-use their own browser cache if they trigger
91 # a repeated search. And it's an appropriate number for the CDN too.
92 # For more info about how our search patterns behave,
93 # see https://github.com/mdn/kuma/issues/7799
94 patch_cache_control(response, public=True, max_age=SEARCH_CACHE_CONTROL_MAX_AGE)
95 return response
96
97
98 def _find(params, total_only=False, make_suggestions=False, min_suggestion_score=0.8):
99 search_query = Search(
100 index=settings.SEARCH_INDEX_NAME,
101 )
102 if make_suggestions:
103 # XXX research if it it's better to use phrase suggesters and if
104 # that works
105 # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters.html#phrase-suggester
106 search_query = search_query.suggest(
107 "title_suggestions", params["query"], term={"field": "title"}
108 )
109 search_query = search_query.suggest(
110 "body_suggestions", params["query"], term={"field": "body"}
111 )
112
113 sub_queries = []
114 sub_queries.append(Q("match", title={"query": params["query"], "boost": 2.0}))
115 sub_queries.append(Q("match", body={"query": params["query"], "boost": 1.0}))
116 if " " in params["query"]:
117 sub_queries.append(
118 Q("match_phrase", title={"query": params["query"], "boost": 10.0})
119 )
120 sub_queries.append(
121 Q("match_phrase", body={"query": params["query"], "boost": 5.0})
122 )
123
124 sub_query = query.Bool(should=sub_queries)
125
126 if params["locales"]:
127 search_query = search_query.filter("terms", locale=params["locales"])
128 if params["archive"] == "exclude":
129 search_query = search_query.filter("term", archived=False)
130 elif params["archive"] == "only":
131 search_query = search_query.filter("term", archived=True)
132
133 if params["slug_prefixes"]:
134 sub_queries = [Q("prefix", slug=x) for x in params["slug_prefixes"]]
135 search_query = search_query.query(query.Bool(should=sub_queries))
136
137 search_query = search_query.highlight_options(
138 pre_tags=["<mark>"],
139 post_tags=["</mark>"],
140 number_of_fragments=3,
141 fragment_size=120,
142 encoder="html",
143 )
144 search_query = search_query.highlight("title", "body")
145
146 if params["sort"] == "relevance":
147 search_query = search_query.sort("_score", "-popularity")
148 search_query = search_query.query(sub_query)
149 elif params["sort"] == "popularity":
150 search_query = search_query.sort("-popularity", "_score")
151 search_query = search_query.query(sub_query)
152 else:
153 popularity_factor = 10.0
154 boost_mode = "sum"
155 score_mode = "max"
156 search_query = search_query.query(
157 "function_score",
158 query=sub_query,
159 functions=[
160 query.SF(
161 "field_value_factor",
162 field="popularity",
163 factor=popularity_factor,
164 missing=0.0,
165 )
166 ],
167 boost_mode=boost_mode,
168 score_mode=score_mode,
169 )
170
171 search_query = search_query.source(excludes=["body"])
172
173 search_query = search_query[
174 params["size"] * (params["page"] - 1) : params["size"] * params["page"]
175 ]
176
177 retry_options = {
178 "retry_exceptions": (
179 # This is the standard operational exception.
180 exceptions.ConnectionError,
181 # This can happen if the search happened right as the index had
182 # just been deleted due to a fresh re-indexing happening in Yari.
183 exceptions.NotFoundError,
184 # This can happen when the index simply isn't ready yet.
185 exceptions.TransportError,
186 ),
187 # The default in redo is 60 seconds. Let's tone that down.
188 "sleeptime": settings.ES_RETRY_SLEEPTIME,
189 "attempts": settings.ES_RETRY_ATTEMPTS,
190 "jitter": settings.ES_RETRY_JITTER,
191 }
192 with retrying(search_query.execute, **retry_options) as retrying_function:
193 response = retrying_function()
194
195 if total_only:
196 return response.hits.total
197
198 metadata = {
199 "took_ms": response.took,
200 "total": {
201 # The `response.hits.total` is a `elasticsearch_dsl.utils.AttrDict`
202 # instance. Pluck only the exact data needed.
203 "value": response.hits.total.value,
204 "relation": response.hits.total.relation,
205 },
206 "size": params["size"],
207 "page": params["page"],
208 }
209 documents = []
210 for hit in response:
211 try:
212 body_highlight = list(hit.meta.highlight.body)
213 except AttributeError:
214 body_highlight = []
215 try:
216 title_highlight = list(hit.meta.highlight.title)
217 except AttributeError:
218 title_highlight = []
219
220 d = {
221 "mdn_url": hit.meta.id,
222 "score": hit.meta.score,
223 "title": hit.title,
224 "locale": hit.locale,
225 "slug": hit.slug,
226 "popularity": hit.popularity,
227 "archived": hit.archived,
228 "summary": hit.summary,
229 "highlight": {
230 "body": body_highlight,
231 "title": title_highlight,
232 },
233 }
234 documents.append(d)
235
236 try:
237 suggest = getattr(response, "suggest")
238 except AttributeError:
239 suggest = None
240
241 suggestions = []
242 if suggest:
243 suggestion_strings = _unpack_suggestions(
244 params["query"],
245 response.suggest,
246 ("body_suggestions", "title_suggestions"),
247 )
248
249 for score, string in suggestion_strings:
250 if score > min_suggestion_score or 1:
251 # Sure, this is different way to spell, but what will it yield
252 # if you actually search it?
253 total = _find(dict(params, query=string), total_only=True)
254 if total["value"] > 0:
255 suggestions.append(
256 {
257 "text": string,
258 "total": {
259 # This 'total' is an `AttrDict` instance.
260 "value": total.value,
261 "relation": total.relation,
262 },
263 }
264 )
265 # Since they're sorted by score, it's usually never useful
266 # to suggestion more than exactly 1 good suggestion.
267 break
268
269 return {
270 "documents": documents,
271 "metadata": metadata,
272 "suggestions": suggestions,
273 }
274
275
276 def _unpack_suggestions(query, suggest, keys):
277 alternatives = []
278 for key in keys:
279 for suggestion in getattr(suggest, key, []):
280 for option in suggestion.options:
281 alternatives.append(
282 (
283 option.score,
284 query[0 : suggestion.offset]
285 + option.text
286 + query[suggestion.offset + suggestion.length :],
287 )
288 )
289 alternatives.sort(reverse=True) # highest score first
290 return alternatives
291
[end of kuma/api/v1/search/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/api/v1/search/__init__.py b/kuma/api/v1/search/__init__.py
--- a/kuma/api/v1/search/__init__.py
+++ b/kuma/api/v1/search/__init__.py
@@ -110,15 +110,29 @@
"body_suggestions", params["query"], term={"field": "body"}
)
+ # The business logic here that we search for things different ways,
+ # and each different way as a different boost which dictates its importance.
+ # The importance order is as follows:
+ #
+ # 1. Title match-phrase
+ # 2. Title match
+ # 3. Body match-phrase
+ # 4. Body match
+ #
+ # The order is determined by the `boost` number in the code below.
+ # Remember that sort order is a combination of "match" and popularity, but
+ # ideally the popularity should complement. Try to get a pretty good
+ # sort by pure relevance first, and let popularity just make it better.
+ #
sub_queries = []
- sub_queries.append(Q("match", title={"query": params["query"], "boost": 2.0}))
+ sub_queries.append(Q("match", title={"query": params["query"], "boost": 5.0}))
sub_queries.append(Q("match", body={"query": params["query"], "boost": 1.0}))
if " " in params["query"]:
sub_queries.append(
Q("match_phrase", title={"query": params["query"], "boost": 10.0})
)
sub_queries.append(
- Q("match_phrase", body={"query": params["query"], "boost": 5.0})
+ Q("match_phrase", body={"query": params["query"], "boost": 2.0})
)
sub_query = query.Bool(should=sub_queries)
|
{"golden_diff": "diff --git a/kuma/api/v1/search/__init__.py b/kuma/api/v1/search/__init__.py\n--- a/kuma/api/v1/search/__init__.py\n+++ b/kuma/api/v1/search/__init__.py\n@@ -110,15 +110,29 @@\n \"body_suggestions\", params[\"query\"], term={\"field\": \"body\"}\n )\n \n+ # The business logic here that we search for things different ways,\n+ # and each different way as a different boost which dictates its importance.\n+ # The importance order is as follows:\n+ #\n+ # 1. Title match-phrase\n+ # 2. Title match\n+ # 3. Body match-phrase\n+ # 4. Body match\n+ #\n+ # The order is determined by the `boost` number in the code below.\n+ # Remember that sort order is a combination of \"match\" and popularity, but\n+ # ideally the popularity should complement. Try to get a pretty good\n+ # sort by pure relevance first, and let popularity just make it better.\n+ #\n sub_queries = []\n- sub_queries.append(Q(\"match\", title={\"query\": params[\"query\"], \"boost\": 2.0}))\n+ sub_queries.append(Q(\"match\", title={\"query\": params[\"query\"], \"boost\": 5.0}))\n sub_queries.append(Q(\"match\", body={\"query\": params[\"query\"], \"boost\": 1.0}))\n if \" \" in params[\"query\"]:\n sub_queries.append(\n Q(\"match_phrase\", title={\"query\": params[\"query\"], \"boost\": 10.0})\n )\n sub_queries.append(\n- Q(\"match_phrase\", body={\"query\": params[\"query\"], \"boost\": 5.0})\n+ Q(\"match_phrase\", body={\"query\": params[\"query\"], \"boost\": 2.0})\n )\n \n sub_query = query.Bool(should=sub_queries)\n", "issue": "bug 1314290: Update to Django 1.8.16\nDjango 1.8.15 -> 1.8.16 - Security release\r\n\r\nhttps://www.djangoproject.com/weblog/2016/nov/01/security-releases/\n", "before_files": [{"content": "from django import http\nfrom django.conf import settings\nfrom django.utils.cache import patch_cache_control\nfrom elasticsearch import exceptions\nfrom elasticsearch_dsl import Q, query, Search\nfrom redo import retrying\n\nfrom kuma.api.v1.decorators import allow_CORS_GET\n\nfrom .forms import SearchForm\n\n# This is the number of seconds to be put into the Cache-Control max-age header\n# if the search is successful.\n# We can increase the number as we feel more and more comfortable with how\n# the `/api/v1/search` works.\nSEARCH_CACHE_CONTROL_MAX_AGE = 60 * 60 * 12\n\n\nclass JsonResponse(http.JsonResponse):\n \"\"\"The only reason this exists is so that other Django views can call\n views that return instances of this and then get to the data before it\n gets JSON serialized.\n This is something that rest_framework's JsonResponse supports.\n Ultimately, the only view that cares is the (old) Kuma search view page\n that calls the view function here in this file. 
Now it can do something like:\n\n response = kuma.api.v1.search.search(request)\n found = response.data\n\n \"\"\"\n\n def __init__(self, data, *args, **kwargs):\n self.data = data\n super().__init__(data, *args, **kwargs)\n\n\ndef legacy(request, locale=None):\n raise NotImplementedError(\"work harder\")\n\n\n@allow_CORS_GET\ndef search(request, locale=None):\n initial = {\"size\": 10, \"page\": 1, \"archive\": SearchForm.ARCHIVE_CHOICES[0]}\n if locale:\n initial[\"locale\"] = locale\n form = SearchForm(request.GET, initial=initial)\n if not form.is_valid():\n return JsonResponse({\"errors\": form.errors.get_json_data()}, status=400)\n\n locales = form.cleaned_data[\"locale\"] or [settings.LANGUAGE_CODE]\n assert isinstance(locales, list)\n\n params = {\n \"locales\": [x.lower() for x in locales],\n \"archive\": form.cleaned_data[\"archive\"],\n \"query\": form.cleaned_data[\"q\"],\n \"size\": form.cleaned_data[\"size\"],\n \"page\": form.cleaned_data[\"page\"],\n \"sort\": form.cleaned_data[\"sort\"],\n # The `slug` is always stored, as a Keyword index, in lowercase.\n \"slug_prefixes\": [x.lower() for x in form.cleaned_data[\"slug_prefix\"]],\n }\n\n # By default, assume that we will try to make suggestions.\n make_suggestions = True\n if len(params[\"query\"]) > 100 or max(len(x) for x in params[\"query\"].split()) > 30:\n # For example, if it's a really long query, or a specific word is just too\n # long, you can get those tricky\n # TransportError(500, 'search_phase_execution_exception', 'Term too complex:\n # errors which are hard to prevent against.\n make_suggestions = False\n\n results = _find(\n params,\n make_suggestions=make_suggestions,\n )\n response = JsonResponse(results)\n\n # The reason for caching is that most of the time, the searches people make\n # are short and often stand a high chance of being reused by other users\n # in the CDN.\n # The worst that can happen is that we fill up the CDN with cached responses\n # that end up being stored there and never reused by another user.\n # We could consider only bothering with this based on looking at the parameters.\n # For example, if someone made a search with \"complex parameters\" we could skip\n # cache-control because it'll just be a waste to store it (CDN and client).\n # The reason for not using a \"shared\" cache-control, i.e. `s-max-age` is\n # because the cache-control seconds we intend to set are appropriate for both\n # the client and the CDN. If the value set is 3600 seconds, that means that\n # clients might potentially re-use their own browser cache if they trigger\n # a repeated search. 
And it's an appropriate number for the CDN too.\n # For more info about how our search patterns behave,\n # see https://github.com/mdn/kuma/issues/7799\n patch_cache_control(response, public=True, max_age=SEARCH_CACHE_CONTROL_MAX_AGE)\n return response\n\n\ndef _find(params, total_only=False, make_suggestions=False, min_suggestion_score=0.8):\n search_query = Search(\n index=settings.SEARCH_INDEX_NAME,\n )\n if make_suggestions:\n # XXX research if it it's better to use phrase suggesters and if\n # that works\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters.html#phrase-suggester\n search_query = search_query.suggest(\n \"title_suggestions\", params[\"query\"], term={\"field\": \"title\"}\n )\n search_query = search_query.suggest(\n \"body_suggestions\", params[\"query\"], term={\"field\": \"body\"}\n )\n\n sub_queries = []\n sub_queries.append(Q(\"match\", title={\"query\": params[\"query\"], \"boost\": 2.0}))\n sub_queries.append(Q(\"match\", body={\"query\": params[\"query\"], \"boost\": 1.0}))\n if \" \" in params[\"query\"]:\n sub_queries.append(\n Q(\"match_phrase\", title={\"query\": params[\"query\"], \"boost\": 10.0})\n )\n sub_queries.append(\n Q(\"match_phrase\", body={\"query\": params[\"query\"], \"boost\": 5.0})\n )\n\n sub_query = query.Bool(should=sub_queries)\n\n if params[\"locales\"]:\n search_query = search_query.filter(\"terms\", locale=params[\"locales\"])\n if params[\"archive\"] == \"exclude\":\n search_query = search_query.filter(\"term\", archived=False)\n elif params[\"archive\"] == \"only\":\n search_query = search_query.filter(\"term\", archived=True)\n\n if params[\"slug_prefixes\"]:\n sub_queries = [Q(\"prefix\", slug=x) for x in params[\"slug_prefixes\"]]\n search_query = search_query.query(query.Bool(should=sub_queries))\n\n search_query = search_query.highlight_options(\n pre_tags=[\"<mark>\"],\n post_tags=[\"</mark>\"],\n number_of_fragments=3,\n fragment_size=120,\n encoder=\"html\",\n )\n search_query = search_query.highlight(\"title\", \"body\")\n\n if params[\"sort\"] == \"relevance\":\n search_query = search_query.sort(\"_score\", \"-popularity\")\n search_query = search_query.query(sub_query)\n elif params[\"sort\"] == \"popularity\":\n search_query = search_query.sort(\"-popularity\", \"_score\")\n search_query = search_query.query(sub_query)\n else:\n popularity_factor = 10.0\n boost_mode = \"sum\"\n score_mode = \"max\"\n search_query = search_query.query(\n \"function_score\",\n query=sub_query,\n functions=[\n query.SF(\n \"field_value_factor\",\n field=\"popularity\",\n factor=popularity_factor,\n missing=0.0,\n )\n ],\n boost_mode=boost_mode,\n score_mode=score_mode,\n )\n\n search_query = search_query.source(excludes=[\"body\"])\n\n search_query = search_query[\n params[\"size\"] * (params[\"page\"] - 1) : params[\"size\"] * params[\"page\"]\n ]\n\n retry_options = {\n \"retry_exceptions\": (\n # This is the standard operational exception.\n exceptions.ConnectionError,\n # This can happen if the search happened right as the index had\n # just been deleted due to a fresh re-indexing happening in Yari.\n exceptions.NotFoundError,\n # This can happen when the index simply isn't ready yet.\n exceptions.TransportError,\n ),\n # The default in redo is 60 seconds. 
Let's tone that down.\n \"sleeptime\": settings.ES_RETRY_SLEEPTIME,\n \"attempts\": settings.ES_RETRY_ATTEMPTS,\n \"jitter\": settings.ES_RETRY_JITTER,\n }\n with retrying(search_query.execute, **retry_options) as retrying_function:\n response = retrying_function()\n\n if total_only:\n return response.hits.total\n\n metadata = {\n \"took_ms\": response.took,\n \"total\": {\n # The `response.hits.total` is a `elasticsearch_dsl.utils.AttrDict`\n # instance. Pluck only the exact data needed.\n \"value\": response.hits.total.value,\n \"relation\": response.hits.total.relation,\n },\n \"size\": params[\"size\"],\n \"page\": params[\"page\"],\n }\n documents = []\n for hit in response:\n try:\n body_highlight = list(hit.meta.highlight.body)\n except AttributeError:\n body_highlight = []\n try:\n title_highlight = list(hit.meta.highlight.title)\n except AttributeError:\n title_highlight = []\n\n d = {\n \"mdn_url\": hit.meta.id,\n \"score\": hit.meta.score,\n \"title\": hit.title,\n \"locale\": hit.locale,\n \"slug\": hit.slug,\n \"popularity\": hit.popularity,\n \"archived\": hit.archived,\n \"summary\": hit.summary,\n \"highlight\": {\n \"body\": body_highlight,\n \"title\": title_highlight,\n },\n }\n documents.append(d)\n\n try:\n suggest = getattr(response, \"suggest\")\n except AttributeError:\n suggest = None\n\n suggestions = []\n if suggest:\n suggestion_strings = _unpack_suggestions(\n params[\"query\"],\n response.suggest,\n (\"body_suggestions\", \"title_suggestions\"),\n )\n\n for score, string in suggestion_strings:\n if score > min_suggestion_score or 1:\n # Sure, this is different way to spell, but what will it yield\n # if you actually search it?\n total = _find(dict(params, query=string), total_only=True)\n if total[\"value\"] > 0:\n suggestions.append(\n {\n \"text\": string,\n \"total\": {\n # This 'total' is an `AttrDict` instance.\n \"value\": total.value,\n \"relation\": total.relation,\n },\n }\n )\n # Since they're sorted by score, it's usually never useful\n # to suggestion more than exactly 1 good suggestion.\n break\n\n return {\n \"documents\": documents,\n \"metadata\": metadata,\n \"suggestions\": suggestions,\n }\n\n\ndef _unpack_suggestions(query, suggest, keys):\n alternatives = []\n for key in keys:\n for suggestion in getattr(suggest, key, []):\n for option in suggestion.options:\n alternatives.append(\n (\n option.score,\n query[0 : suggestion.offset]\n + option.text\n + query[suggestion.offset + suggestion.length :],\n )\n )\n alternatives.sort(reverse=True) # highest score first\n return alternatives\n", "path": "kuma/api/v1/search/__init__.py"}]}
| 3,765 | 435 |
gh_patches_debug_41468
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-323
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use sentinel value for input parameters that aren't sent by the clients
When using input types with optional fields we cannot differentiate by fields that have sent as null and fields that haven't been sent at all.
So I think we should use a sentinel value that tells the field is unset, and also behaves as falsy:
```python
class _Unset:
def __bool__(self): return False
UNSET = _Unset()
# this utility might be useful, so we don't have to use an internal representation
def is_unset(value: Any):
return value is UNSET
```
then we can use this class when instantiating the input types for a resolver:)
</issue>
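A minimal sketch of how a resolver could use such a sentinel to tell "not sent" apart from "sent as null"; the `UpdateUserInput` class and `update_user` function below are hypothetical names, not Strawberry API:
```python
from dataclasses import dataclass
from typing import Any, Optional


class _Unset:
    def __bool__(self) -> bool:
        return False


UNSET: Any = _Unset()


def is_unset(value: Any) -> bool:
    return value is UNSET


@dataclass
class UpdateUserInput:
    # Defaulting to UNSET is what distinguishes "field not sent" from "field sent as null".
    name: Optional[str] = UNSET
    bio: Optional[str] = UNSET


def update_user(user: dict, data: UpdateUserInput) -> dict:
    for field in ("name", "bio"):
        value = getattr(data, field)
        if is_unset(value):
            continue            # the client never sent this field: keep the stored value
        user[field] = value     # the client sent it, possibly as None/null: overwrite
    return user


print(update_user({"name": "Ann", "bio": "hi"}, UpdateUserInput(bio=None)))
# {'name': 'Ann', 'bio': None} -- "name" is untouched because it was unset
```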
<code>
[start of strawberry/utils/arguments.py]
1 import enum
2 from dataclasses import is_dataclass
3 from datetime import date, datetime, time
4
5 from ..exceptions import UnsupportedTypeError
6 from .str_converters import to_camel_case, to_snake_case
7 from .typing import get_list_annotation, get_optional_annotation, is_list, is_optional
8
9
10 SCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]
11
12
13 def _to_type(value, annotation):
14 if value is None:
15 return None
16
17 if is_optional(annotation):
18 annotation = get_optional_annotation(annotation)
19
20 # TODO: change this to be a is_scalar util and make sure it works with any scalar
21 if getattr(annotation, "__supertype__", annotation) in SCALAR_TYPES:
22 return value
23
24 # Convert Enum fields to instances using the value. This is safe
25 # because graphql-core has already validated the input.
26 if isinstance(annotation, enum.EnumMeta):
27 return annotation(value)
28
29 if is_list(annotation):
30 annotation = get_list_annotation(annotation)
31
32 return [_to_type(x, annotation) for x in value]
33
34 if is_dataclass(annotation):
35 fields = annotation.__dataclass_fields__
36
37 kwargs = {}
38
39 for name, field in fields.items():
40 dict_name = name
41
42 if hasattr(field, "field_name") and field.field_name:
43 dict_name = field.field_name
44 else:
45 dict_name = to_camel_case(name)
46
47 kwargs[name] = _to_type(value.get(dict_name), field.type)
48
49 return annotation(**kwargs)
50
51 raise UnsupportedTypeError(annotation)
52
53
54 def convert_args(args, annotations):
55 """Converts a nested dictionary to a dictionary of strawberry input types."""
56
57 converted_args = {}
58
59 for key, value in args.items():
60 key = to_snake_case(key)
61 annotation = annotations[key]
62
63 converted_args[key] = _to_type(value, annotation)
64
65 return converted_args
66
[end of strawberry/utils/arguments.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/utils/arguments.py b/strawberry/utils/arguments.py
--- a/strawberry/utils/arguments.py
+++ b/strawberry/utils/arguments.py
@@ -1,19 +1,49 @@
import enum
+import typing
from dataclasses import is_dataclass
from datetime import date, datetime, time
from ..exceptions import UnsupportedTypeError
-from .str_converters import to_camel_case, to_snake_case
+from .str_converters import to_camel_case
from .typing import get_list_annotation, get_optional_annotation, is_list, is_optional
SCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]
-def _to_type(value, annotation):
+class _Unset:
+ def __str__(self):
+ return ""
+
+ def __bool__(self):
+ return False
+
+
+UNSET = _Unset()
+
+
+def is_unset(value: typing.Any) -> bool:
+ return value is UNSET
+
+
+def convert_args(
+ value: typing.Union[typing.Dict[str, typing.Any], typing.Any],
+ annotation: typing.Union[typing.Dict[str, typing.Type], typing.Type],
+):
+ """Converts a nested dictionary to a dictionary of actual types.
+
+ It deals with conversion of input types to proper dataclasses and
+ also uses a sentinel value for unset values."""
+
+ if annotation == {}:
+ return value
+
if value is None:
return None
+ if is_unset(value):
+ return value
+
if is_optional(annotation):
annotation = get_optional_annotation(annotation)
@@ -24,19 +54,27 @@
# Convert Enum fields to instances using the value. This is safe
# because graphql-core has already validated the input.
if isinstance(annotation, enum.EnumMeta):
- return annotation(value)
+ return annotation(value) # type: ignore
if is_list(annotation):
annotation = get_list_annotation(annotation)
- return [_to_type(x, annotation) for x in value]
+ return [convert_args(x, annotation) for x in value]
+
+ fields = None
- if is_dataclass(annotation):
- fields = annotation.__dataclass_fields__
+ # we receive dicts when converting resolvers arguments to
+ # actual types
+ if isinstance(annotation, dict):
+ fields = annotation.items()
+ elif is_dataclass(annotation):
+ fields = annotation.__dataclass_fields__.items()
+
+ if fields:
kwargs = {}
- for name, field in fields.items():
+ for name, field in fields:
dict_name = name
if hasattr(field, "field_name") and field.field_name:
@@ -44,22 +82,19 @@
else:
dict_name = to_camel_case(name)
- kwargs[name] = _to_type(value.get(dict_name), field.type)
-
- return annotation(**kwargs)
-
- raise UnsupportedTypeError(annotation)
-
-
-def convert_args(args, annotations):
- """Converts a nested dictionary to a dictionary of strawberry input types."""
+ # dataclasses field have a .type attribute
+ if hasattr(field, "type"):
+ field_type = field.type
+ # meanwhile when using dicts the value of the field is
+ # the actual type, for example in: { 'name': str }
+ else:
+ field_type = field
- converted_args = {}
+ kwargs[name] = convert_args(value.get(dict_name, UNSET), field_type)
- for key, value in args.items():
- key = to_snake_case(key)
- annotation = annotations[key]
+ if is_dataclass(annotation):
+ return annotation(**kwargs) # type: ignore
- converted_args[key] = _to_type(value, annotation)
+ return kwargs
- return converted_args
+ raise UnsupportedTypeError(annotation)
|
{"golden_diff": "diff --git a/strawberry/utils/arguments.py b/strawberry/utils/arguments.py\n--- a/strawberry/utils/arguments.py\n+++ b/strawberry/utils/arguments.py\n@@ -1,19 +1,49 @@\n import enum\n+import typing\n from dataclasses import is_dataclass\n from datetime import date, datetime, time\n \n from ..exceptions import UnsupportedTypeError\n-from .str_converters import to_camel_case, to_snake_case\n+from .str_converters import to_camel_case\n from .typing import get_list_annotation, get_optional_annotation, is_list, is_optional\n \n \n SCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]\n \n \n-def _to_type(value, annotation):\n+class _Unset:\n+ def __str__(self):\n+ return \"\"\n+\n+ def __bool__(self):\n+ return False\n+\n+\n+UNSET = _Unset()\n+\n+\n+def is_unset(value: typing.Any) -> bool:\n+ return value is UNSET\n+\n+\n+def convert_args(\n+ value: typing.Union[typing.Dict[str, typing.Any], typing.Any],\n+ annotation: typing.Union[typing.Dict[str, typing.Type], typing.Type],\n+):\n+ \"\"\"Converts a nested dictionary to a dictionary of actual types.\n+\n+ It deals with conversion of input types to proper dataclasses and\n+ also uses a sentinel value for unset values.\"\"\"\n+\n+ if annotation == {}:\n+ return value\n+\n if value is None:\n return None\n \n+ if is_unset(value):\n+ return value\n+\n if is_optional(annotation):\n annotation = get_optional_annotation(annotation)\n \n@@ -24,19 +54,27 @@\n # Convert Enum fields to instances using the value. This is safe\n # because graphql-core has already validated the input.\n if isinstance(annotation, enum.EnumMeta):\n- return annotation(value)\n+ return annotation(value) # type: ignore\n \n if is_list(annotation):\n annotation = get_list_annotation(annotation)\n \n- return [_to_type(x, annotation) for x in value]\n+ return [convert_args(x, annotation) for x in value]\n+\n+ fields = None\n \n- if is_dataclass(annotation):\n- fields = annotation.__dataclass_fields__\n+ # we receive dicts when converting resolvers arguments to\n+ # actual types\n+ if isinstance(annotation, dict):\n+ fields = annotation.items()\n \n+ elif is_dataclass(annotation):\n+ fields = annotation.__dataclass_fields__.items()\n+\n+ if fields:\n kwargs = {}\n \n- for name, field in fields.items():\n+ for name, field in fields:\n dict_name = name\n \n if hasattr(field, \"field_name\") and field.field_name:\n@@ -44,22 +82,19 @@\n else:\n dict_name = to_camel_case(name)\n \n- kwargs[name] = _to_type(value.get(dict_name), field.type)\n-\n- return annotation(**kwargs)\n-\n- raise UnsupportedTypeError(annotation)\n-\n-\n-def convert_args(args, annotations):\n- \"\"\"Converts a nested dictionary to a dictionary of strawberry input types.\"\"\"\n+ # dataclasses field have a .type attribute\n+ if hasattr(field, \"type\"):\n+ field_type = field.type\n+ # meanwhile when using dicts the value of the field is\n+ # the actual type, for example in: { 'name': str }\n+ else:\n+ field_type = field\n \n- converted_args = {}\n+ kwargs[name] = convert_args(value.get(dict_name, UNSET), field_type)\n \n- for key, value in args.items():\n- key = to_snake_case(key)\n- annotation = annotations[key]\n+ if is_dataclass(annotation):\n+ return annotation(**kwargs) # type: ignore\n \n- converted_args[key] = _to_type(value, annotation)\n+ return kwargs\n \n- return converted_args\n+ raise UnsupportedTypeError(annotation)\n", "issue": "Use sentinel value for input parameters that aren't sent by the clients\nWhen using input types with optional fields we cannot differentiate by 
fields that have sent as null and fields that haven't been sent at all.\r\n\r\nSo I think we should use a sentinel value that tells the field is unset, and also behaves as falsy:\r\n\r\n```python\r\nclass _Unset:\r\n def __bool__(self): return False\r\n\r\nUNSET = _Unset()\r\n\r\n# this utility might be useful, so we don't have to use an internal representation\r\ndef is_unset(value: Any):\r\n return value is UNSET\r\n```\r\n\r\nthen we can use this class when instantiating the input types for a resolver:)\n", "before_files": [{"content": "import enum\nfrom dataclasses import is_dataclass\nfrom datetime import date, datetime, time\n\nfrom ..exceptions import UnsupportedTypeError\nfrom .str_converters import to_camel_case, to_snake_case\nfrom .typing import get_list_annotation, get_optional_annotation, is_list, is_optional\n\n\nSCALAR_TYPES = [int, str, float, bytes, bool, datetime, date, time]\n\n\ndef _to_type(value, annotation):\n if value is None:\n return None\n\n if is_optional(annotation):\n annotation = get_optional_annotation(annotation)\n\n # TODO: change this to be a is_scalar util and make sure it works with any scalar\n if getattr(annotation, \"__supertype__\", annotation) in SCALAR_TYPES:\n return value\n\n # Convert Enum fields to instances using the value. This is safe\n # because graphql-core has already validated the input.\n if isinstance(annotation, enum.EnumMeta):\n return annotation(value)\n\n if is_list(annotation):\n annotation = get_list_annotation(annotation)\n\n return [_to_type(x, annotation) for x in value]\n\n if is_dataclass(annotation):\n fields = annotation.__dataclass_fields__\n\n kwargs = {}\n\n for name, field in fields.items():\n dict_name = name\n\n if hasattr(field, \"field_name\") and field.field_name:\n dict_name = field.field_name\n else:\n dict_name = to_camel_case(name)\n\n kwargs[name] = _to_type(value.get(dict_name), field.type)\n\n return annotation(**kwargs)\n\n raise UnsupportedTypeError(annotation)\n\n\ndef convert_args(args, annotations):\n \"\"\"Converts a nested dictionary to a dictionary of strawberry input types.\"\"\"\n\n converted_args = {}\n\n for key, value in args.items():\n key = to_snake_case(key)\n annotation = annotations[key]\n\n converted_args[key] = _to_type(value, annotation)\n\n return converted_args\n", "path": "strawberry/utils/arguments.py"}]}
| 1,212 | 872 |
gh_patches_debug_50122
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-1278
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scrapy logging show UnicodeDecodeError
hello all, I use scrapy 1.00rc1 from pip, my PC is Win7 x64, Python is 2.7.9 (x64).
When I start my project:
``` python
scrapy crawl imzx
Traceback (most recent call last):
File "C:\Python27\lib\logging\__init__.py", line 859, in emit
msg = self.format(record)
File "C:\Python27\lib\logging\__init__.py", line 732, in format
return fmt.format(record)
File "C:\Python27\lib\logging\__init__.py", line 474, in format
s = self._fmt % record.__dict__
UnicodeDecodeError: 'ascii' codec can't decode byte 0xd6 in position 19: ordinal not in range(128)
Logged from file log.py, line 108
```
Then I found utils/log.py, line 108, and commented out the code as you can see:
``` python
#logger.info("Scrapy %(version)s started (bot: %(bot)s)",
#{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
```
And I restarted my project. It's OK, but something about the logging error is still there:
``` python
Traceback (most recent call last):
File "C:\Python27\lib\logging\__init__.py", line 859, in emit
msg = self.format(record)
File "C:\Python27\lib\logging\__init__.py", line 732, in format
return fmt.format(record)
File "C:\Python27\lib\logging\__init__.py", line 474, in format
s = self._fmt % record.__dict__
UnicodeDecodeError: 'ascii' codec can't decode byte 0xd6 in position 19: ordinal not in range(128)
Logged from file engine.py, line 212
```
There is too much of this on my screen. Despite all the logging tracebacks, my code works fine and I get all the data I need. But I'm confused: my code worked fine in scrapy 0.24 without any of the above logging tracebacks, so I think it may be a scrapy bug.
</issue>
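A minimal sketch of one way this error can arise on Python 2.7, assuming that `%z` in `LOG_DATEFORMAT` expands to locale-encoded bytes (for example GBK on a Chinese-locale Windows install); the exact bytes and record values below are made up for illustration:
```python
# Python 2.7: a byte-string format, a byte-string asctime containing non-ASCII
# locale bytes, and a unicode message force an implicit ASCII decode.
fmt = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'   # LOG_FORMAT

record = {
    'asctime': '2015-05-15 12:00:00' + '\xd6\xd0\xb9\xfa\xb1\xea\xd7\xbc\xca\xb1\xbc\xe4',
    'name': 'scrapy.core.engine',
    'levelname': 'INFO',
    'message': u'Spider opened',   # one unicode value is enough to trigger the coercion
}

fmt % record
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xd6 in position 19: ordinal not in range(128)
```
If that assumption holds, it would also explain why the fix below drops `%z` from the default `LOG_DATEFORMAT`.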
<code>
[start of scrapy/settings/default_settings.py]
1 """
2 This module contains the default values for all settings used by Scrapy.
3
4 For more information about these settings you can read the settings
5 documentation in docs/topics/settings.rst
6
7 Scrapy developers, if you add a setting here remember to:
8
9 * add it in alphabetical order
10 * group similar settings without leaving blank lines
11 * add its documentation to the available settings documentation
12 (docs/topics/settings.rst)
13
14 """
15
16 import os
17 import sys
18 from importlib import import_module
19 from os.path import join, abspath, dirname
20
21 AJAXCRAWL_ENABLED = False
22
23 BOT_NAME = 'scrapybot'
24
25 CLOSESPIDER_TIMEOUT = 0
26 CLOSESPIDER_PAGECOUNT = 0
27 CLOSESPIDER_ITEMCOUNT = 0
28 CLOSESPIDER_ERRORCOUNT = 0
29
30 COMMANDS_MODULE = ''
31
32 COMPRESSION_ENABLED = True
33
34 CONCURRENT_ITEMS = 100
35
36 CONCURRENT_REQUESTS = 16
37 CONCURRENT_REQUESTS_PER_DOMAIN = 8
38 CONCURRENT_REQUESTS_PER_IP = 0
39
40 COOKIES_ENABLED = True
41 COOKIES_DEBUG = False
42
43 DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
44
45 DEFAULT_REQUEST_HEADERS = {
46 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
47 'Accept-Language': 'en',
48 }
49
50 DEPTH_LIMIT = 0
51 DEPTH_STATS = True
52 DEPTH_PRIORITY = 0
53
54 DNSCACHE_ENABLED = True
55 DNSCACHE_SIZE = 10000
56 DNS_TIMEOUT = 60
57
58 DOWNLOAD_DELAY = 0
59
60 DOWNLOAD_HANDLERS = {}
61 DOWNLOAD_HANDLERS_BASE = {
62 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
63 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
64 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
65 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
66 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
67 }
68
69 DOWNLOAD_TIMEOUT = 180 # 3mins
70
71 DOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m
72 DOWNLOAD_WARNSIZE = 32*1024*1024 # 32m
73
74 DOWNLOADER = 'scrapy.core.downloader.Downloader'
75
76 DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
77 DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
78
79 DOWNLOADER_MIDDLEWARES = {}
80
81 DOWNLOADER_MIDDLEWARES_BASE = {
82 # Engine side
83 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
84 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,
85 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,
86 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400,
87 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 500,
88 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,
89 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
90 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,
91 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
92 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,
93 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
94 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,
95 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830,
96 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
97 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,
98 # Downloader side
99 }
100
101 DOWNLOADER_STATS = True
102
103 DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'
104
105 try:
106 EDITOR = os.environ['EDITOR']
107 except KeyError:
108 if sys.platform == 'win32':
109 EDITOR = '%s -m idlelib.idle'
110 else:
111 EDITOR = 'vi'
112
113 EXTENSIONS = {}
114
115 EXTENSIONS_BASE = {
116 'scrapy.extensions.corestats.CoreStats': 0,
117 'scrapy.telnet.TelnetConsole': 0,
118 'scrapy.extensions.memusage.MemoryUsage': 0,
119 'scrapy.extensions.memdebug.MemoryDebugger': 0,
120 'scrapy.extensions.closespider.CloseSpider': 0,
121 'scrapy.extensions.feedexport.FeedExporter': 0,
122 'scrapy.extensions.logstats.LogStats': 0,
123 'scrapy.extensions.spiderstate.SpiderState': 0,
124 'scrapy.extensions.throttle.AutoThrottle': 0,
125 }
126
127 FEED_URI = None
128 FEED_URI_PARAMS = None # a function to extend uri arguments
129 FEED_FORMAT = 'jsonlines'
130 FEED_STORE_EMPTY = False
131 FEED_EXPORT_FIELDS = None
132 FEED_STORAGES = {}
133 FEED_STORAGES_BASE = {
134 '': 'scrapy.extensions.feedexport.FileFeedStorage',
135 'file': 'scrapy.extensions.feedexport.FileFeedStorage',
136 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',
137 's3': 'scrapy.extensions.feedexport.S3FeedStorage',
138 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',
139 }
140 FEED_EXPORTERS = {}
141 FEED_EXPORTERS_BASE = {
142 'json': 'scrapy.exporters.JsonItemExporter',
143 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',
144 'jl': 'scrapy.exporters.JsonLinesItemExporter',
145 'csv': 'scrapy.exporters.CsvItemExporter',
146 'xml': 'scrapy.exporters.XmlItemExporter',
147 'marshal': 'scrapy.exporters.MarshalItemExporter',
148 'pickle': 'scrapy.exporters.PickleItemExporter',
149 }
150
151 HTTPCACHE_ENABLED = False
152 HTTPCACHE_DIR = 'httpcache'
153 HTTPCACHE_IGNORE_MISSING = False
154 HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
155 HTTPCACHE_EXPIRATION_SECS = 0
156 HTTPCACHE_IGNORE_HTTP_CODES = []
157 HTTPCACHE_IGNORE_SCHEMES = ['file']
158 HTTPCACHE_DBM_MODULE = 'anydbm'
159 HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'
160 HTTPCACHE_GZIP = False
161
162 ITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'
163
164 ITEM_PIPELINES = {}
165 ITEM_PIPELINES_BASE = {}
166
167 LOG_ENABLED = True
168 LOG_ENCODING = 'utf-8'
169 LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
170 LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'
171 LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S%z'
172 LOG_STDOUT = False
173 LOG_LEVEL = 'DEBUG'
174 LOG_FILE = None
175
176 LOG_UNSERIALIZABLE_REQUESTS = False
177
178 LOGSTATS_INTERVAL = 60.0
179
180 MAIL_HOST = 'localhost'
181 MAIL_PORT = 25
182 MAIL_FROM = 'scrapy@localhost'
183 MAIL_PASS = None
184 MAIL_USER = None
185
186 MEMDEBUG_ENABLED = False # enable memory debugging
187 MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
188
189 MEMUSAGE_ENABLED = False
190 MEMUSAGE_LIMIT_MB = 0
191 MEMUSAGE_NOTIFY_MAIL = []
192 MEMUSAGE_REPORT = False
193 MEMUSAGE_WARNING_MB = 0
194
195 METAREFRESH_ENABLED = True
196 METAREFRESH_MAXDELAY = 100
197
198 NEWSPIDER_MODULE = ''
199
200 RANDOMIZE_DOWNLOAD_DELAY = True
201
202 REACTOR_THREADPOOL_MAXSIZE = 10
203
204 REDIRECT_ENABLED = True
205 REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
206 REDIRECT_PRIORITY_ADJUST = +2
207
208 REFERER_ENABLED = True
209
210 RETRY_ENABLED = True
211 RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
212 RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]
213 RETRY_PRIORITY_ADJUST = -1
214
215 ROBOTSTXT_OBEY = False
216
217 SCHEDULER = 'scrapy.core.scheduler.Scheduler'
218 SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
219 SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
220
221 SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'
222
223 SPIDER_MIDDLEWARES = {}
224
225 SPIDER_MIDDLEWARES_BASE = {
226 # Engine side
227 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,
228 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,
229 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,
230 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,
231 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,
232 # Spider side
233 }
234
235 SPIDER_MODULES = []
236
237 STATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'
238 STATS_DUMP = True
239
240 STATSMAILER_RCPTS = []
241
242 TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
243
244 URLLENGTH_LIMIT = 2083
245
246 USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__
247
248 TELNETCONSOLE_ENABLED = 1
249 TELNETCONSOLE_PORT = [6023, 6073]
250 TELNETCONSOLE_HOST = '127.0.0.1'
251
252 SPIDER_CONTRACTS = {}
253 SPIDER_CONTRACTS_BASE = {
254 'scrapy.contracts.default.UrlContract': 1,
255 'scrapy.contracts.default.ReturnsContract': 2,
256 'scrapy.contracts.default.ScrapesContract': 3,
257 }
258
[end of scrapy/settings/default_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py
--- a/scrapy/settings/default_settings.py
+++ b/scrapy/settings/default_settings.py
@@ -168,7 +168,7 @@
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'
-LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S%z'
+LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
|
{"golden_diff": "diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py\n--- a/scrapy/settings/default_settings.py\n+++ b/scrapy/settings/default_settings.py\n@@ -168,7 +168,7 @@\n LOG_ENCODING = 'utf-8'\n LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\n LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\n-LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S%z'\n+LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'\n LOG_STDOUT = False\n LOG_LEVEL = 'DEBUG'\n LOG_FILE = None\n", "issue": "scrapy logging show UnicodeDecodeError\nhello,all, I use scrapy 1.00rc1 from pip ,my pc is win7x64,python is 2.7.9(x64),\nwhen I start my project \n\n``` python\nscrapy crawl imzx\nTraceback (most recent call last):\n File \"C:\\Python27\\lib\\logging\\__init__.py\", line 859, in emit\n msg = self.format(record)\n File \"C:\\Python27\\lib\\logging\\__init__.py\", line 732, in format\n return fmt.format(record)\n File \"C:\\Python27\\lib\\logging\\__init__.py\", line 474, in format\n s = self._fmt % record.__dict__\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xd6 in position 19: ordinal not in range(128)\nLogged from file log.py, line 108\n```\n\nThen I find utils/log.py ,line 108 ,I commented-out the code as you can see\n\n``` python\n#logger.info(\"Scrapy %(version)s started (bot: %(bot)s)\",\n #{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})\n```\n\nAnd I restart my project,it's OK ,but something about logging error still:\n\n``` python\nTraceback (most recent call last):\n File \"C:\\Python27\\lib\\logging\\__init__.py\", line 859, in emit\n msg = self.format(record)\n File \"C:\\Python27\\lib\\logging\\__init__.py\", line 732, in format\n return fmt.format(record)\n File \"C:\\Python27\\lib\\logging\\__init__.py\", line 474, in format\n s = self._fmt % record.__dict__\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xd6 in position 19: ordinal not in range(128)\nLogged from file engine.py, line 212\n```\n\nIt's to much in my screen,though too many logging Traceback , my code work fine ,I get all data I need, But I confused,my code work fine in scrapy 0.24 without any above logging Traceback,so I think it's maybe scrapy bug \n\n", "before_files": [{"content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group similar settings without leaving blank lines\n* add its documentation to the available settings documentation\n (docs/topics/settings.rst)\n\n\"\"\"\n\nimport os\nimport sys\nfrom importlib import import_module\nfrom os.path import join, abspath, dirname\n\nAJAXCRAWL_ENABLED = False\n\nBOT_NAME = 'scrapybot'\n\nCLOSESPIDER_TIMEOUT = 0\nCLOSESPIDER_PAGECOUNT = 0\nCLOSESPIDER_ITEMCOUNT = 0\nCLOSESPIDER_ERRORCOUNT = 0\n\nCOMMANDS_MODULE = ''\n\nCOMPRESSION_ENABLED = True\n\nCONCURRENT_ITEMS = 100\n\nCONCURRENT_REQUESTS = 16\nCONCURRENT_REQUESTS_PER_DOMAIN = 8\nCONCURRENT_REQUESTS_PER_IP = 0\n\nCOOKIES_ENABLED = True\nCOOKIES_DEBUG = False\n\nDEFAULT_ITEM_CLASS = 'scrapy.item.Item'\n\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\nDEPTH_LIMIT = 0\nDEPTH_STATS = True\nDEPTH_PRIORITY = 0\n\nDNSCACHE_ENABLED = True\nDNSCACHE_SIZE = 10000\nDNS_TIMEOUT = 60\n\nDOWNLOAD_DELAY = 0\n\nDOWNLOAD_HANDLERS = 
{}\nDOWNLOAD_HANDLERS_BASE = {\n 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',\n 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',\n 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',\n}\n\nDOWNLOAD_TIMEOUT = 180 # 3mins\n\nDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m\nDOWNLOAD_WARNSIZE = 32*1024*1024 # 32m\n\nDOWNLOADER = 'scrapy.core.downloader.Downloader'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'\nDOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'\n\nDOWNLOADER_MIDDLEWARES = {}\n\nDOWNLOADER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 500,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,\n 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,\n 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,\n 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,\n # Downloader side\n}\n\nDOWNLOADER_STATS = True\n\nDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'\n\ntry:\n EDITOR = os.environ['EDITOR']\nexcept KeyError:\n if sys.platform == 'win32':\n EDITOR = '%s -m idlelib.idle'\n else:\n EDITOR = 'vi'\n\nEXTENSIONS = {}\n\nEXTENSIONS_BASE = {\n 'scrapy.extensions.corestats.CoreStats': 0,\n 'scrapy.telnet.TelnetConsole': 0,\n 'scrapy.extensions.memusage.MemoryUsage': 0,\n 'scrapy.extensions.memdebug.MemoryDebugger': 0,\n 'scrapy.extensions.closespider.CloseSpider': 0,\n 'scrapy.extensions.feedexport.FeedExporter': 0,\n 'scrapy.extensions.logstats.LogStats': 0,\n 'scrapy.extensions.spiderstate.SpiderState': 0,\n 'scrapy.extensions.throttle.AutoThrottle': 0,\n}\n\nFEED_URI = None\nFEED_URI_PARAMS = None # a function to extend uri arguments\nFEED_FORMAT = 'jsonlines'\nFEED_STORE_EMPTY = False\nFEED_EXPORT_FIELDS = None\nFEED_STORAGES = {}\nFEED_STORAGES_BASE = {\n '': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'file': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',\n 's3': 'scrapy.extensions.feedexport.S3FeedStorage',\n 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',\n}\nFEED_EXPORTERS = {}\nFEED_EXPORTERS_BASE = {\n 'json': 'scrapy.exporters.JsonItemExporter',\n 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',\n 'jl': 'scrapy.exporters.JsonLinesItemExporter',\n 'csv': 'scrapy.exporters.CsvItemExporter',\n 'xml': 'scrapy.exporters.XmlItemExporter',\n 'marshal': 'scrapy.exporters.MarshalItemExporter',\n 'pickle': 'scrapy.exporters.PickleItemExporter',\n}\n\nHTTPCACHE_ENABLED = False\nHTTPCACHE_DIR = 
'httpcache'\nHTTPCACHE_IGNORE_MISSING = False\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_IGNORE_SCHEMES = ['file']\nHTTPCACHE_DBM_MODULE = 'anydbm'\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\nHTTPCACHE_GZIP = False\n\nITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'\n\nITEM_PIPELINES = {}\nITEM_PIPELINES_BASE = {}\n\nLOG_ENABLED = True\nLOG_ENCODING = 'utf-8'\nLOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\nLOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\nLOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S%z'\nLOG_STDOUT = False\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = None\n\nLOG_UNSERIALIZABLE_REQUESTS = False\n\nLOGSTATS_INTERVAL = 60.0\n\nMAIL_HOST = 'localhost'\nMAIL_PORT = 25\nMAIL_FROM = 'scrapy@localhost'\nMAIL_PASS = None\nMAIL_USER = None\n\nMEMDEBUG_ENABLED = False # enable memory debugging\nMEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown\n\nMEMUSAGE_ENABLED = False\nMEMUSAGE_LIMIT_MB = 0\nMEMUSAGE_NOTIFY_MAIL = []\nMEMUSAGE_REPORT = False\nMEMUSAGE_WARNING_MB = 0\n\nMETAREFRESH_ENABLED = True\nMETAREFRESH_MAXDELAY = 100\n\nNEWSPIDER_MODULE = ''\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\nREACTOR_THREADPOOL_MAXSIZE = 10\n\nREDIRECT_ENABLED = True\nREDIRECT_MAX_TIMES = 20 # uses Firefox default setting\nREDIRECT_PRIORITY_ADJUST = +2\n\nREFERER_ENABLED = True\n\nRETRY_ENABLED = True\nRETRY_TIMES = 2 # initial response + 2 retries = 3 requests\nRETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]\nRETRY_PRIORITY_ADJUST = -1\n\nROBOTSTXT_OBEY = False\n\nSCHEDULER = 'scrapy.core.scheduler.Scheduler'\nSCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'\nSCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'\n\nSPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'\n\nSPIDER_MIDDLEWARES = {}\n\nSPIDER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,\n 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,\n 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,\n 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,\n # Spider side\n}\n\nSPIDER_MODULES = []\n\nSTATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'\nSTATS_DUMP = True\n\nSTATSMAILER_RCPTS = []\n\nTEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))\n\nURLLENGTH_LIMIT = 2083\n\nUSER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__\n\nTELNETCONSOLE_ENABLED = 1\nTELNETCONSOLE_PORT = [6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\n\nSPIDER_CONTRACTS = {}\nSPIDER_CONTRACTS_BASE = {\n 'scrapy.contracts.default.UrlContract': 1,\n 'scrapy.contracts.default.ReturnsContract': 2,\n 'scrapy.contracts.default.ScrapesContract': 3,\n}\n", "path": "scrapy/settings/default_settings.py"}]}
| 3,914 | 145 |
gh_patches_debug_15562
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-3684
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FR] Improve setuptools tracebacks for invalid entry points
### setuptools version
65.5.1
### Python version
3.8.13
### OS
macOS 10.15.7
### Additional environment information
macOS 10.15.7
python 3.8.13 installed via pyenv
virtualenv has
```console
> pip list
Package Version
------------------ ---------
build 0.9.0
certifi 2022.9.24
charset-normalizer 2.1.1
idna 3.4
importlib-metadata 5.0.0
jaraco.classes 3.2.3
keyring 23.11.0
more-itertools 9.0.0
packaging 21.3
pep517 0.13.0
pip 22.3.1
pyparsing 3.0.9
pyvmomi 7.0.3
requests 2.28.1
setuptools 65.5.1
six 1.16.0
tomli 2.0.1
urllib3 1.26.12
zipp 3.10.0
```
(Top level is requests, pyvmomi, and a build dep of "build"; the rest are dependencies)
### Description
Attempting to do a "simple" build (what *I* thought was a simple build) with the "build" package.
My pyproject.toml:
```console
[build-system]
requires = [
"setuptools",
"build"
]
build-backend = "setuptools.build_meta"
```
My setup.cfg
```console
[metadata]
name = monitor-vsphere
version = attr: monitor-vsphere.main.__version__
author = Hunter Matthews
author_email = [email protected]
description = Query and compare VM's in both NHGRI's vsphere and Device42 systems. Report differences.
long_description = file: README.md
long_description_content_type = text/markdown
url = https://github.com/NHGRI/monitor-vsphere
project_urls =
Bug Tracker = https://github.com/NHGRI/monitor-vsphere/issues
classifiers =
Programming Language :: Python :: 3
Operating System :: OS Independent
[options]
python_requires = >=3.8
#include_package_data = True
package_dir =
=src
packages = find:
install_requires =
keyring
requests
pyvmomi
zip_safe = False ## namespace pkgs are not zip safe - probably
[options.packages.find]
where = src
[options.extras_require]
dev = build
[options.entry_points]
console_scripts =
monitor-vsphere = monitor-vsphere.main:main
```
-----------------------------------------------
I do NOT have a setup.py
My development tree after an unsuccessful build:
> tree
.
├── pyproject.toml
├── setup.cfg
└── src
├── monitor-vsphere
│ ├── __init__.py
│ ├── __pycache__
│ │ └── __init__.cpython-38.pyc
│ ├── auth.py
│ ├── device42.py
│ ├── dotdict.py
│ ├── logs.py
│ ├── main.py
│ ├── virtualmachine.py
│ └── vsphere.py
└── monitor_vsphere.egg-info
├── PKG-INFO
├── dependency_links.txt
└── not-zip-safe
### Expected behavior
Build a sdist and a wheel.
### How to Reproduce
```console
python -m build
```
My apologies if this is something very simple I've missed - the traceback is generic enough (a regex failure) that my google fu was weak on this one.
I did try downgrading both setuptools and importlib-metadata a couple of versions (guessing, based on the traceback) but no joy.
Please let me know if other information is needed.
### Output
```console
> python -m build
* Creating venv isolated environment...
* Installing packages in isolated environment... (build, setuptools)
* Getting build dependencies for sdist...
running egg_info
writing src/monitor_vsphere.egg-info/PKG-INFO
writing dependency_links to src/monitor_vsphere.egg-info/dependency_links.txt
Traceback (most recent call last):
File "/Users/matthewsht/projects/monitor-vsphere/.direnv/python-3.8.13/lib/python3.8/site-packages/pep517/in_process/_in_process.py", line 351, in <module>
main()
File "/Users/matthewsht/projects/monitor-vsphere/.direnv/python-3.8.13/lib/python3.8/site-packages/pep517/in_process/_in_process.py", line 333, in main
json_out['return_val'] = hook(**hook_input['kwargs'])
File "/Users/matthewsht/projects/monitor-vsphere/.direnv/python-3.8.13/lib/python3.8/site-packages/pep517/in_process/_in_process.py", line 285, in get_requires_for_build_sdist
return hook(config_settings)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/build_meta.py", line 341, in get_requires_for_build_sdist
return self._get_build_requires(config_settings, requirements=[])
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/build_meta.py", line 320, in _get_build_requires
self.run_setup()
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/build_meta.py", line 335, in run_setup
exec(code, locals())
File "<string>", line 1, in <module>
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/__init__.py", line 87, in setup
return distutils.core.setup(**attrs)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/core.py", line 185, in setup
return run_commands(dist)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/core.py", line 201, in run_commands
dist.run_commands()
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 968, in run_commands
self.run_command(cmd)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/dist.py", line 1217, in run_command
super().run_command(command)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 987, in run_command
cmd_obj.run()
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/command/egg_info.py", line 301, in run
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/command/egg_info.py", line 741, in write_entries
eps = _entry_points.load(cmd.distribution.entry_points)
File "/Users/matthewsht/.local/pyenv/versions/3.8.13/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_entry_points.py", line 51, in load
return validate(metadata.EntryPoints(groups))
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_entry_points.py", line 39, in validate
consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_vendor/more_itertools/recipes.py", line 139, in consume
deque(iterator, maxlen=0)
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_entry_points.py", line 17, in ensure_valid
ep.extras
File "/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_vendor/importlib_metadata/__init__.py", line 211, in extras
return list(re.finditer(r'\w+', match.group('extras') or ''))
AttributeError: 'NoneType' object has no attribute 'group'
ERROR Backend subprocess exited when trying to invoke get_requires_for_build_sdist
```
</issue>
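The traceback above bottoms out in `importlib_metadata`'s entry-point value pattern, and the `console_scripts` value in the setup.cfg uses a hyphenated module path (`monitor-vsphere.main:main`), which that pattern cannot match. Below is a minimal sketch of the same failure outside setuptools, assuming a Python 3.8-era `importlib.metadata` (newer releases may report this differently); the names simply mirror the setup.cfg and are not meaningful:

```python
from importlib.metadata import EntryPoint

ep = EntryPoint(
    name="monitor-vsphere",
    value="monitor-vsphere.main:main",  # hyphen in the module path defeats the value regex
    group="console_scripts",
)

try:
    ep.extras  # the property regex-matches .value; with no match, match.group(...) fails
    print("value parsed cleanly")
except AttributeError as exc:
    print("same failure as the build:", exc)
```

The user-side remedy is presumably to keep the hyphen only in the script name and point at an importable, underscore-separated module, e.g. `monitor-vsphere = monitor_vsphere.main:main`.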
<code>
[start of setuptools/_entry_points.py]
1 import functools
2 import operator
3 import itertools
4
5 from .extern.jaraco.text import yield_lines
6 from .extern.jaraco.functools import pass_none
7 from ._importlib import metadata
8 from ._itertools import ensure_unique
9 from .extern.more_itertools import consume
10
11
12 def ensure_valid(ep):
13 """
14 Exercise one of the dynamic properties to trigger
15 the pattern match.
16 """
17 ep.extras
18
19
20 def load_group(value, group):
21 """
22 Given a value of an entry point or series of entry points,
23 return each as an EntryPoint.
24 """
25 # normalize to a single sequence of lines
26 lines = yield_lines(value)
27 text = f'[{group}]\n' + '\n'.join(lines)
28 return metadata.EntryPoints._from_text(text)
29
30
31 def by_group_and_name(ep):
32 return ep.group, ep.name
33
34
35 def validate(eps: metadata.EntryPoints):
36 """
37 Ensure entry points are unique by group and name and validate each.
38 """
39 consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))
40 return eps
41
42
43 @functools.singledispatch
44 def load(eps):
45 """
46 Given a Distribution.entry_points, produce EntryPoints.
47 """
48 groups = itertools.chain.from_iterable(
49 load_group(value, group)
50 for group, value in eps.items())
51 return validate(metadata.EntryPoints(groups))
52
53
54 @load.register(str)
55 def _(eps):
56 r"""
57 >>> ep, = load('[console_scripts]\nfoo=bar')
58 >>> ep.group
59 'console_scripts'
60 >>> ep.name
61 'foo'
62 >>> ep.value
63 'bar'
64 """
65 return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))
66
67
68 load.register(type(None), lambda x: x)
69
70
71 @pass_none
72 def render(eps: metadata.EntryPoints):
73 by_group = operator.attrgetter('group')
74 groups = itertools.groupby(sorted(eps, key=by_group), by_group)
75
76 return '\n'.join(
77 f'[{group}]\n{render_items(items)}\n'
78 for group, items in groups
79 )
80
81
82 def render_items(eps):
83 return '\n'.join(
84 f'{ep.name} = {ep.value}'
85 for ep in sorted(eps)
86 )
87
[end of setuptools/_entry_points.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setuptools/_entry_points.py b/setuptools/_entry_points.py
--- a/setuptools/_entry_points.py
+++ b/setuptools/_entry_points.py
@@ -2,6 +2,7 @@
import operator
import itertools
+from .errors import OptionError
from .extern.jaraco.text import yield_lines
from .extern.jaraco.functools import pass_none
from ._importlib import metadata
@@ -14,7 +15,14 @@
Exercise one of the dynamic properties to trigger
the pattern match.
"""
- ep.extras
+ try:
+ ep.extras
+ except AttributeError as ex:
+ msg = (
+ f"Problems to parse {ep}.\nPlease ensure entry-point follows the spec: "
+ "https://packaging.python.org/en/latest/specifications/entry-points/"
+ )
+ raise OptionError(msg) from ex
def load_group(value, group):
|
{"golden_diff": "diff --git a/setuptools/_entry_points.py b/setuptools/_entry_points.py\n--- a/setuptools/_entry_points.py\n+++ b/setuptools/_entry_points.py\n@@ -2,6 +2,7 @@\n import operator\n import itertools\n \n+from .errors import OptionError\n from .extern.jaraco.text import yield_lines\n from .extern.jaraco.functools import pass_none\n from ._importlib import metadata\n@@ -14,7 +15,14 @@\n Exercise one of the dynamic properties to trigger\n the pattern match.\n \"\"\"\n- ep.extras\n+ try:\n+ ep.extras\n+ except AttributeError as ex:\n+ msg = (\n+ f\"Problems to parse {ep}.\\nPlease ensure entry-point follows the spec: \"\n+ \"https://packaging.python.org/en/latest/specifications/entry-points/\"\n+ )\n+ raise OptionError(msg) from ex\n \n \n def load_group(value, group):\n", "issue": "[FR] Improve setuptools tracebacks for invalid entry points\n### setuptools version\r\n\r\n65.5.1\r\n\r\n### Python version\r\n\r\n3.8.13\r\n\r\n### OS\r\n\r\nmacOS 10.15.7\r\n\r\n### Additional environment information\r\n\r\nmacOS 10.15.7\r\npython 3.8.13 installed via pyenv\r\nvirtualenv has \r\n```console\r\n> pip list\r\nPackage Version\r\n------------------ ---------\r\nbuild 0.9.0\r\ncertifi 2022.9.24\r\ncharset-normalizer 2.1.1\r\nidna 3.4\r\nimportlib-metadata 5.0.0\r\njaraco.classes 3.2.3\r\nkeyring 23.11.0\r\nmore-itertools 9.0.0\r\npackaging 21.3\r\npep517 0.13.0\r\npip 22.3.1\r\npyparsing 3.0.9\r\npyvmomi 7.0.3\r\nrequests 2.28.1\r\nsetuptools 65.5.1\r\nsix 1.16.0\r\ntomli 2.0.1\r\nurllib3 1.26.12\r\nzipp 3.10.0\r\n```\r\n(Top level is requests, pyvmomi, and a build dep of \"build\" - rest are dependancies)\r\n\r\n\r\n\r\n### Description\r\n\r\nAttempting to do a \"simple\" (what *I* thought was a simple build with \"build\" package.\r\n\r\nMy pyproject.toml:\r\n```console\r\n\r\n[build-system]\r\nrequires = [\r\n \"setuptools\",\r\n \"build\"\r\n]\r\n\r\nbuild-backend = \"setuptools.build_meta\"\r\n```\r\n\r\n\r\nMy setup.cfg\r\n```console\r\n\r\n[metadata]\r\nname = monitor-vsphere\r\nversion = attr: monitor-vsphere.main.__version__\r\n\r\nauthor = Hunter Matthews\r\nauthor_email = [email protected]\r\ndescription = Query and compare VM's in both NHGRI's vsphere and Device42 systems. 
Report differences.\r\nlong_description = file: README.md\r\nlong_description_content_type = text/markdown\r\nurl = https://github.com/NHGRI/monitor-vsphere\r\nproject_urls =\r\n Bug Tracker = https://github.com/NHGRI/monitor-vsphere/issues\r\nclassifiers =\r\n Programming Language :: Python :: 3\r\n Operating System :: OS Independent\r\n\r\n[options]\r\npython_requires = >=3.8\r\n#include_package_data = True\r\npackage_dir =\r\n =src\r\npackages = find:\r\ninstall_requires =\r\n keyring\r\n requests\r\n pyvmomi\r\nzip_safe = False ## namespace pkgs are not zip safe - probably\r\n\r\n[options.packages.find]\r\nwhere = src\r\n\r\n[options.extras_require]\r\ndev = build\r\n\r\n[options.entry_points]\r\nconsole_scripts =\r\n monitor-vsphere = monitor-vsphere.main:main\r\n\r\n```\r\n-----------------------------------------------\r\n\r\nI do NOT have a setup.py\r\n\r\n\r\nMy development tree after an unsuccessful build:\r\n> tree\r\n.\r\n\r\n\u251c\u2500\u2500 pyproject.toml\r\n\u251c\u2500\u2500 setup.cfg\r\n\u2514\u2500\u2500 src\r\n \u251c\u2500\u2500 monitor-vsphere\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 __init__.py\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 __pycache__\r\n \u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 __init__.cpython-38.pyc\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 auth.py\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 device42.py\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 dotdict.py\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 logs.py\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 main.py\r\n \u2502\u00a0\u00a0 \u251c\u2500\u2500 virtualmachine.py\r\n \u2502\u00a0\u00a0 \u2514\u2500\u2500 vsphere.py\r\n \u2514\u2500\u2500 monitor_vsphere.egg-info\r\n \u251c\u2500\u2500 PKG-INFO\r\n \u251c\u2500\u2500 dependency_links.txt\r\n \u2514\u2500\u2500 not-zip-safe\r\n\r\n### Expected behavior\r\n\r\nBuild a sdist and a wheel. \r\n\r\n### How to Reproduce\r\n\r\n```console\r\npython -m build\r\n```\r\n\r\nMy apologies if this something very simple I've missed - the traceback is generic enough (a regex failure) that my google fu was weak on this one. \r\n\r\nI did try downgrading both setuptools and importlib-metadata a couple of versions (guessing, based on the traceback) but no joy. \r\n\r\nPlease let me know if other information is needed.\r\n\r\n\r\n### Output\r\n\r\n```console\r\n> python -m build\r\n* Creating venv isolated environment...\r\n* Installing packages in isolated environment... 
(build, setuptools)\r\n* Getting build dependencies for sdist...\r\nrunning egg_info\r\nwriting src/monitor_vsphere.egg-info/PKG-INFO\r\nwriting dependency_links to src/monitor_vsphere.egg-info/dependency_links.txt\r\nTraceback (most recent call last):\r\n File \"/Users/matthewsht/projects/monitor-vsphere/.direnv/python-3.8.13/lib/python3.8/site-packages/pep517/in_process/_in_process.py\", line 351, in <module>\r\n main()\r\n File \"/Users/matthewsht/projects/monitor-vsphere/.direnv/python-3.8.13/lib/python3.8/site-packages/pep517/in_process/_in_process.py\", line 333, in main\r\n json_out['return_val'] = hook(**hook_input['kwargs'])\r\n File \"/Users/matthewsht/projects/monitor-vsphere/.direnv/python-3.8.13/lib/python3.8/site-packages/pep517/in_process/_in_process.py\", line 285, in get_requires_for_build_sdist\r\n return hook(config_settings)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/build_meta.py\", line 341, in get_requires_for_build_sdist\r\n return self._get_build_requires(config_settings, requirements=[])\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/build_meta.py\", line 320, in _get_build_requires\r\n self.run_setup()\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/build_meta.py\", line 335, in run_setup\r\n exec(code, locals())\r\n File \"<string>\", line 1, in <module>\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/__init__.py\", line 87, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/core.py\", line 185, in setup\r\n return run_commands(dist)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/core.py\", line 201, in run_commands\r\n dist.run_commands()\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 968, in run_commands\r\n self.run_command(cmd)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/dist.py\", line 1217, in run_command\r\n super().run_command(command)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 987, in run_command\r\n cmd_obj.run()\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/command/egg_info.py\", line 301, in run\r\n writer(self, ep.name, os.path.join(self.egg_info, ep.name))\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/command/egg_info.py\", line 741, in write_entries\r\n eps = _entry_points.load(cmd.distribution.entry_points)\r\n File \"/Users/matthewsht/.local/pyenv/versions/3.8.13/lib/python3.8/functools.py\", line 875, in wrapper\r\n return dispatch(args[0].__class__)(*args, **kw)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_entry_points.py\", line 51, in load\r\n return 
validate(metadata.EntryPoints(groups))\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_entry_points.py\", line 39, in validate\r\n consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_vendor/more_itertools/recipes.py\", line 139, in consume\r\n deque(iterator, maxlen=0)\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_entry_points.py\", line 17, in ensure_valid\r\n ep.extras\r\n File \"/private/var/folders/kg/ylmhpwmn3hb2bdlwl84t6c989ztfk2/T/build-env-vqmomun4/lib/python3.8/site-packages/setuptools/_vendor/importlib_metadata/__init__.py\", line 211, in extras\r\n return list(re.finditer(r'\\w+', match.group('extras') or ''))\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\n\r\nERROR Backend subprocess exited when trying to invoke get_requires_for_build_sdist\r\n```\r\n\n", "before_files": [{"content": "import functools\nimport operator\nimport itertools\n\nfrom .extern.jaraco.text import yield_lines\nfrom .extern.jaraco.functools import pass_none\nfrom ._importlib import metadata\nfrom ._itertools import ensure_unique\nfrom .extern.more_itertools import consume\n\n\ndef ensure_valid(ep):\n \"\"\"\n Exercise one of the dynamic properties to trigger\n the pattern match.\n \"\"\"\n ep.extras\n\n\ndef load_group(value, group):\n \"\"\"\n Given a value of an entry point or series of entry points,\n return each as an EntryPoint.\n \"\"\"\n # normalize to a single sequence of lines\n lines = yield_lines(value)\n text = f'[{group}]\\n' + '\\n'.join(lines)\n return metadata.EntryPoints._from_text(text)\n\n\ndef by_group_and_name(ep):\n return ep.group, ep.name\n\n\ndef validate(eps: metadata.EntryPoints):\n \"\"\"\n Ensure entry points are unique by group and name and validate each.\n \"\"\"\n consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))\n return eps\n\n\[email protected]\ndef load(eps):\n \"\"\"\n Given a Distribution.entry_points, produce EntryPoints.\n \"\"\"\n groups = itertools.chain.from_iterable(\n load_group(value, group)\n for group, value in eps.items())\n return validate(metadata.EntryPoints(groups))\n\n\[email protected](str)\ndef _(eps):\n r\"\"\"\n >>> ep, = load('[console_scripts]\\nfoo=bar')\n >>> ep.group\n 'console_scripts'\n >>> ep.name\n 'foo'\n >>> ep.value\n 'bar'\n \"\"\"\n return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))\n\n\nload.register(type(None), lambda x: x)\n\n\n@pass_none\ndef render(eps: metadata.EntryPoints):\n by_group = operator.attrgetter('group')\n groups = itertools.groupby(sorted(eps, key=by_group), by_group)\n\n return '\\n'.join(\n f'[{group}]\\n{render_items(items)}\\n'\n for group, items in groups\n )\n\n\ndef render_items(eps):\n return '\\n'.join(\n f'{ep.name} = {ep.value}'\n for ep in sorted(eps)\n )\n", "path": "setuptools/_entry_points.py"}]}
| 3,654 | 207 |
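The root cause in the record above is the entry-point *value* rather than the script name: the entry-points specification requires the object reference to be dot-separated Python identifiers, so a hyphenated module path can never parse. A rough probe of that shape follows; the regex below is an approximation for illustration, not setuptools' or importlib's own pattern:

```python
import re

VALUE = re.compile(r"[\w.]+\s*(:\s*[\w.]+\s*)?(\[.*\])?\s*$")  # approximate "module[:attr][extras]"

for value in ("monitor-vsphere.main:main", "monitor_vsphere.main:main"):
    verdict = "parses" if VALUE.match(value) else "does not parse"
    print(f"{value!r}: {verdict}")
```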
gh_patches_debug_34000
|
rasdani/github-patches
|
git_diff
|
AlexsLemonade__refinebio-2280
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Engagement bot thinks every user is a returning user
### Context
https://alexslemonade.slack.com/archives/CRK42AL1Y/p1587988808265500
### Problem or idea
@dvenprasad says 6 of those are new users. There must be a bug in the queries it uses or something.
### Solution or next step
Fix the engagement bot so it reports new users as new users.
</issue>
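The classification in `post_downloads_summary.py` below filters for annotations created *before* the reporting window and then treats the truthiness of that queryset as "new user", which inverts the two buckets: a first-time downloader has no prior annotations, so the queryset is empty and they fall into the returning branch. A self-contained sketch of the inversion, with `history_count` standing in for the queryset:

```python
def classify_buggy(history_count: int) -> str:
    is_new_user = history_count  # truthy exactly when the user HAS downloads before the window
    return "new" if is_new_user else "returning"

def classify_fixed(history_count: int) -> str:
    is_returning_user = history_count
    return "returning" if is_returning_user else "new"

for count in (0, 3):
    print(count, classify_buggy(count), classify_fixed(count))
# a first-time user (0) is labelled "returning" by the buggy branch and "new" by the fix
```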
<code>
[start of api/data_refinery_api/management/commands/post_downloads_summary.py]
1 import datetime
2
3 from django.conf import settings
4 from django.core.management.base import BaseCommand
5 from django.utils import timezone
6
7 import requests
8
9 from data_refinery_common.models import DatasetAnnotation
10
11
12 class Command(BaseCommand):
13 help = "Post downloads summary to slack"
14
15 def add_arguments(self, parser):
16 parser.add_argument(
17 "--days",
18 type=int,
19 default=7, # default to a week
20 help=("Number of days in the past for which to build the stats"),
21 )
22 parser.add_argument(
23 "--channel",
24 type=str,
25 default="ccdl-general",
26 help=("Optional parameter to choose the channel where the message will be posted."),
27 )
28
29 def handle(self, *args, **options):
30 days = options["days"]
31 start_time = timezone.now() - datetime.timedelta(days=days)
32
33 annotation_queryset = DatasetAnnotation.objects.filter(
34 created_at__gt=start_time
35 ).prefetch_related("dataset")
36 annotations = [
37 annotation
38 for annotation in annotation_queryset
39 if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
40 ]
41
42 unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
43 unique_ips = list(set(annotation.data["ip"] for annotation in annotations))
44
45 if unique_users:
46 fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
47 days, len(unique_users), len(unique_ips)
48 )
49 else:
50 fallback_text = "There were no downloads in the last {0} days.".format(days)
51
52 new_users = ""
53 returning_users = ""
54 for email in unique_users:
55 user_annotations = annotation_queryset.filter(dataset__email_address=email)
56 total_downloads = user_annotations.count()
57 unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
58 locations = ", ".join(get_ip_location(ip) for ip in unique_locations)
59 is_new_user = DatasetAnnotation.objects.filter(
60 created_at__lt=start_time, dataset__email_address=email
61 )
62 text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
63 if is_new_user:
64 new_users += text
65 else:
66 returning_users += text
67
68 blocks = [
69 {
70 "type": "section",
71 "text": {"type": "plain_text", "emoji": True, "text": fallback_text},
72 }
73 ]
74 if new_users:
75 blocks.append(
76 {
77 "type": "section",
78 "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
79 }
80 )
81 if returning_users:
82 blocks.append(
83 {
84 "type": "section",
85 "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
86 }
87 )
88
89 # Post to slack
90 requests.post(
91 settings.ENGAGEMENTBOT_WEBHOOK,
92 json={
93 "username": "EngagementBot",
94 "icon_emoji": ":halal:",
95 "channel": "#" + options["channel"],
96 "text": fallback_text,
97 "blocks": blocks,
98 },
99 headers={"Content-Type": "application/json"},
100 timeout=10,
101 )
102
103
104 def should_display_email(email: str) -> bool:
105 """ Returns true if the given email is not associated with the CCDL suers """
106 if not email:
107 return False
108 return not (
109 email.startswith("cansav09")
110 or email.startswith("arielsvn")
111 or email.startswith("jaclyn.n.taroni")
112 or email.startswith("kurt.wheeler")
113 or email.startswith("greenescientist")
114 or email.startswith("miserlou")
115 or email.startswith("d.prasad")
116 or email.endswith("@alexslemonade.org")
117 or email is ("[email protected]")
118 or email is ("[email protected]")
119 )
120
121
122 def get_ip_location(remote_ip):
123 try:
124 data = requests.get("https://ipapi.co/" + remote_ip + "/json/", timeout=10).json()
125 return "{0}, {1}".format(data["city"], data["country_name"])
126 except Exception:
127 return remote_ip
128
[end of api/data_refinery_api/management/commands/post_downloads_summary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py
--- a/api/data_refinery_api/management/commands/post_downloads_summary.py
+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py
@@ -42,28 +42,30 @@
unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
unique_ips = list(set(annotation.data["ip"] for annotation in annotations))
- if unique_users:
- fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
- days, len(unique_users), len(unique_ips)
- )
- else:
- fallback_text = "There were no downloads in the last {0} days.".format(days)
-
new_users = ""
returning_users = ""
+ total_downloads = 0
for email in unique_users:
user_annotations = annotation_queryset.filter(dataset__email_address=email)
- total_downloads = user_annotations.count()
+ downloads = user_annotations.count()
+ total_downloads += downloads
unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
locations = ", ".join(get_ip_location(ip) for ip in unique_locations)
- is_new_user = DatasetAnnotation.objects.filter(
+ is_returning_user = DatasetAnnotation.objects.filter(
created_at__lt=start_time, dataset__email_address=email
)
- text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
- if is_new_user:
- new_users += text
- else:
+ text = "{0} | {1} downloads from {2}\n".format(email, downloads, locations)
+ if is_returning_user:
returning_users += text
+ else:
+ new_users += text
+
+ if total_downloads > 0:
+ fallback_text = "In the last {0} days, {1} users downloaded {2} datasets from {3} locations.".format(
+ days, len(unique_users), total_downloads, len(unique_ips)
+ )
+ else:
+ fallback_text = "There were no downloads in the last {0} days.".format(days)
blocks = [
{
|
{"golden_diff": "diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py\n--- a/api/data_refinery_api/management/commands/post_downloads_summary.py\n+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py\n@@ -42,28 +42,30 @@\n unique_users = list(set(annotation.dataset.email_address for annotation in annotations))\n unique_ips = list(set(annotation.data[\"ip\"] for annotation in annotations))\n \n- if unique_users:\n- fallback_text = \"In the last {0} days, {1} users downloaded datasets from {2} locations.\".format(\n- days, len(unique_users), len(unique_ips)\n- )\n- else:\n- fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n-\n new_users = \"\"\n returning_users = \"\"\n+ total_downloads = 0\n for email in unique_users:\n user_annotations = annotation_queryset.filter(dataset__email_address=email)\n- total_downloads = user_annotations.count()\n+ downloads = user_annotations.count()\n+ total_downloads += downloads\n unique_locations = list(set(annotation.data[\"ip\"] for annotation in user_annotations))\n locations = \", \".join(get_ip_location(ip) for ip in unique_locations)\n- is_new_user = DatasetAnnotation.objects.filter(\n+ is_returning_user = DatasetAnnotation.objects.filter(\n created_at__lt=start_time, dataset__email_address=email\n )\n- text = \"{0} | {1} downloads from {2}\\n\".format(email, total_downloads, locations)\n- if is_new_user:\n- new_users += text\n- else:\n+ text = \"{0} | {1} downloads from {2}\\n\".format(email, downloads, locations)\n+ if is_returning_user:\n returning_users += text\n+ else:\n+ new_users += text\n+\n+ if total_downloads > 0:\n+ fallback_text = \"In the last {0} days, {1} users downloaded {2} datasets from {3} locations.\".format(\n+ days, len(unique_users), total_downloads, len(unique_ips)\n+ )\n+ else:\n+ fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n \n blocks = [\n {\n", "issue": "Engagement bot thinks every user is a returning user\n### Context\r\n\r\nhttps://alexslemonade.slack.com/archives/CRK42AL1Y/p1587988808265500\r\n\r\n### Problem or idea\r\n\r\n@dvenprasad says 6 of those are new users. 
There must be a bug in the queries it uses or something.\r\n\r\n### Solution or next step\r\n\r\nFix the engagement bot so it reports new users as new users.\n", "before_files": [{"content": "import datetime\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # default to a week\n help=(\"Number of days in the past for which to build the stats\"),\n )\n parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n\n def handle(self, *args, **options):\n days = options[\"days\"]\n start_time = timezone.now() - datetime.timedelta(days=days)\n\n annotation_queryset = DatasetAnnotation.objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"dataset\")\n annotations = [\n annotation\n for annotation in annotation_queryset\n if annotation.data[\"start\"] and should_display_email(annotation.dataset.email_address)\n ]\n\n unique_users = list(set(annotation.dataset.email_address for annotation in annotations))\n unique_ips = list(set(annotation.data[\"ip\"] for annotation in annotations))\n\n if unique_users:\n fallback_text = \"In the last {0} days, {1} users downloaded datasets from {2} locations.\".format(\n days, len(unique_users), len(unique_ips)\n )\n else:\n fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n\n new_users = \"\"\n returning_users = \"\"\n for email in unique_users:\n user_annotations = annotation_queryset.filter(dataset__email_address=email)\n total_downloads = user_annotations.count()\n unique_locations = list(set(annotation.data[\"ip\"] for annotation in user_annotations))\n locations = \", \".join(get_ip_location(ip) for ip in unique_locations)\n is_new_user = DatasetAnnotation.objects.filter(\n created_at__lt=start_time, dataset__email_address=email\n )\n text = \"{0} | {1} downloads from {2}\\n\".format(email, total_downloads, locations)\n if is_new_user:\n new_users += text\n else:\n returning_users += text\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n }\n ]\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*New users* \\n\" + new_users,},\n }\n )\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*Returning users* \\n\" + returning_users,},\n }\n )\n\n # Post to slack\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": \"#\" + options[\"channel\"],\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n\n\ndef should_display_email(email: str) -> bool:\n \"\"\" Returns true if the given email is not associated with the CCDL suers \"\"\"\n if not email:\n return False\n return not (\n email.startswith(\"cansav09\")\n or email.startswith(\"arielsvn\")\n or email.startswith(\"jaclyn.n.taroni\")\n or email.startswith(\"kurt.wheeler\")\n or email.startswith(\"greenescientist\")\n or email.startswith(\"miserlou\")\n or 
email.startswith(\"d.prasad\")\n or email.endswith(\"@alexslemonade.org\")\n or email is (\"[email protected]\")\n or email is (\"[email protected]\")\n )\n\n\ndef get_ip_location(remote_ip):\n try:\n data = requests.get(\"https://ipapi.co/\" + remote_ip + \"/json/\", timeout=10).json()\n return \"{0}, {1}\".format(data[\"city\"], data[\"country_name\"])\n except Exception:\n return remote_ip\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}]}
| 1,859 | 515 |
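Beyond renaming the flag and swapping the branches, the accepted diff above also accumulates per-user download counts so the headline can report how many datasets were downloaded in total, something the original summary line omitted. A compressed sketch of that ordering; the counts and addresses below are made up:

```python
per_user = {"[email protected]": 4, "[email protected]": 1}  # stand-ins for queryset counts
unique_ips = {"10.0.0.1", "10.0.0.2"}

total_downloads = sum(per_user.values())
if total_downloads > 0:
    fallback_text = (
        f"In the last 7 days, {len(per_user)} users downloaded "
        f"{total_downloads} datasets from {len(unique_ips)} locations."
    )
else:
    fallback_text = "There were no downloads in the last 7 days."
print(fallback_text)
```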
gh_patches_debug_26690
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-2755
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make EventScrubber recursive
### Problem Statement
We have a custom `before_send` implementation that scrubs data recursively. I was hoping to replace the custom implementation with the built-in EventScrubber, but I found out that it doesn't scrub `vars` recursively.
As far as I can tell this was a consistency, perf trade-off thing but it would be nice to have a built-in option to make it recursive.
Thank you!
### Solution Brainstorm
`EventScrubber(recursive=True)`
</issue>
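The `scrub_dict` shown in the module below only replaces matching keys at the top level, so nested dicts and lists inside frame `vars`, extras or span data pass through untouched. A minimal standalone sketch of the recursive walk being requested; the denylist and the `"[Filtered]"` placeholder are illustrative and not the SDK's internal representation (which uses `AnnotatedValue`):

```python
DENYLIST = {"password", "token", "secret", "api_key"}

def scrub(value):
    if isinstance(value, dict):
        return {
            k: "[Filtered]"
            if isinstance(k, str) and k.lower() in DENYLIST
            else scrub(v)
            for k, v in value.items()
        }
    if isinstance(value, list):
        return [scrub(v) for v in value]
    return value

frame_vars = {"user": {"password": "hunter2", "profile": {"token": "abc"}}, "rows": [{"secret": "x"}]}
print(scrub(frame_vars))
# {'user': {'password': '[Filtered]', 'profile': {'token': '[Filtered]'}}, 'rows': [{'secret': '[Filtered]'}]}
```

Unlike the SDK's in-place mutation, this sketch returns a new structure; either style works as long as the walk recurses into both dicts and lists.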
<code>
[start of sentry_sdk/scrubber.py]
1 from sentry_sdk.utils import (
2 capture_internal_exceptions,
3 AnnotatedValue,
4 iter_event_frames,
5 )
6 from sentry_sdk._compat import string_types
7 from sentry_sdk._types import TYPE_CHECKING
8
9 if TYPE_CHECKING:
10 from sentry_sdk._types import Event
11 from typing import Any
12 from typing import Dict
13 from typing import List
14 from typing import Optional
15
16
17 DEFAULT_DENYLIST = [
18 # stolen from relay
19 "password",
20 "passwd",
21 "secret",
22 "api_key",
23 "apikey",
24 "auth",
25 "credentials",
26 "mysql_pwd",
27 "privatekey",
28 "private_key",
29 "token",
30 "ip_address",
31 "session",
32 # django
33 "csrftoken",
34 "sessionid",
35 # wsgi
36 "remote_addr",
37 "x_csrftoken",
38 "x_forwarded_for",
39 "set_cookie",
40 "cookie",
41 "authorization",
42 "x_api_key",
43 "x_forwarded_for",
44 "x_real_ip",
45 # other common names used in the wild
46 "aiohttp_session", # aiohttp
47 "connect.sid", # Express
48 "csrf_token", # Pyramid
49 "csrf", # (this is a cookie name used in accepted answers on stack overflow)
50 "_csrf", # Express
51 "_csrf_token", # Bottle
52 "PHPSESSID", # PHP
53 "_session", # Sanic
54 "symfony", # Symfony
55 "user_session", # Vue
56 "_xsrf", # Tornado
57 "XSRF-TOKEN", # Angular, Laravel
58 ]
59
60
61 class EventScrubber(object):
62 def __init__(self, denylist=None):
63 # type: (Optional[List[str]]) -> None
64 self.denylist = DEFAULT_DENYLIST if denylist is None else denylist
65 self.denylist = [x.lower() for x in self.denylist]
66
67 def scrub_dict(self, d):
68 # type: (Dict[str, Any]) -> None
69 if not isinstance(d, dict):
70 return
71
72 for k in d.keys():
73 if isinstance(k, string_types) and k.lower() in self.denylist:
74 d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()
75
76 def scrub_request(self, event):
77 # type: (Event) -> None
78 with capture_internal_exceptions():
79 if "request" in event:
80 if "headers" in event["request"]:
81 self.scrub_dict(event["request"]["headers"])
82 if "cookies" in event["request"]:
83 self.scrub_dict(event["request"]["cookies"])
84 if "data" in event["request"]:
85 self.scrub_dict(event["request"]["data"])
86
87 def scrub_extra(self, event):
88 # type: (Event) -> None
89 with capture_internal_exceptions():
90 if "extra" in event:
91 self.scrub_dict(event["extra"])
92
93 def scrub_user(self, event):
94 # type: (Event) -> None
95 with capture_internal_exceptions():
96 if "user" in event:
97 self.scrub_dict(event["user"])
98
99 def scrub_breadcrumbs(self, event):
100 # type: (Event) -> None
101 with capture_internal_exceptions():
102 if "breadcrumbs" in event:
103 if "values" in event["breadcrumbs"]:
104 for value in event["breadcrumbs"]["values"]:
105 if "data" in value:
106 self.scrub_dict(value["data"])
107
108 def scrub_frames(self, event):
109 # type: (Event) -> None
110 with capture_internal_exceptions():
111 for frame in iter_event_frames(event):
112 if "vars" in frame:
113 self.scrub_dict(frame["vars"])
114
115 def scrub_spans(self, event):
116 # type: (Event) -> None
117 with capture_internal_exceptions():
118 if "spans" in event:
119 for span in event["spans"]:
120 if "data" in span:
121 self.scrub_dict(span["data"])
122
123 def scrub_event(self, event):
124 # type: (Event) -> None
125 self.scrub_request(event)
126 self.scrub_extra(event)
127 self.scrub_user(event)
128 self.scrub_breadcrumbs(event)
129 self.scrub_frames(event)
130 self.scrub_spans(event)
131
[end of sentry_sdk/scrubber.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/scrubber.py b/sentry_sdk/scrubber.py
--- a/sentry_sdk/scrubber.py
+++ b/sentry_sdk/scrubber.py
@@ -59,19 +59,36 @@
class EventScrubber(object):
- def __init__(self, denylist=None):
- # type: (Optional[List[str]]) -> None
+ def __init__(self, denylist=None, recursive=False):
+ # type: (Optional[List[str]], bool) -> None
self.denylist = DEFAULT_DENYLIST if denylist is None else denylist
self.denylist = [x.lower() for x in self.denylist]
+ self.recursive = recursive
+
+ def scrub_list(self, lst):
+ # type: (List[Any]) -> None
+ if not isinstance(lst, list):
+ return
+
+ for v in lst:
+ if isinstance(v, dict):
+ self.scrub_dict(v)
+ elif isinstance(v, list):
+ self.scrub_list(v)
def scrub_dict(self, d):
# type: (Dict[str, Any]) -> None
if not isinstance(d, dict):
return
- for k in d.keys():
+ for k, v in d.items():
if isinstance(k, string_types) and k.lower() in self.denylist:
d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()
+ elif self.recursive:
+ if isinstance(v, dict):
+ self.scrub_dict(v)
+ elif isinstance(v, list):
+ self.scrub_list(v)
def scrub_request(self, event):
# type: (Event) -> None
|
{"golden_diff": "diff --git a/sentry_sdk/scrubber.py b/sentry_sdk/scrubber.py\n--- a/sentry_sdk/scrubber.py\n+++ b/sentry_sdk/scrubber.py\n@@ -59,19 +59,36 @@\n \n \n class EventScrubber(object):\n- def __init__(self, denylist=None):\n- # type: (Optional[List[str]]) -> None\n+ def __init__(self, denylist=None, recursive=False):\n+ # type: (Optional[List[str]], bool) -> None\n self.denylist = DEFAULT_DENYLIST if denylist is None else denylist\n self.denylist = [x.lower() for x in self.denylist]\n+ self.recursive = recursive\n+\n+ def scrub_list(self, lst):\n+ # type: (List[Any]) -> None\n+ if not isinstance(lst, list):\n+ return\n+\n+ for v in lst:\n+ if isinstance(v, dict):\n+ self.scrub_dict(v)\n+ elif isinstance(v, list):\n+ self.scrub_list(v)\n \n def scrub_dict(self, d):\n # type: (Dict[str, Any]) -> None\n if not isinstance(d, dict):\n return\n \n- for k in d.keys():\n+ for k, v in d.items():\n if isinstance(k, string_types) and k.lower() in self.denylist:\n d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()\n+ elif self.recursive:\n+ if isinstance(v, dict):\n+ self.scrub_dict(v)\n+ elif isinstance(v, list):\n+ self.scrub_list(v)\n \n def scrub_request(self, event):\n # type: (Event) -> None\n", "issue": "Make EventScrubber recursive\n### Problem Statement\r\n\r\nWe have a custom `before_send` implementation that scrubs data recursively. I was hopping to replace the custom implementation with the built-in EventScrubber but I found out that it doesn't scrub `vars` recursively.\r\n\r\nAs far as I can tell this was a consistency, perf trade-off thing but it would be nice to have a built-in option to make it recursive.\r\n\r\nThank you!\r\n\r\n### Solution Brainstorm\r\n\r\n`EventScrubber(recursive=True)`\n", "before_files": [{"content": "from sentry_sdk.utils import (\n capture_internal_exceptions,\n AnnotatedValue,\n iter_event_frames,\n)\nfrom sentry_sdk._compat import string_types\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from sentry_sdk._types import Event\n from typing import Any\n from typing import Dict\n from typing import List\n from typing import Optional\n\n\nDEFAULT_DENYLIST = [\n # stolen from relay\n \"password\",\n \"passwd\",\n \"secret\",\n \"api_key\",\n \"apikey\",\n \"auth\",\n \"credentials\",\n \"mysql_pwd\",\n \"privatekey\",\n \"private_key\",\n \"token\",\n \"ip_address\",\n \"session\",\n # django\n \"csrftoken\",\n \"sessionid\",\n # wsgi\n \"remote_addr\",\n \"x_csrftoken\",\n \"x_forwarded_for\",\n \"set_cookie\",\n \"cookie\",\n \"authorization\",\n \"x_api_key\",\n \"x_forwarded_for\",\n \"x_real_ip\",\n # other common names used in the wild\n \"aiohttp_session\", # aiohttp\n \"connect.sid\", # Express\n \"csrf_token\", # Pyramid\n \"csrf\", # (this is a cookie name used in accepted answers on stack overflow)\n \"_csrf\", # Express\n \"_csrf_token\", # Bottle\n \"PHPSESSID\", # PHP\n \"_session\", # Sanic\n \"symfony\", # Symfony\n \"user_session\", # Vue\n \"_xsrf\", # Tornado\n \"XSRF-TOKEN\", # Angular, Laravel\n]\n\n\nclass EventScrubber(object):\n def __init__(self, denylist=None):\n # type: (Optional[List[str]]) -> None\n self.denylist = DEFAULT_DENYLIST if denylist is None else denylist\n self.denylist = [x.lower() for x in self.denylist]\n\n def scrub_dict(self, d):\n # type: (Dict[str, Any]) -> None\n if not isinstance(d, dict):\n return\n\n for k in d.keys():\n if isinstance(k, string_types) and k.lower() in self.denylist:\n d[k] = AnnotatedValue.substituted_because_contains_sensitive_data()\n\n def 
scrub_request(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"request\" in event:\n if \"headers\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"headers\"])\n if \"cookies\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"cookies\"])\n if \"data\" in event[\"request\"]:\n self.scrub_dict(event[\"request\"][\"data\"])\n\n def scrub_extra(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"extra\" in event:\n self.scrub_dict(event[\"extra\"])\n\n def scrub_user(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"user\" in event:\n self.scrub_dict(event[\"user\"])\n\n def scrub_breadcrumbs(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"breadcrumbs\" in event:\n if \"values\" in event[\"breadcrumbs\"]:\n for value in event[\"breadcrumbs\"][\"values\"]:\n if \"data\" in value:\n self.scrub_dict(value[\"data\"])\n\n def scrub_frames(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n for frame in iter_event_frames(event):\n if \"vars\" in frame:\n self.scrub_dict(frame[\"vars\"])\n\n def scrub_spans(self, event):\n # type: (Event) -> None\n with capture_internal_exceptions():\n if \"spans\" in event:\n for span in event[\"spans\"]:\n if \"data\" in span:\n self.scrub_dict(span[\"data\"])\n\n def scrub_event(self, event):\n # type: (Event) -> None\n self.scrub_request(event)\n self.scrub_extra(event)\n self.scrub_user(event)\n self.scrub_breadcrumbs(event)\n self.scrub_frames(event)\n self.scrub_spans(event)\n", "path": "sentry_sdk/scrubber.py"}]}
| 1,881 | 385 |
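With the change above in place, opting in from application code would presumably look like the snippet below. The `event_scrubber` option name follows the SDK's documented `init` options; the DSN and the extra denylist entry are placeholders:

```python
import sentry_sdk
from sentry_sdk.scrubber import EventScrubber, DEFAULT_DENYLIST

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    send_default_pii=False,
    event_scrubber=EventScrubber(
        denylist=DEFAULT_DENYLIST + ["my_internal_token"],  # extend rather than replace the defaults
        recursive=True,
    ),
)
```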
gh_patches_debug_16574
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-4801
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SVD is returning 4 pointers instead of 3
## Description
When sending a tensor to a worker and performing SVD, it returns four pointers instead of three. Also, the third one is not gettable. By experimentation, I have had to work around the issue using `U, s, _, V = x.svd()`.
## How to Reproduce
```python
import torch
import syft as sy
hook = sy.TorchHook(torch)
bob = sy.VirtualWorker(hook, id='bob')
x = torch.rand(250, 84).send(bob) # Synthetic tensor
x.svd()
# Output:
# ((Wrapper)>[PointerTensor | me:88822589827 -> bob:10423896311],
# (Wrapper)>[PointerTensor | me:22528885369 -> bob:34285527022],
# (Wrapper)>[PointerTensor | me:46709676193 -> bob:67244907535],
# (Wrapper)>[PointerTensor | me:235847656 -> bob:15738446586])
```
## Expected Behavior
Should return **three** pointers: `U, s, V = x.svd()`
## System Information
- Official released Docker container
- Same for pip package:
- Ubuntu 18.04.5 LTS (Bionic Beaver)
- Python 3.6.9
</issue>
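For reference, un-hooked PyTorch already returns exactly three tensors from `svd`, which is the shape the pointer call is expected to mirror. A quick local check, assuming a torch build where `Tensor.svd` is still available:

```python
import torch

x = torch.rand(250, 84)
U, S, V = x.svd()  # named tuple of three tensors
print(U.shape, S.shape, V.shape)
# expected: torch.Size([250, 84]) torch.Size([84]) torch.Size([84, 84])
```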
<code>
[start of syft/generic/object_storage.py]
1 from collections import defaultdict
2 from typing import Union
3
4 from syft.exceptions import ObjectNotFoundError
5 from syft.generic.frameworks.types import FrameworkTensor
6 from syft.generic.frameworks.types import FrameworkTensorType
7 from syft.generic.abstract.tensor import AbstractTensor
8 from syft.workers.abstract import AbstractWorker
9
10
11 class ObjectStore:
12 """A storage of objects identifiable by their id.
13
14 A wrapper object to a collection of objects where all objects
15 are stored using their IDs as keys.
16 """
17
18 def __init__(self, owner: AbstractWorker = None):
19 self.owner = owner
20
21 # This is the collection of objects being stored.
22 self._objects = {}
23 # This is an index to retrieve objects from their tags in an efficient way
24 self._tag_to_object_ids = defaultdict(set)
25
26 # Garbage collect all remote data on a worker every garbage_delay seconds
27 self.garbage_delay = 0
28 # Store at most trash_capacity elements before garbage collecting
29 self.trash_capacity = 10_000
30 # Trash is a dict referencing for each worker key a tuple with the timestamp
31 # of the last GC and the list of object to GC
32 self.trash = {}
33
34 @property
35 def _tensors(self):
36 return {id_: obj for id_, obj in self._objects.items() if isinstance(obj, FrameworkTensor)}
37
38 def register_obj(self, obj: object, obj_id: Union[str, int] = None):
39 """Registers the specified object with the current worker node.
40
41 Selects an id for the object, assigns a list of owners, and establishes
42 whether it's a pointer or not. This method is generally not used by the
43 client and is instead used by internal processes (hooks and workers).
44
45 Args:
46 obj: A torch Tensor or Variable object to be registered.
47 obj_id (int or string): random integer between 0 and 1e10 or
48 string uniquely identifying the object.
49 """
50 if obj_id is not None and hasattr(obj, "id"):
51 obj.id = obj_id
52 self.set_obj(obj)
53
54 def de_register_obj(self, obj: object, _recurse_torch_objs: bool = True):
55 """Deregisters the specified object.
56
57 Deregister and remove attributes which are indicative of registration.
58
59 Args:
60 obj: A torch Tensor or Variable object to be deregistered.
61 _recurse_torch_objs: A boolean indicating whether the object is
62 more complex and needs to be explored. Is not supported at the
63 moment.
64 """
65 if hasattr(obj, "id"):
66 self.rm_obj(obj.id)
67 if hasattr(obj, "_owner"):
68 del obj._owner
69
70 def get_obj(self, obj_id: Union[str, int]) -> object:
71 """Returns the object from registry.
72
73 Look up an object from the registry using its ID.
74
75 Args:
76 obj_id: A string or integer id of an object to look up.
77
78 Returns:
79 Object with id equals to `obj_id`.
80 """
81
82 try:
83 obj = self._objects[obj_id]
84 except KeyError as e:
85 if obj_id not in self._objects:
86 raise ObjectNotFoundError(obj_id, self)
87 else:
88 raise e
89
90 return obj
91
92 def set_obj(self, obj: Union[FrameworkTensorType, AbstractTensor]) -> None:
93 """Adds an object to the registry of objects.
94
95 Args:
96 obj: A torch or syft tensor with an id.
97 """
98 obj.owner = self.owner
99 self._objects[obj.id] = obj
100 # Add entry in the tag index
101 if obj.tags:
102 for tag in obj.tags:
103 if tag not in self._tag_to_object_ids:
104 self._tag_to_object_ids[tag] = {obj.id}
105 else:
106 self._tag_to_object_ids[tag].add(obj.id)
107
108 def rm_obj(self, obj_id: Union[str, int], force=False):
109 """Removes an object.
110
111 Remove the object from the permanent object registry if it exists.
112
113 Args:
114 obj_id: A string or integer representing id of the object to be
115 removed.
116 force: if true, explicitly forces removal of the object modifying the
117 `garbage_collect_data` attribute.
118 """
119 if obj_id in self._objects:
120 obj = self._objects[obj_id]
121 # update tag index
122 if obj.tags:
123 for tag in obj.tags:
124 if tag not in self._tag_to_object_ids:
125 self._tag_to_object_ids[tag].remove(obj.id)
126
127 if force and hasattr(obj, "child") and hasattr(obj.child, "garbage_collect_data"):
128 obj.child.garbage_collect_data = True
129
130 del self._objects[obj_id]
131
132 def force_rm_obj(self, obj_id: Union[str, int]):
133 self.rm_obj(obj_id, force=True)
134
135 def clear_objects(self):
136 """Removes all objects from the object storage."""
137 self._objects.clear()
138
139 def current_objects(self):
140 """Returns a copy of the objects in the object storage."""
141 return self._objects.copy()
142
143 def find_by_id(self, id):
144 """Local search by id"""
145 return self._objects.get(id)
146
147 def find_by_tag(self, tag):
148 """Local search by tag
149
150 Args:
151 tag (str): exact tag searched
152
153 Return:
154 A list of results, possibly empty
155 """
156 if tag in self._tag_to_object_ids:
157 results = []
158 for obj_id in self._tag_to_object_ids[tag]:
159 obj = self.find_by_id(obj_id)
160 if obj is not None:
161 results.append(obj)
162 return results
163 return []
164
165 def register_tags(self, obj):
166 # NOTE: this is a fix to correct faulty registration that can sometimes happen
167 if obj.id not in self._objects:
168 self.owner.register_obj(obj)
169
170 for tag in obj.tags:
171 self._tag_to_object_ids[tag].add(obj.id)
172
173 def __len__(self):
174 """
175 Return the number of objects in the store
176 """
177 return len(self._objects)
178
179 def __str__(self):
180 return f"<ObjectStorage of {self.owner.id}>"
181
[end of syft/generic/object_storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/syft/generic/object_storage.py b/syft/generic/object_storage.py
--- a/syft/generic/object_storage.py
+++ b/syft/generic/object_storage.py
@@ -6,6 +6,7 @@
from syft.generic.frameworks.types import FrameworkTensorType
from syft.generic.abstract.tensor import AbstractTensor
from syft.workers.abstract import AbstractWorker
+import torch
class ObjectStore:
@@ -62,7 +63,8 @@
more complex and needs to be explored. Is not supported at the
moment.
"""
- if hasattr(obj, "id"):
+ has_id = hasattr(obj, "_id") if isinstance(obj, torch.Tensor) else hasattr(obj, "id")
+ if has_id:
self.rm_obj(obj.id)
if hasattr(obj, "_owner"):
del obj._owner
|
{"golden_diff": "diff --git a/syft/generic/object_storage.py b/syft/generic/object_storage.py\n--- a/syft/generic/object_storage.py\n+++ b/syft/generic/object_storage.py\n@@ -6,6 +6,7 @@\n from syft.generic.frameworks.types import FrameworkTensorType\n from syft.generic.abstract.tensor import AbstractTensor\n from syft.workers.abstract import AbstractWorker\n+import torch\n \n \n class ObjectStore:\n@@ -62,7 +63,8 @@\n more complex and needs to be explored. Is not supported at the\n moment.\n \"\"\"\n- if hasattr(obj, \"id\"):\n+ has_id = hasattr(obj, \"_id\") if isinstance(obj, torch.Tensor) else hasattr(obj, \"id\")\n+ if has_id:\n self.rm_obj(obj.id)\n if hasattr(obj, \"_owner\"):\n del obj._owner\n", "issue": "SVD is returning 4 pointers instead of 3\n## Description\r\nWhen sending a tensor to a worker and performing SVD, returns four pointers instead of three. Also, the third one is not gettable. By experimentation, I have had to solve the issue using `U, s, _, V = x.svd()`.\r\n\r\n## How to Reproduce\r\n```python\r\nimport torch\r\nimport syft as sy\r\n\r\nhook = sy.TorchHook(torch)\r\nbob = sy.VirtualWorker(hook, id='bob')\r\n\r\nx = torch.rand(250, 84).send(bob) # Synthetic tensor\r\nx.svd()\r\n\r\n# Output:\r\n# ((Wrapper)>[PointerTensor | me:88822589827 -> bob:10423896311],\r\n# (Wrapper)>[PointerTensor | me:22528885369 -> bob:34285527022],\r\n# (Wrapper)>[PointerTensor | me:46709676193 -> bob:67244907535],\r\n# (Wrapper)>[PointerTensor | me:235847656 -> bob:15738446586])\r\n```\r\n\r\n## Expected Behavior\r\nShould return **three** pointers: `U, s, V = x.svd()`\r\n\r\n## System Information\r\n - Official released Docker container\r\n - Same for pip package:\r\n - Ubuntu 18.04.5 LTS (Bionic Beaver)\r\n - Python 3.6.9\n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import Union\n\nfrom syft.exceptions import ObjectNotFoundError\nfrom syft.generic.frameworks.types import FrameworkTensor\nfrom syft.generic.frameworks.types import FrameworkTensorType\nfrom syft.generic.abstract.tensor import AbstractTensor\nfrom syft.workers.abstract import AbstractWorker\n\n\nclass ObjectStore:\n \"\"\"A storage of objects identifiable by their id.\n\n A wrapper object to a collection of objects where all objects\n are stored using their IDs as keys.\n \"\"\"\n\n def __init__(self, owner: AbstractWorker = None):\n self.owner = owner\n\n # This is the collection of objects being stored.\n self._objects = {}\n # This is an index to retrieve objects from their tags in an efficient way\n self._tag_to_object_ids = defaultdict(set)\n\n # Garbage collect all remote data on a worker every garbage_delay seconds\n self.garbage_delay = 0\n # Store at most trash_capacity elements before garbage collecting\n self.trash_capacity = 10_000\n # Trash is a dict referencing for each worker key a tuple with the timestamp\n # of the last GC and the list of object to GC\n self.trash = {}\n\n @property\n def _tensors(self):\n return {id_: obj for id_, obj in self._objects.items() if isinstance(obj, FrameworkTensor)}\n\n def register_obj(self, obj: object, obj_id: Union[str, int] = None):\n \"\"\"Registers the specified object with the current worker node.\n\n Selects an id for the object, assigns a list of owners, and establishes\n whether it's a pointer or not. 
This method is generally not used by the\n client and is instead used by internal processes (hooks and workers).\n\n Args:\n obj: A torch Tensor or Variable object to be registered.\n obj_id (int or string): random integer between 0 and 1e10 or\n string uniquely identifying the object.\n \"\"\"\n if obj_id is not None and hasattr(obj, \"id\"):\n obj.id = obj_id\n self.set_obj(obj)\n\n def de_register_obj(self, obj: object, _recurse_torch_objs: bool = True):\n \"\"\"Deregisters the specified object.\n\n Deregister and remove attributes which are indicative of registration.\n\n Args:\n obj: A torch Tensor or Variable object to be deregistered.\n _recurse_torch_objs: A boolean indicating whether the object is\n more complex and needs to be explored. Is not supported at the\n moment.\n \"\"\"\n if hasattr(obj, \"id\"):\n self.rm_obj(obj.id)\n if hasattr(obj, \"_owner\"):\n del obj._owner\n\n def get_obj(self, obj_id: Union[str, int]) -> object:\n \"\"\"Returns the object from registry.\n\n Look up an object from the registry using its ID.\n\n Args:\n obj_id: A string or integer id of an object to look up.\n\n Returns:\n Object with id equals to `obj_id`.\n \"\"\"\n\n try:\n obj = self._objects[obj_id]\n except KeyError as e:\n if obj_id not in self._objects:\n raise ObjectNotFoundError(obj_id, self)\n else:\n raise e\n\n return obj\n\n def set_obj(self, obj: Union[FrameworkTensorType, AbstractTensor]) -> None:\n \"\"\"Adds an object to the registry of objects.\n\n Args:\n obj: A torch or syft tensor with an id.\n \"\"\"\n obj.owner = self.owner\n self._objects[obj.id] = obj\n # Add entry in the tag index\n if obj.tags:\n for tag in obj.tags:\n if tag not in self._tag_to_object_ids:\n self._tag_to_object_ids[tag] = {obj.id}\n else:\n self._tag_to_object_ids[tag].add(obj.id)\n\n def rm_obj(self, obj_id: Union[str, int], force=False):\n \"\"\"Removes an object.\n\n Remove the object from the permanent object registry if it exists.\n\n Args:\n obj_id: A string or integer representing id of the object to be\n removed.\n force: if true, explicitly forces removal of the object modifying the\n `garbage_collect_data` attribute.\n \"\"\"\n if obj_id in self._objects:\n obj = self._objects[obj_id]\n # update tag index\n if obj.tags:\n for tag in obj.tags:\n if tag not in self._tag_to_object_ids:\n self._tag_to_object_ids[tag].remove(obj.id)\n\n if force and hasattr(obj, \"child\") and hasattr(obj.child, \"garbage_collect_data\"):\n obj.child.garbage_collect_data = True\n\n del self._objects[obj_id]\n\n def force_rm_obj(self, obj_id: Union[str, int]):\n self.rm_obj(obj_id, force=True)\n\n def clear_objects(self):\n \"\"\"Removes all objects from the object storage.\"\"\"\n self._objects.clear()\n\n def current_objects(self):\n \"\"\"Returns a copy of the objects in the object storage.\"\"\"\n return self._objects.copy()\n\n def find_by_id(self, id):\n \"\"\"Local search by id\"\"\"\n return self._objects.get(id)\n\n def find_by_tag(self, tag):\n \"\"\"Local search by tag\n\n Args:\n tag (str): exact tag searched\n\n Return:\n A list of results, possibly empty\n \"\"\"\n if tag in self._tag_to_object_ids:\n results = []\n for obj_id in self._tag_to_object_ids[tag]:\n obj = self.find_by_id(obj_id)\n if obj is not None:\n results.append(obj)\n return results\n return []\n\n def register_tags(self, obj):\n # NOTE: this is a fix to correct faulty registration that can sometimes happen\n if obj.id not in self._objects:\n self.owner.register_obj(obj)\n\n for tag in obj.tags:\n 
self._tag_to_object_ids[tag].add(obj.id)\n\n def __len__(self):\n \"\"\"\n Return the number of objects in the store\n \"\"\"\n return len(self._objects)\n\n def __str__(self):\n return f\"<ObjectStorage of {self.owner.id}>\"\n", "path": "syft/generic/object_storage.py"}]}
| 2,672 | 188 |
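For reference, the guard introduced by the diff in the row above can be read in isolation as the sketch below. It is not the real `syft.generic.object_storage` code — the helper name here is invented — and the choice to probe the private `_id` attribute on `torch.Tensor` (rather than `id`) is simply mirrored from the diff, not explained by the row itself.

```python
import torch

def has_registration_id(obj) -> bool:
    # Mirror of the patched check: plain objects are probed via `id`,
    # torch tensors via the private `_id` attribute.
    if isinstance(obj, torch.Tensor):
        return hasattr(obj, "_id")
    return hasattr(obj, "id")
```

In `de_register_obj`, `self.rm_obj(obj.id)` is then only reached when this check is true.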
gh_patches_debug_8131
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-3440
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Parsl creates hidden working files, doesn't clean them up
**Is your feature request related to a problem? Please describe.**
I didn't know Parsl was creating so many log files because they were hidden.
I'm OK with files appearing in my run directory, but not with them accumulating endlessly without my notice.
To be specific, I don't know what is responsible for `.workers.block-0.17158*` files but I'd like them to be visible.
**Describe the solution you'd like**
No hidden files.
**Describe alternatives you've considered**
Adding a "clean up Parsl's logging files" CLI tool
**Additional context**
Is anyone else concerned?
</issue>
<code>
[start of parsl/providers/local/local.py]
1 import logging
2 import os
3 import time
4
5 from parsl.channels import LocalChannel
6 from parsl.jobs.states import JobState, JobStatus
7 from parsl.launchers import SingleNodeLauncher
8 from parsl.providers.base import ExecutionProvider
9 from parsl.providers.errors import SchedulerMissingArgs, ScriptPathError, SubmitException
10 from parsl.utils import RepresentationMixin
11
12 logger = logging.getLogger(__name__)
13
14
15 class LocalProvider(ExecutionProvider, RepresentationMixin):
16 """ Local Execution Provider
17
18 This provider is used to provide execution resources from the localhost.
19
20 Parameters
21 ----------
22
23 min_blocks : int
24 Minimum number of blocks to maintain.
25 max_blocks : int
26 Maximum number of blocks to maintain.
27 parallelism : float
28 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
29 scaling where as many resources as possible are used; parallelism close to 0 represents
30 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
31 move_files : Optional[Bool]
32 Should files be moved? By default, Parsl will try to figure this out itself (= None).
33 If True, then will always move. If False, will never move.
34 worker_init : str
35 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
36 """
37
38 def __init__(self,
39 channel=LocalChannel(),
40 nodes_per_block=1,
41 launcher=SingleNodeLauncher(),
42 init_blocks=1,
43 min_blocks=0,
44 max_blocks=1,
45 worker_init='',
46 cmd_timeout=30,
47 parallelism=1,
48 move_files=None):
49 self.channel = channel
50 self._label = 'local'
51 self.nodes_per_block = nodes_per_block
52 self.launcher = launcher
53 self.worker_init = worker_init
54 self.init_blocks = init_blocks
55 self.min_blocks = min_blocks
56 self.max_blocks = max_blocks
57 self.parallelism = parallelism
58 self.script_dir = None
59 self.cmd_timeout = cmd_timeout
60 self.move_files = move_files
61
62 # Dictionary that keeps track of jobs, keyed on job_id
63 self.resources = {}
64
65 def status(self, job_ids):
66 ''' Get the status of a list of jobs identified by their ids.
67
68 Args:
69 - job_ids (List of ids) : List of identifiers for the jobs
70
71 Returns:
72 - List of status codes.
73
74 '''
75
76 for job_id in self.resources:
77 # This job dict should really be a class on its own
78 job_dict = self.resources[job_id]
79 if job_dict['status'] and job_dict['status'].terminal:
80 # We already checked this and it can't change after that
81 continue
82 # Script path should point to remote path if _should_move_files() is True
83 script_path = job_dict['script_path']
84
85 alive = self._is_alive(job_dict)
86 str_ec = self._read_job_file(script_path, '.ec').strip()
87
88 status = None
89 if str_ec == '-':
90 if alive:
91 status = JobStatus(JobState.RUNNING)
92 else:
93 # not alive but didn't get to write an exit code
94 if 'cancelled' in job_dict:
95 # because we cancelled it
96 status = JobStatus(JobState.CANCELLED)
97 else:
98 # we didn't cancel it, so it must have been killed by something outside
99 # parsl; we don't have a state for this, but we'll use CANCELLED with
100 # a specific message
101 status = JobStatus(JobState.CANCELLED, message='Killed')
102 else:
103 try:
104 # TODO: ensure that these files are only read once and clean them
105 ec = int(str_ec)
106 stdout_path = self._job_file_path(script_path, '.out')
107 stderr_path = self._job_file_path(script_path, '.err')
108 if ec == 0:
109 state = JobState.COMPLETED
110 else:
111 state = JobState.FAILED
112 status = JobStatus(state, exit_code=ec,
113 stdout_path=stdout_path, stderr_path=stderr_path)
114 except Exception:
115 status = JobStatus(JobState.FAILED,
116 'Cannot parse exit code: {}'.format(str_ec))
117
118 job_dict['status'] = status
119
120 return [self.resources[jid]['status'] for jid in job_ids]
121
122 def _is_alive(self, job_dict):
123 retcode, stdout, stderr = self.channel.execute_wait(
124 'ps -p {} > /dev/null 2> /dev/null; echo "STATUS:$?" '.format(
125 job_dict['remote_pid']), self.cmd_timeout)
126 for line in stdout.split('\n'):
127 if line.startswith("STATUS:"):
128 status = line.split("STATUS:")[1].strip()
129 if status == "0":
130 return True
131 else:
132 return False
133
134 def _job_file_path(self, script_path: str, suffix: str) -> str:
135 path = '{0}{1}'.format(script_path, suffix)
136 if self._should_move_files():
137 path = self.channel.pull_file(path, self.script_dir)
138 return path
139
140 def _read_job_file(self, script_path: str, suffix: str) -> str:
141 path = self._job_file_path(script_path, suffix)
142
143 with open(path, 'r') as f:
144 return f.read()
145
146 def _write_submit_script(self, script_string, script_filename):
147 '''
148 Load the template string with config values and write the generated submit script to
149 a submit script file.
150
151 Args:
152 - template_string (string) : The template string to be used for the writing submit script
153 - script_filename (string) : Name of the submit script
154
155 Returns:
156 - True: on success
157
158 Raises:
159 SchedulerMissingArgs : If template is missing args
160 ScriptPathError : Unable to write submit script out
161 '''
162
163 try:
164 with open(script_filename, 'w') as f:
165 f.write(script_string)
166
167 except KeyError as e:
168 logger.error("Missing keys for submit script: %s", e)
169 raise SchedulerMissingArgs(e.args, self.label)
170
171 except IOError as e:
172 logger.error("Failed writing to submit script: %s", script_filename)
173 raise ScriptPathError(script_filename, e)
174
175 return True
176
177 def submit(self, command, tasks_per_node, job_name="parsl.localprovider"):
178 ''' Submits the command onto an Local Resource Manager job.
179 Submit returns an ID that corresponds to the task that was just submitted.
180
181 If tasks_per_node < 1:
182 1/tasks_per_node is provisioned
183
184 If tasks_per_node == 1:
185 A single node is provisioned
186
187 If tasks_per_node > 1 :
188 tasks_per_node nodes are provisioned.
189
190 Args:
191 - command :(String) Commandline invocation to be made on the remote side.
192 - tasks_per_node (int) : command invocations to be launched per node
193
194 Kwargs:
195 - job_name (String): Name for job, must be unique
196
197 Returns:
198 - None: At capacity, cannot provision more
199 - job_id: (string) Identifier for the job
200
201 '''
202
203 job_name = "{0}.{1}".format(job_name, time.time())
204
205 # Set script path
206 script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
207 script_path = os.path.abspath(script_path)
208
209 wrap_command = self.worker_init + f'\nexport JOBNAME=${job_name}\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)
210
211 self._write_submit_script(wrap_command, script_path)
212
213 job_id = None
214 remote_pid = None
215 if self._should_move_files():
216 logger.debug("Pushing start script")
217 script_path = self.channel.push_file(script_path, self.channel.script_dir)
218
219 logger.debug("Launching")
220 # We need to capture the exit code and the streams, so we put them in files. We also write
221 # '-' to the exit code file to isolate potential problems with writing to files in the
222 # script directory
223 #
224 # The basic flow is:
225 # 1. write "-" to the exit code file. If this fails, exit
226 # 2. Launch the following sequence in the background:
227 # a. the command to run
228 # b. write the exit code of the command from (a) to the exit code file
229 # 3. Write the PID of the background sequence on stdout. The PID is needed if we want to
230 # cancel the task later.
231 #
232 # We need to do the >/dev/null 2>&1 so that bash closes stdout, otherwise
233 # channel.execute_wait hangs reading the process stdout until all the
234 # background commands complete.
235 cmd = '/bin/bash -c \'echo - >{0}.ec && {{ {{ bash {0} 1>{0}.out 2>{0}.err ; ' \
236 'echo $? > {0}.ec ; }} >/dev/null 2>&1 & echo "PID:$!" ; }}\''.format(script_path)
237 retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)
238 if retcode != 0:
239 raise SubmitException(job_name, "Launch command exited with code {0}".format(retcode),
240 stdout, stderr)
241 for line in stdout.split('\n'):
242 if line.startswith("PID:"):
243 remote_pid = line.split("PID:")[1].strip()
244 job_id = remote_pid
245 if job_id is None:
246 raise SubmitException(job_name, "Channel failed to start remote command/retrieve PID")
247
248 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.RUNNING),
249 'remote_pid': remote_pid, 'script_path': script_path}
250
251 return job_id
252
253 def _should_move_files(self):
254 return (self.move_files is None and not isinstance(self.channel, LocalChannel)) or (self.move_files)
255
256 def cancel(self, job_ids):
257 ''' Cancels the jobs specified by a list of job ids
258
259 Args:
260 job_ids : [<job_id> ...]
261
262 Returns: [True] Always returns true for every job_id, regardless of
263 whether an individual cancel failed (unless an
264 exception is raised)
265 '''
266 for job in job_ids:
267 job_dict = self.resources[job]
268 job_dict['cancelled'] = True
269 logger.debug("Terminating job/process ID: {0}".format(job))
270 cmd = "kill -- -$(ps -o pgid= {} | grep -o '[0-9]*')".format(job_dict['remote_pid'])
271 retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)
272 if retcode != 0:
273 logger.warning("Failed to kill PID: {} and child processes on {}".format(job_dict['remote_pid'],
274 self.label))
275
276 rets = [True for i in job_ids]
277 return rets
278
279 @property
280 def label(self):
281 return self._label
282
283 @property
284 def status_polling_interval(self):
285 return 5
286
[end of parsl/providers/local/local.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/providers/local/local.py b/parsl/providers/local/local.py
--- a/parsl/providers/local/local.py
+++ b/parsl/providers/local/local.py
@@ -206,7 +206,7 @@
script_path = "{0}/{1}.sh".format(self.script_dir, job_name)
script_path = os.path.abspath(script_path)
- wrap_command = self.worker_init + f'\nexport JOBNAME=${job_name}\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)
+ wrap_command = self.worker_init + f'\nexport JOBNAME={job_name}\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)
self._write_submit_script(wrap_command, script_path)
|
{"golden_diff": "diff --git a/parsl/providers/local/local.py b/parsl/providers/local/local.py\n--- a/parsl/providers/local/local.py\n+++ b/parsl/providers/local/local.py\n@@ -206,7 +206,7 @@\n script_path = \"{0}/{1}.sh\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n \n- wrap_command = self.worker_init + f'\\nexport JOBNAME=${job_name}\\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)\n+ wrap_command = self.worker_init + f'\\nexport JOBNAME={job_name}\\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)\n \n self._write_submit_script(wrap_command, script_path)\n", "issue": "Parsl creates hidden working files, doesn't clean them up\n**Is your feature request related to a problem? Please describe.**\r\nI didn't know Parsl was creating so many log files because they were hidden. \r\nI'm OK with files appearing in my run directory, but not accumulating them endlessly without my notice.\r\n\r\nTo be specific, I don't know what is responsible for `.workers.block-0.17158*` files but I'd like them to be visible.\r\n\r\n**Describe the solution you'd like**\r\nNo hidden files.\r\n\r\n**Describe alternatives you've considered**\r\nAdding a \"clean up Parsl's logging files\" CLI tool\r\n\r\n**Additional context**\r\nIs anyone else concerned?\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.jobs.states import JobState, JobStatus\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.base import ExecutionProvider\nfrom parsl.providers.errors import SchedulerMissingArgs, ScriptPathError, SubmitException\nfrom parsl.utils import RepresentationMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalProvider(ExecutionProvider, RepresentationMixin):\n \"\"\" Local Execution Provider\n\n This provider is used to provide execution resources from the localhost.\n\n Parameters\n ----------\n\n min_blocks : int\n Minimum number of blocks to maintain.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n move_files : Optional[Bool]\n Should files be moved? By default, Parsl will try to figure this out itself (= None).\n If True, then will always move. 
If False, will never move.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n \"\"\"\n\n def __init__(self,\n channel=LocalChannel(),\n nodes_per_block=1,\n launcher=SingleNodeLauncher(),\n init_blocks=1,\n min_blocks=0,\n max_blocks=1,\n worker_init='',\n cmd_timeout=30,\n parallelism=1,\n move_files=None):\n self.channel = channel\n self._label = 'local'\n self.nodes_per_block = nodes_per_block\n self.launcher = launcher\n self.worker_init = worker_init\n self.init_blocks = init_blocks\n self.min_blocks = min_blocks\n self.max_blocks = max_blocks\n self.parallelism = parallelism\n self.script_dir = None\n self.cmd_timeout = cmd_timeout\n self.move_files = move_files\n\n # Dictionary that keeps track of jobs, keyed on job_id\n self.resources = {}\n\n def status(self, job_ids):\n ''' Get the status of a list of jobs identified by their ids.\n\n Args:\n - job_ids (List of ids) : List of identifiers for the jobs\n\n Returns:\n - List of status codes.\n\n '''\n\n for job_id in self.resources:\n # This job dict should really be a class on its own\n job_dict = self.resources[job_id]\n if job_dict['status'] and job_dict['status'].terminal:\n # We already checked this and it can't change after that\n continue\n # Script path should point to remote path if _should_move_files() is True\n script_path = job_dict['script_path']\n\n alive = self._is_alive(job_dict)\n str_ec = self._read_job_file(script_path, '.ec').strip()\n\n status = None\n if str_ec == '-':\n if alive:\n status = JobStatus(JobState.RUNNING)\n else:\n # not alive but didn't get to write an exit code\n if 'cancelled' in job_dict:\n # because we cancelled it\n status = JobStatus(JobState.CANCELLED)\n else:\n # we didn't cancel it, so it must have been killed by something outside\n # parsl; we don't have a state for this, but we'll use CANCELLED with\n # a specific message\n status = JobStatus(JobState.CANCELLED, message='Killed')\n else:\n try:\n # TODO: ensure that these files are only read once and clean them\n ec = int(str_ec)\n stdout_path = self._job_file_path(script_path, '.out')\n stderr_path = self._job_file_path(script_path, '.err')\n if ec == 0:\n state = JobState.COMPLETED\n else:\n state = JobState.FAILED\n status = JobStatus(state, exit_code=ec,\n stdout_path=stdout_path, stderr_path=stderr_path)\n except Exception:\n status = JobStatus(JobState.FAILED,\n 'Cannot parse exit code: {}'.format(str_ec))\n\n job_dict['status'] = status\n\n return [self.resources[jid]['status'] for jid in job_ids]\n\n def _is_alive(self, job_dict):\n retcode, stdout, stderr = self.channel.execute_wait(\n 'ps -p {} > /dev/null 2> /dev/null; echo \"STATUS:$?\" '.format(\n job_dict['remote_pid']), self.cmd_timeout)\n for line in stdout.split('\\n'):\n if line.startswith(\"STATUS:\"):\n status = line.split(\"STATUS:\")[1].strip()\n if status == \"0\":\n return True\n else:\n return False\n\n def _job_file_path(self, script_path: str, suffix: str) -> str:\n path = '{0}{1}'.format(script_path, suffix)\n if self._should_move_files():\n path = self.channel.pull_file(path, self.script_dir)\n return path\n\n def _read_job_file(self, script_path: str, suffix: str) -> str:\n path = self._job_file_path(script_path, suffix)\n\n with open(path, 'r') as f:\n return f.read()\n\n def _write_submit_script(self, script_string, script_filename):\n '''\n Load the template string with config values and write the generated submit script to\n a submit script file.\n\n Args:\n - template_string 
(string) : The template string to be used for the writing submit script\n - script_filename (string) : Name of the submit script\n\n Returns:\n - True: on success\n\n Raises:\n SchedulerMissingArgs : If template is missing args\n ScriptPathError : Unable to write submit script out\n '''\n\n try:\n with open(script_filename, 'w') as f:\n f.write(script_string)\n\n except KeyError as e:\n logger.error(\"Missing keys for submit script: %s\", e)\n raise SchedulerMissingArgs(e.args, self.label)\n\n except IOError as e:\n logger.error(\"Failed writing to submit script: %s\", script_filename)\n raise ScriptPathError(script_filename, e)\n\n return True\n\n def submit(self, command, tasks_per_node, job_name=\"parsl.localprovider\"):\n ''' Submits the command onto an Local Resource Manager job.\n Submit returns an ID that corresponds to the task that was just submitted.\n\n If tasks_per_node < 1:\n 1/tasks_per_node is provisioned\n\n If tasks_per_node == 1:\n A single node is provisioned\n\n If tasks_per_node > 1 :\n tasks_per_node nodes are provisioned.\n\n Args:\n - command :(String) Commandline invocation to be made on the remote side.\n - tasks_per_node (int) : command invocations to be launched per node\n\n Kwargs:\n - job_name (String): Name for job, must be unique\n\n Returns:\n - None: At capacity, cannot provision more\n - job_id: (string) Identifier for the job\n\n '''\n\n job_name = \"{0}.{1}\".format(job_name, time.time())\n\n # Set script path\n script_path = \"{0}/{1}.sh\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n\n wrap_command = self.worker_init + f'\\nexport JOBNAME=${job_name}\\n' + self.launcher(command, tasks_per_node, self.nodes_per_block)\n\n self._write_submit_script(wrap_command, script_path)\n\n job_id = None\n remote_pid = None\n if self._should_move_files():\n logger.debug(\"Pushing start script\")\n script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n logger.debug(\"Launching\")\n # We need to capture the exit code and the streams, so we put them in files. We also write\n # '-' to the exit code file to isolate potential problems with writing to files in the\n # script directory\n #\n # The basic flow is:\n # 1. write \"-\" to the exit code file. If this fails, exit\n # 2. Launch the following sequence in the background:\n # a. the command to run\n # b. write the exit code of the command from (a) to the exit code file\n # 3. Write the PID of the background sequence on stdout. The PID is needed if we want to\n # cancel the task later.\n #\n # We need to do the >/dev/null 2>&1 so that bash closes stdout, otherwise\n # channel.execute_wait hangs reading the process stdout until all the\n # background commands complete.\n cmd = '/bin/bash -c \\'echo - >{0}.ec && {{ {{ bash {0} 1>{0}.out 2>{0}.err ; ' \\\n 'echo $? 
> {0}.ec ; }} >/dev/null 2>&1 & echo \"PID:$!\" ; }}\\''.format(script_path)\n retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)\n if retcode != 0:\n raise SubmitException(job_name, \"Launch command exited with code {0}\".format(retcode),\n stdout, stderr)\n for line in stdout.split('\\n'):\n if line.startswith(\"PID:\"):\n remote_pid = line.split(\"PID:\")[1].strip()\n job_id = remote_pid\n if job_id is None:\n raise SubmitException(job_name, \"Channel failed to start remote command/retrieve PID\")\n\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.RUNNING),\n 'remote_pid': remote_pid, 'script_path': script_path}\n\n return job_id\n\n def _should_move_files(self):\n return (self.move_files is None and not isinstance(self.channel, LocalChannel)) or (self.move_files)\n\n def cancel(self, job_ids):\n ''' Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [<job_id> ...]\n\n Returns: [True] Always returns true for every job_id, regardless of\n whether an individual cancel failed (unless an\n exception is raised)\n '''\n for job in job_ids:\n job_dict = self.resources[job]\n job_dict['cancelled'] = True\n logger.debug(\"Terminating job/process ID: {0}\".format(job))\n cmd = \"kill -- -$(ps -o pgid= {} | grep -o '[0-9]*')\".format(job_dict['remote_pid'])\n retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)\n if retcode != 0:\n logger.warning(\"Failed to kill PID: {} and child processes on {}\".format(job_dict['remote_pid'],\n self.label))\n\n rets = [True for i in job_ids]\n return rets\n\n @property\n def label(self):\n return self._label\n\n @property\n def status_polling_interval(self):\n return 5\n", "path": "parsl/providers/local/local.py"}]}
| 3,886 | 170 |
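The one-character fix in the golden diff above is easy to miss: inside an f-string, `${job_name}` keeps the literal `$` and only the braces are substituted, so the generated submit script exports a job name that begins with `$`. The standalone snippet below (with an invented timestamped name) shows the difference; the further step — bash expanding the unset `$parsl` to nothing and leaving a dot-prefixed, hence hidden, value — is a plausible reading of where the `.workers.block-*` names come from, not something the row states outright.

```python
job_name = "parsl.localprovider.1715800000.0"   # invented example value

buggy = f'\nexport JOBNAME=${job_name}\n'
fixed = f'\nexport JOBNAME={job_name}\n'

print(buggy)  # export JOBNAME=$parsl.localprovider.1715800000.0
print(fixed)  # export JOBNAME=parsl.localprovider.1715800000.0
```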
gh_patches_debug_38722
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-1009
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configuration strict option not checking broken links
I was testing mkdocs earlier this year and was using "strict: true" in my mkdocs.yml. If I recall correctly, it was working then and building would break if there was a broken link in the .md.
However I am now using v0.14.0 and this config option seems to have no effect on the build - I get no warnings or errors when building regardless of the value of strict.
</issue>
<code>
[start of mkdocs/__main__.py]
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 from __future__ import unicode_literals
5 import logging
6 import click
7 import socket
8
9 from mkdocs import __version__
10 from mkdocs import utils
11 from mkdocs import exceptions
12 from mkdocs.config import load_config
13 from mkdocs.commands import build, gh_deploy, new, serve
14
15 log = logging.getLogger(__name__)
16
17 # Disable the warning that Click displays (as of Click version 5.0) when users
18 # use unicode_literals in Python 2.
19 # See http://click.pocoo.org/dev/python3/#unicode-literals for more details.
20 click.disable_unicode_literals_warning = True
21
22
23 class State(object):
24 ''' Maintain logging level.'''
25
26 def __init__(self, log_name='mkdocs', level=logging.INFO):
27 self.logger = logging.getLogger(log_name)
28 self.logger.propagate = False
29 stream = logging.StreamHandler()
30 formatter = logging.Formatter("%(levelname)-7s - %(message)s ")
31 stream.setFormatter(formatter)
32 self.logger.addHandler(stream)
33
34 self.logger.setLevel(level)
35
36
37 pass_state = click.make_pass_decorator(State, ensure=True)
38
39
40 def verbose_option(f):
41 def callback(ctx, param, value):
42 state = ctx.ensure_object(State)
43 if value:
44 state.logger.setLevel(logging.DEBUG)
45 return click.option('-v', '--verbose',
46 is_flag=True,
47 expose_value=False,
48 help='Enable verbose output',
49 callback=callback)(f)
50
51
52 def quiet_option(f):
53 def callback(ctx, param, value):
54 state = ctx.ensure_object(State)
55 if value:
56 state.logger.setLevel(logging.ERROR)
57 return click.option('-q', '--quiet',
58 is_flag=True,
59 expose_value=False,
60 help='Silence warnings',
61 callback=callback)(f)
62
63
64 def common_options(f):
65 f = verbose_option(f)
66 f = quiet_option(f)
67 return f
68
69
70 clean_help = "Remove old files from the site_dir before building"
71 config_help = "Provide a specific MkDocs config"
72 dev_addr_help = ("IP address and port to serve documentation locally (default: "
73 "localhost:8000)")
74 strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
75 "on any warnings.")
76 theme_dir_help = "The theme directory to use when building your documentation."
77 theme_help = "The theme to use when building your documentation."
78 theme_choices = utils.get_theme_names()
79 site_dir_help = "The directory to output the result of the documentation build."
80 reload_help = "Enable the live reloading in the development server (this is the default)"
81 no_reload_help = "Disable the live reloading in the development server."
82 dirty_reload_help = "Enable the live reloading in the development server, but only re-build files that have changed"
83 commit_message_help = ("A commit message to use when commiting to the "
84 "Github Pages remote branch")
85 remote_branch_help = ("The remote branch to commit to for Github Pages. This "
86 "overrides the value specified in config")
87 remote_name_help = ("The remote name to commit to for Github Pages. This "
88 "overrides the value specified in config")
89
90
91 @click.group(context_settings={'help_option_names': ['-h', '--help']})
92 @click.version_option(__version__, '-V', '--version')
93 @common_options
94 def cli():
95 """
96 MkDocs - Project documentation with Markdown.
97 """
98
99
100 @cli.command(name="serve")
101 @click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
102 @click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')
103 @click.option('-s', '--strict', is_flag=True, help=strict_help)
104 @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
105 @click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)
106 @click.option('--livereload', 'livereload', flag_value='livereload', help=reload_help)
107 @click.option('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)
108 @click.option('-d', '--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)
109 @common_options
110 def serve_command(dev_addr, config_file, strict, theme, theme_dir, livereload):
111 """Run the builtin development server"""
112
113 logging.getLogger('tornado').setLevel(logging.WARNING)
114
115 try:
116 serve.serve(
117 config_file=config_file,
118 dev_addr=dev_addr,
119 strict=strict,
120 theme=theme,
121 theme_dir=theme_dir,
122 livereload=livereload
123 )
124 except (exceptions.ConfigurationError, socket.error) as e:
125 # Avoid ugly, unhelpful traceback
126 raise SystemExit('\n' + str(e))
127
128
129 @cli.command(name="build")
130 @click.option('-c', '--clean/--dirty', is_flag=True, help=clean_help)
131 @click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
132 @click.option('-s', '--strict', is_flag=True, help=strict_help)
133 @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
134 @click.option('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)
135 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
136 @common_options
137 def build_command(clean, config_file, strict, theme, theme_dir, site_dir):
138 """Build the MkDocs documentation"""
139 try:
140 build.build(load_config(
141 config_file=config_file,
142 strict=strict,
143 theme=theme,
144 theme_dir=theme_dir,
145 site_dir=site_dir
146 ), dirty=not clean)
147 except exceptions.ConfigurationError as e:
148 # Avoid ugly, unhelpful traceback
149 raise SystemExit('\n' + str(e))
150
151
152 @cli.command(name="json")
153 @click.option('-c', '--clean', is_flag=True, help=clean_help)
154 @click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
155 @click.option('-s', '--strict', is_flag=True, help=strict_help)
156 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
157 @common_options
158 def json_command(clean, config_file, strict, site_dir):
159 """Build the MkDocs documentation to JSON files
160
161 Rather than building your documentation to HTML pages, this
162 outputs each page in a simple JSON format. This command is
163 useful if you want to index your documentation in an external
164 search engine.
165 """
166
167 log.warning("The json command is deprecated and will be removed in a "
168 "future MkDocs release. For details on updating: "
169 "http://www.mkdocs.org/about/release-notes/")
170
171 try:
172 build.build(load_config(
173 config_file=config_file,
174 strict=strict,
175 site_dir=site_dir
176 ), dump_json=True, dirty=not clean)
177 except exceptions.ConfigurationError as e:
178 # Avoid ugly, unhelpful traceback
179 raise SystemExit('\n' + str(e))
180
181
182 @cli.command(name="gh-deploy")
183 @click.option('-c', '--clean', is_flag=True, help=clean_help)
184 @click.option('-f', '--config-file', type=click.File('rb'), help=config_help)
185 @click.option('-m', '--message', help=commit_message_help)
186 @click.option('-b', '--remote-branch', help=remote_branch_help)
187 @click.option('-r', '--remote-name', help=remote_name_help)
188 @common_options
189 def gh_deploy_command(config_file, clean, message, remote_branch, remote_name):
190 """Deploy your documentation to GitHub Pages"""
191 try:
192 config = load_config(
193 config_file=config_file,
194 remote_branch=remote_branch,
195 remote_name=remote_name
196 )
197 build.build(config, dirty=not clean)
198 gh_deploy.gh_deploy(config, message=message)
199 except exceptions.ConfigurationError as e:
200 # Avoid ugly, unhelpful traceback
201 raise SystemExit('\n' + str(e))
202
203
204 @cli.command(name="new")
205 @click.argument("project_directory")
206 @common_options
207 def new_command(project_directory):
208 """Create a new MkDocs project"""
209 new.new(project_directory)
210
211 if __name__ == '__main__':
212 cli()
213
[end of mkdocs/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mkdocs/__main__.py b/mkdocs/__main__.py
--- a/mkdocs/__main__.py
+++ b/mkdocs/__main__.py
@@ -9,7 +9,7 @@
from mkdocs import __version__
from mkdocs import utils
from mkdocs import exceptions
-from mkdocs.config import load_config
+from mkdocs import config
from mkdocs.commands import build, gh_deploy, new, serve
log = logging.getLogger(__name__)
@@ -112,6 +112,10 @@
logging.getLogger('tornado').setLevel(logging.WARNING)
+ # Don't override config value if user did not specify --strict flag
+ # Conveniently, load_config drops None values
+ strict = strict or None
+
try:
serve.serve(
config_file=config_file,
@@ -136,8 +140,13 @@
@common_options
def build_command(clean, config_file, strict, theme, theme_dir, site_dir):
"""Build the MkDocs documentation"""
+
+ # Don't override config value if user did not specify --strict flag
+ # Conveniently, load_config drops None values
+ strict = strict or None
+
try:
- build.build(load_config(
+ build.build(config.load_config(
config_file=config_file,
strict=strict,
theme=theme,
@@ -168,8 +177,12 @@
"future MkDocs release. For details on updating: "
"http://www.mkdocs.org/about/release-notes/")
+ # Don't override config value if user did not specify --strict flag
+ # Conveniently, load_config drops None values
+ strict = strict or None
+
try:
- build.build(load_config(
+ build.build(config.load_config(
config_file=config_file,
strict=strict,
site_dir=site_dir
@@ -189,13 +202,13 @@
def gh_deploy_command(config_file, clean, message, remote_branch, remote_name):
"""Deploy your documentation to GitHub Pages"""
try:
- config = load_config(
+ cfg = config.load_config(
config_file=config_file,
remote_branch=remote_branch,
remote_name=remote_name
)
- build.build(config, dirty=not clean)
- gh_deploy.gh_deploy(config, message=message)
+ build.build(cfg, dirty=not clean)
+ gh_deploy.gh_deploy(cfg, message=message)
except exceptions.ConfigurationError as e:
# Avoid ugly, unhelpful traceback
raise SystemExit('\n' + str(e))
|
{"golden_diff": "diff --git a/mkdocs/__main__.py b/mkdocs/__main__.py\n--- a/mkdocs/__main__.py\n+++ b/mkdocs/__main__.py\n@@ -9,7 +9,7 @@\n from mkdocs import __version__\n from mkdocs import utils\n from mkdocs import exceptions\n-from mkdocs.config import load_config\n+from mkdocs import config\n from mkdocs.commands import build, gh_deploy, new, serve\n \n log = logging.getLogger(__name__)\n@@ -112,6 +112,10 @@\n \n logging.getLogger('tornado').setLevel(logging.WARNING)\n \n+ # Don't override config value if user did not specify --strict flag\n+ # Conveniently, load_config drops None values\n+ strict = strict or None\n+\n try:\n serve.serve(\n config_file=config_file,\n@@ -136,8 +140,13 @@\n @common_options\n def build_command(clean, config_file, strict, theme, theme_dir, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n+\n+ # Don't override config value if user did not specify --strict flag\n+ # Conveniently, load_config drops None values\n+ strict = strict or None\n+\n try:\n- build.build(load_config(\n+ build.build(config.load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n@@ -168,8 +177,12 @@\n \"future MkDocs release. For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n \n+ # Don't override config value if user did not specify --strict flag\n+ # Conveniently, load_config drops None values\n+ strict = strict or None\n+\n try:\n- build.build(load_config(\n+ build.build(config.load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n@@ -189,13 +202,13 @@\n def gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n- config = load_config(\n+ cfg = config.load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n- build.build(config, dirty=not clean)\n- gh_deploy.gh_deploy(config, message=message)\n+ build.build(cfg, dirty=not clean)\n+ gh_deploy.gh_deploy(cfg, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n", "issue": "Configuration strict option not checking broken links\nI was testing mkdocs earlier this year and was using \"strict: true\" in my mkdocs.yml. 
If I recall correctly, it was working then and building would break if there was a broken link in the .md.\n\nHowever I am now using v0.14.0 and this config option seems to have no effect on the build - I get no warnings or errors when building regardless of the value of strict.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport click\nimport socket\n\nfrom mkdocs import __version__\nfrom mkdocs import utils\nfrom mkdocs import exceptions\nfrom mkdocs.config import load_config\nfrom mkdocs.commands import build, gh_deploy, new, serve\n\nlog = logging.getLogger(__name__)\n\n# Disable the warning that Click displays (as of Click version 5.0) when users\n# use unicode_literals in Python 2.\n# See http://click.pocoo.org/dev/python3/#unicode-literals for more details.\nclick.disable_unicode_literals_warning = True\n\n\nclass State(object):\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ndef common_options(f):\n f = verbose_option(f)\n f = quiet_option(f)\n return f\n\n\nclean_help = \"Remove old files from the site_dir before building\"\nconfig_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_dir_help = \"The theme directory to use when building your documentation.\"\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nreload_help = \"Enable the live reloading in the development server (this is the default)\"\nno_reload_help = \"Disable the live reloading in the development server.\"\ndirty_reload_help = \"Enable the live reloading in the development server, but only re-build files that have changed\"\ncommit_message_help = (\"A commit message to use when commiting to the \"\n \"Github Pages remote branch\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. This \"\n \"overrides the value specified in config\")\nremote_name_help = (\"The remote name to commit to for Github Pages. 
This \"\n \"overrides the value specified in config\")\n\n\[email protected](context_settings={'help_option_names': ['-h', '--help']})\[email protected]_option(__version__, '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n\n\[email protected](name=\"serve\")\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)\[email protected]('--livereload', 'livereload', flag_value='livereload', help=reload_help)\[email protected]('--no-livereload', 'livereload', flag_value='no-livereload', help=no_reload_help)\[email protected]('-d', '--dirtyreload', 'livereload', flag_value='dirty', help=dirty_reload_help)\n@common_options\ndef serve_command(dev_addr, config_file, strict, theme, theme_dir, livereload):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n livereload=livereload\n )\n except (exceptions.ConfigurationError, socket.error) as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"build\")\[email protected]('-c', '--clean/--dirty', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-e', '--theme-dir', type=click.Path(), help=theme_dir_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, config_file, strict, theme, theme_dir, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir\n ), dirty=not clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"json\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef json_command(clean, config_file, strict, site_dir):\n \"\"\"Build the MkDocs documentation to JSON files\n\n Rather than building your documentation to HTML pages, this\n outputs each page in a simple JSON format. This command is\n useful if you want to index your documentation in an external\n search engine.\n \"\"\"\n\n log.warning(\"The json command is deprecated and will be removed in a \"\n \"future MkDocs release. 
For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n ), dump_json=True, dirty=not clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"gh-deploy\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_help)\[email protected]('-m', '--message', help=commit_message_help)\[email protected]('-b', '--remote-branch', help=remote_branch_help)\[email protected]('-r', '--remote-name', help=remote_name_help)\n@common_options\ndef gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n config = load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n build.build(config, dirty=not clean)\n gh_deploy.gh_deploy(config, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"new\")\[email protected](\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n\nif __name__ == '__main__':\n cli()\n", "path": "mkdocs/__main__.py"}]}
| 2,968 | 576 |
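The mkdocs diff above works because of two details: with `is_flag=True`, click passes `False` — not `None` — when the flag is omitted, and, as the diff's own comment notes, `load_config` drops `None` values. Mapping `False` to `None` therefore stops an unspecified `--strict` flag from overriding `strict: true` in mkdocs.yml. A stripped-down illustration (not the real mkdocs CLI) follows.

```python
import click

@click.command()
@click.option('-s', '--strict', is_flag=True)
def build_command(strict):
    # Omitting --strict gives strict == False, which would override the config file;
    # mapping False to None lets the value from mkdocs.yml win.
    strict = strict or None
    click.echo(f"value handed to load_config: {strict!r}")

if __name__ == '__main__':
    build_command()
```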
gh_patches_debug_6031
|
rasdani/github-patches
|
git_diff
|
pytorch__audio-579
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_audio_backends platform independent
## 🐛 Bug
The constant [_audio_backends](https://github.com/pytorch/audio/blob/c29598d54185d73b4ed04103330573e190bbdb69/torchaudio/_backend.py#L12) is referenced by [BACKENDS](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/common_utils.py#L10) in the common utilities of the test folder. [test_batch_mfcc](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/test_batch_consistency.py#L181) is skipped if the 'sox' key is not present in that constant, but it always is. That means this test will be executed in environments where the package may not exist.
```
(base) PS C:\Users\chris\dev\audio> python .\test\test_batch_consistency.py TestTransforms.test_batch_mfcc
E
======================================================================
ERROR: test_batch_mfcc (__main__.TestTransforms)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\tools\Anaconda3\lib\contextlib.py", line 74, in inner
return func(*args, **kwds)
File ".\test\test_batch_consistency.py", line 185, in test_batch_mfcc
waveform, _ = torchaudio.load(test_filepath)
File "c:\users\chris\dev\audio\torchaudio\__init__.py", line 87, in load
filetype=filetype,
File "c:\users\chris\dev\audio\torchaudio\_sox_backend.py", line 38, in load
import _torch_sox
ModuleNotFoundError: No module named '_torch_sox'
----------------------------------------------------------------------
Ran 1 test in 0.001s
FAILED (errors=1)
```
## To Reproduce
Steps to reproduce the behavior:
1. Remove sox from your environment
2. Run ```python test/test_batch_consistency.py TestTransform.test_batch_mfcc```
I can provide more detailed information if required.
## Expected behavior
The test should be skipped if sox is not available.
## Environment
```
(base) PS C:\Users\chris\dev\audio> python .\collect_env_1.py
Collecting environment information...
PyTorch version: 1.6.0a0+8a60d8b
Is debug build: No
CUDA used to build PyTorch: None
OS: Microsoft Windows 10 Home
GCC version: Could not collect
CMake version: version 3.14.0
Python version: 3.7
Is CUDA available: No
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
Versions of relevant libraries:
[pip] numpy==1.18.1
[pip] numpydoc==0.9.2
[pip] torch==1.6.0a0+8a60d8b
[pip] torchaudio==0.5.0a0+5a75b63
[conda] blas 1.0 mkl
[conda] mkl 2020.0 166
[conda] mkl-include 2020.0 166
[conda] mkl-service 2.3.0 py37hb782905_0
[conda] mkl_fft 1.0.15 py37h14836fe_0
[conda] mkl_random 1.1.0 py37h675688f_0
[conda] numpy 1.18.1 py37h93ca92e_0
[conda] numpy-base 1.18.1 py37hc3f5095_1
[conda] numpydoc 0.9.2 py_0
[conda] torch 1.6.0a0+8a60d8b dev_0 <develop>
[conda] torchaudio 0.5.0a0+5a75b63 dev_0 <develop>
```
## Additional context
</issue>
<code>
[start of torchaudio/_backend.py]
1 from functools import wraps
2 from typing import Any, List, Union
3
4 import platform
5 import torch
6 from torch import Tensor
7
8 from . import _soundfile_backend, _sox_backend
9
10
11 _audio_backend = "soundfile" if platform.system() == "Windows" else "sox"
12 _audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}
13
14
15 def set_audio_backend(backend: str) -> None:
16 """
17 Specifies the package used to load.
18 Args:
19 backend (str): Name of the backend. One of {}.
20 """.format(_audio_backends.keys())
21 global _audio_backend
22 if backend not in _audio_backends:
23 raise ValueError(
24 "Invalid backend '{}'. Options are {}.".format(backend, _audio_backends.keys())
25 )
26 _audio_backend = backend
27
28
29 def get_audio_backend() -> str:
30 """
31 Gets the name of the package used to load.
32 """
33 return _audio_backend
34
35
36 def _get_audio_backend_module() -> Any:
37 """
38 Gets the module backend to load.
39 """
40 backend = get_audio_backend()
41 return _audio_backends[backend]
42
43
44 def _audio_backend_guard(backends: Union[str, List[str]]) -> Any:
45
46 if isinstance(backends, str):
47 backends = [backends]
48
49 def decorator(func):
50 @wraps(func)
51 def wrapper(*args, **kwargs):
52 if get_audio_backend() not in backends:
53 raise RuntimeError("Function {} requires backend to be one of {}.".format(func.__name__, backends))
54 return func(*args, **kwargs)
55 return wrapper
56
57 return decorator
58
59
60 def check_input(src: Tensor) -> None:
61 if not torch.is_tensor(src):
62 raise TypeError('Expected a tensor, got %s' % type(src))
63 if src.is_cuda:
64 raise TypeError('Expected a CPU based tensor, got %s' % type(src))
65
[end of torchaudio/_backend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchaudio/_backend.py b/torchaudio/_backend.py
--- a/torchaudio/_backend.py
+++ b/torchaudio/_backend.py
@@ -8,8 +8,12 @@
from . import _soundfile_backend, _sox_backend
-_audio_backend = "soundfile" if platform.system() == "Windows" else "sox"
-_audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}
+if platform.system() == "Windows":
+ _audio_backend = "soundfile"
+ _audio_backends = {"soundfile": _soundfile_backend}
+else:
+ _audio_backend = "sox"
+ _audio_backends = {"sox": _sox_backend, "soundfile": _soundfile_backend}
def set_audio_backend(backend: str) -> None:
|
{"golden_diff": "diff --git a/torchaudio/_backend.py b/torchaudio/_backend.py\n--- a/torchaudio/_backend.py\n+++ b/torchaudio/_backend.py\n@@ -8,8 +8,12 @@\n from . import _soundfile_backend, _sox_backend\n \n \n-_audio_backend = \"soundfile\" if platform.system() == \"Windows\" else \"sox\"\n-_audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n+if platform.system() == \"Windows\":\n+ _audio_backend = \"soundfile\"\n+ _audio_backends = {\"soundfile\": _soundfile_backend}\n+else:\n+ _audio_backend = \"sox\"\n+ _audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n \n \n def set_audio_backend(backend: str) -> None:\n", "issue": "_audio_backends platform independent\n## \ud83d\udc1b Bug\r\n\r\nThe constant [_audio_backends](https://github.com/pytorch/audio/blob/c29598d54185d73b4ed04103330573e190bbdb69/torchaudio/_backend.py#L12) is referenced by [BACKENDS](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/common_utils.py#L10) in the common utilities of the test folder. [test_batch_mfcc](https://github.com/pytorch/audio/blob/0e5581cb2a9616205a00cbabf4c9a30613a1037f/test/test_batch_consistency.py#L181) is skipped if the 'sox' key is not present in that constant, but it always is. That means this test will be executed in environments where the package may not exist.\r\n\r\n```\r\n(base) PS C:\\Users\\chris\\dev\\audio> python .\\test\\test_batch_consistency.py TestTransforms.test_batch_mfcc\r\nE\r\n======================================================================\r\nERROR: test_batch_mfcc (__main__.TestTransforms)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"C:\\tools\\Anaconda3\\lib\\contextlib.py\", line 74, in inner\r\n return func(*args, **kwds)\r\n File \".\\test\\test_batch_consistency.py\", line 185, in test_batch_mfcc\r\n waveform, _ = torchaudio.load(test_filepath)\r\n File \"c:\\users\\chris\\dev\\audio\\torchaudio\\__init__.py\", line 87, in load\r\n filetype=filetype,\r\n File \"c:\\users\\chris\\dev\\audio\\torchaudio\\_sox_backend.py\", line 38, in load\r\n import _torch_sox\r\nModuleNotFoundError: No module named '_torch_sox'\r\n\r\n----------------------------------------------------------------------\r\nRan 1 test in 0.001s\r\n\r\nFAILED (errors=1)\r\n```\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Remove sox from your environment\r\n2. 
Run ```python test/test_batch_consistency.py TestTransform.test_batch_mfcc```\r\n\r\nI can provide more detailed information if required.\r\n\r\n## Expected behavior\r\n\r\nThe test should be skipped if sox is not available.\r\n\r\n## Environment\r\n\r\n```\r\n(base) PS C:\\Users\\chris\\dev\\audio> python .\\collect_env_1.py\r\nCollecting environment information...\r\nPyTorch version: 1.6.0a0+8a60d8b\r\nIs debug build: No\r\nCUDA used to build PyTorch: None\r\n\r\nOS: Microsoft Windows 10 Home\r\nGCC version: Could not collect\r\nCMake version: version 3.14.0\r\n\r\nPython version: 3.7\r\nIs CUDA available: No\r\nCUDA runtime version: No CUDA\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\n\r\nVersions of relevant libraries:\r\n[pip] numpy==1.18.1\r\n[pip] numpydoc==0.9.2\r\n[pip] torch==1.6.0a0+8a60d8b\r\n[pip] torchaudio==0.5.0a0+5a75b63\r\n[conda] blas 1.0 mkl\r\n[conda] mkl 2020.0 166\r\n[conda] mkl-include 2020.0 166\r\n[conda] mkl-service 2.3.0 py37hb782905_0\r\n[conda] mkl_fft 1.0.15 py37h14836fe_0\r\n[conda] mkl_random 1.1.0 py37h675688f_0\r\n[conda] numpy 1.18.1 py37h93ca92e_0\r\n[conda] numpy-base 1.18.1 py37hc3f5095_1\r\n[conda] numpydoc 0.9.2 py_0\r\n[conda] torch 1.6.0a0+8a60d8b dev_0 <develop>\r\n[conda] torchaudio 0.5.0a0+5a75b63 dev_0 <develop>\r\n```\r\n## Additional context\r\n\r\n\n", "before_files": [{"content": "from functools import wraps\nfrom typing import Any, List, Union\n\nimport platform\nimport torch\nfrom torch import Tensor\n\nfrom . import _soundfile_backend, _sox_backend\n\n\n_audio_backend = \"soundfile\" if platform.system() == \"Windows\" else \"sox\"\n_audio_backends = {\"sox\": _sox_backend, \"soundfile\": _soundfile_backend}\n\n\ndef set_audio_backend(backend: str) -> None:\n \"\"\"\n Specifies the package used to load.\n Args:\n backend (str): Name of the backend. One of {}.\n \"\"\".format(_audio_backends.keys())\n global _audio_backend\n if backend not in _audio_backends:\n raise ValueError(\n \"Invalid backend '{}'. Options are {}.\".format(backend, _audio_backends.keys())\n )\n _audio_backend = backend\n\n\ndef get_audio_backend() -> str:\n \"\"\"\n Gets the name of the package used to load.\n \"\"\"\n return _audio_backend\n\n\ndef _get_audio_backend_module() -> Any:\n \"\"\"\n Gets the module backend to load.\n \"\"\"\n backend = get_audio_backend()\n return _audio_backends[backend]\n\n\ndef _audio_backend_guard(backends: Union[str, List[str]]) -> Any:\n\n if isinstance(backends, str):\n backends = [backends]\n\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if get_audio_backend() not in backends:\n raise RuntimeError(\"Function {} requires backend to be one of {}.\".format(func.__name__, backends))\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "path": "torchaudio/_backend.py"}]}
| 2,129 | 195 |
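The point of the torchaudio fix above is that test helpers keyed off the backend registry (such as `BACKENDS` in `test/common_utils.py`, mentioned in the issue) can now genuinely skip sox-only tests on platforms where sox is absent. A minimal, hypothetical sketch of that skip pattern — the dict here is a stand-in for the patched `_audio_backends`, not torchaudio's real object:

```python
import platform
import unittest

# Stand-in for torchaudio._backend._audio_backends after the patch:
# the "sox" key only exists on non-Windows platforms.
if platform.system() == "Windows":
    BACKENDS = {"soundfile": object()}
else:
    BACKENDS = {"sox": object(), "soundfile": object()}


class TestTransforms(unittest.TestCase):
    @unittest.skipIf("sox" not in BACKENDS, "sox backend not available")
    def test_batch_mfcc(self):
        self.assertTrue(True)  # placeholder for the real batch-consistency check


if __name__ == "__main__":
    unittest.main()
```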
gh_patches_debug_6426
|
rasdani/github-patches
|
git_diff
|
pymeasure__pymeasure-936
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
recent changes break MKS937B check_set_errors
It was previously OK for `check_errors` to return `None` (or nothing), as for example in [MKS937B](https://github.com/pymeasure/pymeasure/blob/68f5487b0837e9f3c5debd144a7ddfeb7bddf3e5/pymeasure/instruments/mksinst/mks937b.py#L150).
Recently it became mandatory that `check_set_errors` and `check_get_errors` return lists. This is correctly documented, and some existing devices were also changed -> 1214d32.
`MKS937B`, it seems, was overlooked.
https://github.com/pymeasure/pymeasure/blob/68f5487b0837e9f3c5debd144a7ddfeb7bddf3e5/pymeasure/instruments/mksinst/mks937b.py#L150-L162
Is the only change needed here to return an empty list upon no error?
</issue>
<code>
[start of pymeasure/instruments/mksinst/mks937b.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2023 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import re
26
27 from pymeasure.instruments import Channel, Instrument
28 from pymeasure.instruments.validators import strict_discrete_set
29
30
31 _ion_gauge_status = {"Wait": "W",
32 "Off": "O",
33 "Protect": "P",
34 "Degas": "D",
35 "Control": "C",
36 "Rear panel Ctrl off": "R",
37 "HC filament fault": "H",
38 "No gauge": "N",
39 "Good": "G",
40 "NOT_IONGAUGE": "NAK152",
41 "INVALID COMMAND": "NAK160",
42 }
43
44
45 class PressureChannel(Channel):
46 pressure = Channel.measurement(
47 "PR{ch}?", """ Pressure on the channel in units selected on the device""",
48 )
49
50 power_enabled = Channel.control(
51 "CP{ch}?", "CP{ch}!%s",
52 """Power status of the channel""",
53 validator=strict_discrete_set,
54 map_values=True,
55 values={True: "ON", False: "OFF"},
56 check_set_errors=True,
57 )
58
59
60 class IonGaugeAndPressureChannel(PressureChannel):
61 """Channel having both a pressure and an ion gauge sensor"""
62 ion_gauge_status = Channel.measurement(
63 "T{ch}?",
64 """Ion gauge status of the channel""",
65 map_values=True,
66 values=_ion_gauge_status,
67 )
68
69
70 class MKS937B(Instrument):
71 """ MKS 937B vacuum gauge controller
72
73 Connection to the device is made through an RS232/RS485 serial connection.
74 The communication protocol of this device is as follows:
75
76 Query: '@<aaa><Command>?;FF' with the response '@<aaa>ACK<Response>;FF'
77 Set command: '@<aaa><Command>!<parameter>;FF' with the response '@<aaa>ACK<Response>;FF'
78 Above <aaa> is an address from 001 to 254 which can be specified upon
79 initialization. Since ';FF' is not supported by pyvisa as terminator this
80 class overloads the device communication methods.
81
82 :param adapter: pyvisa resource name of the instrument or adapter instance
83 :param string name: The name of the instrument.
84 :param address: device address included in every message to the instrument
85 (default=253)
86 :param kwargs: Any valid key-word argument for Instrument
87 """
88 channels = Instrument.ChannelCreator(
89 (IonGaugeAndPressureChannel, PressureChannel) * 3,
90 ("1", "2", "3", "4", "5", "6"),
91 ) # Channels 1,3,5 have both an ion gauge and a pressure sensor, 2,4,6 only a pressure sensor
92
93 def __init__(self, adapter, name="MKS 937B vacuum gauge controller", address=253, **kwargs):
94 super().__init__(
95 adapter,
96 name,
97 includeSCPI=False,
98 read_termination=";", # in reality its ";FF"
99 # which is, however, invalid for pyvisa. Therefore extra bytes have to
100 # be read in the read() method and the terminators are hardcoded here.
101 write_termination=";FF",
102 **kwargs
103 )
104 self.address = address
105 # compiled regular expression for finding numerical values in reply strings
106 self._re_response = re.compile(fr"@{self.address:03d}(?P<ack>ACK)?(?P<msg>.*)")
107
108 def _extract_reply(self, reply):
109 """ preprocess_reply function which tries to extract <Response> from
110         '@<aaa>ACK<Response>;FF'. If <Response> cannot be identified the original string
111 is returned.
112 :param reply: reply string
113 :returns: string with only the response, or the original string
114 """
115 rvalue = self._re_response.search(reply)
116 if rvalue:
117 return rvalue.group('msg')
118 return reply
119
120 def _prepend_address(self, cmd):
121 """
122 create command string by including the device address
123 """
124 return f"@{self.address:03d}{cmd}"
125
126 def _check_extra_termination(self):
127 """
128 Check the read termination to correspond to the protocol
129 """
130 t = super().read_bytes(2) # read extra termination chars 'FF'
131 if t != b'FF':
132 raise ValueError(f"unexpected termination string received {t}")
133
134 def read(self):
135 """
136 Reads from the instrument including the correct termination characters
137 """
138 ret = super().read()
139 self._check_extra_termination()
140 return self._extract_reply(ret)
141
142 def write(self, command):
143 """
144 Write to the instrument including the device address.
145
146 :param command: command string to be sent to the instrument
147 """
148 super().write(self._prepend_address(command))
149
150 def check_set_errors(self):
151 """
152 Check reply string for acknowledgement string.
153 """
154 ret = super().read() # use super read to get raw reply
155 reply = self._re_response.search(ret)
156 if reply:
157 if reply.group('ack') == 'ACK':
158 return
159 # no valid acknowledgement message found
160 raise ValueError(f"invalid reply '{ret}' found in check_errors")
161
162 serial = Instrument.measurement(
163 "SN?", """ Serial number of the instrument """,
164 cast=str,
165 )
166
167 all_pressures = Instrument.measurement(
168 "PRZ?", """ Read pressures on all channels in selected units """,
169 )
170
171 combined_pressure1 = Instrument.measurement(
172 "PC1?", """ Read pressure on channel 1 and its combination sensor """,
173 )
174
175 combined_pressure2 = Instrument.measurement(
176 "PC2?", """ Read pressure on channel 2 and its combination sensor """,
177 )
178
179 unit = Instrument.control(
180 "U?", "U!%s",
181 """Pressure unit used for all pressure readings from the instrument""",
182 validator=strict_discrete_set,
183 map_values=True,
184 values={"Torr": "TORR",
185 "mBar": "mBAR",
186 "Pascal": "PASCAL",
187 "Micron": "MICRON",
188 },
189 check_set_errors=True,
190 )
191
[end of pymeasure/instruments/mksinst/mks937b.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pymeasure/instruments/mksinst/mks937b.py b/pymeasure/instruments/mksinst/mks937b.py
--- a/pymeasure/instruments/mksinst/mks937b.py
+++ b/pymeasure/instruments/mksinst/mks937b.py
@@ -155,7 +155,8 @@
reply = self._re_response.search(ret)
if reply:
if reply.group('ack') == 'ACK':
- return
+ self._check_extra_termination()
+ return []
# no valid acknowledgement message found
raise ValueError(f"invalid reply '{ret}' found in check_errors")
|
{"golden_diff": "diff --git a/pymeasure/instruments/mksinst/mks937b.py b/pymeasure/instruments/mksinst/mks937b.py\n--- a/pymeasure/instruments/mksinst/mks937b.py\n+++ b/pymeasure/instruments/mksinst/mks937b.py\n@@ -155,7 +155,8 @@\n reply = self._re_response.search(ret)\n if reply:\n if reply.group('ack') == 'ACK':\n- return\n+ self._check_extra_termination()\n+ return []\n # no valid acknowledgement message found\n raise ValueError(f\"invalid reply '{ret}' found in check_errors\")\n", "issue": "recent changes break MKS937B check_set_errors\nIt was previously ok that `check_errors` returns `None` (or nothing) as for example in [MKS937B](https://github.com/pymeasure/pymeasure/blob/68f5487b0837e9f3c5debd144a7ddfeb7bddf3e5/pymeasure/instruments/mksinst/mks937b.py#L150).\r\n\r\nRecently it got mandatory that the `check_set_errors` and `check_get_errors` return lists. This is correctly documented and some existing devices were also changed -> 1214d32.\r\n`MKS937B` it seems was overlooked. \r\n\r\nhttps://github.com/pymeasure/pymeasure/blob/68f5487b0837e9f3c5debd144a7ddfeb7bddf3e5/pymeasure/instruments/mksinst/mks937b.py#L150-L162\r\n\r\nIs the only change needed here to return an empty list upon no error?\r\n\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport re\n\nfrom pymeasure.instruments import Channel, Instrument\nfrom pymeasure.instruments.validators import strict_discrete_set\n\n\n_ion_gauge_status = {\"Wait\": \"W\",\n \"Off\": \"O\",\n \"Protect\": \"P\",\n \"Degas\": \"D\",\n \"Control\": \"C\",\n \"Rear panel Ctrl off\": \"R\",\n \"HC filament fault\": \"H\",\n \"No gauge\": \"N\",\n \"Good\": \"G\",\n \"NOT_IONGAUGE\": \"NAK152\",\n \"INVALID COMMAND\": \"NAK160\",\n }\n\n\nclass PressureChannel(Channel):\n pressure = Channel.measurement(\n \"PR{ch}?\", \"\"\" Pressure on the channel in units selected on the device\"\"\",\n )\n\n power_enabled = Channel.control(\n \"CP{ch}?\", \"CP{ch}!%s\",\n \"\"\"Power status of the channel\"\"\",\n validator=strict_discrete_set,\n map_values=True,\n values={True: \"ON\", False: \"OFF\"},\n check_set_errors=True,\n )\n\n\nclass IonGaugeAndPressureChannel(PressureChannel):\n \"\"\"Channel having both a pressure and an ion gauge sensor\"\"\"\n ion_gauge_status = Channel.measurement(\n \"T{ch}?\",\n \"\"\"Ion gauge status of the channel\"\"\",\n map_values=True,\n values=_ion_gauge_status,\n )\n\n\nclass MKS937B(Instrument):\n \"\"\" MKS 937B vacuum gauge controller\n\n Connection to the device is made through an RS232/RS485 serial connection.\n The communication protocol of this device is as follows:\n\n Query: '@<aaa><Command>?;FF' with the response '@<aaa>ACK<Response>;FF'\n Set command: '@<aaa><Command>!<parameter>;FF' with the response '@<aaa>ACK<Response>;FF'\n Above <aaa> is an address from 001 to 254 which can be specified upon\n initialization. Since ';FF' is not supported by pyvisa as terminator this\n class overloads the device communication methods.\n\n :param adapter: pyvisa resource name of the instrument or adapter instance\n :param string name: The name of the instrument.\n :param address: device address included in every message to the instrument\n (default=253)\n :param kwargs: Any valid key-word argument for Instrument\n \"\"\"\n channels = Instrument.ChannelCreator(\n (IonGaugeAndPressureChannel, PressureChannel) * 3,\n (\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"),\n ) # Channels 1,3,5 have both an ion gauge and a pressure sensor, 2,4,6 only a pressure sensor\n\n def __init__(self, adapter, name=\"MKS 937B vacuum gauge controller\", address=253, **kwargs):\n super().__init__(\n adapter,\n name,\n includeSCPI=False,\n read_termination=\";\", # in reality its \";FF\"\n # which is, however, invalid for pyvisa. Therefore extra bytes have to\n # be read in the read() method and the terminators are hardcoded here.\n write_termination=\";FF\",\n **kwargs\n )\n self.address = address\n # compiled regular expression for finding numerical values in reply strings\n self._re_response = re.compile(fr\"@{self.address:03d}(?P<ack>ACK)?(?P<msg>.*)\")\n\n def _extract_reply(self, reply):\n \"\"\" preprocess_reply function which tries to extract <Response> from\n '@<aaa>ACK<Response>;FF'. 
If <Response> can not be identified the orignal string\n is returned.\n :param reply: reply string\n :returns: string with only the response, or the original string\n \"\"\"\n rvalue = self._re_response.search(reply)\n if rvalue:\n return rvalue.group('msg')\n return reply\n\n def _prepend_address(self, cmd):\n \"\"\"\n create command string by including the device address\n \"\"\"\n return f\"@{self.address:03d}{cmd}\"\n\n def _check_extra_termination(self):\n \"\"\"\n Check the read termination to correspond to the protocol\n \"\"\"\n t = super().read_bytes(2) # read extra termination chars 'FF'\n if t != b'FF':\n raise ValueError(f\"unexpected termination string received {t}\")\n\n def read(self):\n \"\"\"\n Reads from the instrument including the correct termination characters\n \"\"\"\n ret = super().read()\n self._check_extra_termination()\n return self._extract_reply(ret)\n\n def write(self, command):\n \"\"\"\n Write to the instrument including the device address.\n\n :param command: command string to be sent to the instrument\n \"\"\"\n super().write(self._prepend_address(command))\n\n def check_set_errors(self):\n \"\"\"\n Check reply string for acknowledgement string.\n \"\"\"\n ret = super().read() # use super read to get raw reply\n reply = self._re_response.search(ret)\n if reply:\n if reply.group('ack') == 'ACK':\n return\n # no valid acknowledgement message found\n raise ValueError(f\"invalid reply '{ret}' found in check_errors\")\n\n serial = Instrument.measurement(\n \"SN?\", \"\"\" Serial number of the instrument \"\"\",\n cast=str,\n )\n\n all_pressures = Instrument.measurement(\n \"PRZ?\", \"\"\" Read pressures on all channels in selected units \"\"\",\n )\n\n combined_pressure1 = Instrument.measurement(\n \"PC1?\", \"\"\" Read pressure on channel 1 and its combination sensor \"\"\",\n )\n\n combined_pressure2 = Instrument.measurement(\n \"PC2?\", \"\"\" Read pressure on channel 2 and its combination sensor \"\"\",\n )\n\n unit = Instrument.control(\n \"U?\", \"U!%s\",\n \"\"\"Pressure unit used for all pressure readings from the instrument\"\"\",\n validator=strict_discrete_set,\n map_values=True,\n values={\"Torr\": \"TORR\",\n \"mBar\": \"mBAR\",\n \"Pascal\": \"PASCAL\",\n \"Micron\": \"MICRON\",\n },\n check_set_errors=True,\n )\n", "path": "pymeasure/instruments/mksinst/mks937b.py"}]}
| 2,883 | 151 |
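The contract the pymeasure patch enforces is that `check_set_errors` always returns a list — empty when the instrument acknowledged the command — so callers can iterate over the result without special-casing `None`. A small self-contained sketch of that contract, using a fake instrument rather than the real pymeasure classes:

```python
class FakeInstrument:
    """Toy stand-in for an instrument speaking the '@<aaa>ACK<Response>;FF' protocol."""

    def __init__(self, reply):
        self._reply = reply

    def check_set_errors(self):
        # Return an empty list on success instead of None, mirroring the patch.
        if "ACK" in self._reply:
            return []
        raise ValueError(f"invalid reply {self._reply!r} found in check_errors")


for error in FakeInstrument("@253ACK;FF").check_set_errors():
    print(error)  # never reached: the list is empty on success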
gh_patches_debug_9051
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-7790
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The "Did you mean..." suggestions' total is wrong
The "Did you mean..." says there are supposed to be 103 matches on this suggestion:
<img width="503" alt="Screen Shot 2021-02-09 at 5 21 50 PM" src="https://user-images.githubusercontent.com/26739/107436375-6fcd6780-6afb-11eb-8989-a6ea605881eb.png">
But if you apply it, you get a different total:
<img width="458" alt="Screen Shot 2021-02-09 at 5 22 00 PM" src="https://user-images.githubusercontent.com/26739/107436459-8e336300-6afb-11eb-90bf-7e19fdddf08d.png">
Not sure if it's an error in Kuma or in how the results are presented in Yari.
</issue>
<code>
[start of kuma/api/v1/search/__init__.py]
1 from django import http
2 from django.conf import settings
3 from elasticsearch import exceptions
4 from elasticsearch_dsl import Q, query, Search
5 from redo import retrying
6
7 from kuma.api.v1.decorators import allow_CORS_GET
8
9 from .forms import SearchForm
10
11
12 class JsonResponse(http.JsonResponse):
13 """The only reason this exists is so that other Django views can call
14 views that return instances of this and then get to the data before it
15 gets JSON serialized.
16 This is something that rest_framework's JsonResponse supports.
17 Ultimately, the only view that cares is the (old) Kuma search view page
18 that calls the view function here in this file. Now it can do something like:
19
20 response = kuma.api.v1.search.search(request)
21 found = response.data
22
23 """
24
25 def __init__(self, data, *args, **kwargs):
26 self.data = data
27 super().__init__(data, *args, **kwargs)
28
29
30 def legacy(request, locale=None):
31 raise NotImplementedError("work harder")
32
33
34 @allow_CORS_GET
35 def search(request, locale=None):
36 initial = {"size": 10, "page": 1, "archive": SearchForm.ARCHIVE_CHOICES[0]}
37 if locale:
38 initial["locale"] = locale
39 form = SearchForm(request.GET, initial=initial)
40 if not form.is_valid():
41 return JsonResponse({"errors": form.errors.get_json_data()}, status=400)
42
43 locales = form.cleaned_data["locale"] or [settings.LANGUAGE_CODE]
44 assert isinstance(locales, list)
45
46 params = {
47 "locales": [x.lower() for x in locales],
48 "archive": form.cleaned_data["archive"],
49 "query": form.cleaned_data["q"],
50 "size": form.cleaned_data["size"],
51 "page": form.cleaned_data["page"],
52 "sort": form.cleaned_data["sort"],
53 # The `slug` is always stored, as a Keyword index, in lowercase.
54 "slug_prefixes": [x.lower() for x in form.cleaned_data["slug_prefix"]],
55 }
56 results = _find(
57 params,
58 make_suggestions=True,
59 )
60 return JsonResponse(results)
61
62
63 def _find(params, total_only=False, make_suggestions=False, min_suggestion_score=0.8):
64 search_query = Search(
65 index=settings.SEARCH_INDEX_NAME,
66 )
67 if make_suggestions:
68 # XXX research if it it's better to use phrase suggesters and if
69 # that works
70 # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters.html#phrase-suggester
71 search_query = search_query.suggest(
72 "title_suggestions", params["query"], term={"field": "title"}
73 )
74 search_query = search_query.suggest(
75 "body_suggestions", params["query"], term={"field": "body"}
76 )
77
78 sub_queries = []
79 sub_queries.append(Q("match", title={"query": params["query"], "boost": 2.0}))
80 sub_queries.append(Q("match", body={"query": params["query"], "boost": 1.0}))
81 if " " in params["query"]:
82 sub_queries.append(
83 Q("match_phrase", title={"query": params["query"], "boost": 10.0})
84 )
85 sub_queries.append(
86 Q("match_phrase", body={"query": params["query"], "boost": 5.0})
87 )
88
89 sub_query = query.Bool(should=sub_queries)
90
91 if params["locales"]:
92 search_query = search_query.filter("terms", locale=params["locales"])
93 if params["archive"] == "exclude":
94 search_query = search_query.filter("term", archived=False)
95 elif params["archive"] == "only":
96 search_query = search_query.filter("term", archived=True)
97
98 if params["slug_prefixes"]:
99 sub_queries = [Q("prefix", slug=x) for x in params["slug_prefixes"]]
100 search_query = search_query.query(query.Bool(should=sub_queries))
101
102 search_query = search_query.highlight_options(
103 pre_tags=["<mark>"],
104 post_tags=["</mark>"],
105 number_of_fragments=3,
106 fragment_size=120,
107 encoder="html",
108 )
109 search_query = search_query.highlight("title", "body")
110
111 if params["sort"] == "relevance":
112 search_query = search_query.sort("_score", "-popularity")
113 search_query = search_query.query(sub_query)
114 elif params["sort"] == "popularity":
115 search_query = search_query.sort("-popularity", "_score")
116 search_query = search_query.query(sub_query)
117 else:
118 popularity_factor = 10.0
119 boost_mode = "sum"
120 score_mode = "max"
121 search_query = search_query.query(
122 "function_score",
123 query=sub_query,
124 functions=[
125 query.SF(
126 "field_value_factor",
127 field="popularity",
128 factor=popularity_factor,
129 missing=0.0,
130 )
131 ],
132 boost_mode=boost_mode,
133 score_mode=score_mode,
134 )
135
136 search_query = search_query.source(excludes=["body"])
137
138 search_query = search_query[
139 params["size"] * (params["page"] - 1) : params["size"] * params["page"]
140 ]
141
142 retry_options = {
143 "retry_exceptions": (
144 # This is the standard operational exception.
145 exceptions.ConnectionError,
146 # This can happen if the search happened right as the index had
147 # just been deleted due to a fresh re-indexing happening in Yari.
148 exceptions.NotFoundError,
149 # This can happen when the index simply isn't ready yet.
150 exceptions.TransportError,
151 ),
152 # The default in redo is 60 seconds. Let's tone that down.
153 "sleeptime": settings.ES_RETRY_SLEEPTIME,
154 "attempts": settings.ES_RETRY_ATTEMPTS,
155 "jitter": settings.ES_RETRY_JITTER,
156 }
157 with retrying(search_query.execute, **retry_options) as retrying_function:
158 response = retrying_function()
159
160 if total_only:
161 return response.hits.total
162
163 metadata = {
164 "took_ms": response.took,
165 "total": {
166 # The `response.hits.total` is a `elasticsearch_dsl.utils.AttrDict`
167 # instance. Pluck only the exact data needed.
168 "value": response.hits.total.value,
169 "relation": response.hits.total.relation,
170 },
171 "size": params["size"],
172 "page": params["page"],
173 }
174 documents = []
175 for hit in response:
176 try:
177 body_highlight = list(hit.meta.highlight.body)
178 except AttributeError:
179 body_highlight = []
180 try:
181 title_highlight = list(hit.meta.highlight.title)
182 except AttributeError:
183 title_highlight = []
184
185 d = {
186 "mdn_url": hit.meta.id,
187 "score": hit.meta.score,
188 "title": hit.title,
189 "locale": hit.locale,
190 "slug": hit.slug,
191 "popularity": hit.popularity,
192 "archived": hit.archived,
193 "summary": hit.summary,
194 "highlight": {
195 "body": body_highlight,
196 "title": title_highlight,
197 },
198 }
199 documents.append(d)
200
201 try:
202 suggest = getattr(response, "suggest")
203 except AttributeError:
204 suggest = None
205
206 suggestions = []
207 if suggest:
208 suggestion_strings = _unpack_suggestions(
209 params["query"],
210 response.suggest,
211 ("body_suggestions", "title_suggestions"),
212 )
213
214 for score, string in suggestion_strings:
215 if score > min_suggestion_score or 1:
216 # Sure, this is different way to spell, but what will it yield
217 # if you actually search it?
218 total = _find(params, total_only=True)
219 if total["value"] > 0:
220 suggestions.append(
221 {
222 "text": string,
223 "total": {
224 # This 'total' is an `AttrDict` instance.
225 "value": total.value,
226 "relation": total.relation,
227 },
228 }
229 )
230 # Since they're sorted by score, it's usually never useful
231 # to suggestion more than exactly 1 good suggestion.
232 break
233
234 return {
235 "documents": documents,
236 "metadata": metadata,
237 "suggestions": suggestions,
238 }
239
240
241 def _unpack_suggestions(query, suggest, keys):
242 alternatives = []
243 for key in keys:
244 for suggestion in getattr(suggest, key, []):
245 for option in suggestion.options:
246 alternatives.append(
247 (
248 option.score,
249 query[0 : suggestion.offset]
250 + option.text
251 + query[suggestion.offset + suggestion.length :],
252 )
253 )
254 alternatives.sort(reverse=True) # highest score first
255 return alternatives
256
[end of kuma/api/v1/search/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/api/v1/search/__init__.py b/kuma/api/v1/search/__init__.py
--- a/kuma/api/v1/search/__init__.py
+++ b/kuma/api/v1/search/__init__.py
@@ -215,7 +215,7 @@
if score > min_suggestion_score or 1:
# Sure, this is different way to spell, but what will it yield
# if you actually search it?
- total = _find(params, total_only=True)
+ total = _find(dict(params, query=string), total_only=True)
if total["value"] > 0:
suggestions.append(
{
|
{"golden_diff": "diff --git a/kuma/api/v1/search/__init__.py b/kuma/api/v1/search/__init__.py\n--- a/kuma/api/v1/search/__init__.py\n+++ b/kuma/api/v1/search/__init__.py\n@@ -215,7 +215,7 @@\n if score > min_suggestion_score or 1:\n # Sure, this is different way to spell, but what will it yield\n # if you actually search it?\n- total = _find(params, total_only=True)\n+ total = _find(dict(params, query=string), total_only=True)\n if total[\"value\"] > 0:\n suggestions.append(\n {\n", "issue": "The \"Did you mean...\" suggestions' total is wrong\nThe \"Did you mean...\" says there are supposed to be 103 matches on this suggestion:\r\n<img width=\"503\" alt=\"Screen Shot 2021-02-09 at 5 21 50 PM\" src=\"https://user-images.githubusercontent.com/26739/107436375-6fcd6780-6afb-11eb-8989-a6ea605881eb.png\">\r\n\r\nBut if you apply it, you get a different total:\r\n<img width=\"458\" alt=\"Screen Shot 2021-02-09 at 5 22 00 PM\" src=\"https://user-images.githubusercontent.com/26739/107436459-8e336300-6afb-11eb-90bf-7e19fdddf08d.png\">\r\n\r\nNot sure if it's an error in Kuma or in how the results are presented in Yari. \r\n\n", "before_files": [{"content": "from django import http\nfrom django.conf import settings\nfrom elasticsearch import exceptions\nfrom elasticsearch_dsl import Q, query, Search\nfrom redo import retrying\n\nfrom kuma.api.v1.decorators import allow_CORS_GET\n\nfrom .forms import SearchForm\n\n\nclass JsonResponse(http.JsonResponse):\n \"\"\"The only reason this exists is so that other Django views can call\n views that return instances of this and then get to the data before it\n gets JSON serialized.\n This is something that rest_framework's JsonResponse supports.\n Ultimately, the only view that cares is the (old) Kuma search view page\n that calls the view function here in this file. 
Now it can do something like:\n\n response = kuma.api.v1.search.search(request)\n found = response.data\n\n \"\"\"\n\n def __init__(self, data, *args, **kwargs):\n self.data = data\n super().__init__(data, *args, **kwargs)\n\n\ndef legacy(request, locale=None):\n raise NotImplementedError(\"work harder\")\n\n\n@allow_CORS_GET\ndef search(request, locale=None):\n initial = {\"size\": 10, \"page\": 1, \"archive\": SearchForm.ARCHIVE_CHOICES[0]}\n if locale:\n initial[\"locale\"] = locale\n form = SearchForm(request.GET, initial=initial)\n if not form.is_valid():\n return JsonResponse({\"errors\": form.errors.get_json_data()}, status=400)\n\n locales = form.cleaned_data[\"locale\"] or [settings.LANGUAGE_CODE]\n assert isinstance(locales, list)\n\n params = {\n \"locales\": [x.lower() for x in locales],\n \"archive\": form.cleaned_data[\"archive\"],\n \"query\": form.cleaned_data[\"q\"],\n \"size\": form.cleaned_data[\"size\"],\n \"page\": form.cleaned_data[\"page\"],\n \"sort\": form.cleaned_data[\"sort\"],\n # The `slug` is always stored, as a Keyword index, in lowercase.\n \"slug_prefixes\": [x.lower() for x in form.cleaned_data[\"slug_prefix\"]],\n }\n results = _find(\n params,\n make_suggestions=True,\n )\n return JsonResponse(results)\n\n\ndef _find(params, total_only=False, make_suggestions=False, min_suggestion_score=0.8):\n search_query = Search(\n index=settings.SEARCH_INDEX_NAME,\n )\n if make_suggestions:\n # XXX research if it it's better to use phrase suggesters and if\n # that works\n # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters.html#phrase-suggester\n search_query = search_query.suggest(\n \"title_suggestions\", params[\"query\"], term={\"field\": \"title\"}\n )\n search_query = search_query.suggest(\n \"body_suggestions\", params[\"query\"], term={\"field\": \"body\"}\n )\n\n sub_queries = []\n sub_queries.append(Q(\"match\", title={\"query\": params[\"query\"], \"boost\": 2.0}))\n sub_queries.append(Q(\"match\", body={\"query\": params[\"query\"], \"boost\": 1.0}))\n if \" \" in params[\"query\"]:\n sub_queries.append(\n Q(\"match_phrase\", title={\"query\": params[\"query\"], \"boost\": 10.0})\n )\n sub_queries.append(\n Q(\"match_phrase\", body={\"query\": params[\"query\"], \"boost\": 5.0})\n )\n\n sub_query = query.Bool(should=sub_queries)\n\n if params[\"locales\"]:\n search_query = search_query.filter(\"terms\", locale=params[\"locales\"])\n if params[\"archive\"] == \"exclude\":\n search_query = search_query.filter(\"term\", archived=False)\n elif params[\"archive\"] == \"only\":\n search_query = search_query.filter(\"term\", archived=True)\n\n if params[\"slug_prefixes\"]:\n sub_queries = [Q(\"prefix\", slug=x) for x in params[\"slug_prefixes\"]]\n search_query = search_query.query(query.Bool(should=sub_queries))\n\n search_query = search_query.highlight_options(\n pre_tags=[\"<mark>\"],\n post_tags=[\"</mark>\"],\n number_of_fragments=3,\n fragment_size=120,\n encoder=\"html\",\n )\n search_query = search_query.highlight(\"title\", \"body\")\n\n if params[\"sort\"] == \"relevance\":\n search_query = search_query.sort(\"_score\", \"-popularity\")\n search_query = search_query.query(sub_query)\n elif params[\"sort\"] == \"popularity\":\n search_query = search_query.sort(\"-popularity\", \"_score\")\n search_query = search_query.query(sub_query)\n else:\n popularity_factor = 10.0\n boost_mode = \"sum\"\n score_mode = \"max\"\n search_query = search_query.query(\n \"function_score\",\n query=sub_query,\n functions=[\n query.SF(\n 
\"field_value_factor\",\n field=\"popularity\",\n factor=popularity_factor,\n missing=0.0,\n )\n ],\n boost_mode=boost_mode,\n score_mode=score_mode,\n )\n\n search_query = search_query.source(excludes=[\"body\"])\n\n search_query = search_query[\n params[\"size\"] * (params[\"page\"] - 1) : params[\"size\"] * params[\"page\"]\n ]\n\n retry_options = {\n \"retry_exceptions\": (\n # This is the standard operational exception.\n exceptions.ConnectionError,\n # This can happen if the search happened right as the index had\n # just been deleted due to a fresh re-indexing happening in Yari.\n exceptions.NotFoundError,\n # This can happen when the index simply isn't ready yet.\n exceptions.TransportError,\n ),\n # The default in redo is 60 seconds. Let's tone that down.\n \"sleeptime\": settings.ES_RETRY_SLEEPTIME,\n \"attempts\": settings.ES_RETRY_ATTEMPTS,\n \"jitter\": settings.ES_RETRY_JITTER,\n }\n with retrying(search_query.execute, **retry_options) as retrying_function:\n response = retrying_function()\n\n if total_only:\n return response.hits.total\n\n metadata = {\n \"took_ms\": response.took,\n \"total\": {\n # The `response.hits.total` is a `elasticsearch_dsl.utils.AttrDict`\n # instance. Pluck only the exact data needed.\n \"value\": response.hits.total.value,\n \"relation\": response.hits.total.relation,\n },\n \"size\": params[\"size\"],\n \"page\": params[\"page\"],\n }\n documents = []\n for hit in response:\n try:\n body_highlight = list(hit.meta.highlight.body)\n except AttributeError:\n body_highlight = []\n try:\n title_highlight = list(hit.meta.highlight.title)\n except AttributeError:\n title_highlight = []\n\n d = {\n \"mdn_url\": hit.meta.id,\n \"score\": hit.meta.score,\n \"title\": hit.title,\n \"locale\": hit.locale,\n \"slug\": hit.slug,\n \"popularity\": hit.popularity,\n \"archived\": hit.archived,\n \"summary\": hit.summary,\n \"highlight\": {\n \"body\": body_highlight,\n \"title\": title_highlight,\n },\n }\n documents.append(d)\n\n try:\n suggest = getattr(response, \"suggest\")\n except AttributeError:\n suggest = None\n\n suggestions = []\n if suggest:\n suggestion_strings = _unpack_suggestions(\n params[\"query\"],\n response.suggest,\n (\"body_suggestions\", \"title_suggestions\"),\n )\n\n for score, string in suggestion_strings:\n if score > min_suggestion_score or 1:\n # Sure, this is different way to spell, but what will it yield\n # if you actually search it?\n total = _find(params, total_only=True)\n if total[\"value\"] > 0:\n suggestions.append(\n {\n \"text\": string,\n \"total\": {\n # This 'total' is an `AttrDict` instance.\n \"value\": total.value,\n \"relation\": total.relation,\n },\n }\n )\n # Since they're sorted by score, it's usually never useful\n # to suggestion more than exactly 1 good suggestion.\n break\n\n return {\n \"documents\": documents,\n \"metadata\": metadata,\n \"suggestions\": suggestions,\n }\n\n\ndef _unpack_suggestions(query, suggest, keys):\n alternatives = []\n for key in keys:\n for suggestion in getattr(suggest, key, []):\n for option in suggestion.options:\n alternatives.append(\n (\n option.score,\n query[0 : suggestion.offset]\n + option.text\n + query[suggestion.offset + suggestion.length :],\n )\n )\n alternatives.sort(reverse=True) # highest score first\n return alternatives\n", "path": "kuma/api/v1/search/__init__.py"}]}
| 3,355 | 148 |
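The kuma fix boils down to re-running the count query with the *suggested* spelling rather than the user's original one. `dict(params, query=string)` copies the parameter dict and overrides only the query key, as this small illustration with made-up values shows:

```python
params = {"query": "misspeled term", "locales": ["en-us"], "size": 10}
string = "misspelled term"  # a suggestion returned by Elasticsearch

suggested_params = dict(params, query=string)

assert params["query"] == "misspeled term"             # original dict untouched
assert suggested_params["query"] == "misspelled term"   # only the query changed
assert suggested_params["locales"] == ["en-us"]         # other keys carried over
```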
gh_patches_debug_30458
|
rasdani/github-patches
|
git_diff
|
borgbackup__borg-8100
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`borg with-lock` raises `FileNotFoundError`
Hey,
I just want to find out what `with-lock` does. I tried some things and I ended up with an exception :)
```bash
kmille@linbox:~ borg --verbose with-lock /home/kmille/tmp/borgrepo list
Local Exception
Traceback (most recent call last):
File "/usr/lib/python3.11/site-packages/borg/archiver.py", line 5343, in main
exit_code = archiver.run(args)
^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/borg/archiver.py", line 5263, in run
return set_ec(func(args))
^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/borg/archiver.py", line 189, in wrapper
return method(self, args, repository=repository, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/borg/archiver.py", line 1853, in do_with_lock
return subprocess.call([args.command] + args.args, env=env)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/subprocess.py", line 389, in call
with Popen(*popenargs, **kwargs) as p:
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/subprocess.py", line 1026, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.11/subprocess.py", line 1950, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'list'
Platform: Linux linbox 6.6.8-hardened1-1-hardened #1 SMP PREEMPT_DYNAMIC Thu, 21 Dec 2023 23:45:24 +0000 x86_64
Linux: Unknown Linux
Borg: 1.2.7 Python: CPython 3.11.6 msgpack: 1.0.5 fuse: llfuse 1.5.0 [pyfuse3,llfuse]
PID: 186644 CWD: /home/kmille
sys.argv: ['/usr/bin/borg', '--verbose', 'with-lock', '/home/kmille/tmp/borgrepo', 'list']
SSH_ORIGINAL_COMMAND: None
```
I don't understand the [docs](https://borgbackup.readthedocs.io/en/stable/usage/lock.html). After reading, I ask myself:
- What's the purpose of a lock if I can run other borg commands in the meantime?
- "It will first try to acquire the lock": does this just mean it waits until the lock is free?
Maybe it makes sense to update the docs :)
</issue>
<code>
[start of src/borg/archiver/lock_cmds.py]
1 import argparse
2 import subprocess
3
4 from ._common import with_repository
5 from ..cache import Cache
6 from ..constants import * # NOQA
7 from ..helpers import prepare_subprocess_env, set_ec
8 from ..manifest import Manifest
9
10 from ..logger import create_logger
11
12 logger = create_logger()
13
14
15 class LocksMixIn:
16 @with_repository(manifest=False, exclusive=True)
17 def do_with_lock(self, args, repository):
18 """run a user specified command with the repository lock held"""
19 # for a new server, this will immediately take an exclusive lock.
20 # to support old servers, that do not have "exclusive" arg in open()
21 # RPC API, we also do it the old way:
22 # re-write manifest to start a repository transaction - this causes a
23 # lock upgrade to exclusive for remote (and also for local) repositories.
24 # by using manifest=False in the decorator, we avoid having to require
25 # the encryption key (and can operate just with encrypted data).
26 data = repository.get(Manifest.MANIFEST_ID)
27 repository.put(Manifest.MANIFEST_ID, data)
28 # usually, a 0 byte (open for writing) segment file would be visible in the filesystem here.
29 # we write and close this file, to rather have a valid segment file on disk, before invoking the subprocess.
30 # we can only do this for local repositories (with .io), though:
31 if hasattr(repository, "io"):
32 repository.io.close_segment()
33 env = prepare_subprocess_env(system=True)
34 try:
35 # we exit with the return code we get from the subprocess
36 rc = subprocess.call([args.command] + args.args, env=env)
37 set_ec(rc)
38 finally:
39 # we need to commit the "no change" operation we did to the manifest
40 # because it created a new segment file in the repository. if we would
41 # roll back, the same file would be later used otherwise (for other content).
42 # that would be bad if somebody uses rsync with ignore-existing (or
43 # any other mechanism relying on existing segment data not changing).
44 # see issue #1867.
45 repository.commit(compact=False)
46
47 @with_repository(lock=False, manifest=False)
48 def do_break_lock(self, args, repository):
49         """Break the repository lock (e.g. in case it was left by a dead borg)."""
50 repository.break_lock()
51 Cache.break_lock(repository)
52
53 def build_parser_locks(self, subparsers, common_parser, mid_common_parser):
54 from ._common import process_epilog
55
56 break_lock_epilog = process_epilog(
57 """
58 This command breaks the repository and cache locks.
59 Please use carefully and only while no borg process (on any machine) is
60 trying to access the Cache or the Repository.
61 """
62 )
63 subparser = subparsers.add_parser(
64 "break-lock",
65 parents=[common_parser],
66 add_help=False,
67 description=self.do_break_lock.__doc__,
68 epilog=break_lock_epilog,
69 formatter_class=argparse.RawDescriptionHelpFormatter,
70 help="break repository and cache locks",
71 )
72 subparser.set_defaults(func=self.do_break_lock)
73
74 with_lock_epilog = process_epilog(
75 """
76 This command runs a user-specified command while locking the repository. For example:
77
78 ::
79
80 $ BORG_REPO=/mnt/borgrepo borg with-lock rsync -av /mnt/borgrepo /somewhere/else/borgrepo
81
82 It will first try to acquire the lock (make sure that no other operation is
83 running in the repo), then execute the given command as a subprocess and wait
84 for its termination, release the lock and return the user command's return
85 code as borg's return code.
86
87 .. note::
88
89 If you copy a repository with the lock held, the lock will be present in
90 the copy. Thus, before using borg on the copy from a different host,
91 you need to use "borg break-lock" on the copied repository, because
92 Borg is cautious and does not automatically remove stale locks made by a different host.
93 """
94 )
95 subparser = subparsers.add_parser(
96 "with-lock",
97 parents=[common_parser],
98 add_help=False,
99 description=self.do_with_lock.__doc__,
100 epilog=with_lock_epilog,
101 formatter_class=argparse.RawDescriptionHelpFormatter,
102 help="run user command with lock held",
103 )
104 subparser.set_defaults(func=self.do_with_lock)
105 subparser.add_argument("command", metavar="COMMAND", help="command to run")
106 subparser.add_argument("args", metavar="ARGS", nargs=argparse.REMAINDER, help="command arguments")
107
[end of src/borg/archiver/lock_cmds.py]
[start of src/borg/testsuite/archiver/lock_cmds.py]
1 import os
2
3 from ...constants import * # NOQA
4 from . import cmd, generate_archiver_tests, RK_ENCRYPTION
5
6 pytest_generate_tests = lambda metafunc: generate_archiver_tests(metafunc, kinds="local,remote,binary") # NOQA
7
8
9 def test_break_lock(archivers, request):
10 archiver = request.getfixturevalue(archivers)
11 cmd(archiver, "rcreate", RK_ENCRYPTION)
12 cmd(archiver, "break-lock")
13
14
15 def test_with_lock(archivers, request):
16 archiver = request.getfixturevalue(archivers)
17 cmd(archiver, "rcreate", RK_ENCRYPTION)
18 lock_path = os.path.join(archiver.repository_path, "lock.exclusive")
19 command = "python3", "-c", 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
20 cmd(archiver, "with-lock", *command, fork=True, exit_code=42)
21
[end of src/borg/testsuite/archiver/lock_cmds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/borg/archiver/lock_cmds.py b/src/borg/archiver/lock_cmds.py
--- a/src/borg/archiver/lock_cmds.py
+++ b/src/borg/archiver/lock_cmds.py
@@ -4,7 +4,7 @@
from ._common import with_repository
from ..cache import Cache
from ..constants import * # NOQA
-from ..helpers import prepare_subprocess_env, set_ec
+from ..helpers import prepare_subprocess_env, set_ec, CommandError
from ..manifest import Manifest
from ..logger import create_logger
@@ -35,6 +35,8 @@
# we exit with the return code we get from the subprocess
rc = subprocess.call([args.command] + args.args, env=env)
set_ec(rc)
+ except (FileNotFoundError, OSError, ValueError) as e:
+ raise CommandError(f"Error while trying to run '{args.command}': {e}")
finally:
# we need to commit the "no change" operation we did to the manifest
# because it created a new segment file in the repository. if we would
diff --git a/src/borg/testsuite/archiver/lock_cmds.py b/src/borg/testsuite/archiver/lock_cmds.py
--- a/src/borg/testsuite/archiver/lock_cmds.py
+++ b/src/borg/testsuite/archiver/lock_cmds.py
@@ -18,3 +18,10 @@
lock_path = os.path.join(archiver.repository_path, "lock.exclusive")
command = "python3", "-c", 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
cmd(archiver, "with-lock", *command, fork=True, exit_code=42)
+
+
+def test_with_lock_non_existent_command(archivers, request):
+ archiver = request.getfixturevalue(archivers)
+ cmd(archiver, "rcreate", RK_ENCRYPTION)
+ command = ["non_existent_command"]
+ cmd(archiver, "with-lock", *command, fork=True, exit_code=EXIT_ERROR)
|
{"golden_diff": "diff --git a/src/borg/archiver/lock_cmds.py b/src/borg/archiver/lock_cmds.py\n--- a/src/borg/archiver/lock_cmds.py\n+++ b/src/borg/archiver/lock_cmds.py\n@@ -4,7 +4,7 @@\n from ._common import with_repository\n from ..cache import Cache\n from ..constants import * # NOQA\n-from ..helpers import prepare_subprocess_env, set_ec\n+from ..helpers import prepare_subprocess_env, set_ec, CommandError\n from ..manifest import Manifest\n \n from ..logger import create_logger\n@@ -35,6 +35,8 @@\n # we exit with the return code we get from the subprocess\n rc = subprocess.call([args.command] + args.args, env=env)\n set_ec(rc)\n+ except (FileNotFoundError, OSError, ValueError) as e:\n+ raise CommandError(f\"Error while trying to run '{args.command}': {e}\")\n finally:\n # we need to commit the \"no change\" operation we did to the manifest\n # because it created a new segment file in the repository. if we would\ndiff --git a/src/borg/testsuite/archiver/lock_cmds.py b/src/borg/testsuite/archiver/lock_cmds.py\n--- a/src/borg/testsuite/archiver/lock_cmds.py\n+++ b/src/borg/testsuite/archiver/lock_cmds.py\n@@ -18,3 +18,10 @@\n lock_path = os.path.join(archiver.repository_path, \"lock.exclusive\")\n command = \"python3\", \"-c\", 'import os, sys; sys.exit(42 if os.path.exists(\"%s\") else 23)' % lock_path\n cmd(archiver, \"with-lock\", *command, fork=True, exit_code=42)\n+\n+\n+def test_with_lock_non_existent_command(archivers, request):\n+ archiver = request.getfixturevalue(archivers)\n+ cmd(archiver, \"rcreate\", RK_ENCRYPTION)\n+ command = [\"non_existent_command\"]\n+ cmd(archiver, \"with-lock\", *command, fork=True, exit_code=EXIT_ERROR)\n", "issue": "`borg with-lock` raises `FileNotFoundError`\nHey,\r\nI just want to find out what `with-lock` does. 
I tried some things and I ended up with an exception :)\r\n\r\n```bash\r\nkmille@linbox:~ borg --verbose with-lock /home/kmille/tmp/borgrepo list\r\nLocal Exception\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.11/site-packages/borg/archiver.py\", line 5343, in main\r\n exit_code = archiver.run(args)\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/borg/archiver.py\", line 5263, in run\r\n return set_ec(func(args))\r\n ^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/borg/archiver.py\", line 189, in wrapper\r\n return method(self, args, repository=repository, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/borg/archiver.py\", line 1853, in do_with_lock\r\n return subprocess.call([args.command] + args.args, env=env)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/subprocess.py\", line 389, in call\r\n with Popen(*popenargs, **kwargs) as p:\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/subprocess.py\", line 1026, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/usr/lib/python3.11/subprocess.py\", line 1950, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'list'\r\n\r\nPlatform: Linux linbox 6.6.8-hardened1-1-hardened #1 SMP PREEMPT_DYNAMIC Thu, 21 Dec 2023 23:45:24 +0000 x86_64\r\nLinux: Unknown Linux \r\nBorg: 1.2.7 Python: CPython 3.11.6 msgpack: 1.0.5 fuse: llfuse 1.5.0 [pyfuse3,llfuse]\r\nPID: 186644 CWD: /home/kmille\r\nsys.argv: ['/usr/bin/borg', '--verbose', 'with-lock', '/home/kmille/tmp/borgrepo', 'list']\r\nSSH_ORIGINAL_COMMAND: None\r\n```\r\n\r\nI don't understand the [docs](https://borgbackup.readthedocs.io/en/stable/usage/lock.html). 
After reading, I ask myself:\r\n- What's the purpose of a lock if I can run other borg commands in the meantime\r\n- \"It will first try to acquire the lock\": Does this just mean: wait until it's free?\r\n- \r\n\r\nMaybe it makes sense to update the docs :)\n", "before_files": [{"content": "import argparse\nimport subprocess\n\nfrom ._common import with_repository\nfrom ..cache import Cache\nfrom ..constants import * # NOQA\nfrom ..helpers import prepare_subprocess_env, set_ec\nfrom ..manifest import Manifest\n\nfrom ..logger import create_logger\n\nlogger = create_logger()\n\n\nclass LocksMixIn:\n @with_repository(manifest=False, exclusive=True)\n def do_with_lock(self, args, repository):\n \"\"\"run a user specified command with the repository lock held\"\"\"\n # for a new server, this will immediately take an exclusive lock.\n # to support old servers, that do not have \"exclusive\" arg in open()\n # RPC API, we also do it the old way:\n # re-write manifest to start a repository transaction - this causes a\n # lock upgrade to exclusive for remote (and also for local) repositories.\n # by using manifest=False in the decorator, we avoid having to require\n # the encryption key (and can operate just with encrypted data).\n data = repository.get(Manifest.MANIFEST_ID)\n repository.put(Manifest.MANIFEST_ID, data)\n # usually, a 0 byte (open for writing) segment file would be visible in the filesystem here.\n # we write and close this file, to rather have a valid segment file on disk, before invoking the subprocess.\n # we can only do this for local repositories (with .io), though:\n if hasattr(repository, \"io\"):\n repository.io.close_segment()\n env = prepare_subprocess_env(system=True)\n try:\n # we exit with the return code we get from the subprocess\n rc = subprocess.call([args.command] + args.args, env=env)\n set_ec(rc)\n finally:\n # we need to commit the \"no change\" operation we did to the manifest\n # because it created a new segment file in the repository. if we would\n # roll back, the same file would be later used otherwise (for other content).\n # that would be bad if somebody uses rsync with ignore-existing (or\n # any other mechanism relying on existing segment data not changing).\n # see issue #1867.\n repository.commit(compact=False)\n\n @with_repository(lock=False, manifest=False)\n def do_break_lock(self, args, repository):\n \"\"\"Break the repository lock (e.g. in case it was left by a dead borg.\"\"\"\n repository.break_lock()\n Cache.break_lock(repository)\n\n def build_parser_locks(self, subparsers, common_parser, mid_common_parser):\n from ._common import process_epilog\n\n break_lock_epilog = process_epilog(\n \"\"\"\n This command breaks the repository and cache locks.\n Please use carefully and only while no borg process (on any machine) is\n trying to access the Cache or the Repository.\n \"\"\"\n )\n subparser = subparsers.add_parser(\n \"break-lock\",\n parents=[common_parser],\n add_help=False,\n description=self.do_break_lock.__doc__,\n epilog=break_lock_epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help=\"break repository and cache locks\",\n )\n subparser.set_defaults(func=self.do_break_lock)\n\n with_lock_epilog = process_epilog(\n \"\"\"\n This command runs a user-specified command while locking the repository. 
For example:\n\n ::\n\n $ BORG_REPO=/mnt/borgrepo borg with-lock rsync -av /mnt/borgrepo /somewhere/else/borgrepo\n\n It will first try to acquire the lock (make sure that no other operation is\n running in the repo), then execute the given command as a subprocess and wait\n for its termination, release the lock and return the user command's return\n code as borg's return code.\n\n .. note::\n\n If you copy a repository with the lock held, the lock will be present in\n the copy. Thus, before using borg on the copy from a different host,\n you need to use \"borg break-lock\" on the copied repository, because\n Borg is cautious and does not automatically remove stale locks made by a different host.\n \"\"\"\n )\n subparser = subparsers.add_parser(\n \"with-lock\",\n parents=[common_parser],\n add_help=False,\n description=self.do_with_lock.__doc__,\n epilog=with_lock_epilog,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help=\"run user command with lock held\",\n )\n subparser.set_defaults(func=self.do_with_lock)\n subparser.add_argument(\"command\", metavar=\"COMMAND\", help=\"command to run\")\n subparser.add_argument(\"args\", metavar=\"ARGS\", nargs=argparse.REMAINDER, help=\"command arguments\")\n", "path": "src/borg/archiver/lock_cmds.py"}, {"content": "import os\n\nfrom ...constants import * # NOQA\nfrom . import cmd, generate_archiver_tests, RK_ENCRYPTION\n\npytest_generate_tests = lambda metafunc: generate_archiver_tests(metafunc, kinds=\"local,remote,binary\") # NOQA\n\n\ndef test_break_lock(archivers, request):\n archiver = request.getfixturevalue(archivers)\n cmd(archiver, \"rcreate\", RK_ENCRYPTION)\n cmd(archiver, \"break-lock\")\n\n\ndef test_with_lock(archivers, request):\n archiver = request.getfixturevalue(archivers)\n cmd(archiver, \"rcreate\", RK_ENCRYPTION)\n lock_path = os.path.join(archiver.repository_path, \"lock.exclusive\")\n command = \"python3\", \"-c\", 'import os, sys; sys.exit(42 if os.path.exists(\"%s\") else 23)' % lock_path\n cmd(archiver, \"with-lock\", *command, fork=True, exit_code=42)\n", "path": "src/borg/testsuite/archiver/lock_cmds.py"}]}
| 2,726 | 467 |
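The record above documents `borg with-lock`: acquire the repository's exclusive lock (waiting until it is free), run the user command as a subprocess, then release the lock and return the command's exit code as Borg's own. A minimal sketch of that pattern, using `fcntl.flock` on a hypothetical lock file rather than Borg's repository lock, so it illustrates the behaviour but is not Borg's implementation:

```python
# Illustrative sketch only (Unix-only), not borg's locking code. It shows the
# general "with-lock" pattern from the record above: take an exclusive lock,
# run a user command as a subprocess, and return its exit code.
import fcntl
import subprocess
import sys


def run_with_lock(lock_path: str, command: list) -> int:
    with open(lock_path, "w") as lock_file:
        # Block until the exclusive lock is free, mirroring "it will first try
        # to acquire the lock" from the documentation discussed in the issue.
        fcntl.flock(lock_file, fcntl.LOCK_EX)
        try:
            return subprocess.call(command)
        finally:
            fcntl.flock(lock_file, fcntl.LOCK_UN)


if __name__ == "__main__":
    # Example: hold ./demo.lock while listing the current directory.
    sys.exit(run_with_lock("./demo.lock", ["ls", "-l"]))
```

Blocking on `LOCK_EX` is what makes concurrent invocations queue up instead of failing, which is the point the issue asks the documentation to spell out.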
| gh_patches_debug_19340 | rasdani/github-patches | git_diff | pyca__cryptography-7382 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include Rust version in DEBUG ASSISTENCE message?
I'm not sure what the best way to do this is but it seems like it would be helpful to include the output of `rustc -V` in the DEBUG ASSISTENCE.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 import os
8 import platform
9 import sys
10
11 from setuptools import setup
12
13 try:
14 from setuptools_rust import RustExtension
15 except ImportError:
16 print(
17 """
18 =============================DEBUG ASSISTANCE==========================
19 If you are seeing an error here please try the following to
20 successfully install cryptography:
21
22 Upgrade to the latest pip and try again. This will fix errors for most
23 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
24 =============================DEBUG ASSISTANCE==========================
25 """
26 )
27 raise
28
29
30 base_dir = os.path.dirname(__file__)
31 src_dir = os.path.join(base_dir, "src")
32
33 # When executing the setup.py, we need to be able to import ourselves, this
34 # means that we need to add the src/ directory to the sys.path.
35 sys.path.insert(0, src_dir)
36
37 try:
38 # See setup.cfg for most of the config metadata.
39 setup(
40 cffi_modules=[
41 "src/_cffi_src/build_openssl.py:ffi",
42 ],
43 rust_extensions=[
44 RustExtension(
45 "_rust",
46 "src/rust/Cargo.toml",
47 py_limited_api=True,
48 # Enable abi3 mode if we're not using PyPy.
49 features=(
50 []
51 if platform.python_implementation() == "PyPy"
52 else ["pyo3/abi3-py36"]
53 ),
54 rust_version=">=1.48.0",
55 )
56 ],
57 )
58 except: # noqa: E722
59 # Note: This is a bare exception that re-raises so that we don't interfere
60 # with anything the installation machinery might want to do. Because we
61 # print this for any exception this msg can appear (e.g. in verbose logs)
62 # even if there's no failure. For example, SetupRequirementsError is raised
63 # during PEP517 building and prints this text. setuptools raises SystemExit
64 # when compilation fails right now, but it's possible this isn't stable
65 # or a public API commitment so we'll remain ultra conservative.
66
67 import pkg_resources
68
69 print(
70 """
71 =============================DEBUG ASSISTANCE=============================
72 If you are seeing a compilation error please try the following steps to
73 successfully install cryptography:
74 1) Upgrade to the latest pip and try again. This will fix errors for most
75 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
76 2) Read https://cryptography.io/en/latest/installation/ for specific
77 instructions for your platform.
78 3) Check our frequently asked questions for more information:
79 https://cryptography.io/en/latest/faq/
80 4) Ensure you have a recent Rust toolchain installed:
81 https://cryptography.io/en/latest/installation/#rust
82 """
83 )
84 print(f" Python: {'.'.join(str(v) for v in sys.version_info[:3])}")
85 print(f" platform: {platform.platform()}")
86 for dist in ["pip", "setuptools", "setuptools_rust"]:
87 try:
88 version = pkg_resources.get_distribution(dist).version
89 except pkg_resources.DistributionNotFound:
90 version = "n/a"
91 print(f" {dist}: {version}")
92 print(
93 """\
94 =============================DEBUG ASSISTANCE=============================
95 """
96 )
97 raise
98
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,9 @@
import os
import platform
+import re
+import shutil
+import subprocess
import sys
from setuptools import setup
@@ -89,6 +92,22 @@
except pkg_resources.DistributionNotFound:
version = "n/a"
print(f" {dist}: {version}")
+ version = "n/a"
+ if shutil.which("rustc") is not None:
+ try:
+ # If for any reason `rustc --version` fails, silently ignore it
+ rustc_output = subprocess.run(
+ ["rustc", "--version"],
+ capture_output=True,
+ timeout=0.5,
+ encoding="utf8",
+ check=True,
+ ).stdout
+ version = re.sub("^rustc ", "", rustc_output.strip())
+ except subprocess.SubprocessError:
+ pass
+ print(f" rustc: {version}")
+
print(
"""\
=============================DEBUG ASSISTANCE=============================
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,6 +6,9 @@\n \n import os\n import platform\n+import re\n+import shutil\n+import subprocess\n import sys\n \n from setuptools import setup\n@@ -89,6 +92,22 @@\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n+ version = \"n/a\"\n+ if shutil.which(\"rustc\") is not None:\n+ try:\n+ # If for any reason `rustc --version` fails, silently ignore it\n+ rustc_output = subprocess.run(\n+ [\"rustc\", \"--version\"],\n+ capture_output=True,\n+ timeout=0.5,\n+ encoding=\"utf8\",\n+ check=True,\n+ ).stdout\n+ version = re.sub(\"^rustc \", \"\", rustc_output.strip())\n+ except subprocess.SubprocessError:\n+ pass\n+ print(f\" rustc: {version}\")\n+\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n", "issue": "Include Rust version in DEBUG ASSISTENCE message?\nI'm not sure what the best way to do this is but it seems like it would be helpful to include the output of `rustc -V` in the DEBUG ASSISTENCE.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport platform\nimport sys\n\nfrom setuptools import setup\n\ntry:\n from setuptools_rust import RustExtension\nexcept ImportError:\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE==========================\n If you are seeing an error here please try the following to\n successfully install cryptography:\n\n Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n =============================DEBUG ASSISTANCE==========================\n \"\"\"\n )\n raise\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\ntry:\n # See setup.cfg for most of the config metadata.\n setup(\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n ],\n rust_extensions=[\n RustExtension(\n \"_rust\",\n \"src/rust/Cargo.toml\",\n py_limited_api=True,\n # Enable abi3 mode if we're not using PyPy.\n features=(\n []\n if platform.python_implementation() == \"PyPy\"\n else [\"pyo3/abi3-py36\"]\n ),\n rust_version=\">=1.48.0\",\n )\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n\n import pkg_resources\n\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. 
See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation/ for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq/\n 4) Ensure you have a recent Rust toolchain installed:\n https://cryptography.io/en/latest/installation/#rust\n \"\"\"\n )\n print(f\" Python: {'.'.join(str(v) for v in sys.version_info[:3])}\")\n print(f\" platform: {platform.platform()}\")\n for dist in [\"pip\", \"setuptools\", \"setuptools_rust\"]:\n try:\n version = pkg_resources.get_distribution(dist).version\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}]}
| 1,547 | 245 |
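The golden diff above boils down to one extra probe in the debug output: if `rustc` is on the PATH, run `rustc --version` with a short timeout and strip the leading `rustc ` prefix. A standalone sketch of that probe, outside of `setup.py`; the helper name and the sample version string in the comment are illustrative, while the subprocess call mirrors the diff:

```python
# Standalone sketch of the rustc probe added by the patch above. It is not part
# of cryptography's setup.py; it just demonstrates the same subprocess call.
import re
import shutil
import subprocess


def rustc_version() -> str:
    if shutil.which("rustc") is None:
        return "n/a"
    try:
        out = subprocess.run(
            ["rustc", "--version"],
            capture_output=True,
            timeout=0.5,
            encoding="utf8",
            check=True,
        ).stdout
    except subprocess.SubprocessError:
        return "n/a"
    # e.g. "rustc 1.74.0 (79e9716c9 2023-11-13)" -> "1.74.0 (79e9716c9 2023-11-13)"
    return re.sub("^rustc ", "", out.strip())


if __name__ == "__main__":
    print(f"    rustc: {rustc_version()}")
```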
| gh_patches_debug_23159 | rasdani/github-patches | git_diff | svthalia__concrexit-2608 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not throw away bank accounts that may not be deleted during member data minimisation
Closes #2539
### Summary
Moves bank account minimization to the payments app. This will keep bank accounts 13 months after last usage for collecting a Thalia Pay batch, as is required.
### How to test
1. Do data minimisation
2. Bank accounts will only be removed 13 months after last usage
</issue>
<code>
[start of website/payments/services.py]
1 """The services defined by the payments package."""
2 import datetime
3 from typing import Union
4
5 from django.conf import settings
6 from django.db.models import QuerySet, Q, Sum, Model
7 from django.urls import reverse
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10
11 from members.models import Member
12 from utils.snippets import send_email
13 from .exceptions import PaymentError
14 from .models import Payment, BankAccount, PaymentUser
15 from .payables import payables, Payable
16
17
18 def create_payment(
19 model_payable: Union[Model, Payable],
20 processed_by: Member,
21 pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],
22 ) -> Payment:
23 """Create a new payment from a payable object.
24
25 :param model_payable: Payable or Model object
26 :param processed_by: PaymentUser that processed this payment
27 :param pay_type: Payment type
28 :return: Payment object
29 """
30 if pay_type not in (Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY):
31 raise PaymentError("Invalid payment type")
32
33 if isinstance(model_payable, Payable):
34 payable = model_payable
35 else:
36 payable = payables.get_payable(model_payable)
37
38 payer = (
39 PaymentUser.objects.get(pk=payable.payment_payer.pk)
40 if payable.payment_payer
41 else None
42 )
43
44 if not (
45 (payer and payer == processed_by and pay_type == Payment.TPAY)
46 or (payable.can_manage_payment(processed_by) and pay_type != Payment.TPAY)
47 ):
48 raise PaymentError(
49 _("User processing payment does not have the right permissions")
50 )
51
52 if payable.payment_amount == 0:
53 raise PaymentError(_("Payment amount 0 is not accepted"))
54
55 if pay_type == Payment.TPAY and not payer.tpay_enabled:
56 raise PaymentError(_("This user does not have Thalia Pay enabled"))
57
58 if not payable.paying_allowed:
59 raise PaymentError(_("Payment restricted"))
60
61 if payable.payment is not None:
62 payable.payment.amount = payable.payment_amount
63 payable.payment.notes = payable.payment_notes
64 payable.payment.topic = payable.payment_topic
65 payable.payment.paid_by = payer
66 payable.payment.processed_by = processed_by
67 payable.payment.type = pay_type
68 payable.payment.save()
69 else:
70 payable.payment = Payment.objects.create(
71 processed_by=processed_by,
72 amount=payable.payment_amount,
73 notes=payable.payment_notes,
74 topic=payable.payment_topic,
75 paid_by=payer,
76 type=pay_type,
77 )
78 return payable.payment
79
80
81 def delete_payment(model: Model, member: Member = None, ignore_change_window=False):
82 """Remove a payment from a payable object.
83
84 :param model: Payable or Model object
85 :param member: member deleting the payment
86 :param ignore_change_window: ignore the payment change window
87 :return:
88 """
89 payable = payables.get_payable(model)
90
91 if member and not payable.can_manage_payment(member):
92 raise PaymentError(
93 _("User deleting payment does not have the right permissions.")
94 )
95
96 payment = payable.payment
97 if (
98 payment.created_at
99 < timezone.now() - timezone.timedelta(seconds=settings.PAYMENT_CHANGE_WINDOW)
100 and not ignore_change_window
101 ):
102 raise PaymentError(_("This payment cannot be deleted anymore."))
103 if payment.batch and payment.batch.processed:
104 raise PaymentError(
105 _("This payment has already been processed and hence cannot be deleted.")
106 )
107
108 payable.payment = None
109 payable.model.save()
110 payment.delete()
111
112
113 def update_last_used(queryset: QuerySet, date: datetime.date = None) -> int:
114 """Update the last used field of a BankAccount queryset.
115
116 :param queryset: Queryset of BankAccounts
117 :param date: date to set last_used to
118 :return: number of affected rows
119 """
120 if not date:
121 date = timezone.now().date()
122
123 result = queryset.filter(
124 (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))
125 | Q(valid_until=None)
126 ).update(last_used=date)
127 return result
128
129
130 def revoke_old_mandates() -> int:
131 """Revoke all mandates that have not been used for 36 months or more.
132
133 :return: number of affected rows
134 """
135 return BankAccount.objects.filter(
136 last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))
137 ).update(valid_until=timezone.now().date())
138
139
140 def process_batch(batch):
141 """Process a Thalia Pay batch.
142
143 :param batch: the batch to be processed
144 :return:
145 """
146 batch.processed = True
147
148 payments = batch.payments_set.select_related("paid_by")
149 for payment in payments:
150 bank_account = payment.paid_by.bank_accounts.last()
151 bank_account.last_used = batch.withdrawal_date
152 bank_account.save()
153
154 batch.save()
155
156 send_tpay_batch_processing_emails(batch)
157
158
159 def derive_next_mandate_no(member) -> str:
160 accounts = (
161 BankAccount.objects.filter(owner=PaymentUser.objects.get(pk=member.pk))
162 .exclude(mandate_no=None)
163 .filter(mandate_no__regex=BankAccount.MANDATE_NO_DEFAULT_REGEX)
164 )
165 new_mandate_no = 1 + max(
166 [int(account.mandate_no.split("-")[1]) for account in accounts], default=0
167 )
168 return f"{member.pk}-{new_mandate_no}"
169
170
171 def send_tpay_batch_processing_emails(batch):
172 """Send withdrawal notice emails to all members in a batch."""
173 member_payments = batch.payments_set.values("paid_by").annotate(total=Sum("amount"))
174 for member_row in member_payments:
175 member = PaymentUser.objects.get(pk=member_row["paid_by"])
176 total_amount = member_row["total"]
177
178 send_email(
179 member.email,
180 _("Thalia Pay withdrawal notice"),
181 "payments/email/tpay_withdrawal_notice_mail.txt",
182 {
183 "name": member.get_full_name(),
184 "batch": batch,
185 "bank_account": member.bank_accounts.filter(
186 mandate_no__isnull=False
187 ).last(),
188 "creditor_id": settings.SEPA_CREDITOR_ID,
189 "payments": batch.payments_set.filter(paid_by=member),
190 "total_amount": total_amount,
191 "payments_url": (
192 settings.BASE_URL
193 + reverse(
194 "payments:payment-list",
195 )
196 ),
197 },
198 )
199 return len(member_payments)
200
201
202 def execute_data_minimisation(dry_run=False):
203 """Anonymizes payments older than 7 years."""
204 # Sometimes years are 366 days of course, but better delete 1 or 2 days early than late
205 payment_deletion_period = timezone.now().date() - timezone.timedelta(days=(365 * 7))
206 bankaccount_deletion_period = timezone.now() - datetime.timedelta(days=(31 * 13))
207
208 queryset_payments = Payment.objects.filter(
209 created_at__lte=payment_deletion_period
210 ).exclude(paid_by__isnull=True)
211
212 queryset_bankaccounts = BankAccount.objects.filter(
213 Q(owner__paid_payment_set__type=Payment.TPAY),
214 Q(owner__paid_payment_set__batch__isnull=True)
215 | Q(owner__paid_payment_set__batch__processed=False)
216 | Q(
217 owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period
218 ),
219 )
220
221 if not dry_run:
222 queryset_payments.update(paid_by=None, processed_by=None)
223 queryset_bankaccounts.delete()
224 return queryset_payments
225
[end of website/payments/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/payments/services.py b/website/payments/services.py
--- a/website/payments/services.py
+++ b/website/payments/services.py
@@ -209,12 +209,24 @@
created_at__lte=payment_deletion_period
).exclude(paid_by__isnull=True)
- queryset_bankaccounts = BankAccount.objects.filter(
- Q(owner__paid_payment_set__type=Payment.TPAY),
- Q(owner__paid_payment_set__batch__isnull=True)
- | Q(owner__paid_payment_set__batch__processed=False)
+ # Delete bank accounts that are not valid anymore, and have not been used in the last 13 months
+ # (13 months is the required time we need to keep the mandates for)
+ queryset_bankaccounts = BankAccount.objects.all()
+ queryset_bankaccounts = queryset_bankaccounts.filter(
+ valid_until__lt=timezone.now()
+ ) # Keep valid bank accounts
+ queryset_bankaccounts = queryset_bankaccounts.exclude( # Also keep bank accounts that
+ Q(
+ owner__paid_payment_set__type=Payment.TPAY
+ ), # are used for Thalia Pay payments, AND
+ Q(
+ owner__paid_payment_set__batch__isnull=True
+ ) # have a payment that is in no batch, OR
| Q(
- owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period
+ owner__paid_payment_set__batch__processed=False
+ ) # have an unprocessed batch, OR
+ | Q(
+ owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period # or have a processed batch that is not older than 13 months
),
)
|
{"golden_diff": "diff --git a/website/payments/services.py b/website/payments/services.py\n--- a/website/payments/services.py\n+++ b/website/payments/services.py\n@@ -209,12 +209,24 @@\n created_at__lte=payment_deletion_period\n ).exclude(paid_by__isnull=True)\n \n- queryset_bankaccounts = BankAccount.objects.filter(\n- Q(owner__paid_payment_set__type=Payment.TPAY),\n- Q(owner__paid_payment_set__batch__isnull=True)\n- | Q(owner__paid_payment_set__batch__processed=False)\n+ # Delete bank accounts that are not valid anymore, and have not been used in the last 13 months\n+ # (13 months is the required time we need to keep the mandates for)\n+ queryset_bankaccounts = BankAccount.objects.all()\n+ queryset_bankaccounts = queryset_bankaccounts.filter(\n+ valid_until__lt=timezone.now()\n+ ) # Keep valid bank accounts\n+ queryset_bankaccounts = queryset_bankaccounts.exclude( # Also keep bank accounts that\n+ Q(\n+ owner__paid_payment_set__type=Payment.TPAY\n+ ), # are used for Thalia Pay payments, AND\n+ Q(\n+ owner__paid_payment_set__batch__isnull=True\n+ ) # have a payment that is in no batch, OR\n | Q(\n- owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period\n+ owner__paid_payment_set__batch__processed=False\n+ ) # have an unprocessed batch, OR\n+ | Q(\n+ owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period # or have a processed batch that is not older than 13 months\n ),\n )\n", "issue": "Do not throw away bank accounts that may not be deleted during member data minimisation\nCloses #2539 \r\n\r\n### Summary\r\nMoves bank account minimization to the payments app. This will keep bank accounts 13 months after last usage for collecting a Thalia Pay batch, as is required.\r\n\r\n\r\n### How to test\r\n1. Do data minimisation\r\n2. 
Bank accounts will only be removed 13 months after last usage\n", "before_files": [{"content": "\"\"\"The services defined by the payments package.\"\"\"\nimport datetime\nfrom typing import Union\n\nfrom django.conf import settings\nfrom django.db.models import QuerySet, Q, Sum, Model\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom utils.snippets import send_email\nfrom .exceptions import PaymentError\nfrom .models import Payment, BankAccount, PaymentUser\nfrom .payables import payables, Payable\n\n\ndef create_payment(\n model_payable: Union[Model, Payable],\n processed_by: Member,\n pay_type: Union[Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY],\n) -> Payment:\n \"\"\"Create a new payment from a payable object.\n\n :param model_payable: Payable or Model object\n :param processed_by: PaymentUser that processed this payment\n :param pay_type: Payment type\n :return: Payment object\n \"\"\"\n if pay_type not in (Payment.CASH, Payment.CARD, Payment.WIRE, Payment.TPAY):\n raise PaymentError(\"Invalid payment type\")\n\n if isinstance(model_payable, Payable):\n payable = model_payable\n else:\n payable = payables.get_payable(model_payable)\n\n payer = (\n PaymentUser.objects.get(pk=payable.payment_payer.pk)\n if payable.payment_payer\n else None\n )\n\n if not (\n (payer and payer == processed_by and pay_type == Payment.TPAY)\n or (payable.can_manage_payment(processed_by) and pay_type != Payment.TPAY)\n ):\n raise PaymentError(\n _(\"User processing payment does not have the right permissions\")\n )\n\n if payable.payment_amount == 0:\n raise PaymentError(_(\"Payment amount 0 is not accepted\"))\n\n if pay_type == Payment.TPAY and not payer.tpay_enabled:\n raise PaymentError(_(\"This user does not have Thalia Pay enabled\"))\n\n if not payable.paying_allowed:\n raise PaymentError(_(\"Payment restricted\"))\n\n if payable.payment is not None:\n payable.payment.amount = payable.payment_amount\n payable.payment.notes = payable.payment_notes\n payable.payment.topic = payable.payment_topic\n payable.payment.paid_by = payer\n payable.payment.processed_by = processed_by\n payable.payment.type = pay_type\n payable.payment.save()\n else:\n payable.payment = Payment.objects.create(\n processed_by=processed_by,\n amount=payable.payment_amount,\n notes=payable.payment_notes,\n topic=payable.payment_topic,\n paid_by=payer,\n type=pay_type,\n )\n return payable.payment\n\n\ndef delete_payment(model: Model, member: Member = None, ignore_change_window=False):\n \"\"\"Remove a payment from a payable object.\n\n :param model: Payable or Model object\n :param member: member deleting the payment\n :param ignore_change_window: ignore the payment change window\n :return:\n \"\"\"\n payable = payables.get_payable(model)\n\n if member and not payable.can_manage_payment(member):\n raise PaymentError(\n _(\"User deleting payment does not have the right permissions.\")\n )\n\n payment = payable.payment\n if (\n payment.created_at\n < timezone.now() - timezone.timedelta(seconds=settings.PAYMENT_CHANGE_WINDOW)\n and not ignore_change_window\n ):\n raise PaymentError(_(\"This payment cannot be deleted anymore.\"))\n if payment.batch and payment.batch.processed:\n raise PaymentError(\n _(\"This payment has already been processed and hence cannot be deleted.\")\n )\n\n payable.payment = None\n payable.model.save()\n payment.delete()\n\n\ndef update_last_used(queryset: QuerySet, date: datetime.date 
= None) -> int:\n \"\"\"Update the last used field of a BankAccount queryset.\n\n :param queryset: Queryset of BankAccounts\n :param date: date to set last_used to\n :return: number of affected rows\n \"\"\"\n if not date:\n date = timezone.now().date()\n\n result = queryset.filter(\n (Q(valid_from__gte=timezone.now()) & Q(valid_until__lt=timezone.now()))\n | Q(valid_until=None)\n ).update(last_used=date)\n return result\n\n\ndef revoke_old_mandates() -> int:\n \"\"\"Revoke all mandates that have not been used for 36 months or more.\n\n :return: number of affected rows\n \"\"\"\n return BankAccount.objects.filter(\n last_used__lte=(timezone.now() - timezone.timedelta(days=36 * 30))\n ).update(valid_until=timezone.now().date())\n\n\ndef process_batch(batch):\n \"\"\"Process a Thalia Pay batch.\n\n :param batch: the batch to be processed\n :return:\n \"\"\"\n batch.processed = True\n\n payments = batch.payments_set.select_related(\"paid_by\")\n for payment in payments:\n bank_account = payment.paid_by.bank_accounts.last()\n bank_account.last_used = batch.withdrawal_date\n bank_account.save()\n\n batch.save()\n\n send_tpay_batch_processing_emails(batch)\n\n\ndef derive_next_mandate_no(member) -> str:\n accounts = (\n BankAccount.objects.filter(owner=PaymentUser.objects.get(pk=member.pk))\n .exclude(mandate_no=None)\n .filter(mandate_no__regex=BankAccount.MANDATE_NO_DEFAULT_REGEX)\n )\n new_mandate_no = 1 + max(\n [int(account.mandate_no.split(\"-\")[1]) for account in accounts], default=0\n )\n return f\"{member.pk}-{new_mandate_no}\"\n\n\ndef send_tpay_batch_processing_emails(batch):\n \"\"\"Send withdrawal notice emails to all members in a batch.\"\"\"\n member_payments = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n for member_row in member_payments:\n member = PaymentUser.objects.get(pk=member_row[\"paid_by\"])\n total_amount = member_row[\"total\"]\n\n send_email(\n member.email,\n _(\"Thalia Pay withdrawal notice\"),\n \"payments/email/tpay_withdrawal_notice_mail.txt\",\n {\n \"name\": member.get_full_name(),\n \"batch\": batch,\n \"bank_account\": member.bank_accounts.filter(\n mandate_no__isnull=False\n ).last(),\n \"creditor_id\": settings.SEPA_CREDITOR_ID,\n \"payments\": batch.payments_set.filter(paid_by=member),\n \"total_amount\": total_amount,\n \"payments_url\": (\n settings.BASE_URL\n + reverse(\n \"payments:payment-list\",\n )\n ),\n },\n )\n return len(member_payments)\n\n\ndef execute_data_minimisation(dry_run=False):\n \"\"\"Anonymizes payments older than 7 years.\"\"\"\n # Sometimes years are 366 days of course, but better delete 1 or 2 days early than late\n payment_deletion_period = timezone.now().date() - timezone.timedelta(days=(365 * 7))\n bankaccount_deletion_period = timezone.now() - datetime.timedelta(days=(31 * 13))\n\n queryset_payments = Payment.objects.filter(\n created_at__lte=payment_deletion_period\n ).exclude(paid_by__isnull=True)\n\n queryset_bankaccounts = BankAccount.objects.filter(\n Q(owner__paid_payment_set__type=Payment.TPAY),\n Q(owner__paid_payment_set__batch__isnull=True)\n | Q(owner__paid_payment_set__batch__processed=False)\n | Q(\n owner__paid_payment_set__batch__processing_date__gt=bankaccount_deletion_period\n ),\n )\n\n if not dry_run:\n queryset_payments.update(paid_by=None, processed_by=None)\n queryset_bankaccounts.delete()\n return queryset_payments\n", "path": "website/payments/services.py"}]}
| 2,861 | 400 |
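The retention rule in the patch above keeps a bank account if it is still valid, or if it backs a Thalia Pay payment that is unbatched, sits in an unprocessed batch, or sits in a batch processed within roughly the last 13 months. A plain-Python sketch of that decision, not the Django queryset itself; the dictionary keys are assumptions made for illustration:

```python
# Plain-Python sketch of the retention rule from the patch above, not the
# Django queryset. Field names (valid_until, last_tpay_batch_processed_on, ...)
# are assumptions for illustration.
from datetime import date, timedelta


def may_delete(account: dict, today: date) -> bool:
    cutoff = today - timedelta(days=31 * 13)  # roughly 13 months, as in the diff
    if account["valid_until"] is None or account["valid_until"] >= today:
        return False  # still valid -> keep
    if account.get("has_unbatched_or_unprocessed_tpay_payment"):
        return False  # Thalia Pay payment not yet collected -> keep
    batch_date = account.get("last_tpay_batch_processed_on")
    if batch_date is not None and batch_date > cutoff:
        return False  # collected less than ~13 months ago -> keep
    return True


if __name__ == "__main__":
    acct = {
        "valid_until": date(2020, 1, 1),
        "last_tpay_batch_processed_on": date(2022, 6, 1),
        "has_unbatched_or_unprocessed_tpay_payment": False,
    }
    print(may_delete(acct, today=date(2022, 10, 1)))  # False: batch too recent
```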
| gh_patches_debug_42599 | rasdani/github-patches | git_diff | StackStorm__st2-5467 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix multiple file support in linux.file_watch.line + black + fstring
When multiple file_watch rules are defined, the last defined file reference is used for all files being watched. This causes trigger-instances to fail rule enforcement.
Adding the reference to the logging shows `test1.log` has the reference ending with `8c505`
```
2021-11-30 18:50:40,434 140243179888112 INFO file_watch_sensor [-] Added file "/var/log/test1.log" with reference linux.7e55ad75-b10c-44db-b53e-95164a18c505
2021-11-30 18:50:41,459 140243179888112 INFO file_watch_sensor [-] Added file "/var/log/test2.log" with reference linux.590de8c1-c578-4125-9082-2cee03b030a9
```
When the file contents are updated a trigger is emitted by the sensor using the reference of `test2.log` ending in `b030a9`
```
root@u1804:~# st2 trigger-instance get 61a6649f164625c2d94dccb8 -y
id: 61a6649f164625c2d94dccb8
occurrence_time: '2021-11-30T17:51:27.294000Z'
payload:
file_name: test1.log
file_path: /var/log/test1.log
line: Tue Nov 30 18:51:27 CET 2021 dhcp
status: processed
trigger: linux.590de8c1-c578-4125-9082-2cee03b030a9
```
This PR consists of adding a dictionary that is used to track the `path_name` and `reference` pair and looks up the reference for the file that was altered when creating the trigger.
The code is formatted with black and updated to use fstrings since all instances will be using Python 3.6+
</issue>
<code>
[start of contrib/linux/sensors/file_watch_sensor.py]
1 # Copyright 2020 The StackStorm Authors.
2 # Copyright 2019 Extreme Networks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17
18 import eventlet
19
20 from logshipper.tail import Tail
21
22 from st2reactor.sensor.base import Sensor
23
24
25 class FileWatchSensor(Sensor):
26 def __init__(self, sensor_service, config=None):
27 super(FileWatchSensor, self).__init__(
28 sensor_service=sensor_service, config=config
29 )
30 self._trigger = None
31 self._logger = self._sensor_service.get_logger(__name__)
32 self._tail = None
33
34 def setup(self):
35 self._tail = Tail(filenames=[])
36 self._tail.handler = self._handle_line
37 self._tail.should_run = True
38
39 def run(self):
40 self._tail.run()
41
42 def cleanup(self):
43 if self._tail:
44 self._tail.should_run = False
45
46 try:
47 self._tail.notifier.stop()
48 except Exception:
49 self._logger.exception("Unable to stop the tail notifier")
50
51 def add_trigger(self, trigger):
52 file_path = trigger["parameters"].get("file_path", None)
53
54 if not file_path:
55 self._logger.error('Received trigger type without "file_path" field.')
56 return
57
58 self._trigger = trigger.get("ref", None)
59
60 if not self._trigger:
61 raise Exception("Trigger %s did not contain a ref." % trigger)
62
63 # Wait a bit to avoid initialization race in logshipper library
64 eventlet.sleep(1.0)
65
66 self._tail.add_file(filename=file_path)
67 self._logger.info('Added file "%s"' % (file_path))
68
69 def update_trigger(self, trigger):
70 pass
71
72 def remove_trigger(self, trigger):
73 file_path = trigger["parameters"].get("file_path", None)
74
75 if not file_path:
76 self._logger.error('Received trigger type without "file_path" field.')
77 return
78
79 self._tail.remove_file(filename=file_path)
80 self._trigger = None
81
82 self._logger.info('Removed file "%s"' % (file_path))
83
84 def _handle_line(self, file_path, line):
85 trigger = self._trigger
86 payload = {
87 "file_path": file_path,
88 "file_name": os.path.basename(file_path),
89 "line": line,
90 }
91 self._logger.debug(
92 "Sending payload %s for trigger %s to sensor_service.", payload, trigger
93 )
94 self.sensor_service.dispatch(trigger=trigger, payload=payload)
95
[end of contrib/linux/sensors/file_watch_sensor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/contrib/linux/sensors/file_watch_sensor.py b/contrib/linux/sensors/file_watch_sensor.py
--- a/contrib/linux/sensors/file_watch_sensor.py
+++ b/contrib/linux/sensors/file_watch_sensor.py
@@ -14,7 +14,6 @@
# limitations under the License.
import os
-
import eventlet
from logshipper.tail import Tail
@@ -27,44 +26,46 @@
super(FileWatchSensor, self).__init__(
sensor_service=sensor_service, config=config
)
- self._trigger = None
- self._logger = self._sensor_service.get_logger(__name__)
- self._tail = None
+ self.log = self._sensor_service.get_logger(__name__)
+ self.tail = None
+ self.file_ref = {}
def setup(self):
- self._tail = Tail(filenames=[])
- self._tail.handler = self._handle_line
- self._tail.should_run = True
+ self.tail = Tail(filenames=[])
+ self.tail.handler = self._handle_line
+ self.tail.should_run = True
def run(self):
- self._tail.run()
+ self.tail.run()
def cleanup(self):
- if self._tail:
- self._tail.should_run = False
+ if self.tail:
+ self.tail.should_run = False
try:
- self._tail.notifier.stop()
+ self.tail.notifier.stop()
except Exception:
- self._logger.exception("Unable to stop the tail notifier")
+ self.log.exception("Unable to stop the tail notifier")
def add_trigger(self, trigger):
file_path = trigger["parameters"].get("file_path", None)
if not file_path:
- self._logger.error('Received trigger type without "file_path" field.')
+ self.log.error('Received trigger type without "file_path" field.')
return
- self._trigger = trigger.get("ref", None)
+ trigger = trigger.get("ref", None)
- if not self._trigger:
- raise Exception("Trigger %s did not contain a ref." % trigger)
+ if not trigger:
+ raise Exception(f"Trigger {trigger} did not contain a ref.")
# Wait a bit to avoid initialization race in logshipper library
eventlet.sleep(1.0)
- self._tail.add_file(filename=file_path)
- self._logger.info('Added file "%s"' % (file_path))
+ self.tail.add_file(filename=file_path)
+ self.file_ref[file_path] = trigger
+
+ self.log.info(f"Added file '{file_path}' ({trigger}) to watch list.")
def update_trigger(self, trigger):
pass
@@ -73,22 +74,28 @@
file_path = trigger["parameters"].get("file_path", None)
if not file_path:
- self._logger.error('Received trigger type without "file_path" field.')
+ self.log.error("Received trigger type without 'file_path' field.")
return
- self._tail.remove_file(filename=file_path)
- self._trigger = None
+ self.tail.remove_file(filename=file_path)
+ self.file_ref.pop(file_path)
- self._logger.info('Removed file "%s"' % (file_path))
+ self.log.info(f"Removed file '{file_path}' ({trigger}) from watch list.")
def _handle_line(self, file_path, line):
- trigger = self._trigger
+ if file_path not in self.file_ref:
+ self.log.error(
+ f"No reference found for {file_path}, unable to emit trigger!"
+ )
+ return
+
+ trigger = self.file_ref[file_path]
payload = {
"file_path": file_path,
"file_name": os.path.basename(file_path),
"line": line,
}
- self._logger.debug(
- "Sending payload %s for trigger %s to sensor_service.", payload, trigger
+ self.log.debug(
+ f"Sending payload {payload} for trigger {trigger} to sensor_service."
)
self.sensor_service.dispatch(trigger=trigger, payload=payload)
|
{"golden_diff": "diff --git a/contrib/linux/sensors/file_watch_sensor.py b/contrib/linux/sensors/file_watch_sensor.py\n--- a/contrib/linux/sensors/file_watch_sensor.py\n+++ b/contrib/linux/sensors/file_watch_sensor.py\n@@ -14,7 +14,6 @@\n # limitations under the License.\n \n import os\n-\n import eventlet\n \n from logshipper.tail import Tail\n@@ -27,44 +26,46 @@\n super(FileWatchSensor, self).__init__(\n sensor_service=sensor_service, config=config\n )\n- self._trigger = None\n- self._logger = self._sensor_service.get_logger(__name__)\n- self._tail = None\n+ self.log = self._sensor_service.get_logger(__name__)\n+ self.tail = None\n+ self.file_ref = {}\n \n def setup(self):\n- self._tail = Tail(filenames=[])\n- self._tail.handler = self._handle_line\n- self._tail.should_run = True\n+ self.tail = Tail(filenames=[])\n+ self.tail.handler = self._handle_line\n+ self.tail.should_run = True\n \n def run(self):\n- self._tail.run()\n+ self.tail.run()\n \n def cleanup(self):\n- if self._tail:\n- self._tail.should_run = False\n+ if self.tail:\n+ self.tail.should_run = False\n \n try:\n- self._tail.notifier.stop()\n+ self.tail.notifier.stop()\n except Exception:\n- self._logger.exception(\"Unable to stop the tail notifier\")\n+ self.log.exception(\"Unable to stop the tail notifier\")\n \n def add_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n \n if not file_path:\n- self._logger.error('Received trigger type without \"file_path\" field.')\n+ self.log.error('Received trigger type without \"file_path\" field.')\n return\n \n- self._trigger = trigger.get(\"ref\", None)\n+ trigger = trigger.get(\"ref\", None)\n \n- if not self._trigger:\n- raise Exception(\"Trigger %s did not contain a ref.\" % trigger)\n+ if not trigger:\n+ raise Exception(f\"Trigger {trigger} did not contain a ref.\")\n \n # Wait a bit to avoid initialization race in logshipper library\n eventlet.sleep(1.0)\n \n- self._tail.add_file(filename=file_path)\n- self._logger.info('Added file \"%s\"' % (file_path))\n+ self.tail.add_file(filename=file_path)\n+ self.file_ref[file_path] = trigger\n+\n+ self.log.info(f\"Added file '{file_path}' ({trigger}) to watch list.\")\n \n def update_trigger(self, trigger):\n pass\n@@ -73,22 +74,28 @@\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n \n if not file_path:\n- self._logger.error('Received trigger type without \"file_path\" field.')\n+ self.log.error(\"Received trigger type without 'file_path' field.\")\n return\n \n- self._tail.remove_file(filename=file_path)\n- self._trigger = None\n+ self.tail.remove_file(filename=file_path)\n+ self.file_ref.pop(file_path)\n \n- self._logger.info('Removed file \"%s\"' % (file_path))\n+ self.log.info(f\"Removed file '{file_path}' ({trigger}) from watch list.\")\n \n def _handle_line(self, file_path, line):\n- trigger = self._trigger\n+ if file_path not in self.file_ref:\n+ self.log.error(\n+ f\"No reference found for {file_path}, unable to emit trigger!\"\n+ )\n+ return\n+\n+ trigger = self.file_ref[file_path]\n payload = {\n \"file_path\": file_path,\n \"file_name\": os.path.basename(file_path),\n \"line\": line,\n }\n- self._logger.debug(\n- \"Sending payload %s for trigger %s to sensor_service.\", payload, trigger\n+ self.log.debug(\n+ f\"Sending payload {payload} for trigger {trigger} to sensor_service.\"\n )\n self.sensor_service.dispatch(trigger=trigger, payload=payload)\n", "issue": "Fix multiple file support in linux.file_watch.line + black + fstring\nWhen multiple file_watch rules are defined, 
the last defined file reference is used for all files being watched. This causes trigger-instances to fail rule enforcement.\r\n\r\nAdding the reference to the logging shows `test1.log` has the reference ending with `8c505`\r\n```\r\n2021-11-30 18:50:40,434 140243179888112 INFO file_watch_sensor [-] Added file \"/var/log/test1.log\" with reference linux.7e55ad75-b10c-44db-b53e-95164a18c505\r\n2021-11-30 18:50:41,459 140243179888112 INFO file_watch_sensor [-] Added file \"/var/log/test2.log\" with reference linux.590de8c1-c578-4125-9082-2cee03b030a9\r\n```\r\n\r\nWhen the file contents are updated a trigger is emitted by the sensor using the reference of `test2.log` ending in `b030a9`\r\n```\r\nroot@u1804:~# st2 trigger-instance get 61a6649f164625c2d94dccb8 -y\r\nid: 61a6649f164625c2d94dccb8\r\noccurrence_time: '2021-11-30T17:51:27.294000Z'\r\npayload:\r\n file_name: test1.log\r\n file_path: /var/log/test1.log\r\n line: Tue Nov 30 18:51:27 CET 2021 dhcp\r\nstatus: processed\r\ntrigger: linux.590de8c1-c578-4125-9082-2cee03b030a9\r\n```\r\n\r\nThis PR consists of adding a dictionary that is used to track the `path_name` and `reference` pair and looks up the reference for the file that was altered when creating the trigger.\r\n\r\nThe code is formatted with black and updated to use fstrings since all instances will be using Python 3.6+\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport eventlet\n\nfrom logshipper.tail import Tail\n\nfrom st2reactor.sensor.base import Sensor\n\n\nclass FileWatchSensor(Sensor):\n def __init__(self, sensor_service, config=None):\n super(FileWatchSensor, self).__init__(\n sensor_service=sensor_service, config=config\n )\n self._trigger = None\n self._logger = self._sensor_service.get_logger(__name__)\n self._tail = None\n\n def setup(self):\n self._tail = Tail(filenames=[])\n self._tail.handler = self._handle_line\n self._tail.should_run = True\n\n def run(self):\n self._tail.run()\n\n def cleanup(self):\n if self._tail:\n self._tail.should_run = False\n\n try:\n self._tail.notifier.stop()\n except Exception:\n self._logger.exception(\"Unable to stop the tail notifier\")\n\n def add_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n\n if not file_path:\n self._logger.error('Received trigger type without \"file_path\" field.')\n return\n\n self._trigger = trigger.get(\"ref\", None)\n\n if not self._trigger:\n raise Exception(\"Trigger %s did not contain a ref.\" % trigger)\n\n # Wait a bit to avoid initialization race in logshipper library\n eventlet.sleep(1.0)\n\n self._tail.add_file(filename=file_path)\n self._logger.info('Added file \"%s\"' % (file_path))\n\n def update_trigger(self, trigger):\n pass\n\n def remove_trigger(self, trigger):\n file_path = trigger[\"parameters\"].get(\"file_path\", None)\n\n if not file_path:\n self._logger.error('Received trigger type without \"file_path\" 
field.')\n return\n\n self._tail.remove_file(filename=file_path)\n self._trigger = None\n\n self._logger.info('Removed file \"%s\"' % (file_path))\n\n def _handle_line(self, file_path, line):\n trigger = self._trigger\n payload = {\n \"file_path\": file_path,\n \"file_name\": os.path.basename(file_path),\n \"line\": line,\n }\n self._logger.debug(\n \"Sending payload %s for trigger %s to sensor_service.\", payload, trigger\n )\n self.sensor_service.dispatch(trigger=trigger, payload=payload)\n", "path": "contrib/linux/sensors/file_watch_sensor.py"}]}
| 1,933 | 917 |
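The core of the patch above is a `file_path -> trigger reference` dictionary, so each watched file dispatches with its own trigger instead of whichever one was registered last. A minimal sketch of that bookkeeping without st2's `Sensor` base class or logshipper's `Tail`; here `dispatch` is a stand-in for `sensor_service.dispatch`:

```python
# Minimal sketch of the per-file trigger bookkeeping introduced by the patch
# above, decoupled from st2 and logshipper.
import os


class FileRefMap:
    def __init__(self):
        self.file_ref = {}  # path -> trigger reference

    def add(self, file_path: str, trigger_ref: str) -> None:
        self.file_ref[file_path] = trigger_ref

    def remove(self, file_path: str) -> None:
        self.file_ref.pop(file_path, None)

    def handle_line(self, file_path: str, line: str, dispatch) -> None:
        trigger = self.file_ref.get(file_path)
        if trigger is None:
            return  # no reference for this file -> nothing to emit
        dispatch(trigger, {
            "file_path": file_path,
            "file_name": os.path.basename(file_path),
            "line": line,
        })


if __name__ == "__main__":
    refs = FileRefMap()
    refs.add("/var/log/test1.log", "linux.7e55ad75")
    refs.add("/var/log/test2.log", "linux.590de8c1")
    refs.handle_line("/var/log/test1.log", "hello",
                     dispatch=lambda t, p: print(t, p["file_name"]))
    # prints: linux.7e55ad75 test1.log  (test1's own reference, not test2's)
```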
| gh_patches_debug_11313 | rasdani/github-patches | git_diff | fossasia__open-event-server-4804 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin Access required for accessing Order endpoint
**Describe the bug**
Admin access is required for endpoints related to Order.
**To Reproduce**
GET/POST request to Order endpoints.
**Expected behavior**
The organizer should be able to query the Order endpoints as it was before.
**Stacktrace**
`{
"errors": [
{
"status": 403,
"source": {
"source": ""
},
"title": "Access Forbidden",
"detail": "Admin Access Required"
}
],
"jsonapi": {
"version": "1.0"
}
}`
</issue>
<code>
[start of app/api/orders.py]
1 from datetime import datetime
2
3 from flask import request, render_template
4 from flask_jwt import current_identity as current_user
5 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
6 from marshmallow_jsonapi import fields
7 from marshmallow_jsonapi.flask import Schema
8
9 from app.api.data_layers.ChargesLayer import ChargesLayer
10 from app.api.helpers.db import save_to_db, safe_query
11 from app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity
12 from app.api.helpers.files import create_save_pdf
13 from app.api.helpers.files import make_frontend_url
14 from app.api.helpers.mail import send_email_to_attendees
15 from app.api.helpers.mail import send_order_cancel_email
16 from app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer, \
17 send_notif_ticket_cancel
18 from app.api.helpers.permission_manager import has_access
19 from app.api.helpers.permissions import jwt_required
20 from app.api.helpers.query import event_query
21 from app.api.helpers.ticketing import TicketingManager
22 from app.api.helpers.utilities import dasherize, require_relationship
23 from app.api.schema.orders import OrderSchema
24 from app.models import db
25 from app.models.discount_code import DiscountCode, TICKET
26 from app.models.order import Order, OrderTicket
27 from app.models.ticket_holder import TicketHolder
28
29
30 class OrdersListPost(ResourceList):
31 """
32 OrderListPost class for OrderSchema
33 """
34 def before_post(self, args, kwargs, data=None):
35 """
36 before post method to check for required relationship and proper permission
37 :param args:
38 :param kwargs:
39 :param data:
40 :return:
41 """
42 require_relationship(['event', 'ticket_holders'], data)
43 if not has_access('is_coorganizer', event_id=data['event']):
44 data['status'] = 'pending'
45
46 def before_create_object(self, data, view_kwargs):
47 """
48 before create object method for OrderListPost Class
49 :param data:
50 :param view_kwargs:
51 :return:
52 """
53 if data.get('cancel_note'):
54 del data['cancel_note']
55
56 # Apply discount only if the user is not event admin
57 if data.get('discount') and not has_access('is_coorganizer', event_id=data['event']):
58 discount_code = safe_query(self, DiscountCode, 'id', data['discount'], 'discount_code_id')
59 if not discount_code.is_active:
60 raise UnprocessableEntity({'source': 'discount_code_id'}, "Inactive Discount Code")
61 else:
62 now = datetime.utcnow()
63 valid_from = datetime.strptime(discount_code.valid_from, '%Y-%m-%d %H:%M:%S')
64 valid_till = datetime.strptime(discount_code.valid_till, '%Y-%m-%d %H:%M:%S')
65 if not (valid_from <= now <= valid_till):
66 raise UnprocessableEntity({'source': 'discount_code_id'}, "Inactive Discount Code")
67 if not TicketingManager.match_discount_quantity(discount_code, data['ticket_holders']):
68 raise UnprocessableEntity({'source': 'discount_code_id'}, 'Discount Usage Exceeded')
69
70 if discount_code.event.id != data['event'] and discount_code.user_for == TICKET:
71 raise UnprocessableEntity({'source': 'discount_code_id'}, "Invalid Discount Code")
72
73 def after_create_object(self, order, data, view_kwargs):
74 """
75 after create object method for OrderListPost Class
76 :param order:
77 :param data:
78 :param view_kwargs:
79 :return:
80 """
81 order_tickets = {}
82 for holder in order.ticket_holders:
83 if holder.id != current_user.id:
84 pdf = create_save_pdf(render_template('/pdf/ticket_attendee.html', order=order, holder=holder))
85 else:
86 pdf = create_save_pdf(render_template('/pdf/ticket_purchaser.html', order=order))
87 holder.pdf_url = pdf
88 save_to_db(holder)
89 if order_tickets.get(holder.ticket_id) is None:
90 order_tickets[holder.ticket_id] = 1
91 else:
92 order_tickets[holder.ticket_id] += 1
93 for ticket in order_tickets:
94 od = OrderTicket(order_id=order.id, ticket_id=ticket, quantity=order_tickets[ticket])
95 save_to_db(od)
96 order.quantity = order.get_tickets_count()
97 save_to_db(order)
98 if not has_access('is_coorganizer', event_id=data['event']):
99 TicketingManager.calculate_update_amount(order)
100 send_email_to_attendees(order, current_user.id)
101 send_notif_to_attendees(order, current_user.id)
102
103 order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))
104 for organizer in order.event.organizers:
105 send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name)
106
107 data['user_id'] = current_user.id
108
109 methods = ['POST', ]
110 decorators = (jwt_required,)
111 schema = OrderSchema
112 data_layer = {'session': db.session,
113 'model': Order,
114 'methods': {'before_create_object': before_create_object,
115 'after_create_object': after_create_object
116 }}
117
118
119 class OrdersList(ResourceList):
120 """
121 OrderList class for OrderSchema
122 """
123 def before_get(self, args, kwargs):
124 """
125 before get method to get the resource id for fetching details
126 :param args:
127 :param kwargs:
128 :return:
129 """
130 if kwargs.get('event_id') is None:
131 if 'GET' in request.method and has_access('is_admin'):
132 pass
133 else:
134 raise ForbiddenException({'source': ''}, "Admin Access Required")
135 elif not has_access('is_coorganizer', event_id=kwargs['event_id']):
136 raise ForbiddenException({'source': ''}, "Co-Organizer Access Required")
137
138 def query(self, view_kwargs):
139 query_ = self.session.query(Order)
140 query_ = event_query(self, query_, view_kwargs)
141
142 return query_
143
144 decorators = (jwt_required,)
145 methods = ['GET', ]
146 schema = OrderSchema
147 data_layer = {'session': db.session,
148 'model': Order,
149 'methods': {
150 'query': query
151 }}
152
153
154 class OrderDetail(ResourceDetail):
155 """
156 OrderDetail class for OrderSchema
157 """
158 def before_get_object(self, view_kwargs):
159 """
160 before get method to get the resource id for fetching details
161 :param view_kwargs:
162 :return:
163 """
164 if view_kwargs.get('attendee_id'):
165 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
166 view_kwargs['order_identifier'] = attendee.order.identifier
167
168 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
169
170 if not has_access('is_coorganizer_or_user_itself', event_id=order.event_id, user_id=order.user_id):
171 return ForbiddenException({'source': ''}, 'Access Forbidden')
172
173 def before_update_object(self, order, data, view_kwargs):
174 """
175 :param order:
176 :param data:
177 :param view_kwargs:
178 :return:
179 """
180 if not has_access('is_admin'):
181 for element in data:
182 if element != 'status':
183 setattr(data, element, getattr(order, element))
184
185 if not has_access('is_coorganizer', event_id=order.event.id):
186 raise ForbiddenException({'pointer': 'data/status'},
187 "To update status minimum Co-organizer access required")
188
189 def after_update_object(self, order, data, view_kwargs):
190 """
191 :param order:
192 :param data:
193 :param view_kwargs:
194 :return:
195 """
196 if order.status == 'cancelled':
197 send_order_cancel_email(order)
198 send_notif_ticket_cancel(order)
199
200 def before_delete_object(self, order, view_kwargs):
201 """
202 method to check for proper permissions for deleting
203 :param order:
204 :param view_kwargs:
205 :return:
206 """
207 if not has_access('is_coorganizer', event_id=order.event.id):
208 raise ForbiddenException({'source': ''}, 'Access Forbidden')
209
210 decorators = (jwt_required,)
211
212 schema = OrderSchema
213 data_layer = {'session': db.session,
214 'model': Order,
215 'url_field': 'order_identifier',
216 'id_field': 'identifier',
217 'methods': {
218 'before_update_object': before_update_object,
219 'before_delete_object': before_delete_object,
220 'before_get_object': before_get_object,
221 'after_update_object': after_update_object
222 }}
223
224
225 class OrderRelationship(ResourceRelationship):
226 """
227 Order relationship
228 """
229 decorators = (jwt_required,)
230 schema = OrderSchema
231 data_layer = {'session': db.session,
232 'model': Order}
233
234
235 class ChargeSchema(Schema):
236 """
237 ChargeSchema
238 """
239 class Meta:
240 """
241 Meta class for ChargeSchema
242 """
243 type_ = 'charge'
244 inflect = dasherize
245 self_view = 'v1.charge_list'
246 self_view_kwargs = {'id': '<id>'}
247
248 id = fields.Str(dump_only=True)
249 stripe = fields.Str(allow_none=True)
250
251
252 class ChargeList(ResourceList):
253 """
254 ChargeList ResourceList for ChargesLayer class
255 """
256 methods = ['POST', ]
257 schema = ChargeSchema
258
259 data_layer = {
260 'class': ChargesLayer,
261 'session': db.session
262 }
263
[end of app/api/orders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/orders.py b/app/api/orders.py
--- a/app/api/orders.py
+++ b/app/api/orders.py
@@ -127,11 +127,8 @@
:param kwargs:
:return:
"""
- if kwargs.get('event_id') is None:
- if 'GET' in request.method and has_access('is_admin'):
- pass
- else:
- raise ForbiddenException({'source': ''}, "Admin Access Required")
+ if 'GET' in request.method and kwargs.get('event_id') is None:
+ pass
elif not has_access('is_coorganizer', event_id=kwargs['event_id']):
raise ForbiddenException({'source': ''}, "Co-Organizer Access Required")
|
{"golden_diff": "diff --git a/app/api/orders.py b/app/api/orders.py\n--- a/app/api/orders.py\n+++ b/app/api/orders.py\n@@ -127,11 +127,8 @@\n :param kwargs:\n :return:\n \"\"\"\n- if kwargs.get('event_id') is None:\n- if 'GET' in request.method and has_access('is_admin'):\n- pass\n- else:\n- raise ForbiddenException({'source': ''}, \"Admin Access Required\")\n+ if 'GET' in request.method and kwargs.get('event_id') is None:\n+ pass\n elif not has_access('is_coorganizer', event_id=kwargs['event_id']):\n raise ForbiddenException({'source': ''}, \"Co-Organizer Access Required\")\n", "issue": "Admin Access required for accessing Order endpoint\n**Describe the bug**\r\nAdmin access is required for endpoints related to Order. \r\n\r\n**To Reproduce**\r\nGET/POST request to Order endpoints.\r\n\r\n**Expected behavior**\r\nThe organizer should be able to query the Order endpoints as it was before.\r\n\r\n**Stacktrace**\r\n`{\r\n \"errors\": [\r\n {\r\n \"status\": 403,\r\n \"source\": {\r\n \"source\": \"\"\r\n },\r\n \"title\": \"Access Forbidden\",\r\n \"detail\": \"Admin Access Required\"\r\n }\r\n ],\r\n \"jsonapi\": {\r\n \"version\": \"1.0\"\r\n }\r\n}`\r\n\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom flask import request, render_template\nfrom flask_jwt import current_identity as current_user\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Schema\n\nfrom app.api.data_layers.ChargesLayer import ChargesLayer\nfrom app.api.helpers.db import save_to_db, safe_query\nfrom app.api.helpers.exceptions import ForbiddenException, UnprocessableEntity\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_to_attendees\nfrom app.api.helpers.mail import send_order_cancel_email\nfrom app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer, \\\n send_notif_ticket_cancel\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.ticketing import TicketingManager\nfrom app.api.helpers.utilities import dasherize, require_relationship\nfrom app.api.schema.orders import OrderSchema\nfrom app.models import db\nfrom app.models.discount_code import DiscountCode, TICKET\nfrom app.models.order import Order, OrderTicket\nfrom app.models.ticket_holder import TicketHolder\n\n\nclass OrdersListPost(ResourceList):\n \"\"\"\n OrderListPost class for OrderSchema\n \"\"\"\n def before_post(self, args, kwargs, data=None):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'ticket_holders'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n data['status'] = 'pending'\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create object method for OrderListPost Class\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('cancel_note'):\n del data['cancel_note']\n\n # Apply discount only if the user is not event admin\n if data.get('discount') and not has_access('is_coorganizer', event_id=data['event']):\n discount_code = safe_query(self, DiscountCode, 'id', data['discount'], 'discount_code_id')\n if not discount_code.is_active:\n raise 
UnprocessableEntity({'source': 'discount_code_id'}, \"Inactive Discount Code\")\n else:\n now = datetime.utcnow()\n valid_from = datetime.strptime(discount_code.valid_from, '%Y-%m-%d %H:%M:%S')\n valid_till = datetime.strptime(discount_code.valid_till, '%Y-%m-%d %H:%M:%S')\n if not (valid_from <= now <= valid_till):\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Inactive Discount Code\")\n if not TicketingManager.match_discount_quantity(discount_code, data['ticket_holders']):\n raise UnprocessableEntity({'source': 'discount_code_id'}, 'Discount Usage Exceeded')\n\n if discount_code.event.id != data['event'] and discount_code.user_for == TICKET:\n raise UnprocessableEntity({'source': 'discount_code_id'}, \"Invalid Discount Code\")\n\n def after_create_object(self, order, data, view_kwargs):\n \"\"\"\n after create object method for OrderListPost Class\n :param order:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n order_tickets = {}\n for holder in order.ticket_holders:\n if holder.id != current_user.id:\n pdf = create_save_pdf(render_template('/pdf/ticket_attendee.html', order=order, holder=holder))\n else:\n pdf = create_save_pdf(render_template('/pdf/ticket_purchaser.html', order=order))\n holder.pdf_url = pdf\n save_to_db(holder)\n if order_tickets.get(holder.ticket_id) is None:\n order_tickets[holder.ticket_id] = 1\n else:\n order_tickets[holder.ticket_id] += 1\n for ticket in order_tickets:\n od = OrderTicket(order_id=order.id, ticket_id=ticket, quantity=order_tickets[ticket])\n save_to_db(od)\n order.quantity = order.get_tickets_count()\n save_to_db(order)\n if not has_access('is_coorganizer', event_id=data['event']):\n TicketingManager.calculate_update_amount(order)\n send_email_to_attendees(order, current_user.id)\n send_notif_to_attendees(order, current_user.id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name)\n\n data['user_id'] = current_user.id\n\n methods = ['POST', ]\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'methods': {'before_create_object': before_create_object,\n 'after_create_object': after_create_object\n }}\n\n\nclass OrdersList(ResourceList):\n \"\"\"\n OrderList class for OrderSchema\n \"\"\"\n def before_get(self, args, kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n if kwargs.get('event_id') is None:\n if 'GET' in request.method and has_access('is_admin'):\n pass\n else:\n raise ForbiddenException({'source': ''}, \"Admin Access Required\")\n elif not has_access('is_coorganizer', event_id=kwargs['event_id']):\n raise ForbiddenException({'source': ''}, \"Co-Organizer Access Required\")\n\n def query(self, view_kwargs):\n query_ = self.session.query(Order)\n query_ = event_query(self, query_, view_kwargs)\n\n return query_\n\n decorators = (jwt_required,)\n methods = ['GET', ]\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'methods': {\n 'query': query\n }}\n\n\nclass OrderDetail(ResourceDetail):\n \"\"\"\n OrderDetail class for OrderSchema\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id'):\n attendee = 
safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n view_kwargs['order_identifier'] = attendee.order.identifier\n\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n\n if not has_access('is_coorganizer_or_user_itself', event_id=order.event_id, user_id=order.user_id):\n return ForbiddenException({'source': ''}, 'Access Forbidden')\n\n def before_update_object(self, order, data, view_kwargs):\n \"\"\"\n :param order:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if not has_access('is_admin'):\n for element in data:\n if element != 'status':\n setattr(data, element, getattr(order, element))\n\n if not has_access('is_coorganizer', event_id=order.event.id):\n raise ForbiddenException({'pointer': 'data/status'},\n \"To update status minimum Co-organizer access required\")\n\n def after_update_object(self, order, data, view_kwargs):\n \"\"\"\n :param order:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if order.status == 'cancelled':\n send_order_cancel_email(order)\n send_notif_ticket_cancel(order)\n\n def before_delete_object(self, order, view_kwargs):\n \"\"\"\n method to check for proper permissions for deleting\n :param order:\n :param view_kwargs:\n :return:\n \"\"\"\n if not has_access('is_coorganizer', event_id=order.event.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n\n decorators = (jwt_required,)\n\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order,\n 'url_field': 'order_identifier',\n 'id_field': 'identifier',\n 'methods': {\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object,\n 'before_get_object': before_get_object,\n 'after_update_object': after_update_object\n }}\n\n\nclass OrderRelationship(ResourceRelationship):\n \"\"\"\n Order relationship\n \"\"\"\n decorators = (jwt_required,)\n schema = OrderSchema\n data_layer = {'session': db.session,\n 'model': Order}\n\n\nclass ChargeSchema(Schema):\n \"\"\"\n ChargeSchema\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for ChargeSchema\n \"\"\"\n type_ = 'charge'\n inflect = dasherize\n self_view = 'v1.charge_list'\n self_view_kwargs = {'id': '<id>'}\n\n id = fields.Str(dump_only=True)\n stripe = fields.Str(allow_none=True)\n\n\nclass ChargeList(ResourceList):\n \"\"\"\n ChargeList ResourceList for ChargesLayer class\n \"\"\"\n methods = ['POST', ]\n schema = ChargeSchema\n\n data_layer = {\n 'class': ChargesLayer,\n 'session': db.session\n }\n", "path": "app/api/orders.py"}]}
| 3,379 | 164 |
gh_patches_debug_24486
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-4652
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the ability to call non-pulp-related functions as task targets
</issue>
<code>
[start of pulpcore/tasking/tasks.py]
1 import asyncio
2 import contextlib
3 import contextvars
4 import importlib
5 import logging
6 import sys
7 import traceback
8 from gettext import gettext as _
9
10 from django.db import connection, transaction
11 from django.db.models import Model
12 from django_guid import get_guid
13 from pulpcore.app.apps import MODULE_PLUGIN_VERSIONS
14 from pulpcore.app.loggers import deprecation_logger
15 from pulpcore.app.models import Task
16 from pulpcore.app.util import current_task, get_domain, get_url
17 from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
18
19 _logger = logging.getLogger(__name__)
20
21
22 def _validate_and_get_resources(resources):
23 resource_set = set()
24 for r in resources:
25 if isinstance(r, str):
26 resource_set.add(r)
27 elif isinstance(r, Model):
28 resource_set.add(get_url(r))
29 elif r is None:
30 # Silently drop None values
31 pass
32 else:
33 raise ValueError(_("Must be (str|Model)"))
34 return list(resource_set)
35
36
37 def wakeup_worker():
38 # Notify workers
39 with connection.connection.cursor() as cursor:
40 cursor.execute("NOTIFY pulp_worker_wakeup")
41
42
43 def execute_task(task):
44 # This extra stack is needed to isolate the current_task ContextVar
45 contextvars.copy_context().run(_execute_task, task)
46
47
48 def _execute_task(task):
49 # Store the task id in the context for `Task.current()`.
50 current_task.set(task)
51 task.set_running()
52 try:
53 _logger.info(_("Starting task %s"), task.pk)
54
55 # Execute task
56 module_name, function_name = task.name.rsplit(".", 1)
57 module = importlib.import_module(module_name)
58 func = getattr(module, function_name)
59 args = task.enc_args or ()
60 kwargs = task.enc_kwargs or {}
61 result = func(*args, **kwargs)
62 if asyncio.iscoroutine(result):
63 _logger.debug(_("Task is coroutine %s"), task.pk)
64 loop = asyncio.get_event_loop()
65 loop.run_until_complete(result)
66
67 except Exception:
68 exc_type, exc, tb = sys.exc_info()
69 task.set_failed(exc, tb)
70 _logger.info(_("Task %s failed (%s)"), task.pk, exc)
71 _logger.info("\n".join(traceback.format_list(traceback.extract_tb(tb))))
72 else:
73 task.set_completed()
74 _logger.info(_("Task completed %s"), task.pk)
75
76
77 def dispatch(
78 func,
79 args=None,
80 kwargs=None,
81 task_group=None,
82 exclusive_resources=None,
83 shared_resources=None,
84 immediate=False,
85 deferred=True,
86 versions=None,
87 ):
88 """
89 Enqueue a message to Pulp workers with a reservation.
90
91 This method provides normal enqueue functionality, while also requesting necessary locks for
92 serialized urls. No two tasks that claim the same resource can execute concurrently. It
93 accepts resources which it transforms into a list of urls (one for each resource).
94
95 This method creates a :class:`pulpcore.app.models.Task` object and returns it.
96
97 The values in `args` and `kwargs` must be JSON serializable, but may contain instances of
98 ``uuid.UUID``.
99
100 Args:
101 func (callable | str): The function to be run when the necessary locks are acquired.
102 args (tuple): The positional arguments to pass on to the task.
103 kwargs (dict): The keyword arguments to pass on to the task.
104 task_group (pulpcore.app.models.TaskGroup): A TaskGroup to add the created Task to.
105 exclusive_resources (list): A list of resources this task needs exclusive access to while
106 running. Each resource can be either a `str` or a `django.models.Model` instance.
107 shared_resources (list): A list of resources this task needs non-exclusive access to while
108 running. Each resource can be either a `str` or a `django.models.Model` instance.
109 immediate (bool): Whether to allow running this task immediately. It must be guaranteed to
110 execute fast without blocking. If not all resource constraints are met, the task will
111 either be returned in a canceled state or, if `deferred` is `True` be left in the queue
112 to be picked up by a worker eventually. Defaults to `False`.
113 deferred (bool): Whether to allow defer running the task to a pulpcore_worker. Defaults to
114 `True`. `immediate` and `deferred` cannot both be `False`.
115 versions (Optional[Dict[str, str]]): Minimum versions of components by app_label the worker
116 must provide to handle the task.
117
118 Returns (pulpcore.app.models.Task): The Pulp Task that was created.
119
120 Raises:
121 ValueError: When `resources` is an unsupported type.
122 """
123
124 assert deferred or immediate, "A task must be at least `deferred` or `immediate`."
125
126 if callable(func):
127 function_name = f"{func.__module__}.{func.__name__}"
128 else:
129 function_name = func
130
131 if versions is None:
132 try:
133 versions = MODULE_PLUGIN_VERSIONS[function_name.split(".", maxsplit=1)[0]]
134 except KeyError:
135 deprecation_logger.warn(
136 _(
137 "Using functions outside of pulp components as tasks is not supported and will "
138 "result in runtime errors with pulpcore>=3.40."
139 )
140 )
141 # The best we can do now...
142 versions = MODULE_PLUGIN_VERSIONS["pulpcore"]
143
144 if exclusive_resources is None:
145 exclusive_resources = []
146 else:
147 exclusive_resources = _validate_and_get_resources(exclusive_resources)
148 if shared_resources is None:
149 shared_resources = []
150 else:
151 shared_resources = _validate_and_get_resources(shared_resources)
152
153 # A task that is exclusive on a domain will block all tasks within that domain
154 domain_url = get_url(get_domain())
155 if domain_url not in exclusive_resources:
156 shared_resources.append(domain_url)
157 resources = exclusive_resources + [f"shared:{resource}" for resource in shared_resources]
158
159 notify_workers = False
160 with contextlib.ExitStack() as stack:
161 with transaction.atomic():
162 task = Task.objects.create(
163 state=TASK_STATES.WAITING,
164 logging_cid=(get_guid()),
165 task_group=task_group,
166 name=function_name,
167 enc_args=args,
168 enc_kwargs=kwargs,
169 parent_task=Task.current(),
170 reserved_resources_record=resources,
171 versions=versions,
172 )
173 if immediate:
174 # Grab the advisory lock before the task hits the db.
175 stack.enter_context(task)
176 else:
177 notify_workers = True
178 if immediate:
179 prior_tasks = Task.objects.filter(
180 state__in=TASK_INCOMPLETE_STATES, pulp_created__lt=task.pulp_created
181 )
182 # Compile a list of resources that must not be taken by other tasks.
183 colliding_resources = (
184 shared_resources
185 + exclusive_resources
186 + [f"shared:{resource}" for resource in exclusive_resources]
187 )
188 # Can we execute this task immediately?
189 if (
190 not colliding_resources
191 or not prior_tasks.filter(
192 reserved_resources_record__overlap=colliding_resources
193 ).exists()
194 ):
195 execute_task(task)
196 if resources:
197 notify_workers = True
198 elif deferred:
199 notify_workers = True
200 else:
201 task.set_canceling()
202 task.set_canceled(TASK_STATES.CANCELED, "Resources temporarily unavailable.")
203 if notify_workers:
204 wakeup_worker()
205 return task
206
207
208 def cancel_task(task_id):
209 """
210 Cancel the task that is represented by the given task_id.
211
212 This method cancels only the task with given task_id, not the spawned tasks. This also updates
213 task's state to 'canceling'.
214
215 Args:
216 task_id (str): The ID of the task you wish to cancel
217
218 Raises:
219 rest_framework.exceptions.NotFound: If a task with given task_id does not exist
220 """
221 task = Task.objects.get(pk=task_id)
222
223 if task.state in TASK_FINAL_STATES:
224 # If the task is already done, just stop
225 _logger.debug(
226 "Task [{task_id}] already in a final state: {state}".format(
227 task_id=task_id, state=task.state
228 )
229 )
230 return task
231
232 _logger.info(_("Canceling task: {id}").format(id=task_id))
233
234 # This is the only valid transition without holding the task lock
235 task.set_canceling()
236 # Notify the worker that might be running that task and other workers to clean up
237 with connection.cursor() as cursor:
238 cursor.execute("SELECT pg_notify('pulp_worker_cancel', %s)", (str(task.pk),))
239 cursor.execute("NOTIFY pulp_worker_wakeup")
240 return task
241
[end of pulpcore/tasking/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/tasking/tasks.py b/pulpcore/tasking/tasks.py
--- a/pulpcore/tasking/tasks.py
+++ b/pulpcore/tasking/tasks.py
@@ -11,7 +11,6 @@
from django.db.models import Model
from django_guid import get_guid
from pulpcore.app.apps import MODULE_PLUGIN_VERSIONS
-from pulpcore.app.loggers import deprecation_logger
from pulpcore.app.models import Task
from pulpcore.app.util import current_task, get_domain, get_url
from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES
@@ -129,17 +128,7 @@
function_name = func
if versions is None:
- try:
- versions = MODULE_PLUGIN_VERSIONS[function_name.split(".", maxsplit=1)[0]]
- except KeyError:
- deprecation_logger.warn(
- _(
- "Using functions outside of pulp components as tasks is not supported and will "
- "result in runtime errors with pulpcore>=3.40."
- )
- )
- # The best we can do now...
- versions = MODULE_PLUGIN_VERSIONS["pulpcore"]
+ versions = MODULE_PLUGIN_VERSIONS[function_name.split(".", maxsplit=1)[0]]
if exclusive_resources is None:
exclusive_resources = []
|
{"golden_diff": "diff --git a/pulpcore/tasking/tasks.py b/pulpcore/tasking/tasks.py\n--- a/pulpcore/tasking/tasks.py\n+++ b/pulpcore/tasking/tasks.py\n@@ -11,7 +11,6 @@\n from django.db.models import Model\n from django_guid import get_guid\n from pulpcore.app.apps import MODULE_PLUGIN_VERSIONS\n-from pulpcore.app.loggers import deprecation_logger\n from pulpcore.app.models import Task\n from pulpcore.app.util import current_task, get_domain, get_url\n from pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n@@ -129,17 +128,7 @@\n function_name = func\n \n if versions is None:\n- try:\n- versions = MODULE_PLUGIN_VERSIONS[function_name.split(\".\", maxsplit=1)[0]]\n- except KeyError:\n- deprecation_logger.warn(\n- _(\n- \"Using functions outside of pulp components as tasks is not supported and will \"\n- \"result in runtime errors with pulpcore>=3.40.\"\n- )\n- )\n- # The best we can do now...\n- versions = MODULE_PLUGIN_VERSIONS[\"pulpcore\"]\n+ versions = MODULE_PLUGIN_VERSIONS[function_name.split(\".\", maxsplit=1)[0]]\n \n if exclusive_resources is None:\n exclusive_resources = []\n", "issue": "Remove the ability to call non-pulp-related functions as task targets\n\n", "before_files": [{"content": "import asyncio\nimport contextlib\nimport contextvars\nimport importlib\nimport logging\nimport sys\nimport traceback\nfrom gettext import gettext as _\n\nfrom django.db import connection, transaction\nfrom django.db.models import Model\nfrom django_guid import get_guid\nfrom pulpcore.app.apps import MODULE_PLUGIN_VERSIONS\nfrom pulpcore.app.loggers import deprecation_logger\nfrom pulpcore.app.models import Task\nfrom pulpcore.app.util import current_task, get_domain, get_url\nfrom pulpcore.constants import TASK_FINAL_STATES, TASK_INCOMPLETE_STATES, TASK_STATES\n\n_logger = logging.getLogger(__name__)\n\n\ndef _validate_and_get_resources(resources):\n resource_set = set()\n for r in resources:\n if isinstance(r, str):\n resource_set.add(r)\n elif isinstance(r, Model):\n resource_set.add(get_url(r))\n elif r is None:\n # Silently drop None values\n pass\n else:\n raise ValueError(_(\"Must be (str|Model)\"))\n return list(resource_set)\n\n\ndef wakeup_worker():\n # Notify workers\n with connection.connection.cursor() as cursor:\n cursor.execute(\"NOTIFY pulp_worker_wakeup\")\n\n\ndef execute_task(task):\n # This extra stack is needed to isolate the current_task ContextVar\n contextvars.copy_context().run(_execute_task, task)\n\n\ndef _execute_task(task):\n # Store the task id in the context for `Task.current()`.\n current_task.set(task)\n task.set_running()\n try:\n _logger.info(_(\"Starting task %s\"), task.pk)\n\n # Execute task\n module_name, function_name = task.name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n func = getattr(module, function_name)\n args = task.enc_args or ()\n kwargs = task.enc_kwargs or {}\n result = func(*args, **kwargs)\n if asyncio.iscoroutine(result):\n _logger.debug(_(\"Task is coroutine %s\"), task.pk)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(result)\n\n except Exception:\n exc_type, exc, tb = sys.exc_info()\n task.set_failed(exc, tb)\n _logger.info(_(\"Task %s failed (%s)\"), task.pk, exc)\n _logger.info(\"\\n\".join(traceback.format_list(traceback.extract_tb(tb))))\n else:\n task.set_completed()\n _logger.info(_(\"Task completed %s\"), task.pk)\n\n\ndef dispatch(\n func,\n args=None,\n kwargs=None,\n task_group=None,\n exclusive_resources=None,\n shared_resources=None,\n immediate=False,\n 
deferred=True,\n versions=None,\n):\n \"\"\"\n Enqueue a message to Pulp workers with a reservation.\n\n This method provides normal enqueue functionality, while also requesting necessary locks for\n serialized urls. No two tasks that claim the same resource can execute concurrently. It\n accepts resources which it transforms into a list of urls (one for each resource).\n\n This method creates a :class:`pulpcore.app.models.Task` object and returns it.\n\n The values in `args` and `kwargs` must be JSON serializable, but may contain instances of\n ``uuid.UUID``.\n\n Args:\n func (callable | str): The function to be run when the necessary locks are acquired.\n args (tuple): The positional arguments to pass on to the task.\n kwargs (dict): The keyword arguments to pass on to the task.\n task_group (pulpcore.app.models.TaskGroup): A TaskGroup to add the created Task to.\n exclusive_resources (list): A list of resources this task needs exclusive access to while\n running. Each resource can be either a `str` or a `django.models.Model` instance.\n shared_resources (list): A list of resources this task needs non-exclusive access to while\n running. Each resource can be either a `str` or a `django.models.Model` instance.\n immediate (bool): Whether to allow running this task immediately. It must be guaranteed to\n execute fast without blocking. If not all resource constraints are met, the task will\n either be returned in a canceled state or, if `deferred` is `True` be left in the queue\n to be picked up by a worker eventually. Defaults to `False`.\n deferred (bool): Whether to allow defer running the task to a pulpcore_worker. Defaults to\n `True`. `immediate` and `deferred` cannot both be `False`.\n versions (Optional[Dict[str, str]]): Minimum versions of components by app_label the worker\n must provide to handle the task.\n\n Returns (pulpcore.app.models.Task): The Pulp Task that was created.\n\n Raises:\n ValueError: When `resources` is an unsupported type.\n \"\"\"\n\n assert deferred or immediate, \"A task must be at least `deferred` or `immediate`.\"\n\n if callable(func):\n function_name = f\"{func.__module__}.{func.__name__}\"\n else:\n function_name = func\n\n if versions is None:\n try:\n versions = MODULE_PLUGIN_VERSIONS[function_name.split(\".\", maxsplit=1)[0]]\n except KeyError:\n deprecation_logger.warn(\n _(\n \"Using functions outside of pulp components as tasks is not supported and will \"\n \"result in runtime errors with pulpcore>=3.40.\"\n )\n )\n # The best we can do now...\n versions = MODULE_PLUGIN_VERSIONS[\"pulpcore\"]\n\n if exclusive_resources is None:\n exclusive_resources = []\n else:\n exclusive_resources = _validate_and_get_resources(exclusive_resources)\n if shared_resources is None:\n shared_resources = []\n else:\n shared_resources = _validate_and_get_resources(shared_resources)\n\n # A task that is exclusive on a domain will block all tasks within that domain\n domain_url = get_url(get_domain())\n if domain_url not in exclusive_resources:\n shared_resources.append(domain_url)\n resources = exclusive_resources + [f\"shared:{resource}\" for resource in shared_resources]\n\n notify_workers = False\n with contextlib.ExitStack() as stack:\n with transaction.atomic():\n task = Task.objects.create(\n state=TASK_STATES.WAITING,\n logging_cid=(get_guid()),\n task_group=task_group,\n name=function_name,\n enc_args=args,\n enc_kwargs=kwargs,\n parent_task=Task.current(),\n reserved_resources_record=resources,\n versions=versions,\n )\n if immediate:\n # Grab the advisory 
lock before the task hits the db.\n stack.enter_context(task)\n else:\n notify_workers = True\n if immediate:\n prior_tasks = Task.objects.filter(\n state__in=TASK_INCOMPLETE_STATES, pulp_created__lt=task.pulp_created\n )\n # Compile a list of resources that must not be taken by other tasks.\n colliding_resources = (\n shared_resources\n + exclusive_resources\n + [f\"shared:{resource}\" for resource in exclusive_resources]\n )\n # Can we execute this task immediately?\n if (\n not colliding_resources\n or not prior_tasks.filter(\n reserved_resources_record__overlap=colliding_resources\n ).exists()\n ):\n execute_task(task)\n if resources:\n notify_workers = True\n elif deferred:\n notify_workers = True\n else:\n task.set_canceling()\n task.set_canceled(TASK_STATES.CANCELED, \"Resources temporarily unavailable.\")\n if notify_workers:\n wakeup_worker()\n return task\n\n\ndef cancel_task(task_id):\n \"\"\"\n Cancel the task that is represented by the given task_id.\n\n This method cancels only the task with given task_id, not the spawned tasks. This also updates\n task's state to 'canceling'.\n\n Args:\n task_id (str): The ID of the task you wish to cancel\n\n Raises:\n rest_framework.exceptions.NotFound: If a task with given task_id does not exist\n \"\"\"\n task = Task.objects.get(pk=task_id)\n\n if task.state in TASK_FINAL_STATES:\n # If the task is already done, just stop\n _logger.debug(\n \"Task [{task_id}] already in a final state: {state}\".format(\n task_id=task_id, state=task.state\n )\n )\n return task\n\n _logger.info(_(\"Canceling task: {id}\").format(id=task_id))\n\n # This is the only valid transition without holding the task lock\n task.set_canceling()\n # Notify the worker that might be running that task and other workers to clean up\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT pg_notify('pulp_worker_cancel', %s)\", (str(task.pk),))\n cursor.execute(\"NOTIFY pulp_worker_wakeup\")\n return task\n", "path": "pulpcore/tasking/tasks.py"}]}
| 3,035 | 290 |
gh_patches_debug_1844
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-13326
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ubuntu 21.10 (Impish Indri) has reached end of life as of July 14, 2022
See https://lists.ubuntu.com/archives/ubuntu-announce/2022-July/000281.html
I don't think we have good docs for removing a distribution, but should be the opposite of [gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution](https://gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution).
</issue>
<code>
[start of scripts-dev/build_debian_packages.py]
1 #!/usr/bin/env python3
2
3 # Build the Debian packages using Docker images.
4 #
5 # This script builds the Docker images and then executes them sequentially, each
6 # one building a Debian package for the targeted operating system. It is
7 # designed to be a "single command" to produce all the images.
8 #
9 # By default, builds for all known distributions, but a list of distributions
10 # can be passed on the commandline for debugging.
11
12 import argparse
13 import json
14 import os
15 import signal
16 import subprocess
17 import sys
18 import threading
19 from concurrent.futures import ThreadPoolExecutor
20 from types import FrameType
21 from typing import Collection, Optional, Sequence, Set
22
23 DISTS = (
24 "debian:buster", # oldstable: EOL 2022-08
25 "debian:bullseye",
26 "debian:bookworm",
27 "debian:sid",
28 "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
29 "ubuntu:impish", # 21.10 (EOL 2022-07)
30 "ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
31 )
32
33 DESC = """\
34 Builds .debs for synapse, using a Docker image for the build environment.
35
36 By default, builds for all known distributions, but a list of distributions
37 can be passed on the commandline for debugging.
38 """
39
40 projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
41
42
43 class Builder(object):
44 def __init__(
45 self,
46 redirect_stdout: bool = False,
47 docker_build_args: Optional[Sequence[str]] = None,
48 ):
49 self.redirect_stdout = redirect_stdout
50 self._docker_build_args = tuple(docker_build_args or ())
51 self.active_containers: Set[str] = set()
52 self._lock = threading.Lock()
53 self._failed = False
54
55 def run_build(self, dist: str, skip_tests: bool = False) -> None:
56 """Build deb for a single distribution"""
57
58 if self._failed:
59 print("not building %s due to earlier failure" % (dist,))
60 raise Exception("failed")
61
62 try:
63 self._inner_build(dist, skip_tests)
64 except Exception as e:
65 print("build of %s failed: %s" % (dist, e), file=sys.stderr)
66 self._failed = True
67 raise
68
69 def _inner_build(self, dist: str, skip_tests: bool = False) -> None:
70 tag = dist.split(":", 1)[1]
71
72 # Make the dir where the debs will live.
73 #
74 # Note that we deliberately put this outside the source tree, otherwise
75 # we tend to get source packages which are full of debs. (We could hack
76 # around that with more magic in the build_debian.sh script, but that
77 # doesn't solve the problem for natively-run dpkg-buildpakage).
78 debsdir = os.path.join(projdir, "../debs")
79 os.makedirs(debsdir, exist_ok=True)
80
81 if self.redirect_stdout:
82 logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
83 print("building %s: directing output to %s" % (dist, logfile))
84 stdout = open(logfile, "w")
85 else:
86 stdout = None
87
88 # first build a docker image for the build environment
89 build_args = (
90 (
91 "docker",
92 "build",
93 "--tag",
94 "dh-venv-builder:" + tag,
95 "--build-arg",
96 "distro=" + dist,
97 "-f",
98 "docker/Dockerfile-dhvirtualenv",
99 )
100 + self._docker_build_args
101 + ("docker",)
102 )
103
104 subprocess.check_call(
105 build_args,
106 stdout=stdout,
107 stderr=subprocess.STDOUT,
108 cwd=projdir,
109 )
110
111 container_name = "synapse_build_" + tag
112 with self._lock:
113 self.active_containers.add(container_name)
114
115 # then run the build itself
116 subprocess.check_call(
117 [
118 "docker",
119 "run",
120 "--rm",
121 "--name",
122 container_name,
123 "--volume=" + projdir + ":/synapse/source:ro",
124 "--volume=" + debsdir + ":/debs",
125 "-e",
126 "TARGET_USERID=%i" % (os.getuid(),),
127 "-e",
128 "TARGET_GROUPID=%i" % (os.getgid(),),
129 "-e",
130 "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
131 "dh-venv-builder:" + tag,
132 ],
133 stdout=stdout,
134 stderr=subprocess.STDOUT,
135 )
136
137 with self._lock:
138 self.active_containers.remove(container_name)
139
140 if stdout is not None:
141 stdout.close()
142 print("Completed build of %s" % (dist,))
143
144 def kill_containers(self) -> None:
145 with self._lock:
146 active = list(self.active_containers)
147
148 for c in active:
149 print("killing container %s" % (c,))
150 subprocess.run(
151 [
152 "docker",
153 "kill",
154 c,
155 ],
156 stdout=subprocess.DEVNULL,
157 )
158 with self._lock:
159 self.active_containers.remove(c)
160
161
162 def run_builds(
163 builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False
164 ) -> None:
165 def sig(signum: int, _frame: Optional[FrameType]) -> None:
166 print("Caught SIGINT")
167 builder.kill_containers()
168
169 signal.signal(signal.SIGINT, sig)
170
171 with ThreadPoolExecutor(max_workers=jobs) as e:
172 res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)
173
174 # make sure we consume the iterable so that exceptions are raised.
175 for _ in res:
176 pass
177
178
179 if __name__ == "__main__":
180 parser = argparse.ArgumentParser(
181 description=DESC,
182 )
183 parser.add_argument(
184 "-j",
185 "--jobs",
186 type=int,
187 default=1,
188 help="specify the number of builds to run in parallel",
189 )
190 parser.add_argument(
191 "--no-check",
192 action="store_true",
193 help="skip running tests after building",
194 )
195 parser.add_argument(
196 "--docker-build-arg",
197 action="append",
198 help="specify an argument to pass to docker build",
199 )
200 parser.add_argument(
201 "--show-dists-json",
202 action="store_true",
203 help="instead of building the packages, just list the dists to build for, as a json array",
204 )
205 parser.add_argument(
206 "dist",
207 nargs="*",
208 default=DISTS,
209 help="a list of distributions to build for. Default: %(default)s",
210 )
211 args = parser.parse_args()
212 if args.show_dists_json:
213 print(json.dumps(DISTS))
214 else:
215 builder = Builder(
216 redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg
217 )
218 run_builds(
219 builder,
220 dists=args.dist,
221 jobs=args.jobs,
222 skip_tests=args.no_check,
223 )
224
[end of scripts-dev/build_debian_packages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -26,7 +26,6 @@
"debian:bookworm",
"debian:sid",
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
- "ubuntu:impish", # 21.10 (EOL 2022-07)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
)
|
{"golden_diff": "diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py\n--- a/scripts-dev/build_debian_packages.py\n+++ b/scripts-dev/build_debian_packages.py\n@@ -26,7 +26,6 @@\n \"debian:bookworm\",\n \"debian:sid\",\n \"ubuntu:focal\", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)\n- \"ubuntu:impish\", # 21.10 (EOL 2022-07)\n \"ubuntu:jammy\", # 22.04 LTS (EOL 2027-04)\n )\n", "issue": "Ubuntu 21.10 (Impish Indri) has reached end of life as of July 14, 2022\nSee https://lists.ubuntu.com/archives/ubuntu-announce/2022-July/000281.html\r\n\r\nI don't think we have good docs for removing a distribution, but should be the opposite of [gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution](https://gitlab.matrix.org/new-vector/internal/-/wikis/Synapse-Debian-Packages#adding-a-new-distribution).\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Build the Debian packages using Docker images.\n#\n# This script builds the Docker images and then executes them sequentially, each\n# one building a Debian package for the targeted operating system. It is\n# designed to be a \"single command\" to produce all the images.\n#\n# By default, builds for all known distributions, but a list of distributions\n# can be passed on the commandline for debugging.\n\nimport argparse\nimport json\nimport os\nimport signal\nimport subprocess\nimport sys\nimport threading\nfrom concurrent.futures import ThreadPoolExecutor\nfrom types import FrameType\nfrom typing import Collection, Optional, Sequence, Set\n\nDISTS = (\n \"debian:buster\", # oldstable: EOL 2022-08\n \"debian:bullseye\",\n \"debian:bookworm\",\n \"debian:sid\",\n \"ubuntu:focal\", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)\n \"ubuntu:impish\", # 21.10 (EOL 2022-07)\n \"ubuntu:jammy\", # 22.04 LTS (EOL 2027-04)\n)\n\nDESC = \"\"\"\\\nBuilds .debs for synapse, using a Docker image for the build environment.\n\nBy default, builds for all known distributions, but a list of distributions\ncan be passed on the commandline for debugging.\n\"\"\"\n\nprojdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n\nclass Builder(object):\n def __init__(\n self,\n redirect_stdout: bool = False,\n docker_build_args: Optional[Sequence[str]] = None,\n ):\n self.redirect_stdout = redirect_stdout\n self._docker_build_args = tuple(docker_build_args or ())\n self.active_containers: Set[str] = set()\n self._lock = threading.Lock()\n self._failed = False\n\n def run_build(self, dist: str, skip_tests: bool = False) -> None:\n \"\"\"Build deb for a single distribution\"\"\"\n\n if self._failed:\n print(\"not building %s due to earlier failure\" % (dist,))\n raise Exception(\"failed\")\n\n try:\n self._inner_build(dist, skip_tests)\n except Exception as e:\n print(\"build of %s failed: %s\" % (dist, e), file=sys.stderr)\n self._failed = True\n raise\n\n def _inner_build(self, dist: str, skip_tests: bool = False) -> None:\n tag = dist.split(\":\", 1)[1]\n\n # Make the dir where the debs will live.\n #\n # Note that we deliberately put this outside the source tree, otherwise\n # we tend to get source packages which are full of debs. 
(We could hack\n # around that with more magic in the build_debian.sh script, but that\n # doesn't solve the problem for natively-run dpkg-buildpakage).\n debsdir = os.path.join(projdir, \"../debs\")\n os.makedirs(debsdir, exist_ok=True)\n\n if self.redirect_stdout:\n logfile = os.path.join(debsdir, \"%s.buildlog\" % (tag,))\n print(\"building %s: directing output to %s\" % (dist, logfile))\n stdout = open(logfile, \"w\")\n else:\n stdout = None\n\n # first build a docker image for the build environment\n build_args = (\n (\n \"docker\",\n \"build\",\n \"--tag\",\n \"dh-venv-builder:\" + tag,\n \"--build-arg\",\n \"distro=\" + dist,\n \"-f\",\n \"docker/Dockerfile-dhvirtualenv\",\n )\n + self._docker_build_args\n + (\"docker\",)\n )\n\n subprocess.check_call(\n build_args,\n stdout=stdout,\n stderr=subprocess.STDOUT,\n cwd=projdir,\n )\n\n container_name = \"synapse_build_\" + tag\n with self._lock:\n self.active_containers.add(container_name)\n\n # then run the build itself\n subprocess.check_call(\n [\n \"docker\",\n \"run\",\n \"--rm\",\n \"--name\",\n container_name,\n \"--volume=\" + projdir + \":/synapse/source:ro\",\n \"--volume=\" + debsdir + \":/debs\",\n \"-e\",\n \"TARGET_USERID=%i\" % (os.getuid(),),\n \"-e\",\n \"TARGET_GROUPID=%i\" % (os.getgid(),),\n \"-e\",\n \"DEB_BUILD_OPTIONS=%s\" % (\"nocheck\" if skip_tests else \"\"),\n \"dh-venv-builder:\" + tag,\n ],\n stdout=stdout,\n stderr=subprocess.STDOUT,\n )\n\n with self._lock:\n self.active_containers.remove(container_name)\n\n if stdout is not None:\n stdout.close()\n print(\"Completed build of %s\" % (dist,))\n\n def kill_containers(self) -> None:\n with self._lock:\n active = list(self.active_containers)\n\n for c in active:\n print(\"killing container %s\" % (c,))\n subprocess.run(\n [\n \"docker\",\n \"kill\",\n c,\n ],\n stdout=subprocess.DEVNULL,\n )\n with self._lock:\n self.active_containers.remove(c)\n\n\ndef run_builds(\n builder: Builder, dists: Collection[str], jobs: int = 1, skip_tests: bool = False\n) -> None:\n def sig(signum: int, _frame: Optional[FrameType]) -> None:\n print(\"Caught SIGINT\")\n builder.kill_containers()\n\n signal.signal(signal.SIGINT, sig)\n\n with ThreadPoolExecutor(max_workers=jobs) as e:\n res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)\n\n # make sure we consume the iterable so that exceptions are raised.\n for _ in res:\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=DESC,\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n default=1,\n help=\"specify the number of builds to run in parallel\",\n )\n parser.add_argument(\n \"--no-check\",\n action=\"store_true\",\n help=\"skip running tests after building\",\n )\n parser.add_argument(\n \"--docker-build-arg\",\n action=\"append\",\n help=\"specify an argument to pass to docker build\",\n )\n parser.add_argument(\n \"--show-dists-json\",\n action=\"store_true\",\n help=\"instead of building the packages, just list the dists to build for, as a json array\",\n )\n parser.add_argument(\n \"dist\",\n nargs=\"*\",\n default=DISTS,\n help=\"a list of distributions to build for. Default: %(default)s\",\n )\n args = parser.parse_args()\n if args.show_dists_json:\n print(json.dumps(DISTS))\n else:\n builder = Builder(\n redirect_stdout=(args.jobs > 1), docker_build_args=args.docker_build_arg\n )\n run_builds(\n builder,\n dists=args.dist,\n jobs=args.jobs,\n skip_tests=args.no_check,\n )\n", "path": "scripts-dev/build_debian_packages.py"}]}
| 2,832 | 160 |
gh_patches_debug_28415
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-602
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pre-commit should meet the XDG Base Directory Specification
XDG Base Directory Specification is quite common now. Just `ls ~/.cache ~/.config ~/.local` to realize it.
I think `~/.pre-commit` should be moved to `$XDG_CACHE_HOME` or `$HOME/.cache`
https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
</issue>
<code>
[start of pre_commit/store.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os.path
7 import sqlite3
8 import tempfile
9
10 from cached_property import cached_property
11
12 import pre_commit.constants as C
13 from pre_commit import file_lock
14 from pre_commit.util import clean_path_on_failure
15 from pre_commit.util import cmd_output
16 from pre_commit.util import copy_tree_to_path
17 from pre_commit.util import cwd
18 from pre_commit.util import no_git_env
19 from pre_commit.util import resource_filename
20
21
22 logger = logging.getLogger('pre_commit')
23
24
25 def _get_default_directory():
26 """Returns the default directory for the Store. This is intentionally
27 underscored to indicate that `Store.get_default_directory` is the intended
28 way to get this information. This is also done so
29 `Store.get_default_directory` can be mocked in tests and
30 `_get_default_directory` can be tested.
31 """
32 return os.environ.get(
33 'PRE_COMMIT_HOME',
34 os.path.join(os.path.expanduser('~'), '.pre-commit'),
35 )
36
37
38 class Store(object):
39 get_default_directory = staticmethod(_get_default_directory)
40 __created = False
41
42 def __init__(self, directory=None):
43 if directory is None:
44 directory = self.get_default_directory()
45
46 self.directory = directory
47
48 @contextlib.contextmanager
49 def exclusive_lock(self):
50 def blocked_cb(): # pragma: no cover (tests are single-process)
51 logger.info('Locking pre-commit directory')
52
53 with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):
54 yield
55
56 def _write_readme(self):
57 with io.open(os.path.join(self.directory, 'README'), 'w') as readme:
58 readme.write(
59 'This directory is maintained by the pre-commit project.\n'
60 'Learn more: https://github.com/pre-commit/pre-commit\n',
61 )
62
63 def _write_sqlite_db(self):
64 # To avoid a race where someone ^Cs between db creation and execution
65 # of the CREATE TABLE statement
66 fd, tmpfile = tempfile.mkstemp(dir=self.directory)
67 # We'll be managing this file ourselves
68 os.close(fd)
69 # sqlite doesn't close its fd with its contextmanager >.<
70 # contextlib.closing fixes this.
71 # See: http://stackoverflow.com/a/28032829/812183
72 with contextlib.closing(sqlite3.connect(tmpfile)) as db:
73 db.executescript(
74 'CREATE TABLE repos ('
75 ' repo CHAR(255) NOT NULL,'
76 ' ref CHAR(255) NOT NULL,'
77 ' path CHAR(255) NOT NULL,'
78 ' PRIMARY KEY (repo, ref)'
79 ');',
80 )
81
82 # Atomic file move
83 os.rename(tmpfile, self.db_path)
84
85 def _create(self):
86 if not os.path.exists(self.directory):
87 os.makedirs(self.directory)
88 self._write_readme()
89
90 if os.path.exists(self.db_path):
91 return
92 with self.exclusive_lock():
93 # Another process may have already completed this work
94 if os.path.exists(self.db_path): # pragma: no cover (race)
95 return
96 self._write_sqlite_db()
97
98 def require_created(self):
99 """Require the pre-commit file store to be created."""
100 if not self.__created:
101 self._create()
102 self.__created = True
103
104 def _new_repo(self, repo, ref, make_strategy):
105 self.require_created()
106
107 def _get_result():
108 # Check if we already exist
109 with sqlite3.connect(self.db_path) as db:
110 result = db.execute(
111 'SELECT path FROM repos WHERE repo = ? AND ref = ?',
112 [repo, ref],
113 ).fetchone()
114 if result:
115 return result[0]
116
117 result = _get_result()
118 if result:
119 return result
120 with self.exclusive_lock():
121 # Another process may have already completed this work
122 result = _get_result()
123 if result: # pragma: no cover (race)
124 return result
125
126 logger.info('Initializing environment for {}.'.format(repo))
127
128 directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)
129 with clean_path_on_failure(directory):
130 make_strategy(directory)
131
132 # Update our db with the created repo
133 with sqlite3.connect(self.db_path) as db:
134 db.execute(
135 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
136 [repo, ref, directory],
137 )
138 return directory
139
140 def clone(self, repo, ref):
141 """Clone the given url and checkout the specific ref."""
142 def clone_strategy(directory):
143 cmd_output(
144 'git', 'clone', '--no-checkout', repo, directory,
145 env=no_git_env(),
146 )
147 with cwd(directory):
148 cmd_output('git', 'reset', ref, '--hard', env=no_git_env())
149 cmd_output(
150 'git', 'submodule', 'update', '--init', '--recursive',
151 env=no_git_env(),
152 )
153
154 return self._new_repo(repo, ref, clone_strategy)
155
156 def make_local(self, deps):
157 def make_local_strategy(directory):
158 copy_tree_to_path(resource_filename('empty_template'), directory)
159 return self._new_repo(
160 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,
161 make_local_strategy,
162 )
163
164 @cached_property
165 def db_path(self):
166 return os.path.join(self.directory, 'db.db')
167
[end of pre_commit/store.py]
[start of pre_commit/error_handler.py]
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 import contextlib
6 import os.path
7 import traceback
8
9 import six
10
11 from pre_commit import five
12 from pre_commit import output
13 from pre_commit.errors import FatalError
14 from pre_commit.store import Store
15
16
17 def _to_bytes(exc):
18 try:
19 return bytes(exc)
20 except Exception:
21 return six.text_type(exc).encode('UTF-8')
22
23
24 def _log_and_exit(msg, exc, formatted):
25 error_msg = b''.join((
26 five.to_bytes(msg), b': ',
27 five.to_bytes(type(exc).__name__), b': ',
28 _to_bytes(exc), b'\n',
29 ))
30 output.write(error_msg)
31 output.write_line('Check the log at ~/.pre-commit/pre-commit.log')
32 store = Store()
33 store.require_created()
34 with open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:
35 output.write(error_msg, stream=log)
36 output.write_line(formatted, stream=log)
37 raise SystemExit(1)
38
39
40 @contextlib.contextmanager
41 def error_handler():
42 try:
43 yield
44 except FatalError as e:
45 _log_and_exit('An error has occurred', e, traceback.format_exc())
46 except Exception as e:
47 _log_and_exit(
48 'An unexpected error has occurred',
49 e,
50 traceback.format_exc(),
51 )
52
[end of pre_commit/error_handler.py]
[start of pre_commit/commands/clean.py]
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import os.path
5
6 from pre_commit import output
7 from pre_commit.util import rmtree
8
9
10 def clean(runner):
11 if os.path.exists(runner.store.directory):
12 rmtree(runner.store.directory)
13 output.write_line('Cleaned {}.'.format(runner.store.directory))
14 return 0
15
[end of pre_commit/commands/clean.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/commands/clean.py b/pre_commit/commands/clean.py
--- a/pre_commit/commands/clean.py
+++ b/pre_commit/commands/clean.py
@@ -8,7 +8,9 @@
def clean(runner):
- if os.path.exists(runner.store.directory):
- rmtree(runner.store.directory)
- output.write_line('Cleaned {}.'.format(runner.store.directory))
+ legacy_path = os.path.expanduser('~/.pre-commit')
+ for directory in (runner.store.directory, legacy_path):
+ if os.path.exists(directory):
+ rmtree(directory)
+ output.write_line('Cleaned {}.'.format(directory))
return 0
diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py
--- a/pre_commit/error_handler.py
+++ b/pre_commit/error_handler.py
@@ -28,10 +28,11 @@
_to_bytes(exc), b'\n',
))
output.write(error_msg)
- output.write_line('Check the log at ~/.pre-commit/pre-commit.log')
store = Store()
store.require_created()
- with open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:
+ log_path = os.path.join(store.directory, 'pre-commit.log')
+ output.write_line('Check the log at {}'.format(log_path))
+ with open(log_path, 'wb') as log:
output.write(error_msg, stream=log)
output.write_line(formatted, stream=log)
raise SystemExit(1)
diff --git a/pre_commit/store.py b/pre_commit/store.py
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -29,9 +29,9 @@
`Store.get_default_directory` can be mocked in tests and
`_get_default_directory` can be tested.
"""
- return os.environ.get(
- 'PRE_COMMIT_HOME',
- os.path.join(os.path.expanduser('~'), '.pre-commit'),
+ return os.environ.get('PRE_COMMIT_HOME') or os.path.join(
+ os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
+ 'pre-commit',
)
|
{"golden_diff": "diff --git a/pre_commit/commands/clean.py b/pre_commit/commands/clean.py\n--- a/pre_commit/commands/clean.py\n+++ b/pre_commit/commands/clean.py\n@@ -8,7 +8,9 @@\n \n \n def clean(runner):\n- if os.path.exists(runner.store.directory):\n- rmtree(runner.store.directory)\n- output.write_line('Cleaned {}.'.format(runner.store.directory))\n+ legacy_path = os.path.expanduser('~/.pre-commit')\n+ for directory in (runner.store.directory, legacy_path):\n+ if os.path.exists(directory):\n+ rmtree(directory)\n+ output.write_line('Cleaned {}.'.format(directory))\n return 0\ndiff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py\n--- a/pre_commit/error_handler.py\n+++ b/pre_commit/error_handler.py\n@@ -28,10 +28,11 @@\n _to_bytes(exc), b'\\n',\n ))\n output.write(error_msg)\n- output.write_line('Check the log at ~/.pre-commit/pre-commit.log')\n store = Store()\n store.require_created()\n- with open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:\n+ log_path = os.path.join(store.directory, 'pre-commit.log')\n+ output.write_line('Check the log at {}'.format(log_path))\n+ with open(log_path, 'wb') as log:\n output.write(error_msg, stream=log)\n output.write_line(formatted, stream=log)\n raise SystemExit(1)\ndiff --git a/pre_commit/store.py b/pre_commit/store.py\n--- a/pre_commit/store.py\n+++ b/pre_commit/store.py\n@@ -29,9 +29,9 @@\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n- return os.environ.get(\n- 'PRE_COMMIT_HOME',\n- os.path.join(os.path.expanduser('~'), '.pre-commit'),\n+ return os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n+ os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),\n+ 'pre-commit',\n )\n", "issue": "pre-commit should meet the XDG Base Directory Specification\nXDG Base Directory Specification is quite common now. Just `ls ~/.cache ~/.config ~/.local` to realize it.\r\n\r\nI think `~/.pre-commit` should be moved to `$XDG_CACHE_HOME` or `$HOME/.cache`\r\n\r\nhttps://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport sqlite3\nimport tempfile\n\nfrom cached_property import cached_property\n\nimport pre_commit.constants as C\nfrom pre_commit import file_lock\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import copy_tree_to_path\nfrom pre_commit.util import cwd\nfrom pre_commit.util import no_git_env\nfrom pre_commit.util import resource_filename\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory():\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. 
This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get(\n 'PRE_COMMIT_HOME',\n os.path.join(os.path.expanduser('~'), '.pre-commit'),\n )\n\n\nclass Store(object):\n get_default_directory = staticmethod(_get_default_directory)\n __created = False\n\n def __init__(self, directory=None):\n if directory is None:\n directory = self.get_default_directory()\n\n self.directory = directory\n\n @contextlib.contextmanager\n def exclusive_lock(self):\n def blocked_cb(): # pragma: no cover (tests are single-process)\n logger.info('Locking pre-commit directory')\n\n with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):\n yield\n\n def _write_readme(self):\n with io.open(os.path.join(self.directory, 'README'), 'w') as readme:\n readme.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n',\n )\n\n def _write_sqlite_db(self):\n # To avoid a race where someone ^Cs between db creation and execution\n # of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: http://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(tmpfile)) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo CHAR(255) NOT NULL,'\n ' ref CHAR(255) NOT NULL,'\n ' path CHAR(255) NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');',\n )\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n def _create(self):\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n self._write_readme()\n\n if os.path.exists(self.db_path):\n return\n with self.exclusive_lock():\n # Another process may have already completed this work\n if os.path.exists(self.db_path): # pragma: no cover (race)\n return\n self._write_sqlite_db()\n\n def require_created(self):\n \"\"\"Require the pre-commit file store to be created.\"\"\"\n if not self.__created:\n self._create()\n self.__created = True\n\n def _new_repo(self, repo, ref, make_strategy):\n self.require_created()\n\n def _get_result():\n # Check if we already exist\n with sqlite3.connect(self.db_path) as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n [repo, ref],\n ).fetchone()\n if result:\n return result[0]\n\n result = _get_result()\n if result:\n return result\n with self.exclusive_lock():\n # Another process may have already completed this work\n result = _get_result()\n if result: # pragma: no cover (race)\n return result\n\n logger.info('Initializing environment for {}.'.format(repo))\n\n directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(directory):\n make_strategy(directory)\n\n # Update our db with the created repo\n with sqlite3.connect(self.db_path) as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [repo, ref, directory],\n )\n return directory\n\n def clone(self, repo, ref):\n \"\"\"Clone the given url and checkout the specific ref.\"\"\"\n def clone_strategy(directory):\n cmd_output(\n 'git', 'clone', '--no-checkout', repo, directory,\n env=no_git_env(),\n )\n with cwd(directory):\n cmd_output('git', 'reset', ref, '--hard', env=no_git_env())\n cmd_output(\n 'git', 'submodule', 'update', '--init', '--recursive',\n env=no_git_env(),\n )\n\n return self._new_repo(repo, ref, clone_strategy)\n\n def make_local(self, deps):\n def make_local_strategy(directory):\n copy_tree_to_path(resource_filename('empty_template'), directory)\n return self._new_repo(\n 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,\n make_local_strategy,\n )\n\n @cached_property\n def db_path(self):\n return os.path.join(self.directory, 'db.db')\n", "path": "pre_commit/store.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport contextlib\nimport os.path\nimport traceback\n\nimport six\n\nfrom pre_commit import five\nfrom pre_commit import output\nfrom pre_commit.errors import FatalError\nfrom pre_commit.store import Store\n\n\ndef _to_bytes(exc):\n try:\n return bytes(exc)\n except Exception:\n return six.text_type(exc).encode('UTF-8')\n\n\ndef _log_and_exit(msg, exc, formatted):\n error_msg = b''.join((\n five.to_bytes(msg), b': ',\n five.to_bytes(type(exc).__name__), b': ',\n _to_bytes(exc), b'\\n',\n ))\n output.write(error_msg)\n output.write_line('Check the log at ~/.pre-commit/pre-commit.log')\n store = Store()\n store.require_created()\n with open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:\n output.write(error_msg, stream=log)\n output.write_line(formatted, stream=log)\n raise SystemExit(1)\n\n\[email protected]\ndef error_handler():\n try:\n yield\n except FatalError as e:\n _log_and_exit('An error has occurred', e, traceback.format_exc())\n except Exception as e:\n _log_and_exit(\n 'An unexpected error has occurred',\n e,\n traceback.format_exc(),\n )\n", "path": "pre_commit/error_handler.py"}, {"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os.path\n\nfrom pre_commit import output\nfrom pre_commit.util import rmtree\n\n\ndef clean(runner):\n if os.path.exists(runner.store.directory):\n rmtree(runner.store.directory)\n output.write_line('Cleaned {}.'.format(runner.store.directory))\n return 0\n", "path": "pre_commit/commands/clean.py"}]}
| 2,748 | 472 |
gh_patches_debug_2998
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-763
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
in xfce, xarchiver is needed for the "create archive" & "extract here" actions
in xfce, xarchiver is needed for the "create archive" & "extract here" actions
in xfce, xarchiver is needed for the "create archive" & "extract here" actions
in xfce, xarchiver is needed for the "create archive" & "extract here" actions
</issue>
<code>
[start of profiles/xfce4.py]
1 # A desktop environment using "Xfce4"
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "xfce4",
9 "xfce4-goodies",
10 "pavucontrol",
11 "lightdm",
12 "lightdm-gtk-greeter",
13 "gvfs",
14 "network-manager-applet",
15 ]
16
17
18 def _prep_function(*args, **kwargs):
19 """
20 Magic function called by the importing installer
21 before continuing any further. It also avoids executing any
22 other code in this stage. So it's a safe way to ask the user
23 for more input before any other installer steps start.
24 """
25
26 # XFCE requires a functional xorg installation.
27 profile = archinstall.Profile(None, 'xorg')
28 with profile.load_instructions(namespace='xorg.py') as imported:
29 if hasattr(imported, '_prep_function'):
30 return imported._prep_function()
31 else:
32 print('Deprecated (??): xorg profile has no _prep_function() anymore')
33
34
35 # Ensures that this code only gets executed if executed
36 # through importlib.util.spec_from_file_location("xfce4", "/somewhere/xfce4.py")
37 # or through conventional import xfce4
38 if __name__ == 'xfce4':
39 # Install dependency profiles
40 archinstall.storage['installation_session'].install_profile('xorg')
41
42 # Install the XFCE4 packages
43 archinstall.storage['installation_session'].add_additional_packages(__packages__)
44
45 archinstall.storage['installation_session'].enable_service('lightdm') # Light Display Manager
46
[end of profiles/xfce4.py]
[start of profiles/kde.py]
1 # A desktop environment using "KDE".
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 __packages__ = [
8 "plasma-meta",
9 "konsole",
10 "kate",
11 "dolphin",
12 "sddm",
13 "plasma-wayland-session",
14 "egl-wayland",
15 ]
16
17
18 # TODO: Remove hard dependency of bash (due to .bash_profile)
19
20
21 def _prep_function(*args, **kwargs):
22 """
23 Magic function called by the importing installer
24 before continuing any further. It also avoids executing any
25 other code in this stage. So it's a safe way to ask the user
26 for more input before any other installer steps start.
27 """
28
29 # KDE requires a functioning Xorg installation.
30 profile = archinstall.Profile(None, 'xorg')
31 with profile.load_instructions(namespace='xorg.py') as imported:
32 if hasattr(imported, '_prep_function'):
33 return imported._prep_function()
34 else:
35 print('Deprecated (??): xorg profile has no _prep_function() anymore')
36
37
38 """
39 def _post_install(*args, **kwargs):
40 if "nvidia" in _gfx_driver_packages:
41 print("Plasma Wayland has known compatibility issues with the proprietary Nvidia driver")
42 print("After booting, you can choose between Wayland and Xorg using the drop-down menu")
43 return True
44 """
45
46 # Ensures that this code only gets executed if executed
47 # through importlib.util.spec_from_file_location("kde", "/somewhere/kde.py")
48 # or through conventional import kde
49 if __name__ == 'kde':
50 # Install dependency profiles
51 archinstall.storage['installation_session'].install_profile('xorg')
52
53 # Install the KDE packages
54 archinstall.storage['installation_session'].add_additional_packages(__packages__)
55
56 # Enable autostart of KDE for all users
57 archinstall.storage['installation_session'].enable_service('sddm')
58
[end of profiles/kde.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/profiles/kde.py b/profiles/kde.py
--- a/profiles/kde.py
+++ b/profiles/kde.py
@@ -9,6 +9,7 @@
"konsole",
"kate",
"dolphin",
+ "ark",
"sddm",
"plasma-wayland-session",
"egl-wayland",
diff --git a/profiles/xfce4.py b/profiles/xfce4.py
--- a/profiles/xfce4.py
+++ b/profiles/xfce4.py
@@ -12,6 +12,7 @@
"lightdm-gtk-greeter",
"gvfs",
"network-manager-applet",
+ "xarchiver"
]
|
{"golden_diff": "diff --git a/profiles/kde.py b/profiles/kde.py\n--- a/profiles/kde.py\n+++ b/profiles/kde.py\n@@ -9,6 +9,7 @@\n \t\"konsole\",\n \t\"kate\",\n \t\"dolphin\",\n+\t\"ark\",\n \t\"sddm\",\n \t\"plasma-wayland-session\",\n \t\"egl-wayland\",\ndiff --git a/profiles/xfce4.py b/profiles/xfce4.py\n--- a/profiles/xfce4.py\n+++ b/profiles/xfce4.py\n@@ -12,6 +12,7 @@\n \t\"lightdm-gtk-greeter\",\n \t\"gvfs\",\n \t\"network-manager-applet\",\n+\t\"xarchiver\"\n ]\n", "issue": "in xfce, it need xarchiver for create archive & extract here-to\nin xfce, it need xarchiver for create archive & extract here-to\nin xfce, it need xarchiver for create archive & extract here-to\nin xfce, it need xarchiver for create archive & extract here-to\n", "before_files": [{"content": "# A desktop environment using \"Xfce4\"\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"xfce4\",\n\t\"xfce4-goodies\",\n\t\"pavucontrol\",\n\t\"lightdm\",\n\t\"lightdm-gtk-greeter\",\n\t\"gvfs\",\n\t\"network-manager-applet\",\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# XFCE requires a functional xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xfce4\", \"/somewhere/xfce4.py\")\n# or through conventional import xfce4\nif __name__ == 'xfce4':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the XFCE4 packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\tarchinstall.storage['installation_session'].enable_service('lightdm') # Light Display Manager\n", "path": "profiles/xfce4.py"}, {"content": "# A desktop environment using \"KDE\".\n\nimport archinstall\n\nis_top_level_profile = False\n\n__packages__ = [\n\t\"plasma-meta\",\n\t\"konsole\",\n\t\"kate\",\n\t\"dolphin\",\n\t\"sddm\",\n\t\"plasma-wayland-session\",\n\t\"egl-wayland\",\n]\n\n\n# TODO: Remove hard dependency of bash (due to .bash_profile)\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# KDE requires a functioning Xorg installation.\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n\"\"\"\ndef _post_install(*args, **kwargs):\n\tif \"nvidia\" in _gfx_driver_packages:\n\t\tprint(\"Plasma Wayland has known compatibility issues with the proprietary Nvidia driver\")\n\tprint(\"After booting, you can choose between Wayland and Xorg using the drop-down menu\")\n\treturn True\n\"\"\"\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"kde\", \"/somewhere/kde.py\")\n# or through conventional import kde\nif __name__ == 'kde':\n\t# Install dependency profiles\n\tarchinstall.storage['installation_session'].install_profile('xorg')\n\n\t# Install the KDE packages\n\tarchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\n\t# Enable autostart of KDE for all users\n\tarchinstall.storage['installation_session'].enable_service('sddm')\n", "path": "profiles/kde.py"}]}
| 1,603 | 170 |
gh_patches_debug_28713
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-1021
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] poco/1.10.1: Options not available / not correctly parsed
### Package and Environment Details
* Package Name/Version: **poco/1.10.1**
* Operating System+version: **Windows 10 64-bit**
* Compiler+version: **Visual Studio 15**
* Conan version: **conan 1.22.2**
* Python version: **Python 3.7.1**
---
I'm currently setting up a Poco-example project with Conan and I'm basically disabling every option (project only relies on Poco::Foundation).
However, when I use a list of options to disable, which also includes
- `poco:enable_encodings=False`
- `poco:enable_jwt=False`
I get the errors:
```
ERROR: poco/1.10.1: 'options.enable_encodings' doesn't exist
```
or
```
ERROR: poco/1.10.1: 'options.enable_jwt' doesn't exist
```
Looking at the `conanfile.py` that totally makes sense since these options are not available. **Why is that?**
However, If I use upper case options
- `Poco:enable_encodings=False`
- `Poco:enable_jwt=False`
I don't get any error; all options that can be disabled are disabled after build and encodings and jwt (as well as depending libraries) are created.
So I guess if `poco` was written in lower case some kind of "option-check" is performed before building Poco. If `Poco` was written in upper case the check is not performed but the options are correctly sent to Conan.
Additionally I tested this by adding arbitrary options
- `poco:enable_foobar=False`
- `Poco:enable_foobar=False`
Both did not make any difference in detecting unsupported options.
I think it would make sense to inform a user
- if upper case namespace options are not available (`Poco:enable_encodings=False`)
- if upper case / lower case options are not available at all (`poco:enable_foobar=False`)
</issue>
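A minimal sketch of the recipe-side declaration those error messages point at — the option names are taken verbatim from the errors, and the `True` defaults match what the accepted patch at the end of this entry uses:

```python
# Sketch only: the two flags missing from recipes/poco/all/conanfile.py,
# declared in the same dictionary style as the recipe's existing options.
options = {
    # ... existing entries ("enable_xml", "enable_json", ...) stay as they are
    "enable_encodings": [True, False],
    "enable_jwt": [True, False],
}
default_options = {
    # ... existing defaults stay as they are
    "enable_encodings": True,
    "enable_jwt": True,   # note: JWT also pulls OpenSSL into requirements()
}
```

Once the flags are declared, `-o poco:enable_encodings=False` is validated against the recipe instead of failing with "'options.enable_encodings' doesn't exist".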
<code>
[start of recipes/poco/all/conanfile.py]
1 import os
2
3 from conans import ConanFile, CMake, tools
4 from conans.errors import ConanInvalidConfiguration
5 from conans.tools import Version
6
7
8 class PocoConan(ConanFile):
9 name = "poco"
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "https://pocoproject.org"
12 topics = ("conan", "poco", "building", "networking", "server", "mobile", "embedded")
13 exports_sources = "CMakeLists.txt"
14 generators = "cmake"
15 settings = "os", "arch", "compiler", "build_type"
16 license = "BSL-1.0"
17 description = "Modern, powerful open source C++ class libraries for building network- and internet-based " \
18 "applications that run on desktop, server, mobile and embedded systems."
19 options = {"shared": [True, False],
20 "fPIC": [True, False],
21 "enable_xml": [True, False],
22 "enable_json": [True, False],
23 "enable_mongodb": [True, False],
24 "enable_pdf": [True, False],
25 "enable_util": [True, False],
26 "enable_net": [True, False],
27 "enable_netssl": [True, False],
28 "enable_netssl_win": [True, False],
29 "enable_crypto": [True, False],
30 "enable_data": [True, False],
31 "enable_data_sqlite": [True, False],
32 "enable_data_mysql": [True, False],
33 "enable_data_odbc": [True, False],
34 "enable_sevenzip": [True, False],
35 "enable_zip": [True, False],
36 "enable_apacheconnector": [True, False],
37 "enable_cppparser": [True, False],
38 "enable_pocodoc": [True, False],
39 "enable_pagecompiler": [True, False],
40 "enable_pagecompiler_file2page": [True, False],
41 "enable_redis": [True, False],
42 "force_openssl": [True, False],
43 "enable_tests": [True, False],
44 "poco_unbundled": [True, False],
45 "cxx_14": [True, False]
46 }
47 default_options = {
48 "shared": False,
49 "fPIC": True,
50 "enable_xml": True,
51 "enable_json": True,
52 "enable_mongodb": True,
53 "enable_pdf": False,
54 "enable_util": True,
55 "enable_net": True,
56 "enable_netssl": True,
57 "enable_netssl_win": True,
58 "enable_crypto": True,
59 "enable_data": True,
60 "enable_data_sqlite": True,
61 "enable_data_mysql": False,
62 "enable_data_odbc": False,
63 "enable_sevenzip": False,
64 "enable_zip": True,
65 "enable_apacheconnector": False,
66 "enable_cppparser": False,
67 "enable_pocodoc": False,
68 "enable_pagecompiler": False,
69 "enable_pagecompiler_file2page": False,
70 "enable_redis": True,
71 "force_openssl": True,
72 "enable_tests": False,
73 "poco_unbundled": False,
74 "cxx_14": False
75 }
76
77 @property
78 def _source_subfolder(self):
79 return "source_subfolder"
80
81 @property
82 def _build_subfolder(self):
83 return "build_subfolder"
84
85 def source(self):
86 tools.get(**self.conan_data["sources"][self.version])
87 extracted_folder = "poco-poco-{}-release".format(self.version)
88 os.rename(extracted_folder, self._source_subfolder)
89
90 def config_options(self):
91 if self.settings.os == "Windows":
92 del self.options.fPIC
93
94 def configure(self):
95 if self.options.enable_apacheconnector:
96 raise ConanInvalidConfiguration("Apache connector not supported: https://github.com/pocoproject/poco/issues/1764")
97 if self.options.enable_data_mysql:
98 raise ConanInvalidConfiguration("MySQL not supported yet, open an issue here please: %s" % self.url)
99
100 def requirements(self):
101 if self.options.enable_netssl or \
102 self.options.enable_netssl_win or \
103 self.options.enable_crypto or \
104 self.options.force_openssl:
105 self.requires.add("openssl/1.0.2t")
106
107 def _patch(self):
108 if self.settings.compiler == "Visual Studio":
109 replace = "POCO_INSTALL_PDB(${target_name})"
110 tools.replace_in_file(os.path.join(self._source_subfolder, "cmake", "PocoMacros.cmake"), replace, "# " + replace)
111 if self.options.shared:
112 self.output.warn("Adding ws2_32 dependency...")
113 replace = 'Net Util Foundation Crypt32.lib'
114 if Version(self.version) >= "1.10.0":
115 replace = 'Poco::Net Poco::Util Crypt32.lib'
116 tools.replace_in_file(os.path.join(self._source_subfolder, "NetSSL_Win", "CMakeLists.txt"), replace, replace + " ws2_32 ")
117
118 replace = 'Foundation ${OPENSSL_LIBRARIES}'
119 if Version(self.version) >= "1.10.0":
120 replace = 'Poco::Foundation OpenSSL::SSL OpenSSL::Crypto'
121 tools.replace_in_file(os.path.join(self._source_subfolder, "Crypto", "CMakeLists.txt"), replace, replace + " ws2_32 Crypt32.lib")
122
123 # Poco 1.9.x - CMAKE_SOURCE_DIR is required in many places
124 os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"), os.path.join(self._source_subfolder, "CMakeListsOriginal.cmake"))
125 os.rename("CMakeLists.txt", os.path.join(self._source_subfolder, "CMakeLists.txt"))
126
127 def _configure_cmake(self):
128 cmake = CMake(self, parallel=None)
129 for option_name in self.options.values.fields:
130 activated = getattr(self.options, option_name)
131 if option_name == "shared":
132 cmake.definitions["POCO_STATIC"] = "OFF" if activated else "ON"
133 elif not option_name == "fPIC":
134 cmake.definitions[option_name.upper()] = "ON" if activated else "OFF"
135
136 cmake.definitions["CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP"] = True
137 if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio": # MT or MTd
138 cmake.definitions["POCO_MT"] = "ON" if "MT" in str(self.settings.compiler.runtime) else "OFF"
139 self.output.info(cmake.definitions)
140 cmake.configure(build_dir=self._build_subfolder, source_dir=os.path.join("..", self._source_subfolder))
141 return cmake
142
143 def build(self):
144 self._patch()
145 cmake = self._configure_cmake()
146 cmake.build()
147
148 def package(self):
149 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
150 cmake = self._configure_cmake()
151 cmake.install()
152 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
153 tools.rmdir(os.path.join(self.package_folder, "cmake"))
154
155 def package_info(self):
156 libs = [("enable_mongodb", "PocoMongoDB"),
157 ("enable_pdf", "PocoPDF"),
158 ("enable_netssl", "PocoNetSSL"),
159 ("enable_netssl_win", "PocoNetSSLWin"),
160 ("enable_net", "PocoNet"),
161 ("enable_crypto", "PocoCrypto"),
162 ("enable_data_sqlite", "PocoDataSQLite"),
163 ("enable_data_mysql", "PocoDataMySQL"),
164 ("enable_data_odbc", "PocoDataODBC"),
165 ("enable_data", "PocoData"),
166 ("enable_sevenzip", "PocoSevenZip"),
167 ("enable_zip", "PocoZip"),
168 ("enable_apacheconnector", "PocoApacheConnector"),
169 ("enable_util", "PocoUtil"),
170 ("enable_xml", "PocoXML"),
171 ("enable_json", "PocoJSON"),
172 ("enable_redis", "PocoRedis")]
173
174 suffix = str(self.settings.compiler.runtime).lower() \
175 if self.settings.compiler == "Visual Studio" and not self.options.shared \
176 else ("d" if self.settings.build_type=="Debug" else "")
177 for flag, lib in libs:
178 if getattr(self.options, flag):
179 if self.settings.os == "Windows" and flag == "enable_netssl" and self.options.enable_netssl_win:
180 continue
181
182 if self.settings.os != "Windows" and flag == "enable_netssl_win":
183 continue
184
185 self.cpp_info.libs.append("%s%s" % (lib, suffix))
186
187 self.cpp_info.libs.append("PocoFoundation%s" % suffix)
188
189 # in linux we need to link also with these libs
190 if self.settings.os == "Linux":
191 self.cpp_info.libs.extend(["pthread", "dl", "rt"])
192
193 if not self.options.shared:
194 self.cpp_info.defines.extend(["POCO_STATIC=ON", "POCO_NO_AUTOMATIC_LIBS"])
195 if self.settings.compiler == "Visual Studio":
196 self.cpp_info.libs.extend(["ws2_32", "Iphlpapi", "Crypt32"])
197 self.cpp_info.names["cmake_find_package"] = "Poco"
198 self.cpp_info.names["cmake_find_package_multi"] = "Poco"
199
[end of recipes/poco/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/poco/all/conanfile.py b/recipes/poco/all/conanfile.py
--- a/recipes/poco/all/conanfile.py
+++ b/recipes/poco/all/conanfile.py
@@ -31,6 +31,8 @@
"enable_data_sqlite": [True, False],
"enable_data_mysql": [True, False],
"enable_data_odbc": [True, False],
+ "enable_encodings": [True, False],
+ "enable_jwt": [True, False],
"enable_sevenzip": [True, False],
"enable_zip": [True, False],
"enable_apacheconnector": [True, False],
@@ -60,6 +62,8 @@
"enable_data_sqlite": True,
"enable_data_mysql": False,
"enable_data_odbc": False,
+ "enable_encodings": True,
+ "enable_jwt": True,
"enable_sevenzip": False,
"enable_zip": True,
"enable_apacheconnector": False,
@@ -101,7 +105,8 @@
if self.options.enable_netssl or \
self.options.enable_netssl_win or \
self.options.enable_crypto or \
- self.options.force_openssl:
+ self.options.force_openssl or \
+ self.options.enable_jwt:
self.requires.add("openssl/1.0.2t")
def _patch(self):
|
{"golden_diff": "diff --git a/recipes/poco/all/conanfile.py b/recipes/poco/all/conanfile.py\n--- a/recipes/poco/all/conanfile.py\n+++ b/recipes/poco/all/conanfile.py\n@@ -31,6 +31,8 @@\n \"enable_data_sqlite\": [True, False],\n \"enable_data_mysql\": [True, False],\n \"enable_data_odbc\": [True, False],\n+ \"enable_encodings\": [True, False],\n+ \"enable_jwt\": [True, False],\n \"enable_sevenzip\": [True, False],\n \"enable_zip\": [True, False],\n \"enable_apacheconnector\": [True, False],\n@@ -60,6 +62,8 @@\n \"enable_data_sqlite\": True,\n \"enable_data_mysql\": False,\n \"enable_data_odbc\": False,\n+ \"enable_encodings\": True,\n+ \"enable_jwt\": True,\n \"enable_sevenzip\": False,\n \"enable_zip\": True,\n \"enable_apacheconnector\": False,\n@@ -101,7 +105,8 @@\n if self.options.enable_netssl or \\\n self.options.enable_netssl_win or \\\n self.options.enable_crypto or \\\n- self.options.force_openssl:\n+ self.options.force_openssl or \\\n+ self.options.enable_jwt:\n self.requires.add(\"openssl/1.0.2t\")\n \n def _patch(self):\n", "issue": "[package] poco/1.10.1: Options not available / not correctly parsed\n### Package and Environment Details\r\n * Package Name/Version: **poco/1.10.1**\r\n * Operating System+version: **Windows 10 64-bit**\r\n * Compiler+version: **Visual Studio 15**\r\n * Conan version: **conan 1.22.2**\r\n * Python version: **Python 3.7.1**\r\n\r\n---\r\n\r\nI'm currently setting up a Poco-example project with Conan and I'm basically disabling every option (project only relies on Poco::Foundation).\r\nHowever, when I use a list of options to disable, which also includes\r\n- `poco:enable_encodings=False`\r\n- `poco:enable_jwt=False`\r\n\r\nI get the errors:\r\n```\r\nERROR: poco/1.10.1: 'options.enable_encodings' doesn't exist\r\n```\r\nor\r\n```\r\nERROR: poco/1.10.1: 'options.enable_jwt' doesn't exist\r\n```\r\n\r\nLooking at the `conanfile.py` that totally makes sense since these options are not available. **Why is that?**\r\n\r\nHowever, If I use upper case options\r\n- `Poco:enable_encodings=False`\r\n- `Poco:enable_jwt=False`\r\n\r\nI don't get any error; all options that can be disabled are disabled after build and encodings and jwt (as well as depending libraries) are created.\r\n\r\nSo I guess if `poco` was written in lower case some kind of \"option-check\" is performed before building Poco. 
If `Poco` was written in upper case the check is not performed but the options are correctly sent to Conan.\r\n\r\nAdditionally I tested this by adding arbitrary options\r\n- `poco:enable_foobar=False`\r\n- `Poco:enable_foobar=False`\r\n\r\nBoth did not make any difference in detecting unsupported options.\r\n\r\nI think it would make sense to inform a user \r\n- if upper case namespace options are not available (`Poco:enable_encodings=False`)\r\n- if upper case / lower case options are not available at all (`poco:enable_foobar=False`)\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nfrom conans.tools import Version\n\n\nclass PocoConan(ConanFile):\n name = \"poco\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://pocoproject.org\"\n topics = (\"conan\", \"poco\", \"building\", \"networking\", \"server\", \"mobile\", \"embedded\")\n exports_sources = \"CMakeLists.txt\"\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n license = \"BSL-1.0\"\n description = \"Modern, powerful open source C++ class libraries for building network- and internet-based \" \\\n \"applications that run on desktop, server, mobile and embedded systems.\"\n options = {\"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_xml\": [True, False],\n \"enable_json\": [True, False],\n \"enable_mongodb\": [True, False],\n \"enable_pdf\": [True, False],\n \"enable_util\": [True, False],\n \"enable_net\": [True, False],\n \"enable_netssl\": [True, False],\n \"enable_netssl_win\": [True, False],\n \"enable_crypto\": [True, False],\n \"enable_data\": [True, False],\n \"enable_data_sqlite\": [True, False],\n \"enable_data_mysql\": [True, False],\n \"enable_data_odbc\": [True, False],\n \"enable_sevenzip\": [True, False],\n \"enable_zip\": [True, False],\n \"enable_apacheconnector\": [True, False],\n \"enable_cppparser\": [True, False],\n \"enable_pocodoc\": [True, False],\n \"enable_pagecompiler\": [True, False],\n \"enable_pagecompiler_file2page\": [True, False],\n \"enable_redis\": [True, False],\n \"force_openssl\": [True, False],\n \"enable_tests\": [True, False],\n \"poco_unbundled\": [True, False],\n \"cxx_14\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_xml\": True,\n \"enable_json\": True,\n \"enable_mongodb\": True,\n \"enable_pdf\": False,\n \"enable_util\": True,\n \"enable_net\": True,\n \"enable_netssl\": True,\n \"enable_netssl_win\": True,\n \"enable_crypto\": True,\n \"enable_data\": True,\n \"enable_data_sqlite\": True,\n \"enable_data_mysql\": False,\n \"enable_data_odbc\": False,\n \"enable_sevenzip\": False,\n \"enable_zip\": True,\n \"enable_apacheconnector\": False,\n \"enable_cppparser\": False,\n \"enable_pocodoc\": False,\n \"enable_pagecompiler\": False,\n \"enable_pagecompiler_file2page\": False,\n \"enable_redis\": True,\n \"force_openssl\": True,\n \"enable_tests\": False,\n \"poco_unbundled\": False,\n \"cxx_14\": False\n }\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_folder = \"poco-poco-{}-release\".format(self.version)\n os.rename(extracted_folder, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n 
if self.options.enable_apacheconnector:\n raise ConanInvalidConfiguration(\"Apache connector not supported: https://github.com/pocoproject/poco/issues/1764\")\n if self.options.enable_data_mysql:\n raise ConanInvalidConfiguration(\"MySQL not supported yet, open an issue here please: %s\" % self.url)\n\n def requirements(self):\n if self.options.enable_netssl or \\\n self.options.enable_netssl_win or \\\n self.options.enable_crypto or \\\n self.options.force_openssl:\n self.requires.add(\"openssl/1.0.2t\")\n\n def _patch(self):\n if self.settings.compiler == \"Visual Studio\":\n replace = \"POCO_INSTALL_PDB(${target_name})\"\n tools.replace_in_file(os.path.join(self._source_subfolder, \"cmake\", \"PocoMacros.cmake\"), replace, \"# \" + replace)\n if self.options.shared:\n self.output.warn(\"Adding ws2_32 dependency...\")\n replace = 'Net Util Foundation Crypt32.lib'\n if Version(self.version) >= \"1.10.0\":\n replace = 'Poco::Net Poco::Util Crypt32.lib'\n tools.replace_in_file(os.path.join(self._source_subfolder, \"NetSSL_Win\", \"CMakeLists.txt\"), replace, replace + \" ws2_32 \")\n\n replace = 'Foundation ${OPENSSL_LIBRARIES}'\n if Version(self.version) >= \"1.10.0\":\n replace = 'Poco::Foundation OpenSSL::SSL OpenSSL::Crypto'\n tools.replace_in_file(os.path.join(self._source_subfolder, \"Crypto\", \"CMakeLists.txt\"), replace, replace + \" ws2_32 Crypt32.lib\")\n\n # Poco 1.9.x - CMAKE_SOURCE_DIR is required in many places\n os.rename(os.path.join(self._source_subfolder, \"CMakeLists.txt\"), os.path.join(self._source_subfolder, \"CMakeListsOriginal.cmake\"))\n os.rename(\"CMakeLists.txt\", os.path.join(self._source_subfolder, \"CMakeLists.txt\"))\n\n def _configure_cmake(self):\n cmake = CMake(self, parallel=None)\n for option_name in self.options.values.fields:\n activated = getattr(self.options, option_name)\n if option_name == \"shared\":\n cmake.definitions[\"POCO_STATIC\"] = \"OFF\" if activated else \"ON\"\n elif not option_name == \"fPIC\":\n cmake.definitions[option_name.upper()] = \"ON\" if activated else \"OFF\"\n\n cmake.definitions[\"CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP\"] = True\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\": # MT or MTd\n cmake.definitions[\"POCO_MT\"] = \"ON\" if \"MT\" in str(self.settings.compiler.runtime) else \"OFF\"\n self.output.info(cmake.definitions)\n cmake.configure(build_dir=self._build_subfolder, source_dir=os.path.join(\"..\", self._source_subfolder))\n return cmake\n\n def build(self):\n self._patch()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n\n def package_info(self):\n libs = [(\"enable_mongodb\", \"PocoMongoDB\"),\n (\"enable_pdf\", \"PocoPDF\"),\n (\"enable_netssl\", \"PocoNetSSL\"),\n (\"enable_netssl_win\", \"PocoNetSSLWin\"),\n (\"enable_net\", \"PocoNet\"),\n (\"enable_crypto\", \"PocoCrypto\"),\n (\"enable_data_sqlite\", \"PocoDataSQLite\"),\n (\"enable_data_mysql\", \"PocoDataMySQL\"),\n (\"enable_data_odbc\", \"PocoDataODBC\"),\n (\"enable_data\", \"PocoData\"),\n (\"enable_sevenzip\", \"PocoSevenZip\"),\n (\"enable_zip\", \"PocoZip\"),\n (\"enable_apacheconnector\", \"PocoApacheConnector\"),\n (\"enable_util\", \"PocoUtil\"),\n (\"enable_xml\", \"PocoXML\"),\n (\"enable_json\", \"PocoJSON\"),\n (\"enable_redis\", 
\"PocoRedis\")]\n\n suffix = str(self.settings.compiler.runtime).lower() \\\n if self.settings.compiler == \"Visual Studio\" and not self.options.shared \\\n else (\"d\" if self.settings.build_type==\"Debug\" else \"\")\n for flag, lib in libs:\n if getattr(self.options, flag):\n if self.settings.os == \"Windows\" and flag == \"enable_netssl\" and self.options.enable_netssl_win:\n continue\n\n if self.settings.os != \"Windows\" and flag == \"enable_netssl_win\":\n continue\n\n self.cpp_info.libs.append(\"%s%s\" % (lib, suffix))\n\n self.cpp_info.libs.append(\"PocoFoundation%s\" % suffix)\n\n # in linux we need to link also with these libs\n if self.settings.os == \"Linux\":\n self.cpp_info.libs.extend([\"pthread\", \"dl\", \"rt\"])\n\n if not self.options.shared:\n self.cpp_info.defines.extend([\"POCO_STATIC=ON\", \"POCO_NO_AUTOMATIC_LIBS\"])\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.libs.extend([\"ws2_32\", \"Iphlpapi\", \"Crypt32\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Poco\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Poco\"\n", "path": "recipes/poco/all/conanfile.py"}]}
| 3,551 | 318 |
gh_patches_debug_2479
|
rasdani/github-patches
|
git_diff
|
boto__boto-2475
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VPC Peering Connection "delete()" calls wrong method
The "delete()" method of VpcPeeringConnection calls "self.connection.delete_vpc(self.id)" instead of "self.connection.delete_vpc_peering_connection(self.id)"
**File:** boto/vpc/vpc_peering_connection.py
</issue>
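The fix implied here is a one-line change inside `VpcPeeringConnection` — a sketch of the corrected method, using the EC2 call named in the issue:

```python
# boto/vpc/vpc_peering_connection.py, class VpcPeeringConnection (sketch)
def delete(self):
    # DeleteVpcPeeringConnection is the intended API call; delete_vpc would
    # target the VPC itself rather than the peering connection.
    return self.connection.delete_vpc_peering_connection(self.id)
```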
<code>
[start of boto/vpc/vpc_peering_connection.py]
1 # Copyright (c) 2014 Skytap http://skytap.com/
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a VPC Peering Connection.
24 """
25
26 from boto.ec2.ec2object import TaggedEC2Object
27
28 class VpcInfo(object):
29 def __init__(self):
30 """
31 Information on peer Vpc.
32
33 :ivar id: The unique ID of peer Vpc.
34 :ivar owner_id: Owner of peer Vpc.
35 :ivar cidr_block: CIDR Block of peer Vpc.
36 """
37
38 self.vpc_id = None
39 self.owner_id = None
40 self.cidr_block = None
41
42 def __repr__(self):
43 return 'VpcInfo:%s' % self.vpc_id
44
45 def startElement(self, name, attrs, connection):
46 pass
47
48 def endElement(self, name, value, connection):
49 if name == 'vpcId':
50 self.vpc_id = value
51 elif name == 'ownerId':
52 self.owner_id = value
53 elif name == 'cidrBlock':
54 self.cidr_block = value
55 else:
56 setattr(self, name, value)
57
58 class VpcPeeringConnectionStatus(object):
59 """
60 The status of VPC peering connection.
61
62 :ivar code: The status of the VPC peering connection. Valid values are:
63
64 * pending-acceptance
65 * failed
66 * expired
67 * provisioning
68 * active
69 * deleted
70 * rejected
71
72 :ivar message: A message that provides more information about the status of the VPC peering connection, if applicable.
73 """
74 def __init__(self, code=0, message=None):
75 self.code = code
76 self.message = message
77
78 def __repr__(self):
79 return '%s(%d)' % (self.code, self.message)
80
81 def startElement(self, name, attrs, connection):
82 pass
83
84 def endElement(self, name, value, connection):
85 if name == 'code':
86 self.code = value
87 elif name == 'message':
88 self.message = value
89 else:
90 setattr(self, name, value)
91
92
93
94 class VpcPeeringConnection(TaggedEC2Object):
95
96 def __init__(self, connection=None):
97 """
98 Represents a VPC peering connection.
99
100 :ivar id: The unique ID of the VPC peering connection.
101 :ivar accepter_vpc_info: Information on peer Vpc.
102 :ivar requester_vpc_info: Information on requester Vpc.
103 :ivar expiration_time: The expiration date and time for the VPC peering connection.
104 :ivar status_code: The status of the VPC peering connection.
105 :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.
106 """
107 super(VpcPeeringConnection, self).__init__(connection)
108 self.id = None
109 self.accepter_vpc_info = VpcInfo()
110 self.requester_vpc_info = VpcInfo()
111 self.expiration_time = None
112 self._status = VpcPeeringConnectionStatus()
113
114 @property
115 def status_code(self):
116 return self._status.code
117
118 @property
119 def status_message(self):
120 return self._status.message
121
122 def __repr__(self):
123 return 'VpcPeeringConnection:%s' % self.id
124
125 def startElement(self, name, attrs, connection):
126 retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection)
127 if retval is not None:
128 return retval
129
130 if name == 'requesterVpcInfo':
131 return self.requester_vpc_info
132 elif name == 'accepterVpcInfo':
133 return self.accepter_vpc_info
134 elif name == 'status':
135 return self._status
136
137 return None
138
139 def endElement(self, name, value, connection):
140 if name == 'vpcPeeringConnectionId':
141 self.id = value
142 elif name == 'expirationTime':
143 self.expiration_time = value
144 else:
145 setattr(self, name, value)
146
147 def delete(self):
148 return self.connection.delete_vpc(self.id)
149
150 def _update(self, updated):
151 self.__dict__.update(updated.__dict__)
152
153 def update(self, validate=False, dry_run=False):
154 vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections(
155 [self.id],
156 dry_run=dry_run
157 )
158 if len(vpc_peering_connection_list):
159 updated_vpc_peering_connection = vpc_peering_connection_list[0]
160 self._update(updated_vpc_peering_connection)
161 elif validate:
162 raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,))
163 return self.status_code
164
[end of boto/vpc/vpc_peering_connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/boto/vpc/vpc_peering_connection.py b/boto/vpc/vpc_peering_connection.py
--- a/boto/vpc/vpc_peering_connection.py
+++ b/boto/vpc/vpc_peering_connection.py
@@ -145,7 +145,7 @@
setattr(self, name, value)
def delete(self):
- return self.connection.delete_vpc(self.id)
+ return self.connection.delete_vpc_peering_connection(self.id)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
|
{"golden_diff": "diff --git a/boto/vpc/vpc_peering_connection.py b/boto/vpc/vpc_peering_connection.py\n--- a/boto/vpc/vpc_peering_connection.py\n+++ b/boto/vpc/vpc_peering_connection.py\n@@ -145,7 +145,7 @@\n setattr(self, name, value)\n \n def delete(self):\n- return self.connection.delete_vpc(self.id)\n+ return self.connection.delete_vpc_peering_connection(self.id)\n \n def _update(self, updated):\n self.__dict__.update(updated.__dict__)\n", "issue": "VPC Peering Connection \"delete()\" calls wrong method\nThe \"delete()\" method of VpcPeeringConnection calls \"self.connection.delete_vpc(self.id)\" instead of \"self.connection.delete_vpc_peering_connection(self.id)\"\n\n**File:** boto/vpc/vpc_peering_connection.py\n\n", "before_files": [{"content": "# Copyright (c) 2014 Skytap http://skytap.com/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a VPC Peering Connection.\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\n\nclass VpcInfo(object):\n def __init__(self):\n \"\"\"\n Information on peer Vpc.\n \n :ivar id: The unique ID of peer Vpc.\n :ivar owner_id: Owner of peer Vpc.\n :ivar cidr_block: CIDR Block of peer Vpc.\n \"\"\"\n\n self.vpc_id = None\n self.owner_id = None\n self.cidr_block = None\n\n def __repr__(self):\n return 'VpcInfo:%s' % self.vpc_id\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'vpcId':\n self.vpc_id = value\n elif name == 'ownerId':\n self.owner_id = value\n elif name == 'cidrBlock':\n self.cidr_block = value\n else:\n setattr(self, name, value)\n\nclass VpcPeeringConnectionStatus(object):\n \"\"\"\n The status of VPC peering connection.\n\n :ivar code: The status of the VPC peering connection. 
Valid values are:\n\n * pending-acceptance\n * failed\n * expired\n * provisioning\n * active\n * deleted\n * rejected\n\n :ivar message: A message that provides more information about the status of the VPC peering connection, if applicable.\n \"\"\"\n def __init__(self, code=0, message=None):\n self.code = code\n self.message = message\n\n def __repr__(self):\n return '%s(%d)' % (self.code, self.message)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'code':\n self.code = value\n elif name == 'message':\n self.message = value\n else:\n setattr(self, name, value)\n\n \n\nclass VpcPeeringConnection(TaggedEC2Object):\n\n def __init__(self, connection=None):\n \"\"\"\n Represents a VPC peering connection.\n\n :ivar id: The unique ID of the VPC peering connection.\n :ivar accepter_vpc_info: Information on peer Vpc.\n :ivar requester_vpc_info: Information on requester Vpc.\n :ivar expiration_time: The expiration date and time for the VPC peering connection.\n :ivar status_code: The status of the VPC peering connection.\n :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.\n \"\"\"\n super(VpcPeeringConnection, self).__init__(connection)\n self.id = None\n self.accepter_vpc_info = VpcInfo()\n self.requester_vpc_info = VpcInfo()\n self.expiration_time = None\n self._status = VpcPeeringConnectionStatus()\n\n @property\n def status_code(self):\n return self._status.code\n\n @property\n def status_message(self):\n return self._status.message\n\n def __repr__(self):\n return 'VpcPeeringConnection:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection)\n if retval is not None:\n return retval\n \n if name == 'requesterVpcInfo':\n return self.requester_vpc_info\n elif name == 'accepterVpcInfo':\n return self.accepter_vpc_info\n elif name == 'status':\n return self._status\n\n return None\n\n def endElement(self, name, value, connection):\n if name == 'vpcPeeringConnectionId':\n self.id = value\n elif name == 'expirationTime':\n self.expiration_time = value\n else:\n setattr(self, name, value)\n\n def delete(self):\n return self.connection.delete_vpc(self.id)\n\n def _update(self, updated):\n self.__dict__.update(updated.__dict__)\n\n def update(self, validate=False, dry_run=False):\n vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections(\n [self.id],\n dry_run=dry_run\n )\n if len(vpc_peering_connection_list):\n updated_vpc_peering_connection = vpc_peering_connection_list[0]\n self._update(updated_vpc_peering_connection)\n elif validate:\n raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,))\n return self.status_code\n", "path": "boto/vpc/vpc_peering_connection.py"}]}
| 2,257 | 124 |
gh_patches_debug_37928
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3315
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider lenscrafters is broken
During the global build at 2021-08-25-14-42-15, spider **lenscrafters** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/lenscrafters.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson))
</issue>
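A sudden drop to 0 features usually means the page markup changed underneath the spider's selectors; the drift can be confirmed in Scrapy's interactive shell before editing the code (the second XPath below is the one the patch in this entry switches to):

```python
# After running: scrapy shell https://local.lenscrafters.com/
old = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
new = response.xpath('//a[@class="Directory-listLink Link--directory"]/@href').extract()
print(len(old), len(new))   # 0 vs. non-zero shows the old selector no longer matches
```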
<code>
[start of locations/spiders/lenscrafters.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class LensCraftersSpider(scrapy.Spider):
12 name = "lenscrafters"
13 item_attributes = { 'brand': "Lenscrafters" }
14 allowed_domains = ['local.lenscrafters.com']
15 start_urls = [
16 'https://local.lenscrafters.com/'
17 ]
18
19 def parse_hours(self, hours):
20 opening_hours = OpeningHours()
21 for group in hours:
22 if "Closed" in group:
23 pass
24 else:
25 days, open_time, close_time = re.search(r'([a-zA-Z,]+)\s([\d:]+)-([\d:]+)', group).groups()
26 days = days.split(',')
27 for day in days:
28 opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')
29
30 return opening_hours.as_opening_hours()
31
32 def parse(self, response):
33 urls = response.xpath(
34 '//a[@class="c-directory-list-content-item-link" or @class="c-location-grid-item-link"]/@href').extract()
35 # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page
36 if len(urls) == 0:
37 properties = {
38 'name': response.xpath('//*[@class="location-name h1-normal"]/text()').extract_first(),
39 'addr_full': response.xpath('//*[@class="c-address-street-1"]/text()').extract_first(),
40 'city': response.xpath('//*[@class="c-address-city"]/text()').extract_first(),
41 'state': response.xpath('//*[@class="c-address-state"]/text()').extract_first(),
42 'postcode': response.xpath('//*[@class="c-address-postal-code"]/text()').extract_first(),
43 'phone': response.xpath('//*[@id="phone-main"]/text()').extract_first(),
44 'ref': "_".join(re.search(r".+/(.+?)/(.+?)/(.+?)/?(?:\.html|$)", response.url).groups()),
45 'website': response.url,
46 'lat': response.xpath('//*[@itemprop="latitude"]/@content').extract_first(),
47 'lon': response.xpath('//*[@itemprop="longitude"]/@content').extract_first(),
48 }
49
50 hours = self.parse_hours(response.xpath('//*[@itemprop="openingHours"]/@content').extract())
51 if hours:
52 properties["opening_hours"] = hours
53
54 yield GeojsonPointItem(**properties)
55 else:
56 for path in urls:
57 yield scrapy.Request(url=response.urljoin(path), callback=self.parse)
58
[end of locations/spiders/lenscrafters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/lenscrafters.py b/locations/spiders/lenscrafters.py
--- a/locations/spiders/lenscrafters.py
+++ b/locations/spiders/lenscrafters.py
@@ -10,7 +10,7 @@
class LensCraftersSpider(scrapy.Spider):
name = "lenscrafters"
- item_attributes = { 'brand': "Lenscrafters" }
+ item_attributes = {'brand': "Lenscrafters"}
allowed_domains = ['local.lenscrafters.com']
start_urls = [
'https://local.lenscrafters.com/'
@@ -30,21 +30,21 @@
return opening_hours.as_opening_hours()
def parse(self, response):
- urls = response.xpath(
- '//a[@class="c-directory-list-content-item-link" or @class="c-location-grid-item-link"]/@href').extract()
- # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page
+ urls = response.xpath('//a[@class="Directory-listLink Link--directory"]/@href').extract()
+
+ # If cannot find 'Directory-listLink Link--directory' then this is a store page
if len(urls) == 0:
properties = {
- 'name': response.xpath('//*[@class="location-name h1-normal"]/text()').extract_first(),
- 'addr_full': response.xpath('//*[@class="c-address-street-1"]/text()').extract_first(),
- 'city': response.xpath('//*[@class="c-address-city"]/text()').extract_first(),
- 'state': response.xpath('//*[@class="c-address-state"]/text()').extract_first(),
- 'postcode': response.xpath('//*[@class="c-address-postal-code"]/text()').extract_first(),
- 'phone': response.xpath('//*[@id="phone-main"]/text()').extract_first(),
- 'ref': "_".join(re.search(r".+/(.+?)/(.+?)/(.+?)/?(?:\.html|$)", response.url).groups()),
- 'website': response.url,
- 'lat': response.xpath('//*[@itemprop="latitude"]/@content').extract_first(),
- 'lon': response.xpath('//*[@itemprop="longitude"]/@content').extract_first(),
+ 'name': response.xpath('//h1[@id="location-name"]/text()').extract_first(),
+ 'addr_full': response.xpath('//span[@class="c-address-street-1"]/text()').extract_first(),
+ 'city': response.xpath('//span[@class="c-address-city"]/text()').extract_first(),
+ 'state': response.xpath('//abbr[@class="c-address-state"]/text()').extract_first(),
+ 'postcode': response.xpath('//span[@class="c-address-postal-code"]/text()').extract_first(),
+ 'phone': response.xpath('//div[@id="phone-main"]/text()').extract_first(),
+ 'ref': response.xpath('//link[@rel="canonical"]/@href').extract_first(),
+ 'website': response.xpath('//link[@rel="canonical"]/@href').extract_first(),
+ 'lat': response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
+ 'lon': response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
}
hours = self.parse_hours(response.xpath('//*[@itemprop="openingHours"]/@content').extract())
|
{"golden_diff": "diff --git a/locations/spiders/lenscrafters.py b/locations/spiders/lenscrafters.py\n--- a/locations/spiders/lenscrafters.py\n+++ b/locations/spiders/lenscrafters.py\n@@ -10,7 +10,7 @@\n \n class LensCraftersSpider(scrapy.Spider):\n name = \"lenscrafters\"\n- item_attributes = { 'brand': \"Lenscrafters\" }\n+ item_attributes = {'brand': \"Lenscrafters\"}\n allowed_domains = ['local.lenscrafters.com']\n start_urls = [\n 'https://local.lenscrafters.com/'\n@@ -30,21 +30,21 @@\n return opening_hours.as_opening_hours()\n \n def parse(self, response):\n- urls = response.xpath(\n- '//a[@class=\"c-directory-list-content-item-link\" or @class=\"c-location-grid-item-link\"]/@href').extract()\n- # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page\n+ urls = response.xpath('//a[@class=\"Directory-listLink Link--directory\"]/@href').extract()\n+\n+ # If cannot find 'Directory-listLink Link--directory' then this is a store page\n if len(urls) == 0:\n properties = {\n- 'name': response.xpath('//*[@class=\"location-name h1-normal\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//*[@class=\"c-address-street-1\"]/text()').extract_first(),\n- 'city': response.xpath('//*[@class=\"c-address-city\"]/text()').extract_first(),\n- 'state': response.xpath('//*[@class=\"c-address-state\"]/text()').extract_first(),\n- 'postcode': response.xpath('//*[@class=\"c-address-postal-code\"]/text()').extract_first(),\n- 'phone': response.xpath('//*[@id=\"phone-main\"]/text()').extract_first(),\n- 'ref': \"_\".join(re.search(r\".+/(.+?)/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).groups()),\n- 'website': response.url,\n- 'lat': response.xpath('//*[@itemprop=\"latitude\"]/@content').extract_first(),\n- 'lon': response.xpath('//*[@itemprop=\"longitude\"]/@content').extract_first(),\n+ 'name': response.xpath('//h1[@id=\"location-name\"]/text()').extract_first(),\n+ 'addr_full': response.xpath('//span[@class=\"c-address-street-1\"]/text()').extract_first(),\n+ 'city': response.xpath('//span[@class=\"c-address-city\"]/text()').extract_first(),\n+ 'state': response.xpath('//abbr[@class=\"c-address-state\"]/text()').extract_first(),\n+ 'postcode': response.xpath('//span[@class=\"c-address-postal-code\"]/text()').extract_first(),\n+ 'phone': response.xpath('//div[@id=\"phone-main\"]/text()').extract_first(),\n+ 'ref': response.xpath('//link[@rel=\"canonical\"]/@href').extract_first(),\n+ 'website': response.xpath('//link[@rel=\"canonical\"]/@href').extract_first(),\n+ 'lat': response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n+ 'lon': response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n }\n \n hours = self.parse_hours(response.xpath('//*[@itemprop=\"openingHours\"]/@content').extract())\n", "issue": "Spider lenscrafters is broken\nDuring the global build at 2021-08-25-14-42-15, spider **lenscrafters** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/lenscrafters.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/lenscrafters.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass 
LensCraftersSpider(scrapy.Spider):\n name = \"lenscrafters\"\n item_attributes = { 'brand': \"Lenscrafters\" }\n allowed_domains = ['local.lenscrafters.com']\n start_urls = [\n 'https://local.lenscrafters.com/'\n ]\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n for group in hours:\n if \"Closed\" in group:\n pass\n else:\n days, open_time, close_time = re.search(r'([a-zA-Z,]+)\\s([\\d:]+)-([\\d:]+)', group).groups()\n days = days.split(',')\n for day in days:\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath(\n '//a[@class=\"c-directory-list-content-item-link\" or @class=\"c-location-grid-item-link\"]/@href').extract()\n # If cannot find 'c-directory-list-content-item-link' or 'c-location-grid-item-link' then this is a store page\n if len(urls) == 0:\n properties = {\n 'name': response.xpath('//*[@class=\"location-name h1-normal\"]/text()').extract_first(),\n 'addr_full': response.xpath('//*[@class=\"c-address-street-1\"]/text()').extract_first(),\n 'city': response.xpath('//*[@class=\"c-address-city\"]/text()').extract_first(),\n 'state': response.xpath('//*[@class=\"c-address-state\"]/text()').extract_first(),\n 'postcode': response.xpath('//*[@class=\"c-address-postal-code\"]/text()').extract_first(),\n 'phone': response.xpath('//*[@id=\"phone-main\"]/text()').extract_first(),\n 'ref': \"_\".join(re.search(r\".+/(.+?)/(.+?)/(.+?)/?(?:\\.html|$)\", response.url).groups()),\n 'website': response.url,\n 'lat': response.xpath('//*[@itemprop=\"latitude\"]/@content').extract_first(),\n 'lon': response.xpath('//*[@itemprop=\"longitude\"]/@content').extract_first(),\n }\n\n hours = self.parse_hours(response.xpath('//*[@itemprop=\"openingHours\"]/@content').extract())\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n else:\n for path in urls:\n yield scrapy.Request(url=response.urljoin(path), callback=self.parse)\n", "path": "locations/spiders/lenscrafters.py"}]}
| 1,408 | 741 |
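As an illustrative aside to the lenscrafters row above: the golden diff amounts to one common store-locator pattern, follow directory links until a page has none, then scrape the leaf page. The sketch below shows that pattern in isolation; the class name, start URL, and selectors are assumptions for illustration, not the project's actual spider.

```python
# Directory-vs-store-page pattern: recurse through directory links, scrape
# details only when no further directory links are found on the page.
import scrapy


class StoreLocatorExample(scrapy.Spider):
    name = "store_locator_example"
    start_urls = ["https://example.com/stores/"]  # hypothetical start page

    def parse(self, response):
        links = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
        if links:
            # Directory page: follow every listed link with the same callback.
            for href in links:
                yield scrapy.Request(response.urljoin(href), callback=self.parse)
        else:
            # Leaf store page: emit the scraped fields.
            yield {
                "name": response.xpath('//h1[@id="location-name"]/text()').extract_first(),
                "ref": response.xpath('//link[@rel="canonical"]/@href').extract_first(),
                "lat": response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
                "lon": response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
            }
```

Branching on whether the directory-link selector matched anything is the same leaf-page test the patched spider keeps; only the selector strings changed with the site redesign.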
gh_patches_debug_43015
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-425
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement `--dry-run` / `-n` option in CLI
We should have a `--dry-run`/`-n` option in the CLI which will print information about the commands to be run, but not actually run them. The CLI option exists, but needs to be implemented in the code.
</issue>
<code>
[start of src/rastervision/cli/main.py]
1 """Raster Vision main program"""
2 import sys
3 from tempfile import TemporaryDirectory
4
5 import click
6
7 import rastervision as rv
8 from rastervision.experiment import (ExperimentLoader, LoaderError)
9 from rastervision.runner import (ExperimentRunner)
10
11
12 def print_error(msg):
13 click.echo(click.style(msg, fg='red'), err=True)
14
15
16 @click.group()
17 @click.option(
18 '--profile', '-p', help='Sets the configuration profile name to use.')
19 def main(profile):
20 # Initialize configuration
21 if profile:
22 rv._registry.initialize_config(profile=profile)
23
24
25 @main.command(
26 'run', short_help='Run Raster Vision commands against Experiments.')
27 @click.argument('runner')
28 @click.argument('commands', nargs=-1)
29 @click.option(
30 '--experiment_module',
31 '-e',
32 help=('Name of an importable module to look for experiment sets '
33 'in. If not supplied, experiments will be loaded '
34 'from __main__'))
35 @click.option(
36 '--dry-run',
37 '-n',
38 is_flag=True,
39 help=('Execute a dry run, which will print out information '
40 'about the commands to be run, but will not actually '
41 'run the commands'))
42 @click.option(
43 '--skip-file-check',
44 '-x',
45 is_flag=True,
46 help=('Skip the step that verifies that file exist.'))
47 @click.option(
48 '--arg',
49 '-a',
50 type=(str, str),
51 multiple=True,
52 metavar='KEY VALUE',
53 help=('Pass a parameter to the experiments if the method '
54 'parameter list takes in a parameter with that key. '
55 'Multiple args can be supplied'))
56 @click.option(
57 '--prefix',
58 metavar='PREFIX',
59 default='exp_',
60 help=('Prefix for methods containing experiments. (default: "exp_")'))
61 @click.option(
62 '--method',
63 '-m',
64 'methods',
65 multiple=True,
66 metavar='PATTERN',
67 help=('Pattern to match method names to run.'))
68 @click.option(
69 '--filter',
70 '-f',
71 'filters',
72 multiple=True,
73 metavar='PATTERN',
74 help=('Pattern to match experiment names to run.'))
75 @click.option(
76 '--rerun',
77 '-r',
78 is_flag=True,
79 default=False,
80 help=('Rerun commands, regardless if '
81 'their output files already exist.'))
82 def run(runner, commands, experiment_module, dry_run, skip_file_check, arg,
83 prefix, methods, filters, rerun):
84 """Run Raster Vision commands from experiments, using the
85 experiment runner named RUNNER."""
86 # Validate runner
87 valid_runners = list(
88 map(lambda x: x.lower(), rv.ExperimentRunner.list_runners()))
89 if runner not in valid_runners:
90 print_error('Invalid experiment runner: "{}". '
91 'Must be one of: "{}"'.format(runner,
92 '", "'.join(valid_runners)))
93 sys.exit(1)
94
95 runner = ExperimentRunner.get_runner(runner)
96
97 if experiment_module:
98 module_to_load = experiment_module
99 else:
100 module_to_load = '__main__'
101
102 if not commands:
103 commands = rv.ALL_COMMANDS
104 else:
105 commands = list(map(lambda x: x.upper(), commands))
106
107 experiment_args = {}
108 for k, v in arg:
109 experiment_args[k] = v
110
111 loader = ExperimentLoader(
112 experiment_args=experiment_args,
113 experiment_method_prefix=prefix,
114 experiment_method_patterns=methods,
115 experiment_name_patterns=filters)
116 try:
117 experiments = loader.load_from_module(module_to_load)
118 except LoaderError as e:
119 print_error(str(e))
120 sys.exit(1)
121
122 if not experiments:
123 if experiment_module:
124 print_error(
125 'No experiments found in {}.'.format(experiment_module))
126 else:
127 print_error('No experiments found.')
128
129 runner.run(
130 experiments,
131 commands_to_run=commands,
132 rerun_commands=rerun,
133 skip_file_check=skip_file_check)
134
135
136 @main.command()
137 @click.option(
138 '--experiment_module',
139 '-e',
140 help=('Name of an importable module to look for experiment sets '
141 'in. If not supplied, experiments will be loaded '
142 'from __main__'))
143 @click.option(
144 '--arg',
145 '-a',
146 type=(str, str),
147 multiple=True,
148 metavar='KEY VALUE',
149 help=('Pass a parameter to the experiments if the method '
150 'parameter list takes in a parameter with that key. '
151 'Multiple args can be supplied'))
152 def ls(experiment_module, arg):
153 """Print out a list of Experiment IDs."""
154 if experiment_module:
155 module_to_load = experiment_module
156 else:
157 module_to_load = '__main__'
158
159 experiment_args = {}
160 for k, v in arg:
161 experiment_args[k] = v
162
163 loader = ExperimentLoader(experiment_args=experiment_args)
164 try:
165 experiments = loader.load_from_module(module_to_load)
166 except LoaderError as e:
167 print_error(str(e))
168 sys.exit(1)
169
170 if not experiments:
171 if experiment_module:
172 print_error(
173 'No experiments found in {}.'.format(experiment_module))
174 else:
175 print_error('No experiments found.')
176
177 for e in experiments:
178 click.echo('{}'.format(e.id))
179
180
181 @main.command(
182 'predict', short_help='Make predictions using a predict package.')
183 @click.argument('predict_package', type=click.Path(exists=True))
184 @click.argument('image_uri', type=click.Path(exists=True))
185 @click.argument('output_uri', type=click.Path(exists=False))
186 @click.option(
187 '--update_stats',
188 '-a',
189 is_flag=True,
190 help=('Run an analysis on this individual image, as '
191 'opposed to using any analysis like statistics '
192 'that exist in the prediction package'))
193 @click.option(
194 '--channel-order',
195 help='String containing channel_order.' + ' Example: \"2 1 0\"')
196 def predict(predict_package, image_uri, output_uri, update_stats,
197 channel_order):
198 """Make predictions on the image at IMAGE_URI
199 using PREDICT_PACKAGE and store the
200 prediciton output at OUTPUT_URI.
201 """
202 if channel_order is not None:
203 channel_order = [
204 int(channel_ind) for channel_ind in channel_order.split(' ')
205 ]
206 with TemporaryDirectory() as tmp_dir:
207 predict = rv.Predictor(predict_package, tmp_dir, update_stats,
208 channel_order).predict
209 predict(image_uri, output_uri)
210
211
212 @main.command(
213 'run_command', short_help='Run a command from configuration file.')
214 @click.argument('command_config_uri')
215 def run_command(command_config_uri):
216 """Run a command from a serialized command configuration
217 at COMMAND_CONFIG_URI.
218 """
219 rv.runner.CommandRunner.run(command_config_uri)
220
221
222 if __name__ == '__main__':
223 main()
224
[end of src/rastervision/cli/main.py]
[start of src/rastervision/runner/experiment_runner.py]
1 from abc import (ABC, abstractmethod)
2 from typing import (List, Union)
3
4 import rastervision as rv
5 from rastervision.runner import (CommandDefinition, CommandDAG)
6
7
8 class ExperimentRunner(ABC):
9 def run(self,
10 experiments: Union[List[rv.ExperimentConfig], rv.ExperimentConfig],
11 commands_to_run=rv.ALL_COMMANDS,
12 rerun_commands=False,
13 skip_file_check=False):
14 if not isinstance(experiments, list):
15 experiments = [experiments]
16
17 command_definitions = CommandDefinition.from_experiments(experiments)
18
19 # Filter out commands we aren't running.
20 command_definitions = CommandDefinition.filter_commands(
21 command_definitions, commands_to_run)
22
23 # Check if there are any unsatisfied inputs.
24 missing_inputs = CommandDefinition.get_missing_inputs(
25 command_definitions)
26 if missing_inputs:
27 # TODO: Replace with logging?
28 s = ''
29 for exp_id in missing_inputs:
30 s += 'In {}:\n\t{}\n'.format(
31 exp_id, '\t{}\n'.join(missing_inputs[exp_id]))
32
33 raise rv.ConfigError('There were missing input URIs '
34 'that are required, but were not '
35 'able to be derived: \n{}'.format(s))
36
37 # Remove duplicate commands, defining equality for a command by
38 # the tuple (command_type, input_uris, output_uris)
39 (unique_commands, skipped_commands
40 ) = CommandDefinition.remove_duplicates(command_definitions)
41
42 # Ensure that for each type of command, there are none that clobber
43 # each other's output.
44 clashing_commands = CommandDefinition.get_clashing_commands(
45 unique_commands)
46
47 if clashing_commands:
48 clashing_msgs = []
49 for (output_uri, c_defs) in clashing_commands:
50 command_type = c_defs[0].command_config.command_type
51 experiments = ', '.join(map(lambda c: c.experiment_id, c_defs))
52 clashing_msgs.append(
53 'The {} command in the follwoing experiments '
54 'output {}, but are not equal: {}'.format(
55 command_type, output_uri, experiments))
56 # TODO: Replace with logging?
57 s = '\t\n'.join(clashing_msgs)
58
59 raise rv.ConfigurationError(
60 'ERROR: Command outputs will'
61 'override each other: \n{}\n'.format(s))
62
63 command_dag = CommandDAG(
64 unique_commands, rerun_commands, skip_file_check=skip_file_check)
65
66 if skipped_commands or command_dag.skipped_commands:
67 # TODO: Report skipped commands, either in the dry run or elsewhere.
68 pass
69
70 # Save experiment configs
71 experiments_by_id = dict(map(lambda e: (e.id, e), experiments))
72 seen_ids = set([])
73 for command_def in command_dag.get_command_definitions():
74 if command_def.experiment_id not in seen_ids:
75 seen_ids.add(command_def.experiment_id)
76 experiment = experiments_by_id[command_def.experiment_id]
77 experiment.fully_resolve().save_config()
78
79 self._run_experiment(command_dag)
80
81 @abstractmethod
82 def _run_experiment(self, command_dag):
83 pass
84
85 @staticmethod
86 def get_runner(runner_type):
87 """Gets the runner associated with this runner type."""
88 # Runner keys are upper cased.
89 return rv._registry.get_experiment_runner(runner_type.upper())
90
91 @staticmethod
92 def list_runners():
93 """Returns a list of valid runner keys."""
94 return rv._registry.get_experiment_runner_keys()
95
[end of src/rastervision/runner/experiment_runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/rastervision/cli/main.py b/src/rastervision/cli/main.py
--- a/src/rastervision/cli/main.py
+++ b/src/rastervision/cli/main.py
@@ -130,7 +130,8 @@
experiments,
commands_to_run=commands,
rerun_commands=rerun,
- skip_file_check=skip_file_check)
+ skip_file_check=skip_file_check,
+ dry_run=dry_run)
@main.command()
diff --git a/src/rastervision/runner/experiment_runner.py b/src/rastervision/runner/experiment_runner.py
--- a/src/rastervision/runner/experiment_runner.py
+++ b/src/rastervision/runner/experiment_runner.py
@@ -1,7 +1,8 @@
+import rastervision as rv
+
from abc import (ABC, abstractmethod)
from typing import (List, Union)
-import rastervision as rv
from rastervision.runner import (CommandDefinition, CommandDAG)
@@ -10,15 +11,28 @@
experiments: Union[List[rv.ExperimentConfig], rv.ExperimentConfig],
commands_to_run=rv.ALL_COMMANDS,
rerun_commands=False,
- skip_file_check=False):
+ skip_file_check=False,
+ dry_run: bool = False):
if not isinstance(experiments, list):
experiments = [experiments]
- command_definitions = CommandDefinition.from_experiments(experiments)
+ _command_definitions = CommandDefinition.from_experiments(experiments)
# Filter out commands we aren't running.
command_definitions = CommandDefinition.filter_commands(
- command_definitions, commands_to_run)
+ _command_definitions, commands_to_run)
+
+ # Print unrequested commands
+ if dry_run:
+ not_requested = set(_command_definitions) - set(command_definitions)
+ if not_requested:
+ print()
+ print('Not requsted:')
+ for command in not_requested:
+ command_type = command.command_config.command_type
+ experiment_id = command.experiment_id
+ print('{} from {}'.format(command_type, experiment_id))
+ print()
# Check if there are any unsatisfied inputs.
missing_inputs = CommandDefinition.get_missing_inputs(
@@ -63,9 +77,17 @@
command_dag = CommandDAG(
unique_commands, rerun_commands, skip_file_check=skip_file_check)
- if skipped_commands or command_dag.skipped_commands:
- # TODO: Report skipped commands, either in the dry run or elsewhere.
- pass
+ # Print conflicating or alread fulfilled commands
+ if dry_run:
+ skipped_commands.extend(command_dag.skipped_commands)
+ if skipped_commands:
+ print()
+ print('Skipped due to output conflicts:')
+ for command in skipped_commands:
+ command_type = command.command_config.command_type
+ experiment_id = command.experiment_id
+ print('{} from {}'.format(command_type, experiment_id))
+ print()
# Save experiment configs
experiments_by_id = dict(map(lambda e: (e.id, e), experiments))
@@ -74,9 +96,19 @@
if command_def.experiment_id not in seen_ids:
seen_ids.add(command_def.experiment_id)
experiment = experiments_by_id[command_def.experiment_id]
- experiment.fully_resolve().save_config()
-
- self._run_experiment(command_dag)
+ if not dry_run:
+ experiment.fully_resolve().save_config()
+
+ if dry_run:
+ print()
+ print('To be run:')
+ for command in command_dag.get_sorted_commands():
+ command_type = command.command_type
+ root_uri = command.root_uri
+ print('{} in {}'.format(command_type, root_uri))
+ print()
+ else:
+ self._run_experiment(command_dag)
@abstractmethod
def _run_experiment(self, command_dag):
|
{"golden_diff": "diff --git a/src/rastervision/cli/main.py b/src/rastervision/cli/main.py\n--- a/src/rastervision/cli/main.py\n+++ b/src/rastervision/cli/main.py\n@@ -130,7 +130,8 @@\n experiments,\n commands_to_run=commands,\n rerun_commands=rerun,\n- skip_file_check=skip_file_check)\n+ skip_file_check=skip_file_check,\n+ dry_run=dry_run)\n \n \n @main.command()\ndiff --git a/src/rastervision/runner/experiment_runner.py b/src/rastervision/runner/experiment_runner.py\n--- a/src/rastervision/runner/experiment_runner.py\n+++ b/src/rastervision/runner/experiment_runner.py\n@@ -1,7 +1,8 @@\n+import rastervision as rv\n+\n from abc import (ABC, abstractmethod)\n from typing import (List, Union)\n \n-import rastervision as rv\n from rastervision.runner import (CommandDefinition, CommandDAG)\n \n \n@@ -10,15 +11,28 @@\n experiments: Union[List[rv.ExperimentConfig], rv.ExperimentConfig],\n commands_to_run=rv.ALL_COMMANDS,\n rerun_commands=False,\n- skip_file_check=False):\n+ skip_file_check=False,\n+ dry_run: bool = False):\n if not isinstance(experiments, list):\n experiments = [experiments]\n \n- command_definitions = CommandDefinition.from_experiments(experiments)\n+ _command_definitions = CommandDefinition.from_experiments(experiments)\n \n # Filter out commands we aren't running.\n command_definitions = CommandDefinition.filter_commands(\n- command_definitions, commands_to_run)\n+ _command_definitions, commands_to_run)\n+\n+ # Print unrequested commands\n+ if dry_run:\n+ not_requested = set(_command_definitions) - set(command_definitions)\n+ if not_requested:\n+ print()\n+ print('Not requsted:')\n+ for command in not_requested:\n+ command_type = command.command_config.command_type\n+ experiment_id = command.experiment_id\n+ print('{} from {}'.format(command_type, experiment_id))\n+ print()\n \n # Check if there are any unsatisfied inputs.\n missing_inputs = CommandDefinition.get_missing_inputs(\n@@ -63,9 +77,17 @@\n command_dag = CommandDAG(\n unique_commands, rerun_commands, skip_file_check=skip_file_check)\n \n- if skipped_commands or command_dag.skipped_commands:\n- # TODO: Report skipped commands, either in the dry run or elsewhere.\n- pass\n+ # Print conflicating or alread fulfilled commands\n+ if dry_run:\n+ skipped_commands.extend(command_dag.skipped_commands)\n+ if skipped_commands:\n+ print()\n+ print('Skipped due to output conflicts:')\n+ for command in skipped_commands:\n+ command_type = command.command_config.command_type\n+ experiment_id = command.experiment_id\n+ print('{} from {}'.format(command_type, experiment_id))\n+ print()\n \n # Save experiment configs\n experiments_by_id = dict(map(lambda e: (e.id, e), experiments))\n@@ -74,9 +96,19 @@\n if command_def.experiment_id not in seen_ids:\n seen_ids.add(command_def.experiment_id)\n experiment = experiments_by_id[command_def.experiment_id]\n- experiment.fully_resolve().save_config()\n-\n- self._run_experiment(command_dag)\n+ if not dry_run:\n+ experiment.fully_resolve().save_config()\n+\n+ if dry_run:\n+ print()\n+ print('To be run:')\n+ for command in command_dag.get_sorted_commands():\n+ command_type = command.command_type\n+ root_uri = command.root_uri\n+ print('{} in {}'.format(command_type, root_uri))\n+ print()\n+ else:\n+ self._run_experiment(command_dag)\n \n @abstractmethod\n def _run_experiment(self, command_dag):\n", "issue": "Implement `--dry-run` / `-n` option in CLI\nWe should have a `--dry-run`/`-n` option in the CLI which will print information about the commands to be run, but not actually run them. 
The CLI option exists, but needs to be implemented in the code.\n", "before_files": [{"content": "\"\"\"Raster Vision main program\"\"\"\nimport sys\nfrom tempfile import TemporaryDirectory\n\nimport click\n\nimport rastervision as rv\nfrom rastervision.experiment import (ExperimentLoader, LoaderError)\nfrom rastervision.runner import (ExperimentRunner)\n\n\ndef print_error(msg):\n click.echo(click.style(msg, fg='red'), err=True)\n\n\[email protected]()\[email protected](\n '--profile', '-p', help='Sets the configuration profile name to use.')\ndef main(profile):\n # Initialize configuration\n if profile:\n rv._registry.initialize_config(profile=profile)\n\n\[email protected](\n 'run', short_help='Run Raster Vision commands against Experiments.')\[email protected]('runner')\[email protected]('commands', nargs=-1)\[email protected](\n '--experiment_module',\n '-e',\n help=('Name of an importable module to look for experiment sets '\n 'in. If not supplied, experiments will be loaded '\n 'from __main__'))\[email protected](\n '--dry-run',\n '-n',\n is_flag=True,\n help=('Execute a dry run, which will print out information '\n 'about the commands to be run, but will not actually '\n 'run the commands'))\[email protected](\n '--skip-file-check',\n '-x',\n is_flag=True,\n help=('Skip the step that verifies that file exist.'))\[email protected](\n '--arg',\n '-a',\n type=(str, str),\n multiple=True,\n metavar='KEY VALUE',\n help=('Pass a parameter to the experiments if the method '\n 'parameter list takes in a parameter with that key. '\n 'Multiple args can be supplied'))\[email protected](\n '--prefix',\n metavar='PREFIX',\n default='exp_',\n help=('Prefix for methods containing experiments. (default: \"exp_\")'))\[email protected](\n '--method',\n '-m',\n 'methods',\n multiple=True,\n metavar='PATTERN',\n help=('Pattern to match method names to run.'))\[email protected](\n '--filter',\n '-f',\n 'filters',\n multiple=True,\n metavar='PATTERN',\n help=('Pattern to match experiment names to run.'))\[email protected](\n '--rerun',\n '-r',\n is_flag=True,\n default=False,\n help=('Rerun commands, regardless if '\n 'their output files already exist.'))\ndef run(runner, commands, experiment_module, dry_run, skip_file_check, arg,\n prefix, methods, filters, rerun):\n \"\"\"Run Raster Vision commands from experiments, using the\n experiment runner named RUNNER.\"\"\"\n # Validate runner\n valid_runners = list(\n map(lambda x: x.lower(), rv.ExperimentRunner.list_runners()))\n if runner not in valid_runners:\n print_error('Invalid experiment runner: \"{}\". 
'\n 'Must be one of: \"{}\"'.format(runner,\n '\", \"'.join(valid_runners)))\n sys.exit(1)\n\n runner = ExperimentRunner.get_runner(runner)\n\n if experiment_module:\n module_to_load = experiment_module\n else:\n module_to_load = '__main__'\n\n if not commands:\n commands = rv.ALL_COMMANDS\n else:\n commands = list(map(lambda x: x.upper(), commands))\n\n experiment_args = {}\n for k, v in arg:\n experiment_args[k] = v\n\n loader = ExperimentLoader(\n experiment_args=experiment_args,\n experiment_method_prefix=prefix,\n experiment_method_patterns=methods,\n experiment_name_patterns=filters)\n try:\n experiments = loader.load_from_module(module_to_load)\n except LoaderError as e:\n print_error(str(e))\n sys.exit(1)\n\n if not experiments:\n if experiment_module:\n print_error(\n 'No experiments found in {}.'.format(experiment_module))\n else:\n print_error('No experiments found.')\n\n runner.run(\n experiments,\n commands_to_run=commands,\n rerun_commands=rerun,\n skip_file_check=skip_file_check)\n\n\[email protected]()\[email protected](\n '--experiment_module',\n '-e',\n help=('Name of an importable module to look for experiment sets '\n 'in. If not supplied, experiments will be loaded '\n 'from __main__'))\[email protected](\n '--arg',\n '-a',\n type=(str, str),\n multiple=True,\n metavar='KEY VALUE',\n help=('Pass a parameter to the experiments if the method '\n 'parameter list takes in a parameter with that key. '\n 'Multiple args can be supplied'))\ndef ls(experiment_module, arg):\n \"\"\"Print out a list of Experiment IDs.\"\"\"\n if experiment_module:\n module_to_load = experiment_module\n else:\n module_to_load = '__main__'\n\n experiment_args = {}\n for k, v in arg:\n experiment_args[k] = v\n\n loader = ExperimentLoader(experiment_args=experiment_args)\n try:\n experiments = loader.load_from_module(module_to_load)\n except LoaderError as e:\n print_error(str(e))\n sys.exit(1)\n\n if not experiments:\n if experiment_module:\n print_error(\n 'No experiments found in {}.'.format(experiment_module))\n else:\n print_error('No experiments found.')\n\n for e in experiments:\n click.echo('{}'.format(e.id))\n\n\[email protected](\n 'predict', short_help='Make predictions using a predict package.')\[email protected]('predict_package', type=click.Path(exists=True))\[email protected]('image_uri', type=click.Path(exists=True))\[email protected]('output_uri', type=click.Path(exists=False))\[email protected](\n '--update_stats',\n '-a',\n is_flag=True,\n help=('Run an analysis on this individual image, as '\n 'opposed to using any analysis like statistics '\n 'that exist in the prediction package'))\[email protected](\n '--channel-order',\n help='String containing channel_order.' 
+ ' Example: \\\"2 1 0\\\"')\ndef predict(predict_package, image_uri, output_uri, update_stats,\n channel_order):\n \"\"\"Make predictions on the image at IMAGE_URI\n using PREDICT_PACKAGE and store the\n prediciton output at OUTPUT_URI.\n \"\"\"\n if channel_order is not None:\n channel_order = [\n int(channel_ind) for channel_ind in channel_order.split(' ')\n ]\n with TemporaryDirectory() as tmp_dir:\n predict = rv.Predictor(predict_package, tmp_dir, update_stats,\n channel_order).predict\n predict(image_uri, output_uri)\n\n\[email protected](\n 'run_command', short_help='Run a command from configuration file.')\[email protected]('command_config_uri')\ndef run_command(command_config_uri):\n \"\"\"Run a command from a serialized command configuration\n at COMMAND_CONFIG_URI.\n \"\"\"\n rv.runner.CommandRunner.run(command_config_uri)\n\n\nif __name__ == '__main__':\n main()\n", "path": "src/rastervision/cli/main.py"}, {"content": "from abc import (ABC, abstractmethod)\nfrom typing import (List, Union)\n\nimport rastervision as rv\nfrom rastervision.runner import (CommandDefinition, CommandDAG)\n\n\nclass ExperimentRunner(ABC):\n def run(self,\n experiments: Union[List[rv.ExperimentConfig], rv.ExperimentConfig],\n commands_to_run=rv.ALL_COMMANDS,\n rerun_commands=False,\n skip_file_check=False):\n if not isinstance(experiments, list):\n experiments = [experiments]\n\n command_definitions = CommandDefinition.from_experiments(experiments)\n\n # Filter out commands we aren't running.\n command_definitions = CommandDefinition.filter_commands(\n command_definitions, commands_to_run)\n\n # Check if there are any unsatisfied inputs.\n missing_inputs = CommandDefinition.get_missing_inputs(\n command_definitions)\n if missing_inputs:\n # TODO: Replace with logging?\n s = ''\n for exp_id in missing_inputs:\n s += 'In {}:\\n\\t{}\\n'.format(\n exp_id, '\\t{}\\n'.join(missing_inputs[exp_id]))\n\n raise rv.ConfigError('There were missing input URIs '\n 'that are required, but were not '\n 'able to be derived: \\n{}'.format(s))\n\n # Remove duplicate commands, defining equality for a command by\n # the tuple (command_type, input_uris, output_uris)\n (unique_commands, skipped_commands\n ) = CommandDefinition.remove_duplicates(command_definitions)\n\n # Ensure that for each type of command, there are none that clobber\n # each other's output.\n clashing_commands = CommandDefinition.get_clashing_commands(\n unique_commands)\n\n if clashing_commands:\n clashing_msgs = []\n for (output_uri, c_defs) in clashing_commands:\n command_type = c_defs[0].command_config.command_type\n experiments = ', '.join(map(lambda c: c.experiment_id, c_defs))\n clashing_msgs.append(\n 'The {} command in the follwoing experiments '\n 'output {}, but are not equal: {}'.format(\n command_type, output_uri, experiments))\n # TODO: Replace with logging?\n s = '\\t\\n'.join(clashing_msgs)\n\n raise rv.ConfigurationError(\n 'ERROR: Command outputs will'\n 'override each other: \\n{}\\n'.format(s))\n\n command_dag = CommandDAG(\n unique_commands, rerun_commands, skip_file_check=skip_file_check)\n\n if skipped_commands or command_dag.skipped_commands:\n # TODO: Report skipped commands, either in the dry run or elsewhere.\n pass\n\n # Save experiment configs\n experiments_by_id = dict(map(lambda e: (e.id, e), experiments))\n seen_ids = set([])\n for command_def in command_dag.get_command_definitions():\n if command_def.experiment_id not in seen_ids:\n seen_ids.add(command_def.experiment_id)\n experiment = 
experiments_by_id[command_def.experiment_id]\n experiment.fully_resolve().save_config()\n\n self._run_experiment(command_dag)\n\n @abstractmethod\n def _run_experiment(self, command_dag):\n pass\n\n @staticmethod\n def get_runner(runner_type):\n \"\"\"Gets the runner associated with this runner type.\"\"\"\n # Runner keys are upper cased.\n return rv._registry.get_experiment_runner(runner_type.upper())\n\n @staticmethod\n def list_runners():\n \"\"\"Returns a list of valid runner keys.\"\"\"\n return rv._registry.get_experiment_runner_keys()\n", "path": "src/rastervision/runner/experiment_runner.py"}]}
| 3,623 | 873 |
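As an illustrative aside to the raster-vision row above: the `--dry-run`/`-n` flag was already declared on the Click command, so the fix is mostly about threading it into the runner and branching there. A minimal, self-contained sketch of that wiring follows; the `Runner` class is a stand-in, not the project's `ExperimentRunner`.

```python
# Passing a Click --dry-run/-n flag through to a runner object.
import click


class Runner:
    def run(self, commands, dry_run=False):
        for cmd in commands:
            if dry_run:
                # Report what would happen without doing it.
                click.echo("Would run: {}".format(cmd))
            else:
                click.echo("Running: {}".format(cmd))


@click.command()
@click.argument("commands", nargs=-1)
@click.option("--dry-run", "-n", is_flag=True,
              help="Print the commands that would be run, without running them.")
def main(commands, dry_run):
    Runner().run(list(commands), dry_run=dry_run)


if __name__ == "__main__":
    main()
```

Keeping the branch inside the runner rather than in the CLI function matches the shape of the golden diff, which is what lets the dry run also report the skipped and conflicting commands discovered while building the command DAG.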
gh_patches_debug_19352
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-339
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scopes priorities while selecting configuration
## Bug:
When there are multiple language servers configured, all of which are for similar scopes (Ex. `source.json`, `source.json.sublime.settings`) the configuration with the most specific scope should be preferred; however right now one or the other could "win", some times leading to erroneous configuration.
Example comes from configuring **vscode-json-languageserver** to work with both `json` and `jsonc` languageIds.
### Suggestion:
Give priority to the configuration with the most specific scope that matches.
</issue>
<code>
[start of plugin/core/configurations.py]
1 import sublime
2
3 from .settings import ClientConfig, client_configs
4 from .logging import debug
5 from .workspace import get_project_config
6
7 assert ClientConfig
8
9 try:
10 from typing import Any, List, Dict, Tuple, Callable, Optional
11 assert Any and List and Dict and Tuple and Callable and Optional
12 except ImportError:
13 pass
14
15
16 window_client_configs = dict() # type: Dict[int, List[ClientConfig]]
17
18
19 def get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':
20 for config in configs:
21 for scope in config.scopes:
22 if len(view.sel()) > 0:
23 if view.match_selector(view.sel()[0].begin(), scope):
24 return config
25
26 return None
27
28
29 def register_client_config(config: ClientConfig) -> None:
30 window_client_configs.clear()
31 client_configs.add_external_config(config)
32
33
34 def get_global_client_config(view: sublime.View) -> 'Optional[ClientConfig]':
35 return get_scope_client_config(view, client_configs.all)
36
37
38 def get_default_client_config(view: sublime.View) -> 'Optional[ClientConfig]':
39 return get_scope_client_config(view, client_configs.defaults)
40
41
42 def get_window_client_config(view: sublime.View) -> 'Optional[ClientConfig]':
43 window = view.window()
44 if window:
45 configs_for_window = window_client_configs.get(window.id(), [])
46 return get_scope_client_config(view, configs_for_window)
47 else:
48 return None
49
50
51 def config_for_scope(view: sublime.View) -> 'Optional[ClientConfig]':
52 # check window_client_config first
53 window_client_config = get_window_client_config(view)
54 if not window_client_config:
55 global_client_config = get_global_client_config(view)
56
57 if global_client_config:
58 window = view.window()
59 if window:
60 window_client_config = apply_window_settings(global_client_config, view)
61 add_window_client_config(window, window_client_config)
62 return window_client_config
63 else:
64 # always return a client config even if the view has no window anymore
65 return global_client_config
66
67 return window_client_config
68
69
70 def add_window_client_config(window: 'sublime.Window', config: 'ClientConfig'):
71 global window_client_configs
72 window_client_configs.setdefault(window.id(), []).append(config)
73
74
75 def clear_window_client_configs(window: 'sublime.Window'):
76 global window_client_configs
77 if window.id() in window_client_configs:
78 del window_client_configs[window.id()]
79
80
81 def apply_window_settings(client_config: 'ClientConfig', view: 'sublime.View') -> 'ClientConfig':
82 window = view.window()
83 if window:
84 window_config = get_project_config(window)
85
86 if client_config.name in window_config:
87 overrides = window_config[client_config.name]
88 debug('window has override for', client_config.name, overrides)
89 return ClientConfig(
90 client_config.name,
91 overrides.get("command", client_config.binary_args),
92 overrides.get("tcp_port", client_config.tcp_port),
93 overrides.get("scopes", client_config.scopes),
94 overrides.get("syntaxes", client_config.syntaxes),
95 overrides.get("languageId", client_config.languageId),
96 overrides.get("enabled", client_config.enabled),
97 overrides.get("initializationOptions", client_config.init_options),
98 overrides.get("settings", client_config.settings),
99 overrides.get("env", client_config.env)
100 )
101
102 return client_config
103
104
105 def is_supportable_syntax(syntax: str) -> bool:
106 # TODO: filter out configs disabled by the user.
107 for config in client_configs.defaults:
108 if syntax in config.syntaxes:
109 return True
110 return False
111
112
113 def is_supported_syntax(syntax: str) -> bool:
114 for config in client_configs.all:
115 if syntax in config.syntaxes:
116 return True
117 return False
118
119
120 def is_supported_view(view: sublime.View) -> bool:
121 # TODO: perhaps make this check for a client instead of a config
122 if config_for_scope(view):
123 return True
124 else:
125 return False
126
[end of plugin/core/configurations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/core/configurations.py b/plugin/core/configurations.py
--- a/plugin/core/configurations.py
+++ b/plugin/core/configurations.py
@@ -17,13 +17,21 @@
def get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':
+ # When there are multiple server configurations, all of which are for
+ # similar scopes (e.g. 'source.json', 'source.json.sublime.settings') the
+ # configuration with the most specific scope (highest ranked selector)
+ # in the current position is preferred.
+ scope_score = 0
+ scope_client_config = None
for config in configs:
for scope in config.scopes:
- if len(view.sel()) > 0:
- if view.match_selector(view.sel()[0].begin(), scope):
- return config
-
- return None
+ sel = view.sel()
+ if len(sel) > 0:
+ score = view.score_selector(sel[0].begin(), scope)
+ if score > scope_score:
+ scope_score = score
+ scope_client_config = config
+ return scope_client_config
def register_client_config(config: ClientConfig) -> None:
|
{"golden_diff": "diff --git a/plugin/core/configurations.py b/plugin/core/configurations.py\n--- a/plugin/core/configurations.py\n+++ b/plugin/core/configurations.py\n@@ -17,13 +17,21 @@\n \n \n def get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':\n+ # When there are multiple server configurations, all of which are for\n+ # similar scopes (e.g. 'source.json', 'source.json.sublime.settings') the\n+ # configuration with the most specific scope (highest ranked selector)\n+ # in the current position is preferred.\n+ scope_score = 0\n+ scope_client_config = None\n for config in configs:\n for scope in config.scopes:\n- if len(view.sel()) > 0:\n- if view.match_selector(view.sel()[0].begin(), scope):\n- return config\n-\n- return None\n+ sel = view.sel()\n+ if len(sel) > 0:\n+ score = view.score_selector(sel[0].begin(), scope)\n+ if score > scope_score:\n+ scope_score = score\n+ scope_client_config = config\n+ return scope_client_config\n \n \n def register_client_config(config: ClientConfig) -> None:\n", "issue": "Scopes priorities while selecting configuration\n## Bug:\r\n\r\nWhen there are multiple language servers configured, all of which are for similar scopes (Ex. `source.json`, `source.json.sublime.settings`) the configuration with the most specific scope should be preferred; however right now one or the other could \"win\", some times leading to erroneous configuration.\r\n\r\nExample comes from configuring **vscode-json-languageserver** to work with both `json` and `jsonc` languageIds.\r\n\r\n### Suggestion:\r\n\r\nGive priority to the configuration with the most specific scope that matches.\r\n\n", "before_files": [{"content": "import sublime\n\nfrom .settings import ClientConfig, client_configs\nfrom .logging import debug\nfrom .workspace import get_project_config\n\nassert ClientConfig\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\n\nwindow_client_configs = dict() # type: Dict[int, List[ClientConfig]]\n\n\ndef get_scope_client_config(view: 'sublime.View', configs: 'List[ClientConfig]') -> 'Optional[ClientConfig]':\n for config in configs:\n for scope in config.scopes:\n if len(view.sel()) > 0:\n if view.match_selector(view.sel()[0].begin(), scope):\n return config\n\n return None\n\n\ndef register_client_config(config: ClientConfig) -> None:\n window_client_configs.clear()\n client_configs.add_external_config(config)\n\n\ndef get_global_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, client_configs.all)\n\n\ndef get_default_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n return get_scope_client_config(view, client_configs.defaults)\n\n\ndef get_window_client_config(view: sublime.View) -> 'Optional[ClientConfig]':\n window = view.window()\n if window:\n configs_for_window = window_client_configs.get(window.id(), [])\n return get_scope_client_config(view, configs_for_window)\n else:\n return None\n\n\ndef config_for_scope(view: sublime.View) -> 'Optional[ClientConfig]':\n # check window_client_config first\n window_client_config = get_window_client_config(view)\n if not window_client_config:\n global_client_config = get_global_client_config(view)\n\n if global_client_config:\n window = view.window()\n if window:\n window_client_config = apply_window_settings(global_client_config, view)\n add_window_client_config(window, 
window_client_config)\n return window_client_config\n else:\n # always return a client config even if the view has no window anymore\n return global_client_config\n\n return window_client_config\n\n\ndef add_window_client_config(window: 'sublime.Window', config: 'ClientConfig'):\n global window_client_configs\n window_client_configs.setdefault(window.id(), []).append(config)\n\n\ndef clear_window_client_configs(window: 'sublime.Window'):\n global window_client_configs\n if window.id() in window_client_configs:\n del window_client_configs[window.id()]\n\n\ndef apply_window_settings(client_config: 'ClientConfig', view: 'sublime.View') -> 'ClientConfig':\n window = view.window()\n if window:\n window_config = get_project_config(window)\n\n if client_config.name in window_config:\n overrides = window_config[client_config.name]\n debug('window has override for', client_config.name, overrides)\n return ClientConfig(\n client_config.name,\n overrides.get(\"command\", client_config.binary_args),\n overrides.get(\"tcp_port\", client_config.tcp_port),\n overrides.get(\"scopes\", client_config.scopes),\n overrides.get(\"syntaxes\", client_config.syntaxes),\n overrides.get(\"languageId\", client_config.languageId),\n overrides.get(\"enabled\", client_config.enabled),\n overrides.get(\"initializationOptions\", client_config.init_options),\n overrides.get(\"settings\", client_config.settings),\n overrides.get(\"env\", client_config.env)\n )\n\n return client_config\n\n\ndef is_supportable_syntax(syntax: str) -> bool:\n # TODO: filter out configs disabled by the user.\n for config in client_configs.defaults:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_syntax(syntax: str) -> bool:\n for config in client_configs.all:\n if syntax in config.syntaxes:\n return True\n return False\n\n\ndef is_supported_view(view: sublime.View) -> bool:\n # TODO: perhaps make this check for a client instead of a config\n if config_for_scope(view):\n return True\n else:\n return False\n", "path": "plugin/core/configurations.py"}]}
| 1,790 | 280 |
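As an illustrative aside to the LSP row above: the heart of the fix is replacing the first-scope-that-matches lookup with a highest-scoring lookup using Sublime Text's `view.score_selector()`, which returns 0 for a non-matching selector and larger values for more specific matches. Below is a standalone sketch of that selection rule; `configs` is assumed to be a list of objects exposing a `.scopes` list, as in the plugin.

```python
# Prefer the configuration whose scope selector matches most specifically
# at the caret position of the given sublime.View.
def best_scope_config(view, configs):
    sel = view.sel()
    if len(sel) == 0:
        return None
    point = sel[0].begin()
    best_score = 0
    best_config = None
    for config in configs:
        for scope in config.scopes:
            score = view.score_selector(point, scope)
            if score > best_score:
                best_score = score
                best_config = config
    return best_config
```

Because `source.json.sublime.settings` scores higher than `source.json` inside a `.sublime-settings` file, the more specific configuration wins instead of whichever happened to be listed first.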
gh_patches_debug_12787
|
rasdani/github-patches
|
git_diff
|
numba__numba-672
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong type coercion on input arguments
If the following snippet, it looks like first calling the function with int arguments then coerces any further float arguments to int:
```
>>> @jit(nopython=True)
... def mpow(a, b):
... return math.pow(a, b)
...
>>>
>>> mpow(0, 1)
0.0
>>> mpow(0, 0.666)
1.0
>>> mpow(0, 1.666)
0.0
```
It doesn't happen if the function is called with float arguments first:
```
>>> @jit(nopython=True)
... def mpow2(a, b):
... return math.pow(a, b)
...
>>> mpow2(0, 0.666)
0.0
>>> mpow2(0, 1)
0.0
>>> mpow2(0, 0.666)
0.0
```
</issue>
<code>
[start of numba/typeconv/typeconv.py]
1 from __future__ import print_function, absolute_import
2 from . import _typeconv
3
4
5 class TypeManager(object):
6 def __init__(self):
7 self._ptr = _typeconv.new_type_manager()
8
9 def select_overload(self, sig, overloads):
10 sig = [t._code for t in sig]
11 overloads = [[t._code for t in s] for s in overloads ]
12 return _typeconv.select_overload(self._ptr, sig, overloads)
13
14 def check_compatible(self, fromty, toty):
15 return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)
16
17 def set_compatible(self, fromty, toty, by):
18 _typeconv.set_compatible(self._ptr, fromty._code, toty._code, by)
19
20 def set_promote(self, fromty, toty):
21 self.set_compatible(fromty, toty, ord("p"))
22
23 def set_unsafe_convert(self, fromty, toty):
24 self.set_compatible(fromty, toty, ord("u"))
25
26 def set_safe_convert(self, fromty, toty):
27 self.set_compatible(fromty, toty, ord("s"))
28
29 def get_pointer(self):
30 return _typeconv.get_pointer(self._ptr)
31
[end of numba/typeconv/typeconv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numba/typeconv/typeconv.py b/numba/typeconv/typeconv.py
--- a/numba/typeconv/typeconv.py
+++ b/numba/typeconv/typeconv.py
@@ -6,10 +6,10 @@
def __init__(self):
self._ptr = _typeconv.new_type_manager()
- def select_overload(self, sig, overloads):
+ def select_overload(self, sig, overloads, allow_unsafe):
sig = [t._code for t in sig]
overloads = [[t._code for t in s] for s in overloads ]
- return _typeconv.select_overload(self._ptr, sig, overloads)
+ return _typeconv.select_overload(self._ptr, sig, overloads, allow_unsafe)
def check_compatible(self, fromty, toty):
return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)
|
{"golden_diff": "diff --git a/numba/typeconv/typeconv.py b/numba/typeconv/typeconv.py\n--- a/numba/typeconv/typeconv.py\n+++ b/numba/typeconv/typeconv.py\n@@ -6,10 +6,10 @@\n def __init__(self):\n self._ptr = _typeconv.new_type_manager()\n \n- def select_overload(self, sig, overloads):\n+ def select_overload(self, sig, overloads, allow_unsafe):\n sig = [t._code for t in sig]\n overloads = [[t._code for t in s] for s in overloads ]\n- return _typeconv.select_overload(self._ptr, sig, overloads)\n+ return _typeconv.select_overload(self._ptr, sig, overloads, allow_unsafe)\n \n def check_compatible(self, fromty, toty):\n return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)\n", "issue": "Wrong type coercion on input arguments\nIf the following snippet, it looks like first calling the function with int arguments then coerces any further float arguments to int:\n\n```\n>>> @jit(nopython=True)\n... def mpow(a, b):\n... return math.pow(a, b)\n... \n>>> \n>>> mpow(0, 1)\n0.0\n>>> mpow(0, 0.666)\n1.0\n>>> mpow(0, 1.666)\n0.0\n```\n\nIt doesn't happen if the function is called with float arguments first:\n\n```\n>>> @jit(nopython=True)\n... def mpow2(a, b):\n... return math.pow(a, b)\n... \n>>> mpow2(0, 0.666)\n0.0\n>>> mpow2(0, 1)\n0.0\n>>> mpow2(0, 0.666)\n0.0\n```\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import\nfrom . import _typeconv\n\n\nclass TypeManager(object):\n def __init__(self):\n self._ptr = _typeconv.new_type_manager()\n\n def select_overload(self, sig, overloads):\n sig = [t._code for t in sig]\n overloads = [[t._code for t in s] for s in overloads ]\n return _typeconv.select_overload(self._ptr, sig, overloads)\n\n def check_compatible(self, fromty, toty):\n return _typeconv.check_compatible(self._ptr, fromty._code, toty._code)\n\n def set_compatible(self, fromty, toty, by):\n _typeconv.set_compatible(self._ptr, fromty._code, toty._code, by)\n\n def set_promote(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"p\"))\n\n def set_unsafe_convert(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"u\"))\n\n def set_safe_convert(self, fromty, toty):\n self.set_compatible(fromty, toty, ord(\"s\"))\n\n def get_pointer(self):\n return _typeconv.get_pointer(self._ptr)\n", "path": "numba/typeconv/typeconv.py"}]}
| 1,086 | 213 |
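As an illustrative aside to the numba row above: the truncation happens because, once an integer specialization exists, overload selection accepts the unsafe float-to-int conversion instead of compiling a float version, and the patch adds an `allow_unsafe` switch to `select_overload`. The toy model below captures that idea in pure Python; it is not numba's actual C++ type manager, and the type names and conversion table are assumptions for illustration.

```python
# Toy overload selection that can refuse "unsafe" (lossy) conversions.
PROMOTE, SAFE, UNSAFE = "promote", "safe", "unsafe"

# How an actual argument type may be converted to a formal parameter type.
CONVERSIONS = {
    ("int", "int"): PROMOTE,
    ("float", "float"): PROMOTE,
    ("int", "float"): SAFE,    # widening keeps the value
    ("float", "int"): UNSAFE,  # truncates and loses information
}


def signature_matches(sig, overload, allow_unsafe):
    for actual, formal in zip(sig, overload):
        kind = CONVERSIONS.get((actual, formal))
        if kind is None:
            return False
        if kind == UNSAFE and not allow_unsafe:
            return False
    return True


def select_overload(sig, overloads, allow_unsafe=False):
    for overload in overloads:
        if signature_matches(sig, overload, allow_unsafe):
            return overload
    return None


# With only an (int, int) specialization compiled, float arguments should not
# silently reuse it:
print(select_overload(("int", "float"), [("int", "int")]))        # None -> compile a new version
print(select_overload(("int", "float"), [("int", "int")], True))  # ('int', 'int') -> the old, truncating behaviour
```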
gh_patches_debug_33843
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-731
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E3031 CidrIp contains invalid characters fails when Fn::Sub is present
*cfn-lint version: (`cfn-lint --version`)*
`cfn-lint 0.16.0`
*Description of issue.*
When `CidrIp` value is `!Sub`ed from `Parameters` - E3031 lint error is raised. Sample template:
```lang=yaml
AWSTemplateFormatVersion: 2010-09-09
Description: AMI Builder Stack
Parameters:
BuilderCidr:
Type: String
Resources:
SecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Description
VpcId: vpc-id
SecurityGroupIngress:
- IpProtocol: tcp
FromPort: 1
ToPort: 65535
CidrIp: !Sub ${BuilderCidr}
```
Expected output: successful lint
Actual output:
```
E3031 CidrIp contains invalid characters (Pattern: x.x.x.x/y) at Resources/SecurityGroup/Properties/SecurityGroupIngress/0/CidrIp/Fn::Sub
```
> Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
The problem still persists after running `cfn-lint -u`
</issue>
<code>
[start of src/cfnlint/rules/resources/properties/AllowedPattern.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21
22 from cfnlint.helpers import RESOURCE_SPECS
23
24
25 class AllowedPattern(CloudFormationLintRule):
26 """Check if properties have a valid value"""
27 id = 'E3031'
28 shortdesc = 'Check if property values adhere to a specific pattern'
29 description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'
30 source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'
31 tags = ['resources', 'property', 'allowed pattern', 'regex']
32
33 def initialize(self, cfn):
34 """Initialize the rule"""
35 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):
36 self.resource_property_types.append(resource_type_spec)
37 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
38 self.resource_sub_property_types.append(property_type_spec)
39
40 def check_sub(self, value, path, property_name, **kwargs):
41 """Check Value of a Sub"""
42 matches = []
43
44 if isinstance(value, list):
45 if isinstance(value[0], six.string_types):
46 # Remove the sub (${}) from the value
47 stripped_value = re.sub(r'\${.*}', '', value[0])
48 matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))
49 else:
50 # Remove the sub (${}) from the value
51 stripped_value = re.sub(r'\${.*}', '', value)
52 matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))
53 return matches
54
55 def check_value(self, value, path, property_name, **kwargs):
56 """Check Value"""
57 matches = []
58
59 # Get the Allowed Pattern Regex
60 value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})
61 # Get the "Human Readable" version for the error message. Optional, if not specified,
62 # the RegEx itself is used.
63 value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)
64
65 if value_pattern_regex:
66 regex = re.compile(value_pattern_regex)
67 if not regex.match(value):
68 full_path = ('/'.join(str(x) for x in path))
69
70 message = '{} contains invalid characters (Pattern: {}) at {}'
71 matches.append(RuleMatch(path, message.format(property_name, value_pattern, full_path)))
72
73 return matches
74
75 def check(self, cfn, properties, value_specs, property_specs, path):
76 """Check itself"""
77 matches = list()
78 for p_value, p_path in properties.items_safe(path[:]):
79 for prop in p_value:
80 if prop in value_specs:
81 value = value_specs.get(prop).get('Value', {})
82 if value:
83 value_type = value.get('ValueType', '')
84 property_type = property_specs.get('Properties').get(prop).get('Type')
85 matches.extend(
86 cfn.check_value(
87 p_value, prop, p_path,
88 check_value=self.check_value,
89 check_sub=self.check_sub,
90 value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
91 cfn=cfn, property_type=property_type, property_name=prop
92 )
93 )
94 return matches
95
96 def match_resource_sub_properties(self, properties, property_type, path, cfn):
97 """Match for sub properties"""
98 matches = list()
99
100 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})
101 property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)
102 matches.extend(self.check(cfn, properties, specs, property_specs, path))
103
104 return matches
105
106 def match_resource_properties(self, properties, resource_type, path, cfn):
107 """Check CloudFormation Properties"""
108 matches = list()
109
110 specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})
111 resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)
112 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
113
114 return matches
115
[end of src/cfnlint/rules/resources/properties/AllowedPattern.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py
--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py
+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py
@@ -15,7 +15,6 @@
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
-import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
@@ -37,21 +36,6 @@
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):
self.resource_sub_property_types.append(property_type_spec)
- def check_sub(self, value, path, property_name, **kwargs):
- """Check Value of a Sub"""
- matches = []
-
- if isinstance(value, list):
- if isinstance(value[0], six.string_types):
- # Remove the sub (${}) from the value
- stripped_value = re.sub(r'\${.*}', '', value[0])
- matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))
- else:
- # Remove the sub (${}) from the value
- stripped_value = re.sub(r'\${.*}', '', value)
- matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))
- return matches
-
def check_value(self, value, path, property_name, **kwargs):
"""Check Value"""
matches = []
@@ -86,7 +70,6 @@
cfn.check_value(
p_value, prop, p_path,
check_value=self.check_value,
- check_sub=self.check_sub,
value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),
cfn=cfn, property_type=property_type, property_name=prop
)
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py\n+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n@@ -15,7 +15,6 @@\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n import re\n-import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -37,21 +36,6 @@\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n \n- def check_sub(self, value, path, property_name, **kwargs):\n- \"\"\"Check Value of a Sub\"\"\"\n- matches = []\n-\n- if isinstance(value, list):\n- if isinstance(value[0], six.string_types):\n- # Remove the sub (${}) from the value\n- stripped_value = re.sub(r'\\${.*}', '', value[0])\n- matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))\n- else:\n- # Remove the sub (${}) from the value\n- stripped_value = re.sub(r'\\${.*}', '', value)\n- matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))\n- return matches\n-\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n@@ -86,7 +70,6 @@\n cfn.check_value(\n p_value, prop, p_path,\n check_value=self.check_value,\n- check_sub=self.check_sub,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n", "issue": "E3031 CidrIp contains invalid characters fails when Fn::Sub is present\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n`cfn-lint 0.16.0`\r\n\r\n*Description of issue.*\r\n\r\nWhen `CidrIp` value is `!Sub`ed from `Parameters` - E3031 lint error is raised. Sample template:\r\n\r\n```lang=yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nDescription: AMI Builder Stack\r\n\r\nParameters:\r\n\r\n BuilderCidr:\r\n Type: String\r\n\r\nResources:\r\n\r\n SecurityGroup:\r\n Type: AWS::EC2::SecurityGroup\r\n Properties:\r\n GroupDescription: Description\r\n VpcId: vpc-id\r\n SecurityGroupIngress:\r\n - IpProtocol: tcp\r\n FromPort: 1\r\n ToPort: 65535\r\n CidrIp: !Sub ${BuilderCidr}\r\n```\r\n\r\nExpected output: successful lint\r\nActual output:\r\n\r\n```\r\nE3031 CidrIp contains invalid characters (Pattern: x.x.x.x/y) at Resources/SecurityGroup/Properties/SecurityGroupIngress/0/CidrIp/Fn::Sub\r\n```\r\n\r\n> Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\r\nThe problem still persists after running `cfn-lint -u`\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass AllowedPattern(CloudFormationLintRule):\n \"\"\"Check if properties have a valid value\"\"\"\n id = 'E3031'\n shortdesc = 'Check if property values adhere to a specific pattern'\n description = 'Check if properties have a valid value in case of a pattern (Regular Expression)'\n source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedpattern'\n tags = ['resources', 'property', 'allowed pattern', 'regex']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_sub(self, value, path, property_name, **kwargs):\n \"\"\"Check Value of a Sub\"\"\"\n matches = []\n\n if isinstance(value, list):\n if isinstance(value[0], six.string_types):\n # Remove the sub (${}) from the value\n stripped_value = re.sub(r'\\${.*}', '', value[0])\n matches.extend(self.check_value(stripped_value, path[:] + [0], property_name, **kwargs))\n else:\n # Remove the sub (${}) from the value\n stripped_value = re.sub(r'\\${.*}', '', value)\n matches.extend(self.check_value(stripped_value, path[:], property_name, **kwargs))\n return matches\n\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n\n # Get the Allowed Pattern Regex\n value_pattern_regex = kwargs.get('value_specs', {}).get('AllowedPatternRegex', {})\n # Get the \"Human Readable\" version for the error message. 
Optional, if not specified,\n # the RegEx itself is used.\n value_pattern = kwargs.get('value_specs', {}).get('AllowedPattern', value_pattern_regex)\n\n if value_pattern_regex:\n regex = re.compile(value_pattern_regex)\n if not regex.match(value):\n full_path = ('/'.join(str(x) for x in path))\n\n message = '{} contains invalid characters (Pattern: {}) at {}'\n matches.append(RuleMatch(path, message.format(property_name, value_pattern, full_path)))\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n property_type = property_specs.get('Properties').get(prop).get('Type')\n matches.extend(\n cfn.check_value(\n p_value, prop, p_path,\n check_value=self.check_value,\n check_sub=self.check_sub,\n value_specs=RESOURCE_SPECS.get(cfn.regions[0]).get('ValueTypes').get(value_type, {}),\n cfn=cfn, property_type=property_type, property_name=prop\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type, {}).get('Properties', {})\n property_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes').get(property_type)\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type, {}).get('Properties', {})\n resource_specs = RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes').get(resource_type)\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/AllowedPattern.py"}]}
| 2,312 | 430 |
gh_patches_debug_5376
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-4471
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
<code>
[start of great_expectations/rule_based_profiler/types/__init__.py]
1 from .attributes import Attributes # isort:skip
2 from .builder import Builder # isort:skip
3
4 from .domain import ( # isort:skip
5 Domain,
6 SemanticDomainTypes,
7 InferredSemanticDomainType,
8 )
9 from .parameter_container import ( # isort:skip
10 DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
11 FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,
12 PARAMETER_KEY,
13 VARIABLES_KEY,
14 VARIABLES_PREFIX,
15 ParameterNode,
16 ParameterContainer,
17 build_parameter_container,
18 build_parameter_container_for_variables,
19 is_fully_qualified_parameter_name_literal_string_format,
20 get_parameter_value_by_fully_qualified_parameter_name,
21 get_parameter_values_for_fully_qualified_parameter_names,
22 get_fully_qualified_parameter_names,
23 )
24
[end of great_expectations/rule_based_profiler/types/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/great_expectations/rule_based_profiler/types/__init__.py b/great_expectations/rule_based_profiler/types/__init__.py
--- a/great_expectations/rule_based_profiler/types/__init__.py
+++ b/great_expectations/rule_based_profiler/types/__init__.py
@@ -9,6 +9,8 @@
from .parameter_container import ( # isort:skip
DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,
+ FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,
+ FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,
PARAMETER_KEY,
VARIABLES_KEY,
VARIABLES_PREFIX,
|
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/types/__init__.py b/great_expectations/rule_based_profiler/types/__init__.py\n--- a/great_expectations/rule_based_profiler/types/__init__.py\n+++ b/great_expectations/rule_based_profiler/types/__init__.py\n@@ -9,6 +9,8 @@\n from .parameter_container import ( # isort:skip\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,\n+ FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,\n+ FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,\n PARAMETER_KEY,\n VARIABLES_KEY,\n VARIABLES_PREFIX,\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from .attributes import Attributes # isort:skip\nfrom .builder import Builder # isort:skip\n\nfrom .domain import ( # isort:skip\n Domain,\n SemanticDomainTypes,\n InferredSemanticDomainType,\n)\nfrom .parameter_container import ( # isort:skip\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER,\n PARAMETER_KEY,\n VARIABLES_KEY,\n VARIABLES_PREFIX,\n ParameterNode,\n ParameterContainer,\n build_parameter_container,\n build_parameter_container_for_variables,\n is_fully_qualified_parameter_name_literal_string_format,\n get_parameter_value_by_fully_qualified_parameter_name,\n get_parameter_values_for_fully_qualified_parameter_names,\n get_fully_qualified_parameter_names,\n)\n", "path": "great_expectations/rule_based_profiler/types/__init__.py"}]}
| 788 | 148 |
gh_patches_debug_8097
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-652
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Modify docker configuration to work on OSX
Docker for Mac does not properly support the `network_mode: host` option for containers. In order to run the system on OSX, it will be necessary to network the containers using a bridged network:
> By default Compose sets up a single network for your app. Each container for a service joins the default network and is both reachable by other containers on that network, and discoverable by them at a hostname identical to the container name."
Rather than accessing other containers via a port on localhost, containers will access each other using the instance name as the hostname. Port 80 will then be exposed from the nginx container to the host.
</issue>
<code>
[start of csunplugged/config/settings/database_proxy.py]
1 # -*- coding: utf-8 -*-
2 """Django settings for connecting via Google Cloud SQL Proxy."""
3
4 from .base import * # noqa: F403
5
6
7 # DATABASE CONFIGURATION
8 # ----------------------------------------------------------------------------
9 # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
10 DATABASES = {
11 "default": {
12 "ENGINE": "django.db.backends.postgresql",
13 "HOST": "localhost",
14 "PORT": "5433",
15 "NAME": "csunplugged",
16 "USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
17 "PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
18 "ATOMIC_REQUESTS": True,
19 }
20 }
21
22 SECRET_KEY = env("DJANGO_SECRET_KEY") # noqa: F405
23
[end of csunplugged/config/settings/database_proxy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/csunplugged/config/settings/database_proxy.py b/csunplugged/config/settings/database_proxy.py
--- a/csunplugged/config/settings/database_proxy.py
+++ b/csunplugged/config/settings/database_proxy.py
@@ -10,8 +10,8 @@
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
- "HOST": "localhost",
- "PORT": "5433",
+ "HOST": "cloud_sql_proxy",
+ "PORT": "5432",
"NAME": "csunplugged",
"USER": env("GOOGLE_CLOUD_SQL_DATABASE_USERNAME"), # noqa: F405
"PASSWORD": env("GOOGLE_CLOUD_SQL_DATABASE_PASSWORD"), # noqa: F405
|
{"golden_diff": "diff --git a/csunplugged/config/settings/database_proxy.py b/csunplugged/config/settings/database_proxy.py\n--- a/csunplugged/config/settings/database_proxy.py\n+++ b/csunplugged/config/settings/database_proxy.py\n@@ -10,8 +10,8 @@\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n- \"HOST\": \"localhost\",\n- \"PORT\": \"5433\",\n+ \"HOST\": \"cloud_sql_proxy\",\n+ \"PORT\": \"5432\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n", "issue": "Modify docker configuration to work on OSX\nDocker for Mac does not properly support the `network_mode: host` option for containers. In order to run the system on OSX, it will be necessary to network the containers using a bridged network:\r\n\r\n> By default Compose sets up a single network for your app. Each container for a service joins the default network and is both reachable by other containers on that network, and discoverable by them at a hostname identical to the container name.\"\r\n\r\nRather than accessing other containers via a port on localhost, containers will access each other using the instance name as the hostname. Port 80 will then be exposed from the nginx container to the host.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Django settings for connecting via Google Cloud SQL Proxy.\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": \"localhost\",\n \"PORT\": \"5433\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"ATOMIC_REQUESTS\": True,\n }\n}\n\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n", "path": "csunplugged/config/settings/database_proxy.py"}]}
| 905 | 176 |
gh_patches_debug_2946
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-3703
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Minor documentation correction: correct id3.org url
https://github.com/beetbox/beets/blob/master/docs/faq.rst#L303
refers to:
http://www.id3.org/id3v2.4.0-structure
as a reference url for a copy of the ID3v2.4 standard documentation, but this returns a "Not found" error. I've found 2 possibilities for the replacement:
https://id3.org/id3v2.4.0-structure
(with adverts) or
https://github.com/id3/ID3v2.4/raw/master/id3v2.40-structure.txt
(without adverts)
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2
3 from __future__ import division, absolute_import, print_function
4
5 AUTHOR = u'Adrian Sampson'
6
7 # General configuration
8
9 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']
10
11 exclude_patterns = ['_build']
12 source_suffix = '.rst'
13 master_doc = 'index'
14
15 project = u'beets'
16 copyright = u'2016, Adrian Sampson'
17
18 version = '1.5'
19 release = '1.5.0'
20
21 pygments_style = 'sphinx'
22
23 # External links to the bug tracker and other sites.
24 extlinks = {
25 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),
26 'user': ('https://github.com/%s', ''),
27 'pypi': ('https://pypi.org/project/%s/', ''),
28 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),
29 }
30
31 # Options for HTML output
32 htmlhelp_basename = 'beetsdoc'
33
34 # Options for LaTeX output
35 latex_documents = [
36 ('index', 'beets.tex', u'beets Documentation',
37 AUTHOR, 'manual'),
38 ]
39
40 # Options for manual page output
41 man_pages = [
42 ('reference/cli', 'beet', u'music tagger and library organizer',
43 [AUTHOR], 1),
44 ('reference/config', 'beetsconfig', u'beets configuration file',
45 [AUTHOR], 5),
46 ]
47
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -28,6 +28,13 @@
'stdlib': ('https://docs.python.org/3/library/%s.html', ''),
}
+linkcheck_ignore = [
+ r'https://github.com/beetbox/beets/issues/',
+ r'https://github.com/\w+$', # ignore user pages
+ r'.*localhost.*',
+ r'https://www.musixmatch.com/', # blocks requests
+]
+
# Options for HTML output
htmlhelp_basename = 'beetsdoc'
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -28,6 +28,13 @@\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n }\n \n+linkcheck_ignore = [\n+ r'https://github.com/beetbox/beets/issues/',\n+ r'https://github.com/\\w+$', # ignore user pages\n+ r'.*localhost.*',\n+ r'https://www.musixmatch.com/', # blocks requests\n+]\n+\n # Options for HTML output\n htmlhelp_basename = 'beetsdoc'\n", "issue": "Minor documentation correction: correct id3.org url\nhttps://github.com/beetbox/beets/blob/master/docs/faq.rst#L303\r\nrefers to:\r\nhttp://www.id3.org/id3v2.4.0-structure\r\nas a reference url for a copy of the ID3v2.4 standard documentation, but this returns a \"Not found\" error. I've found 2 possibilities for the replacement:\r\nhttps://id3.org/id3v2.4.0-structure\r\n(with adverts) or\r\nhttps://github.com/id3/ID3v2.4/raw/master/id3v2.40-structure.txt\r\n(without adverts)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, absolute_import, print_function\n\nAUTHOR = u'Adrian Sampson'\n\n# General configuration\n\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.extlinks']\n\nexclude_patterns = ['_build']\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject = u'beets'\ncopyright = u'2016, Adrian Sampson'\n\nversion = '1.5'\nrelease = '1.5.0'\n\npygments_style = 'sphinx'\n\n# External links to the bug tracker and other sites.\nextlinks = {\n 'bug': ('https://github.com/beetbox/beets/issues/%s', '#'),\n 'user': ('https://github.com/%s', ''),\n 'pypi': ('https://pypi.org/project/%s/', ''),\n 'stdlib': ('https://docs.python.org/3/library/%s.html', ''),\n}\n\n# Options for HTML output\nhtmlhelp_basename = 'beetsdoc'\n\n# Options for LaTeX output\nlatex_documents = [\n ('index', 'beets.tex', u'beets Documentation',\n AUTHOR, 'manual'),\n]\n\n# Options for manual page output\nman_pages = [\n ('reference/cli', 'beet', u'music tagger and library organizer',\n [AUTHOR], 1),\n ('reference/config', 'beetsconfig', u'beets configuration file',\n [AUTHOR], 5),\n]\n", "path": "docs/conf.py"}]}
| 1,080 | 137 |
gh_patches_debug_27634
|
rasdani/github-patches
|
git_diff
|
adap__flower-465
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve docstring for `start_server`
</issue>
<code>
[start of src/py/flwr/server/app.py]
1 # Copyright 2020 Adap GmbH. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Flower server app."""
16
17
18 from logging import INFO
19 from typing import Dict, Optional
20
21 from flwr.common import GRPC_MAX_MESSAGE_LENGTH
22 from flwr.common.logger import log
23 from flwr.server.client_manager import SimpleClientManager
24 from flwr.server.grpc_server.grpc_server import start_insecure_grpc_server
25 from flwr.server.server import Server
26 from flwr.server.strategy import FedAvg, Strategy
27
28 DEFAULT_SERVER_ADDRESS = "[::]:8080"
29
30
31 def start_server(
32 server_address: str = DEFAULT_SERVER_ADDRESS,
33 server: Optional[Server] = None,
34 config: Optional[Dict[str, int]] = None,
35 strategy: Optional[Strategy] = None,
36 grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
37 ) -> None:
38 """Start a Flower server using the gRPC transport layer."""
39
40 # Create server instance if none was given
41 if server is None:
42 client_manager = SimpleClientManager()
43 if strategy is None:
44 strategy = FedAvg()
45 server = Server(client_manager=client_manager, strategy=strategy)
46
47 # Set default config values
48 if config is None:
49 config = {}
50 if "num_rounds" not in config:
51 config["num_rounds"] = 1
52
53 # Start gRPC server
54 grpc_server = start_insecure_grpc_server(
55 client_manager=server.client_manager(),
56 server_address=server_address,
57 max_message_length=grpc_max_message_length,
58 )
59 log(INFO, "Flower server running (insecure, %s rounds)", config["num_rounds"])
60
61 # Fit model
62 hist = server.fit(num_rounds=config["num_rounds"])
63 log(INFO, "app_fit: losses_distributed %s", str(hist.losses_distributed))
64 log(INFO, "app_fit: accuracies_distributed %s", str(hist.accuracies_distributed))
65 log(INFO, "app_fit: losses_centralized %s", str(hist.losses_centralized))
66 log(INFO, "app_fit: accuracies_centralized %s", str(hist.accuracies_centralized))
67
68 # Temporary workaround to force distributed evaluation
69 server.strategy.eval_fn = None # type: ignore
70
71 # Evaluate the final trained model
72 res = server.evaluate(rnd=-1)
73 if res is not None:
74 loss, (results, failures) = res
75 log(INFO, "app_evaluate: federated loss: %s", str(loss))
76 log(
77 INFO,
78 "app_evaluate: results %s",
79 str([(res[0].cid, res[1]) for res in results]),
80 )
81 log(INFO, "app_evaluate: failures %s", str(failures))
82 else:
83 log(INFO, "app_evaluate: no evaluation result")
84
85 # Stop the gRPC server
86 grpc_server.stop(1)
87
[end of src/py/flwr/server/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py
--- a/src/py/flwr/server/app.py
+++ b/src/py/flwr/server/app.py
@@ -35,7 +35,33 @@
strategy: Optional[Strategy] = None,
grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,
) -> None:
- """Start a Flower server using the gRPC transport layer."""
+ """Start a Flower server using the gRPC transport layer.
+
+ Arguments:
+ server_address: Optional[str] (default: `"[::]:8080"`). The IPv6
+ address of the server.
+ server: Optional[flwr.server.Server] (default: None). An implementation
+ of the abstract base class `flwr.server.Server`. If no instance is
+ provided, then `start_server` will create one.
+ config: Optional[Dict[str, int]] (default: None). The only currently
+ supported values is `num_rounds`, so a full configuration object
+ instructing the server to perform three rounds of federated
+ learning looks like the following: `{"num_rounds": 3}`.
+ strategy: Optional[flwr.server.Strategy] (default: None). An
+ implementation of the abstract base class `flwr.server.Strategy`.
+ If no strategy is provided, then `start_server` will use
+ `flwr.server.strategy.FedAvg`.
+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).
+ The maximum length of gRPC messages that can be exchanged with the
+ Flower clients. The default should be sufficient for most models.
+ Users who train very large models might need to increase this
+ value. Note that the Flower clients needs to started with the same
+ value (see `flwr.client.start_client`), otherwise clients will not
+ know about the increased limit and block larger messages.
+
+ Returns:
+ None.
+ """
# Create server instance if none was given
if server is None:
|
{"golden_diff": "diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py\n--- a/src/py/flwr/server/app.py\n+++ b/src/py/flwr/server/app.py\n@@ -35,7 +35,33 @@\n strategy: Optional[Strategy] = None,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n ) -> None:\n- \"\"\"Start a Flower server using the gRPC transport layer.\"\"\"\n+ \"\"\"Start a Flower server using the gRPC transport layer.\n+\n+ Arguments:\n+ server_address: Optional[str] (default: `\"[::]:8080\"`). The IPv6\n+ address of the server.\n+ server: Optional[flwr.server.Server] (default: None). An implementation\n+ of the abstract base class `flwr.server.Server`. If no instance is\n+ provided, then `start_server` will create one.\n+ config: Optional[Dict[str, int]] (default: None). The only currently\n+ supported values is `num_rounds`, so a full configuration object\n+ instructing the server to perform three rounds of federated\n+ learning looks like the following: `{\"num_rounds\": 3}`.\n+ strategy: Optional[flwr.server.Strategy] (default: None). An\n+ implementation of the abstract base class `flwr.server.Strategy`.\n+ If no strategy is provided, then `start_server` will use\n+ `flwr.server.strategy.FedAvg`.\n+ grpc_max_message_length: int (default: 536_870_912, this equals 512MB).\n+ The maximum length of gRPC messages that can be exchanged with the\n+ Flower clients. The default should be sufficient for most models.\n+ Users who train very large models might need to increase this\n+ value. Note that the Flower clients needs to started with the same\n+ value (see `flwr.client.start_client`), otherwise clients will not\n+ know about the increased limit and block larger messages.\n+\n+ Returns:\n+ None.\n+ \"\"\"\n \n # Create server instance if none was given\n if server is None:\n", "issue": "Improve docstring for `start_server`\n\n", "before_files": [{"content": "# Copyright 2020 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Flower server app.\"\"\"\n\n\nfrom logging import INFO\nfrom typing import Dict, Optional\n\nfrom flwr.common import GRPC_MAX_MESSAGE_LENGTH\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import SimpleClientManager\nfrom flwr.server.grpc_server.grpc_server import start_insecure_grpc_server\nfrom flwr.server.server import Server\nfrom flwr.server.strategy import FedAvg, Strategy\n\nDEFAULT_SERVER_ADDRESS = \"[::]:8080\"\n\n\ndef start_server(\n server_address: str = DEFAULT_SERVER_ADDRESS,\n server: Optional[Server] = None,\n config: Optional[Dict[str, int]] = None,\n strategy: Optional[Strategy] = None,\n grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH,\n) -> None:\n \"\"\"Start a Flower server using the gRPC transport layer.\"\"\"\n\n # Create server instance if none was given\n if server is None:\n client_manager = SimpleClientManager()\n if strategy is None:\n strategy = FedAvg()\n server = Server(client_manager=client_manager, strategy=strategy)\n\n # Set default config values\n if config is None:\n config = {}\n if \"num_rounds\" not in config:\n config[\"num_rounds\"] = 1\n\n # Start gRPC server\n grpc_server = start_insecure_grpc_server(\n client_manager=server.client_manager(),\n server_address=server_address,\n max_message_length=grpc_max_message_length,\n )\n log(INFO, \"Flower server running (insecure, %s rounds)\", config[\"num_rounds\"])\n\n # Fit model\n hist = server.fit(num_rounds=config[\"num_rounds\"])\n log(INFO, \"app_fit: losses_distributed %s\", str(hist.losses_distributed))\n log(INFO, \"app_fit: accuracies_distributed %s\", str(hist.accuracies_distributed))\n log(INFO, \"app_fit: losses_centralized %s\", str(hist.losses_centralized))\n log(INFO, \"app_fit: accuracies_centralized %s\", str(hist.accuracies_centralized))\n\n # Temporary workaround to force distributed evaluation\n server.strategy.eval_fn = None # type: ignore\n\n # Evaluate the final trained model\n res = server.evaluate(rnd=-1)\n if res is not None:\n loss, (results, failures) = res\n log(INFO, \"app_evaluate: federated loss: %s\", str(loss))\n log(\n INFO,\n \"app_evaluate: results %s\",\n str([(res[0].cid, res[1]) for res in results]),\n )\n log(INFO, \"app_evaluate: failures %s\", str(failures))\n else:\n log(INFO, \"app_evaluate: no evaluation result\")\n\n # Stop the gRPC server\n grpc_server.stop(1)\n", "path": "src/py/flwr/server/app.py"}]}
| 1,468 | 476 |
gh_patches_debug_67061
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-1044
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Desktop: Previous Page Button hover color
A clear and concise description of the task.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [ ] Hover color is blue, look at Figma
## Additional context
<!-- Add any other context about the task here -->
</issue>
<code>
[start of benefits/core/viewmodels.py]
1 """
2 The core application: view model definitions for the root of the webapp.
3 """
4 from django.utils.translation import pgettext, gettext_lazy as _
5 from django.urls import reverse
6
7 from benefits.core import models
8
9 from . import session
10
11
12 class Button:
13 """
14 Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):
15 * classes: str, str[]
16 * id: str
17 * fallback_text: str
18 * label: str
19 * text: str
20 * url: str
21 * target: str
22 * rel: str
23 """
24
25 def __init__(self, **kwargs):
26 classes = kwargs.get("classes", [])
27 if isinstance(classes, str):
28 classes = classes.split()
29
30 self.classes = ["btn", "btn-lg"]
31 self.classes.extend(classes)
32 self.id = kwargs.get("id")
33 self.fallback_text = kwargs.get("fallback_text")
34 self.label = kwargs.get("label")
35 self.text = kwargs.get("text", "Button")
36 self.url = kwargs.get("url")
37 self.target = kwargs.get("target")
38 self.rel = kwargs.get("rel")
39
40 @staticmethod
41 def agency_contact_links(agency):
42 """Create link buttons for agency contact information."""
43 return [
44 Button.link(classes="agency", label=agency.long_name, text=agency.phone, url=f"tel:{agency.phone}"),
45 Button.link(
46 classes="agency", text=agency.info_url, url=agency.info_url, target="_blank", rel="noopener noreferrer"
47 ),
48 ]
49
50 @staticmethod
51 def home(request, text=None):
52 """Create a button back to this session's origin."""
53 if text is None:
54 text = _("core.buttons.return_home")
55
56 return Button.primary(text=text, url=session.origin(request))
57
58 @staticmethod
59 def link(**kwargs):
60 classes = kwargs.pop("classes", [])
61 if isinstance(classes, str):
62 classes = classes.split(" ")
63 classes.insert(0, "btn-link")
64 return Button(classes=classes, **kwargs)
65
66 @staticmethod
67 def primary(**kwargs):
68 classes = kwargs.pop("classes", [])
69 if isinstance(classes, str):
70 classes = classes.split(" ")
71 classes.insert(0, "btn-primary")
72 return Button(classes=classes, **kwargs)
73
74 @staticmethod
75 def outline_primary(**kwargs):
76 classes = kwargs.pop("classes", [])
77 if isinstance(classes, str):
78 classes = classes.split(" ")
79 classes.insert(0, "btn-outline-primary")
80 return Button(classes=classes, **kwargs)
81
82 @staticmethod
83 def login(**kwargs):
84 """Create a login.gov button, with a login.gov logo and fallback text"""
85 btn = Button.primary(fallback_text="Login.gov", id="login", **kwargs)
86 return btn
87
88 @staticmethod
89 def logout(**kwargs):
90 """Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text"""
91 btn = Button.primary(fallback_text="Login.gov", id="login", url=reverse("oauth:logout"), text="", **kwargs)
92 return btn
93
94 @staticmethod
95 def previous_page(url):
96 kwargs = {"id": "previous-page-button", "text": _("core.buttons.previous_page"), "url": url}
97 btn = Button(**kwargs)
98 btn.classes.append("btn-outline-dark")
99 return btn
100
101
102 class Icon:
103 """Represents an icon."""
104
105 def __init__(self, icon, alt):
106 self.src = f"img/icon/{icon}.svg"
107 self.alt = alt
108
109
110 class MediaItem:
111 """
112 Represents a media item in a list of items:
113 * icon: core.viewmodels.Icon
114 * details: str, str[]
115 * heading: str
116 * bullets: str, str[]
117 """
118
119 def __init__(self, icon: Icon, details, heading=None, bullets=None):
120 self.icon = icon
121 if isinstance(details, str):
122 self.details = [details]
123 elif isinstance(details, list):
124 self.details = details
125 else:
126 self.details = [str(details)]
127 self.heading = heading
128 if isinstance(bullets, str):
129 self.bullets = [bullets]
130 elif isinstance(bullets, list):
131 self.bullets = bullets
132
133
134 class Page:
135 """
136 Represents a page of content:
137 * title: str
138 * icon: core.viewmodels.Icon
139 * headline: str
140 * paragraphs: str[]
141 * form: django.forms.Form
142 * forms: django.forms.Form[]
143 * button: core.viewmodels.Button
144 * buttons: core.viewmodels.Button[]
145 * classes: str[]
146 """
147
148 def __init__(self, **kwargs):
149 self.title = kwargs.get("title")
150 if self.title is None:
151 self.title = _("core.pages.index.prefix")
152 else:
153 self.title = f"{_('core.pages.index.prefix')}: {self.title}"
154
155 self.icon = kwargs.get("icon")
156 self.headline = kwargs.get("headline")
157 self.paragraphs = kwargs.get("paragraphs", [])
158 self.steps = kwargs.get("steps")
159
160 self.forms = kwargs.get("forms", [])
161 if not isinstance(self.forms, list):
162 self.forms = [self.forms]
163 if "form" in kwargs:
164 self.forms.append(kwargs.get("form"))
165
166 self.buttons = kwargs.get("buttons", [])
167 if not isinstance(self.buttons, list):
168 self.buttons = [self.buttons]
169 if "button" in kwargs:
170 self.buttons.append(kwargs.get("button"))
171
172 self.classes = kwargs.get("classes", [])
173 if not isinstance(self.classes, list):
174 self.classes = self.classes.split(" ")
175
176 def context_dict(self):
177 """Return a context dict for a Page."""
178 return {"page": self}
179
180
181 class ErrorPage(Page):
182 """
183 Represents an error page:
184 * title: str
185 * icon: core.viewmodels.Icon
186 * headline: str
187 * paragraphs: str[]
188 * button: core.viewmodels.Button
189 """
190
191 def __init__(self, **kwargs):
192 super().__init__(
193 title=kwargs.get("title", _("core.pages.error.title")),
194 icon=kwargs.get("icon", Icon("sadbus", pgettext("image alt text", "core.icons.sadbus"))),
195 headline=kwargs.get("headline", _("core.pages.error.title")),
196 paragraphs=kwargs.get("paragraphs", [_("core.pages.server_error.headline")]),
197 button=kwargs.get("button"),
198 )
199
200 @staticmethod
201 def user_error(
202 title=_("core.pages.user_error.title"),
203 headline=_("core.pages.user_error.headline"),
204 paragraphs=[_("core.pages.user_error.p[0]")],
205 **kwargs,
206 ):
207 """Create a new core.viewmodels.ErrorPage instance with defaults for a user error."""
208 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
209
210 @staticmethod
211 def server_error(
212 title=_("core.pages.server_error.title"),
213 headline=_("core.pages.server_error.title"),
214 paragraphs=[_("core.pages.server_error.p[0]")],
215 **kwargs,
216 ):
217 """Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error."""
218 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
219
220 @staticmethod
221 def not_found(
222 title=_("core.pages.not_found.title"),
223 headline=_("core.pages.not_found.headline"),
224 paragraphs=[_("core.pages.not_found.p[0]")],
225 **kwargs,
226 ):
227 """Create a new core.viewmodels.ErrorPage with defaults for a 404."""
228 path = kwargs.pop("path", None)
229 if path and title:
230 title = f"{title}: {path}"
231 elif path and not title:
232 title = path
233 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
234
235
236 class PaymentProcessor:
237 """
238 Represents a core.models.PaymentProcessor:
239 * model: core.models.PaymentProcessor
240 * access_token_url: str
241 * element_id: str
242 * color: str
243 * [name: str]
244 * [loading_text: str]
245 """
246
247 def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_("core.buttons.wait")):
248 if isinstance(model, models.PaymentProcessor):
249 self.access_token_url = access_token_url
250 self.element_id = element_id
251 self.color = color
252 self.name = name or model.name
253 self.loading_text = loading_text
254 self.card_tokenize_url = model.card_tokenize_url
255 self.card_tokenize_func = model.card_tokenize_func
256 self.card_tokenize_env = model.card_tokenize_env
257
258 def context_dict(self):
259 """Return a context dict for a PaymentProcessor."""
260 return {"payment_processor": self}
261
262
263 class TransitAgency:
264 """
265 Represents a core.models.TransitAgency:
266 * model: core.models.TransitAgency
267 """
268
269 def __init__(self, model):
270 if isinstance(model, models.TransitAgency):
271 self.slug = model.slug
272 self.short_name = model.short_name
273 self.long_name = model.long_name
274 self.agency_id = model.agency_id
275 self.merchant_id = model.merchant_id
276 self.info_url = model.info_url
277 self.phone = model.phone
278
279 def context_dict(self):
280 """Return a context dict for a TransitAgency."""
281 return {"agency": self}
282
[end of benefits/core/viewmodels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py
--- a/benefits/core/viewmodels.py
+++ b/benefits/core/viewmodels.py
@@ -93,10 +93,7 @@
@staticmethod
def previous_page(url):
- kwargs = {"id": "previous-page-button", "text": _("core.buttons.previous_page"), "url": url}
- btn = Button(**kwargs)
- btn.classes.append("btn-outline-dark")
- return btn
+ return Button(text=_("core.buttons.previous_page"), url=url)
class Icon:
|
{"golden_diff": "diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py\n--- a/benefits/core/viewmodels.py\n+++ b/benefits/core/viewmodels.py\n@@ -93,10 +93,7 @@\n \n @staticmethod\n def previous_page(url):\n- kwargs = {\"id\": \"previous-page-button\", \"text\": _(\"core.buttons.previous_page\"), \"url\": url}\n- btn = Button(**kwargs)\n- btn.classes.append(\"btn-outline-dark\")\n- return btn\n+ return Button(text=_(\"core.buttons.previous_page\"), url=url)\n \n \n class Icon:\n", "issue": "Desktop: Previous Page Button hover color\nA clear and concise description of the task.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] Hover color is blue, look at Figma\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the task here -->\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = 
Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n @staticmethod\n def previous_page(url):\n kwargs = {\"id\": \"previous-page-button\", \"text\": _(\"core.buttons.previous_page\"), \"url\": url}\n btn = Button(**kwargs)\n btn.classes.append(\"btn-outline-dark\")\n return btn\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass MediaItem:\n \"\"\"\n Represents a media item in a list of items:\n * icon: core.viewmodels.Icon\n * details: str, str[]\n * heading: str\n * bullets: str, str[]\n \"\"\"\n\n def __init__(self, icon: Icon, details, heading=None, bullets=None):\n self.icon = icon\n if isinstance(details, str):\n self.details = [details]\n elif isinstance(details, list):\n self.details = details\n else:\n self.details = [str(details)]\n self.heading = heading\n if isinstance(bullets, str):\n self.bullets = [bullets]\n elif isinstance(bullets, list):\n self.bullets = bullets\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.icon = kwargs.get(\"icon\")\n self.headline = kwargs.get(\"headline\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n headline=kwargs.get(\"headline\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.headline\")]),\n button=kwargs.get(\"button\"),\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n headline=_(\"core.pages.user_error.headline\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n headline=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new 
core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n headline=_(\"core.pages.not_found.headline\"),\n paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}]}
| 3,369 | 131 |
gh_patches_debug_1168
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-4326
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use a utf8 friendly latex backend
The current sphinx configuration in docs/conf.py defaults to pdflatex. This is causing problems on #4169 which introduces API-level doctests with unicode characters in them. I tried several iterations of lualatex and xelatex to try and get it to work, but latex errors are never the most helpful.
I will open a PR to resolve this shortly.
</issue>
<code>
[start of doc/conf.py]
1 from datetime import date
2 from sphinx_gallery.sorting import ExplicitOrder
3 import sphinx_rtd_theme
4 from warnings import filterwarnings
5
6 filterwarnings(
7 "ignore", message="Matplotlib is currently using agg", category=UserWarning
8 )
9
10 # General configuration
11 # ---------------------
12
13 # Add any Sphinx extension module names here, as strings. They can be extensions
14 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
15 extensions = [
16 "sphinx.ext.autosummary",
17 "sphinx.ext.autodoc",
18 "sphinx.ext.coverage",
19 "sphinx.ext.doctest",
20 "sphinx.ext.intersphinx",
21 "sphinx.ext.mathjax",
22 "sphinx.ext.napoleon",
23 "sphinx.ext.todo",
24 "sphinx.ext.viewcode",
25 "sphinx_gallery.gen_gallery",
26 "nb2plots",
27 "texext",
28 ]
29
30 # https://github.com/sphinx-gallery/sphinx-gallery
31 sphinx_gallery_conf = {
32 # path to your examples scripts
33 "examples_dirs": "../examples",
34 "subsection_order": ExplicitOrder(
35 [
36 "../examples/basic",
37 "../examples/drawing",
38 "../examples/graph",
39 "../examples/algorithms",
40 "../examples/advanced",
41 "../examples/3d_drawing",
42 "../examples/pygraphviz",
43 "../examples/geospatial",
44 "../examples/javascript",
45 "../examples/jit",
46 "../examples/applications",
47 "../examples/subclass",
48 ]
49 ),
50 # path where to save gallery generated examples
51 "gallery_dirs": "auto_examples",
52 "backreferences_dir": "modules/generated",
53 }
54
55 # generate autosummary pages
56 autosummary_generate = True
57
58 # Add any paths that contain templates here, relative to this directory.
59 templates_path = ["_templates"]
60
61 suppress_warnings = ["ref.citation", "ref.footnote"]
62
63 # The suffix of source filenames.
64 source_suffix = ".rst"
65
66 # The encoding of source files.
67 source_encoding = "utf-8"
68
69 # The master toctree document.
70 master_doc = "index"
71
72 # Do not include release announcement template
73 exclude_patterns = ["release/release_template.rst"]
74
75 # General substitutions.
76 project = "NetworkX"
77 copyright = f"2004-{date.today().year}, NetworkX Developers"
78
79 # The default replacements for |version| and |release|, also used in various
80 # other places throughout the built documents.
81 #
82 # The short X.Y version.
83 import networkx
84
85 version = networkx.__version__
86 # The full version, including dev info
87 release = networkx.__version__.replace("_", "")
88
89 # There are two options for replacing |today|: either, you set today to some
90 # non-false value, then it is used:
91 # today = ''
92 # Else, today_fmt is used as the format for a strftime call.
93 # today_fmt = '%B %d, %Y'
94
95 # List of documents that shouldn't be included in the build.
96 # unused_docs = ['']
97
98 # If true, '()' will be appended to :func: etc. cross-reference text.
99 # add_function_parentheses = True
100
101 # If true, the current module name will be prepended to all description
102 # unit titles (such as .. function::).
103 add_module_names = False
104
105 # show_authors = True
106
107 # The name of the Pygments (syntax highlighting) style to use.
108 # pygments_style = 'friendly'
109 pygments_style = "sphinx"
110
111 # A list of prefixs that are ignored when creating the module index. (new in Sphinx 0.6)
112 modindex_common_prefix = ["networkx."]
113
114 doctest_global_setup = "import networkx as nx"
115
116 # treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``
117 napoleon_use_param = False
118
119 # Options for HTML output
120 # -----------------------
121
122
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125
126 html_theme_options = {
127 "canonical_url": "https://networkx.org/documentation/stable/",
128 "navigation_depth": 3,
129 "logo_only": True,
130 }
131
132 html_logo = "_static/networkx_logo.svg"
133
134 # The style sheet to use for HTML and HTML Help pages. A file of that name
135 # must exist either in Sphinx' static/ path, or in one of the custom paths
136 # given in html_static_path.
137 # html_style = ''
138
139 # Add any paths that contain custom static files (such as style sheets) here,
140 # relative to this directory. They are copied after the builtin static files,
141 # so a file named "default.css" will overwrite the builtin "default.css".
142 html_static_path = ["_static"]
143
144 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
145 # using the given strftime format.
146 html_last_updated_fmt = "%b %d, %Y"
147
148 # If true, SmartyPants will be used to convert quotes and dashes to
149 # typographically correct entities.
150 # html_use_smartypants = True
151
152 # Content template for the index page.
153 # html_index = 'index.html'
154
155 # Custom sidebar templates, maps page names to templates.
156 # html_sidebars = {}
157
158 # Additional templates that should be rendered to pages, maps page names to
159 # templates.
160 # html_additional_pages = {'': ''}
161
162 # If true, the reST sources are included in the HTML build as _sources/<name>.
163 html_copy_source = False
164
165 html_use_opensearch = "https://networkx.org"
166
167 # Output file base name for HTML help builder.
168 htmlhelp_basename = "NetworkX"
169
170 # Options for LaTeX output
171 # ------------------------
172
173 # The paper size ('letter' or 'a4').
174 latex_paper_size = "letter"
175
176 # The font size ('10pt', '11pt' or '12pt').
177 # latex_font_size = '10pt'
178
179 # Grouping the document tree into LaTeX files. List of tuples
180 # (source start file, target name, title, author, document class [howto/manual]).
181 latex_documents = [
182 (
183 "reference/index",
184 "networkx_reference.tex",
185 "NetworkX Reference",
186 "Aric Hagberg, Dan Schult, Pieter Swart",
187 "manual",
188 1,
189 )
190 ]
191
192 latex_appendices = ["tutorial"]
193
194 # Intersphinx mapping
195 intersphinx_mapping = {
196 "https://docs.python.org/3/": None,
197 "https://numpy.org/doc/stable/": None,
198 }
199
200 # The reST default role (used for this markup: `text`) to use for all
201 # documents.
202 default_role = "obj"
203
204 numpydoc_show_class_members = False
205
206
207 def setup(app):
208 app.add_css_file("custom.css")
209 app.add_js_file("copybutton.js")
210
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -170,6 +170,8 @@
# Options for LaTeX output
# ------------------------
+# Use a latex engine that allows for unicode characters in docstrings
+latex_engine = "xelatex"
# The paper size ('letter' or 'a4').
latex_paper_size = "letter"
|
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -170,6 +170,8 @@\n # Options for LaTeX output\n # ------------------------\n \n+# Use a latex engine that allows for unicode characters in docstrings\n+latex_engine = \"xelatex\"\n # The paper size ('letter' or 'a4').\n latex_paper_size = \"letter\"\n", "issue": "Use a utf8 friendly latex backend\nThe current sphinx configuration in docs/conf.py defaults to pdflatex. This is causing problems on #4169 which introduces API-level doctests with unicode characters in them. I tried several iterations of lualatex and xelatex to try and get it to work, but latex errors are never the most helpful.\r\n\r\nI will open a PR to resolve this shortly. \n", "before_files": [{"content": "from datetime import date\nfrom sphinx_gallery.sorting import ExplicitOrder\nimport sphinx_rtd_theme\nfrom warnings import filterwarnings\n\nfilterwarnings(\n \"ignore\", message=\"Matplotlib is currently using agg\", category=UserWarning\n)\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx_gallery.gen_gallery\",\n \"nb2plots\",\n \"texext\",\n]\n\n# https://github.com/sphinx-gallery/sphinx-gallery\nsphinx_gallery_conf = {\n # path to your examples scripts\n \"examples_dirs\": \"../examples\",\n \"subsection_order\": ExplicitOrder(\n [\n \"../examples/basic\",\n \"../examples/drawing\",\n \"../examples/graph\",\n \"../examples/algorithms\",\n \"../examples/advanced\",\n \"../examples/3d_drawing\",\n \"../examples/pygraphviz\",\n \"../examples/geospatial\",\n \"../examples/javascript\",\n \"../examples/jit\",\n \"../examples/applications\",\n \"../examples/subclass\",\n ]\n ),\n # path where to save gallery generated examples\n \"gallery_dirs\": \"auto_examples\",\n \"backreferences_dir\": \"modules/generated\",\n}\n\n# generate autosummary pages\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\nsuppress_warnings = [\"ref.citation\", \"ref.footnote\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\nsource_encoding = \"utf-8\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# Do not include release announcement template\nexclude_patterns = [\"release/release_template.rst\"]\n\n# General substitutions.\nproject = \"NetworkX\"\ncopyright = f\"2004-{date.today().year}, NetworkX Developers\"\n\n# The default replacements for |version| and |release|, also used in various\n# other places throughout the built documents.\n#\n# The short X.Y version.\nimport networkx\n\nversion = networkx.__version__\n# The full version, including dev info\nrelease = networkx.__version__.replace(\"_\", \"\")\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = ['']\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# show_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'friendly'\npygments_style = \"sphinx\"\n\n# A list of prefixs that are ignored when creating the module index. (new in Sphinx 0.6)\nmodindex_common_prefix = [\"networkx.\"]\n\ndoctest_global_setup = \"import networkx as nx\"\n\n# treat ``x, y : type`` as vars x and y instead of default ``y(x,) : type``\nnapoleon_use_param = False\n\n# Options for HTML output\n# -----------------------\n\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://networkx.org/documentation/stable/\",\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/networkx_logo.svg\"\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = \"%b %d, %Y\"\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Content template for the index page.\n# html_index = 'index.html'\n\n# Custom sidebar templates, maps page names to templates.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# templates.\n# html_additional_pages = {'': ''}\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = False\n\nhtml_use_opensearch = \"https://networkx.org\"\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"NetworkX\"\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = \"letter\"\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n (\n \"reference/index\",\n \"networkx_reference.tex\",\n \"NetworkX Reference\",\n \"Aric Hagberg, Dan Schult, Pieter Swart\",\n \"manual\",\n 1,\n )\n]\n\nlatex_appendices = [\"tutorial\"]\n\n# Intersphinx mapping\nintersphinx_mapping = {\n \"https://docs.python.org/3/\": None,\n \"https://numpy.org/doc/stable/\": None,\n}\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = \"obj\"\n\nnumpydoc_show_class_members = False\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n app.add_js_file(\"copybutton.js\")\n", "path": "doc/conf.py"}]}
| 2,607 | 91 |
gh_patches_debug_5034
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-6357
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
is_paytm_activated shouldn't be a read only attribute
**Describe the bug**
Alipay, Omise, and PayPal each have a property to enable them (e.g. `is_paypal_activated`). They were hybrid properties (the condition was roughly `if client_key and secret_key then true`), so they had `dump_only=True`. We need to remove this, as we've moved away from that practice. The reason is that the keys need to be stored regardless of whether the property is enabled or disabled.
**Additional context**
Working on this.
</issue>
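For reference, `dump_only=True` is what makes a marshmallow field read-only through the API: the value is serialized on dump but ignored on load. A minimal sketch of that behaviour, using plain marshmallow as a stand-in for the project's marshmallow_jsonapi schema (field names here are invented for illustration):

```python
from marshmallow import EXCLUDE, Schema, fields


class PaymentFlagsSketch(Schema):
    class Meta:
        unknown = EXCLUDE  # silently drop keys that cannot be loaded

    read_only_flag = fields.Bool(dump_only=True)  # serialized, but ignored on load
    writable_flag = fields.Bool()                 # accepted on load as well


print(PaymentFlagsSketch().load({"read_only_flag": True, "writable_flag": True}))
# -> {'writable_flag': True}; the dump_only flag never reaches the loaded data
```

Dropping `dump_only` on `is_paytm_activated`, as the issue proposes, is what allows the flag to be set independently of whether the gateway keys are present.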
<code>
[start of app/api/schema/settings.py]
1 from marshmallow_jsonapi import fields
2 from marshmallow_jsonapi.flask import Schema
3
4 from app.api.helpers.utilities import dasherize
5 from app.settings import Environment
6 from utils.common import use_defaults
7
8
9 class SettingSchemaPublic(Schema):
10 """
11 Public Api schema for settings Model
12 """
13 class Meta:
14 """
15 Meta class for setting Api Schema
16 """
17 type_ = 'setting'
18 self_view = 'v1.setting_detail'
19 self_view_kwargs = {'id': '<id>'}
20 inflect = dasherize
21
22 id = fields.Str(dump_only=True)
23
24 # Name of the application. (Eg. Event Yay!, Open Event)
25 app_name = fields.Str(allow_none=True)
26
27 # Tagline for the application. (Eg. Event Management and Ticketing, Home)
28 tagline = fields.Str(allow_none=True)
29
30 # Order Expiry Time
31 order_expiry_time = fields.Integer(allow_none=False, default=15, validate=lambda n: 1 <= n <= 60)
32
33 # Google Analytics
34 analytics_key = fields.Str(allow_none=True)
35
36 # FB
37 fb_client_id = fields.Str(allow_none=True)
38
39 #
40 # Social links
41 #
42 google_url = fields.Str(allow_none=True)
43 github_url = fields.Str(allow_none=True)
44 twitter_url = fields.Str(allow_none=True)
45 support_url = fields.Str(allow_none=True)
46 facebook_url = fields.Str(allow_none=True)
47 youtube_url = fields.Str(allow_none=True)
48
49 # Url of Frontend
50 frontend_url = fields.Url(allow_none=True)
51
52 #
53 # Cookie Policy
54 #
55 cookie_policy = fields.Str(allow_none=True)
56 cookie_policy_link = fields.Str(allow_none=True)
57
58 #
59 # Online Payment Flags
60 #
61 is_paytm_activated = fields.Bool(dump_only=True, allow_none=False, default=False)
62 is_paypal_activated = fields.Bool(dump_only=True)
63 is_stripe_activated = fields.Bool(dump_only=True)
64 is_omise_activated = fields.Bool(dump_only=True)
65 is_alipay_activated = fields.Bool(dump_only=True)
66
67
68 class SettingSchemaNonAdmin(SettingSchemaPublic):
69 """
70 Non Admin Api schema for settings Model
71 """
72 class Meta:
73 """
74 Meta class for setting Api Schema
75 """
76 type_ = 'setting'
77 self_view = 'v1.setting_detail'
78 self_view_kwargs = {'id': '<id>'}
79 inflect = dasherize
80
81 id = fields.Str(dump_only=True)
82
83 # Stripe Keys
84 stripe_client_id = fields.Str(allow_none=True)
85 stripe_publishable_key = fields.Str(allow_none=True)
86 stripe_test_secret_key = fields.Str(allow_none=True)
87 stripe_test_publishable_key = fields.Str(allow_none=True)
88
89 #
90 # Generators
91 #
92 android_app_url = fields.Str(allow_none=True)
93 web_app_url = fields.Str(allow_none=True)
94
95
96 @use_defaults()
97 class SettingSchemaAdmin(SettingSchemaNonAdmin):
98 """
99 Admin Api schema for settings Model
100 """
101 class Meta:
102 """
103 Meta class for setting Api Schema
104 """
105 type_ = 'setting'
106 self_view = 'v1.setting_detail'
107 self_view_kwargs = {'id': '<id>'}
108 inflect = dasherize
109
110 id = fields.Str(dump_only=True)
111 #
112 # General
113 #
114
115 app_environment = fields.Str(default=Environment.PRODUCTION)
116
117 # App secret
118 secret = fields.Str(allow_none=True)
119 # Static domain
120 static_domain = fields.Str(allow_none=True)
121
122 #
123 # STORAGE
124 #
125
126 # storage place, local, s3, .. can be more in future
127 storage_place = fields.Str(allow_none=True)
128 # S3
129 aws_key = fields.Str(allow_none=True)
130 aws_secret = fields.Str(allow_none=True)
131 aws_bucket_name = fields.Str(allow_none=True)
132 aws_region = fields.Str(allow_none=True)
133 # Google Storage
134 gs_key = fields.Str(allow_none=True)
135 gs_secret = fields.Str(allow_none=True)
136 gs_bucket_name = fields.Str(allow_none=True)
137
138 #
139 # CAPTCHA
140 #
141
142 # Google reCAPTCHA
143 is_google_recaptcha_enabled = fields.Bool(allow_none=False, default=False)
144 google_recaptcha_site = fields.Str(allow_none=True)
145 google_recaptcha_secret = fields.Str(allow_none=True)
146
147 #
148 # Social Login
149 #
150
151 # Google Auth
152 google_client_id = fields.Str(allow_none=True)
153 google_client_secret = fields.Str(allow_none=True)
154 # FB
155 fb_client_id = fields.Str(allow_none=True)
156 fb_client_secret = fields.Str(allow_none=True)
157 # Twitter
158 tw_consumer_key = fields.Str(allow_none=True)
159 tw_consumer_secret = fields.Str(allow_none=True)
160 # Instagram
161 in_client_id = fields.Str(allow_none=True)
162 in_client_secret = fields.Str(allow_none=True)
163
164 #
165 # Payment Gateway
166 #
167
168 # Stripe secret key
169 stripe_secret_key = fields.Str(allow_none=True)
170
171 # PayPal Credentials
172 paypal_mode = fields.Str(allow_none=True)
173 paypal_client = fields.Str(allow_none=True)
174 paypal_secret = fields.Str(allow_none=True)
175 paypal_sandbox_client = fields.Str(allow_none=True)
176 paypal_sandbox_secret = fields.Str(allow_none=True)
177
178 # Omise Credentials
179 omise_mode = fields.Str(allow_none=True)
180 omise_test_public = fields.Str(allow_none=True)
181 omise_test_secret = fields.Str(allow_none=True)
182 omise_live_public = fields.Str(allow_none=True)
183 omise_live_secret = fields.Str(allow_none=True)
184
185 #
186 alipay_publishable_key = fields.Str(allow_none=True)
187 alipay_secret_key = fields.Str(allow_none=True)
188
189 # payTM credentials
190 paytm_mode = fields.Str(allow_none=True)
191 paytm_live_merchant = fields.Str(allow_none=True)
192 paytm_live_secret = fields.Str(allow_none=True)
193 paytm_sandbox_merchant = fields.Str(allow_none=True)
194 paytm_sandbox_secret = fields.Str(allow_none=True)
195 #
196 # EMAIL
197 #
198
199 # Email service. (sendgrid,smtp)
200 email_service = fields.Str(allow_none=True)
201 email_from = fields.Str(allow_none=True)
202 email_from_name = fields.Str(allow_none=True)
203 # Sendgrid
204 sendgrid_key = fields.Str(allow_none=True)
205 # SMTP
206 smtp_host = fields.Str(allow_none=True)
207 smtp_username = fields.Str(allow_none=True)
208 smtp_password = fields.Str(allow_none=True)
209 smtp_port = fields.Integer(allow_none=True)
210 smtp_encryption = fields.Str(allow_none=True) # Can be tls, ssl, none
211
212 # Event Invoices settings
213 invoice_sending_day = fields.Integer(allow_none=False, default=1)
214 invoice_sending_timezone = fields.Str(allow_none=False, default="UTC")
215
216 # Admin Invoice Details
217 admin_billing_contact_name = fields.Str(allow_none=True)
218 admin_billing_phone = fields.Str(allow_none=True)
219 admin_billing_email = fields.Email(allow_none=True)
220 admin_billing_state = fields.Str(allow_none=True)
221 admin_billing_country = fields.Str(allow_none=True)
222 admin_billing_tax_info = fields.Str(allow_none=True)
223 admin_company = fields.Str(allow_none=True)
224 admin_billing_address = fields.Str(allow_none=True)
225 admin_billing_city = fields.Str(allow_none=True)
226 admin_billing_zip = fields.Str(allow_none=True)
227 admin_billing_additional_info = fields.Str(allow_none=True)
228
[end of app/api/schema/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/schema/settings.py b/app/api/schema/settings.py
--- a/app/api/schema/settings.py
+++ b/app/api/schema/settings.py
@@ -58,7 +58,7 @@
#
# Online Payment Flags
#
- is_paytm_activated = fields.Bool(dump_only=True, allow_none=False, default=False)
+ is_paytm_activated = fields.Bool(default=False)
is_paypal_activated = fields.Bool(dump_only=True)
is_stripe_activated = fields.Bool(dump_only=True)
is_omise_activated = fields.Bool(dump_only=True)
|
{"golden_diff": "diff --git a/app/api/schema/settings.py b/app/api/schema/settings.py\n--- a/app/api/schema/settings.py\n+++ b/app/api/schema/settings.py\n@@ -58,7 +58,7 @@\n #\n # Online Payment Flags\n #\n- is_paytm_activated = fields.Bool(dump_only=True, allow_none=False, default=False)\n+ is_paytm_activated = fields.Bool(default=False)\n is_paypal_activated = fields.Bool(dump_only=True)\n is_stripe_activated = fields.Bool(dump_only=True)\n is_omise_activated = fields.Bool(dump_only=True)\n", "issue": "is_paytm_activated shouldn't be a read only attribute\n**Describe the bug**\r\n Alipay, Omise, and PayPal have a property to enable them(e.g. `is_paypal_activated`). They were hybrid properties(condition was like `if client_key and secret_key then true`), so they had a dump_only=True. We need to remove this as we've moved away from such a practice. The reason for this is because we need the keys to be stored, whether the property is enabled/disabled\r\n\r\n**Additional context**\r\nWorking on this.\n", "before_files": [{"content": "from marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Schema\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.settings import Environment\nfrom utils.common import use_defaults\n\n\nclass SettingSchemaPublic(Schema):\n \"\"\"\n Public Api schema for settings Model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for setting Api Schema\n \"\"\"\n type_ = 'setting'\n self_view = 'v1.setting_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n\n # Name of the application. (Eg. Event Yay!, Open Event)\n app_name = fields.Str(allow_none=True)\n\n # Tagline for the application. (Eg. Event Management and Ticketing, Home)\n tagline = fields.Str(allow_none=True)\n\n # Order Expiry Time\n order_expiry_time = fields.Integer(allow_none=False, default=15, validate=lambda n: 1 <= n <= 60)\n\n # Google Analytics\n analytics_key = fields.Str(allow_none=True)\n\n # FB\n fb_client_id = fields.Str(allow_none=True)\n\n #\n # Social links\n #\n google_url = fields.Str(allow_none=True)\n github_url = fields.Str(allow_none=True)\n twitter_url = fields.Str(allow_none=True)\n support_url = fields.Str(allow_none=True)\n facebook_url = fields.Str(allow_none=True)\n youtube_url = fields.Str(allow_none=True)\n\n # Url of Frontend\n frontend_url = fields.Url(allow_none=True)\n\n #\n # Cookie Policy\n #\n cookie_policy = fields.Str(allow_none=True)\n cookie_policy_link = fields.Str(allow_none=True)\n\n #\n # Online Payment Flags\n #\n is_paytm_activated = fields.Bool(dump_only=True, allow_none=False, default=False)\n is_paypal_activated = fields.Bool(dump_only=True)\n is_stripe_activated = fields.Bool(dump_only=True)\n is_omise_activated = fields.Bool(dump_only=True)\n is_alipay_activated = fields.Bool(dump_only=True)\n\n\nclass SettingSchemaNonAdmin(SettingSchemaPublic):\n \"\"\"\n Non Admin Api schema for settings Model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for setting Api Schema\n \"\"\"\n type_ = 'setting'\n self_view = 'v1.setting_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n\n # Stripe Keys\n stripe_client_id = fields.Str(allow_none=True)\n stripe_publishable_key = fields.Str(allow_none=True)\n stripe_test_secret_key = fields.Str(allow_none=True)\n stripe_test_publishable_key = fields.Str(allow_none=True)\n\n #\n # Generators\n #\n android_app_url = fields.Str(allow_none=True)\n web_app_url = 
fields.Str(allow_none=True)\n\n\n@use_defaults()\nclass SettingSchemaAdmin(SettingSchemaNonAdmin):\n \"\"\"\n Admin Api schema for settings Model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for setting Api Schema\n \"\"\"\n type_ = 'setting'\n self_view = 'v1.setting_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n #\n # General\n #\n\n app_environment = fields.Str(default=Environment.PRODUCTION)\n\n # App secret\n secret = fields.Str(allow_none=True)\n # Static domain\n static_domain = fields.Str(allow_none=True)\n\n #\n # STORAGE\n #\n\n # storage place, local, s3, .. can be more in future\n storage_place = fields.Str(allow_none=True)\n # S3\n aws_key = fields.Str(allow_none=True)\n aws_secret = fields.Str(allow_none=True)\n aws_bucket_name = fields.Str(allow_none=True)\n aws_region = fields.Str(allow_none=True)\n # Google Storage\n gs_key = fields.Str(allow_none=True)\n gs_secret = fields.Str(allow_none=True)\n gs_bucket_name = fields.Str(allow_none=True)\n\n #\n # CAPTCHA\n #\n\n # Google reCAPTCHA\n is_google_recaptcha_enabled = fields.Bool(allow_none=False, default=False)\n google_recaptcha_site = fields.Str(allow_none=True)\n google_recaptcha_secret = fields.Str(allow_none=True)\n\n #\n # Social Login\n #\n\n # Google Auth\n google_client_id = fields.Str(allow_none=True)\n google_client_secret = fields.Str(allow_none=True)\n # FB\n fb_client_id = fields.Str(allow_none=True)\n fb_client_secret = fields.Str(allow_none=True)\n # Twitter\n tw_consumer_key = fields.Str(allow_none=True)\n tw_consumer_secret = fields.Str(allow_none=True)\n # Instagram\n in_client_id = fields.Str(allow_none=True)\n in_client_secret = fields.Str(allow_none=True)\n\n #\n # Payment Gateway\n #\n\n # Stripe secret key\n stripe_secret_key = fields.Str(allow_none=True)\n\n # PayPal Credentials\n paypal_mode = fields.Str(allow_none=True)\n paypal_client = fields.Str(allow_none=True)\n paypal_secret = fields.Str(allow_none=True)\n paypal_sandbox_client = fields.Str(allow_none=True)\n paypal_sandbox_secret = fields.Str(allow_none=True)\n\n # Omise Credentials\n omise_mode = fields.Str(allow_none=True)\n omise_test_public = fields.Str(allow_none=True)\n omise_test_secret = fields.Str(allow_none=True)\n omise_live_public = fields.Str(allow_none=True)\n omise_live_secret = fields.Str(allow_none=True)\n\n #\n alipay_publishable_key = fields.Str(allow_none=True)\n alipay_secret_key = fields.Str(allow_none=True)\n\n # payTM credentials\n paytm_mode = fields.Str(allow_none=True)\n paytm_live_merchant = fields.Str(allow_none=True)\n paytm_live_secret = fields.Str(allow_none=True)\n paytm_sandbox_merchant = fields.Str(allow_none=True)\n paytm_sandbox_secret = fields.Str(allow_none=True)\n #\n # EMAIL\n #\n\n # Email service. 
(sendgrid,smtp)\n email_service = fields.Str(allow_none=True)\n email_from = fields.Str(allow_none=True)\n email_from_name = fields.Str(allow_none=True)\n # Sendgrid\n sendgrid_key = fields.Str(allow_none=True)\n # SMTP\n smtp_host = fields.Str(allow_none=True)\n smtp_username = fields.Str(allow_none=True)\n smtp_password = fields.Str(allow_none=True)\n smtp_port = fields.Integer(allow_none=True)\n smtp_encryption = fields.Str(allow_none=True) # Can be tls, ssl, none\n\n # Event Invoices settings\n invoice_sending_day = fields.Integer(allow_none=False, default=1)\n invoice_sending_timezone = fields.Str(allow_none=False, default=\"UTC\")\n\n # Admin Invoice Details\n admin_billing_contact_name = fields.Str(allow_none=True)\n admin_billing_phone = fields.Str(allow_none=True)\n admin_billing_email = fields.Email(allow_none=True)\n admin_billing_state = fields.Str(allow_none=True)\n admin_billing_country = fields.Str(allow_none=True)\n admin_billing_tax_info = fields.Str(allow_none=True)\n admin_company = fields.Str(allow_none=True)\n admin_billing_address = fields.Str(allow_none=True)\n admin_billing_city = fields.Str(allow_none=True)\n admin_billing_zip = fields.Str(allow_none=True)\n admin_billing_additional_info = fields.Str(allow_none=True)\n", "path": "app/api/schema/settings.py"}]}
| 2,934 | 132 |
gh_patches_debug_2933
|
rasdani/github-patches
|
git_diff
|
conda__conda-5009
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When lacking permissions to write, clone message should quote prefix.
When trying to install a new package into a location where the user lacks write permissions (a read-only root), conda helpfully suggests cloning the environment into a new location:
```
CondaIOError: IO error: Missing write permissions in: C:\Program Files\Anaconda
#
# You don't appear to have the necessary permissions to install packages
# into the install area 'C:\Program Files\Anaconda'.
# However you can clone this environment into your home directory and
# then make changes to it.
# This may be done using the command:
#
# $ conda create -n my_deathstar --clone=C:\Program Files\Anaconda\envs\deathstar
```
As shown in the example above, this clone path may include spaces. This will be particularly common on Windows, where a global install writes files to Program Files, a directory that contains a space and that a non-administrator user cannot write to. Because the command presents a prefix, it should be quoted to guard against this case.
</issue>
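For reference, a small sketch of why the quoting matters, using the Windows path from the error message above and a simplified version of the suggested command:

```python
prefix = r"C:\Program Files\Anaconda\envs\deathstar"

unquoted = f"conda create -n my_deathstar --clone={prefix}"
quoted = f'conda create -n my_deathstar --clone="{prefix}"'

print(unquoted)  # a shell splits this at the space in "Program Files"
print(quoted)    # the quotes keep the whole prefix as a single --clone value
```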
<code>
[start of conda/cli/help.py]
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from os.path import join
4
5 from .common import name_prefix
6 from ..base.context import context
7 from ..exceptions import CondaIOError
8
9
10 def read_message(fn):
11 res = []
12 for envs_dir in context.envs_dirs:
13 path = join(envs_dir, '.conda-help', fn)
14 try:
15 with open(path) as fi:
16 s = fi.read().decode('utf-8')
17 s = s.replace('${envs_dir}', envs_dir)
18 res.append(s)
19 except IOError:
20 pass
21 return ''.join(res)
22
23
24 def root_read_only(command, prefix, json=False):
25 assert command in {'install', 'update', 'remove'}
26
27 msg = read_message('ro.txt')
28 if not msg:
29 msg = """\
30 Missing write permissions in: ${root_dir}
31 #
32 # You don't appear to have the necessary permissions to ${command} packages
33 # into the install area '${root_dir}'.
34 # However you can clone this environment into your home directory and
35 # then make changes to it.
36 # This may be done using the command:
37 #
38 # $ conda create -n my_${name} --clone=${prefix}
39 """
40 msg = msg.replace('${root_dir}', context.root_prefix)
41 msg = msg.replace('${prefix}', prefix)
42 msg = msg.replace('${name}', name_prefix(prefix))
43 msg = msg.replace('${command}', command)
44 raise CondaIOError(msg)
45
[end of conda/cli/help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/cli/help.py b/conda/cli/help.py
--- a/conda/cli/help.py
+++ b/conda/cli/help.py
@@ -35,7 +35,7 @@
# then make changes to it.
# This may be done using the command:
#
-# $ conda create -n my_${name} --clone=${prefix}
+# $ conda create -n my_${name} --clone="${prefix}"
"""
msg = msg.replace('${root_dir}', context.root_prefix)
msg = msg.replace('${prefix}', prefix)
|
{"golden_diff": "diff --git a/conda/cli/help.py b/conda/cli/help.py\n--- a/conda/cli/help.py\n+++ b/conda/cli/help.py\n@@ -35,7 +35,7 @@\n # then make changes to it.\n # This may be done using the command:\n #\n-# $ conda create -n my_${name} --clone=${prefix}\n+# $ conda create -n my_${name} --clone=\"${prefix}\"\n \"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n", "issue": "When lacking permissions to write, clone message should quote prefix.\nWhen trying to install a new package into a location that the user lacks write permissions (read-only root), conda helpfully suggests cloning the environment into a new location:\r\n\r\n```\r\nCondaIOError: IO error: Missing write permissions in: C:\\Program Files\\Anaconda\r\n#\r\n# You don't appear to have the necessary permissions to install packages\r\n# into the install area 'C:\\Program Files\\Anaconda'.\r\n# However you can clone this environment into your home directory and\r\n# then make changes to it.\r\n# This may be done using the command:\r\n#\r\n# $ conda create -n my_deathstar --clone=C:\\Program Files\\Anaconda\\envs\\deathstar\r\n```\r\nAs shown in the example above, this clone path may include spaces. This will be particularly common on Windows, where a global install will result in files written to Program Files, which a non-administrator user will not be able to write to, and contains spaces. Because the command presents a prefix, it should be quoted to guard against this case.\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import join\n\nfrom .common import name_prefix\nfrom ..base.context import context\nfrom ..exceptions import CondaIOError\n\n\ndef read_message(fn):\n res = []\n for envs_dir in context.envs_dirs:\n path = join(envs_dir, '.conda-help', fn)\n try:\n with open(path) as fi:\n s = fi.read().decode('utf-8')\n s = s.replace('${envs_dir}', envs_dir)\n res.append(s)\n except IOError:\n pass\n return ''.join(res)\n\n\ndef root_read_only(command, prefix, json=False):\n assert command in {'install', 'update', 'remove'}\n\n msg = read_message('ro.txt')\n if not msg:\n msg = \"\"\"\\\nMissing write permissions in: ${root_dir}\n#\n# You don't appear to have the necessary permissions to ${command} packages\n# into the install area '${root_dir}'.\n# However you can clone this environment into your home directory and\n# then make changes to it.\n# This may be done using the command:\n#\n# $ conda create -n my_${name} --clone=${prefix}\n\"\"\"\n msg = msg.replace('${root_dir}', context.root_prefix)\n msg = msg.replace('${prefix}', prefix)\n msg = msg.replace('${name}', name_prefix(prefix))\n msg = msg.replace('${command}', command)\n raise CondaIOError(msg)\n", "path": "conda/cli/help.py"}]}
| 1,165 | 118 |
gh_patches_debug_37961
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-2465
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Falcon integration does not respect custom exception handlers
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.5.6
### Steps to Reproduce
1. Attach a custom exception handler:
```
import marshmallow
def marshmallow_validation_error_handler(req, res, exc, params):
"""Transforms a marshmallow validation error into the appropriate HTTP response."""
raise falcon.HTTPError(400)
api.add_error_handler(marshmallow.ValidationError, marshmallow_validation_error_handler)
```
2. Do an API request that leads to a validation error
### Expected Result
The `marshmallow` exception should not have been reported to Sentry.
### Actual Result
The custom exception leads to a 4xx, not a 5xx, yet it is still reported as an unhandled error on Sentry.
</issue>
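One way to tell these cases apart is to look not only at the exception type but also at the status the application ultimately wrote to the response. A rough, standalone sketch of such a check (an illustration of the idea, not the integration's actual code path):

```python
import falcon


def should_report(exc, resp):
    # Report only exceptions that actually surface to the client as a 5xx.
    # An exception that a custom error handler converted into a 4xx response
    # (like the marshmallow example above) is treated as handled.
    is_server_http_error = isinstance(exc, falcon.HTTPError) and (
        exc.status or ""
    ).startswith("5")
    is_unhandled = not isinstance(
        exc, (falcon.HTTPError, falcon.http_status.HTTPStatus)
    )
    return (is_server_http_error or is_unhandled) and resp.status.startswith("5")
```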
<code>
[start of sentry_sdk/integrations/falcon.py]
1 from __future__ import absolute_import
2
3 from sentry_sdk.hub import Hub
4 from sentry_sdk.integrations import Integration, DidNotEnable
5 from sentry_sdk.integrations._wsgi_common import RequestExtractor
6 from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
7 from sentry_sdk.tracing import SOURCE_FOR_STYLE
8 from sentry_sdk.utils import (
9 capture_internal_exceptions,
10 event_from_exception,
11 parse_version,
12 )
13
14 from sentry_sdk._types import TYPE_CHECKING
15
16 if TYPE_CHECKING:
17 from typing import Any
18 from typing import Dict
19 from typing import Optional
20
21 from sentry_sdk._types import EventProcessor
22
23 # In Falcon 3.0 `falcon.api_helpers` is renamed to `falcon.app_helpers`
24 # and `falcon.API` to `falcon.App`
25
26 try:
27 import falcon # type: ignore
28
29 from falcon import __version__ as FALCON_VERSION
30 except ImportError:
31 raise DidNotEnable("Falcon not installed")
32
33 try:
34 import falcon.app_helpers # type: ignore
35
36 falcon_helpers = falcon.app_helpers
37 falcon_app_class = falcon.App
38 FALCON3 = True
39 except ImportError:
40 import falcon.api_helpers # type: ignore
41
42 falcon_helpers = falcon.api_helpers
43 falcon_app_class = falcon.API
44 FALCON3 = False
45
46
47 class FalconRequestExtractor(RequestExtractor):
48 def env(self):
49 # type: () -> Dict[str, Any]
50 return self.request.env
51
52 def cookies(self):
53 # type: () -> Dict[str, Any]
54 return self.request.cookies
55
56 def form(self):
57 # type: () -> None
58 return None # No such concept in Falcon
59
60 def files(self):
61 # type: () -> None
62 return None # No such concept in Falcon
63
64 def raw_data(self):
65 # type: () -> Optional[str]
66
67 # As request data can only be read once we won't make this available
68 # to Sentry. Just send back a dummy string in case there was a
69 # content length.
70 # TODO(jmagnusson): Figure out if there's a way to support this
71 content_length = self.content_length()
72 if content_length > 0:
73 return "[REQUEST_CONTAINING_RAW_DATA]"
74 else:
75 return None
76
77 if FALCON3:
78
79 def json(self):
80 # type: () -> Optional[Dict[str, Any]]
81 try:
82 return self.request.media
83 except falcon.errors.HTTPBadRequest:
84 return None
85
86 else:
87
88 def json(self):
89 # type: () -> Optional[Dict[str, Any]]
90 try:
91 return self.request.media
92 except falcon.errors.HTTPBadRequest:
93 # NOTE(jmagnusson): We return `falcon.Request._media` here because
94 # falcon 1.4 doesn't do proper type checking in
95 # `falcon.Request.media`. This has been fixed in 2.0.
96 # Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953
97 return self.request._media
98
99
100 class SentryFalconMiddleware(object):
101 """Captures exceptions in Falcon requests and send to Sentry"""
102
103 def process_request(self, req, resp, *args, **kwargs):
104 # type: (Any, Any, *Any, **Any) -> None
105 hub = Hub.current
106 integration = hub.get_integration(FalconIntegration)
107 if integration is None:
108 return
109
110 with hub.configure_scope() as scope:
111 scope._name = "falcon"
112 scope.add_event_processor(_make_request_event_processor(req, integration))
113
114
115 TRANSACTION_STYLE_VALUES = ("uri_template", "path")
116
117
118 class FalconIntegration(Integration):
119 identifier = "falcon"
120
121 transaction_style = ""
122
123 def __init__(self, transaction_style="uri_template"):
124 # type: (str) -> None
125 if transaction_style not in TRANSACTION_STYLE_VALUES:
126 raise ValueError(
127 "Invalid value for transaction_style: %s (must be in %s)"
128 % (transaction_style, TRANSACTION_STYLE_VALUES)
129 )
130 self.transaction_style = transaction_style
131
132 @staticmethod
133 def setup_once():
134 # type: () -> None
135
136 version = parse_version(FALCON_VERSION)
137
138 if version is None:
139 raise DidNotEnable("Unparsable Falcon version: {}".format(FALCON_VERSION))
140
141 if version < (1, 4):
142 raise DidNotEnable("Falcon 1.4 or newer required.")
143
144 _patch_wsgi_app()
145 _patch_handle_exception()
146 _patch_prepare_middleware()
147
148
149 def _patch_wsgi_app():
150 # type: () -> None
151 original_wsgi_app = falcon_app_class.__call__
152
153 def sentry_patched_wsgi_app(self, env, start_response):
154 # type: (falcon.API, Any, Any) -> Any
155 hub = Hub.current
156 integration = hub.get_integration(FalconIntegration)
157 if integration is None:
158 return original_wsgi_app(self, env, start_response)
159
160 sentry_wrapped = SentryWsgiMiddleware(
161 lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)
162 )
163
164 return sentry_wrapped(env, start_response)
165
166 falcon_app_class.__call__ = sentry_patched_wsgi_app
167
168
169 def _patch_handle_exception():
170 # type: () -> None
171 original_handle_exception = falcon_app_class._handle_exception
172
173 def sentry_patched_handle_exception(self, *args):
174 # type: (falcon.API, *Any) -> Any
175 # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
176 # method signature from `(ex, req, resp, params)` to
177 # `(req, resp, ex, params)`
178 if isinstance(args[0], Exception):
179 ex = args[0]
180 else:
181 ex = args[2]
182
183 was_handled = original_handle_exception(self, *args)
184
185 hub = Hub.current
186 integration = hub.get_integration(FalconIntegration)
187
188 if integration is not None and _exception_leads_to_http_5xx(ex):
189 # If an integration is there, a client has to be there.
190 client = hub.client # type: Any
191
192 event, hint = event_from_exception(
193 ex,
194 client_options=client.options,
195 mechanism={"type": "falcon", "handled": False},
196 )
197 hub.capture_event(event, hint=hint)
198
199 return was_handled
200
201 falcon_app_class._handle_exception = sentry_patched_handle_exception
202
203
204 def _patch_prepare_middleware():
205 # type: () -> None
206 original_prepare_middleware = falcon_helpers.prepare_middleware
207
208 def sentry_patched_prepare_middleware(
209 middleware=None, independent_middleware=False, asgi=False
210 ):
211 # type: (Any, Any, bool) -> Any
212 if asgi:
213 # We don't support ASGI Falcon apps, so we don't patch anything here
214 return original_prepare_middleware(middleware, independent_middleware, asgi)
215
216 hub = Hub.current
217 integration = hub.get_integration(FalconIntegration)
218 if integration is not None:
219 middleware = [SentryFalconMiddleware()] + (middleware or [])
220
221 # We intentionally omit the asgi argument here, since the default is False anyways,
222 # and this way, we remain backwards-compatible with pre-3.0.0 Falcon versions.
223 return original_prepare_middleware(middleware, independent_middleware)
224
225 falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware
226
227
228 def _exception_leads_to_http_5xx(ex):
229 # type: (Exception) -> bool
230 is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
231 "5"
232 )
233 is_unhandled_error = not isinstance(
234 ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
235 )
236 return is_server_error or is_unhandled_error
237
238
239 def _set_transaction_name_and_source(event, transaction_style, request):
240 # type: (Dict[str, Any], str, falcon.Request) -> None
241 name_for_style = {
242 "uri_template": request.uri_template,
243 "path": request.path,
244 }
245 event["transaction"] = name_for_style[transaction_style]
246 event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
247
248
249 def _make_request_event_processor(req, integration):
250 # type: (falcon.Request, FalconIntegration) -> EventProcessor
251
252 def event_processor(event, hint):
253 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
254 _set_transaction_name_and_source(event, integration.transaction_style, req)
255
256 with capture_internal_exceptions():
257 FalconRequestExtractor(req).extract_into_event(event)
258
259 return event
260
261 return event_processor
262
[end of sentry_sdk/integrations/falcon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/falcon.py b/sentry_sdk/integrations/falcon.py
--- a/sentry_sdk/integrations/falcon.py
+++ b/sentry_sdk/integrations/falcon.py
@@ -175,17 +175,25 @@
# NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
# method signature from `(ex, req, resp, params)` to
# `(req, resp, ex, params)`
- if isinstance(args[0], Exception):
- ex = args[0]
- else:
- ex = args[2]
+ ex = response = None
+ with capture_internal_exceptions():
+ ex = next(argument for argument in args if isinstance(argument, Exception))
+ response = next(
+ argument for argument in args if isinstance(argument, falcon.Response)
+ )
was_handled = original_handle_exception(self, *args)
+ if ex is None or response is None:
+ # Both ex and response should have a non-None value at this point; otherwise,
+ # there is an error with the SDK that will have been captured in the
+ # capture_internal_exceptions block above.
+ return was_handled
+
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
- if integration is not None and _exception_leads_to_http_5xx(ex):
+ if integration is not None and _exception_leads_to_http_5xx(ex, response):
# If an integration is there, a client has to be there.
client = hub.client # type: Any
@@ -225,15 +233,28 @@
falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware
-def _exception_leads_to_http_5xx(ex):
- # type: (Exception) -> bool
+def _exception_leads_to_http_5xx(ex, response):
+ # type: (Exception, falcon.Response) -> bool
is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
"5"
)
is_unhandled_error = not isinstance(
ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
)
- return is_server_error or is_unhandled_error
+
+ # We only check the HTTP status on Falcon 3 because in Falcon 2, the status on the response
+ # at the stage where we capture it is listed as 200, even though we would expect to see a 500
+ # status. Since at the time of this change, Falcon 2 is ca. 4 years old, we have decided to
+ # only perform this check on Falcon 3+, despite the risk that some handled errors might be
+ # reported to Sentry as unhandled on Falcon 2.
+ return (is_server_error or is_unhandled_error) and (
+ not FALCON3 or _has_http_5xx_status(response)
+ )
+
+
+def _has_http_5xx_status(response):
+ # type: (falcon.Response) -> bool
+ return response.status.startswith("5")
def _set_transaction_name_and_source(event, transaction_style, request):
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/falcon.py b/sentry_sdk/integrations/falcon.py\n--- a/sentry_sdk/integrations/falcon.py\n+++ b/sentry_sdk/integrations/falcon.py\n@@ -175,17 +175,25 @@\n # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception\n # method signature from `(ex, req, resp, params)` to\n # `(req, resp, ex, params)`\n- if isinstance(args[0], Exception):\n- ex = args[0]\n- else:\n- ex = args[2]\n+ ex = response = None\n+ with capture_internal_exceptions():\n+ ex = next(argument for argument in args if isinstance(argument, Exception))\n+ response = next(\n+ argument for argument in args if isinstance(argument, falcon.Response)\n+ )\n \n was_handled = original_handle_exception(self, *args)\n \n+ if ex is None or response is None:\n+ # Both ex and response should have a non-None value at this point; otherwise,\n+ # there is an error with the SDK that will have been captured in the\n+ # capture_internal_exceptions block above.\n+ return was_handled\n+\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n \n- if integration is not None and _exception_leads_to_http_5xx(ex):\n+ if integration is not None and _exception_leads_to_http_5xx(ex, response):\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n \n@@ -225,15 +233,28 @@\n falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware\n \n \n-def _exception_leads_to_http_5xx(ex):\n- # type: (Exception) -> bool\n+def _exception_leads_to_http_5xx(ex, response):\n+ # type: (Exception, falcon.Response) -> bool\n is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or \"\").startswith(\n \"5\"\n )\n is_unhandled_error = not isinstance(\n ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)\n )\n- return is_server_error or is_unhandled_error\n+\n+ # We only check the HTTP status on Falcon 3 because in Falcon 2, the status on the response\n+ # at the stage where we capture it is listed as 200, even though we would expect to see a 500\n+ # status. Since at the time of this change, Falcon 2 is ca. 4 years old, we have decided to\n+ # only perform this check on Falcon 3+, despite the risk that some handled errors might be\n+ # reported to Sentry as unhandled on Falcon 2.\n+ return (is_server_error or is_unhandled_error) and (\n+ not FALCON3 or _has_http_5xx_status(response)\n+ )\n+\n+\n+def _has_http_5xx_status(response):\n+ # type: (falcon.Response) -> bool\n+ return response.status.startswith(\"5\")\n \n \n def _set_transaction_name_and_source(event, transaction_style, request):\n", "issue": "Falcon integration does not respect custom exception handlers\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.5.6\n\n### Steps to Reproduce\n\n1. Attach a custom exception handler:\r\n```\r\nimport marshmallow\r\n\r\ndef marshmallow_validation_error_handler(req, res, exc, params):\r\n \"\"\"Transforms a marshmallow validation error into the appropriate HTTP response.\"\"\"\r\n raise falcon.HTTPError(400)\r\n\r\napi.add_error_handler(marshmallow.ValidationError, marshmallow_validation_error_handler)\r\n```\r\n\r\n2. 
Do an API request that leads to a validation error\n\n### Expected Result\n\nThe `marshmallow` exception should not have been reported to Sentry.\n\n### Actual Result\n\nThe custom exception leads to a 4xx, not a 5xx, yet it is still reported as an unhandled error on Sentry.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor\nfrom sentry_sdk.integrations.wsgi import SentryWsgiMiddleware\nfrom sentry_sdk.tracing import SOURCE_FOR_STYLE\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n parse_version,\n)\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import Dict\n from typing import Optional\n\n from sentry_sdk._types import EventProcessor\n\n# In Falcon 3.0 `falcon.api_helpers` is renamed to `falcon.app_helpers`\n# and `falcon.API` to `falcon.App`\n\ntry:\n import falcon # type: ignore\n\n from falcon import __version__ as FALCON_VERSION\nexcept ImportError:\n raise DidNotEnable(\"Falcon not installed\")\n\ntry:\n import falcon.app_helpers # type: ignore\n\n falcon_helpers = falcon.app_helpers\n falcon_app_class = falcon.App\n FALCON3 = True\nexcept ImportError:\n import falcon.api_helpers # type: ignore\n\n falcon_helpers = falcon.api_helpers\n falcon_app_class = falcon.API\n FALCON3 = False\n\n\nclass FalconRequestExtractor(RequestExtractor):\n def env(self):\n # type: () -> Dict[str, Any]\n return self.request.env\n\n def cookies(self):\n # type: () -> Dict[str, Any]\n return self.request.cookies\n\n def form(self):\n # type: () -> None\n return None # No such concept in Falcon\n\n def files(self):\n # type: () -> None\n return None # No such concept in Falcon\n\n def raw_data(self):\n # type: () -> Optional[str]\n\n # As request data can only be read once we won't make this available\n # to Sentry. Just send back a dummy string in case there was a\n # content length.\n # TODO(jmagnusson): Figure out if there's a way to support this\n content_length = self.content_length()\n if content_length > 0:\n return \"[REQUEST_CONTAINING_RAW_DATA]\"\n else:\n return None\n\n if FALCON3:\n\n def json(self):\n # type: () -> Optional[Dict[str, Any]]\n try:\n return self.request.media\n except falcon.errors.HTTPBadRequest:\n return None\n\n else:\n\n def json(self):\n # type: () -> Optional[Dict[str, Any]]\n try:\n return self.request.media\n except falcon.errors.HTTPBadRequest:\n # NOTE(jmagnusson): We return `falcon.Request._media` here because\n # falcon 1.4 doesn't do proper type checking in\n # `falcon.Request.media`. 
This has been fixed in 2.0.\n # Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953\n return self.request._media\n\n\nclass SentryFalconMiddleware(object):\n \"\"\"Captures exceptions in Falcon requests and send to Sentry\"\"\"\n\n def process_request(self, req, resp, *args, **kwargs):\n # type: (Any, Any, *Any, **Any) -> None\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is None:\n return\n\n with hub.configure_scope() as scope:\n scope._name = \"falcon\"\n scope.add_event_processor(_make_request_event_processor(req, integration))\n\n\nTRANSACTION_STYLE_VALUES = (\"uri_template\", \"path\")\n\n\nclass FalconIntegration(Integration):\n identifier = \"falcon\"\n\n transaction_style = \"\"\n\n def __init__(self, transaction_style=\"uri_template\"):\n # type: (str) -> None\n if transaction_style not in TRANSACTION_STYLE_VALUES:\n raise ValueError(\n \"Invalid value for transaction_style: %s (must be in %s)\"\n % (transaction_style, TRANSACTION_STYLE_VALUES)\n )\n self.transaction_style = transaction_style\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n version = parse_version(FALCON_VERSION)\n\n if version is None:\n raise DidNotEnable(\"Unparsable Falcon version: {}\".format(FALCON_VERSION))\n\n if version < (1, 4):\n raise DidNotEnable(\"Falcon 1.4 or newer required.\")\n\n _patch_wsgi_app()\n _patch_handle_exception()\n _patch_prepare_middleware()\n\n\ndef _patch_wsgi_app():\n # type: () -> None\n original_wsgi_app = falcon_app_class.__call__\n\n def sentry_patched_wsgi_app(self, env, start_response):\n # type: (falcon.API, Any, Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if integration is None:\n return original_wsgi_app(self, env, start_response)\n\n sentry_wrapped = SentryWsgiMiddleware(\n lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)\n )\n\n return sentry_wrapped(env, start_response)\n\n falcon_app_class.__call__ = sentry_patched_wsgi_app\n\n\ndef _patch_handle_exception():\n # type: () -> None\n original_handle_exception = falcon_app_class._handle_exception\n\n def sentry_patched_handle_exception(self, *args):\n # type: (falcon.API, *Any) -> Any\n # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception\n # method signature from `(ex, req, resp, params)` to\n # `(req, resp, ex, params)`\n if isinstance(args[0], Exception):\n ex = args[0]\n else:\n ex = args[2]\n\n was_handled = original_handle_exception(self, *args)\n\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n\n if integration is not None and _exception_leads_to_http_5xx(ex):\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n ex,\n client_options=client.options,\n mechanism={\"type\": \"falcon\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n return was_handled\n\n falcon_app_class._handle_exception = sentry_patched_handle_exception\n\n\ndef _patch_prepare_middleware():\n # type: () -> None\n original_prepare_middleware = falcon_helpers.prepare_middleware\n\n def sentry_patched_prepare_middleware(\n middleware=None, independent_middleware=False, asgi=False\n ):\n # type: (Any, Any, bool) -> Any\n if asgi:\n # We don't support ASGI Falcon apps, so we don't patch anything here\n return original_prepare_middleware(middleware, independent_middleware, asgi)\n\n hub = Hub.current\n integration = hub.get_integration(FalconIntegration)\n if 
integration is not None:\n middleware = [SentryFalconMiddleware()] + (middleware or [])\n\n # We intentionally omit the asgi argument here, since the default is False anyways,\n # and this way, we remain backwards-compatible with pre-3.0.0 Falcon versions.\n return original_prepare_middleware(middleware, independent_middleware)\n\n falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware\n\n\ndef _exception_leads_to_http_5xx(ex):\n # type: (Exception) -> bool\n is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or \"\").startswith(\n \"5\"\n )\n is_unhandled_error = not isinstance(\n ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)\n )\n return is_server_error or is_unhandled_error\n\n\ndef _set_transaction_name_and_source(event, transaction_style, request):\n # type: (Dict[str, Any], str, falcon.Request) -> None\n name_for_style = {\n \"uri_template\": request.uri_template,\n \"path\": request.path,\n }\n event[\"transaction\"] = name_for_style[transaction_style]\n event[\"transaction_info\"] = {\"source\": SOURCE_FOR_STYLE[transaction_style]}\n\n\ndef _make_request_event_processor(req, integration):\n # type: (falcon.Request, FalconIntegration) -> EventProcessor\n\n def event_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n _set_transaction_name_and_source(event, integration.transaction_style, req)\n\n with capture_internal_exceptions():\n FalconRequestExtractor(req).extract_into_event(event)\n\n return event\n\n return event_processor\n", "path": "sentry_sdk/integrations/falcon.py"}]}
| 3,368 | 722 |
gh_patches_debug_610
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-23142
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ifft

Add an `ifft` (inverse FFT) wrapper to the JAX NumPy frontend in `ivy/functional/frontends/jax/numpy/fft.py`, mirroring the existing `fft` function.
</issue>
<code>
[start of ivy/functional/frontends/jax/numpy/fft.py]
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
4 from ivy.func_wrapper import with_unsupported_dtypes
5
6
7 @to_ivy_arrays_and_back
8 def fft(a, n=None, axis=-1, norm=None):
9 if norm is None:
10 norm = "backward"
11 return ivy.fft(a, axis, norm=norm, n=n)
12
13
14 @to_ivy_arrays_and_back
15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
16 def fftshift(x, axes=None, name=None):
17 shape = x.shape
18
19 if axes is None:
20 axes = tuple(range(x.ndim))
21 shifts = [(dim // 2) for dim in shape]
22 elif isinstance(axes, int):
23 shifts = shape[axes] // 2
24 else:
25 shifts = [shape[ax] // 2 for ax in axes]
26
27 roll = ivy.roll(x, shifts, axis=axes)
28
29 return roll
30
[end of ivy/functional/frontends/jax/numpy/fft.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py
--- a/ivy/functional/frontends/jax/numpy/fft.py
+++ b/ivy/functional/frontends/jax/numpy/fft.py
@@ -27,3 +27,10 @@
roll = ivy.roll(x, shifts, axis=axes)
return roll
+
+
+@to_ivy_arrays_and_back
+def ifft(a, n=None, axis=-1, norm=None):
+ if norm is None:
+ norm = "backward"
+ return ivy.ifft(a, axis, norm=norm, n=n)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py\n--- a/ivy/functional/frontends/jax/numpy/fft.py\n+++ b/ivy/functional/frontends/jax/numpy/fft.py\n@@ -27,3 +27,10 @@\n roll = ivy.roll(x, shifts, axis=axes)\n \n return roll\n+\n+\n+@to_ivy_arrays_and_back\n+def ifft(a, n=None, axis=-1, norm=None):\n+ if norm is None:\n+ norm = \"backward\"\n+ return ivy.ifft(a, axis, norm=norm, n=n)\n", "issue": "ifft\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}]}
| 839 | 155 |
gh_patches_debug_28326
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-5803
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Circular import error (again) in 3.4.3
Opening a new issue as the old one (https://github.com/pyca/cryptography/issues/5756) wasn't re-opened after my comment:
@reaperhulk
Hi,
still got some circular import problems here with cryptography 3.4.3:
`AttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular import)`
reproducer used (gencrypto.py):
```
pip3 install -U cryptography
Collecting cryptography
Downloading cryptography-3.4.3-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)
|████████████████████████████████| 3.2 MB 5.3 MB/s
python3 gencrypto.py
Traceback (most recent call last):
  File "gencrypto.py", line 2, in <module>
    from cryptography.hazmat.primitives.asymmetric import dh
  File "/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py", line 11, in <module>
    from cryptography.hazmat.primitives import serialization
  File "/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/__init__.py", line 15, in <module>
    from cryptography.hazmat.primitives.serialization.base import (
  File "/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/base.py", line 28, in <module>
    def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:
AttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular import)
```
**gencrypto.py**
```
import time
from cryptography.hazmat.primitives.asymmetric import dh

for i in [2048,3072,4096]:
    begin=time.time()
    params = dh.generate_parameters(
        generator=2,
        key_size=i,
    )
    end=time.time()
    print('took {}s for {} keysize'.format(int(end-begin), i))
```
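For readers unfamiliar with the failure mode, here is a minimal, self-contained sketch (the stub class and function names are invented for illustration; only the `dh.DHParameters` annotation mirrors the real code). A return annotation written as a plain expression is evaluated while the `def` statement executes, so it trips over the half-initialized module; a quoted forward reference is stored as a string and resolved lazily, which is how the fix shown further down sidesteps the cycle.
```python
# Illustrative sketch only -- not cryptography's actual code.
class _PartiallyInitializedModule:
    pass

dh = _PartiallyInitializedModule()   # "DHParameters" is not defined yet

try:
    # The annotation expression runs when the "def" statement executes,
    # failing exactly like the traceback above.
    def load_eager(data: bytes) -> dh.DHParameters:
        pass
except AttributeError as exc:
    print("eager annotation failed:", exc)

# A string forward reference is stored verbatim and only resolved on demand.
def load_lazy(data: bytes) -> "dh.DHParameters":
    pass

print(load_lazy.__annotations__["return"])   # -> 'dh.DHParameters'
```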
Create py.typed
The changelog lists:
> cryptography now has PEP 484 type hints on nearly all of its public APIs. Users can begin using them to type check their code with mypy.
but I'm still falling back to typeshed because this file is missing.
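For context on the py.typed request: PEP 561 has a package advertise its inline type hints by shipping an empty `py.typed` marker file next to the code. A hypothetical `setup.py` sketch (the package name and src layout are illustrative, not cryptography's actual build configuration) would look roughly like this:
```python
from setuptools import find_packages, setup

setup(
    name="examplepkg",                          # illustrative package name
    packages=find_packages("src"),
    package_dir={"": "src"},
    # Ship the empty marker so type checkers trust the inline annotations
    # instead of falling back to typeshed stubs.
    package_data={"examplepkg": ["py.typed"]},
    zip_safe=False,
)
```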
</issue>
<code>
[start of src/cryptography/hazmat/primitives/serialization/base.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import typing
7
8 from cryptography.hazmat._types import (
9 _PRIVATE_KEY_TYPES,
10 _PUBLIC_KEY_TYPES,
11 )
12 from cryptography.hazmat.backends import _get_backend
13 from cryptography.hazmat.primitives.asymmetric import dh
14
15
16 def load_pem_private_key(
17 data: bytes, password: typing.Optional[bytes], backend=None
18 ) -> _PRIVATE_KEY_TYPES:
19 backend = _get_backend(backend)
20 return backend.load_pem_private_key(data, password)
21
22
23 def load_pem_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:
24 backend = _get_backend(backend)
25 return backend.load_pem_public_key(data)
26
27
28 def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:
29 backend = _get_backend(backend)
30 return backend.load_pem_parameters(data)
31
32
33 def load_der_private_key(
34 data: bytes, password: typing.Optional[bytes], backend=None
35 ) -> _PRIVATE_KEY_TYPES:
36 backend = _get_backend(backend)
37 return backend.load_der_private_key(data, password)
38
39
40 def load_der_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:
41 backend = _get_backend(backend)
42 return backend.load_der_public_key(data)
43
44
45 def load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:
46 backend = _get_backend(backend)
47 return backend.load_der_parameters(data)
48
[end of src/cryptography/hazmat/primitives/serialization/base.py]
[start of src/cryptography/__about__.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 __all__ = [
7 "__title__",
8 "__summary__",
9 "__uri__",
10 "__version__",
11 "__author__",
12 "__email__",
13 "__license__",
14 "__copyright__",
15 ]
16
17 __title__ = "cryptography"
18 __summary__ = (
19 "cryptography is a package which provides cryptographic recipes"
20 " and primitives to Python developers."
21 )
22 __uri__ = "https://github.com/pyca/cryptography"
23
24 __version__ = "3.4.3"
25
26 __author__ = "The Python Cryptographic Authority and individual contributors"
27 __email__ = "[email protected]"
28
29 __license__ = "BSD or Apache License, Version 2.0"
30 __copyright__ = "Copyright 2013-2021 {}".format(__author__)
31
[end of src/cryptography/__about__.py]
[start of vectors/cryptography_vectors/__about__.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 __all__ = [
6 "__title__",
7 "__summary__",
8 "__uri__",
9 "__version__",
10 "__author__",
11 "__email__",
12 "__license__",
13 "__copyright__",
14 ]
15
16 __title__ = "cryptography_vectors"
17 __summary__ = "Test vectors for the cryptography package."
18
19 __uri__ = "https://github.com/pyca/cryptography"
20
21 __version__ = "3.4.3"
22
23 __author__ = "The Python Cryptographic Authority and individual contributors"
24 __email__ = "[email protected]"
25
26 __license__ = "BSD or Apache License, Version 2.0"
27 __copyright__ = "Copyright 2013-2021 %s" % __author__
28
[end of vectors/cryptography_vectors/__about__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py
--- a/src/cryptography/__about__.py
+++ b/src/cryptography/__about__.py
@@ -21,7 +21,7 @@
)
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "3.4.3"
+__version__ = "3.4.4"
__author__ = "The Python Cryptographic Authority and individual contributors"
__email__ = "[email protected]"
diff --git a/src/cryptography/hazmat/primitives/serialization/base.py b/src/cryptography/hazmat/primitives/serialization/base.py
--- a/src/cryptography/hazmat/primitives/serialization/base.py
+++ b/src/cryptography/hazmat/primitives/serialization/base.py
@@ -25,7 +25,7 @@
return backend.load_pem_public_key(data)
-def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:
+def load_pem_parameters(data: bytes, backend=None) -> "dh.DHParameters":
backend = _get_backend(backend)
return backend.load_pem_parameters(data)
@@ -42,6 +42,6 @@
return backend.load_der_public_key(data)
-def load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:
+def load_der_parameters(data: bytes, backend=None) -> "dh.DHParameters":
backend = _get_backend(backend)
return backend.load_der_parameters(data)
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py
--- a/vectors/cryptography_vectors/__about__.py
+++ b/vectors/cryptography_vectors/__about__.py
@@ -18,7 +18,7 @@
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "3.4.3"
+__version__ = "3.4.4"
__author__ = "The Python Cryptographic Authority and individual contributors"
__email__ = "[email protected]"
|
{"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -21,7 +21,7 @@\n )\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.4.3\"\n+__version__ = \"3.4.4\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\ndiff --git a/src/cryptography/hazmat/primitives/serialization/base.py b/src/cryptography/hazmat/primitives/serialization/base.py\n--- a/src/cryptography/hazmat/primitives/serialization/base.py\n+++ b/src/cryptography/hazmat/primitives/serialization/base.py\n@@ -25,7 +25,7 @@\n return backend.load_pem_public_key(data)\n \n \n-def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:\n+def load_pem_parameters(data: bytes, backend=None) -> \"dh.DHParameters\":\n backend = _get_backend(backend)\n return backend.load_pem_parameters(data)\n \n@@ -42,6 +42,6 @@\n return backend.load_der_public_key(data)\n \n \n-def load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:\n+def load_der_parameters(data: bytes, backend=None) -> \"dh.DHParameters\":\n backend = _get_backend(backend)\n return backend.load_der_parameters(data)\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -18,7 +18,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.4.3\"\n+__version__ = \"3.4.4\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\n", "issue": "Circular import error (again) in 3.4.3\nOpening a new issue as the old one (https://github.com/pyca/cryptography/issues/5756) wasn't re-opened after my comment:\r\n\r\n\r\n@reaperhulk\r\n\r\nHi,\r\n\r\nstill got some circular import problems here with cryptography 3.4.3 :\r\n\r\n`AttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular import)`\r\n\r\nreproducer used (gencrypto.py):\r\n\r\n```\r\npip3 install -U cryptography\r\nCollecting cryptography\r\n Downloading cryptography-3.4.3-cp36-abi3-manylinux2014_x86_64.whl (3.2 MB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 3.2 MB 5.3 MB/s \r\n\r\n\r\npython3 gencrypto.py\r\n\r\nTraceback (most recent call last):\r\n File \"gencrypto.py\", line 2, in <module>\r\n from cryptography.hazmat.primitives.asymmetric import dh\r\n File \"/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/dh.py\", line 11, in <module>\r\n from cryptography.hazmat.primitives import serialization\r\n File \"/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/__init__.py\", line 15, in <module>\r\n from cryptography.hazmat.primitives.serialization.base import (\r\n File \"/home/xxxxx/.local/lib/python3.8/site-packages/cryptography/hazmat/primitives/serialization/base.py\", line 28, in <module>\r\n def load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:\r\nAttributeError: partially initialized module 'cryptography.hazmat.primitives.asymmetric.dh' has no attribute 'DHParameters' (most likely due to a circular 
import)\r\n```\r\n\r\n**gencrypto.py**\r\n\r\n```\r\nimport time\r\nfrom cryptography.hazmat.primitives.asymmetric import dh\r\n\r\n\r\nfor i in [2048,3072,4096]:\r\n begin=time.time()\r\n params = dh.generate_parameters(\r\n generator=2,\r\n key_size=i,\r\n )\r\n end=time.time()\r\n print('took {}s for {} keysize'.format(int(end-begin), i))\r\n```\nCreate py.typed\nthe changelog lists:\r\n\r\n> cryptography now has PEP 484 type hints on nearly all of of its public APIs. Users can begin using them to type check their code with mypy.\r\n\r\nbut I'm still falling back to typeshed because this file is missing\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport typing\n\nfrom cryptography.hazmat._types import (\n _PRIVATE_KEY_TYPES,\n _PUBLIC_KEY_TYPES,\n)\nfrom cryptography.hazmat.backends import _get_backend\nfrom cryptography.hazmat.primitives.asymmetric import dh\n\n\ndef load_pem_private_key(\n data: bytes, password: typing.Optional[bytes], backend=None\n) -> _PRIVATE_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_pem_private_key(data, password)\n\n\ndef load_pem_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_pem_public_key(data)\n\n\ndef load_pem_parameters(data: bytes, backend=None) -> dh.DHParameters:\n backend = _get_backend(backend)\n return backend.load_pem_parameters(data)\n\n\ndef load_der_private_key(\n data: bytes, password: typing.Optional[bytes], backend=None\n) -> _PRIVATE_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_der_private_key(data, password)\n\n\ndef load_der_public_key(data: bytes, backend=None) -> _PUBLIC_KEY_TYPES:\n backend = _get_backend(backend)\n return backend.load_der_public_key(data)\n\n\ndef load_der_parameters(data: bytes, backend=None) -> dh.DHParameters:\n backend = _get_backend(backend)\n return backend.load_der_parameters(data)\n", "path": "src/cryptography/hazmat/primitives/serialization/base.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.4.3\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.4.3\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]}
| 2,133 | 461 |
gh_patches_debug_50232
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1720
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.79
On the docket:
+ [x] The --lock resolver only includes extras from the 1st encounter of a required project in its graph walk. #1717
+ [x] Support canonicalizing absolute paths in locks. (#1716)
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.78"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.78"
+__version__ = "2.1.79"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.78\"\n+__version__ = \"2.1.79\"\n", "issue": "Release 2.1.79\nOn the docket:\r\n+ [x] The --lock resolver only includes extras from the 1st encounter of a required project in its graph walk. #1717 \r\n+ [x] Support canonicalizing absolute paths in locks. (#1716)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.78\"\n", "path": "pex/version.py"}]}
| 646 | 96 |
gh_patches_debug_39308
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-2924
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MATLAB easyblock doesn't handle errors during install
The MATLAB installer script always returns 0, even if it fails due to, e.g., specifying the wrong key.
There is no way to view the reason why, because the installer doesn't output anything to stdout. I had to run the install command myself with a modified input file that specified an outputFile, which contained the actual errors.
We should at minimum specify the outputFile argument so that the error logs appear *somewhere*, and possibly check for any errors inside this file.
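As a rough sketch of the kind of check being proposed (the helper name is invented; the error strings are borrowed from the fix shown later in this entry): write the silent installer's log to an explicit output file, then scan it for known failure markers, because the exit status alone proves nothing.
```python
import re

def check_matlab_install_log(output_file,
                             patterns=("Not a valid key", "End - Unsuccessful")):
    """Raise if the installer's output file contains a known failure marker."""
    with open(output_file) as log:
        contents = log.read()
    for pattern in patterns:
        if re.search(pattern, contents, re.IGNORECASE):
            raise RuntimeError("MATLAB installer reported a failure: %s" % pattern)
```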
</issue>
<code>
[start of easybuild/easyblocks/m/matlab.py]
1 ##
2 # Copyright 2009-2023 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for installing MATLAB, implemented as an easyblock
27
28 @author: Stijn De Weirdt (Ghent University)
29 @author: Dries Verdegem (Ghent University)
30 @author: Kenneth Hoste (Ghent University)
31 @author: Pieter De Baets (Ghent University)
32 @author: Jens Timmerman (Ghent University)
33 @author: Fotis Georgatos (Uni.Lu, NTUA)
34 """
35 import re
36 import os
37 import stat
38 import tempfile
39
40 from distutils.version import LooseVersion
41
42 from easybuild.easyblocks.generic.packedbinary import PackedBinary
43 from easybuild.framework.easyconfig import CUSTOM
44 from easybuild.tools.build_log import EasyBuildError
45 from easybuild.tools.filetools import adjust_permissions, change_dir, copy_file, read_file, write_file
46 from easybuild.tools.py2vs3 import string_type
47 from easybuild.tools.run import run_cmd
48
49
50 class EB_MATLAB(PackedBinary):
51 """Support for installing MATLAB."""
52
53 def __init__(self, *args, **kwargs):
54 """Add extra config options specific to MATLAB."""
55 super(EB_MATLAB, self).__init__(*args, **kwargs)
56 self.comp_fam = None
57 self.configfile = os.path.join(self.builddir, 'my_installer_input.txt')
58
59 @staticmethod
60 def extra_options():
61 extra_vars = {
62 'java_options': ['-Xmx256m', "$_JAVA_OPTIONS value set for install and in module file.", CUSTOM],
63 'key': [None, "Installation key(s), make one install for each key. Single key or a list of keys", CUSTOM],
64 }
65 return PackedBinary.extra_options(extra_vars)
66
67 def configure_step(self):
68 """Configure MATLAB installation: create license file."""
69
70 licfile = self.cfg['license_file']
71 if licfile is None:
72 licserv = self.cfg['license_server']
73 if licserv is None:
74 licserv = os.getenv('EB_MATLAB_LICENSE_SERVER', 'license.example.com')
75 licport = self.cfg['license_server_port']
76 if licport is None:
77 licport = os.getenv('EB_MATLAB_LICENSE_SERVER_PORT', '00000')
78 # create license file
79 lictxt = '\n'.join([
80 "SERVER %s 000000000000 %s" % (licserv, licport),
81 "USE_SERVER",
82 ])
83
84 licfile = os.path.join(self.builddir, 'matlab.lic')
85 write_file(licfile, lictxt)
86
87 try:
88 copy_file(os.path.join(self.cfg['start_dir'], 'installer_input.txt'), self.configfile)
89 adjust_permissions(self.configfile, stat.S_IWUSR)
90
91 # read file in binary mode to avoid UTF-8 encoding issues when using Python 3,
92 # due to non-UTF-8 characters...
93 config = read_file(self.configfile, mode='rb')
94
95 # use raw byte strings (must be 'br', not 'rb'),
96 # required when using Python 3 because file was read in binary mode
97 regdest = re.compile(br"^# destinationFolder=.*", re.M)
98 regagree = re.compile(br"^# agreeToLicense=.*", re.M)
99 regmode = re.compile(br"^# mode=.*", re.M)
100 reglicpath = re.compile(br"^# licensePath=.*", re.M)
101
102 # must use byte-strings here when using Python 3, see above
103 config = regdest.sub(b"destinationFolder=%s" % self.installdir.encode('utf-8'), config)
104 config = regagree.sub(b"agreeToLicense=Yes", config)
105 config = regmode.sub(b"mode=silent", config)
106 config = reglicpath.sub(b"licensePath=%s" % licfile.encode('utf-8'), config)
107
108 write_file(self.configfile, config)
109
110 except IOError as err:
111 raise EasyBuildError("Failed to create installation config file %s: %s", self.configfile, err)
112
113 self.log.debug('configuration file written to %s:\n %s', self.configfile, config)
114
115 def install_step(self):
116 """MATLAB install procedure using 'install' command."""
117
118 src = os.path.join(self.cfg['start_dir'], 'install')
119
120 # make sure install script is executable
121 adjust_permissions(src, stat.S_IXUSR)
122
123 if LooseVersion(self.version) >= LooseVersion('2016b'):
124 perm_dirs = [os.path.join(self.cfg['start_dir'], 'bin', 'glnxa64')]
125 if LooseVersion(self.version) < LooseVersion('2021b'):
126 jdir = os.path.join(self.cfg['start_dir'], 'sys', 'java', 'jre', 'glnxa64', 'jre', 'bin')
127 perm_dirs.append(jdir)
128 for perm_dir in perm_dirs:
129 adjust_permissions(perm_dir, stat.S_IXUSR)
130
131 # make sure $DISPLAY is not defined, which may lead to (hard to trace) problems
132 # this is a workaround for not being able to specify --nodisplay to the install scripts
133 if 'DISPLAY' in os.environ:
134 os.environ.pop('DISPLAY')
135
136 if '_JAVA_OPTIONS' not in self.cfg['preinstallopts']:
137 java_opts = 'export _JAVA_OPTIONS="%s" && ' % self.cfg['java_options']
138 self.cfg['preinstallopts'] = java_opts + self.cfg['preinstallopts']
139 if LooseVersion(self.version) >= LooseVersion('2016b'):
140 change_dir(self.builddir)
141
142 # Build the cmd string
143 cmdlist = [
144 self.cfg['preinstallopts'],
145 src,
146 '-inputFile',
147 self.configfile,
148 ]
149 if LooseVersion(self.version) < LooseVersion('2020a'):
150 # MATLAB installers < 2020a ignore $TMPDIR (always use /tmp) and might need a large tmpdir
151 tmpdir = tempfile.mkdtemp()
152 cmdlist.extend([
153 '-v',
154 '-tmpdir',
155 tmpdir,
156 ])
157 cmdlist.append(self.cfg['installopts'])
158 cmd = ' '.join(cmdlist)
159
160 keys = self.cfg['key']
161 if keys is None:
162 try:
163 keys = os.environ['EB_MATLAB_KEY']
164 except KeyError:
165 raise EasyBuildError("The MATLAB install key is not set. This can be set either with the environment "
166 "variable EB_MATLAB_KEY or by the easyconfig variable 'key'.")
167 if isinstance(keys, string_type):
168 keys = keys.split(',')
169
170 # Compile the installation key regex outside of the loop
171 regkey = re.compile(br"^(# )?fileInstallationKey=.*", re.M)
172
173 # Run an install for each key
174 for i, key in enumerate(keys):
175
176 self.log.info('Installing MATLAB with key %s of %s', i + 1, len(keys))
177
178 try:
179 config = read_file(self.configfile, mode='rb')
180 config = regkey.sub(b"fileInstallationKey=%s" % key.encode('utf-8'), config)
181 write_file(self.configfile, config)
182
183 except IOError as err:
184 raise EasyBuildError("Failed to update config file %s: %s", self.configfile, err)
185
186 (out, _) = run_cmd(cmd, log_all=True, simple=False)
187
188 # check installer output for known signs of trouble
189 patterns = [
190 "Error: You have entered an invalid File Installation Key",
191 ]
192
193 for pattern in patterns:
194 regex = re.compile(pattern, re.I)
195 if regex.search(out):
196 raise EasyBuildError("Found error pattern '%s' in output of installation command '%s': %s",
197 regex.pattern, cmd, out)
198
199 def sanity_check_step(self):
200 """Custom sanity check for MATLAB."""
201 custom_paths = {
202 'files': ["bin/matlab", "bin/glnxa64/MATLAB", "toolbox/local/classpath.txt"],
203 'dirs': ["java/jar"],
204 }
205 super(EB_MATLAB, self).sanity_check_step(custom_paths=custom_paths)
206
207 def make_module_extra(self):
208 """Extend PATH and set proper _JAVA_OPTIONS (e.g., -Xmx)."""
209 txt = super(EB_MATLAB, self).make_module_extra()
210
211 # make MATLAB runtime available
212 if LooseVersion(self.version) >= LooseVersion('2017a'):
213 for ldlibdir in ['runtime', 'bin', os.path.join('sys', 'os')]:
214 libdir = os.path.join(ldlibdir, 'glnxa64')
215 txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', libdir)
216 if self.cfg['java_options']:
217 txt += self.module_generator.set_environment('_JAVA_OPTIONS', self.cfg['java_options'])
218 return txt
219
[end of easybuild/easyblocks/m/matlab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/easybuild/easyblocks/m/matlab.py b/easybuild/easyblocks/m/matlab.py
--- a/easybuild/easyblocks/m/matlab.py
+++ b/easybuild/easyblocks/m/matlab.py
@@ -55,6 +55,7 @@
super(EB_MATLAB, self).__init__(*args, **kwargs)
self.comp_fam = None
self.configfile = os.path.join(self.builddir, 'my_installer_input.txt')
+ self.outputfile = os.path.join(self.builddir, 'my_installer_output.txt')
@staticmethod
def extra_options():
@@ -98,12 +99,14 @@
regagree = re.compile(br"^# agreeToLicense=.*", re.M)
regmode = re.compile(br"^# mode=.*", re.M)
reglicpath = re.compile(br"^# licensePath=.*", re.M)
+ regoutfile = re.compile(br"^# outputFile=.*", re.M)
# must use byte-strings here when using Python 3, see above
config = regdest.sub(b"destinationFolder=%s" % self.installdir.encode('utf-8'), config)
config = regagree.sub(b"agreeToLicense=Yes", config)
config = regmode.sub(b"mode=silent", config)
config = reglicpath.sub(b"licensePath=%s" % licfile.encode('utf-8'), config)
+ config = regoutfile.sub(b"outputFile=%s" % self.outputfile.encode('utf-8'), config)
write_file(self.configfile, config)
@@ -188,6 +191,12 @@
# check installer output for known signs of trouble
patterns = [
"Error: You have entered an invalid File Installation Key",
+ "Not a valid key",
+ "All selected products are already installed",
+ "The application encountered an unexpected error and needs to close",
+ "Error: Unable to write to",
+ "Exiting with status -\\d",
+ "End - Unsuccessful",
]
for pattern in patterns:
@@ -195,6 +204,10 @@
if regex.search(out):
raise EasyBuildError("Found error pattern '%s' in output of installation command '%s': %s",
regex.pattern, cmd, out)
+ with open(self.outputfile) as f:
+ if regex.search(f.read()):
+ raise EasyBuildError("Found error pattern '%s' in output file of installer",
+ regex.pattern)
def sanity_check_step(self):
"""Custom sanity check for MATLAB."""
|
{"golden_diff": "diff --git a/easybuild/easyblocks/m/matlab.py b/easybuild/easyblocks/m/matlab.py\n--- a/easybuild/easyblocks/m/matlab.py\n+++ b/easybuild/easyblocks/m/matlab.py\n@@ -55,6 +55,7 @@\n super(EB_MATLAB, self).__init__(*args, **kwargs)\n self.comp_fam = None\n self.configfile = os.path.join(self.builddir, 'my_installer_input.txt')\n+ self.outputfile = os.path.join(self.builddir, 'my_installer_output.txt')\n \n @staticmethod\n def extra_options():\n@@ -98,12 +99,14 @@\n regagree = re.compile(br\"^# agreeToLicense=.*\", re.M)\n regmode = re.compile(br\"^# mode=.*\", re.M)\n reglicpath = re.compile(br\"^# licensePath=.*\", re.M)\n+ regoutfile = re.compile(br\"^# outputFile=.*\", re.M)\n \n # must use byte-strings here when using Python 3, see above\n config = regdest.sub(b\"destinationFolder=%s\" % self.installdir.encode('utf-8'), config)\n config = regagree.sub(b\"agreeToLicense=Yes\", config)\n config = regmode.sub(b\"mode=silent\", config)\n config = reglicpath.sub(b\"licensePath=%s\" % licfile.encode('utf-8'), config)\n+ config = regoutfile.sub(b\"outputFile=%s\" % self.outputfile.encode('utf-8'), config)\n \n write_file(self.configfile, config)\n \n@@ -188,6 +191,12 @@\n # check installer output for known signs of trouble\n patterns = [\n \"Error: You have entered an invalid File Installation Key\",\n+ \"Not a valid key\",\n+ \"All selected products are already installed\",\n+ \"The application encountered an unexpected error and needs to close\",\n+ \"Error: Unable to write to\",\n+ \"Exiting with status -\\\\d\",\n+ \"End - Unsuccessful\",\n ]\n \n for pattern in patterns:\n@@ -195,6 +204,10 @@\n if regex.search(out):\n raise EasyBuildError(\"Found error pattern '%s' in output of installation command '%s': %s\",\n regex.pattern, cmd, out)\n+ with open(self.outputfile) as f:\n+ if regex.search(f.read()):\n+ raise EasyBuildError(\"Found error pattern '%s' in output file of installer\",\n+ regex.pattern)\n \n def sanity_check_step(self):\n \"\"\"Custom sanity check for MATLAB.\"\"\"\n", "issue": "MATLAB easyblock doesn't handle errors during install\nThe matlab installer script always returns 0, even if it fails due to, e.g. specifying the wrong key. \r\nThere is no way to view the reason why, because the installer doesn't output anything to stdout. I had to run the install command myself with a modified with a outputFile specified, which contained the actual errors.\r\n\r\nWe should minimum just specify the outputFile argument so that the error logs appear *somewhere*, and possibly check for any errors inside this file. \n", "before_files": [{"content": "##\n# Copyright 2009-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing MATLAB, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Fotis Georgatos (Uni.Lu, NTUA)\n\"\"\"\nimport re\nimport os\nimport stat\nimport tempfile\n\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.packedbinary import PackedBinary\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import adjust_permissions, change_dir, copy_file, read_file, write_file\nfrom easybuild.tools.py2vs3 import string_type\nfrom easybuild.tools.run import run_cmd\n\n\nclass EB_MATLAB(PackedBinary):\n \"\"\"Support for installing MATLAB.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Add extra config options specific to MATLAB.\"\"\"\n super(EB_MATLAB, self).__init__(*args, **kwargs)\n self.comp_fam = None\n self.configfile = os.path.join(self.builddir, 'my_installer_input.txt')\n\n @staticmethod\n def extra_options():\n extra_vars = {\n 'java_options': ['-Xmx256m', \"$_JAVA_OPTIONS value set for install and in module file.\", CUSTOM],\n 'key': [None, \"Installation key(s), make one install for each key. Single key or a list of keys\", CUSTOM],\n }\n return PackedBinary.extra_options(extra_vars)\n\n def configure_step(self):\n \"\"\"Configure MATLAB installation: create license file.\"\"\"\n\n licfile = self.cfg['license_file']\n if licfile is None:\n licserv = self.cfg['license_server']\n if licserv is None:\n licserv = os.getenv('EB_MATLAB_LICENSE_SERVER', 'license.example.com')\n licport = self.cfg['license_server_port']\n if licport is None:\n licport = os.getenv('EB_MATLAB_LICENSE_SERVER_PORT', '00000')\n # create license file\n lictxt = '\\n'.join([\n \"SERVER %s 000000000000 %s\" % (licserv, licport),\n \"USE_SERVER\",\n ])\n\n licfile = os.path.join(self.builddir, 'matlab.lic')\n write_file(licfile, lictxt)\n\n try:\n copy_file(os.path.join(self.cfg['start_dir'], 'installer_input.txt'), self.configfile)\n adjust_permissions(self.configfile, stat.S_IWUSR)\n\n # read file in binary mode to avoid UTF-8 encoding issues when using Python 3,\n # due to non-UTF-8 characters...\n config = read_file(self.configfile, mode='rb')\n\n # use raw byte strings (must be 'br', not 'rb'),\n # required when using Python 3 because file was read in binary mode\n regdest = re.compile(br\"^# destinationFolder=.*\", re.M)\n regagree = re.compile(br\"^# agreeToLicense=.*\", re.M)\n regmode = re.compile(br\"^# mode=.*\", re.M)\n reglicpath = re.compile(br\"^# licensePath=.*\", re.M)\n\n # must use byte-strings here when using Python 3, see above\n config = regdest.sub(b\"destinationFolder=%s\" % self.installdir.encode('utf-8'), config)\n config = regagree.sub(b\"agreeToLicense=Yes\", config)\n config = regmode.sub(b\"mode=silent\", config)\n config = reglicpath.sub(b\"licensePath=%s\" % licfile.encode('utf-8'), config)\n\n write_file(self.configfile, config)\n\n except IOError as err:\n raise EasyBuildError(\"Failed to create installation config file %s: %s\", self.configfile, err)\n\n self.log.debug('configuration file written to %s:\\n %s', self.configfile, 
config)\n\n def install_step(self):\n \"\"\"MATLAB install procedure using 'install' command.\"\"\"\n\n src = os.path.join(self.cfg['start_dir'], 'install')\n\n # make sure install script is executable\n adjust_permissions(src, stat.S_IXUSR)\n\n if LooseVersion(self.version) >= LooseVersion('2016b'):\n perm_dirs = [os.path.join(self.cfg['start_dir'], 'bin', 'glnxa64')]\n if LooseVersion(self.version) < LooseVersion('2021b'):\n jdir = os.path.join(self.cfg['start_dir'], 'sys', 'java', 'jre', 'glnxa64', 'jre', 'bin')\n perm_dirs.append(jdir)\n for perm_dir in perm_dirs:\n adjust_permissions(perm_dir, stat.S_IXUSR)\n\n # make sure $DISPLAY is not defined, which may lead to (hard to trace) problems\n # this is a workaround for not being able to specify --nodisplay to the install scripts\n if 'DISPLAY' in os.environ:\n os.environ.pop('DISPLAY')\n\n if '_JAVA_OPTIONS' not in self.cfg['preinstallopts']:\n java_opts = 'export _JAVA_OPTIONS=\"%s\" && ' % self.cfg['java_options']\n self.cfg['preinstallopts'] = java_opts + self.cfg['preinstallopts']\n if LooseVersion(self.version) >= LooseVersion('2016b'):\n change_dir(self.builddir)\n\n # Build the cmd string\n cmdlist = [\n self.cfg['preinstallopts'],\n src,\n '-inputFile',\n self.configfile,\n ]\n if LooseVersion(self.version) < LooseVersion('2020a'):\n # MATLAB installers < 2020a ignore $TMPDIR (always use /tmp) and might need a large tmpdir\n tmpdir = tempfile.mkdtemp()\n cmdlist.extend([\n '-v',\n '-tmpdir',\n tmpdir,\n ])\n cmdlist.append(self.cfg['installopts'])\n cmd = ' '.join(cmdlist)\n\n keys = self.cfg['key']\n if keys is None:\n try:\n keys = os.environ['EB_MATLAB_KEY']\n except KeyError:\n raise EasyBuildError(\"The MATLAB install key is not set. This can be set either with the environment \"\n \"variable EB_MATLAB_KEY or by the easyconfig variable 'key'.\")\n if isinstance(keys, string_type):\n keys = keys.split(',')\n\n # Compile the installation key regex outside of the loop\n regkey = re.compile(br\"^(# )?fileInstallationKey=.*\", re.M)\n\n # Run an install for each key\n for i, key in enumerate(keys):\n\n self.log.info('Installing MATLAB with key %s of %s', i + 1, len(keys))\n\n try:\n config = read_file(self.configfile, mode='rb')\n config = regkey.sub(b\"fileInstallationKey=%s\" % key.encode('utf-8'), config)\n write_file(self.configfile, config)\n\n except IOError as err:\n raise EasyBuildError(\"Failed to update config file %s: %s\", self.configfile, err)\n\n (out, _) = run_cmd(cmd, log_all=True, simple=False)\n\n # check installer output for known signs of trouble\n patterns = [\n \"Error: You have entered an invalid File Installation Key\",\n ]\n\n for pattern in patterns:\n regex = re.compile(pattern, re.I)\n if regex.search(out):\n raise EasyBuildError(\"Found error pattern '%s' in output of installation command '%s': %s\",\n regex.pattern, cmd, out)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for MATLAB.\"\"\"\n custom_paths = {\n 'files': [\"bin/matlab\", \"bin/glnxa64/MATLAB\", \"toolbox/local/classpath.txt\"],\n 'dirs': [\"java/jar\"],\n }\n super(EB_MATLAB, self).sanity_check_step(custom_paths=custom_paths)\n\n def make_module_extra(self):\n \"\"\"Extend PATH and set proper _JAVA_OPTIONS (e.g., -Xmx).\"\"\"\n txt = super(EB_MATLAB, self).make_module_extra()\n\n # make MATLAB runtime available\n if LooseVersion(self.version) >= LooseVersion('2017a'):\n for ldlibdir in ['runtime', 'bin', os.path.join('sys', 'os')]:\n libdir = os.path.join(ldlibdir, 'glnxa64')\n txt += 
self.module_generator.prepend_paths('LD_LIBRARY_PATH', libdir)\n if self.cfg['java_options']:\n txt += self.module_generator.set_environment('_JAVA_OPTIONS', self.cfg['java_options'])\n return txt\n", "path": "easybuild/easyblocks/m/matlab.py"}]}
| 3,411 | 584 |
gh_patches_debug_21292
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-1663
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider renaming BaggagePropagator
BaggagePropagator is probably too generic a name. In contrast, there's `TraceContextTextMapPropagator`, so renaming it to something along those lines would convey the intent more clearly.
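One practical consideration with such a rename (the snippet below is purely illustrative; the change that actually landed, shown later in this entry, is a straight rename to `W3CBaggagePropagator` with no alias): a deprecated module-level alias can keep existing imports working while users migrate.
```python
import warnings


class W3CBaggagePropagator:
    """Stand-in for the renamed propagator in this sketch."""


def __getattr__(name):  # module-level __getattr__, PEP 562 (Python 3.7+)
    if name == "BaggagePropagator":
        warnings.warn(
            "BaggagePropagator has been renamed to W3CBaggagePropagator",
            DeprecationWarning,
            stacklevel=2,
        )
        return W3CBaggagePropagator
    raise AttributeError(name)
```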
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import typing
16 import urllib.parse
17
18 from opentelemetry import baggage
19 from opentelemetry.context import get_current
20 from opentelemetry.context.context import Context
21 from opentelemetry.propagators import textmap
22
23
24 class BaggagePropagator(textmap.TextMapPropagator):
25 """Extracts and injects Baggage which is used to annotate telemetry."""
26
27 MAX_HEADER_LENGTH = 8192
28 MAX_PAIR_LENGTH = 4096
29 MAX_PAIRS = 180
30 _BAGGAGE_HEADER_NAME = "baggage"
31
32 def extract(
33 self,
34 getter: textmap.Getter[textmap.TextMapPropagatorT],
35 carrier: textmap.TextMapPropagatorT,
36 context: typing.Optional[Context] = None,
37 ) -> Context:
38 """Extract Baggage from the carrier.
39
40 See
41 `opentelemetry.propagators.textmap.TextMapPropagator.extract`
42 """
43
44 if context is None:
45 context = get_current()
46
47 header = _extract_first_element(
48 getter.get(carrier, self._BAGGAGE_HEADER_NAME)
49 )
50
51 if not header or len(header) > self.MAX_HEADER_LENGTH:
52 return context
53
54 baggage_entries = header.split(",")
55 total_baggage_entries = self.MAX_PAIRS
56 for entry in baggage_entries:
57 if total_baggage_entries <= 0:
58 return context
59 total_baggage_entries -= 1
60 if len(entry) > self.MAX_PAIR_LENGTH:
61 continue
62 try:
63 name, value = entry.split("=", 1)
64 except Exception: # pylint: disable=broad-except
65 continue
66 context = baggage.set_baggage(
67 urllib.parse.unquote(name).strip(),
68 urllib.parse.unquote(value).strip(),
69 context=context,
70 )
71
72 return context
73
74 def inject(
75 self,
76 set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
77 carrier: textmap.TextMapPropagatorT,
78 context: typing.Optional[Context] = None,
79 ) -> None:
80 """Injects Baggage into the carrier.
81
82 See
83 `opentelemetry.propagators.textmap.TextMapPropagator.inject`
84 """
85 baggage_entries = baggage.get_all(context=context)
86 if not baggage_entries:
87 return
88
89 baggage_string = _format_baggage(baggage_entries)
90 set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)
91
92 @property
93 def fields(self) -> typing.Set[str]:
94 """Returns a set with the fields set in `inject`."""
95 return {self._BAGGAGE_HEADER_NAME}
96
97
98 def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
99 return ",".join(
100 key + "=" + urllib.parse.quote_plus(str(value))
101 for key, value in baggage_entries.items()
102 )
103
104
105 def _extract_first_element(
106 items: typing.Optional[typing.Iterable[textmap.TextMapPropagatorT]],
107 ) -> typing.Optional[textmap.TextMapPropagatorT]:
108 if items is None:
109 return None
110 return next(iter(items), None)
111
[end of opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py]
[start of opentelemetry-api/src/opentelemetry/propagate/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 API for propagation of context.
17
18 The propagators for the
19 ``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined
20 via configuration in the ``OTEL_PROPAGATORS`` environment variable. This
21 variable should be set to a comma-separated string of names of values for the
22 ``opentelemetry_propagator`` entry point. For example, setting
23 ``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)
24 would instantiate
25 ``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2
26 propagators, one of type
27 ``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``
28 and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.
29 Notice that these propagator classes are defined as
30 ``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of
31 ``opentelemetry``.
32
33 Example::
34
35 import flask
36 import requests
37 from opentelemetry import propagators
38
39
40 PROPAGATOR = propagators.get_global_textmap()
41
42
43 def get_header_from_flask_request(request, key):
44 return request.headers.get_all(key)
45
46 def set_header_into_requests_request(request: requests.Request,
47 key: str, value: str):
48 request.headers[key] = value
49
50 def example_route():
51 context = PROPAGATOR.extract(
52 get_header_from_flask_request,
53 flask.request
54 )
55 request_to_downstream = requests.Request(
56 "GET", "http://httpbin.org/get"
57 )
58 PROPAGATOR.inject(
59 set_header_into_requests_request,
60 request_to_downstream,
61 context=context
62 )
63 session = requests.Session()
64 session.send(request_to_downstream.prepare())
65
66
67 .. _Propagation API Specification:
68 https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md
69 """
70
71 import typing
72 from logging import getLogger
73 from os import environ
74
75 from pkg_resources import iter_entry_points
76
77 from opentelemetry.context.context import Context
78 from opentelemetry.environment_variables import OTEL_PROPAGATORS
79 from opentelemetry.propagators import composite, textmap
80
81 logger = getLogger(__name__)
82
83
84 def extract(
85 getter: textmap.Getter[textmap.TextMapPropagatorT],
86 carrier: textmap.TextMapPropagatorT,
87 context: typing.Optional[Context] = None,
88 ) -> Context:
89 """Uses the configured propagator to extract a Context from the carrier.
90
91 Args:
92 getter: an object which contains a get function that can retrieve zero
93 or more values from the carrier and a keys function that can get all the keys
94 from carrier.
95 carrier: and object which contains values that are
96 used to construct a Context. This object
97 must be paired with an appropriate getter
98 which understands how to extract a value from it.
99 context: an optional Context to use. Defaults to current
100 context if not set.
101 """
102 return get_global_textmap().extract(getter, carrier, context)
103
104
105 def inject(
106 set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],
107 carrier: textmap.TextMapPropagatorT,
108 context: typing.Optional[Context] = None,
109 ) -> None:
110 """Uses the configured propagator to inject a Context into the carrier.
111
112 Args:
113 set_in_carrier: A setter function that can set values
114 on the carrier.
115 carrier: An object that contains a representation of HTTP
116 headers. Should be paired with set_in_carrier, which
117 should know how to set header values on the carrier.
118 context: an optional Context to use. Defaults to current
119 context if not set.
120 """
121 get_global_textmap().inject(set_in_carrier, carrier, context)
122
123
124 try:
125
126 propagators = []
127
128 # Single use variable here to hack black and make lint pass
129 environ_propagators = environ.get(
130 OTEL_PROPAGATORS, "tracecontext,baggage",
131 )
132
133 for propagator in environ_propagators.split(","):
134 propagators.append( # type: ignore
135 next( # type: ignore
136 iter_entry_points("opentelemetry_propagator", propagator)
137 ).load()()
138 )
139
140 except Exception: # pylint: disable=broad-except
141 logger.exception("Failed to load configured propagators")
142 raise
143
144 _HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore
145
146
147 def get_global_textmap() -> textmap.TextMapPropagator:
148 return _HTTP_TEXT_FORMAT
149
150
151 def set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:
152 global _HTTP_TEXT_FORMAT # pylint:disable=global-statement
153 _HTTP_TEXT_FORMAT = http_text_format # type: ignore
154
[end of opentelemetry-api/src/opentelemetry/propagate/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
@@ -21,7 +21,7 @@
from opentelemetry.propagators import textmap
-class BaggagePropagator(textmap.TextMapPropagator):
+class W3CBaggagePropagator(textmap.TextMapPropagator):
"""Extracts and injects Baggage which is used to annotate telemetry."""
MAX_HEADER_LENGTH = 8192
diff --git a/opentelemetry-api/src/opentelemetry/propagate/__init__.py b/opentelemetry-api/src/opentelemetry/propagate/__init__.py
--- a/opentelemetry-api/src/opentelemetry/propagate/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/propagate/__init__.py
@@ -25,7 +25,7 @@
``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2
propagators, one of type
``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``
-and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.
+and other of type ``opentelemetry.baggage.propagation.W3CBaggagePropagator``.
Notice that these propagator classes are defined as
``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of
``opentelemetry``.
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n@@ -21,7 +21,7 @@\n from opentelemetry.propagators import textmap\n \n \n-class BaggagePropagator(textmap.TextMapPropagator):\n+class W3CBaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n \n MAX_HEADER_LENGTH = 8192\ndiff --git a/opentelemetry-api/src/opentelemetry/propagate/__init__.py b/opentelemetry-api/src/opentelemetry/propagate/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/propagate/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/propagate/__init__.py\n@@ -25,7 +25,7 @@\n ``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2\n propagators, one of type\n ``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``\n-and other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.\n+and other of type ``opentelemetry.baggage.propagation.W3CBaggagePropagator``.\n Notice that these propagator classes are defined as\n ``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of\n ``opentelemetry``.\n", "issue": "Consider renaming BaggagePropagator\nBaggagePropagator is probably a too-generic name. In contrast, there's `TraceContextTextMapPropagator`, so maybe renaming it to something along those lines could convey more clarity.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nimport urllib.parse\n\nfrom opentelemetry import baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import textmap\n\n\nclass BaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n\n MAX_HEADER_LENGTH = 8192\n MAX_PAIR_LENGTH = 4096\n MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n getter: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n getter.get(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self.MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self.MAX_PAIRS\n for entry in baggage_entries:\n if total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self.MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: 
disable=broad-except\n continue\n context = baggage.set_baggage(\n urllib.parse.unquote(name).strip(),\n urllib.parse.unquote(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = baggage.get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n set_in_carrier(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n @property\n def fields(self) -> typing.Set[str]:\n \"\"\"Returns a set with the fields set in `inject`.\"\"\"\n return {self._BAGGAGE_HEADER_NAME}\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n key + \"=\" + urllib.parse.quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Optional[typing.Iterable[textmap.TextMapPropagatorT]],\n) -> typing.Optional[textmap.TextMapPropagatorT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAPI for propagation of context.\n\nThe propagators for the\n``opentelemetry.propagators.composite.CompositeHTTPPropagator`` can be defined\nvia configuration in the ``OTEL_PROPAGATORS`` environment variable. This\nvariable should be set to a comma-separated string of names of values for the\n``opentelemetry_propagator`` entry point. 
For example, setting\n``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)\nwould instantiate\n``opentelemetry.propagators.composite.CompositeHTTPPropagator`` with 2\npropagators, one of type\n``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``\nand other of type ``opentelemetry.baggage.propagation.BaggagePropagator``.\nNotice that these propagator classes are defined as\n``opentelemetry_propagator`` entry points in the ``setup.cfg`` file of\n``opentelemetry``.\n\nExample::\n\n import flask\n import requests\n from opentelemetry import propagators\n\n\n PROPAGATOR = propagators.get_global_textmap()\n\n\n def get_header_from_flask_request(request, key):\n return request.headers.get_all(key)\n\n def set_header_into_requests_request(request: requests.Request,\n key: str, value: str):\n request.headers[key] = value\n\n def example_route():\n context = PROPAGATOR.extract(\n get_header_from_flask_request,\n flask.request\n )\n request_to_downstream = requests.Request(\n \"GET\", \"http://httpbin.org/get\"\n )\n PROPAGATOR.inject(\n set_header_into_requests_request,\n request_to_downstream,\n context=context\n )\n session = requests.Session()\n session.send(request_to_downstream.prepare())\n\n\n.. _Propagation API Specification:\n https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md\n\"\"\"\n\nimport typing\nfrom logging import getLogger\nfrom os import environ\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.environment_variables import OTEL_PROPAGATORS\nfrom opentelemetry.propagators import composite, textmap\n\nlogger = getLogger(__name__)\n\n\ndef extract(\n getter: textmap.Getter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> Context:\n \"\"\"Uses the configured propagator to extract a Context from the carrier.\n\n Args:\n getter: an object which contains a get function that can retrieve zero\n or more values from the carrier and a keys function that can get all the keys\n from carrier.\n carrier: and object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate getter\n which understands how to extract a value from it.\n context: an optional Context to use. Defaults to current\n context if not set.\n \"\"\"\n return get_global_textmap().extract(getter, carrier, context)\n\n\ndef inject(\n set_in_carrier: textmap.Setter[textmap.TextMapPropagatorT],\n carrier: textmap.TextMapPropagatorT,\n context: typing.Optional[Context] = None,\n) -> None:\n \"\"\"Uses the configured propagator to inject a Context into the carrier.\n\n Args:\n set_in_carrier: A setter function that can set values\n on the carrier.\n carrier: An object that contains a representation of HTTP\n headers. Should be paired with set_in_carrier, which\n should know how to set header values on the carrier.\n context: an optional Context to use. 
Defaults to current\n context if not set.\n \"\"\"\n get_global_textmap().inject(set_in_carrier, carrier, context)\n\n\ntry:\n\n propagators = []\n\n # Single use variable here to hack black and make lint pass\n environ_propagators = environ.get(\n OTEL_PROPAGATORS, \"tracecontext,baggage\",\n )\n\n for propagator in environ_propagators.split(\",\"):\n propagators.append( # type: ignore\n next( # type: ignore\n iter_entry_points(\"opentelemetry_propagator\", propagator)\n ).load()()\n )\n\nexcept Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to load configured propagators\")\n raise\n\n_HTTP_TEXT_FORMAT = composite.CompositeHTTPPropagator(propagators) # type: ignore\n\n\ndef get_global_textmap() -> textmap.TextMapPropagator:\n return _HTTP_TEXT_FORMAT\n\n\ndef set_global_textmap(http_text_format: textmap.TextMapPropagator,) -> None:\n global _HTTP_TEXT_FORMAT # pylint:disable=global-statement\n _HTTP_TEXT_FORMAT = http_text_format # type: ignore\n", "path": "opentelemetry-api/src/opentelemetry/propagate/__init__.py"}]}
| 3,226 | 376 |
gh_patches_debug_23816
|
rasdani/github-patches
|
git_diff
|
cltk__cltk-1146
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hyphen added when separating 'ne' enclitics at sentence start
**Describe the bug**
Hyphen added when separating '-ne' enclitics at sentence start
**To Reproduce**
Python 3.9.9; CLTK 1.0.22
```
test = 'nihilne te nocturnum praesidium Palati'
t = LatinWordTokenizer()
print(t.tokenize(test))
```
Current output:
['nihil', '-', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
Expected output:
['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
**Desktop (please complete the following information):**
- MacOS 12.1
**Additional context**
Issue is that there is a redundant hyphen being added in cases where '-ne' is separated as part of the first token in a sentence. I will submit a PR to fix this.
</issue>
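For reference, a minimal reproduction of the report above (a sketch assuming CLTK 1.x is installed and that `LatinWordTokenizer` can be imported from `cltk.tokenizers.lat.lat`, the module defined in the file below):

```python
from cltk.tokenizers.lat.lat import LatinWordTokenizer

tokenizer = LatinWordTokenizer()
tokens = tokenizer.tokenize("nihilne te nocturnum praesidium Palati")
print(tokens)
# buggy output:    ['nihil', '-', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
# expected output: ['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
```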
<code>
[start of src/cltk/tokenizers/lat/lat.py]
1 """ Latin word tokenization - handles enclitics and abbreviations."""
2
3 __author__ = [
4 "Patrick J. Burns <[email protected]>",
5 "Todd Cook <[email protected]",
6 ]
7 __license__ = "MIT License."
8
9 import re
10 from typing import List, Tuple
11
12 from nltk.tokenize.punkt import PunktLanguageVars, PunktParameters
13
14 from cltk.sentence.lat import LatinPunktSentenceTokenizer
15 from cltk.tokenizers.lat.params import ABBREVIATIONS, latin_exceptions
16 from cltk.tokenizers.lat.params import latin_replacements as REPLACEMENTS
17 from cltk.tokenizers.word import WordTokenizer
18
19
20 class LatinLanguageVars(PunktLanguageVars):
21 _re_non_word_chars = PunktLanguageVars()._re_non_word_chars.replace("'", "")
22
23
24 class LatinWordTokenizer(WordTokenizer):
25 """Tokenize according to rules specific to a given language."""
26
27 ENCLITICS = ["que", "n", "ne", "ue", "ve", "st"]
28
29 EXCEPTIONS = list(set(ENCLITICS + latin_exceptions))
30
31 def __init__(self):
32 self.punkt_param = PunktParameters()
33 self.punkt_param.abbrev_types = set(ABBREVIATIONS)
34 self.sent_tokenizer = LatinPunktSentenceTokenizer()
35 self.word_tokenizer = LatinLanguageVars()
36
37 def tokenize(
38 self,
39 text: str,
40 replacements: List[Tuple[str, str]] = REPLACEMENTS,
41 enclitics_exceptions: List[str] = EXCEPTIONS,
42 enclitics: List[str] = ENCLITICS,
43 ) -> List[str]:
44 """
45 Tokenizer divides the text into a list of substrings
46
47 :param text: This accepts the string value that needs to be tokenized
48 :param replacements: List of replacements to apply to tokens such as "mecum" -> ["cum", "me"]
49 :param enclitics_exceptions: List of words that look likes they end with an enclitic but are not.
50 :param enclitics: List of enclitics to check for in tokenization
51
52 :returns: A list of substrings extracted from the text
53
54 >>> toker = LatinWordTokenizer()
55 >>> text = 'atque haec abuterque puerve paterne nihil'
56 >>> toker.tokenize(text)
57 ['atque', 'haec', 'abuter', '-que', 'puer', '-ve', 'pater', '-ne', 'nihil']
58
59 >>> toker.tokenize('Cicero dixit orationem pro Sex. Roscio')
60 ['Cicero', 'dixit', 'orationem', 'pro', 'Sex.', 'Roscio']
61
62 >>> toker.tokenize('Cenavin ego heri in navi in portu Persico?')
63 ['Cenavi', '-ne', 'ego', 'heri', 'in', 'navi', 'in', 'portu', 'Persico', '?']
64
65 >>> toker.tokenize('Dic si audes mihi, bellan videtur specie mulier?')
66 ['Dic', 'si', 'audes', 'mihi', ',', 'bella', '-ne', 'videtur', 'specie', 'mulier', '?']
67
68 >>> toker.tokenize("mecum")
69 ['cum', 'me']
70
71 You can specify how replacements are made using replacements
72
73 >>> toker.tokenize("mecum", replacements=[(r"mecum", "me cum")])
74 ['me', 'cum']
75
76 Or change enclitics and enclitics exception:
77 >>> toker.tokenize("atque haec abuterque puerve paterne nihil", enclitics=["que"])
78 ['atque', 'haec', 'abuter', '-que', 'puerve', 'paterne', 'nihil']
79
80 >>> toker.tokenize("atque haec abuterque puerve paterne nihil", enclitics=["que", "ve", "ne"],
81 ... enclitics_exceptions=('paterne', 'atque'))
82 ['atque', 'haec', 'abuter', '-que', 'puer', '-ve', 'paterne', 'nihil']
83
84 """
85
86 def matchcase(word):
87 """helper function From Python Cookbook"""
88
89 def replace(matching):
90 text = matching.group()
91 if text.isupper():
92 return word.upper()
93 elif text.islower():
94 return word.lower()
95 elif text[0].isupper():
96 return word.capitalize()
97 return word
98
99 return replace
100
101 for replacement in replacements:
102 text = re.sub(
103 replacement[0], matchcase(replacement[1]), text, flags=re.IGNORECASE
104 )
105
106 sents = self.sent_tokenizer.tokenize(text)
107 tokens = [] # type: List[str]
108
109 for sent in sents:
110 temp_tokens = self.word_tokenizer.word_tokenize(sent)
111 # Need to check that tokens exist before handling them;
112 # needed to make stream.readlines work in PlaintextCorpusReader
113 if temp_tokens:
114 if temp_tokens[0].endswith("ne"):
115 if temp_tokens[0].lower() not in enclitics_exceptions:
116 temp = [temp_tokens[0][:-2], "-ne"]
117 temp_tokens = temp + temp_tokens[1:]
118 if temp_tokens[-1].endswith("."):
119 final_word = temp_tokens[-1][:-1]
120 del temp_tokens[-1]
121 temp_tokens += [final_word, "."]
122
123 for token in temp_tokens:
124 tokens.append(token)
125
126 # Break enclitic handling into own function?
127 specific_tokens = [] # type: List[str]
128
129 for token in tokens:
130 is_enclitic = False
131 if token.lower() not in enclitics_exceptions:
132 for enclitic in enclitics:
133 if token.endswith(enclitic):
134 if enclitic == "n":
135 specific_tokens += [token[: -len(enclitic)]] + ["-ne"]
136 elif enclitic == "st":
137 if token.endswith("ust"):
138 specific_tokens += [token[: -len(enclitic) + 1]] + [
139 "est"
140 ]
141 else:
142 specific_tokens += [token[: -len(enclitic)]] + ["est"]
143 else:
144 specific_tokens += [token[: -len(enclitic)]] + [
145 "-" + enclitic
146 ]
147 is_enclitic = True
148 break
149 if not is_enclitic:
150 specific_tokens.append(token)
151
152 # collapse abbreviations
153 abbrev_idx = []
154 for idx, token in enumerate(specific_tokens):
155 if token.lower() in self.punkt_param.abbrev_types:
156 abbrev_idx.append(idx)
157 for val in reversed(abbrev_idx):
158 if val + 1 < len(specific_tokens) and specific_tokens[val + 1] == ".":
159 specific_tokens[val] = specific_tokens[val] + "."
160 specific_tokens[val + 1] = ""
161 specific_tokens = [tmp for tmp in specific_tokens if tmp]
162 return specific_tokens
163
164 @staticmethod
165 def compute_indices(text: str, tokens):
166 indices = []
167 for i, token in enumerate(tokens):
168 if 1 <= i:
169 current_index = indices[-1] + len(tokens[i - 1])
170 if token == "-ne":
171 indices.append(current_index + text[current_index:].find(token[1:]))
172 else:
173 indices.append(current_index + text[current_index:].find(token))
174 else:
175 indices.append(text.find(token))
176 return indices
177
[end of src/cltk/tokenizers/lat/lat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cltk/tokenizers/lat/lat.py b/src/cltk/tokenizers/lat/lat.py
--- a/src/cltk/tokenizers/lat/lat.py
+++ b/src/cltk/tokenizers/lat/lat.py
@@ -59,6 +59,9 @@
>>> toker.tokenize('Cicero dixit orationem pro Sex. Roscio')
['Cicero', 'dixit', 'orationem', 'pro', 'Sex.', 'Roscio']
+ >>> toker.tokenize('nihilne te nocturnum praesidium Palati')
+ ['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
+
>>> toker.tokenize('Cenavin ego heri in navi in portu Persico?')
['Cenavi', '-ne', 'ego', 'heri', 'in', 'navi', 'in', 'portu', 'Persico', '?']
@@ -141,9 +144,7 @@
else:
specific_tokens += [token[: -len(enclitic)]] + ["est"]
else:
- specific_tokens += [token[: -len(enclitic)]] + [
- "-" + enclitic
- ]
+ specific_tokens += [token]
is_enclitic = True
break
if not is_enclitic:
|
{"golden_diff": "diff --git a/src/cltk/tokenizers/lat/lat.py b/src/cltk/tokenizers/lat/lat.py\n--- a/src/cltk/tokenizers/lat/lat.py\n+++ b/src/cltk/tokenizers/lat/lat.py\n@@ -59,6 +59,9 @@\n >>> toker.tokenize('Cicero dixit orationem pro Sex. Roscio')\n ['Cicero', 'dixit', 'orationem', 'pro', 'Sex.', 'Roscio']\n \n+ >>> toker.tokenize('nihilne te nocturnum praesidium Palati')\n+ ['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']\n+\n >>> toker.tokenize('Cenavin ego heri in navi in portu Persico?')\n ['Cenavi', '-ne', 'ego', 'heri', 'in', 'navi', 'in', 'portu', 'Persico', '?']\n \n@@ -141,9 +144,7 @@\n else:\n specific_tokens += [token[: -len(enclitic)]] + [\"est\"]\n else:\n- specific_tokens += [token[: -len(enclitic)]] + [\n- \"-\" + enclitic\n- ]\n+ specific_tokens += [token]\n is_enclitic = True\n break\n if not is_enclitic:\n", "issue": "Hyphen added when separating 'ne' enclitics at sentence start\n**Describe the bug**\r\nHyphen added when separating '-ne' enclitics at sentence start\r\n\r\n**To Reproduce**\r\nPython 3.9.9; CLTK 1.0.22\r\n\r\n```\r\ntest = 'nihilne te nocturnum praesidium Palati'\r\nt = LatinWordTokenizer()\r\nprint(t.tokenize(test))\r\n```\r\nCurrent output:\r\n['nihil', '-', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']\r\n\r\n4. See error (include literal copy-paste)\r\n\r\nExpected output:\r\n['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']\r\n\r\n**Desktop (please complete the following information):**\r\n - MacOS 12.1\r\n\r\n**Additional context**\r\nIssue is that there is a redundant hyphen being added in cases where '-ne' is separated as part of the first token in a sentence. I will submit a PR to fix this.\r\n\n", "before_files": [{"content": "\"\"\" Latin word tokenization - handles enclitics and abbreviations.\"\"\"\n\n__author__ = [\n \"Patrick J. 
Burns <[email protected]>\",\n \"Todd Cook <[email protected]\",\n]\n__license__ = \"MIT License.\"\n\nimport re\nfrom typing import List, Tuple\n\nfrom nltk.tokenize.punkt import PunktLanguageVars, PunktParameters\n\nfrom cltk.sentence.lat import LatinPunktSentenceTokenizer\nfrom cltk.tokenizers.lat.params import ABBREVIATIONS, latin_exceptions\nfrom cltk.tokenizers.lat.params import latin_replacements as REPLACEMENTS\nfrom cltk.tokenizers.word import WordTokenizer\n\n\nclass LatinLanguageVars(PunktLanguageVars):\n _re_non_word_chars = PunktLanguageVars()._re_non_word_chars.replace(\"'\", \"\")\n\n\nclass LatinWordTokenizer(WordTokenizer):\n \"\"\"Tokenize according to rules specific to a given language.\"\"\"\n\n ENCLITICS = [\"que\", \"n\", \"ne\", \"ue\", \"ve\", \"st\"]\n\n EXCEPTIONS = list(set(ENCLITICS + latin_exceptions))\n\n def __init__(self):\n self.punkt_param = PunktParameters()\n self.punkt_param.abbrev_types = set(ABBREVIATIONS)\n self.sent_tokenizer = LatinPunktSentenceTokenizer()\n self.word_tokenizer = LatinLanguageVars()\n\n def tokenize(\n self,\n text: str,\n replacements: List[Tuple[str, str]] = REPLACEMENTS,\n enclitics_exceptions: List[str] = EXCEPTIONS,\n enclitics: List[str] = ENCLITICS,\n ) -> List[str]:\n \"\"\"\n Tokenizer divides the text into a list of substrings\n\n :param text: This accepts the string value that needs to be tokenized\n :param replacements: List of replacements to apply to tokens such as \"mecum\" -> [\"cum\", \"me\"]\n :param enclitics_exceptions: List of words that look likes they end with an enclitic but are not.\n :param enclitics: List of enclitics to check for in tokenization\n\n :returns: A list of substrings extracted from the text\n\n >>> toker = LatinWordTokenizer()\n >>> text = 'atque haec abuterque puerve paterne nihil'\n >>> toker.tokenize(text)\n ['atque', 'haec', 'abuter', '-que', 'puer', '-ve', 'pater', '-ne', 'nihil']\n\n >>> toker.tokenize('Cicero dixit orationem pro Sex. Roscio')\n ['Cicero', 'dixit', 'orationem', 'pro', 'Sex.', 'Roscio']\n\n >>> toker.tokenize('Cenavin ego heri in navi in portu Persico?')\n ['Cenavi', '-ne', 'ego', 'heri', 'in', 'navi', 'in', 'portu', 'Persico', '?']\n\n >>> toker.tokenize('Dic si audes mihi, bellan videtur specie mulier?')\n ['Dic', 'si', 'audes', 'mihi', ',', 'bella', '-ne', 'videtur', 'specie', 'mulier', '?']\n\n >>> toker.tokenize(\"mecum\")\n ['cum', 'me']\n\n You can specify how replacements are made using replacements\n\n >>> toker.tokenize(\"mecum\", replacements=[(r\"mecum\", \"me cum\")])\n ['me', 'cum']\n\n Or change enclitics and enclitics exception:\n >>> toker.tokenize(\"atque haec abuterque puerve paterne nihil\", enclitics=[\"que\"])\n ['atque', 'haec', 'abuter', '-que', 'puerve', 'paterne', 'nihil']\n\n >>> toker.tokenize(\"atque haec abuterque puerve paterne nihil\", enclitics=[\"que\", \"ve\", \"ne\"],\n ... 
enclitics_exceptions=('paterne', 'atque'))\n ['atque', 'haec', 'abuter', '-que', 'puer', '-ve', 'paterne', 'nihil']\n\n \"\"\"\n\n def matchcase(word):\n \"\"\"helper function From Python Cookbook\"\"\"\n\n def replace(matching):\n text = matching.group()\n if text.isupper():\n return word.upper()\n elif text.islower():\n return word.lower()\n elif text[0].isupper():\n return word.capitalize()\n return word\n\n return replace\n\n for replacement in replacements:\n text = re.sub(\n replacement[0], matchcase(replacement[1]), text, flags=re.IGNORECASE\n )\n\n sents = self.sent_tokenizer.tokenize(text)\n tokens = [] # type: List[str]\n\n for sent in sents:\n temp_tokens = self.word_tokenizer.word_tokenize(sent)\n # Need to check that tokens exist before handling them;\n # needed to make stream.readlines work in PlaintextCorpusReader\n if temp_tokens:\n if temp_tokens[0].endswith(\"ne\"):\n if temp_tokens[0].lower() not in enclitics_exceptions:\n temp = [temp_tokens[0][:-2], \"-ne\"]\n temp_tokens = temp + temp_tokens[1:]\n if temp_tokens[-1].endswith(\".\"):\n final_word = temp_tokens[-1][:-1]\n del temp_tokens[-1]\n temp_tokens += [final_word, \".\"]\n\n for token in temp_tokens:\n tokens.append(token)\n\n # Break enclitic handling into own function?\n specific_tokens = [] # type: List[str]\n\n for token in tokens:\n is_enclitic = False\n if token.lower() not in enclitics_exceptions:\n for enclitic in enclitics:\n if token.endswith(enclitic):\n if enclitic == \"n\":\n specific_tokens += [token[: -len(enclitic)]] + [\"-ne\"]\n elif enclitic == \"st\":\n if token.endswith(\"ust\"):\n specific_tokens += [token[: -len(enclitic) + 1]] + [\n \"est\"\n ]\n else:\n specific_tokens += [token[: -len(enclitic)]] + [\"est\"]\n else:\n specific_tokens += [token[: -len(enclitic)]] + [\n \"-\" + enclitic\n ]\n is_enclitic = True\n break\n if not is_enclitic:\n specific_tokens.append(token)\n\n # collapse abbreviations\n abbrev_idx = []\n for idx, token in enumerate(specific_tokens):\n if token.lower() in self.punkt_param.abbrev_types:\n abbrev_idx.append(idx)\n for val in reversed(abbrev_idx):\n if val + 1 < len(specific_tokens) and specific_tokens[val + 1] == \".\":\n specific_tokens[val] = specific_tokens[val] + \".\"\n specific_tokens[val + 1] = \"\"\n specific_tokens = [tmp for tmp in specific_tokens if tmp]\n return specific_tokens\n\n @staticmethod\n def compute_indices(text: str, tokens):\n indices = []\n for i, token in enumerate(tokens):\n if 1 <= i:\n current_index = indices[-1] + len(tokens[i - 1])\n if token == \"-ne\":\n indices.append(current_index + text[current_index:].find(token[1:]))\n else:\n indices.append(current_index + text[current_index:].find(token))\n else:\n indices.append(text.find(token))\n return indices\n", "path": "src/cltk/tokenizers/lat/lat.py"}]}
| 2,834 | 314 |
gh_patches_debug_21195
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmocr-1587
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] In dataset preprocessing, text that contains "," is not given special handling
### Prerequisite
- [X] I have searched [Issues](https://github.com/open-mmlab/mmocr/issues) and [Discussions](https://github.com/open-mmlab/mmocr/discussions) but cannot get the expected help.
- [X] The bug has not been fixed in the [latest version (0.x)](https://github.com/open-mmlab/mmocr) or [latest version (1.x)](https://github.com/open-mmlab/mmocr/tree/dev-1.x).
### Task
I'm using the official example scripts/configs for the officially supported tasks/models/datasets.
### Branch
1.x branch https://github.com/open-mmlab/mmocr/tree/dev-1.x
### Environment
This bug is unrelated to the environment.
torch 1.7.0
torchvision 0.8.1
mmcv 2.0.0rc3
mmcv-full 1.7.0
mmdet 3.0.0rc0
mmengine 0.1.0
mmocr 1.0.0rc3
### Reproduces the problem - code sample
The relevant code is in mmocr/datasets/preparers/parsers/icdar_txt_parser.py
### Reproduces the problem - command or script
python tools/dataset_converters/prepare_dataset.py icdar2015 --task textspotting
### Reproduces the problem - error message
No error message; inspection shows that the text gt of some images is incorrect after preprocessing.
### Additional information
1. Using mmocr 1.x to preprocess the icdar2015 dataset into unified-format json files.
2. When the text is processed, the case where it contains the separator is not handled: for example, the text 100,000 in the original dataset becomes 100 in the unified format, i.e. after splitting on the separator only the first piece of the text is kept.
3. A concrete example: in mmocr/data/icdar2015/textspotting_train.json, in the gt for the image data/icdar2015/textdet_imgs/train/img_39.jpg, the original annotation line is 402,85,460,86,457,106,399,105,100,000, where 402,85,460,86,457,106,399,105 is the polygon annotation and 100,000 is the text annotation. In the preprocessed OCR dataset, however, the text annotation is 100.
(Because the polygon annotation and the text annotation are distinguished by the separator ",", the text gets truncated.)
</issue>
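To make the truncation concrete, here is a minimal sketch of a separator-aware parse. It assumes, for illustration, that the annotation format is `'x1,y1,x2,y2,x3,y3,x4,y4,trans'` and that only the final `trans` field may itself contain the separator, so the tail is rejoined greedily (one way to address the problem):

```python
line = "402,85,460,86,457,106,399,105,100,000"
keys = "x1,y1,x2,y2,x3,y3,x4,y4,trans".split(",")

values = line.split(",")
# naive zip keeps only the first piece of the text field: trans == "100"
naive = dict(zip(keys, values))
# greedy variant rejoins everything past the last coordinate: trans == "100,000"
greedy_values = values[:len(keys) - 1] + [",".join(values[len(keys) - 1:])]
greedy = dict(zip(keys, greedy_values))

print(naive["trans"], greedy["trans"])  # 100 100,000
```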
<code>
[start of mmocr/datasets/preparers/parsers/base.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 from abc import abstractmethod
3 from functools import partial
4 from typing import Dict, List, Optional, Tuple, Union
5
6 from mmengine import track_parallel_progress
7
8
9 class BaseParser:
10 """Base class for parsing annotations.
11
12 Args:
13 data_root (str, optional): Path to the data root. Defaults to None.
14 nproc (int, optional): Number of processes. Defaults to 1.
15 """
16
17 def __init__(self,
18 data_root: Optional[str] = None,
19 nproc: int = 1) -> None:
20 self.data_root = data_root
21 self.nproc = nproc
22
23 def __call__(self, files: List[Tuple], split: str) -> List:
24 """Parse annotations.
25
26 Args:
27 files (List[Tuple]): A list of a tuple of
28 (image_path, annotation_path).
29 split (str): The split of the dataset.
30
31 Returns:
32 List: A list of a tuple of (image_path, instances)
33 """
34 samples = self.parse_files(files, split)
35 return samples
36
37 def parse_files(self, files: List[Tuple], split: str) -> List[Tuple]:
38 """Convert annotations to MMOCR format.
39
40 Args:
41 files (Tuple): A list of tuple of path to image and annotation.
42
43 Returns:
44 List[Tuple]: A list of a tuple of (image_path, instances)
45 """
46 func = partial(self.parse_file, split=split)
47 samples = track_parallel_progress(func, files, nproc=self.nproc)
48 return samples
49
50 @abstractmethod
51 def parse_file(self, file: Tuple, split: str) -> Tuple:
52 """Convert annotation for a single image.
53
54 Args:
55 file (Tuple): A tuple of path to image and annotation
56 split (str): Current split.
57
58 Returns:
59 Tuple: A tuple of (img_path, instance). Instance is a list of dict
60 containing parsed annotations, which should contain the
61 following keys:
62 - 'poly' or 'box' (textdet or textspotting)
63 - 'text' (textspotting or textrecog)
64 - 'ignore' (all task)
65
66 Examples:
67 An example of returned values:
68 >>> ('imgs/train/xxx.jpg',
69 >>> dict(
70 >>> poly=[[[0, 1], [1, 1], [1, 0], [0, 0]]],
71 >>> text='hello',
72 >>> ignore=False)
73 >>> )
74 """
75 raise NotImplementedError
76
77 def loader(self,
78 file_path: str,
79 separator: str = ',',
80 format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',
81 encoding='utf-8') -> Union[Dict, str]:
82 """A basic loader designed for .txt format annotation.
83
84 Args:
85 file_path (str): Path to the txt file.
86 separator (str, optional): Separator of data. Defaults to ','.
87 format (str, optional): Annotation format.
88 Defaults to 'x1,y1,x2,y2,x3,y3,x4,y4,trans'.
89 encoding (str, optional): Encoding format. Defaults to 'utf-8'.
90
91 Yields:
92 Iterator[Union[Dict, str]]: Original text line or a dict containing
93 the information of the text line.
94 """
95 keys = format.split(separator)
96 with open(file_path, 'r', encoding=encoding) as f:
97 for line in f.readlines():
98 line = line.strip()
99 if line:
100 yield dict(zip(keys, line.split(separator)))
101
[end of mmocr/datasets/preparers/parsers/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mmocr/datasets/preparers/parsers/base.py b/mmocr/datasets/preparers/parsers/base.py
--- a/mmocr/datasets/preparers/parsers/base.py
+++ b/mmocr/datasets/preparers/parsers/base.py
@@ -79,7 +79,8 @@
separator: str = ',',
format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',
encoding='utf-8') -> Union[Dict, str]:
- """A basic loader designed for .txt format annotation.
+ """A basic loader designed for .txt format annotation. It greedily
+ extracts information separated by separators.
Args:
file_path (str): Path to the txt file.
@@ -96,5 +97,8 @@
with open(file_path, 'r', encoding=encoding) as f:
for line in f.readlines():
line = line.strip()
+ values = line.split(separator)
+ values = values[:len(keys) -
+ 1] + [separator.join(values[len(keys) - 1:])]
if line:
- yield dict(zip(keys, line.split(separator)))
+ yield dict(zip(keys, values))
|
{"golden_diff": "diff --git a/mmocr/datasets/preparers/parsers/base.py b/mmocr/datasets/preparers/parsers/base.py\n--- a/mmocr/datasets/preparers/parsers/base.py\n+++ b/mmocr/datasets/preparers/parsers/base.py\n@@ -79,7 +79,8 @@\n separator: str = ',',\n format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',\n encoding='utf-8') -> Union[Dict, str]:\n- \"\"\"A basic loader designed for .txt format annotation.\n+ \"\"\"A basic loader designed for .txt format annotation. It greedily\n+ extracts information separated by separators.\n \n Args:\n file_path (str): Path to the txt file.\n@@ -96,5 +97,8 @@\n with open(file_path, 'r', encoding=encoding) as f:\n for line in f.readlines():\n line = line.strip()\n+ values = line.split(separator)\n+ values = values[:len(keys) -\n+ 1] + [separator.join(values[len(keys) - 1:])]\n if line:\n- yield dict(zip(keys, line.split(separator)))\n+ yield dict(zip(keys, values))\n", "issue": "[Bug] \u6570\u636e\u96c6\u9884\u5904\u7406\u4e2d\uff0c\u5bf9\u4e8e\u542b\",\"\u7684\u7279\u6b8a\u6587\u672c\u672a\u7279\u6b8a\u5904\u7406\n### Prerequisite\n\n- [X] I have searched [Issues](https://github.com/open-mmlab/mmocr/issues) and [Discussions](https://github.com/open-mmlab/mmocr/discussions) but cannot get the expected help.\n- [X] The bug has not been fixed in the [latest version (0.x)](https://github.com/open-mmlab/mmocr) or [latest version (1.x)](https://github.com/open-mmlab/mmocr/tree/dev-1.x).\n\n### Task\n\nI'm using the official example scripts/configs for the officially supported tasks/models/datasets.\n\n### Branch\n\n1.x branch https://github.com/open-mmlab/mmocr/tree/dev-1.x\n\n### Environment\n\n\u672c\u6b21bug\u548c\u73af\u5883\u65e0\u5173\r\ntorch 1.7.0\r\ntorchvision 0.8.1\r\nmmcv 2.0.0rc3\r\nmmcv-full 1.7.0\r\nmmdet 3.0.0rc0\r\nmmengine 0.1.0\r\nmmocr 1.0.0rc3 \r\n\n\n### Reproduces the problem - code sample\n\n\u76f8\u5173\u4ee3\u7801\u5728mmocr/datasets/preparers/parsers/icdar_txt_parser.py\n\n### Reproduces the problem - command or script\n\npython tools/dataset_converters/prepare_dataset.py icdar2015 --task textspotting\n\n### Reproduces the problem - error message\n\nno error message\uff0c\u68c0\u67e5\u53d1\u73b0\u90e8\u5206\u56fe\u7247\u9884\u5904\u7406\u540e\u7684\u6587\u672cgt\u4e0d\u5bf9\n\n### Additional information\n\n1. \u4f7f\u7528mmocr 1.x\u7248\u672c\u8fdb\u884c\u9884\u5904\u7406icdar2015\u6570\u636e\u96c6\u4e3a\u7edf\u4e00\u683c\u5f0f\u7684json\u6587\u4ef6\r\n2. \u53d1\u73b0\u5bf9\u6587\u672c\u5904\u7406\u65f6\uff0c\u672a\u5904\u7406\u542b\u5206\u9694\u7b26\u7684\u60c5\u51b5\uff0c\u5982\u539f\u59cb\u6570\u636e\u96c6\u7684100,000\u6587\u672c\uff0c\u5728\u7edf\u4e00\u683c\u5f0f\u4e2d\u4e3a100\uff0c\u5373\u5206\u9694\u7b26\u5c06\u6587\u672c\u5206\u5f00\u540e\u4ec5\u53d6\u4e86\u7b2c\u4e00\u4e2a\u6587\u672c\r\n3. 
\u5177\u4f53\u4f8b\u5b50\u5982mmocr/data/icdar2015/textspotting_train.json\u4e2d\uff0c\u5728\u56fe\u7247data/icdar2015/textdet_imgs/train/img_39.jpg\u7684gt\u4e2d\uff0c\u539f\u59cb\u6570\u636e\u96c6\u6587\u672c\u6807\u6ce8\u4e3a402,85,460,86,457,106,399,105,100,000\uff0c\u5176\u4e2d402,85,460,86,457,106,399,105\u4e3apolygon\u6807\u6ce8\uff0c100,000\u4e3a\u6587\u672c\u6807\u6ce8\u3002\u4f46\u662f\u9884\u5904\u7406\u540e\u7684ocr\u6570\u636e\u96c6\u4e2d\uff0c\u6587\u672c\u6807\u6ce8\u4e3a100\u3002\r\n\uff08\u56e0\u4e3a\u5728\u533a\u5206polygon\u6807\u6ce8\u548ctext\u6807\u6ce8\u65f6\uff0c\u901a\u8fc7\u5206\u9694\u7b26\",\"\u6765\u533a\u5206\uff0c\u5bfc\u81f4text\u6587\u672c\u88ab\u622a\u65ad\uff09\r\n\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import abstractmethod\nfrom functools import partial\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom mmengine import track_parallel_progress\n\n\nclass BaseParser:\n \"\"\"Base class for parsing annotations.\n\n Args:\n data_root (str, optional): Path to the data root. Defaults to None.\n nproc (int, optional): Number of processes. Defaults to 1.\n \"\"\"\n\n def __init__(self,\n data_root: Optional[str] = None,\n nproc: int = 1) -> None:\n self.data_root = data_root\n self.nproc = nproc\n\n def __call__(self, files: List[Tuple], split: str) -> List:\n \"\"\"Parse annotations.\n\n Args:\n files (List[Tuple]): A list of a tuple of\n (image_path, annotation_path).\n split (str): The split of the dataset.\n\n Returns:\n List: A list of a tuple of (image_path, instances)\n \"\"\"\n samples = self.parse_files(files, split)\n return samples\n\n def parse_files(self, files: List[Tuple], split: str) -> List[Tuple]:\n \"\"\"Convert annotations to MMOCR format.\n\n Args:\n files (Tuple): A list of tuple of path to image and annotation.\n\n Returns:\n List[Tuple]: A list of a tuple of (image_path, instances)\n \"\"\"\n func = partial(self.parse_file, split=split)\n samples = track_parallel_progress(func, files, nproc=self.nproc)\n return samples\n\n @abstractmethod\n def parse_file(self, file: Tuple, split: str) -> Tuple:\n \"\"\"Convert annotation for a single image.\n\n Args:\n file (Tuple): A tuple of path to image and annotation\n split (str): Current split.\n\n Returns:\n Tuple: A tuple of (img_path, instance). Instance is a list of dict\n containing parsed annotations, which should contain the\n following keys:\n - 'poly' or 'box' (textdet or textspotting)\n - 'text' (textspotting or textrecog)\n - 'ignore' (all task)\n\n Examples:\n An example of returned values:\n >>> ('imgs/train/xxx.jpg',\n >>> dict(\n >>> poly=[[[0, 1], [1, 1], [1, 0], [0, 0]]],\n >>> text='hello',\n >>> ignore=False)\n >>> )\n \"\"\"\n raise NotImplementedError\n\n def loader(self,\n file_path: str,\n separator: str = ',',\n format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans',\n encoding='utf-8') -> Union[Dict, str]:\n \"\"\"A basic loader designed for .txt format annotation.\n\n Args:\n file_path (str): Path to the txt file.\n separator (str, optional): Separator of data. Defaults to ','.\n format (str, optional): Annotation format.\n Defaults to 'x1,y1,x2,y2,x3,y3,x4,y4,trans'.\n encoding (str, optional): Encoding format. 
Defaults to 'utf-8'.\n\n Yields:\n Iterator[Union[Dict, str]]: Original text line or a dict containing\n the information of the text line.\n \"\"\"\n keys = format.split(separator)\n with open(file_path, 'r', encoding=encoding) as f:\n for line in f.readlines():\n line = line.strip()\n if line:\n yield dict(zip(keys, line.split(separator)))\n", "path": "mmocr/datasets/preparers/parsers/base.py"}]}
| 2,140 | 269 |
gh_patches_debug_7801
|
rasdani/github-patches
|
git_diff
|
python-pillow__Pillow-7357
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in QOI decoder leading to corrupted images
### What did you do?
Loaded dice.qoi from the official test image set (https://qoiformat.org/qoi_test_images.zip).
### What did you expect to happen?
Image is not corrupted.
### What actually happened?
Image is corrupted.
### What are your OS, Python and Pillow versions?
* OS: doesn't matter
* Python: doesn't matter
* Pillow: all versions with QOI support
The problem is the faulty implementation of the QOI_OP_RGB operation here https://github.com/python-pillow/Pillow/blob/24606216e1e5931a8fe6f41acde9e7e67489905d/src/PIL/QoiImagePlugin.py#L58C10-L58C10
The implementation sets the alpha channel to 255; however, the QOI specification says it should use the previous alpha value.
Replacing that line with something like `value = self.fd.read(3) + o8(self._previous_pixel[3])` fixes the problem.
</issue>
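As a hedged illustration of the fix described above, here is a standalone sketch (not the plugin's actual method; `previous_pixel` stands in for the decoder's 4-byte RGBA state):

```python
def decode_op_rgb(rgb_bytes: bytes, previous_pixel: bytes) -> bytes:
    # QOI_OP_RGB carries only R, G and B; per the QOI spec the alpha value is
    # inherited from the previously decoded pixel, not reset to 255.
    return rgb_bytes + previous_pixel[3:]

# the previous pixel was semi-transparent (alpha 0x80); the new pixel keeps that alpha
assert decode_op_rgb(b"\x10\x20\x30", b"\x00\x00\x00\x80") == b"\x10\x20\x30\x80"
```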
<code>
[start of src/PIL/QoiImagePlugin.py]
1 #
2 # The Python Imaging Library.
3 #
4 # QOI support for PIL
5 #
6 # See the README file for information on usage and redistribution.
7 #
8
9 import os
10
11 from . import Image, ImageFile
12 from ._binary import i32be as i32
13 from ._binary import o8
14
15
16 def _accept(prefix):
17 return prefix[:4] == b"qoif"
18
19
20 class QoiImageFile(ImageFile.ImageFile):
21 format = "QOI"
22 format_description = "Quite OK Image"
23
24 def _open(self):
25 if not _accept(self.fp.read(4)):
26 msg = "not a QOI file"
27 raise SyntaxError(msg)
28
29 self._size = tuple(i32(self.fp.read(4)) for i in range(2))
30
31 channels = self.fp.read(1)[0]
32 self._mode = "RGB" if channels == 3 else "RGBA"
33
34 self.fp.seek(1, os.SEEK_CUR) # colorspace
35 self.tile = [("qoi", (0, 0) + self._size, self.fp.tell(), None)]
36
37
38 class QoiDecoder(ImageFile.PyDecoder):
39 _pulls_fd = True
40
41 def _add_to_previous_pixels(self, value):
42 self._previous_pixel = value
43
44 r, g, b, a = value
45 hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64
46 self._previously_seen_pixels[hash_value] = value
47
48 def decode(self, buffer):
49 self._previously_seen_pixels = {}
50 self._previous_pixel = None
51 self._add_to_previous_pixels(b"".join(o8(i) for i in (0, 0, 0, 255)))
52
53 data = bytearray()
54 bands = Image.getmodebands(self.mode)
55 while len(data) < self.state.xsize * self.state.ysize * bands:
56 byte = self.fd.read(1)[0]
57 if byte == 0b11111110: # QOI_OP_RGB
58 value = self.fd.read(3) + o8(255)
59 elif byte == 0b11111111: # QOI_OP_RGBA
60 value = self.fd.read(4)
61 else:
62 op = byte >> 6
63 if op == 0: # QOI_OP_INDEX
64 op_index = byte & 0b00111111
65 value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0))
66 elif op == 1: # QOI_OP_DIFF
67 value = (
68 (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)
69 % 256,
70 (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)
71 % 256,
72 (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,
73 )
74 value += (self._previous_pixel[3],)
75 elif op == 2: # QOI_OP_LUMA
76 second_byte = self.fd.read(1)[0]
77 diff_green = (byte & 0b00111111) - 32
78 diff_red = ((second_byte & 0b11110000) >> 4) - 8
79 diff_blue = (second_byte & 0b00001111) - 8
80
81 value = tuple(
82 (self._previous_pixel[i] + diff_green + diff) % 256
83 for i, diff in enumerate((diff_red, 0, diff_blue))
84 )
85 value += (self._previous_pixel[3],)
86 elif op == 3: # QOI_OP_RUN
87 run_length = (byte & 0b00111111) + 1
88 value = self._previous_pixel
89 if bands == 3:
90 value = value[:3]
91 data += value * run_length
92 continue
93 value = b"".join(o8(i) for i in value)
94 self._add_to_previous_pixels(value)
95
96 if bands == 3:
97 value = value[:3]
98 data += value
99 self.set_as_raw(bytes(data))
100 return -1, 0
101
102
103 Image.register_open(QoiImageFile.format, QoiImageFile, _accept)
104 Image.register_decoder("qoi", QoiDecoder)
105 Image.register_extension(QoiImageFile.format, ".qoi")
106
[end of src/PIL/QoiImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/PIL/QoiImagePlugin.py b/src/PIL/QoiImagePlugin.py
--- a/src/PIL/QoiImagePlugin.py
+++ b/src/PIL/QoiImagePlugin.py
@@ -55,7 +55,7 @@
while len(data) < self.state.xsize * self.state.ysize * bands:
byte = self.fd.read(1)[0]
if byte == 0b11111110: # QOI_OP_RGB
- value = self.fd.read(3) + o8(255)
+ value = self.fd.read(3) + self._previous_pixel[3:]
elif byte == 0b11111111: # QOI_OP_RGBA
value = self.fd.read(4)
else:
|
{"golden_diff": "diff --git a/src/PIL/QoiImagePlugin.py b/src/PIL/QoiImagePlugin.py\n--- a/src/PIL/QoiImagePlugin.py\n+++ b/src/PIL/QoiImagePlugin.py\n@@ -55,7 +55,7 @@\n while len(data) < self.state.xsize * self.state.ysize * bands:\n byte = self.fd.read(1)[0]\n if byte == 0b11111110: # QOI_OP_RGB\n- value = self.fd.read(3) + o8(255)\n+ value = self.fd.read(3) + self._previous_pixel[3:]\n elif byte == 0b11111111: # QOI_OP_RGBA\n value = self.fd.read(4)\n else:\n", "issue": "Bug in QOI decoder leading to corrupted images\n### What did you do?\r\n\r\nLoaded dice.qoi from the official test image set (https://qoiformat.org/qoi_test_images.zip).\r\n\r\n### What did you expect to happen?\r\n\r\nImage is not corrupted.\r\n\r\n### What actually happened?\r\n\r\nImage is corrupted.\r\n\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: doesn't matter\r\n* Python: doesn't matter\r\n* Pillow: all versions with QOI support\r\n\r\nThe problem is the faulty implementation of the QOI_OP_RGB operation here https://github.com/python-pillow/Pillow/blob/24606216e1e5931a8fe6f41acde9e7e67489905d/src/PIL/QoiImagePlugin.py#L58C10-L58C10\r\nThe implementation sets the alpha channel to 255, however, the QOI specification says it should use the previous alpha value.\r\nReplacing that line with something like `value = self.fd.read(3) + o8(self._previous_pixel[3])` fixes the problem.\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n#\n# QOI support for PIL\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport os\n\nfrom . import Image, ImageFile\nfrom ._binary import i32be as i32\nfrom ._binary import o8\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"qoif\"\n\n\nclass QoiImageFile(ImageFile.ImageFile):\n format = \"QOI\"\n format_description = \"Quite OK Image\"\n\n def _open(self):\n if not _accept(self.fp.read(4)):\n msg = \"not a QOI file\"\n raise SyntaxError(msg)\n\n self._size = tuple(i32(self.fp.read(4)) for i in range(2))\n\n channels = self.fp.read(1)[0]\n self._mode = \"RGB\" if channels == 3 else \"RGBA\"\n\n self.fp.seek(1, os.SEEK_CUR) # colorspace\n self.tile = [(\"qoi\", (0, 0) + self._size, self.fp.tell(), None)]\n\n\nclass QoiDecoder(ImageFile.PyDecoder):\n _pulls_fd = True\n\n def _add_to_previous_pixels(self, value):\n self._previous_pixel = value\n\n r, g, b, a = value\n hash_value = (r * 3 + g * 5 + b * 7 + a * 11) % 64\n self._previously_seen_pixels[hash_value] = value\n\n def decode(self, buffer):\n self._previously_seen_pixels = {}\n self._previous_pixel = None\n self._add_to_previous_pixels(b\"\".join(o8(i) for i in (0, 0, 0, 255)))\n\n data = bytearray()\n bands = Image.getmodebands(self.mode)\n while len(data) < self.state.xsize * self.state.ysize * bands:\n byte = self.fd.read(1)[0]\n if byte == 0b11111110: # QOI_OP_RGB\n value = self.fd.read(3) + o8(255)\n elif byte == 0b11111111: # QOI_OP_RGBA\n value = self.fd.read(4)\n else:\n op = byte >> 6\n if op == 0: # QOI_OP_INDEX\n op_index = byte & 0b00111111\n value = self._previously_seen_pixels.get(op_index, (0, 0, 0, 0))\n elif op == 1: # QOI_OP_DIFF\n value = (\n (self._previous_pixel[0] + ((byte & 0b00110000) >> 4) - 2)\n % 256,\n (self._previous_pixel[1] + ((byte & 0b00001100) >> 2) - 2)\n % 256,\n (self._previous_pixel[2] + (byte & 0b00000011) - 2) % 256,\n )\n value += (self._previous_pixel[3],)\n elif op == 2: # QOI_OP_LUMA\n second_byte = self.fd.read(1)[0]\n diff_green = (byte & 0b00111111) - 32\n diff_red = ((second_byte & 0b11110000) >> 4) - 8\n diff_blue = 
(second_byte & 0b00001111) - 8\n\n value = tuple(\n (self._previous_pixel[i] + diff_green + diff) % 256\n for i, diff in enumerate((diff_red, 0, diff_blue))\n )\n value += (self._previous_pixel[3],)\n elif op == 3: # QOI_OP_RUN\n run_length = (byte & 0b00111111) + 1\n value = self._previous_pixel\n if bands == 3:\n value = value[:3]\n data += value * run_length\n continue\n value = b\"\".join(o8(i) for i in value)\n self._add_to_previous_pixels(value)\n\n if bands == 3:\n value = value[:3]\n data += value\n self.set_as_raw(bytes(data))\n return -1, 0\n\n\nImage.register_open(QoiImageFile.format, QoiImageFile, _accept)\nImage.register_decoder(\"qoi\", QoiDecoder)\nImage.register_extension(QoiImageFile.format, \".qoi\")\n", "path": "src/PIL/QoiImagePlugin.py"}]}
| 2,051 | 179 |
gh_patches_debug_21263
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-1417
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rabbitmq_user module inserts "node" parameter even when it was not given
rabbitmq_user tries to run
```
/usr/sbin/rabbitmqctl -q -n rabbit list_users
```
even when there was no node parameter defined.
The correct behavior would be:
```
/usr/sbin/rabbitmqctl -q list_users
```
The difference is that the latter works on rabbit@local but the former does not. So if a parameter is not given a value, the correct behaviour is not to pass that parameter to the underlying process with a default, but to let the process handle the missing (optional!) parameter the way it was intended.
effect:
failed: [local] => (item=rabbitmq_user_remove) => {"cmd": "/usr/sbin/rabbitmqctl -q -n rabbit list_users", "failed": true, "item": "rabbitmq_user_remove", "rc": 2}
stderr: Error: unable to connect to node rabbit@local: nodedown
# DIAGNOSTICS
attempted to contact: [rabbit@local]
rabbit@local:
- connected to epmd (port 4369) on local
- epmd reports node 'rabbit' running on port 25672
- TCP connection succeeded but Erlang distribution failed
- suggestion: hostname mismatch?
- suggestion: is the cookie set correctly?
- suggestion: is the Erlang distribution using TLS?
rabbitmq_user runs the command shown in the error above when I'm trying to remove the guest user, but the simpler command works fine:
```
$ sudo /usr/sbin/rabbitmqctl -q list_users
guest [administrator]
```
Of course I could work around this with setting the node parameter, but that depends on the hostname, in this case:
```
$ sudo /usr/sbin/rabbitmqctl -q -n rabbit@develbox list_users
guest [administrator]
```
So if you don't have the information, don't give a bad default to an unnecessary (optional) parameter; or, if it is really necessary, in this case you should use rabbit@hostname with the current host's name.
</issue>
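A minimal sketch of the guarded command construction the report asks for (a hypothetical helper for illustration only; the real module builds the command inside `RabbitMqUser._exec` and currently defaults `node` to `rabbit`):

```python
def build_rabbitmqctl_cmd(rabbitmqctl, args, node=None):
    cmd = [rabbitmqctl, '-q']
    if node:  # only pass "-n" when a node was explicitly requested
        cmd += ['-n', node]
    return cmd + args

print(build_rabbitmqctl_cmd('/usr/sbin/rabbitmqctl', ['list_users']))
# ['/usr/sbin/rabbitmqctl', '-q', 'list_users']
print(build_rabbitmqctl_cmd('/usr/sbin/rabbitmqctl', ['list_users'], node='rabbit@develbox'))
# ['/usr/sbin/rabbitmqctl', '-q', '-n', 'rabbit@develbox', 'list_users']
```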
<code>
[start of messaging/rabbitmq_user.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013, Chatham Financial <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 DOCUMENTATION = '''
22 ---
23 module: rabbitmq_user
24 short_description: Adds or removes users to RabbitMQ
25 description:
26 - Add or remove users to RabbitMQ and assign permissions
27 version_added: "1.1"
28 author: '"Chris Hoffman (@chrishoffman)"'
29 options:
30 user:
31 description:
32 - Name of user to add
33 required: true
34 default: null
35 aliases: [username, name]
36 password:
37 description:
38 - Password of user to add.
39 - To change the password of an existing user, you must also specify
40 C(force=yes).
41 required: false
42 default: null
43 tags:
44 description:
45 - User tags specified as comma delimited
46 required: false
47 default: null
48 vhost:
49 description:
50 - vhost to apply access privileges.
51 required: false
52 default: /
53 node:
54 description:
55 - erlang node name of the rabbit we wish to configure
56 required: false
57 default: rabbit
58 version_added: "1.2"
59 configure_priv:
60 description:
61 - Regular expression to restrict configure actions on a resource
62 for the specified vhost.
63 - By default all actions are restricted.
64 required: false
65 default: ^$
66 write_priv:
67 description:
68 - Regular expression to restrict configure actions on a resource
69 for the specified vhost.
70 - By default all actions are restricted.
71 required: false
72 default: ^$
73 read_priv:
74 description:
75 - Regular expression to restrict configure actions on a resource
76 for the specified vhost.
77 - By default all actions are restricted.
78 required: false
79 default: ^$
80 force:
81 description:
82 - Deletes and recreates the user.
83 required: false
84 default: "no"
85 choices: [ "yes", "no" ]
86 state:
87 description:
88 - Specify if user is to be added or removed
89 required: false
90 default: present
91 choices: [present, absent]
92 '''
93
94 EXAMPLES = '''
95 # Add user to server and assign full access control
96 - rabbitmq_user: user=joe
97 password=changeme
98 vhost=/
99 configure_priv=.*
100 read_priv=.*
101 write_priv=.*
102 state=present
103 '''
104
105 class RabbitMqUser(object):
106 def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node):
107 self.module = module
108 self.username = username
109 self.password = password
110 self.node = node
111 if not tags:
112 self.tags = list()
113 else:
114 self.tags = tags.split(',')
115
116 permissions = dict(
117 vhost=vhost,
118 configure_priv=configure_priv,
119 write_priv=write_priv,
120 read_priv=read_priv
121 )
122 self.permissions = permissions
123
124 self._tags = None
125 self._permissions = None
126 self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
127
128 def _exec(self, args, run_in_check_mode=False):
129 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
130 cmd = [self._rabbitmqctl, '-q', '-n', self.node]
131 rc, out, err = self.module.run_command(cmd + args, check_rc=True)
132 return out.splitlines()
133 return list()
134
135 def get(self):
136 users = self._exec(['list_users'], True)
137
138 for user_tag in users:
139 user, tags = user_tag.split('\t')
140
141 if user == self.username:
142 for c in ['[',']',' ']:
143 tags = tags.replace(c, '')
144
145 if tags != '':
146 self._tags = tags.split(',')
147 else:
148 self._tags = list()
149
150 self._permissions = self._get_permissions()
151 return True
152 return False
153
154 def _get_permissions(self):
155 perms_out = self._exec(['list_user_permissions', self.username], True)
156
157 for perm in perms_out:
158 vhost, configure_priv, write_priv, read_priv = perm.split('\t')
159 if vhost == self.permissions['vhost']:
160 return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv)
161
162 return dict()
163
164 def add(self):
165 if self.password is not None:
166 self._exec(['add_user', self.username, self.password])
167 else:
168 self._exec(['add_user', self.username, ''])
169 self._exec(['clear_password', self.username])
170
171 def delete(self):
172 self._exec(['delete_user', self.username])
173
174 def set_tags(self):
175 self._exec(['set_user_tags', self.username] + self.tags)
176
177 def set_permissions(self):
178 cmd = ['set_permissions']
179 cmd.append('-p')
180 cmd.append(self.permissions['vhost'])
181 cmd.append(self.username)
182 cmd.append(self.permissions['configure_priv'])
183 cmd.append(self.permissions['write_priv'])
184 cmd.append(self.permissions['read_priv'])
185 self._exec(cmd)
186
187 def has_tags_modifications(self):
188 return set(self.tags) != set(self._tags)
189
190 def has_permissions_modifications(self):
191 return self._permissions != self.permissions
192
193 def main():
194 arg_spec = dict(
195 user=dict(required=True, aliases=['username', 'name']),
196 password=dict(default=None),
197 tags=dict(default=None),
198 vhost=dict(default='/'),
199 configure_priv=dict(default='^$'),
200 write_priv=dict(default='^$'),
201 read_priv=dict(default='^$'),
202 force=dict(default='no', type='bool'),
203 state=dict(default='present', choices=['present', 'absent']),
204 node=dict(default='rabbit')
205 )
206 module = AnsibleModule(
207 argument_spec=arg_spec,
208 supports_check_mode=True
209 )
210
211 username = module.params['user']
212 password = module.params['password']
213 tags = module.params['tags']
214 vhost = module.params['vhost']
215 configure_priv = module.params['configure_priv']
216 write_priv = module.params['write_priv']
217 read_priv = module.params['read_priv']
218 force = module.params['force']
219 state = module.params['state']
220 node = module.params['node']
221
222 rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node)
223
224 changed = False
225 if rabbitmq_user.get():
226 if state == 'absent':
227 rabbitmq_user.delete()
228 changed = True
229 else:
230 if force:
231 rabbitmq_user.delete()
232 rabbitmq_user.add()
233 rabbitmq_user.get()
234 changed = True
235
236 if rabbitmq_user.has_tags_modifications():
237 rabbitmq_user.set_tags()
238 changed = True
239
240 if rabbitmq_user.has_permissions_modifications():
241 rabbitmq_user.set_permissions()
242 changed = True
243 elif state == 'present':
244 rabbitmq_user.add()
245 rabbitmq_user.set_tags()
246 rabbitmq_user.set_permissions()
247 changed = True
248
249 module.exit_json(changed=changed, user=username, state=state)
250
251 # import module snippets
252 from ansible.module_utils.basic import *
253 main()
254
[end of messaging/rabbitmq_user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py
--- a/messaging/rabbitmq_user.py
+++ b/messaging/rabbitmq_user.py
@@ -127,7 +127,9 @@
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
- cmd = [self._rabbitmqctl, '-q', '-n', self.node]
+ cmd = [self._rabbitmqctl, '-q']
+ if self.node is not None:
+ cmd.append(['-n', self.node])
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
@@ -201,7 +203,7 @@
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
- node=dict(default='rabbit')
+ node=dict(default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
|
{"golden_diff": "diff --git a/messaging/rabbitmq_user.py b/messaging/rabbitmq_user.py\n--- a/messaging/rabbitmq_user.py\n+++ b/messaging/rabbitmq_user.py\n@@ -127,7 +127,9 @@\n \n def _exec(self, args, run_in_check_mode=False):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n- cmd = [self._rabbitmqctl, '-q', '-n', self.node]\n+ cmd = [self._rabbitmqctl, '-q']\n+ if self.node is not None:\n+ cmd.append(['-n', self.node])\n rc, out, err = self.module.run_command(cmd + args, check_rc=True)\n return out.splitlines()\n return list()\n@@ -201,7 +203,7 @@\n read_priv=dict(default='^$'),\n force=dict(default='no', type='bool'),\n state=dict(default='present', choices=['present', 'absent']),\n- node=dict(default='rabbit')\n+ node=dict(default=None)\n )\n module = AnsibleModule(\n argument_spec=arg_spec,\n", "issue": "rabbitmq_user module inserts \"node\" parameter even when it was not given\nrabbitmq_user tries to run \n\n```\n/usr/sbin/rabbitmqctl -q -n rabbit list_users\n```\n\neven, when there was no node parameter defined.\n\nThe correct behavior would be:\n\n```\n/usr/sbin/rabbitmqctl -q list_users\n```\n\nThe difference is that the later works on rabbit@local but the previos does not. So if there's a parameter, and it does not have a value, the correct method would be not to give that parameter to the underlying process with a default but let it handle the missing (optional!) parameter the way it was intended.\n\neffect:\n\nfailed: [local] => (item=rabbitmq_user_remove) => {\"cmd\": \"/usr/sbin/rabbitmqctl -q -n rabbit list_users\", \"failed\": true, \"item\": \"rabbitmq_user_remove\", \"rc\": 2}\nstderr: Error: unable to connect to node rabbit@local: nodedown\n# DIAGNOSTICS\n\nattempted to contact: [rabbit@local]\n\nrabbit@local:\n- connected to epmd (port 4369) on local\n- epmd reports node 'rabbit' running on port 25672\n- TCP connection succeeded but Erlang distribution failed\n- suggestion: hostname mismatch?\n- suggestion: is the cookie set correctly?\n- suggestion: is the Erlang distribution using TLS?\n\nrabbitmq_user runs the following command, when I'm trying to remove the guest:\n\nbut the simpler command works fine:\n\n```\n$ sudo /usr/sbin/rabbitmqctl -q list_users\nguest [administrator]\n```\n\nOf course I could work around this with setting the node parameter, but that depends on the hostname, in this case:\n\n```\n$ sudo /usr/sbin/rabbitmqctl -q -n rabbit@develbox list_users\nguest [administrator]\n```\n\nSo if you don't have information, don't give a bad default to a not necessary (optional) parameter, or if its very-very neccessary this case you should use rabbit@hostname with the current hosts name.\n\nrabbitmq_user module inserts \"node\" parameter even when it was not given\nrabbitmq_user tries to run \n\n```\n/usr/sbin/rabbitmqctl -q -n rabbit list_users\n```\n\neven, when there was no node parameter defined.\n\nThe correct behavior would be:\n\n```\n/usr/sbin/rabbitmqctl -q list_users\n```\n\nThe difference is that the later works on rabbit@local but the previos does not. So if there's a parameter, and it does not have a value, the correct method would be not to give that parameter to the underlying process with a default but let it handle the missing (optional!) 
parameter the way it was intended.\n\neffect:\n\nfailed: [local] => (item=rabbitmq_user_remove) => {\"cmd\": \"/usr/sbin/rabbitmqctl -q -n rabbit list_users\", \"failed\": true, \"item\": \"rabbitmq_user_remove\", \"rc\": 2}\nstderr: Error: unable to connect to node rabbit@local: nodedown\n# DIAGNOSTICS\n\nattempted to contact: [rabbit@local]\n\nrabbit@local:\n- connected to epmd (port 4369) on local\n- epmd reports node 'rabbit' running on port 25672\n- TCP connection succeeded but Erlang distribution failed\n- suggestion: hostname mismatch?\n- suggestion: is the cookie set correctly?\n- suggestion: is the Erlang distribution using TLS?\n\nrabbitmq_user runs the following command, when I'm trying to remove the guest:\n\nbut the simpler command works fine:\n\n```\n$ sudo /usr/sbin/rabbitmqctl -q list_users\nguest [administrator]\n```\n\nOf course I could work around this with setting the node parameter, but that depends on the hostname, in this case:\n\n```\n$ sudo /usr/sbin/rabbitmqctl -q -n rabbit@develbox list_users\nguest [administrator]\n```\n\nSo if you don't have information, don't give a bad default to a not necessary (optional) parameter, or if its very-very neccessary this case you should use rabbit@hostname with the current hosts name.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Chatham Financial <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: rabbitmq_user\nshort_description: Adds or removes users to RabbitMQ\ndescription:\n - Add or remove users to RabbitMQ and assign permissions\nversion_added: \"1.1\"\nauthor: '\"Chris Hoffman (@chrishoffman)\"'\noptions:\n user:\n description:\n - Name of user to add\n required: true\n default: null\n aliases: [username, name]\n password:\n description:\n - Password of user to add.\n - To change the password of an existing user, you must also specify\n C(force=yes).\n required: false\n default: null\n tags:\n description:\n - User tags specified as comma delimited\n required: false\n default: null\n vhost:\n description:\n - vhost to apply access privileges.\n required: false\n default: /\n node:\n description:\n - erlang node name of the rabbit we wish to configure\n required: false\n default: rabbit\n version_added: \"1.2\"\n configure_priv:\n description:\n - Regular expression to restrict configure actions on a resource\n for the specified vhost.\n - By default all actions are restricted.\n required: false\n default: ^$\n write_priv:\n description:\n - Regular expression to restrict configure actions on a resource\n for the specified vhost.\n - By default all actions are restricted.\n required: false\n default: ^$\n read_priv:\n description:\n - Regular expression to restrict configure actions on a resource\n for the specified vhost.\n - By default all actions are restricted.\n required: false\n default: ^$\n force:\n description:\n - Deletes and recreates the user.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n state:\n description:\n - Specify if user is to be added or removed\n required: false\n default: present\n choices: [present, absent]\n'''\n\nEXAMPLES = '''\n# Add user to server and assign full access control\n- rabbitmq_user: user=joe\n password=changeme\n vhost=/\n configure_priv=.*\n read_priv=.*\n write_priv=.*\n state=present\n'''\n\nclass RabbitMqUser(object):\n def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node):\n self.module = module\n self.username = username\n self.password = password\n self.node = node\n if not tags:\n self.tags = list()\n else:\n self.tags = tags.split(',')\n\n permissions = dict(\n vhost=vhost,\n configure_priv=configure_priv,\n write_priv=write_priv,\n read_priv=read_priv\n )\n self.permissions = permissions\n\n self._tags = None\n self._permissions = None\n self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)\n\n def _exec(self, args, run_in_check_mode=False):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = [self._rabbitmqctl, '-q', '-n', self.node]\n rc, out, err = self.module.run_command(cmd + args, check_rc=True)\n return out.splitlines()\n return list()\n\n def get(self):\n users = self._exec(['list_users'], True)\n\n for user_tag in users:\n user, tags = user_tag.split('\\t')\n\n if user == self.username:\n for c in ['[',']',' ']:\n tags = tags.replace(c, '')\n\n if tags != '':\n self._tags = tags.split(',')\n else:\n self._tags = list()\n\n self._permissions = self._get_permissions()\n return True\n return False\n\n def _get_permissions(self):\n perms_out = self._exec(['list_user_permissions', self.username], True)\n\n for perm in perms_out:\n vhost, configure_priv, write_priv, read_priv = perm.split('\\t')\n if vhost == self.permissions['vhost']:\n return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, 
read_priv=read_priv)\n\n return dict()\n\n def add(self):\n if self.password is not None:\n self._exec(['add_user', self.username, self.password])\n else:\n self._exec(['add_user', self.username, ''])\n self._exec(['clear_password', self.username])\n\n def delete(self):\n self._exec(['delete_user', self.username])\n\n def set_tags(self):\n self._exec(['set_user_tags', self.username] + self.tags)\n\n def set_permissions(self):\n cmd = ['set_permissions']\n cmd.append('-p')\n cmd.append(self.permissions['vhost'])\n cmd.append(self.username)\n cmd.append(self.permissions['configure_priv'])\n cmd.append(self.permissions['write_priv'])\n cmd.append(self.permissions['read_priv'])\n self._exec(cmd)\n\n def has_tags_modifications(self):\n return set(self.tags) != set(self._tags)\n\n def has_permissions_modifications(self):\n return self._permissions != self.permissions\n\ndef main():\n arg_spec = dict(\n user=dict(required=True, aliases=['username', 'name']),\n password=dict(default=None),\n tags=dict(default=None),\n vhost=dict(default='/'),\n configure_priv=dict(default='^$'),\n write_priv=dict(default='^$'),\n read_priv=dict(default='^$'),\n force=dict(default='no', type='bool'),\n state=dict(default='present', choices=['present', 'absent']),\n node=dict(default='rabbit')\n )\n module = AnsibleModule(\n argument_spec=arg_spec,\n supports_check_mode=True\n )\n\n username = module.params['user']\n password = module.params['password']\n tags = module.params['tags']\n vhost = module.params['vhost']\n configure_priv = module.params['configure_priv']\n write_priv = module.params['write_priv']\n read_priv = module.params['read_priv']\n force = module.params['force']\n state = module.params['state']\n node = module.params['node']\n\n rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node)\n\n changed = False\n if rabbitmq_user.get():\n if state == 'absent':\n rabbitmq_user.delete()\n changed = True\n else:\n if force:\n rabbitmq_user.delete()\n rabbitmq_user.add()\n rabbitmq_user.get()\n changed = True\n\n if rabbitmq_user.has_tags_modifications():\n rabbitmq_user.set_tags()\n changed = True\n\n if rabbitmq_user.has_permissions_modifications():\n rabbitmq_user.set_permissions()\n changed = True\n elif state == 'present':\n rabbitmq_user.add()\n rabbitmq_user.set_tags()\n rabbitmq_user.set_permissions()\n changed = True\n\n module.exit_json(changed=changed, user=username, state=state)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "messaging/rabbitmq_user.py"}]}
| 3,821 | 251 |
gh_patches_debug_16247
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1397
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dsa_private_key.pem vector has p and q whose lengths we don't normally allow
We currently enforce that `p` and `q` have lengths which are one of:
- `(1024, 160)`
- `(2048, 256)`
- `(3072, 256)`
However, this vector has `(p, q)` with lengths of `(2048, 160)`. Do we need to be less restrictive, or should we use a different vector?
This was discovered in the process of writing a pure python PEM loader.
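For reference, the `(p, q)` bit lengths of a vector can be checked directly from the loaded key. This is a hedged sketch that assumes the PEM loads through the serialization API; the exact loader signature may differ between library versions:

```
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization


def dsa_pq_bit_lengths(pem_bytes, password=None):
    # Load the private key, then pull the DSA domain parameters (p, q)
    # out of the numbers objects described in dsa.py below.
    key = serialization.load_pem_private_key(pem_bytes, password, default_backend())
    params = key.private_numbers().public_numbers.parameter_numbers
    return params.p.bit_length(), params.q.bit_length()


# For dsa_private_key.pem this is expected to print (2048, 160),
# which is why the strict pair-based check rejects it.
with open("dsa_private_key.pem", "rb") as f:
    print(dsa_pq_bit_lengths(f.read()))
```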
</issue>
<code>
[start of cryptography/hazmat/primitives/asymmetric/dsa.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import six
17
18 from cryptography import utils
19
20
21 def generate_parameters(key_size, backend):
22 return backend.generate_dsa_parameters(key_size)
23
24
25 def generate_private_key(key_size, backend):
26 return backend.generate_dsa_private_key_and_parameters(key_size)
27
28
29 def _check_dsa_parameters(parameters):
30 if (utils.bit_length(parameters.p),
31 utils.bit_length(parameters.q)) not in (
32 (1024, 160),
33 (2048, 256),
34 (3072, 256)):
35 raise ValueError(
36 "p and q's bit-lengths must be one of these pairs (1024, 160), "
37 "(2048, 256), or (3072, 256). Not ({0:d}, {1:d})".format(
38 utils.bit_length(parameters.p), utils.bit_length(parameters.q)
39 )
40 )
41
42 if not (1 < parameters.g < parameters.p):
43 raise ValueError("g, p don't satisfy 1 < g < p.")
44
45
46 def _check_dsa_private_numbers(numbers):
47 parameters = numbers.public_numbers.parameter_numbers
48 _check_dsa_parameters(parameters)
49 if numbers.x <= 0 or numbers.x >= parameters.q:
50 raise ValueError("x must be > 0 and < q.")
51
52 if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p):
53 raise ValueError("y must be equal to (g ** x % p).")
54
55
56 class DSAParameterNumbers(object):
57 def __init__(self, p, q, g):
58 if (
59 not isinstance(p, six.integer_types) or
60 not isinstance(q, six.integer_types) or
61 not isinstance(g, six.integer_types)
62 ):
63 raise TypeError(
64 "DSAParameterNumbers p, q, and g arguments must be integers."
65 )
66
67 self._p = p
68 self._q = q
69 self._g = g
70
71 @property
72 def p(self):
73 return self._p
74
75 @property
76 def q(self):
77 return self._q
78
79 @property
80 def g(self):
81 return self._g
82
83 def parameters(self, backend):
84 return backend.load_dsa_parameter_numbers(self)
85
86
87 class DSAPublicNumbers(object):
88 def __init__(self, y, parameter_numbers):
89 if not isinstance(y, six.integer_types):
90 raise TypeError("DSAPublicNumbers y argument must be an integer.")
91
92 if not isinstance(parameter_numbers, DSAParameterNumbers):
93 raise TypeError(
94 "parameter_numbers must be a DSAParameterNumbers instance."
95 )
96
97 self._y = y
98 self._parameter_numbers = parameter_numbers
99
100 @property
101 def y(self):
102 return self._y
103
104 @property
105 def parameter_numbers(self):
106 return self._parameter_numbers
107
108 def public_key(self, backend):
109 return backend.load_dsa_public_numbers(self)
110
111
112 class DSAPrivateNumbers(object):
113 def __init__(self, x, public_numbers):
114 if not isinstance(x, six.integer_types):
115 raise TypeError("DSAPrivateNumbers x argument must be an integer.")
116
117 if not isinstance(public_numbers, DSAPublicNumbers):
118 raise TypeError(
119 "public_numbers must be a DSAPublicNumbers instance."
120 )
121 self._public_numbers = public_numbers
122 self._x = x
123
124 @property
125 def x(self):
126 return self._x
127
128 @property
129 def public_numbers(self):
130 return self._public_numbers
131
132 def private_key(self, backend):
133 return backend.load_dsa_private_numbers(self)
134
[end of cryptography/hazmat/primitives/asymmetric/dsa.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cryptography/hazmat/primitives/asymmetric/dsa.py b/cryptography/hazmat/primitives/asymmetric/dsa.py
--- a/cryptography/hazmat/primitives/asymmetric/dsa.py
+++ b/cryptography/hazmat/primitives/asymmetric/dsa.py
@@ -27,17 +27,10 @@
def _check_dsa_parameters(parameters):
- if (utils.bit_length(parameters.p),
- utils.bit_length(parameters.q)) not in (
- (1024, 160),
- (2048, 256),
- (3072, 256)):
- raise ValueError(
- "p and q's bit-lengths must be one of these pairs (1024, 160), "
- "(2048, 256), or (3072, 256). Not ({0:d}, {1:d})".format(
- utils.bit_length(parameters.p), utils.bit_length(parameters.q)
- )
- )
+ if utils.bit_length(parameters.p) not in [1024, 2048, 3072]:
+ raise ValueError("p must be exactly 1024, 2048, or 3072 bits long")
+ if utils.bit_length(parameters.q) not in [160, 256]:
+ raise ValueError("q must be exactly 160 or 256 bits long")
if not (1 < parameters.g < parameters.p):
raise ValueError("g, p don't satisfy 1 < g < p.")
|
{"golden_diff": "diff --git a/cryptography/hazmat/primitives/asymmetric/dsa.py b/cryptography/hazmat/primitives/asymmetric/dsa.py\n--- a/cryptography/hazmat/primitives/asymmetric/dsa.py\n+++ b/cryptography/hazmat/primitives/asymmetric/dsa.py\n@@ -27,17 +27,10 @@\n \n \n def _check_dsa_parameters(parameters):\n- if (utils.bit_length(parameters.p),\n- utils.bit_length(parameters.q)) not in (\n- (1024, 160),\n- (2048, 256),\n- (3072, 256)):\n- raise ValueError(\n- \"p and q's bit-lengths must be one of these pairs (1024, 160), \"\n- \"(2048, 256), or (3072, 256). Not ({0:d}, {1:d})\".format(\n- utils.bit_length(parameters.p), utils.bit_length(parameters.q)\n- )\n- )\n+ if utils.bit_length(parameters.p) not in [1024, 2048, 3072]:\n+ raise ValueError(\"p must be exactly 1024, 2048, or 3072 bits long\")\n+ if utils.bit_length(parameters.q) not in [160, 256]:\n+ raise ValueError(\"q must be exactly 160 or 256 bits long\")\n \n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n", "issue": "dsa_private_key.pem vector has p and q whose lengths we don't normally allow\nWe currently enforce that `p` and `q` have lengths which are one of:\n- `(1024, 160)`\n- `(2048, 256)`\n- `(3072, 256)`\n\nHowever, this vector has `(p, q)` with lengths of `(2048, 160)`. Do we need to be less restrictive, use a different vector?\n\nThis was discovered in the process of writing a pure python PEM loader.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\n\n\ndef generate_parameters(key_size, backend):\n return backend.generate_dsa_parameters(key_size)\n\n\ndef generate_private_key(key_size, backend):\n return backend.generate_dsa_private_key_and_parameters(key_size)\n\n\ndef _check_dsa_parameters(parameters):\n if (utils.bit_length(parameters.p),\n utils.bit_length(parameters.q)) not in (\n (1024, 160),\n (2048, 256),\n (3072, 256)):\n raise ValueError(\n \"p and q's bit-lengths must be one of these pairs (1024, 160), \"\n \"(2048, 256), or (3072, 256). 
Not ({0:d}, {1:d})\".format(\n utils.bit_length(parameters.p), utils.bit_length(parameters.q)\n )\n )\n\n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n\n\ndef _check_dsa_private_numbers(numbers):\n parameters = numbers.public_numbers.parameter_numbers\n _check_dsa_parameters(parameters)\n if numbers.x <= 0 or numbers.x >= parameters.q:\n raise ValueError(\"x must be > 0 and < q.\")\n\n if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p):\n raise ValueError(\"y must be equal to (g ** x % p).\")\n\n\nclass DSAParameterNumbers(object):\n def __init__(self, p, q, g):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(q, six.integer_types) or\n not isinstance(g, six.integer_types)\n ):\n raise TypeError(\n \"DSAParameterNumbers p, q, and g arguments must be integers.\"\n )\n\n self._p = p\n self._q = q\n self._g = g\n\n @property\n def p(self):\n return self._p\n\n @property\n def q(self):\n return self._q\n\n @property\n def g(self):\n return self._g\n\n def parameters(self, backend):\n return backend.load_dsa_parameter_numbers(self)\n\n\nclass DSAPublicNumbers(object):\n def __init__(self, y, parameter_numbers):\n if not isinstance(y, six.integer_types):\n raise TypeError(\"DSAPublicNumbers y argument must be an integer.\")\n\n if not isinstance(parameter_numbers, DSAParameterNumbers):\n raise TypeError(\n \"parameter_numbers must be a DSAParameterNumbers instance.\"\n )\n\n self._y = y\n self._parameter_numbers = parameter_numbers\n\n @property\n def y(self):\n return self._y\n\n @property\n def parameter_numbers(self):\n return self._parameter_numbers\n\n def public_key(self, backend):\n return backend.load_dsa_public_numbers(self)\n\n\nclass DSAPrivateNumbers(object):\n def __init__(self, x, public_numbers):\n if not isinstance(x, six.integer_types):\n raise TypeError(\"DSAPrivateNumbers x argument must be an integer.\")\n\n if not isinstance(public_numbers, DSAPublicNumbers):\n raise TypeError(\n \"public_numbers must be a DSAPublicNumbers instance.\"\n )\n self._public_numbers = public_numbers\n self._x = x\n\n @property\n def x(self):\n return self._x\n\n @property\n def public_numbers(self):\n return self._public_numbers\n\n def private_key(self, backend):\n return backend.load_dsa_private_numbers(self)\n", "path": "cryptography/hazmat/primitives/asymmetric/dsa.py"}]}
| 1,889 | 364 |
gh_patches_debug_16759
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1479
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_78 misreported
**Describe the bug**
Checkov reports CKV_AWS_78 as a vulnerability, but the suggested remediation breaks Terraform validation.
According to Checkov, if `encryption_disabled = false` is not set in the resource's main block, the project is considered vulnerable
```
resource "aws_codebuild_project" "project-with-cache" {
name = "test-project-cache"
description = "test_codebuild_project_cache"
build_timeout = "5"
queued_timeout = "5"
+ encryption_disabled = false
}
```
as described here: https://docs.bridgecrew.io/docs/bc_aws_general_30
Unfortunately, in Terraform v1.0.3 `encryption_disabled` is not available in that location, only in the `artifacts`, `secondary_artifacts`, and `logs_config: s3_logs` blocks, as you can see here: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project
So if the attribute is not set, Checkov reports a vulnerability; if it is set, Terraform fails validation.
**To Reproduce**
Steps to reproduce the behavior:
1. Set in **aws_codebuild_project** **encryption_disabled = false**
```
resource "aws_codebuild_project" "project-with-cache" {
name = "test-project-cache"
description = "test_codebuild_project_cache"
build_timeout = "5"
queued_timeout = "5"
+ encryption_disabled = false
}
```
2. Run `terraform validate`
3. See error
**Expected behavior**
No finding should be reported, or a finding only when the attribute is not set in all three supported blocks
**Desktop (please complete the following information):**
- terraform --version: Terraform v1.0.3 on linux_amd64
- checkov --version: 2.0.326
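For illustration, this is roughly the list-wrapped structure a Checkov check receives for such a resource and how the nested attribute would be read; the values below are made up for the example, and the exact parser output may differ:

```
# Sketch of the parsed resource configuration as seen by scan_resource_conf();
# the HCL parser wraps scalar values in lists.
conf = {
    "name": ["test-project-cache"],
    "artifacts": [{"type": ["CODEPIPELINE"], "encryption_disabled": [True]}],
}

artifact = conf["artifacts"][0]
if artifact.get("type") == ["NO_ARTIFACTS"]:
    print("UNKNOWN: nothing to encrypt")
elif artifact.get("encryption_disabled") == [True]:
    print("FAILED: artifact encryption is disabled")
else:
    print("PASSED")
```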
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class CodeBuildProjectEncryption(BaseResourceCheck):
6
7 def __init__(self):
8 name = "Ensure that CodeBuild Project encryption is not disabled"
9 id = "CKV_AWS_78"
10 supported_resources = ['aws_codebuild_project']
11 categories = [CheckCategories.ENCRYPTION]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 if 'artifacts' not in conf:
16 return CheckResult.UNKNOWN
17 artifact = conf['artifacts'][0]
18 if isinstance(artifact, dict):
19 if artifact['type'] == "NO_ARTIFACTS":
20 self.evaluated_keys = 'artifacts/[0]/type'
21 elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:
22 self.evaluated_keys = 'artifacts/[0]/encryption_disabled'
23 return CheckResult.FAILED
24 return CheckResult.PASSED
25
26
27 check = CodeBuildProjectEncryption()
28
[end of checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py
--- a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py
+++ b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py
@@ -16,11 +16,13 @@
return CheckResult.UNKNOWN
artifact = conf['artifacts'][0]
if isinstance(artifact, dict):
- if artifact['type'] == "NO_ARTIFACTS":
+ if artifact['type'] == ["NO_ARTIFACTS"]:
self.evaluated_keys = 'artifacts/[0]/type'
- elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:
- self.evaluated_keys = 'artifacts/[0]/encryption_disabled'
- return CheckResult.FAILED
+ return CheckResult.UNKNOWN
+ if 'encryption_disabled' in artifact:
+ if artifact['encryption_disabled'] == [True]:
+ self.evaluated_keys = 'artifacts/[0]/encryption_disabled'
+ return CheckResult.FAILED
return CheckResult.PASSED
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py\n--- a/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py\n@@ -16,11 +16,13 @@\n return CheckResult.UNKNOWN\n artifact = conf['artifacts'][0]\n if isinstance(artifact, dict):\n- if artifact['type'] == \"NO_ARTIFACTS\":\n+ if artifact['type'] == [\"NO_ARTIFACTS\"]:\n self.evaluated_keys = 'artifacts/[0]/type'\n- elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:\n- self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n- return CheckResult.FAILED\n+ return CheckResult.UNKNOWN\n+ if 'encryption_disabled' in artifact: \n+ if artifact['encryption_disabled'] == [True]:\n+ self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "CKV_AWS_78 misreported\n**Describe the bug**\r\nCheckov is returning as vulnerability CKV_AWS_78, but the solution breaks Terraform validation.\r\n\r\nAccordigly to Checkov if `encryption_disabled = false` is not set in the main block it can be considered a vulnerability\r\n\r\n```\r\nresource \"aws_codebuild_project\" \"project-with-cache\" {\r\n name = \"test-project-cache\"\r\n description = \"test_codebuild_project_cache\"\r\n build_timeout = \"5\"\r\n queued_timeout = \"5\"\r\n+ encryption_disabled = false\r\n} \r\n```\r\nas described here: https://docs.bridgecrew.io/docs/bc_aws_general_30\r\n\r\nUnfortunately in Terraform v1.0.3 `encryption_disabled` is not available in that location but only in blocks `artifacts`, `secondary_artifacts` and `logs_config: s3_logs` as you can see here: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project\r\n\r\nSo if not set it gives vulnerability, if set terraform fails during the validation.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Set in **aws_codebuild_project** **encryption_disabled = false**\r\n```\r\nresource \"aws_codebuild_project\" \"project-with-cache\" {\r\n name = \"test-project-cache\"\r\n description = \"test_codebuild_project_cache\"\r\n build_timeout = \"5\"\r\n queued_timeout = \"5\"\r\n+ encryption_disabled = false\r\n} \r\n```\r\n2. Run `terraform validate`\r\n3. 
See error\r\n\r\n**Expected behavior**\r\nNo vulnerability or vulnerability if not set the attribute in all the 3 blocks\r\n\r\n**Desktop (please complete the following information):**\r\n - terraform --version: Terraform v1.0.3 on linux_amd64\r\n - checkov --version: 2.0.326\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass CodeBuildProjectEncryption(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure that CodeBuild Project encryption is not disabled\"\n id = \"CKV_AWS_78\"\n supported_resources = ['aws_codebuild_project']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'artifacts' not in conf:\n return CheckResult.UNKNOWN\n artifact = conf['artifacts'][0]\n if isinstance(artifact, dict):\n if artifact['type'] == \"NO_ARTIFACTS\":\n self.evaluated_keys = 'artifacts/[0]/type'\n elif 'encryption_disabled' in artifact and artifact['encryption_disabled']:\n self.evaluated_keys = 'artifacts/[0]/encryption_disabled'\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = CodeBuildProjectEncryption()\n", "path": "checkov/terraform/checks/resource/aws/CodeBuildProjectEncryption.py"}]}
| 1,237 | 253 |
gh_patches_debug_34334
|
rasdani/github-patches
|
git_diff
|
davanstrien__flyswot-64
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check that the CSV output doesn't contain blank lines
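A minimal standalone sketch of the usual fix: the `csv` module documentation says files should be opened with `newline=""`, otherwise an extra blank line appears between records on Windows. The field names below mirror the report columns but are only illustrative:

```
import csv
from pathlib import Path


def write_preds(csv_path: Path, rows) -> None:
    # newline="" is what prevents the blank line between every record.
    with open(csv_path, mode="w", newline="") as csv_file:
        writer = csv.DictWriter(
            csv_file, fieldnames=["path", "directory", "predicted_label", "confidence"]
        )
        writer.writeheader()
        writer.writerows(rows)


write_preds(
    Path("predictions.csv"),
    [{"path": "img.tif", "directory": "scans", "predicted_label": "fse", "confidence": 0.99}],
)
```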
</issue>
<code>
[start of src/flyswot/inference.py]
1 """Inference functionality"""
2 import csv
3 import mimetypes
4 from abc import ABC
5 from abc import abstractmethod
6 from dataclasses import asdict
7 from dataclasses import dataclass
8 from datetime import datetime
9 from pathlib import Path
10 from typing import IO
11 from typing import Iterable
12 from typing import Iterator
13 from typing import List
14 from typing import Union
15
16 import numpy as np
17 import onnxruntime as rt
18 import PIL
19 import typer
20 from PIL import Image
21 from rich.progress import Progress
22 from rich.table import Table
23 from toolz import itertoolz # type: ignore
24
25 from flyswot import core
26 from flyswot import models
27 from flyswot.console import console
28
29 try:
30 from fastai.vision.all import Learner
31 from fastai.vision.all import load_learner
32 except ImportError:
33 pass
34 from importlib import resources
35
36 app = typer.Typer()
37
38
39 # flake8: noqa
40 @dataclass()
41 class ImagePredictionItem:
42 """Prediction for an image.
43
44 Attributes:
45 path: The Path to the image
46 predicted_label: The predicted label i.e. the argmax value for the prediction tensor
47 condidence: The confidence for `predicted_label` i.e. the max value for prediction tensor
48 """
49
50 path: Path
51 predicted_label: str
52 confidence: float
53
54 def __post_init__(self) -> Union[Path, None]:
55 try:
56 self.path: Path = self.path.absolute()
57 except AttributeError:
58 pass
59
60
61 @dataclass
62 class PredictionBatch:
63 """Container for ImagePredictionItems"""
64
65 batch: List[ImagePredictionItem]
66
67 def __post_init__(self):
68 self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch)
69
70
71 image_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith("image/")}
72
73
74 @app.command()
75 def predict_image(
76 image: Path = typer.Argument(..., readable=True, resolve_path=True)
77 ) -> None:
78 pass # pragma: no cover
79
80
81 @app.command(name="directory")
82 def predict_directory(
83 directory: Path = typer.Argument(..., readable=True, resolve_path=True),
84 csv_save_dir: Path = typer.Argument(
85 ...,
86 writable=True,
87 resolve_path=True,
88 ),
89 pattern: str = typer.Option("fse"),
90 bs: int = typer.Option(32),
91 preferred_format: str = typer.Option(
92 ".tif",
93 help="Preferred image format for predictions. If not available, flyswot will use images matching `pattern`",
94 ),
95 ):
96 """
97 Predicts against all images containing PATTERN in the filename found under DIRECTORY.
98 By default searches for filenames containing FSE
99 Creates a CSV report saved to CSV_SAVE_DIR containing the predictions
100 """
101 typer.echo(csv_save_dir)
102 model_dir = models.ensure_model_dir()
103 typer.echo(model_dir)
104 # TODO add load learner function that can be passed a model name
105 model_parts = models.ensure_model(model_dir)
106 model = model_parts.model
107 vocab = models.load_vocab(model_parts.vocab)
108 OnnxInference = onnx_inference_session(model, vocab)
109 files = core.get_image_files_from_pattern(directory, pattern)
110 filtered_files = core.filter_to_preferred_ext(files, preferred_format)
111 files = list(filtered_files)
112 typer.echo(f"Found {len(files)} files matching {pattern} in {directory}")
113 csv_fname = create_csv_fname(csv_save_dir)
114 create_csv_header(csv_fname)
115 with typer.progressbar(length=len(files)) as progress:
116 all_preds = []
117 predictions = []
118 for batch in itertoolz.partition_all(bs, files):
119 batch_predictions = OnnxInference.predict_batch(batch, bs)
120 all_preds.append(batch_predictions.batch_labels)
121 predictions.append(batch_predictions)
122 progress.update(len(batch))
123 write_batch_preds_to_csv(csv_fname, batch_predictions)
124 all_preds = list(itertoolz.concat(all_preds))
125 print_table(all_preds)
126
127
128 def print_table(decoded) -> None:
129 table = Table(show_header=True, title="Prediction summary")
130 table.add_column(
131 "Class",
132 )
133 table.add_column("Count")
134 table.add_column("Percentage")
135 total = len(decoded)
136 frequencies = itertoolz.frequencies(decoded)
137 for is_last_element, var in core.signal_last(frequencies.items()):
138 key, value = var
139 count = value
140 percentage = round((count / total) * 100, 2)
141 if is_last_element:
142 table.add_row(key, str(count), f"{percentage}", end_section=True)
143 table.add_row("Total", str(total), "")
144 else:
145 table.add_row(key, str(count), f"{percentage}")
146 console.print(table)
147
148
149 def create_csv_fname(csv_directory: Path) -> Path:
150 date_now = datetime.now()
151 date_now = date_now.strftime("%Y_%m_%d_%H_%M")
152 fname = Path(date_now + ".csv")
153 return Path(csv_directory / fname)
154
155
156 def create_csv_header(csv_path: Path) -> None:
157 with open(csv_path, mode="w", newline="") as csv_file:
158 field_names = ["path", "directory", "predicted_label", "confidence"]
159 writer = csv.DictWriter(csv_file, fieldnames=field_names)
160 writer.writeheader()
161
162
163 def write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:
164 with open(csv_fpath, mode="a", newline="") as csv_file:
165 field_names = ["path", "directory", "predicted_label", "confidence"]
166 writer = csv.DictWriter(csv_file, fieldnames=field_names)
167 for pred in predictions.batch:
168 row = asdict(pred)
169 row["directory"] = pred.path.parent
170 writer.writerow(row)
171
172
173 class InferenceSession(ABC):
174 @abstractmethod
175 def __init__(self, model: Path, vocab: List):
176 self.model = model
177 self.vocab = vocab
178
179 @abstractmethod
180 def predict_image(self, image: Path):
181 pass
182
183 @abstractmethod
184 def predict_batch(self, model: Path, batch: Iterable[Path], bs: int):
185 pass
186
187
188 def softmax(x):
189 x = x.reshape(-1)
190 e_x = np.exp(x - np.max(x))
191 return e_x / e_x.sum(axis=0)
192
193
194 # class FastaiInferenceModel(InferenceSession):
195 # def __init__(self, model):
196 # self.model = model
197 # self.learn = load_learner(model)
198
199 # def predict_image(self, image: Path) -> Any:
200 # return self.learn.predict(image)
201
202 # def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch:
203 # test_dl = self.learn.dls.test_dl(batch, bs=bs)
204 # vocab = dict(enumerate(self.learn.dls.vocab))
205 # with self.learn.no_bar():
206 # fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True)
207 # prediction_tensors: Iterable[Any] = fastai_preds[0]
208 # prediction_items = []
209 # for file, pred in zip(batch, prediction_tensors):
210 # arg_max = int(np.array(pred).argmax())
211 # predicted_label = vocab[int(arg_max)]
212 # confidence = float(np.array(pred).max())
213 # prediction_items.append(
214 # ImagePredictionItem(file, predicted_label, confidence)
215 # )
216 # return PredictionBatch(prediction_items)
217
218
219 class onnx_inference_session(InferenceSession):
220 """Class for running inference making use of the onnxrunntime"""
221
222 def __init__(self, model, vocab):
223 self.model = model
224 self.session = rt.InferenceSession(str(model))
225
226 self.vocab = vocab
227 self.vocab_mapping = dict(enumerate(self.vocab))
228
229 def _load_vocab(self, vocab: Path) -> List:
230 with open(vocab, "r") as f:
231 return [item.strip("\n") for item in f.readlines()]
232
233 def predict_image(self, image: Path):
234 """Predict a single image"""
235 img = self._load_image(image)
236 raw_result = self.session.run(["output"], {"image": img})
237 pred = self._postprocess(raw_result)
238 arg_max = int(np.array(pred).argmax())
239 predicted_label = self.vocab_mapping[int(arg_max)]
240 confidence = float(np.array(pred).max())
241 return ImagePredictionItem(image, predicted_label, confidence)
242
243 def _preprocess(self, input_data: np.ndarray) -> np.ndarray:
244 # converts the input data into the float32 input for onnx
245 img_data = input_data.astype("float32")
246
247 # normalize
248 mean_vec = np.array([0.485, 0.456, 0.406])
249 stddev_vec = np.array([0.229, 0.224, 0.225])
250 norm_img_data = np.zeros(img_data.shape).astype("float32")
251 for i in range(img_data.shape[0]):
252 norm_img_data[i, :, :] = (
253 img_data[i, :, :] / 255 - mean_vec[i]
254 ) / stddev_vec[i]
255
256 # add batch channel
257 norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype("float32")
258 return norm_img_data
259
260 def _load_image(self, file: Path) -> np.ndarray:
261 """loads image and carries out preprocessing for inference"""
262 image = Image.open(file, mode="r")
263 image = image.resize((512, 512), Image.BILINEAR)
264 image_data = np.array(image).transpose(2, 0, 1)
265 return self._preprocess(image_data)
266
267 def _postprocess(self, result: List):
268 """process results from onnx session"""
269 return softmax(np.array(result)).tolist()
270
271 def predict_batch(self, batch: Iterable[Path], bs: int):
272 """predicts a batch of images"""
273 prediction_items = [self.predict_image(file) for file in batch]
274 return PredictionBatch(prediction_items)
275
276
277 if __name__ == "__main__":
278 app()
279
[end of src/flyswot/inference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/flyswot/inference.py b/src/flyswot/inference.py
--- a/src/flyswot/inference.py
+++ b/src/flyswot/inference.py
@@ -112,15 +112,14 @@
typer.echo(f"Found {len(files)} files matching {pattern} in {directory}")
csv_fname = create_csv_fname(csv_save_dir)
create_csv_header(csv_fname)
- with Progress() as progress:
- prediction_progress = progress.add_task("Predicting images", total=len(files))
+ with typer.progressbar(length=len(files)) as progress:
all_preds = []
predictions = []
for batch in itertoolz.partition_all(bs, files):
batch_predictions = OnnxInference.predict_batch(batch, bs)
all_preds.append(batch_predictions.batch_labels)
predictions.append(batch_predictions)
- progress.update(prediction_progress, advance=bs)
+ progress.update(len(batch))
write_batch_preds_to_csv(csv_fname, batch_predictions)
all_preds = list(itertoolz.concat(all_preds))
print_table(all_preds)
@@ -155,14 +154,14 @@
def create_csv_header(csv_path: Path) -> None:
- with open(csv_path, mode="w") as csv_file:
+ with open(csv_path, mode="w", newline="") as csv_file:
field_names = ["path", "directory", "predicted_label", "confidence"]
writer = csv.DictWriter(csv_file, fieldnames=field_names)
writer.writeheader()
def write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:
- with open(csv_fpath, mode="a") as csv_file:
+ with open(csv_fpath, mode="a", newline="") as csv_file:
field_names = ["path", "directory", "predicted_label", "confidence"]
writer = csv.DictWriter(csv_file, fieldnames=field_names)
for pred in predictions.batch:
|
{"golden_diff": "diff --git a/src/flyswot/inference.py b/src/flyswot/inference.py\n--- a/src/flyswot/inference.py\n+++ b/src/flyswot/inference.py\n@@ -112,15 +112,14 @@\n typer.echo(f\"Found {len(files)} files matching {pattern} in {directory}\")\n csv_fname = create_csv_fname(csv_save_dir)\n create_csv_header(csv_fname)\n- with Progress() as progress:\n- prediction_progress = progress.add_task(\"Predicting images\", total=len(files))\n+ with typer.progressbar(length=len(files)) as progress:\n all_preds = []\n predictions = []\n for batch in itertoolz.partition_all(bs, files):\n batch_predictions = OnnxInference.predict_batch(batch, bs)\n all_preds.append(batch_predictions.batch_labels)\n predictions.append(batch_predictions)\n- progress.update(prediction_progress, advance=bs)\n+ progress.update(len(batch))\n write_batch_preds_to_csv(csv_fname, batch_predictions)\n all_preds = list(itertoolz.concat(all_preds))\n print_table(all_preds)\n@@ -155,14 +154,14 @@\n \n \n def create_csv_header(csv_path: Path) -> None:\n- with open(csv_path, mode=\"w\") as csv_file:\n+ with open(csv_path, mode=\"w\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n \n \n def write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:\n- with open(csv_fpath, mode=\"a\") as csv_file:\n+ with open(csv_fpath, mode=\"a\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n for pred in predictions.batch:\n", "issue": "check csv output doesn't have blank lines \n\n", "before_files": [{"content": "\"\"\"Inference functionality\"\"\"\nimport csv\nimport mimetypes\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom dataclasses import asdict\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import IO\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Union\n\nimport numpy as np\nimport onnxruntime as rt\nimport PIL\nimport typer\nfrom PIL import Image\nfrom rich.progress import Progress\nfrom rich.table import Table\nfrom toolz import itertoolz # type: ignore\n\nfrom flyswot import core\nfrom flyswot import models\nfrom flyswot.console import console\n\ntry:\n from fastai.vision.all import Learner\n from fastai.vision.all import load_learner\nexcept ImportError:\n pass\nfrom importlib import resources\n\napp = typer.Typer()\n\n\n# flake8: noqa\n@dataclass()\nclass ImagePredictionItem:\n \"\"\"Prediction for an image.\n\n Attributes:\n path: The Path to the image\n predicted_label: The predicted label i.e. the argmax value for the prediction tensor\n condidence: The confidence for `predicted_label` i.e. 
the max value for prediction tensor\n \"\"\"\n\n path: Path\n predicted_label: str\n confidence: float\n\n def __post_init__(self) -> Union[Path, None]:\n try:\n self.path: Path = self.path.absolute()\n except AttributeError:\n pass\n\n\n@dataclass\nclass PredictionBatch:\n \"\"\"Container for ImagePredictionItems\"\"\"\n\n batch: List[ImagePredictionItem]\n\n def __post_init__(self):\n self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch)\n\n\nimage_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith(\"image/\")}\n\n\[email protected]()\ndef predict_image(\n image: Path = typer.Argument(..., readable=True, resolve_path=True)\n) -> None:\n pass # pragma: no cover\n\n\[email protected](name=\"directory\")\ndef predict_directory(\n directory: Path = typer.Argument(..., readable=True, resolve_path=True),\n csv_save_dir: Path = typer.Argument(\n ...,\n writable=True,\n resolve_path=True,\n ),\n pattern: str = typer.Option(\"fse\"),\n bs: int = typer.Option(32),\n preferred_format: str = typer.Option(\n \".tif\",\n help=\"Preferred image format for predictions. If not available, flyswot will use images matching `pattern`\",\n ),\n):\n \"\"\"\n Predicts against all images containing PATTERN in the filename found under DIRECTORY.\n By default searches for filenames containing FSE\n Creates a CSV report saved to CSV_SAVE_DIR containing the predictions\n \"\"\"\n typer.echo(csv_save_dir)\n model_dir = models.ensure_model_dir()\n typer.echo(model_dir)\n # TODO add load learner function that can be passed a model name\n model_parts = models.ensure_model(model_dir)\n model = model_parts.model\n vocab = models.load_vocab(model_parts.vocab)\n OnnxInference = onnx_inference_session(model, vocab)\n files = core.get_image_files_from_pattern(directory, pattern)\n filtered_files = core.filter_to_preferred_ext(files, preferred_format)\n files = list(filtered_files)\n typer.echo(f\"Found {len(files)} files matching {pattern} in {directory}\")\n csv_fname = create_csv_fname(csv_save_dir)\n create_csv_header(csv_fname)\n with typer.progressbar(length=len(files)) as progress:\n all_preds = []\n predictions = []\n for batch in itertoolz.partition_all(bs, files):\n batch_predictions = OnnxInference.predict_batch(batch, bs)\n all_preds.append(batch_predictions.batch_labels)\n predictions.append(batch_predictions)\n progress.update(len(batch))\n write_batch_preds_to_csv(csv_fname, batch_predictions)\n all_preds = list(itertoolz.concat(all_preds))\n print_table(all_preds)\n\n\ndef print_table(decoded) -> None:\n table = Table(show_header=True, title=\"Prediction summary\")\n table.add_column(\n \"Class\",\n )\n table.add_column(\"Count\")\n table.add_column(\"Percentage\")\n total = len(decoded)\n frequencies = itertoolz.frequencies(decoded)\n for is_last_element, var in core.signal_last(frequencies.items()):\n key, value = var\n count = value\n percentage = round((count / total) * 100, 2)\n if is_last_element:\n table.add_row(key, str(count), f\"{percentage}\", end_section=True)\n table.add_row(\"Total\", str(total), \"\")\n else:\n table.add_row(key, str(count), f\"{percentage}\")\n console.print(table)\n\n\ndef create_csv_fname(csv_directory: Path) -> Path:\n date_now = datetime.now()\n date_now = date_now.strftime(\"%Y_%m_%d_%H_%M\")\n fname = Path(date_now + \".csv\")\n return Path(csv_directory / fname)\n\n\ndef create_csv_header(csv_path: Path) -> None:\n with open(csv_path, mode=\"w\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", 
\"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n\n\ndef write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:\n with open(csv_fpath, mode=\"a\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n for pred in predictions.batch:\n row = asdict(pred)\n row[\"directory\"] = pred.path.parent\n writer.writerow(row)\n\n\nclass InferenceSession(ABC):\n @abstractmethod\n def __init__(self, model: Path, vocab: List):\n self.model = model\n self.vocab = vocab\n\n @abstractmethod\n def predict_image(self, image: Path):\n pass\n\n @abstractmethod\n def predict_batch(self, model: Path, batch: Iterable[Path], bs: int):\n pass\n\n\ndef softmax(x):\n x = x.reshape(-1)\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\n# class FastaiInferenceModel(InferenceSession):\n# def __init__(self, model):\n# self.model = model\n# self.learn = load_learner(model)\n\n# def predict_image(self, image: Path) -> Any:\n# return self.learn.predict(image)\n\n# def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch:\n# test_dl = self.learn.dls.test_dl(batch, bs=bs)\n# vocab = dict(enumerate(self.learn.dls.vocab))\n# with self.learn.no_bar():\n# fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True)\n# prediction_tensors: Iterable[Any] = fastai_preds[0]\n# prediction_items = []\n# for file, pred in zip(batch, prediction_tensors):\n# arg_max = int(np.array(pred).argmax())\n# predicted_label = vocab[int(arg_max)]\n# confidence = float(np.array(pred).max())\n# prediction_items.append(\n# ImagePredictionItem(file, predicted_label, confidence)\n# )\n# return PredictionBatch(prediction_items)\n\n\nclass onnx_inference_session(InferenceSession):\n \"\"\"Class for running inference making use of the onnxrunntime\"\"\"\n\n def __init__(self, model, vocab):\n self.model = model\n self.session = rt.InferenceSession(str(model))\n\n self.vocab = vocab\n self.vocab_mapping = dict(enumerate(self.vocab))\n\n def _load_vocab(self, vocab: Path) -> List:\n with open(vocab, \"r\") as f:\n return [item.strip(\"\\n\") for item in f.readlines()]\n\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n img = self._load_image(image)\n raw_result = self.session.run([\"output\"], {\"image\": img})\n pred = self._postprocess(raw_result)\n arg_max = int(np.array(pred).argmax())\n predicted_label = self.vocab_mapping[int(arg_max)]\n confidence = float(np.array(pred).max())\n return ImagePredictionItem(image, predicted_label, confidence)\n\n def _preprocess(self, input_data: np.ndarray) -> np.ndarray:\n # converts the input data into the float32 input for onnx\n img_data = input_data.astype(\"float32\")\n\n # normalize\n mean_vec = np.array([0.485, 0.456, 0.406])\n stddev_vec = np.array([0.229, 0.224, 0.225])\n norm_img_data = np.zeros(img_data.shape).astype(\"float32\")\n for i in range(img_data.shape[0]):\n norm_img_data[i, :, :] = (\n img_data[i, :, :] / 255 - mean_vec[i]\n ) / stddev_vec[i]\n\n # add batch channel\n norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype(\"float32\")\n return norm_img_data\n\n def _load_image(self, file: Path) -> np.ndarray:\n \"\"\"loads image and carries out preprocessing for inference\"\"\"\n image = Image.open(file, mode=\"r\")\n image = image.resize((512, 512), Image.BILINEAR)\n image_data = np.array(image).transpose(2, 0, 
1)\n return self._preprocess(image_data)\n\n def _postprocess(self, result: List):\n \"\"\"process results from onnx session\"\"\"\n return softmax(np.array(result)).tolist()\n\n def predict_batch(self, batch: Iterable[Path], bs: int):\n \"\"\"predicts a batch of images\"\"\"\n prediction_items = [self.predict_image(file) for file in batch]\n return PredictionBatch(prediction_items)\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "src/flyswot/inference.py"}]}
| 3,502 | 432 |
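The flyswot record above embeds an ONNX inference helper that normalises images with ImageNet channel statistics and turns raw logits into probabilities with a numerically stable softmax. Below is a minimal, self-contained sketch of those two steps, assuming only NumPy; the helper names, the 10-class logit vector and the random test image are illustrative, while the 512x512 size and the mean/std constants come from the record.

```python
import numpy as np

# ImageNet channel statistics used by the record's _preprocess step.
IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)


def preprocess(chw_image: np.ndarray) -> np.ndarray:
    """Scale a (3, H, W) uint8 image to [0, 1], normalise each channel,
    and add a leading batch dimension for the ONNX session."""
    img = chw_image.astype(np.float32) / 255.0
    img = (img - IMAGENET_MEAN[:, None, None]) / IMAGENET_STD[:, None, None]
    return img[None, ...]                        # shape (1, 3, H, W)


def softmax(logits: np.ndarray) -> np.ndarray:
    """Numerically stable softmax over a flattened logit vector."""
    logits = logits.reshape(-1)
    exps = np.exp(logits - logits.max())         # shift to avoid overflow
    return exps / exps.sum()


if __name__ == "__main__":
    fake_image = np.random.randint(0, 256, size=(3, 512, 512), dtype=np.uint8)
    batch = preprocess(fake_image)               # would be fed to session.run(...)
    fake_logits = np.random.randn(10)            # stand-in for the model output
    probs = softmax(fake_logits)
    print(batch.shape, round(float(probs.sum()), 6))   # (1, 3, 512, 512) 1.0
```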
gh_patches_debug_2490
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-165
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding 'Offline Informasjonstekster' causes an error
Not really sure what this does, but it raises an error saying:
Exception Type: IntegrityError
Exception Value: column key is not unique
</issue>
<code>
[start of apps/offline/admin.py]
1 from apps.offline.models import ProxyChunk, Issue
2 from chunks.models import Chunk
3 from django.contrib import admin
4 from django.db.models import Q
5
6
7 class ProxyChunkAdmin(admin.ModelAdmin):
8
9 readonly_fields = ['key']
10
11 def queryset(self, request):
12 offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))
13 return offline
14
15 admin.site.register(ProxyChunk, ProxyChunkAdmin)
16 admin.site.register(Issue)
17
[end of apps/offline/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/offline/admin.py b/apps/offline/admin.py
--- a/apps/offline/admin.py
+++ b/apps/offline/admin.py
@@ -8,6 +8,9 @@
readonly_fields = ['key']
+ def has_add_permission(self, request):
+ return False
+
def queryset(self, request):
offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))
return offline
|
{"golden_diff": "diff --git a/apps/offline/admin.py b/apps/offline/admin.py\n--- a/apps/offline/admin.py\n+++ b/apps/offline/admin.py\n@@ -8,6 +8,9 @@\n \n readonly_fields = ['key']\n \n+ def has_add_permission(self, request):\n+ return False\n+\n def queryset(self, request):\n offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))\n return offline\n", "issue": "Adding 'Offline Informasjonstekster' causes error\nNot really sure what this does but it casts an error saying:\n\nException Type: IntegrityError\nException Value: column key is not unique\n\n", "before_files": [{"content": "from apps.offline.models import ProxyChunk, Issue\nfrom chunks.models import Chunk\nfrom django.contrib import admin\nfrom django.db.models import Q\n\n\nclass ProxyChunkAdmin(admin.ModelAdmin):\n\n readonly_fields = ['key']\n\n def queryset(self, request):\n offline = Chunk.objects.filter(Q(key='offline_ingress') | Q(key='offline_brodtekst'))\n return offline\n\nadmin.site.register(ProxyChunk, ProxyChunkAdmin)\nadmin.site.register(Issue)\n", "path": "apps/offline/admin.py"}]}
| 702 | 101 |
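The one-line fix in the record above works because Django's `ModelAdmin.has_add_permission` hook lets an admin class hide the "Add" button entirely, so users can no longer create a second chunk with an already-taken key. A minimal sketch of the pattern is shown below; it assumes the project layout from the record (the third-party `chunks` app and the `apps.offline` module), so the imports are placeholders outside that codebase.

```python
from django.contrib import admin
from django.db.models import Q

from chunks.models import Chunk                 # third-party "chunks" app
from apps.offline.models import Issue, ProxyChunk


class ProxyChunkAdmin(admin.ModelAdmin):
    readonly_fields = ["key"]

    def has_add_permission(self, request):
        # The two Offline chunks already exist; adding another row with the
        # same key would violate the unique constraint, so disable "Add".
        return False

    def queryset(self, request):
        # Only expose the two chunks that belong to Offline.
        return Chunk.objects.filter(
            Q(key="offline_ingress") | Q(key="offline_brodtekst")
        )


admin.site.register(ProxyChunk, ProxyChunkAdmin)
admin.site.register(Issue)
```

In modern Django the `queryset` override would be spelled `get_queryset`, but the record targets the older API.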
gh_patches_debug_34711
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-6035
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Segfault in peak_local_max with large number of segments
## Description
scikit-image crashes with an (absolutely uncatchable and untrackable) segfault in peak_local_max.
## Way to reproduce
```python
import numpy as np
from scipy.ndimage import distance_transform_edt
from skimage.feature import peak_local_max
def segment(binary_image):
distance = distance_transform_edt(binary_image)
peak_local_max(
distance, min_distance=100, footprint=np.ones((3, 3)), labels=binary_image,
)
for p in [0.05, 0.95, 0.001, 0.999]:
print(p)
segment(np.random.random([2048, 2048]) < p)
```
## Version information
```python
# Paste the output of the following python commands
from __future__ import print_function
import sys; print(sys.version)
import platform; print(platform.platform())
import skimage; print(f'scikit-image version: {skimage.__version__}')
import numpy; print(f'numpy version: {numpy.__version__}')
```
```python
3.8.10 (default, Sep 28 2021, 16:10:42)
[GCC 9.3.0]
Linux-5.10.47-linuxkit-x86_64-with-glibc2.29
scikit-image version: 0.18.3
numpy version: 1.21.4
```
</issue>
<code>
[start of skimage/_shared/coord.py]
1 import numpy as np
2 from scipy.spatial import cKDTree, distance
3
4
5 def _ensure_spacing(coord, spacing, p_norm, max_out):
6 """Returns a subset of coord where a minimum spacing is guaranteed.
7
8 Parameters
9 ----------
10 coord : ndarray
11 The coordinates of the considered points.
12 spacing : float
13 the maximum allowed spacing between the points.
14 p_norm : float
15 Which Minkowski p-norm to use. Should be in the range [1, inf].
16 A finite large p may cause a ValueError if overflow can occur.
17 ``inf`` corresponds to the Chebyshev distance and 2 to the
18 Euclidean distance.
19 max_out: int
20 If not None, at most the first ``max_out`` candidates are
21 returned.
22
23 Returns
24 -------
25 output : ndarray
26 A subset of coord where a minimum spacing is guaranteed.
27
28 """
29
30 # Use KDtree to find the peaks that are too close to each other
31 tree = cKDTree(coord)
32
33 indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
34 rejected_peaks_indices = set()
35 naccepted = 0
36 for idx, candidates in enumerate(indices):
37 if idx not in rejected_peaks_indices:
38 # keep current point and the points at exactly spacing from it
39 candidates.remove(idx)
40 dist = distance.cdist([coord[idx]],
41 coord[candidates],
42 distance.minkowski,
43 p=p_norm).reshape(-1)
44 candidates = [c for c, d in zip(candidates, dist)
45 if d < spacing]
46
47 # candidates.remove(keep)
48 rejected_peaks_indices.update(candidates)
49 naccepted += 1
50 if max_out is not None and naccepted >= max_out:
51 break
52
53 # Remove the peaks that are too close to each other
54 output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
55 if max_out is not None:
56 output = output[:max_out]
57
58 return output
59
60
61 def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,
62 max_out=None):
63 """Returns a subset of coord where a minimum spacing is guaranteed.
64
65 Parameters
66 ----------
67 coords : array_like
68 The coordinates of the considered points.
69 spacing : float
70 the maximum allowed spacing between the points.
71 p_norm : float
72 Which Minkowski p-norm to use. Should be in the range [1, inf].
73 A finite large p may cause a ValueError if overflow can occur.
74 ``inf`` corresponds to the Chebyshev distance and 2 to the
75 Euclidean distance.
76 min_split_size : int
77 Minimum split size used to process ``coord`` by batch to save
78 memory. If None, the memory saving strategy is not applied.
79 max_out : int
80 If not None, only the first ``max_out`` candidates are returned.
81
82 Returns
83 -------
84 output : array_like
85 A subset of coord where a minimum spacing is guaranteed.
86
87 """
88
89 output = coords
90 if len(coords):
91
92 coords = np.atleast_2d(coords)
93 if min_split_size is None:
94 batch_list = [coords]
95 else:
96 coord_count = len(coords)
97 split_count = int(np.log2(coord_count / min_split_size)) + 1
98 split_idx = np.cumsum(
99 [coord_count // (2 ** i) for i in range(1, split_count)])
100 batch_list = np.array_split(coords, split_idx)
101
102 output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
103 for batch in batch_list:
104 output = _ensure_spacing(np.vstack([output, batch]),
105 spacing, p_norm, max_out)
106 if max_out is not None and len(output) >= max_out:
107 break
108
109 return output
110
[end of skimage/_shared/coord.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/_shared/coord.py b/skimage/_shared/coord.py
--- a/skimage/_shared/coord.py
+++ b/skimage/_shared/coord.py
@@ -59,7 +59,7 @@
def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,
- max_out=None):
+ max_out=None, *, max_split_size=2000):
"""Returns a subset of coord where a minimum spacing is guaranteed.
Parameters
@@ -74,10 +74,19 @@
``inf`` corresponds to the Chebyshev distance and 2 to the
Euclidean distance.
min_split_size : int
- Minimum split size used to process ``coord`` by batch to save
+ Minimum split size used to process ``coords`` by batch to save
memory. If None, the memory saving strategy is not applied.
max_out : int
If not None, only the first ``max_out`` candidates are returned.
+ max_split_size : int
+ Maximum split size used to process ``coords`` by batch to save
+ memory. This number was decided by profiling with a large number
+ of points. Too small a number results in too much looping in
+ Python instead of C, slowing down the process, while too large
+ a number results in large memory allocations, slowdowns, and,
+ potentially, in the process being killed -- see gh-6010. See
+ benchmark results `here
+ <https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.
Returns
-------
@@ -94,9 +103,12 @@
batch_list = [coords]
else:
coord_count = len(coords)
- split_count = int(np.log2(coord_count / min_split_size)) + 1
- split_idx = np.cumsum(
- [coord_count // (2 ** i) for i in range(1, split_count)])
+ split_idx = [min_split_size]
+ split_size = min_split_size
+ while coord_count - split_idx[-1] > max_split_size:
+ split_size *= 2
+ split_idx.append(split_idx[-1] + min(split_size,
+ max_split_size))
batch_list = np.array_split(coords, split_idx)
output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
|
{"golden_diff": "diff --git a/skimage/_shared/coord.py b/skimage/_shared/coord.py\n--- a/skimage/_shared/coord.py\n+++ b/skimage/_shared/coord.py\n@@ -59,7 +59,7 @@\n \n \n def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,\n- max_out=None):\n+ max_out=None, *, max_split_size=2000):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n \n Parameters\n@@ -74,10 +74,19 @@\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n min_split_size : int\n- Minimum split size used to process ``coord`` by batch to save\n+ Minimum split size used to process ``coords`` by batch to save\n memory. If None, the memory saving strategy is not applied.\n max_out : int\n If not None, only the first ``max_out`` candidates are returned.\n+ max_split_size : int\n+ Maximum split size used to process ``coords`` by batch to save\n+ memory. This number was decided by profiling with a large number\n+ of points. Too small a number results in too much looping in\n+ Python instead of C, slowing down the process, while too large\n+ a number results in large memory allocations, slowdowns, and,\n+ potentially, in the process being killed -- see gh-6010. See\n+ benchmark results `here\n+ <https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.\n \n Returns\n -------\n@@ -94,9 +103,12 @@\n batch_list = [coords]\n else:\n coord_count = len(coords)\n- split_count = int(np.log2(coord_count / min_split_size)) + 1\n- split_idx = np.cumsum(\n- [coord_count // (2 ** i) for i in range(1, split_count)])\n+ split_idx = [min_split_size]\n+ split_size = min_split_size\n+ while coord_count - split_idx[-1] > max_split_size:\n+ split_size *= 2\n+ split_idx.append(split_idx[-1] + min(split_size,\n+ max_split_size))\n batch_list = np.array_split(coords, split_idx)\n \n output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n", "issue": "Segfault in peak_local_max with large numbed of segments\n## Description\r\n\r\nscikit-image dives to (absolutely uncatchable and untrackable) segfault in peak_local_max.\r\n\r\n## Way to reproduce\r\n```python\r\nimport numpy as np\r\nfrom scipy.ndimage import distance_transform_edt\r\nfrom skimage.feature import peak_local_max\r\n\r\n\r\ndef segment(binary_image):\r\n distance = distance_transform_edt(binary_image)\r\n peak_local_max(\r\n distance, min_distance=100, footprint=np.ones((3, 3)), labels=binary_image,\r\n )\r\n\r\nfor p in [0.05, 0.95, 0.001, 0.999]:\r\n print(p)\r\n segment(np.random.random([2048, 2048]) < p)\r\n\r\n\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n# Paste the output of the following python commands\r\nfrom __future__ import print_function\r\nimport sys; print(sys.version)\r\nimport platform; print(platform.platform())\r\nimport skimage; print(f'scikit-image version: {skimage.__version__}')\r\nimport numpy; print(f'numpy version: {numpy.__version__}')\r\n```\r\n\r\n```python\r\n3.8.10 (default, Sep 28 2021, 16:10:42) \r\n[GCC 9.3.0]\r\nLinux-5.10.47-linuxkit-x86_64-with-glibc2.29\r\nscikit-image version: 0.18.3\r\nnumpy version: 1.21.4\r\n```\r\n\n", "before_files": [{"content": "import numpy as np\nfrom scipy.spatial import cKDTree, distance\n\n\ndef _ensure_spacing(coord, spacing, p_norm, max_out):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coord : ndarray\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm 
to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n max_out: int\n If not None, at most the first ``max_out`` candidates are\n returned.\n\n Returns\n -------\n output : ndarray\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n # Use KDtree to find the peaks that are too close to each other\n tree = cKDTree(coord)\n\n indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n rejected_peaks_indices = set()\n naccepted = 0\n for idx, candidates in enumerate(indices):\n if idx not in rejected_peaks_indices:\n # keep current point and the points at exactly spacing from it\n candidates.remove(idx)\n dist = distance.cdist([coord[idx]],\n coord[candidates],\n distance.minkowski,\n p=p_norm).reshape(-1)\n candidates = [c for c, d in zip(candidates, dist)\n if d < spacing]\n\n # candidates.remove(keep)\n rejected_peaks_indices.update(candidates)\n naccepted += 1\n if max_out is not None and naccepted >= max_out:\n break\n\n # Remove the peaks that are too close to each other\n output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n if max_out is not None:\n output = output[:max_out]\n\n return output\n\n\ndef ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50,\n max_out=None):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coords : array_like\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n min_split_size : int\n Minimum split size used to process ``coord`` by batch to save\n memory. If None, the memory saving strategy is not applied.\n max_out : int\n If not None, only the first ``max_out`` candidates are returned.\n\n Returns\n -------\n output : array_like\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n output = coords\n if len(coords):\n\n coords = np.atleast_2d(coords)\n if min_split_size is None:\n batch_list = [coords]\n else:\n coord_count = len(coords)\n split_count = int(np.log2(coord_count / min_split_size)) + 1\n split_idx = np.cumsum(\n [coord_count // (2 ** i) for i in range(1, split_count)])\n batch_list = np.array_split(coords, split_idx)\n\n output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n for batch in batch_list:\n output = _ensure_spacing(np.vstack([output, batch]),\n spacing, p_norm, max_out)\n if max_out is not None and len(output) >= max_out:\n break\n\n return output\n", "path": "skimage/_shared/coord.py"}]}
| 1,939 | 560 |
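The core of the patch above is a new split schedule: batch sizes still double from `min_split_size`, but each increment is capped at `max_split_size`, which keeps the KD-tree queries from allocating the huge neighbour lists that triggered the segfault. A small sketch of just that scheduling logic, assuming NumPy; the two size constants mirror the patch and the 10,000-point input is arbitrary.

```python
import numpy as np


def capped_split_indices(coord_count, min_split_size=50, max_split_size=2000):
    """Indices for np.array_split: batch sizes double from min_split_size,
    but each step is capped at max_split_size."""
    split_idx = [min_split_size]
    split_size = min_split_size
    while coord_count - split_idx[-1] > max_split_size:
        split_size *= 2
        split_idx.append(split_idx[-1] + min(split_size, max_split_size))
    return split_idx


if __name__ == "__main__":
    coords = np.random.rand(10_000, 2)
    idx = capped_split_indices(len(coords))
    print(idx)   # [50, 150, 350, 750, 1550, 3150, 5150, 7150, 9150]
    print([len(b) for b in np.array_split(coords, idx)])
    # batch sizes: 50, 100, 200, 400, 800, 1600, then 2000 each plus an 850 remainder
```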
gh_patches_debug_37441
|
rasdani/github-patches
|
git_diff
|
scikit-hep__awkward-1841
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ak.type does not understand `numpy.<type>` style dtypes
### Version of Awkward Array
2.0.0rc1
### Description and code to reproduce
numpy = 1.23.4
```python3
>>> import awkward as ak
>>> import numpy as np
>>> x = np.random.normal(size=100)
>>> ak.type(x)
```
results in:
```
Traceback (most recent call last):
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py", line 99, in _impl
out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]
KeyError: <class 'numpy.float64'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py", line 60, in type
return _impl(array)
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py", line 101, in _impl
raise ak._errors.wrap_error(
TypeError: while calling
ak.type(
array = numpy.ndarray([ 0.27824033 -1.483569 -0.61108357 ...
)
Error details: numpy array type is unrecognized by awkward: <class 'numpy.float64'>
```
`np.float64` (or `np.<type>` in general) is a fairly common way for folks to denote typing, so we should probably support it.
</issue>
<code>
[start of src/awkward/operations/ak_type.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak.nplikes.NumpyMetadata.instance()
8
9
10 def type(array):
11 """
12 The high-level type of an `array` (many types supported, including all
13 Awkward Arrays and Records) as #ak.types.Type objects.
14
15 The high-level type ignores #layout differences like
16 #ak.contents.ListArray versus #ak.contents.ListOffsetArray, but
17 not differences like "regular-sized lists" (i.e.
18 #ak.contents.RegularArray) versus "variable-sized lists" (i.e.
19 #ak.contents.ListArray and similar).
20
21 Types are rendered as [Datashape](https://datashape.readthedocs.io/)
22 strings, which makes the same distinctions.
23
24 For example,
25
26 ak.Array([[{"x": 1.1, "y": [1]}, {"x": 2.2, "y": [2, 2]}],
27 [],
28 [{"x": 3.3, "y": [3, 3, 3]}]])
29
30 has type
31
32 3 * var * {"x": float64, "y": var * int64}
33
34 but
35
36 ak.Array(np.arange(2*3*5).reshape(2, 3, 5))
37
38 has type
39
40 2 * 3 * 5 * int64
41
42 Some cases, like heterogeneous data, require [extensions beyond the
43 Datashape specification](https://github.com/blaze/datashape/issues/237).
44 For example,
45
46 ak.Array([1, "two", [3, 3, 3]])
47
48 has type
49
50 3 * union[int64, string, var * int64]
51
52 but "union" is not a Datashape type-constructor. (Its syntax is
53 similar to existing type-constructors, so it's a plausible addition
54 to the language.)
55 """
56 with ak._errors.OperationErrorContext(
57 "ak.type",
58 dict(array=array),
59 ):
60 return _impl(array)
61
62
63 def _impl(array):
64 if array is None:
65 return ak.types.UnknownType()
66
67 elif isinstance(
68 array,
69 tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),
70 ):
71 return ak.types.NumpyType(
72 ak.types.numpytype._dtype_to_primitive_dict[array.dtype]
73 )
74
75 elif isinstance(array, (bool, np.bool_)):
76 return ak.types.NumpyType("bool")
77
78 elif isinstance(array, numbers.Integral):
79 return ak.types.NumpyType("int64")
80
81 elif isinstance(array, numbers.Real):
82 return ak.types.NumpyType("float64")
83
84 elif isinstance(
85 array,
86 (
87 ak.highlevel.Array,
88 ak.highlevel.Record,
89 ak.highlevel.ArrayBuilder,
90 ),
91 ):
92 return array.type
93
94 elif isinstance(array, np.ndarray):
95 if len(array.shape) == 0:
96 return _impl(array.reshape((1,))[0])
97 else:
98 try:
99 out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]
100 except KeyError as err:
101 raise ak._errors.wrap_error(
102 TypeError(
103 "numpy array type is unrecognized by awkward: %r"
104 % array.dtype.type
105 )
106 ) from err
107 out = ak.types.NumpyType(out)
108 for x in array.shape[-1:0:-1]:
109 out = ak.types.RegularType(out, x)
110 return ak.types.ArrayType(out, array.shape[0])
111
112 elif isinstance(array, ak._ext.ArrayBuilder):
113 form = ak.forms.from_json(array.form())
114 return ak.types.ArrayType(form.type_from_behavior(None), len(array))
115
116 elif isinstance(array, ak.record.Record):
117 return array.array.form.type
118
119 elif isinstance(array, ak.contents.Content):
120 return array.form.type
121
122 else:
123 raise ak._errors.wrap_error(TypeError(f"unrecognized array type: {array!r}"))
124
[end of src/awkward/operations/ak_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/awkward/operations/ak_type.py b/src/awkward/operations/ak_type.py
--- a/src/awkward/operations/ak_type.py
+++ b/src/awkward/operations/ak_type.py
@@ -1,6 +1,8 @@
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
+import builtins
import numbers
+from datetime import datetime, timedelta
import awkward as ak
@@ -64,15 +66,18 @@
if array is None:
return ak.types.UnknownType()
- elif isinstance(
- array,
- tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),
+ elif isinstance(array, np.dtype):
+ return ak.types.NumpyType(ak.types.numpytype.dtype_to_primitive(array))
+
+ elif (
+ isinstance(array, np.generic)
+ or isinstance(array, builtins.type)
+ and issubclass(array, np.generic)
):
- return ak.types.NumpyType(
- ak.types.numpytype._dtype_to_primitive_dict[array.dtype]
- )
+ primitive = ak.types.numpytype.dtype_to_primitive(np.dtype(array))
+ return ak.types.NumpyType(primitive)
- elif isinstance(array, (bool, np.bool_)):
+ elif isinstance(array, bool): # np.bool_ in np.generic (above)
return ak.types.NumpyType("bool")
elif isinstance(array, numbers.Integral):
@@ -81,6 +86,15 @@
elif isinstance(array, numbers.Real):
return ak.types.NumpyType("float64")
+ elif isinstance(array, numbers.Complex):
+ return ak.types.NumpyType("complex128")
+
+ elif isinstance(array, datetime): # np.datetime64 in np.generic (above)
+ return ak.types.NumpyType("datetime64")
+
+ elif isinstance(array, timedelta): # np.timedelta64 in np.generic (above)
+ return ak.types.NumpyType("timedelta")
+
elif isinstance(
array,
(
@@ -95,16 +109,8 @@
if len(array.shape) == 0:
return _impl(array.reshape((1,))[0])
else:
- try:
- out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]
- except KeyError as err:
- raise ak._errors.wrap_error(
- TypeError(
- "numpy array type is unrecognized by awkward: %r"
- % array.dtype.type
- )
- ) from err
- out = ak.types.NumpyType(out)
+ primitive = ak.types.numpytype.dtype_to_primitive(array.dtype)
+ out = ak.types.NumpyType(primitive)
for x in array.shape[-1:0:-1]:
out = ak.types.RegularType(out, x)
return ak.types.ArrayType(out, array.shape[0])
|
{"golden_diff": "diff --git a/src/awkward/operations/ak_type.py b/src/awkward/operations/ak_type.py\n--- a/src/awkward/operations/ak_type.py\n+++ b/src/awkward/operations/ak_type.py\n@@ -1,6 +1,8 @@\n # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n \n+import builtins\n import numbers\n+from datetime import datetime, timedelta\n \n import awkward as ak\n \n@@ -64,15 +66,18 @@\n if array is None:\n return ak.types.UnknownType()\n \n- elif isinstance(\n- array,\n- tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),\n+ elif isinstance(array, np.dtype):\n+ return ak.types.NumpyType(ak.types.numpytype.dtype_to_primitive(array))\n+\n+ elif (\n+ isinstance(array, np.generic)\n+ or isinstance(array, builtins.type)\n+ and issubclass(array, np.generic)\n ):\n- return ak.types.NumpyType(\n- ak.types.numpytype._dtype_to_primitive_dict[array.dtype]\n- )\n+ primitive = ak.types.numpytype.dtype_to_primitive(np.dtype(array))\n+ return ak.types.NumpyType(primitive)\n \n- elif isinstance(array, (bool, np.bool_)):\n+ elif isinstance(array, bool): # np.bool_ in np.generic (above)\n return ak.types.NumpyType(\"bool\")\n \n elif isinstance(array, numbers.Integral):\n@@ -81,6 +86,15 @@\n elif isinstance(array, numbers.Real):\n return ak.types.NumpyType(\"float64\")\n \n+ elif isinstance(array, numbers.Complex):\n+ return ak.types.NumpyType(\"complex128\")\n+\n+ elif isinstance(array, datetime): # np.datetime64 in np.generic (above)\n+ return ak.types.NumpyType(\"datetime64\")\n+\n+ elif isinstance(array, timedelta): # np.timedelta64 in np.generic (above)\n+ return ak.types.NumpyType(\"timedelta\")\n+\n elif isinstance(\n array,\n (\n@@ -95,16 +109,8 @@\n if len(array.shape) == 0:\n return _impl(array.reshape((1,))[0])\n else:\n- try:\n- out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]\n- except KeyError as err:\n- raise ak._errors.wrap_error(\n- TypeError(\n- \"numpy array type is unrecognized by awkward: %r\"\n- % array.dtype.type\n- )\n- ) from err\n- out = ak.types.NumpyType(out)\n+ primitive = ak.types.numpytype.dtype_to_primitive(array.dtype)\n+ out = ak.types.NumpyType(primitive)\n for x in array.shape[-1:0:-1]:\n out = ak.types.RegularType(out, x)\n return ak.types.ArrayType(out, array.shape[0])\n", "issue": "ak.type does not understand `numpy.<type>` style dtypes\n### Version of Awkward Array\n\n2.0.0rc1\n\n### Description and code to reproduce\n\nnumpy = 1.23.4\r\n\r\n```python3\r\n>>> import awkward as ak\r\n>>> import numpy as np\r\n>>> x = np.random.normal(size=100)\r\n>>> ak.type(x)\r\n```\r\nresults in:\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py\", line 99, in _impl\r\n out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]\r\nKeyError: <class 'numpy.float64'>\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py\", line 60, in type\r\n return _impl(array)\r\n File \"/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.8/site-packages/awkward/operations/ak_type.py\", line 101, in _impl\r\n raise ak._errors.wrap_error(\r\nTypeError: while calling\r\n\r\n ak.type(\r\n array = numpy.ndarray([ 0.27824033 -1.483569 -0.61108357 ...\r\n )\r\n\r\nError details: numpy array type is unrecognized by 
awkward: <class 'numpy.float64'>\r\n```\r\n\r\n`np.float64` (or `np.<type>` in general) is a fairly common way for folks to denote typing, we should probably support it.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef type(array):\n \"\"\"\n The high-level type of an `array` (many types supported, including all\n Awkward Arrays and Records) as #ak.types.Type objects.\n\n The high-level type ignores #layout differences like\n #ak.contents.ListArray versus #ak.contents.ListOffsetArray, but\n not differences like \"regular-sized lists\" (i.e.\n #ak.contents.RegularArray) versus \"variable-sized lists\" (i.e.\n #ak.contents.ListArray and similar).\n\n Types are rendered as [Datashape](https://datashape.readthedocs.io/)\n strings, which makes the same distinctions.\n\n For example,\n\n ak.Array([[{\"x\": 1.1, \"y\": [1]}, {\"x\": 2.2, \"y\": [2, 2]}],\n [],\n [{\"x\": 3.3, \"y\": [3, 3, 3]}]])\n\n has type\n\n 3 * var * {\"x\": float64, \"y\": var * int64}\n\n but\n\n ak.Array(np.arange(2*3*5).reshape(2, 3, 5))\n\n has type\n\n 2 * 3 * 5 * int64\n\n Some cases, like heterogeneous data, require [extensions beyond the\n Datashape specification](https://github.com/blaze/datashape/issues/237).\n For example,\n\n ak.Array([1, \"two\", [3, 3, 3]])\n\n has type\n\n 3 * union[int64, string, var * int64]\n\n but \"union\" is not a Datashape type-constructor. (Its syntax is\n similar to existing type-constructors, so it's a plausible addition\n to the language.)\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.type\",\n dict(array=array),\n ):\n return _impl(array)\n\n\ndef _impl(array):\n if array is None:\n return ak.types.UnknownType()\n\n elif isinstance(\n array,\n tuple(x.type for x in ak.types.numpytype._dtype_to_primitive_dict),\n ):\n return ak.types.NumpyType(\n ak.types.numpytype._dtype_to_primitive_dict[array.dtype]\n )\n\n elif isinstance(array, (bool, np.bool_)):\n return ak.types.NumpyType(\"bool\")\n\n elif isinstance(array, numbers.Integral):\n return ak.types.NumpyType(\"int64\")\n\n elif isinstance(array, numbers.Real):\n return ak.types.NumpyType(\"float64\")\n\n elif isinstance(\n array,\n (\n ak.highlevel.Array,\n ak.highlevel.Record,\n ak.highlevel.ArrayBuilder,\n ),\n ):\n return array.type\n\n elif isinstance(array, np.ndarray):\n if len(array.shape) == 0:\n return _impl(array.reshape((1,))[0])\n else:\n try:\n out = ak.types.numpytype._dtype_to_primitive_dict[array.dtype.type]\n except KeyError as err:\n raise ak._errors.wrap_error(\n TypeError(\n \"numpy array type is unrecognized by awkward: %r\"\n % array.dtype.type\n )\n ) from err\n out = ak.types.NumpyType(out)\n for x in array.shape[-1:0:-1]:\n out = ak.types.RegularType(out, x)\n return ak.types.ArrayType(out, array.shape[0])\n\n elif isinstance(array, ak._ext.ArrayBuilder):\n form = ak.forms.from_json(array.form())\n return ak.types.ArrayType(form.type_from_behavior(None), len(array))\n\n elif isinstance(array, ak.record.Record):\n return array.array.form.type\n\n elif isinstance(array, ak.contents.Content):\n return array.form.type\n\n else:\n raise ak._errors.wrap_error(TypeError(f\"unrecognized array type: {array!r}\"))\n", "path": "src/awkward/operations/ak_type.py"}]}
| 2,102 | 651 |
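The fix above leans on the fact that `np.dtype(...)` accepts scalar type objects such as `numpy.float64` (and that scalar instances carry a `.dtype`), so dtype objects, scalar classes and scalar values can all be funnelled through one code path. A small normalisation sketch, assuming only NumPy; the `primitive_name` helper is illustrative and not Awkward's real primitive table.

```python
import builtins
import numpy as np


def primitive_name(obj):
    """Normalise a dtype, a NumPy scalar type, or a scalar instance to a name."""
    if isinstance(obj, np.dtype):                              # np.dtype("float64")
        dtype = obj
    elif isinstance(obj, np.generic):                          # np.float64(1.5)
        dtype = obj.dtype
    elif isinstance(obj, builtins.type) and issubclass(obj, np.generic):
        dtype = np.dtype(obj)                                  # np.float64 itself
    else:
        raise TypeError(f"unrecognized type specification: {obj!r}")
    return dtype.name


if __name__ == "__main__":
    print(primitive_name(np.dtype("float64")))   # float64
    print(primitive_name(np.float64))            # float64
    print(primitive_name(np.float64(1.5)))       # float64
    print(primitive_name(np.int32))              # int32
```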
gh_patches_debug_20081
|
rasdani/github-patches
|
git_diff
|
explosion__spaCy-2949
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide compat that works with on Windows with Py 3.7.1
## Feature description
The script that creates the symbolic link doesn't work by default when running `python -m spacy link en_core_web_sm en` -- normally, if DOS is the default shell, a call to `mklink` just works. Via this module, however, it does not, and it is not a permissions issue or a system policy restriction, as just running `mklink /d ...` from the same shell/command line works.
This fails regardless of whether it is run under a virtualenv.
Instead a user is presented with the following error message.
```
(venv) C:\g\py\spacy> python -m spacy link en_core_web_sm en
C:\Program Files\Python37\lib\importlib\_bootstrap.py:219: RuntimeWarning: cymem.cymem.Pool size changed, may indicate binary incompatibility. Expected 48 from C header, got 64 from PyObject
return f(*args, **kwds)
C:\Program Files\Python37\lib\importlib\_bootstrap.py:219: RuntimeWarning: cymem.cymem.Address size changed, may indicate binary incompatibility. Expected 24 from C header, got 40 from PyObject
return f(*args, **kwds)
Error: Couldn't link model to 'en'
Creating a symlink in spacy/data failed. Make sure you have the required
permissions and try re-running the command as admin, or use a
virtualenv. You can still import the model as a module and call its
load() method, or create the symlink manually.
C:\g\py\spacy\venv\lib\site-packages\en_core_web_sm -->
C:\g\py\spacy\venv\lib\site-packages\spacy\data\en
Traceback (most recent call last):
File "C:\Program Files\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Program Files\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\g\py\spacy\venv\lib\site-packages\spacy\__main__.py", line 31, in <module>
plac.call(commands[command], sys.argv[1:])
File "C:\g\py\spacy\venv\lib\site-packages\plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "C:\g\py\spacy\venv\lib\site-packages\plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "C:\g\py\spacy\venv\lib\site-packages\spacy\cli\link.py", line 48, in link
symlink_to(link_path, model_path)
File "C:\g\py\spacy\venv\lib\site-packages\spacy\compat.py", line 87, in symlink_to
orig.symlink_to(dest)
File "C:\Program Files\Python37\lib\pathlib.py", line 1320, in symlink_to
self._accessor.symlink(target, self, target_is_directory)
OSError: symbolic link privilege not held
```
The following command works regardless and has no issue with permissions or policy. The error message indicating that the symbolic link privilege is not held is misleading.
```
cmd /c mklink /k c:\path\to\symlink c:\target\file
```
A suggestion is changing the respective lines in `venv\Lib\site-packages\spacy\compat.py (symlink_to)`
```
def symlink_to(orig, dest):
if is_python2 and is_windows:
import subprocess
subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
```
to
```
def symlink_to(orig, dest):
if (is_python2 or is_python3) and is_windows:
import subprocess
subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
```
</issue>
<code>
[start of spacy/compat.py]
1 # coding: utf8
2 from __future__ import unicode_literals
3
4 import sys
5 import ujson
6 import itertools
7 import locale
8
9 from thinc.neural.util import copy_array
10
11 try:
12 import cPickle as pickle
13 except ImportError:
14 import pickle
15
16 try:
17 import copy_reg
18 except ImportError:
19 import copyreg as copy_reg
20
21 try:
22 from cupy.cuda.stream import Stream as CudaStream
23 except ImportError:
24 CudaStream = None
25
26 try:
27 import cupy
28 except ImportError:
29 cupy = None
30
31 try:
32 from thinc.neural.optimizers import Optimizer
33 except ImportError:
34 from thinc.neural.optimizers import Adam as Optimizer
35
36 pickle = pickle
37 copy_reg = copy_reg
38 CudaStream = CudaStream
39 cupy = cupy
40 copy_array = copy_array
41 izip = getattr(itertools, 'izip', zip)
42
43 is_windows = sys.platform.startswith('win')
44 is_linux = sys.platform.startswith('linux')
45 is_osx = sys.platform == 'darwin'
46
47 # See: https://github.com/benjaminp/six/blob/master/six.py
48 is_python2 = sys.version_info[0] == 2
49 is_python3 = sys.version_info[0] == 3
50 is_python_pre_3_5 = is_python2 or (is_python3 and sys.version_info[1] < 5)
51
52 if is_python2:
53 bytes_ = str
54 unicode_ = unicode # noqa: F821
55 basestring_ = basestring # noqa: F821
56 input_ = raw_input # noqa: F821
57 json_dumps = lambda data: ujson.dumps(data, indent=2, escape_forward_slashes=False).decode('utf8')
58 path2str = lambda path: str(path).decode('utf8')
59
60 elif is_python3:
61 bytes_ = bytes
62 unicode_ = str
63 basestring_ = str
64 input_ = input
65 json_dumps = lambda data: ujson.dumps(data, indent=2, escape_forward_slashes=False)
66 path2str = lambda path: str(path)
67
68
69 def b_to_str(b_str):
70 if is_python2:
71 return b_str
72 # important: if no encoding is set, string becomes "b'...'"
73 return str(b_str, encoding='utf8')
74
75
76 def getattr_(obj, name, *default):
77 if is_python3 and isinstance(name, bytes):
78 name = name.decode('utf8')
79 return getattr(obj, name, *default)
80
81
82 def symlink_to(orig, dest):
83 if is_python2 and is_windows:
84 import subprocess
85 subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)
86 else:
87 orig.symlink_to(dest)
88
89
90 def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
91 return (python2 in (None, is_python2) and
92 python3 in (None, is_python3) and
93 windows in (None, is_windows) and
94 linux in (None, is_linux) and
95 osx in (None, is_osx))
96
97
98 def normalize_string_keys(old):
99 """Given a dictionary, make sure keys are unicode strings, not bytes."""
100 new = {}
101 for key, value in old.items():
102 if isinstance(key, bytes_):
103 new[key.decode('utf8')] = value
104 else:
105 new[key] = value
106 return new
107
108
109 def import_file(name, loc):
110 loc = str(loc)
111 if is_python_pre_3_5:
112 import imp
113 return imp.load_source(name, loc)
114 else:
115 import importlib.util
116 spec = importlib.util.spec_from_file_location(name, str(loc))
117 module = importlib.util.module_from_spec(spec)
118 spec.loader.exec_module(module)
119 return module
120
121
122 def locale_escape(string, errors='replace'):
123 '''
124 Mangle non-supported characters, for savages with ascii terminals.
125 '''
126 encoding = locale.getpreferredencoding()
127 string = string.encode(encoding, errors).decode('utf8')
128 return string
129
[end of spacy/compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spacy/compat.py b/spacy/compat.py
--- a/spacy/compat.py
+++ b/spacy/compat.py
@@ -1,6 +1,7 @@
# coding: utf8
from __future__ import unicode_literals
+import os
import sys
import ujson
import itertools
@@ -80,12 +81,18 @@
def symlink_to(orig, dest):
- if is_python2 and is_windows:
+ if is_windows:
import subprocess
subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
+def symlink_remove(link):
+ # https://stackoverflow.com/questions/26554135/cant-delete-unlink-a-symlink-to-directory-in-python-windows
+ if( os.path.isdir(path2str(link)) and is_windows ): # this should only be on Py2.7 and windows
+ os.rmdir(path2str(link))
+ else:
+ os.unlink(path2str(link))
def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
return (python2 in (None, is_python2) and
|
{"golden_diff": "diff --git a/spacy/compat.py b/spacy/compat.py\n--- a/spacy/compat.py\n+++ b/spacy/compat.py\n@@ -1,6 +1,7 @@\n # coding: utf8\n from __future__ import unicode_literals\n \n+import os\n import sys\n import ujson\n import itertools\n@@ -80,12 +81,18 @@\n \n \n def symlink_to(orig, dest):\n- if is_python2 and is_windows:\n+ if is_windows:\n import subprocess\n subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)\n else:\n orig.symlink_to(dest)\n \n+def symlink_remove(link):\n+ # https://stackoverflow.com/questions/26554135/cant-delete-unlink-a-symlink-to-directory-in-python-windows\n+ if( os.path.isdir(path2str(link)) and is_windows ): # this should only be on Py2.7 and windows\n+ os.rmdir(path2str(link))\n+ else:\n+ os.unlink(path2str(link))\n \n def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):\n return (python2 in (None, is_python2) and\n", "issue": "Provide compat that works with on Windows with Py 3.7.1\n## Feature description\r\nThe script that creates the symbolic link doesn't work by default when running `python -m spacy link en_core_web_sm en` -- normally if Dos is the default shell a call to `mklink` just works. However, in via this module, it does not and it is not a permission issues nor a system policy restriction as just running `mklink /d ...` from the same shell/command line works..\r\n\r\nThis fails regardless if under a virtualenv as well.\r\n\r\nInstead a user is presented with the following error message.\r\n\r\n```\r\n(venv) C:\\g\\py\\spacy> python -m spacy link en_core_web_sm en\r\nC:\\Program Files\\Python37\\lib\\importlib\\_bootstrap.py:219: RuntimeWarning: cymem.cymem.Pool size changed, may indicate binary incompatibility. Expected 48 from C header, got 64 from PyObject\r\n return f(*args, **kwds)\r\nC:\\Program Files\\Python37\\lib\\importlib\\_bootstrap.py:219: RuntimeWarning: cymem.cymem.Address size changed, may indicate binary incompatibility. Expected 24 from C header, got 40 from PyObject\r\n return f(*args, **kwds)\r\n\r\n Error: Couldn't link model to 'en'\r\n Creating a symlink in spacy/data failed. Make sure you have the required\r\n permissions and try re-running the command as admin, or use a\r\n virtualenv. 
You can still import the model as a module and call its\r\n load() method, or create the symlink manually.\r\n\r\n C:\\g\\py\\spacy\\venv\\lib\\site-packages\\en_core_web_sm -->\r\n C:\\g\\py\\spacy\\venv\\lib\\site-packages\\spacy\\data\\en\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Program Files\\Python37\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Program Files\\Python37\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\g\\py\\spacy\\venv\\lib\\site-packages\\spacy\\__main__.py\", line 31, in <module>\r\n plac.call(commands[command], sys.argv[1:])\r\n File \"C:\\g\\py\\spacy\\venv\\lib\\site-packages\\plac_core.py\", line 328, in call\r\n cmd, result = parser.consume(arglist)\r\n File \"C:\\g\\py\\spacy\\venv\\lib\\site-packages\\plac_core.py\", line 207, in consume\r\n return cmd, self.func(*(args + varargs + extraopts), **kwargs)\r\n File \"C:\\g\\py\\spacy\\venv\\lib\\site-packages\\spacy\\cli\\link.py\", line 48, in link\r\n symlink_to(link_path, model_path)\r\n File \"C:\\g\\py\\spacy\\venv\\lib\\site-packages\\spacy\\compat.py\", line 87, in symlink_to\r\n orig.symlink_to(dest)\r\n File \"C:\\Program Files\\Python37\\lib\\pathlib.py\", line 1320, in symlink_to\r\n self._accessor.symlink(target, self, target_is_directory)\r\nOSError: symbolic link privilege not held\r\n```\r\n\r\nThe following command works regardless and has no issue with permissions nor policy. The error message indicating symbolic link privilege not held is misleading.\r\n\r\n```\r\ncmd /c mklink /k c:\\path\\to\\symlink c:\\target\\file\r\n```\r\n\r\nA suggestion is changing the respective lines in `venv\\Lib\\site-packages\\spacy\\compat.py (symlink_to)`\r\n\r\n```\r\ndef symlink_to(orig, dest):\r\n if is_python2 and is_windows:\r\n import subprocess\r\n subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)\r\n else:\r\n orig.symlink_to(dest)\r\n```\r\n\r\nto \r\n\r\n```\r\ndef symlink_to(orig, dest):\r\n if (is_python2 or is_python3) and is_windows:\r\n import subprocess\r\n subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)\r\n else:\r\n orig.symlink_to(dest)\r\n````\r\n\r\n\r\n\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport sys\nimport ujson\nimport itertools\nimport locale\n\nfrom thinc.neural.util import copy_array\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\ntry:\n import copy_reg\nexcept ImportError:\n import copyreg as copy_reg\n\ntry:\n from cupy.cuda.stream import Stream as CudaStream\nexcept ImportError:\n CudaStream = None\n\ntry:\n import cupy\nexcept ImportError:\n cupy = None\n\ntry:\n from thinc.neural.optimizers import Optimizer\nexcept ImportError:\n from thinc.neural.optimizers import Adam as Optimizer\n\npickle = pickle\ncopy_reg = copy_reg\nCudaStream = CudaStream\ncupy = cupy\ncopy_array = copy_array\nizip = getattr(itertools, 'izip', zip)\n\nis_windows = sys.platform.startswith('win')\nis_linux = sys.platform.startswith('linux')\nis_osx = sys.platform == 'darwin'\n\n# See: https://github.com/benjaminp/six/blob/master/six.py\nis_python2 = sys.version_info[0] == 2\nis_python3 = sys.version_info[0] == 3\nis_python_pre_3_5 = is_python2 or (is_python3 and sys.version_info[1] < 5)\n\nif is_python2:\n bytes_ = str\n unicode_ = unicode # noqa: F821\n basestring_ = basestring # noqa: F821\n input_ = raw_input # noqa: F821\n json_dumps = lambda data: ujson.dumps(data, 
indent=2, escape_forward_slashes=False).decode('utf8')\n path2str = lambda path: str(path).decode('utf8')\n\nelif is_python3:\n bytes_ = bytes\n unicode_ = str\n basestring_ = str\n input_ = input\n json_dumps = lambda data: ujson.dumps(data, indent=2, escape_forward_slashes=False)\n path2str = lambda path: str(path)\n\n\ndef b_to_str(b_str):\n if is_python2:\n return b_str\n # important: if no encoding is set, string becomes \"b'...'\"\n return str(b_str, encoding='utf8')\n\n\ndef getattr_(obj, name, *default):\n if is_python3 and isinstance(name, bytes):\n name = name.decode('utf8')\n return getattr(obj, name, *default)\n\n\ndef symlink_to(orig, dest):\n if is_python2 and is_windows:\n import subprocess\n subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)\n else:\n orig.symlink_to(dest)\n\n\ndef is_config(python2=None, python3=None, windows=None, linux=None, osx=None):\n return (python2 in (None, is_python2) and\n python3 in (None, is_python3) and\n windows in (None, is_windows) and\n linux in (None, is_linux) and\n osx in (None, is_osx))\n\n\ndef normalize_string_keys(old):\n \"\"\"Given a dictionary, make sure keys are unicode strings, not bytes.\"\"\"\n new = {}\n for key, value in old.items():\n if isinstance(key, bytes_):\n new[key.decode('utf8')] = value\n else:\n new[key] = value\n return new\n\n\ndef import_file(name, loc):\n loc = str(loc)\n if is_python_pre_3_5:\n import imp\n return imp.load_source(name, loc)\n else:\n import importlib.util\n spec = importlib.util.spec_from_file_location(name, str(loc))\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef locale_escape(string, errors='replace'):\n '''\n Mangle non-supported characters, for savages with ascii terminals.\n '''\n encoding = locale.getpreferredencoding()\n string = string.encode(encoding, errors).decode('utf8')\n return string\n", "path": "spacy/compat.py"}]}
| 2,697 | 274 |
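The compat change above routes every Windows symlink through `cmd`'s built-in `mklink` (on Python 3 as well, not just Python 2) and removes directory links with `os.rmdir` rather than `os.unlink`. A minimal cross-platform sketch of that pair of helpers, assuming Python 3 and `pathlib` paths; error handling and privilege checks are omitted.

```python
import os
import subprocess
import sys
from pathlib import Path

IS_WINDOWS = sys.platform.startswith("win")


def symlink_to(link: Path, target: Path) -> None:
    """Create a directory symlink at *link* pointing at *target*."""
    if IS_WINDOWS:
        # mklink is a cmd.exe built-in, so it has to run through the shell.
        subprocess.call(["mklink", "/d", str(link), str(target)], shell=True)
    else:
        link.symlink_to(target)


def symlink_remove(link: Path) -> None:
    """Remove a symlink; on Windows a directory link needs rmdir, not unlink."""
    if IS_WINDOWS and os.path.isdir(str(link)):
        os.rmdir(str(link))
    else:
        os.unlink(str(link))
```

On POSIX systems both helpers reduce to the standard `symlink`/`unlink` calls, matching the `symlink_remove` helper added in the golden diff.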
gh_patches_debug_17098
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-9791
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show appears on the schedule page as airing "today" even though it airs on a different day
**Describe the bug**
The TV Show "Last Week Tonight with John Oliver" appears on schedule page to air "today" even though it airs on different day.
**To Reproduce**
http://x.x.x.x:8081/schedule/
**Expected behavior**
The TV Show "Last Week Tonight with John Oliver" airs on Sunday nights, as shown on the individual show page, and should appear on the schedule page as such instead of always appearing under "Today".
**Screenshots**

**Medusa (please complete the following information):**
Branch: master
Commit: 9b23c72f7e24bf638819e32d72f33e433f9649c4
Version: 0.5.15
Database: 44.18
Python Version: 3.8.9 (default, Jul 15 2021, 01:46:40) [GCC 10.2.0]
SSL Version: OpenSSL 1.1.1k 25 Mar 2021
OS: Linux-5.10.47-x86_64-with-glibc2.2.5
Locale: en_US.UTF-8
Timezone: EDT
**Debug logs (at least 50 lines):**
General > Advanced Settings > Enable debug
[https://paste.kodi.tv/rucoceqelu.kodi](url)
**Additional context**
Tried choosing different index, and restarted services, but issue remains with just the TV Show "Last Week Tonight with John Oliver".
</issue>
<code>
[start of medusa/show/coming_episodes.py]
1 # coding=utf-8
2 # This file is part of Medusa.
3 #
4 # Medusa is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # Medusa is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with Medusa. If not, see <http://www.gnu.org/licenses/>.
16
17 from __future__ import unicode_literals
18
19 from builtins import object
20 from builtins import str
21 from datetime import date, timedelta
22 from operator import itemgetter
23
24 from medusa import app
25 from medusa.common import (
26 ARCHIVED,
27 DOWNLOADED,
28 IGNORED,
29 SNATCHED,
30 SNATCHED_BEST,
31 SNATCHED_PROPER,
32 UNAIRED,
33 WANTED
34 )
35 from medusa.db import DBConnection
36 from medusa.helper.common import dateFormat, timeFormat
37 from medusa.helpers.quality import get_quality_string
38 from medusa.network_timezones import parse_date_time
39 from medusa.sbdatetime import sbdatetime
40 from medusa.tv.series import Series, SeriesIdentifier
41
42
43 class ComingEpisodes(object):
44 """
45 Missed: yesterday...(less than 1 week)
46 Today: today
47 Soon: tomorrow till next week
48 Later: later than next week
49 """
50
51 categories = ['later', 'missed', 'soon', 'today']
52 sorts = {
53 'date': itemgetter('localtime'),
54 'network': itemgetter('network', 'localtime'),
55 'show': itemgetter('show_name', 'localtime'),
56 }
57
58 def __init__(self):
59 pass
60
61 @staticmethod
62 def get_coming_episodes(categories, sort, group, paused=app.COMING_EPS_DISPLAY_PAUSED):
63 """
64 :param categories: The categories of coming episodes. See ``ComingEpisodes.categories``
65 :param sort: The sort to apply to the coming episodes. See ``ComingEpisodes.sorts``
66 :param group: ``True`` to group the coming episodes by category, ``False`` otherwise
67 :param paused: ``True`` to include paused shows, ``False`` otherwise
68 :return: The list of coming episodes
69 """
70 categories = ComingEpisodes._get_categories(categories)
71 sort = ComingEpisodes._get_sort(sort)
72
73 today = date.today().toordinal()
74 next_week = (date.today() + timedelta(days=7)).toordinal()
75 recently = (date.today() - timedelta(days=app.COMING_EPS_MISSED_RANGE)).toordinal()
76 status_list = [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER,
77 ARCHIVED, IGNORED]
78
79 db = DBConnection()
80 fields_to_select = ', '.join(
81 ['airdate', 'airs', 'e.description as description', 'episode', 'imdb_id', 'e.indexer',
82 'indexer_id', 'name', 'network', 'paused', 's.quality', 'runtime', 'season', 'show_name',
83 'showid', 's.status']
84 )
85 results = db.select(
86 'SELECT %s ' % fields_to_select +
87 'FROM tv_episodes e, tv_shows s '
88 'WHERE season != 0 '
89 'AND airdate >= ? '
90 'AND airdate < ? '
91 'AND s.indexer = e.indexer '
92 'AND s.indexer_id = e.showid '
93 'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',
94 [today, next_week] + status_list
95 )
96
97 done_shows_list = [int(result['showid']) for result in results]
98 placeholder = ','.join(['?'] * len(done_shows_list))
99 placeholder2 = ','.join(['?'] * len([DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]))
100
101 # FIXME: This inner join is not multi indexer friendly.
102 results += db.select(
103 'SELECT %s ' % fields_to_select +
104 'FROM tv_episodes e, tv_shows s '
105 'WHERE season != 0 '
106 'AND showid NOT IN (' + placeholder + ') '
107 'AND s.indexer_id = e.showid '
108 'AND airdate = (SELECT airdate '
109 'FROM tv_episodes inner_e '
110 'WHERE inner_e.season != 0 '
111 'AND inner_e.showid = e.showid '
112 'AND inner_e.indexer = e.indexer '
113 'AND inner_e.airdate >= ? '
114 'ORDER BY inner_e.airdate ASC LIMIT 1) '
115 'AND e.status NOT IN (' + placeholder2 + ')',
116 done_shows_list + [next_week] + [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]
117 )
118
119 results += db.select(
120 'SELECT %s ' % fields_to_select +
121 'FROM tv_episodes e, tv_shows s '
122 'WHERE season != 0 '
123 'AND s.indexer_id = e.showid '
124 'AND airdate < ? '
125 'AND airdate >= ? '
126 'AND e.status IN (?,?) '
127 'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',
128 [today, recently, WANTED, UNAIRED] + status_list
129 )
130
131 for index, item in enumerate(results):
132 identifier = SeriesIdentifier.from_id(int(item['indexer']), item['indexer_id'])
133 show = Series.find_by_identifier(identifier)
134 item['series_slug'] = identifier.slug
135 results[index]['localtime'] = sbdatetime.convert_to_setting(
136 parse_date_time(item['airdate'], item['airs'], item['network']))
137 results[index]['externals'] = show.externals
138
139 results.sort(key=ComingEpisodes.sorts[sort])
140
141 if not group:
142 return results
143
144 grouped_results = ComingEpisodes._get_categories_map(categories)
145
146 for result in results:
147 if result['paused'] and not paused:
148 continue
149
150 result['airs'] = str(result['airs']).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ')
151 result['airdate'] = result['localtime'].toordinal()
152
153 if result['airdate'] < today:
154 category = 'missed'
155 elif result['airdate'] >= next_week:
156 category = 'later'
157 elif result['airdate'] == today:
158 category = 'today'
159 else:
160 category = 'soon'
161
162 if len(categories) > 0 and category not in categories:
163 continue
164
165 if not result['network']:
166 result['network'] = ''
167
168 result['qualityValue'] = result['quality']
169 result['quality'] = get_quality_string(result['quality'])
170 result['airs'] = sbdatetime.sbftime(result['localtime'], t_preset=timeFormat).lstrip('0').replace(' 0', ' ')
171 result['weekday'] = 1 + date.fromordinal(result['airdate']).weekday()
172 result['tvdbid'] = result['indexer_id']
173 result['airdate'] = sbdatetime.sbfdate(result['localtime'], d_preset=dateFormat)
174 result['localtime'] = result['localtime'].toordinal()
175
176 grouped_results[category].append(result)
177
178 return grouped_results
179
180 @staticmethod
181 def _get_categories(categories):
182 if not categories:
183 return []
184
185 if not isinstance(categories, list):
186 return categories.split('|')
187
188 return categories
189
190 @staticmethod
191 def _get_categories_map(categories):
192 if not categories:
193 return {}
194
195 return {category: [] for category in categories}
196
197 @staticmethod
198 def _get_sort(sort):
199 sort = sort.lower() if sort else ''
200
201 if sort not in ComingEpisodes.sorts:
202 return 'date'
203
204 return sort
205
[end of medusa/show/coming_episodes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/show/coming_episodes.py b/medusa/show/coming_episodes.py
--- a/medusa/show/coming_episodes.py
+++ b/medusa/show/coming_episodes.py
@@ -168,7 +168,8 @@
result['qualityValue'] = result['quality']
result['quality'] = get_quality_string(result['quality'])
result['airs'] = sbdatetime.sbftime(result['localtime'], t_preset=timeFormat).lstrip('0').replace(' 0', ' ')
- result['weekday'] = 1 + date.fromordinal(result['airdate']).weekday()
+ # Monday - Sunday (0 - 6)
+ result['weekday'] = date.fromordinal(result['airdate']).weekday()
result['tvdbid'] = result['indexer_id']
result['airdate'] = sbdatetime.sbfdate(result['localtime'], d_preset=dateFormat)
result['localtime'] = result['localtime'].toordinal()
|
{"golden_diff": "diff --git a/medusa/show/coming_episodes.py b/medusa/show/coming_episodes.py\n--- a/medusa/show/coming_episodes.py\n+++ b/medusa/show/coming_episodes.py\n@@ -168,7 +168,8 @@\n result['qualityValue'] = result['quality']\n result['quality'] = get_quality_string(result['quality'])\n result['airs'] = sbdatetime.sbftime(result['localtime'], t_preset=timeFormat).lstrip('0').replace(' 0', ' ')\n- result['weekday'] = 1 + date.fromordinal(result['airdate']).weekday()\n+ # Monday - Sunday (0 - 6)\n+ result['weekday'] = date.fromordinal(result['airdate']).weekday()\n result['tvdbid'] = result['indexer_id']\n result['airdate'] = sbdatetime.sbfdate(result['localtime'], d_preset=dateFormat)\n result['localtime'] = result['localtime'].toordinal()\n", "issue": "Show appears on schedule page to air \"today\" even though it airs on different day\n**Describe the bug**\r\nThe TV Show \"Last Week Tonight with John Oliver\" appears on schedule page to air \"today\" even though it airs on different day.\r\n\r\n**To Reproduce**\r\nhttp://x.x.x.x:8081/schedule/\r\n\r\n**Expected behavior**\r\nThe TV Show \"Last Week Tonight with John Oliver\" airs on Sunday nights, as shown on the individual show page, and should appear on the schedule page as such instead of always appearing under \"Today\".\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n**Medusa (please complete the following information):**\r\nBranch:\u00a0master\r\nCommit:\u00a09b23c72f7e24bf638819e32d72f33e433f9649c4\r\nVersion:\u00a00.5.15\r\nDatabase:\u00a044.18\r\n\r\nPython Version: | 3.8.9 (default, Jul 15 2021, 01:46:40) [GCC 10.2.0]\r\nSSL Version: | OpenSSL 1.1.1k 25 Mar 2021\r\nOS: | Linux-5.10.47-x86_64-with-glibc2.2.5\r\nLocale: | en_US.UTF-8\r\nTimezone: | EDT\r\n\r\n**Debug logs (at least 50 lines):**\r\nGeneral > Advanced Settings > Enable debug\r\n\r\n[https://paste.kodi.tv/rucoceqelu.kodi](url)\r\n\r\n**Additional context**\r\nTried choosing different index, and restarted services, but issue remains with just the TV Show \"Last Week Tonight with John Oliver\".\r\n\n", "before_files": [{"content": "# coding=utf-8\n# This file is part of Medusa.\n#\n# Medusa is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Medusa is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Medusa. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import unicode_literals\n\nfrom builtins import object\nfrom builtins import str\nfrom datetime import date, timedelta\nfrom operator import itemgetter\n\nfrom medusa import app\nfrom medusa.common import (\n ARCHIVED,\n DOWNLOADED,\n IGNORED,\n SNATCHED,\n SNATCHED_BEST,\n SNATCHED_PROPER,\n UNAIRED,\n WANTED\n)\nfrom medusa.db import DBConnection\nfrom medusa.helper.common import dateFormat, timeFormat\nfrom medusa.helpers.quality import get_quality_string\nfrom medusa.network_timezones import parse_date_time\nfrom medusa.sbdatetime import sbdatetime\nfrom medusa.tv.series import Series, SeriesIdentifier\n\n\nclass ComingEpisodes(object):\n \"\"\"\n Missed: yesterday...(less than 1 week)\n Today: today\n Soon: tomorrow till next week\n Later: later than next week\n \"\"\"\n\n categories = ['later', 'missed', 'soon', 'today']\n sorts = {\n 'date': itemgetter('localtime'),\n 'network': itemgetter('network', 'localtime'),\n 'show': itemgetter('show_name', 'localtime'),\n }\n\n def __init__(self):\n pass\n\n @staticmethod\n def get_coming_episodes(categories, sort, group, paused=app.COMING_EPS_DISPLAY_PAUSED):\n \"\"\"\n :param categories: The categories of coming episodes. See ``ComingEpisodes.categories``\n :param sort: The sort to apply to the coming episodes. See ``ComingEpisodes.sorts``\n :param group: ``True`` to group the coming episodes by category, ``False`` otherwise\n :param paused: ``True`` to include paused shows, ``False`` otherwise\n :return: The list of coming episodes\n \"\"\"\n categories = ComingEpisodes._get_categories(categories)\n sort = ComingEpisodes._get_sort(sort)\n\n today = date.today().toordinal()\n next_week = (date.today() + timedelta(days=7)).toordinal()\n recently = (date.today() - timedelta(days=app.COMING_EPS_MISSED_RANGE)).toordinal()\n status_list = [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER,\n ARCHIVED, IGNORED]\n\n db = DBConnection()\n fields_to_select = ', '.join(\n ['airdate', 'airs', 'e.description as description', 'episode', 'imdb_id', 'e.indexer',\n 'indexer_id', 'name', 'network', 'paused', 's.quality', 'runtime', 'season', 'show_name',\n 'showid', 's.status']\n )\n results = db.select(\n 'SELECT %s ' % fields_to_select +\n 'FROM tv_episodes e, tv_shows s '\n 'WHERE season != 0 '\n 'AND airdate >= ? '\n 'AND airdate < ? '\n 'AND s.indexer = e.indexer '\n 'AND s.indexer_id = e.showid '\n 'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',\n [today, next_week] + status_list\n )\n\n done_shows_list = [int(result['showid']) for result in results]\n placeholder = ','.join(['?'] * len(done_shows_list))\n placeholder2 = ','.join(['?'] * len([DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]))\n\n # FIXME: This inner join is not multi indexer friendly.\n results += db.select(\n 'SELECT %s ' % fields_to_select +\n 'FROM tv_episodes e, tv_shows s '\n 'WHERE season != 0 '\n 'AND showid NOT IN (' + placeholder + ') '\n 'AND s.indexer_id = e.showid '\n 'AND airdate = (SELECT airdate '\n 'FROM tv_episodes inner_e '\n 'WHERE inner_e.season != 0 '\n 'AND inner_e.showid = e.showid '\n 'AND inner_e.indexer = e.indexer '\n 'AND inner_e.airdate >= ? 
'\n 'ORDER BY inner_e.airdate ASC LIMIT 1) '\n 'AND e.status NOT IN (' + placeholder2 + ')',\n done_shows_list + [next_week] + [DOWNLOADED, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER]\n )\n\n results += db.select(\n 'SELECT %s ' % fields_to_select +\n 'FROM tv_episodes e, tv_shows s '\n 'WHERE season != 0 '\n 'AND s.indexer_id = e.showid '\n 'AND airdate < ? '\n 'AND airdate >= ? '\n 'AND e.status IN (?,?) '\n 'AND e.status NOT IN (' + ','.join(['?'] * len(status_list)) + ')',\n [today, recently, WANTED, UNAIRED] + status_list\n )\n\n for index, item in enumerate(results):\n identifier = SeriesIdentifier.from_id(int(item['indexer']), item['indexer_id'])\n show = Series.find_by_identifier(identifier)\n item['series_slug'] = identifier.slug\n results[index]['localtime'] = sbdatetime.convert_to_setting(\n parse_date_time(item['airdate'], item['airs'], item['network']))\n results[index]['externals'] = show.externals\n\n results.sort(key=ComingEpisodes.sorts[sort])\n\n if not group:\n return results\n\n grouped_results = ComingEpisodes._get_categories_map(categories)\n\n for result in results:\n if result['paused'] and not paused:\n continue\n\n result['airs'] = str(result['airs']).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ')\n result['airdate'] = result['localtime'].toordinal()\n\n if result['airdate'] < today:\n category = 'missed'\n elif result['airdate'] >= next_week:\n category = 'later'\n elif result['airdate'] == today:\n category = 'today'\n else:\n category = 'soon'\n\n if len(categories) > 0 and category not in categories:\n continue\n\n if not result['network']:\n result['network'] = ''\n\n result['qualityValue'] = result['quality']\n result['quality'] = get_quality_string(result['quality'])\n result['airs'] = sbdatetime.sbftime(result['localtime'], t_preset=timeFormat).lstrip('0').replace(' 0', ' ')\n result['weekday'] = 1 + date.fromordinal(result['airdate']).weekday()\n result['tvdbid'] = result['indexer_id']\n result['airdate'] = sbdatetime.sbfdate(result['localtime'], d_preset=dateFormat)\n result['localtime'] = result['localtime'].toordinal()\n\n grouped_results[category].append(result)\n\n return grouped_results\n\n @staticmethod\n def _get_categories(categories):\n if not categories:\n return []\n\n if not isinstance(categories, list):\n return categories.split('|')\n\n return categories\n\n @staticmethod\n def _get_categories_map(categories):\n if not categories:\n return {}\n\n return {category: [] for category in categories}\n\n @staticmethod\n def _get_sort(sort):\n sort = sort.lower() if sort else ''\n\n if sort not in ComingEpisodes.sorts:\n return 'date'\n\n return sort\n", "path": "medusa/show/coming_episodes.py"}]}
| 3,264 | 216 |
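
A short aside on the fix above, since the one-line change is easy to misread: `date.weekday()` already follows Python's Monday = 0 through Sunday = 6 convention, so the extra `1 +` shifted every show one step forward and a Sunday airing came back as 7. The snippet below is only a minimal illustration of that off-by-one; it is not Medusa code, and the sample date is an arbitrary Sunday chosen for the example.

```python
from datetime import date

# Hypothetical airdate used purely for illustration: 2021-09-12 is a Sunday.
airdate_ordinal = date(2021, 9, 12).toordinal()

buggy_weekday = 1 + date.fromordinal(airdate_ordinal).weekday()  # 7 (shifted)
fixed_weekday = date.fromordinal(airdate_ordinal).weekday()      # 6 == Sunday

print(buggy_weekday, fixed_weekday)  # -> 7 6
```

Dropping the offset, as the golden diff does, keeps the value consistent with the Monday-to-Sunday numbering the schedule view seems to expect.
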
gh_patches_debug_41897
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-1007
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Error will raise when CUDA driver installed but no devices detected
When ``libnvidia-ml.so`` can be detected while no devices are installed, an error will appear:
```
/usr/local/lib/python3.6/dist-packages/mars/lib/nvutils.py in get_driver_info()
200 cuda_version = c_uint()
201
--> 202 _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
203 _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))
204
/usr/local/lib/python3.6/dist-packages/mars/lib/nvutils.py in _nvml_check_error(result)
104 _error_str = _nvmlErrorString(result)
105 if _error_str:
--> 106 raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))
107 else:
108 raise NVError('Unknown NVML API Error %d' % result)
NVError: NVML API Error 1: Uninitialized
```
</issue>
<code>
[start of mars/lib/nvutils.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 1999-2020 Alibaba Group Holding Ltd.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import logging
17 import os
18 import uuid
19 from collections import namedtuple
20 from ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\
21 create_string_buffer, Structure, POINTER, CDLL
22
23 logger = logging.getLogger(__name__)
24
25 # Some constants taken from cuda.h
26 CUDA_SUCCESS = 0
27 CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16
28 CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39
29 CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13
30 CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
31 CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
32 CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
33
34 # nvml constants
35 NVML_SUCCESS = 0
36 NVML_TEMPERATURE_GPU = 0
37
38
39 class _CUuuid_t(Structure):
40 _fields_ = [
41 ('bytes', c_char * 16)
42 ]
43 class _nvmlUtilization_t(Structure):
44 _fields_ = [
45 ('gpu', c_uint),
46 ('memory', c_uint),
47 ]
48
49 class _struct_nvmlDevice_t(Structure):
50 pass # opaque handle
51 _nvmlDevice_t = POINTER(_struct_nvmlDevice_t)
52
53 class _nvmlBAR1Memory_t(Structure):
54 _fields_ = [
55 ('total', c_ulonglong),
56 ('free', c_ulonglong),
57 ('used', c_ulonglong),
58 ]
59
60
61 def _load_nv_library(*libnames):
62 for lib in libnames:
63 try:
64 return CDLL(lib)
65 except OSError:
66 continue
67
68
69 _cuda_lib = _nvml_lib = None
70
71 _cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')
72 _nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')
73 _nvml_device_status = namedtuple(
74 '_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
75
76
77 _init_pid = None
78 _gpu_count = None
79 _driver_info = None
80 _device_infos = dict()
81
82
83 class NVError(Exception):
84 pass
85
86
87 def _cu_check_error(result):
88 if result != CUDA_SUCCESS:
89 _error_str = c_char_p()
90 _cuda_lib.cuGetErrorString(result, byref(_error_str))
91 raise NVError('Device API Error %d: %s' % (result, _error_str.value.decode()))
92
93
94 _nvmlErrorString = None
95
96
97 def _nvml_check_error(result):
98 global _nvmlErrorString
99 if _nvmlErrorString is None:
100 _nvmlErrorString = _nvml_lib.nvmlErrorString
101 _nvmlErrorString.restype = c_char_p
102
103 if result != NVML_SUCCESS:
104 _error_str = _nvmlErrorString(result)
105 if _error_str:
106 raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))
107 else:
108 raise NVError('Unknown NVML API Error %d' % result)
109
110
111 _cu_process_var_to_cores = {
112 (1, 0): 8,
113 (1, 1): 8,
114 (1, 2): 8,
115 (1, 3): 8,
116 (2, 0): 32,
117 (2, 1): 48,
118 }
119
120
121 def _cu_get_processor_cores(major, minor):
122 return _cu_process_var_to_cores.get((major, minor), 192)
123
124
125 def _init_cp():
126 global _cuda_lib
127 if _init_pid == os.getpid():
128 return
129
130 _cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll')
131
132 if _cuda_lib is None:
133 return
134 try:
135 _cu_check_error(_cuda_lib.cuInit(0))
136 except NVError:
137 logger.exception('Failed to initialize libcuda.')
138 return
139
140
141 def _init_nvml():
142 global _nvml_lib
143 if _init_pid == os.getpid():
144 return
145
146 _nvml_lib = _load_nv_library('libnvidia-ml.so', 'libnvidia-ml.dylib', 'nvml.dll')
147
148 if _nvml_lib is None:
149 return
150 try:
151 _nvml_check_error(_nvml_lib.nvmlInit_v2())
152 except NVError:
153 logger.exception('Failed to initialize libnvidia-ml.')
154 return
155
156
157 def _init():
158 global _init_pid
159
160 _init_cp()
161 _init_nvml()
162
163 if _nvml_lib is not None and _cuda_lib is not None:
164 _init_pid = os.getpid()
165
166
167 def get_device_count():
168 global _gpu_count
169
170 if _gpu_count is not None:
171 return _gpu_count
172
173 _init_nvml()
174 if _nvml_lib is None:
175 return None
176
177 if 'CUDA_VISIBLE_DEVICES' in os.environ:
178 devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()
179 if not devices:
180 _gpu_count = 0
181 else:
182 _gpu_count = len(devices.split(','))
183 else:
184 n_gpus = c_uint()
185 _cu_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))
186 _gpu_count = n_gpus.value
187 return _gpu_count
188
189
190 def get_driver_info():
191 global _driver_info
192
193 _init_nvml()
194 if _nvml_lib is None:
195 return None
196 if _driver_info is not None:
197 return _driver_info
198
199 version_buf = create_string_buffer(100)
200 cuda_version = c_uint()
201
202 _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))
203 _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))
204
205 _driver_info = _nvml_driver_info(
206 driver_version=version_buf.value.decode(),
207 cuda_version='%d.%d' % (cuda_version.value // 1000, cuda_version.value % 1000)
208 )
209 return _driver_info
210
211
212 def get_device_info(dev_index):
213 try:
214 return _device_infos[dev_index]
215 except KeyError:
216 pass
217
218 _init()
219 if _init_pid is None:
220 return None
221
222 device = c_int()
223 name_buf = create_string_buffer(100)
224 uuid_t = _CUuuid_t()
225 cc_major = c_int()
226 cc_minor = c_int()
227 cores = c_int()
228 threads_per_core = c_int()
229
230 _cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))
231 _cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))
232 _cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))
233 _cu_check_error(_cuda_lib.cuDeviceComputeCapability(
234 byref(cc_major), byref(cc_minor), device))
235 _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
236 byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))
237 _cu_check_error(_cuda_lib.cuDeviceGetAttribute(
238 byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))
239
240 if 'CUDA_VISIBLE_DEVICES' in os.environ:
241 real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]
242 else:
243 real_dev_index = dev_index
244
245 info = _device_infos[dev_index] = _cu_device_info(
246 index=real_dev_index,
247 uuid=uuid.UUID(bytes=uuid_t.bytes),
248 name=name_buf.value.decode(),
249 multiprocessors=cores.value,
250 cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),
251 threads=cores.value * threads_per_core.value,
252 )
253 return info
254
255
256 def get_device_status(dev_index):
257 _init()
258 if _init_pid is None:
259 return None
260
261 device = _nvmlDevice_t()
262 utils = _nvmlUtilization_t()
263 temperature = c_uint()
264 memory_info = _nvmlBAR1Memory_t()
265
266 dev_uuid = get_device_info(dev_index).uuid
267
268 uuid_str = ('GPU-' + str(dev_uuid)).encode()
269
270 _nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))
271 _nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))
272 _nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(
273 device, NVML_TEMPERATURE_GPU, byref(temperature)))
274 _nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))
275
276 return _nvml_device_status(
277 gpu_util=utils.gpu,
278 mem_util=utils.memory,
279 temperature=temperature.value,
280 fb_total_mem=memory_info.total,
281 fb_free_mem=memory_info.free,
282 fb_used_mem=memory_info.used,
283 )
284
[end of mars/lib/nvutils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mars/lib/nvutils.py b/mars/lib/nvutils.py
--- a/mars/lib/nvutils.py
+++ b/mars/lib/nvutils.py
@@ -31,10 +31,14 @@
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36
+CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100
+
# nvml constants
NVML_SUCCESS = 0
NVML_TEMPERATURE_GPU = 0
+NVML_DRIVER_NOT_LOADED = 9
+
class _CUuuid_t(Structure):
_fields_ = [
@@ -73,14 +77,36 @@
_nvml_device_status = namedtuple(
'_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')
-
_init_pid = None
_gpu_count = None
_driver_info = None
_device_infos = dict()
+_no_device_warned = False
+
class NVError(Exception):
+ def __init__(self, msg, *args, errno=None):
+ self._errno = errno
+ super().__init__(msg or 'Unknown error', *args)
+
+ def __str__(self):
+ return '(%s) %s' % (self._errno, super().__str__())
+
+ @property
+ def errno(self):
+ return self._errno
+
+ @property
+ def message(self):
+ return super().__str__()
+
+
+class NVDeviceAPIError(NVError):
+ pass
+
+
+class NVMLAPIError(NVError):
pass
@@ -88,7 +114,7 @@
if result != CUDA_SUCCESS:
_error_str = c_char_p()
_cuda_lib.cuGetErrorString(result, byref(_error_str))
- raise NVError('Device API Error %d: %s' % (result, _error_str.value.decode()))
+ raise NVDeviceAPIError(_error_str.value.decode(), errno=result)
_nvmlErrorString = None
@@ -102,10 +128,7 @@
if result != NVML_SUCCESS:
_error_str = _nvmlErrorString(result)
- if _error_str:
- raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))
- else:
- raise NVError('Unknown NVML API Error %d' % result)
+ raise NVMLAPIError(_error_str.decode(), errno=result)
_cu_process_var_to_cores = {
@@ -123,7 +146,7 @@
def _init_cp():
- global _cuda_lib
+ global _cuda_lib, _no_device_warned
if _init_pid == os.getpid():
return
@@ -133,13 +156,19 @@
return
try:
_cu_check_error(_cuda_lib.cuInit(0))
- except NVError:
- logger.exception('Failed to initialize libcuda.')
+ except NVDeviceAPIError as ex:
+ if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:
+ _cuda_lib = None
+ if not _no_device_warned:
+ logger.warning('No CUDA device detected')
+ _no_device_warned = True
+ else:
+ logger.exception('Failed to initialize libcuda.')
return
def _init_nvml():
- global _nvml_lib
+ global _nvml_lib, _no_device_warned
if _init_pid == os.getpid():
return
@@ -149,8 +178,14 @@
return
try:
_nvml_check_error(_nvml_lib.nvmlInit_v2())
- except NVError:
- logger.exception('Failed to initialize libnvidia-ml.')
+ except NVMLAPIError as ex:
+ if ex.errno == NVML_DRIVER_NOT_LOADED:
+ _nvml_lib = None
+ if not _no_device_warned:
+ logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)
+ _no_device_warned = True
+ else:
+ logger.exception('Failed to initialize libnvidia-ml.')
return
|
{"golden_diff": "diff --git a/mars/lib/nvutils.py b/mars/lib/nvutils.py\n--- a/mars/lib/nvutils.py\n+++ b/mars/lib/nvutils.py\n@@ -31,10 +31,14 @@\n CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34\n CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36\n \n+CU_NO_CUDA_CAPABLE_DEVICE_DETECTED = 100\n+\n # nvml constants\n NVML_SUCCESS = 0\n NVML_TEMPERATURE_GPU = 0\n \n+NVML_DRIVER_NOT_LOADED = 9\n+\n \n class _CUuuid_t(Structure):\n _fields_ = [\n@@ -73,14 +77,36 @@\n _nvml_device_status = namedtuple(\n '_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')\n \n-\n _init_pid = None\n _gpu_count = None\n _driver_info = None\n _device_infos = dict()\n \n+_no_device_warned = False\n+\n \n class NVError(Exception):\n+ def __init__(self, msg, *args, errno=None):\n+ self._errno = errno\n+ super().__init__(msg or 'Unknown error', *args)\n+\n+ def __str__(self):\n+ return '(%s) %s' % (self._errno, super().__str__())\n+\n+ @property\n+ def errno(self):\n+ return self._errno\n+\n+ @property\n+ def message(self):\n+ return super().__str__()\n+\n+\n+class NVDeviceAPIError(NVError):\n+ pass\n+\n+\n+class NVMLAPIError(NVError):\n pass\n \n \n@@ -88,7 +114,7 @@\n if result != CUDA_SUCCESS:\n _error_str = c_char_p()\n _cuda_lib.cuGetErrorString(result, byref(_error_str))\n- raise NVError('Device API Error %d: %s' % (result, _error_str.value.decode()))\n+ raise NVDeviceAPIError(_error_str.value.decode(), errno=result)\n \n \n _nvmlErrorString = None\n@@ -102,10 +128,7 @@\n \n if result != NVML_SUCCESS:\n _error_str = _nvmlErrorString(result)\n- if _error_str:\n- raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))\n- else:\n- raise NVError('Unknown NVML API Error %d' % result)\n+ raise NVMLAPIError(_error_str.decode(), errno=result)\n \n \n _cu_process_var_to_cores = {\n@@ -123,7 +146,7 @@\n \n \n def _init_cp():\n- global _cuda_lib\n+ global _cuda_lib, _no_device_warned\n if _init_pid == os.getpid():\n return\n \n@@ -133,13 +156,19 @@\n return\n try:\n _cu_check_error(_cuda_lib.cuInit(0))\n- except NVError:\n- logger.exception('Failed to initialize libcuda.')\n+ except NVDeviceAPIError as ex:\n+ if ex.errno == CU_NO_CUDA_CAPABLE_DEVICE_DETECTED:\n+ _cuda_lib = None\n+ if not _no_device_warned:\n+ logger.warning('No CUDA device detected')\n+ _no_device_warned = True\n+ else:\n+ logger.exception('Failed to initialize libcuda.')\n return\n \n \n def _init_nvml():\n- global _nvml_lib\n+ global _nvml_lib, _no_device_warned\n if _init_pid == os.getpid():\n return\n \n@@ -149,8 +178,14 @@\n return\n try:\n _nvml_check_error(_nvml_lib.nvmlInit_v2())\n- except NVError:\n- logger.exception('Failed to initialize libnvidia-ml.')\n+ except NVMLAPIError as ex:\n+ if ex.errno == NVML_DRIVER_NOT_LOADED:\n+ _nvml_lib = None\n+ if not _no_device_warned:\n+ logger.warning('Failed to load libnvidia-ml: %s, no CUDA device will be enabled', ex.message)\n+ _no_device_warned = True\n+ else:\n+ logger.exception('Failed to initialize libnvidia-ml.')\n return\n", "issue": "[BUG] Error will raise when CUDA driver installed but no devices detected\nWhen ``libnvidia-ml.so`` can be detected while no devices is installed, an error will appear:\r\n\r\n```\r\n/usr/local/lib/python3.6/dist-packages/mars/lib/nvutils.py in get_driver_info()\r\n 200 cuda_version = c_uint()\r\n 201 \r\n--> 202 _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))\r\n 203 _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))\r\n 204 
\r\n\r\n/usr/local/lib/python3.6/dist-packages/mars/lib/nvutils.py in _nvml_check_error(result)\r\n 104 _error_str = _nvmlErrorString(result)\r\n 105 if _error_str:\r\n--> 106 raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))\r\n 107 else:\r\n 108 raise NVError('Unknown NVML API Error %d' % result)\r\n\r\nNVError: NVML API Error 1: Uninitialized\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport uuid\nfrom collections import namedtuple\nfrom ctypes import c_char, c_char_p, c_int, c_uint, c_ulonglong, byref,\\\n create_string_buffer, Structure, POINTER, CDLL\n\nlogger = logging.getLogger(__name__)\n\n# Some constants taken from cuda.h\nCUDA_SUCCESS = 0\nCU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16\nCU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39\nCU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13\nCU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33\nCU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34\nCU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36\n\n# nvml constants\nNVML_SUCCESS = 0\nNVML_TEMPERATURE_GPU = 0\n\n\nclass _CUuuid_t(Structure):\n _fields_ = [\n ('bytes', c_char * 16)\n ]\nclass _nvmlUtilization_t(Structure):\n _fields_ = [\n ('gpu', c_uint),\n ('memory', c_uint),\n ]\n\nclass _struct_nvmlDevice_t(Structure):\n pass # opaque handle\n_nvmlDevice_t = POINTER(_struct_nvmlDevice_t)\n\nclass _nvmlBAR1Memory_t(Structure):\n _fields_ = [\n ('total', c_ulonglong),\n ('free', c_ulonglong),\n ('used', c_ulonglong),\n ]\n\n\ndef _load_nv_library(*libnames):\n for lib in libnames:\n try:\n return CDLL(lib)\n except OSError:\n continue\n\n\n_cuda_lib = _nvml_lib = None\n\n_cu_device_info = namedtuple('_cu_device_info', 'index uuid name multiprocessors cuda_cores threads')\n_nvml_driver_info = namedtuple('_nvml_driver_info', 'driver_version cuda_version')\n_nvml_device_status = namedtuple(\n '_nvml_device_status', 'gpu_util mem_util temperature fb_total_mem fb_used_mem fb_free_mem')\n\n\n_init_pid = None\n_gpu_count = None\n_driver_info = None\n_device_infos = dict()\n\n\nclass NVError(Exception):\n pass\n\n\ndef _cu_check_error(result):\n if result != CUDA_SUCCESS:\n _error_str = c_char_p()\n _cuda_lib.cuGetErrorString(result, byref(_error_str))\n raise NVError('Device API Error %d: %s' % (result, _error_str.value.decode()))\n\n\n_nvmlErrorString = None\n\n\ndef _nvml_check_error(result):\n global _nvmlErrorString\n if _nvmlErrorString is None:\n _nvmlErrorString = _nvml_lib.nvmlErrorString\n _nvmlErrorString.restype = c_char_p\n\n if result != NVML_SUCCESS:\n _error_str = _nvmlErrorString(result)\n if _error_str:\n raise NVError('NVML API Error %d: %s' % (result, _error_str.decode()))\n else:\n raise NVError('Unknown NVML API Error %d' % result)\n\n\n_cu_process_var_to_cores = {\n (1, 0): 8,\n (1, 1): 8,\n (1, 2): 8,\n (1, 3): 8,\n (2, 0): 32,\n (2, 1): 48,\n}\n\n\ndef _cu_get_processor_cores(major, minor):\n return _cu_process_var_to_cores.get((major, minor), 
192)\n\n\ndef _init_cp():\n global _cuda_lib\n if _init_pid == os.getpid():\n return\n\n _cuda_lib = _load_nv_library('libcuda.so', 'libcuda.dylib', 'cuda.dll')\n\n if _cuda_lib is None:\n return\n try:\n _cu_check_error(_cuda_lib.cuInit(0))\n except NVError:\n logger.exception('Failed to initialize libcuda.')\n return\n\n\ndef _init_nvml():\n global _nvml_lib\n if _init_pid == os.getpid():\n return\n\n _nvml_lib = _load_nv_library('libnvidia-ml.so', 'libnvidia-ml.dylib', 'nvml.dll')\n\n if _nvml_lib is None:\n return\n try:\n _nvml_check_error(_nvml_lib.nvmlInit_v2())\n except NVError:\n logger.exception('Failed to initialize libnvidia-ml.')\n return\n\n\ndef _init():\n global _init_pid\n\n _init_cp()\n _init_nvml()\n\n if _nvml_lib is not None and _cuda_lib is not None:\n _init_pid = os.getpid()\n\n\ndef get_device_count():\n global _gpu_count\n\n if _gpu_count is not None:\n return _gpu_count\n\n _init_nvml()\n if _nvml_lib is None:\n return None\n\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n devices = os.environ['CUDA_VISIBLE_DEVICES'].strip()\n if not devices:\n _gpu_count = 0\n else:\n _gpu_count = len(devices.split(','))\n else:\n n_gpus = c_uint()\n _cu_check_error(_nvml_lib.nvmlDeviceGetCount(byref(n_gpus)))\n _gpu_count = n_gpus.value\n return _gpu_count\n\n\ndef get_driver_info():\n global _driver_info\n\n _init_nvml()\n if _nvml_lib is None:\n return None\n if _driver_info is not None:\n return _driver_info\n\n version_buf = create_string_buffer(100)\n cuda_version = c_uint()\n\n _nvml_check_error(_nvml_lib.nvmlSystemGetDriverVersion(version_buf, len(version_buf)))\n _nvml_check_error(_nvml_lib.nvmlSystemGetCudaDriverVersion(byref(cuda_version)))\n\n _driver_info = _nvml_driver_info(\n driver_version=version_buf.value.decode(),\n cuda_version='%d.%d' % (cuda_version.value // 1000, cuda_version.value % 1000)\n )\n return _driver_info\n\n\ndef get_device_info(dev_index):\n try:\n return _device_infos[dev_index]\n except KeyError:\n pass\n\n _init()\n if _init_pid is None:\n return None\n\n device = c_int()\n name_buf = create_string_buffer(100)\n uuid_t = _CUuuid_t()\n cc_major = c_int()\n cc_minor = c_int()\n cores = c_int()\n threads_per_core = c_int()\n\n _cu_check_error(_cuda_lib.cuDeviceGet(byref(device), c_int(dev_index)))\n _cu_check_error(_cuda_lib.cuDeviceGetName(name_buf, len(name_buf), device))\n _cu_check_error(_cuda_lib.cuDeviceGetUuid(byref(uuid_t), device))\n _cu_check_error(_cuda_lib.cuDeviceComputeCapability(\n byref(cc_major), byref(cc_minor), device))\n _cu_check_error(_cuda_lib.cuDeviceGetAttribute(\n byref(cores), CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device))\n _cu_check_error(_cuda_lib.cuDeviceGetAttribute(\n byref(threads_per_core), CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, device))\n\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n real_dev_index = [int(s) for s in os.environ['CUDA_VISIBLE_DEVICES'].split(',')][dev_index]\n else:\n real_dev_index = dev_index\n\n info = _device_infos[dev_index] = _cu_device_info(\n index=real_dev_index,\n uuid=uuid.UUID(bytes=uuid_t.bytes),\n name=name_buf.value.decode(),\n multiprocessors=cores.value,\n cuda_cores=cores.value * _cu_get_processor_cores(cc_major.value, cc_minor.value),\n threads=cores.value * threads_per_core.value,\n )\n return info\n\n\ndef get_device_status(dev_index):\n _init()\n if _init_pid is None:\n return None\n\n device = _nvmlDevice_t()\n utils = _nvmlUtilization_t()\n temperature = c_uint()\n memory_info = _nvmlBAR1Memory_t()\n\n dev_uuid = get_device_info(dev_index).uuid\n\n uuid_str 
= ('GPU-' + str(dev_uuid)).encode()\n\n _nvml_check_error(_nvml_lib.nvmlDeviceGetHandleByUUID(uuid_str, byref(device)))\n _nvml_check_error(_nvml_lib.nvmlDeviceGetUtilizationRates(device, byref(utils)))\n _nvml_check_error(_nvml_lib.nvmlDeviceGetTemperature(\n device, NVML_TEMPERATURE_GPU, byref(temperature)))\n _nvml_check_error(_nvml_lib.nvmlDeviceGetBAR1MemoryInfo(device, byref(memory_info)))\n\n return _nvml_device_status(\n gpu_util=utils.gpu,\n mem_util=utils.memory,\n temperature=temperature.value,\n fb_total_mem=memory_info.total,\n fb_free_mem=memory_info.free,\n fb_used_mem=memory_info.used,\n )\n", "path": "mars/lib/nvutils.py"}]}
| 3,700 | 962 |
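
The core of the patch above is that the NVML status code travels with the exception, so initialization can tell "driver present but no usable device" apart from a real failure and downgrade it to a warning. The sketch below is a deliberately reduced version of that pattern, not the Mars implementation; `call_init` is a hypothetical stand-in for the underlying ctypes call, and only the value `NVML_DRIVER_NOT_LOADED = 9` is taken from the diff.

```python
import logging

logger = logging.getLogger(__name__)

NVML_SUCCESS = 0
NVML_DRIVER_NOT_LOADED = 9  # status treated as "no usable CUDA device"


class NVMLAPIError(Exception):
    def __init__(self, msg, errno=None):
        self.errno = errno
        super().__init__(msg or "Unknown error")


def _check(result, message="NVML call failed"):
    # Keep the numeric status on the exception so callers can branch on it.
    if result != NVML_SUCCESS:
        raise NVMLAPIError(message, errno=result)


def init_nvml(call_init):
    # call_init is assumed to be a zero-argument callable returning a status code.
    try:
        _check(call_init())
    except NVMLAPIError as exc:
        if exc.errno == NVML_DRIVER_NOT_LOADED:
            logger.warning("No usable CUDA device: %s", exc)
            return None  # degrade gracefully instead of raising
        raise  # unexpected errors still propagate
    return True


# Example with a fake init that reports "driver not loaded".
print(init_nvml(lambda: NVML_DRIVER_NOT_LOADED))  # -> None (after a warning)
```
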
gh_patches_debug_3730
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-2279
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installed Plugins NavMenuItem not adhering to `is_staff` permission
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.8
* Nautobot version: 1.3.9
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
Caused by #1844
### Steps to Reproduce
1. Set the user's `is_active` and `is_staff` flags to true, but leave `is_superuser` set to false
2. Attempt to browse to installed plugins from nav menu
3.
<!-- What did you expect to happen? -->
### Expected Behavior
`is_staff` provides visibility to the menu item without requiring `is_superuser`
<!-- What happened instead? -->
### Observed Behavior
The NavMenuItem is only present if the user has `is_superuser`
</issue>
<code>
[start of nautobot/core/authentication.py]
1 import logging
2 from collections import defaultdict
3
4 from django.conf import settings
5 from django.contrib.auth.backends import (
6 ModelBackend,
7 RemoteUserBackend as _RemoteUserBackend,
8 )
9 from django.contrib.auth.models import Group
10 from django.db.models import Q
11
12 from nautobot.users.models import ObjectPermission
13 from nautobot.utilities.permissions import (
14 permission_is_exempt,
15 resolve_permission,
16 resolve_permission_ct,
17 )
18
19 logger = logging.getLogger("nautobot.authentication")
20
21
22 class ObjectPermissionBackend(ModelBackend):
23 def get_all_permissions(self, user_obj, obj=None):
24 if not user_obj.is_active or user_obj.is_anonymous:
25 return dict()
26 if not hasattr(user_obj, "_object_perm_cache"):
27 user_obj._object_perm_cache = self.get_object_permissions(user_obj)
28 return user_obj._object_perm_cache
29
30 def get_object_permissions(self, user_obj):
31 """
32 Return all permissions granted to the user by an ObjectPermission.
33 """
34 # Retrieve all assigned and enabled ObjectPermissions
35 object_permissions = ObjectPermission.objects.filter(
36 Q(users=user_obj) | Q(groups__user=user_obj), enabled=True
37 ).prefetch_related("object_types")
38
39 # Create a dictionary mapping permissions to their constraints
40 perms = defaultdict(list)
41 for obj_perm in object_permissions:
42 for object_type in obj_perm.object_types.all():
43 for action in obj_perm.actions:
44 perm_name = f"{object_type.app_label}.{action}_{object_type.model}"
45 perms[perm_name].extend(obj_perm.list_constraints())
46
47 return perms
48
49 def has_perm(self, user_obj, perm, obj=None):
50 if perm == "is_staff":
51 return user_obj.is_active and (user_obj.obj.is_staff or user_obj.is_superuser)
52
53 app_label, action, model_name = resolve_permission(perm)
54
55 if app_label == "users" and model_name == "admingroup":
56 perm = perm.replace("users", "auth").replace("admingroup", "group")
57
58 # Superusers implicitly have all permissions
59 if user_obj.is_active and user_obj.is_superuser:
60 return True
61
62 # Permission is exempt from enforcement (i.e. listed in EXEMPT_VIEW_PERMISSIONS)
63 if permission_is_exempt(perm):
64 return True
65
66 # Handle inactive/anonymous users
67 if not user_obj.is_active or user_obj.is_anonymous:
68 return False
69
70 # If no applicable ObjectPermissions have been created for this user/permission, deny permission
71 if perm not in self.get_all_permissions(user_obj):
72 return False
73
74 # If no object has been specified, grant permission. (The presence of a permission in this set tells
75 # us that the user has permission for *some* objects, but not necessarily a specific object.)
76 if obj is None:
77 return True
78
79 # Sanity check: Ensure that the requested permission applies to the specified object
80 model = obj._meta.model
81 if model._meta.label_lower != ".".join((app_label, model_name)):
82 raise ValueError(f"Invalid permission {perm} for model {model}")
83
84 # Compile a query filter that matches all instances of the specified model
85 obj_perm_constraints = self.get_all_permissions(user_obj)[perm]
86 constraints = Q()
87 for perm_constraints in obj_perm_constraints:
88 if perm_constraints:
89 constraints |= Q(**perm_constraints)
90 else:
91 # Found ObjectPermission with null constraints; allow model-level access
92 constraints = Q()
93 break
94
95 # Permission to perform the requested action on the object depends on whether the specified object matches
96 # the specified constraints. Note that this check is made against the *database* record representing the object,
97 # not the instance itself.
98 return model.objects.filter(constraints, pk=obj.pk).exists()
99
100
101 class RemoteUserBackend(_RemoteUserBackend):
102 """
103 Custom implementation of Django's RemoteUserBackend which provides configuration hooks for basic customization.
104 """
105
106 @property
107 def create_unknown_user(self):
108 return settings.REMOTE_AUTH_AUTO_CREATE_USER
109
110 def has_perm(self, user_obj, perm, obj=None):
111 return False
112
113
114 def assign_groups_to_user(user, groups=None):
115 """
116 Assign a specified user a given list of groups.
117
118 :param user: The user to assign the permissions
119 :param group: A list of group names to add the user to
120 """
121 if groups is None:
122 groups = []
123 group_list = []
124 for name in groups:
125 try:
126 group_list.append(Group.objects.get(name=name))
127 except Group.DoesNotExist:
128 logging.error(f"Could not assign group {name} to remotely-authenticated user {user}: Group not found")
129 if group_list:
130 user.groups.add(*group_list)
131 logger.debug(f"Assigned groups to remotely-authenticated user {user}: {group_list}")
132
133
134 def assign_permissions_to_user(user, permissions=None):
135 """
136 Assign a specified user a given set of permissions.
137
138 :param user: The user to assign the permissions
139 :param permissions: A dictionary of permissions, with the permission name <app_label>.<action>_<model> as the key and constraints as values
140 """
141 if permissions is None:
142 permissions = {}
143 permissions_list = []
144 for permission_name, constraints in permissions.items():
145 try:
146 object_type, action = resolve_permission_ct(permission_name)
147 # TODO: Merge multiple actions into a single ObjectPermission per content type
148 obj_perm = ObjectPermission(name=permission_name, actions=[action], constraints=constraints)
149 obj_perm.save()
150 obj_perm.users.add(user)
151 obj_perm.object_types.add(object_type)
152 permissions_list.append(permission_name)
153 except ValueError:
154 logging.error(
155 f"Invalid permission name: '{permission_name}'. Permissions must be in the form "
156 "<app>.<action>_<model>. (Example: dcim.add_site)"
157 )
158 if permissions_list:
159 logger.debug(f"Assigned permissions to remotely-authenticated user {user}: {permissions_list}")
160
[end of nautobot/core/authentication.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/core/authentication.py b/nautobot/core/authentication.py
--- a/nautobot/core/authentication.py
+++ b/nautobot/core/authentication.py
@@ -48,7 +48,7 @@
def has_perm(self, user_obj, perm, obj=None):
if perm == "is_staff":
- return user_obj.is_active and (user_obj.obj.is_staff or user_obj.is_superuser)
+ return user_obj.is_active and (user_obj.is_staff or user_obj.is_superuser)
app_label, action, model_name = resolve_permission(perm)
|
{"golden_diff": "diff --git a/nautobot/core/authentication.py b/nautobot/core/authentication.py\n--- a/nautobot/core/authentication.py\n+++ b/nautobot/core/authentication.py\n@@ -48,7 +48,7 @@\n \n def has_perm(self, user_obj, perm, obj=None):\n if perm == \"is_staff\":\n- return user_obj.is_active and (user_obj.obj.is_staff or user_obj.is_superuser)\n+ return user_obj.is_active and (user_obj.is_staff or user_obj.is_superuser)\n \n app_label, action, model_name = resolve_permission(perm)\n", "issue": "Installed Plugins NavMenuItem not adhering to `is_staff` permission\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.8\r\n* Nautobot version: 1.3.9\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\nCaused by #1844 \r\n\r\n### Steps to Reproduce\r\n1. Set user permissions to `is_active` and `is_staff` to true BUT with `is_superuser` to false\r\n2. Attempt to browse to installed plugins from nav menu\r\n3.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n`is_staff` provide visibility to menu item without requiring `is_superuser`\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nNavMenuItem is only present if user has `is_superuser`\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\n\nfrom django.conf import settings\nfrom django.contrib.auth.backends import (\n ModelBackend,\n RemoteUserBackend as _RemoteUserBackend,\n)\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom nautobot.users.models import ObjectPermission\nfrom nautobot.utilities.permissions import (\n permission_is_exempt,\n resolve_permission,\n resolve_permission_ct,\n)\n\nlogger = logging.getLogger(\"nautobot.authentication\")\n\n\nclass ObjectPermissionBackend(ModelBackend):\n def get_all_permissions(self, user_obj, obj=None):\n if not user_obj.is_active or user_obj.is_anonymous:\n return dict()\n if not hasattr(user_obj, \"_object_perm_cache\"):\n user_obj._object_perm_cache = self.get_object_permissions(user_obj)\n return user_obj._object_perm_cache\n\n def get_object_permissions(self, user_obj):\n \"\"\"\n Return all permissions granted to the user by an ObjectPermission.\n \"\"\"\n # Retrieve all assigned and enabled ObjectPermissions\n object_permissions = ObjectPermission.objects.filter(\n Q(users=user_obj) | Q(groups__user=user_obj), enabled=True\n ).prefetch_related(\"object_types\")\n\n # Create a dictionary mapping permissions to their constraints\n perms = defaultdict(list)\n for obj_perm in object_permissions:\n for object_type in obj_perm.object_types.all():\n for action in obj_perm.actions:\n perm_name = f\"{object_type.app_label}.{action}_{object_type.model}\"\n perms[perm_name].extend(obj_perm.list_constraints())\n\n return perms\n\n def has_perm(self, user_obj, perm, obj=None):\n if perm == \"is_staff\":\n return user_obj.is_active and (user_obj.obj.is_staff or user_obj.is_superuser)\n\n app_label, action, model_name = resolve_permission(perm)\n\n if app_label == \"users\" and model_name == \"admingroup\":\n perm = perm.replace(\"users\", \"auth\").replace(\"admingroup\", \"group\")\n\n # Superusers implicitly have all permissions\n if user_obj.is_active and user_obj.is_superuser:\n return True\n\n # Permission is exempt from enforcement (i.e. listed in EXEMPT_VIEW_PERMISSIONS)\n if permission_is_exempt(perm):\n return True\n\n # Handle inactive/anonymous users\n if not user_obj.is_active or user_obj.is_anonymous:\n return False\n\n # If no applicable ObjectPermissions have been created for this user/permission, deny permission\n if perm not in self.get_all_permissions(user_obj):\n return False\n\n # If no object has been specified, grant permission. 
(The presence of a permission in this set tells\n # us that the user has permission for *some* objects, but not necessarily a specific object.)\n if obj is None:\n return True\n\n # Sanity check: Ensure that the requested permission applies to the specified object\n model = obj._meta.model\n if model._meta.label_lower != \".\".join((app_label, model_name)):\n raise ValueError(f\"Invalid permission {perm} for model {model}\")\n\n # Compile a query filter that matches all instances of the specified model\n obj_perm_constraints = self.get_all_permissions(user_obj)[perm]\n constraints = Q()\n for perm_constraints in obj_perm_constraints:\n if perm_constraints:\n constraints |= Q(**perm_constraints)\n else:\n # Found ObjectPermission with null constraints; allow model-level access\n constraints = Q()\n break\n\n # Permission to perform the requested action on the object depends on whether the specified object matches\n # the specified constraints. Note that this check is made against the *database* record representing the object,\n # not the instance itself.\n return model.objects.filter(constraints, pk=obj.pk).exists()\n\n\nclass RemoteUserBackend(_RemoteUserBackend):\n \"\"\"\n Custom implementation of Django's RemoteUserBackend which provides configuration hooks for basic customization.\n \"\"\"\n\n @property\n def create_unknown_user(self):\n return settings.REMOTE_AUTH_AUTO_CREATE_USER\n\n def has_perm(self, user_obj, perm, obj=None):\n return False\n\n\ndef assign_groups_to_user(user, groups=None):\n \"\"\"\n Assign a specified user a given list of groups.\n\n :param user: The user to assign the permissions\n :param group: A list of group names to add the user to\n \"\"\"\n if groups is None:\n groups = []\n group_list = []\n for name in groups:\n try:\n group_list.append(Group.objects.get(name=name))\n except Group.DoesNotExist:\n logging.error(f\"Could not assign group {name} to remotely-authenticated user {user}: Group not found\")\n if group_list:\n user.groups.add(*group_list)\n logger.debug(f\"Assigned groups to remotely-authenticated user {user}: {group_list}\")\n\n\ndef assign_permissions_to_user(user, permissions=None):\n \"\"\"\n Assign a specified user a given set of permissions.\n\n :param user: The user to assign the permissions\n :param permissions: A dictionary of permissions, with the permission name <app_label>.<action>_<model> as the key and constraints as values\n \"\"\"\n if permissions is None:\n permissions = {}\n permissions_list = []\n for permission_name, constraints in permissions.items():\n try:\n object_type, action = resolve_permission_ct(permission_name)\n # TODO: Merge multiple actions into a single ObjectPermission per content type\n obj_perm = ObjectPermission(name=permission_name, actions=[action], constraints=constraints)\n obj_perm.save()\n obj_perm.users.add(user)\n obj_perm.object_types.add(object_type)\n permissions_list.append(permission_name)\n except ValueError:\n logging.error(\n f\"Invalid permission name: '{permission_name}'. Permissions must be in the form \"\n \"<app>.<action>_<model>. (Example: dcim.add_site)\"\n )\n if permissions_list:\n logger.debug(f\"Assigned permissions to remotely-authenticated user {user}: {permissions_list}\")\n", "path": "nautobot/core/authentication.py"}]}
| 2,536 | 125 |
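
For the record above it may help to spell out why the tiny change works: the `user_obj` passed to the backend is already the user instance, so the original `user_obj.obj.is_staff` never read the staff flag as intended, and the nav item ended up visible to superusers only. Below is a small self-contained stand-in (a plain dataclass rather than Django's real `User` model, so the names are illustrative only) showing the corrected truth table.

```python
from dataclasses import dataclass


@dataclass
class FakeUser:
    # Hypothetical stand-in for django.contrib.auth's User; only the three
    # flags the corrected expression reads.
    is_active: bool = True
    is_staff: bool = False
    is_superuser: bool = False


def has_is_staff_perm(user):
    # Mirrors the corrected check from the patch: active AND (staff OR superuser).
    return user.is_active and (user.is_staff or user.is_superuser)


assert has_is_staff_perm(FakeUser(is_staff=True))
assert has_is_staff_perm(FakeUser(is_superuser=True))
assert not has_is_staff_perm(FakeUser())
```
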
gh_patches_debug_38340
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-2602
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Poetry 1.1.0a2 includes python code in the version string
To reproduce:
1. `poetry init` with default choices; no dependencies.
1. `poetry add pytest-cov`
1. `poetry install`
The last command prints:
```
Installing dependencies from lock file
Package operations: 0 installs, 1 update, 0 removals
- Updating pytest-cov (2.10.0 import os, sys;exec('if \'COV_CORE_SOURCE\' in os.environ:\n try:\n from pytest_cov.embed import init\n init()\n except Exception as exc:\n sys.stderr.write(\n "pytest-cov: Failed to setup subprocess coverage. "\n "Environ: {0!r} "\n "Exception: {1!r}\\n".format(\n dict((k, v) for k, v in os.environ.items() if k.startswith(\'COV_CORE\')),\n exc\n )\n )\n') -> 2.10.0)
```
</issue>
<code>
[start of poetry/repositories/installed_repository.py]
1 from poetry.core.packages import Package
2 from poetry.utils._compat import Path
3 from poetry.utils._compat import metadata
4 from poetry.utils.env import Env
5
6 from .repository import Repository
7
8
9 _VENDORS = Path(__file__).parent.parent.joinpath("_vendor")
10
11
12 class InstalledRepository(Repository):
13 @classmethod
14 def load(cls, env): # type: (Env) -> InstalledRepository
15 """
16 Load installed packages.
17 """
18 repo = cls()
19 seen = set()
20
21 for entry in reversed(env.sys_path):
22 for distribution in sorted(
23 metadata.distributions(path=[entry]), key=lambda d: str(d._path),
24 ):
25 name = distribution.metadata["name"]
26 path = Path(str(distribution._path))
27 version = distribution.metadata["version"]
28 package = Package(name, version, version)
29 package.description = distribution.metadata.get("summary", "")
30
31 if package.name in seen:
32 continue
33
34 try:
35 path.relative_to(_VENDORS)
36 except ValueError:
37 pass
38 else:
39 continue
40
41 seen.add(package.name)
42
43 repo.add_package(package)
44
45 is_standard_package = True
46 try:
47 path.relative_to(env.site_packages)
48 except ValueError:
49 is_standard_package = False
50
51 if is_standard_package:
52 if (
53 path.name.endswith(".dist-info")
54 and env.site_packages.joinpath(
55 "{}.pth".format(package.pretty_name)
56 ).exists()
57 ):
58 with env.site_packages.joinpath(
59 "{}.pth".format(package.pretty_name)
60 ).open() as f:
61 directory = Path(f.readline().strip())
62 package.source_type = "directory"
63 package.source_url = directory.as_posix()
64
65 continue
66
67 src_path = env.path / "src"
68
69 # A VCS dependency should have been installed
70 # in the src directory. If not, it's a path dependency
71 try:
72 path.relative_to(src_path)
73
74 from poetry.core.vcs.git import Git
75
76 git = Git()
77 revision = git.rev_parse("HEAD", src_path / package.name).strip()
78 url = git.remote_url(src_path / package.name)
79
80 package.source_type = "git"
81 package.source_url = url
82 package.source_reference = revision
83 except ValueError:
84 package.source_type = "directory"
85 package.source_url = str(path.parent)
86
87 return repo
88
[end of poetry/repositories/installed_repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py
--- a/poetry/repositories/installed_repository.py
+++ b/poetry/repositories/installed_repository.py
@@ -1,3 +1,5 @@
+from typing import Set
+
from poetry.core.packages import Package
from poetry.utils._compat import Path
from poetry.utils._compat import metadata
@@ -10,6 +12,37 @@
class InstalledRepository(Repository):
+ @classmethod
+ def get_package_paths(cls, sitedir, name): # type: (Path, str) -> Set[Path]
+ """
+ Process a .pth file within the site-packages directory, and return any valid
+ paths. We skip executable .pth files as there is no reliable means to do this
+ without side-effects to current run-time. Mo check is made that the item refers
+ to a directory rather than a file, however, in order to maintain backwards
+ compatibility, we allow non-existing paths to be discovered. The latter
+ behaviour is different to how Python's site-specific hook configuration works.
+
+ Reference: https://docs.python.org/3.8/library/site.html
+
+ :param sitedir: The site-packages directory to search for .pth file.
+ :param name: The name of the package to search .pth file for.
+ :return: A `Set` of valid `Path` objects.
+ """
+ paths = set()
+
+ pth_file = sitedir.joinpath("{}.pth".format(name))
+ if pth_file.exists():
+ with pth_file.open() as f:
+ for line in f:
+ line = line.strip()
+ if line and not line.startswith(("#", "import ", "import\t")):
+ path = Path(line)
+ if not path.is_absolute():
+ path = sitedir.joinpath(path)
+ paths.add(path)
+
+ return paths
+
@classmethod
def load(cls, env): # type: (Env) -> InstalledRepository
"""
@@ -49,19 +82,14 @@
is_standard_package = False
if is_standard_package:
- if (
- path.name.endswith(".dist-info")
- and env.site_packages.joinpath(
- "{}.pth".format(package.pretty_name)
- ).exists()
- ):
- with env.site_packages.joinpath(
- "{}.pth".format(package.pretty_name)
- ).open() as f:
- directory = Path(f.readline().strip())
+ if path.name.endswith(".dist-info"):
+ paths = cls.get_package_paths(
+ sitedir=env.site_packages, name=package.pretty_name
+ )
+ if paths:
+ # TODO: handle multiple source directories?
package.source_type = "directory"
- package.source_url = directory.as_posix()
-
+ package.source_url = paths.pop().as_posix()
continue
src_path = env.path / "src"
|
{"golden_diff": "diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py\n--- a/poetry/repositories/installed_repository.py\n+++ b/poetry/repositories/installed_repository.py\n@@ -1,3 +1,5 @@\n+from typing import Set\n+\n from poetry.core.packages import Package\n from poetry.utils._compat import Path\n from poetry.utils._compat import metadata\n@@ -10,6 +12,37 @@\n \n \n class InstalledRepository(Repository):\n+ @classmethod\n+ def get_package_paths(cls, sitedir, name): # type: (Path, str) -> Set[Path]\n+ \"\"\"\n+ Process a .pth file within the site-packages directory, and return any valid\n+ paths. We skip executable .pth files as there is no reliable means to do this\n+ without side-effects to current run-time. Mo check is made that the item refers\n+ to a directory rather than a file, however, in order to maintain backwards\n+ compatibility, we allow non-existing paths to be discovered. The latter\n+ behaviour is different to how Python's site-specific hook configuration works.\n+\n+ Reference: https://docs.python.org/3.8/library/site.html\n+\n+ :param sitedir: The site-packages directory to search for .pth file.\n+ :param name: The name of the package to search .pth file for.\n+ :return: A `Set` of valid `Path` objects.\n+ \"\"\"\n+ paths = set()\n+\n+ pth_file = sitedir.joinpath(\"{}.pth\".format(name))\n+ if pth_file.exists():\n+ with pth_file.open() as f:\n+ for line in f:\n+ line = line.strip()\n+ if line and not line.startswith((\"#\", \"import \", \"import\\t\")):\n+ path = Path(line)\n+ if not path.is_absolute():\n+ path = sitedir.joinpath(path)\n+ paths.add(path)\n+\n+ return paths\n+\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n@@ -49,19 +82,14 @@\n is_standard_package = False\n \n if is_standard_package:\n- if (\n- path.name.endswith(\".dist-info\")\n- and env.site_packages.joinpath(\n- \"{}.pth\".format(package.pretty_name)\n- ).exists()\n- ):\n- with env.site_packages.joinpath(\n- \"{}.pth\".format(package.pretty_name)\n- ).open() as f:\n- directory = Path(f.readline().strip())\n+ if path.name.endswith(\".dist-info\"):\n+ paths = cls.get_package_paths(\n+ sitedir=env.site_packages, name=package.pretty_name\n+ )\n+ if paths:\n+ # TODO: handle multiple source directories?\n package.source_type = \"directory\"\n- package.source_url = directory.as_posix()\n-\n+ package.source_url = paths.pop().as_posix()\n continue\n \n src_path = env.path / \"src\"\n", "issue": "Poetry 1.1.0a2 includes python code in the version string\nTo reproduce:\r\n\r\n1. `poetry init` with default choices; no dependencies.\r\n1. `poetry add pytest-cov`\r\n1. `poetry install`\r\n\r\nThe last command prints:\r\n```\r\nInstalling dependencies from lock file\r\n\r\nPackage operations: 0 installs, 1 update, 0 removals\r\n\r\n- Updating pytest-cov (2.10.0 import os, sys;exec('if \\'COV_CORE_SOURCE\\' in os.environ:\\n try:\\n from pytest_cov.embed import init\\n init()\\n except Exception as exc:\\n sys.stderr.write(\\n \"pytest-cov: Failed to setup subprocess coverage. 
\"\\n \"Environ: {0!r} \"\\n \"Exception: {1!r}\\\\n\".format(\\n dict((k, v) for k, v in os.environ.items() if k.startswith(\\'COV_CORE\\')),\\n exc\\n )\\n )\\n') -> 2.10.0) \r\n```\n", "before_files": [{"content": "from poetry.core.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils._compat import metadata\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\n_VENDORS = Path(__file__).parent.parent.joinpath(\"_vendor\")\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n \"\"\"\n repo = cls()\n seen = set()\n\n for entry in reversed(env.sys_path):\n for distribution in sorted(\n metadata.distributions(path=[entry]), key=lambda d: str(d._path),\n ):\n name = distribution.metadata[\"name\"]\n path = Path(str(distribution._path))\n version = distribution.metadata[\"version\"]\n package = Package(name, version, version)\n package.description = distribution.metadata.get(\"summary\", \"\")\n\n if package.name in seen:\n continue\n\n try:\n path.relative_to(_VENDORS)\n except ValueError:\n pass\n else:\n continue\n\n seen.add(package.name)\n\n repo.add_package(package)\n\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n if (\n path.name.endswith(\".dist-info\")\n and env.site_packages.joinpath(\n \"{}.pth\".format(package.pretty_name)\n ).exists()\n ):\n with env.site_packages.joinpath(\n \"{}.pth\".format(package.pretty_name)\n ).open() as f:\n directory = Path(f.readline().strip())\n package.source_type = \"directory\"\n package.source_url = directory.as_posix()\n\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.core.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}]}
| 1,447 | 671 |
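The patch above fixes the garbled version string by parsing `.pth` files defensively instead of treating their first line as a directory. As a hedged illustration of that rule (skip blank lines, comments, and executable `import ...` lines), the sketch below uses a hypothetical helper name `read_pth_paths`; it mirrors the `get_package_paths` logic from the golden diff and is not Poetry's actual API.

```python
from pathlib import Path
import tempfile


def read_pth_paths(sitedir, name):
    """Collect the paths listed in <name>.pth inside sitedir."""
    paths = set()
    pth_file = sitedir / "{}.pth".format(name)
    if pth_file.exists():
        with pth_file.open() as f:
            for line in f:
                line = line.strip()
                # Skip blanks, comments and executable lines such as the
                # "import os, sys;exec(...)" hook that pytest-cov installs.
                if not line or line.startswith(("#", "import ", "import\t")):
                    continue
                path = Path(line)
                if not path.is_absolute():
                    path = sitedir / path
                paths.add(path)
    return paths


# Usage: an executable-only .pth file yields no source directories.
with tempfile.TemporaryDirectory() as tmp:
    site_dir = Path(tmp)
    (site_dir / "pytest-cov.pth").write_text("import os, sys;exec('pass')\n")
    print(read_pth_paths(site_dir, "pytest-cov"))  # set()
```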
gh_patches_debug_4899
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-18924
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dropout3d
</issue>
<code>
[start of ivy/functional/frontends/paddle/nn/functional/common.py]
1 # local
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @to_ivy_arrays_and_back
8 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
9 def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
10 if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
11 numerator = ivy.sum(x1 * x2, axis=axis)
12 x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
13 x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
14 else:
15 numerator = ivy.sum(x1 * x2)
16 x1_squared_norm = ivy.sum(ivy.square(x1))
17 x2_squared_norm = ivy.sum(ivy.square(x2))
18
19 x1_norm = ivy.sqrt(x1_squared_norm)
20 x2_norm = ivy.sqrt(x2_squared_norm)
21 norm_mm = x1_norm * x2_norm
22 denominator = ivy.maximum(norm_mm, eps)
23
24 cosine = numerator / denominator
25 return cosine
26
27
28 @to_ivy_arrays_and_back
29 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
30 def dropout2d(x, *, p=0.5, training=True, data_format="NCHW", name=None):
31 return ivy.dropout2d(x, p=p, training=training, data_format=data_format)
32
33
34 def get_mask(shape, device, prob, seed=None):
35 mask = ivy.where(
36 ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,
37 0.0,
38 1.0,
39 )
40 return mask
41
42
43 @to_ivy_arrays_and_back
44 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
45 def dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None):
46 if axis > 1:
47 raise ValueError("Axis value can only be 0 or 1 or None.")
48 elif axis is None or (isinstance(axis, list) and len(axis) == 2):
49 mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)
50 elif axis == 0:
51 mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)
52 mask = ivy.broadcast_to(mask, x.shape)
53 elif axis == 1:
54 mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)
55 mask = ivy.broadcast_to(mask, x.shape)
56 if mode == "upscale_in_train":
57 if training:
58 out = ivy.multiply(x, mask)
59 ret = ivy.multiply(out, 1.0 / (1.0 - p))
60 else:
61 ret = x
62 else:
63 if training:
64 ret = ivy.multiply(x, mask)
65 else:
66 ret = ivy.multiply(x, (1.0 - p))
67 return ret
68
69
70 @to_ivy_arrays_and_back
71 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
72 def zeropad2d(x, padding, data_format="NCHW", name=None):
73 if ivy.is_array(padding):
74 padding = padding.to_list()
75 if isinstance(padding, int):
76 padding = [padding, padding, padding, padding]
77 if len(padding) != 4:
78 raise ValueError("Padding length should be 4.")
79 if x.ndim != 4:
80 raise ValueError("Input x must be 4-dimensional.")
81 if data_format == "NCHW":
82 padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))
83 elif data_format == "NHWC":
84 padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))
85 else:
86 raise ValueError("Unknown data_format: {}".format(data_format))
87 return ivy.pad(x, padding, mode="constant", constant_values=0.0)
88
89
90 @to_ivy_arrays_and_back
91 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
92 def interpolate(
93 x,
94 size=None,
95 scale_factor=None,
96 mode="nearest",
97 align_corners=False,
98 align_mode=0,
99 data_format="NCHW",
100 name=None,
101 ):
102 return ivy.interpolate(
103 x, size, mode=mode, scale_factor=scale_factor, align_corners=align_corners
104 )
105
106
107 @to_ivy_arrays_and_back
108 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
109 def linear(x, weight, bias=None, name=None):
110 weight = ivy.swapaxes(weight, -1, -2)
111 return ivy.linear(x, weight, bias=bias)
112
[end of ivy/functional/frontends/paddle/nn/functional/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -109,3 +109,9 @@
def linear(x, weight, bias=None, name=None):
weight = ivy.swapaxes(weight, -1, -2)
return ivy.linear(x, weight, bias=bias)
+
+
+@to_ivy_arrays_and_back
+@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
+def dropout3d(x, p=0.5, training=True, data_format="NCDHW", name=None):
+ return ivy.dropout3d(x, p, training=training, data_format=data_format)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -109,3 +109,9 @@\n def linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+def dropout3d(x, p=0.5, training=True, data_format=\"NCDHW\", name=None):\n+ return ivy.dropout3d(x, p, training=training, data_format=data_format)\n", "issue": "dropout3d\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], 
padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef interpolate(\n x,\n size=None,\n scale_factor=None,\n mode=\"nearest\",\n align_corners=False,\n align_mode=0,\n data_format=\"NCHW\",\n name=None,\n):\n return ivy.interpolate(\n x, size, mode=mode, scale_factor=scale_factor, align_corners=align_corners\n )\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef linear(x, weight, bias=None, name=None):\n weight = ivy.swapaxes(weight, -1, -2)\n return ivy.linear(x, weight, bias=bias)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]}
| 1,962 | 210 |
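For context on what the new `dropout3d` frontend delegates to `ivy.dropout3d` for, the NumPy sketch below illustrates the usual channel-wise dropout semantics on 5-D `NCDHW`/`NDHWC` tensors. It is an independent reference written for this note, not Ivy's or Paddle's implementation, and the name `dropout3d_reference` is made up.

```python
import numpy as np


def dropout3d_reference(x, p=0.5, training=True, data_format="NCDHW"):
    """Zero whole channels of a 5-D tensor with probability p (training only)."""
    if not training or p == 0.0:
        return x
    if data_format == "NCDHW":
        mask_shape = (x.shape[0], x.shape[1], 1, 1, 1)
    elif data_format == "NDHWC":
        mask_shape = (x.shape[0], 1, 1, 1, x.shape[-1])
    else:
        raise ValueError("Unknown data_format: {}".format(data_format))
    keep = (np.random.rand(*mask_shape) >= p).astype(x.dtype)
    # Rescale the surviving channels so the expected activation is unchanged.
    return x * keep / (1.0 - p)


x = np.random.rand(2, 3, 4, 4, 4).astype("float32")
print(dropout3d_reference(x, p=0.5).shape)  # (2, 3, 4, 4, 4)
```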
gh_patches_debug_18102
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-417
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running DVC outside of Git dir
We should handle all the cases like this.
```
cd /
$ dvc repro
No handlers could be found for logger "dvc"
```
</issue>
<code>
[start of dvc/logger.py]
1 import sys
2 import logging
3
4 import colorama
5
6
7 colorama.init()
8
9
10 class Logger(object):
11 FMT = '%(message)s'
12 DEFAULT_LEVEL = logging.INFO
13
14 LEVEL_MAP = {
15 'debug': logging.DEBUG,
16 'info': logging.INFO,
17 'warn': logging.WARNING,
18 'error': logging.ERROR
19 }
20
21 COLOR_MAP = {
22 'debug': colorama.Fore.BLUE,
23 'warn': colorama.Fore.YELLOW,
24 'error': colorama.Fore.RED
25 }
26
27 def __init__(self, config=None):
28 sh = logging.StreamHandler(sys.stdout)
29 sh.setFormatter(logging.Formatter(self.FMT))
30 sh.setLevel(logging.DEBUG)
31
32 self.logger().addHandler(sh)
33 level = None
34 if config:
35 level = config['Global'].get('LogLevel', None)
36 self.set_level(level)
37
38 @staticmethod
39 def logger():
40 return logging.getLogger('dvc')
41
42 @staticmethod
43 def set_level(level=None):
44 if not level:
45 lvl = Logger.DEFAULT_LEVEL
46 else:
47 lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)
48 Logger.logger().setLevel(lvl)
49
50 @staticmethod
51 def be_quiet():
52 Logger.logger().setLevel(logging.CRITICAL)
53
54 @staticmethod
55 def be_verbose():
56 Logger.logger().setLevel(logging.DEBUG)
57
58 @staticmethod
59 def colorize(msg, typ):
60 header = ''
61 footer = ''
62
63 if sys.stdout.isatty():
64 header = Logger.COLOR_MAP.get(typ.lower(), '')
65 footer = colorama.Style.RESET_ALL
66
67 return u'{}{}{}'.format(header, msg, footer)
68
69 @staticmethod
70 def error(msg, **kwargs):
71 exc_info = Logger.logger().getEffectiveLevel() == logging.DEBUG
72 return Logger.logger().error(Logger.colorize(msg, 'error'), exc_info=exc_info, **kwargs)
73
74 @staticmethod
75 def warn(msg, **kwargs):
76 return Logger.logger().warn(Logger.colorize(msg, 'warn'), **kwargs)
77
78 @staticmethod
79 def debug(msg, **kwargs):
80 return Logger.logger().debug(Logger.colorize(msg, 'debug'), **kwargs)
81
82 @staticmethod
83 def info(msg, **kwargs):
84 return Logger.logger().info(Logger.colorize(msg, 'info'), **kwargs)
85
[end of dvc/logger.py]
[start of dvc/main.py]
1 from dvc.logger import Logger
2 from dvc.cli import parse_args
3
4 def main(argv=None):
5 args = parse_args(argv)
6
7 try:
8 cmd = args.func(args)
9 except Exception as ex:
10 Logger.error('Initialization error: {}'.format(str(ex)))
11 return 255
12
13 return cmd.run_cmd()
14
[end of dvc/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/logger.py b/dvc/logger.py
--- a/dvc/logger.py
+++ b/dvc/logger.py
@@ -25,15 +25,19 @@
}
def __init__(self, config=None):
+ self.init(config)
+
+ @staticmethod
+ def init(config=None):
sh = logging.StreamHandler(sys.stdout)
- sh.setFormatter(logging.Formatter(self.FMT))
+ sh.setFormatter(logging.Formatter(Logger.FMT))
sh.setLevel(logging.DEBUG)
- self.logger().addHandler(sh)
+ Logger.logger().addHandler(sh)
level = None
if config:
level = config['Global'].get('LogLevel', None)
- self.set_level(level)
+ Logger.set_level(level)
@staticmethod
def logger():
diff --git a/dvc/main.py b/dvc/main.py
--- a/dvc/main.py
+++ b/dvc/main.py
@@ -2,6 +2,8 @@
from dvc.cli import parse_args
def main(argv=None):
+ Logger.init()
+
args = parse_args(argv)
try:
|
{"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -25,15 +25,19 @@\n }\n \n def __init__(self, config=None):\n+ self.init(config)\n+\n+ @staticmethod\n+ def init(config=None):\n sh = logging.StreamHandler(sys.stdout)\n- sh.setFormatter(logging.Formatter(self.FMT))\n+ sh.setFormatter(logging.Formatter(Logger.FMT))\n sh.setLevel(logging.DEBUG)\n \n- self.logger().addHandler(sh)\n+ Logger.logger().addHandler(sh)\n level = None\n if config:\n level = config['Global'].get('LogLevel', None)\n- self.set_level(level)\n+ Logger.set_level(level)\n \n @staticmethod\n def logger():\ndiff --git a/dvc/main.py b/dvc/main.py\n--- a/dvc/main.py\n+++ b/dvc/main.py\n@@ -2,6 +2,8 @@\n from dvc.cli import parse_args\n \n def main(argv=None):\n+ Logger.init()\n+\n args = parse_args(argv)\n \n try:\n", "issue": "Running DVC outside of Git dir\nWe should handle all the cases like this.\r\n\r\n```\r\ncd /\r\n$ dvc repro\r\nNo handlers could be found for logger \"dvc\"\r\n```\n", "before_files": [{"content": "import sys\nimport logging\n\nimport colorama\n\n\ncolorama.init()\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'debug': colorama.Fore.BLUE,\n 'warn': colorama.Fore.YELLOW,\n 'error': colorama.Fore.RED\n }\n\n def __init__(self, config=None):\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter(self.FMT))\n sh.setLevel(logging.DEBUG)\n\n self.logger().addHandler(sh)\n level = None\n if config:\n level = config['Global'].get('LogLevel', None)\n self.set_level(level)\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, typ):\n header = ''\n footer = ''\n\n if sys.stdout.isatty():\n header = Logger.COLOR_MAP.get(typ.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def error(msg, **kwargs):\n exc_info = Logger.logger().getEffectiveLevel() == logging.DEBUG\n return Logger.logger().error(Logger.colorize(msg, 'error'), exc_info=exc_info, **kwargs)\n\n @staticmethod\n def warn(msg, **kwargs):\n return Logger.logger().warn(Logger.colorize(msg, 'warn'), **kwargs)\n\n @staticmethod\n def debug(msg, **kwargs):\n return Logger.logger().debug(Logger.colorize(msg, 'debug'), **kwargs)\n\n @staticmethod\n def info(msg, **kwargs):\n return Logger.logger().info(Logger.colorize(msg, 'info'), **kwargs)\n", "path": "dvc/logger.py"}, {"content": "from dvc.logger import Logger\nfrom dvc.cli import parse_args\n\ndef main(argv=None):\n args = parse_args(argv)\n\n try:\n cmd = args.func(args)\n except Exception as ex:\n Logger.error('Initialization error: {}'.format(str(ex)))\n return 255\n\n return cmd.run_cmd()\n", "path": "dvc/main.py"}]}
| 1,335 | 243 |
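The fix above boils down to attaching a handler to the `dvc` logger before any command runs, so an early failure (such as running outside a Git directory) is printed instead of producing "No handlers could be found for logger". A minimal, standalone sketch of that pattern with the standard `logging` module follows; the function names and the stand-in failure are illustrative, not DVC's real entry points.

```python
import logging
import sys


def init_logger(name="dvc", level=logging.INFO):
    """Attach a stdout handler to the named logger exactly once."""
    logger = logging.getLogger(name)
    if not logger.handlers:  # repeated calls stay harmless
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter("%(message)s"))
        logger.addHandler(handler)
    logger.setLevel(level)
    return logger


def main(argv=None):
    logger = init_logger()  # initialise before any command can fail
    try:
        raise RuntimeError("not inside a Git repository")  # stand-in failure
    except Exception as exc:
        logger.error("Initialization error: %s", exc)
        return 255


if __name__ == "__main__":
    sys.exit(main())
```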
gh_patches_debug_32408
|
rasdani/github-patches
|
git_diff
|
cltk__cltk-631
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unused imports in word tokenizer module
The word tokenizer module has a large number of imports from NLTK that are not used anywhere in the module. Removing them 1. cleans up the code, and 2. speeds up testing.
</issue>
<code>
[start of cltk/tokenize/word.py]
1 # -*-coding:utf-8-*-
2 """Language-specific word tokenizers. Primary purpose is to handle enclitics."""
3
4 import re
5
6 from nltk.tokenize.punkt import PunktLanguageVars
7 from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
8
9 import re
10
11 # Cleanup these imports—most are not used!
12 from nltk.data import load
13 from nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)
14 from nltk.tokenize.mwe import MWETokenizer
15 from nltk.tokenize.punkt import PunktSentenceTokenizer
16 from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,
17 BlanklineTokenizer, WordPunctTokenizer,
18 wordpunct_tokenize, regexp_tokenize,
19 blankline_tokenize)
20 #from nltk.tokenize.repp import ReppTokenizer
21 from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
22 from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,
23 line_tokenize)
24 from nltk.tokenize.stanford import StanfordTokenizer
25 from nltk.tokenize.texttiling import TextTilingTokenizer
26 #from nltk.tokenize.toktok import ToktokTokenizer
27 from nltk.tokenize.treebank import TreebankWordTokenizer
28 from nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize
29 from nltk.tokenize.stanford_segmenter import StanfordSegmenter
30
31 import cltk.corpus.arabic.utils.pyarabic.araby as araby
32
33 __author__ = ['Patrick J. Burns <[email protected]>', 'Kyle P. Johnson <[email protected]>',
34 'Natasha Voake <[email protected]>']
35 __license__ = 'MIT License. See LICENSE.'
36
37
38 class WordTokenizer: # pylint: disable=too-few-public-methods
39 """Tokenize according to rules specific to a given language."""
40
41 def __init__(self, language):
42 """Take language as argument to the class. Check availability and
43 setup class variables."""
44 self.language = language
45 self.available_languages = ['arabic',
46 'french',
47 'greek',
48 'latin',
49 'old_norse']
50 assert self.language in self.available_languages, \
51 "Specific tokenizer not available for '{0}'. Only available for: '{1}'.".format(self.language, # pylint: disable=line-too-long
52 self.available_languages) # pylint: disable=line-too-long
53 # ^^^ Necessary? since we have an 'else' in `tokenize`
54
55
56 def tokenize(self, string):
57 """Tokenize incoming string."""
58
59 if self.language == 'arabic':
60 tokens = tokenize_arabic_words(string)
61 elif self.language == 'french':
62 tokens = tokenize_french_words(string)
63 elif self.language == 'greek':
64 tokens = tokenize_greek_words(string)
65 elif self.language == 'latin':
66 tokens = tokenize_latin_words(string)
67 elif self.language == 'old_norse':
68 tokens = tokenize_old_norse_words(string)
69 else:
70 tokens = nltk_tokenize_words(string)
71
72 return tokens
73
74
75 def nltk_tokenize_words(string, attached_period=False, language=None):
76 """Wrap NLTK's tokenizer PunktLanguageVars(), but make final period
77 its own token.
78
79 >>> nltk_tokenize_words("Sentence 1. Sentence 2.")
80 ['Sentence', '1', '.', 'Sentence', '2', '.']
81
82 >>> #Optionally keep the NLTK's output:
83
84 >>> nltk_tokenize_words("Sentence 1. Sentence 2.", attached_period=True)
85 ['Sentence', '1.', 'Sentence', '2.']
86
87 TODO: Run some tests to determine whether there is a large penalty for
88 re-calling PunktLanguageVars() for each use of this function. If so, this
89 will need to become a class, perhaps inheriting from the PunktLanguageVars
90 object. Maybe integrate with WordTokenizer.
91 """
92 assert isinstance(string, str), "Incoming string must be type str."
93 if language == 'sanskrit':
94 periods = ['.', '।','॥']
95 else:
96 periods = ['.']
97 punkt = PunktLanguageVars()
98 tokens = punkt.word_tokenize(string)
99 if attached_period:
100 return tokens
101 new_tokens = []
102 for word in tokens:
103 for char in periods:
104 if word.endswith(char):
105 new_tokens.append(word[:-1])
106 new_tokens.append(char)
107 break
108 else:
109 new_tokens.append(word)
110 return new_tokens
111
112
113 def tokenize_arabic_words(text):
114
115 """
116 Tokenize text into words
117 @param text: the input text.
118 @type text: unicode.
119 @return: list of words.
120 @rtype: list.
121 """
122 specific_tokens = []
123 if not text:
124 return specific_tokens
125 else:
126 specific_tokens = araby.tokenize(text)
127 return specific_tokens
128
129
130 def tokenize_french_words(string):
131 assert isinstance(string, str), "Incoming string must be type str."
132
133 # normalize apostrophes
134
135 text = re.sub(r"’", r"'", string)
136
137 # Dealing with punctuation
138 text = re.sub(r"\'", r"' ", text)
139 text = re.sub("(?<=.)(?=[.!?)(\";:,«»\-])", " ", text)
140
141 results = str.split(text)
142 return (results)
143
144
145 def tokenize_greek_words(text):
146 """
147 Tokenizer divides the string into a list of substrings. This is a placeholder
148 function that returns the default NLTK word tokenizer until
149 Greek-specific options are added.
150
151 Example:
152 >>> text = 'Θουκυδίδης Ἀθηναῖος ξυνέγραψε τὸν πόλεμον τῶν Πελοποννησίων καὶ Ἀθηναίων,'
153 >>> tokenize_greek_words(text)
154 ['Θουκυδίδης', 'Ἀθηναῖος', 'ξυνέγραψε', 'τὸν', 'πόλεμον', 'τῶν', 'Πελοποννησίων', 'καὶ', 'Ἀθηναίων', ',']
155
156 :param string: This accepts the string value that needs to be tokenized
157 :returns: A list of substrings extracted from the string
158 """
159
160 return nltk_tokenize_words(text) # Simplest implementation to start
161
162
163 def tokenize_latin_words(string):
164 """
165 Tokenizer divides the string into a list of substrings
166
167 >>> from cltk.corpus.utils.formatter import remove_non_ascii
168 >>> text = 'Dices ἐστιν ἐμός pulchrum esse inimicos ulcisci.'
169 >>> tokenize_latin_words(text)
170 ['Dices', 'ἐστιν', 'ἐμός', 'pulchrum', 'esse', 'inimicos', 'ulcisci', '.']
171
172 :param string: This accepts the string value that needs to be tokenized
173 :returns: A list of substrings extracted from the string
174 """
175 from cltk.tokenize.latin_exceptions import latin_exceptions
176
177 assert isinstance(string, str), "Incoming string must be type str."
178
179 def matchcase(word):
180 # From Python Cookbook
181 def replace(m):
182 text = m.group()
183 if text.isupper():
184 return word.upper()
185 elif text.islower():
186 return word.lower()
187 elif text[0].isupper():
188 return word.capitalize()
189 else:
190 return word
191
192 return replace
193
194 replacements = [(r'mecum', 'cum me'),
195 (r'tecum', 'cum te'),
196 (r'secum', 'cum se'),
197 (r'nobiscum', 'cum nobis'),
198 (r'vobiscum', 'cum vobis'),
199 (r'quocum', 'cum quo'),
200 (r'quacum', 'cum qua'),
201 (r'quicum', 'cum qui'),
202 (r'quibuscum', 'cum quibus'),
203 (r'sodes', 'si audes'),
204 (r'satin', 'satis ne'),
205 (r'scin', 'scis ne'),
206 (r'sultis', 'si vultis'),
207 (r'similist', 'similis est'),
208 (r'qualist', 'qualis est')
209 ]
210
211 for replacement in replacements:
212 string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)
213
214 punkt_param = PunktParameters()
215 abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n',
216 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
217 punkt_param.abbrev_types = set(abbreviations)
218 sent_tokenizer = PunktSentenceTokenizer(punkt_param)
219
220 word_tokenizer = PunktLanguageVars()
221 sents = sent_tokenizer.tokenize(string)
222
223 enclitics = ['que', 'n', 'ue', 've', 'st']
224 exceptions = enclitics
225 exceptions = list(set(exceptions + latin_exceptions))
226
227 tokens = []
228
229 for sent in sents:
230 temp_tokens = word_tokenizer.word_tokenize(sent)
231 # Need to check that tokens exist before handling them;
232 # needed to make stream.readlines work in PlaintextCorpusReader
233
234 if temp_tokens:
235 if temp_tokens[0].endswith('ne'):
236 if temp_tokens[0].lower() not in exceptions:
237 temp = [temp_tokens[0][:-2], '-ne']
238 temp_tokens = temp + temp_tokens[1:]
239
240 if temp_tokens[-1].endswith('.'):
241 final_word = temp_tokens[-1][:-1]
242 del temp_tokens[-1]
243 temp_tokens += [final_word, '.']
244
245 for token in temp_tokens:
246 tokens.append(token)
247
248 # Break enclitic handling into own function?
249 specific_tokens = []
250
251 for token in tokens:
252 is_enclitic = False
253 if token.lower() not in exceptions:
254 for enclitic in enclitics:
255 if token.endswith(enclitic):
256 if enclitic == 'n':
257 specific_tokens += [token[:-len(enclitic)]] + ['-ne']
258 elif enclitic == 'st':
259 if token.endswith('ust'):
260 specific_tokens += [token[:-len(enclitic) + 1]] + ['est']
261 else:
262 specific_tokens += [token[:-len(enclitic)]] + ['est']
263 else:
264 specific_tokens += [token[:-len(enclitic)]] + ['-' + enclitic]
265 is_enclitic = True
266 break
267 if not is_enclitic:
268 specific_tokens.append(token)
269
270 return specific_tokens
271
272
273 def tokenize_old_norse_words(text):
274 """
275
276 :param text: a text or a sentence
277 :return:
278 """
279 assert isinstance(text, str)
280
281 # punctuation
282 text = re.sub(r"\'", r"' ", text)
283 text = re.sub("(?<=.)(?=[.!?)(\";:,«»\-])", " ", text)
284
285 # TODO dealing with merges between verbs at the second person of the present tense and þú
286 # -> -tu, -ðu, -du, -u : question
287
288 # TODO dealing with merges between verbs and sik -> st : middle voice
289
290 results = str.split(text)
291 return results
292
[end of cltk/tokenize/word.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cltk/tokenize/word.py b/cltk/tokenize/word.py
--- a/cltk/tokenize/word.py
+++ b/cltk/tokenize/word.py
@@ -1,40 +1,19 @@
-# -*-coding:utf-8-*-
"""Language-specific word tokenizers. Primary purpose is to handle enclitics."""
-import re
+__author__ = ['Patrick J. Burns <[email protected]>',
+ 'Kyle P. Johnson <[email protected]>',
+ 'Natasha Voake <[email protected]>']
+# Author info for Arabic, Old Norse?
-from nltk.tokenize.punkt import PunktLanguageVars
-from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
+__license__ = 'MIT License. See LICENSE.'
import re
-# Cleanup these imports—most are not used!
-from nltk.data import load
-from nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)
-from nltk.tokenize.mwe import MWETokenizer
-from nltk.tokenize.punkt import PunktSentenceTokenizer
-from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,
- BlanklineTokenizer, WordPunctTokenizer,
- wordpunct_tokenize, regexp_tokenize,
- blankline_tokenize)
-#from nltk.tokenize.repp import ReppTokenizer
-from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
-from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,
- line_tokenize)
-from nltk.tokenize.stanford import StanfordTokenizer
-from nltk.tokenize.texttiling import TextTilingTokenizer
-#from nltk.tokenize.toktok import ToktokTokenizer
-from nltk.tokenize.treebank import TreebankWordTokenizer
-from nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize
-from nltk.tokenize.stanford_segmenter import StanfordSegmenter
+from nltk.tokenize.punkt import PunktLanguageVars
+from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
import cltk.corpus.arabic.utils.pyarabic.araby as araby
-__author__ = ['Patrick J. Burns <[email protected]>', 'Kyle P. Johnson <[email protected]>',
- 'Natasha Voake <[email protected]>']
-__license__ = 'MIT License. See LICENSE.'
-
-
class WordTokenizer: # pylint: disable=too-few-public-methods
"""Tokenize according to rules specific to a given language."""
|
{"golden_diff": "diff --git a/cltk/tokenize/word.py b/cltk/tokenize/word.py\n--- a/cltk/tokenize/word.py\n+++ b/cltk/tokenize/word.py\n@@ -1,40 +1,19 @@\n-# -*-coding:utf-8-*-\n \"\"\"Language-specific word tokenizers. Primary purpose is to handle enclitics.\"\"\"\n \n-import re\n+__author__ = ['Patrick J. Burns <[email protected]>', \n+ 'Kyle P. Johnson <[email protected]>', \n+ 'Natasha Voake <[email protected]>']\n+# Author info for Arabic, Old Norse?\n \n-from nltk.tokenize.punkt import PunktLanguageVars\n-from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\n+__license__ = 'MIT License. See LICENSE.'\n \n import re\n \n-# Cleanup these imports\u2014most are not used!\n-from nltk.data import load\n-from nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)\n-from nltk.tokenize.mwe import MWETokenizer\n-from nltk.tokenize.punkt import PunktSentenceTokenizer\n-from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,\n- BlanklineTokenizer, WordPunctTokenizer,\n- wordpunct_tokenize, regexp_tokenize,\n- blankline_tokenize)\n-#from nltk.tokenize.repp import ReppTokenizer\n-from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize\n-from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,\n- line_tokenize)\n-from nltk.tokenize.stanford import StanfordTokenizer\n-from nltk.tokenize.texttiling import TextTilingTokenizer\n-#from nltk.tokenize.toktok import ToktokTokenizer\n-from nltk.tokenize.treebank import TreebankWordTokenizer\n-from nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize\n-from nltk.tokenize.stanford_segmenter import StanfordSegmenter\n+from nltk.tokenize.punkt import PunktLanguageVars\n+from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\n \n import cltk.corpus.arabic.utils.pyarabic.araby as araby\n \n-__author__ = ['Patrick J. Burns <[email protected]>', 'Kyle P. Johnson <[email protected]>',\n- 'Natasha Voake <[email protected]>']\n-__license__ = 'MIT License. See LICENSE.'\n-\n-\n class WordTokenizer: # pylint: disable=too-few-public-methods\n \"\"\"Tokenize according to rules specific to a given language.\"\"\"\n", "issue": "Unused imports in word tokenizer module\nThe word tokenizer module has a large number of imports from NLTK that are not used anywhere in the module. Removing them 1. cleans up the code, and 2. speeds up testing.\n", "before_files": [{"content": "# -*-coding:utf-8-*-\n\"\"\"Language-specific word tokenizers. 
Primary purpose is to handle enclitics.\"\"\"\n\nimport re\n\nfrom nltk.tokenize.punkt import PunktLanguageVars\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\n\nimport re\n\n# Cleanup these imports\u2014most are not used!\nfrom nltk.data import load\nfrom nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)\nfrom nltk.tokenize.mwe import MWETokenizer\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer\nfrom nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,\n BlanklineTokenizer, WordPunctTokenizer,\n wordpunct_tokenize, regexp_tokenize,\n blankline_tokenize)\n#from nltk.tokenize.repp import ReppTokenizer\nfrom nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize\nfrom nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,\n line_tokenize)\nfrom nltk.tokenize.stanford import StanfordTokenizer\nfrom nltk.tokenize.texttiling import TextTilingTokenizer\n#from nltk.tokenize.toktok import ToktokTokenizer\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\nfrom nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize\nfrom nltk.tokenize.stanford_segmenter import StanfordSegmenter\n\nimport cltk.corpus.arabic.utils.pyarabic.araby as araby\n\n__author__ = ['Patrick J. Burns <[email protected]>', 'Kyle P. Johnson <[email protected]>',\n 'Natasha Voake <[email protected]>']\n__license__ = 'MIT License. See LICENSE.'\n\n\nclass WordTokenizer: # pylint: disable=too-few-public-methods\n \"\"\"Tokenize according to rules specific to a given language.\"\"\"\n\n def __init__(self, language):\n \"\"\"Take language as argument to the class. Check availability and\n setup class variables.\"\"\"\n self.language = language\n self.available_languages = ['arabic', \n 'french',\n 'greek',\n 'latin',\n 'old_norse']\n assert self.language in self.available_languages, \\\n \"Specific tokenizer not available for '{0}'. Only available for: '{1}'.\".format(self.language, # pylint: disable=line-too-long\n self.available_languages) # pylint: disable=line-too-long\n # ^^^ Necessary? since we have an 'else' in `tokenize`\n \n\n def tokenize(self, string):\n \"\"\"Tokenize incoming string.\"\"\"\n \n if self.language == 'arabic':\n tokens = tokenize_arabic_words(string)\n elif self.language == 'french':\n tokens = tokenize_french_words(string)\n elif self.language == 'greek':\n tokens = tokenize_greek_words(string)\n elif self.language == 'latin':\n tokens = tokenize_latin_words(string)\n elif self.language == 'old_norse':\n tokens = tokenize_old_norse_words(string)\n else:\n tokens = nltk_tokenize_words(string)\n\n return tokens\n\n\ndef nltk_tokenize_words(string, attached_period=False, language=None):\n \"\"\"Wrap NLTK's tokenizer PunktLanguageVars(), but make final period\n its own token.\n\n >>> nltk_tokenize_words(\"Sentence 1. Sentence 2.\")\n ['Sentence', '1', '.', 'Sentence', '2', '.']\n\n >>> #Optionally keep the NLTK's output:\n\n >>> nltk_tokenize_words(\"Sentence 1. Sentence 2.\", attached_period=True)\n ['Sentence', '1.', 'Sentence', '2.']\n\n TODO: Run some tests to determine whether there is a large penalty for\n re-calling PunktLanguageVars() for each use of this function. If so, this\n will need to become a class, perhaps inheriting from the PunktLanguageVars\n object. 
Maybe integrate with WordTokenizer.\n \"\"\"\n assert isinstance(string, str), \"Incoming string must be type str.\"\n if language == 'sanskrit':\n periods = ['.', '\u0964','\u0965']\n else:\n periods = ['.']\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(string)\n if attached_period:\n return tokens\n new_tokens = []\n for word in tokens:\n for char in periods:\n if word.endswith(char):\n new_tokens.append(word[:-1])\n new_tokens.append(char)\n break\n else:\n new_tokens.append(word)\n return new_tokens\n\n\ndef tokenize_arabic_words(text):\n\n \"\"\"\n Tokenize text into words\n @param text: the input text.\n @type text: unicode.\n @return: list of words.\n @rtype: list.\n \"\"\"\n specific_tokens = []\n if not text:\n return specific_tokens\n else:\n specific_tokens = araby.tokenize(text)\n return specific_tokens\n \n\ndef tokenize_french_words(string):\n assert isinstance(string, str), \"Incoming string must be type str.\"\n\n # normalize apostrophes\n\n text = re.sub(r\"\u2019\", r\"'\", string)\n\n # Dealing with punctuation\n text = re.sub(r\"\\'\", r\"' \", text)\n text = re.sub(\"(?<=.)(?=[.!?)(\\\";:,\u00ab\u00bb\\-])\", \" \", text)\n\n results = str.split(text)\n return (results)\n \n \ndef tokenize_greek_words(text):\n \"\"\"\n Tokenizer divides the string into a list of substrings. This is a placeholder\n function that returns the default NLTK word tokenizer until\n Greek-specific options are added.\n \n Example:\n >>> text = '\u0398\u03bf\u03c5\u03ba\u03c5\u03b4\u03af\u03b4\u03b7\u03c2 \u1f08\u03b8\u03b7\u03bd\u03b1\u1fd6\u03bf\u03c2 \u03be\u03c5\u03bd\u03ad\u03b3\u03c1\u03b1\u03c8\u03b5 \u03c4\u1f78\u03bd \u03c0\u03cc\u03bb\u03b5\u03bc\u03bf\u03bd \u03c4\u1ff6\u03bd \u03a0\u03b5\u03bb\u03bf\u03c0\u03bf\u03bd\u03bd\u03b7\u03c3\u03af\u03c9\u03bd \u03ba\u03b1\u1f76 \u1f08\u03b8\u03b7\u03bd\u03b1\u03af\u03c9\u03bd,'\n >>> tokenize_greek_words(text)\n ['\u0398\u03bf\u03c5\u03ba\u03c5\u03b4\u03af\u03b4\u03b7\u03c2', '\u1f08\u03b8\u03b7\u03bd\u03b1\u1fd6\u03bf\u03c2', '\u03be\u03c5\u03bd\u03ad\u03b3\u03c1\u03b1\u03c8\u03b5', '\u03c4\u1f78\u03bd', '\u03c0\u03cc\u03bb\u03b5\u03bc\u03bf\u03bd', '\u03c4\u1ff6\u03bd', '\u03a0\u03b5\u03bb\u03bf\u03c0\u03bf\u03bd\u03bd\u03b7\u03c3\u03af\u03c9\u03bd', '\u03ba\u03b1\u1f76', '\u1f08\u03b8\u03b7\u03bd\u03b1\u03af\u03c9\u03bd', ',']\n \n :param string: This accepts the string value that needs to be tokenized\n :returns: A list of substrings extracted from the string\n \"\"\"\n \n return nltk_tokenize_words(text) # Simplest implementation to start\n \n\ndef tokenize_latin_words(string):\n \"\"\"\n Tokenizer divides the string into a list of substrings\n \n >>> from cltk.corpus.utils.formatter import remove_non_ascii\n >>> text = 'Dices \u1f10\u03c3\u03c4\u03b9\u03bd \u1f10\u03bc\u03cc\u03c2 pulchrum esse inimicos ulcisci.'\n >>> tokenize_latin_words(text)\n ['Dices', '\u1f10\u03c3\u03c4\u03b9\u03bd', '\u1f10\u03bc\u03cc\u03c2', 'pulchrum', 'esse', 'inimicos', 'ulcisci', '.']\n \n :param string: This accepts the string value that needs to be tokenized\n :returns: A list of substrings extracted from the string\n \"\"\"\n from cltk.tokenize.latin_exceptions import latin_exceptions\n\n assert isinstance(string, str), \"Incoming string must be type str.\"\n\n def matchcase(word):\n # From Python Cookbook\n def replace(m):\n text = m.group()\n if text.isupper():\n return word.upper()\n elif text.islower():\n return word.lower()\n elif text[0].isupper():\n return word.capitalize()\n else:\n return word\n\n return replace\n\n 
replacements = [(r'mecum', 'cum me'),\n (r'tecum', 'cum te'),\n (r'secum', 'cum se'),\n (r'nobiscum', 'cum nobis'),\n (r'vobiscum', 'cum vobis'),\n (r'quocum', 'cum quo'),\n (r'quacum', 'cum qua'),\n (r'quicum', 'cum qui'),\n (r'quibuscum', 'cum quibus'),\n (r'sodes', 'si audes'),\n (r'satin', 'satis ne'),\n (r'scin', 'scis ne'),\n (r'sultis', 'si vultis'),\n (r'similist', 'similis est'),\n (r'qualist', 'qualis est')\n ]\n\n for replacement in replacements:\n string = re.sub(replacement[0], matchcase(replacement[1]), string, flags=re.IGNORECASE)\n\n punkt_param = PunktParameters()\n abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', \"m'\", 'ser', 'ap', 'n',\n 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']\n punkt_param.abbrev_types = set(abbreviations)\n sent_tokenizer = PunktSentenceTokenizer(punkt_param)\n\n word_tokenizer = PunktLanguageVars()\n sents = sent_tokenizer.tokenize(string)\n\n enclitics = ['que', 'n', 'ue', 've', 'st']\n exceptions = enclitics\n exceptions = list(set(exceptions + latin_exceptions))\n\n tokens = []\n\n for sent in sents:\n temp_tokens = word_tokenizer.word_tokenize(sent)\n # Need to check that tokens exist before handling them;\n # needed to make stream.readlines work in PlaintextCorpusReader\n \n if temp_tokens:\n if temp_tokens[0].endswith('ne'):\n if temp_tokens[0].lower() not in exceptions:\n temp = [temp_tokens[0][:-2], '-ne']\n temp_tokens = temp + temp_tokens[1:]\n\n if temp_tokens[-1].endswith('.'):\n final_word = temp_tokens[-1][:-1]\n del temp_tokens[-1]\n temp_tokens += [final_word, '.']\n\n for token in temp_tokens:\n tokens.append(token)\n\n # Break enclitic handling into own function?\n specific_tokens = []\n\n for token in tokens:\n is_enclitic = False\n if token.lower() not in exceptions:\n for enclitic in enclitics:\n if token.endswith(enclitic):\n if enclitic == 'n':\n specific_tokens += [token[:-len(enclitic)]] + ['-ne']\n elif enclitic == 'st':\n if token.endswith('ust'):\n specific_tokens += [token[:-len(enclitic) + 1]] + ['est']\n else:\n specific_tokens += [token[:-len(enclitic)]] + ['est']\n else:\n specific_tokens += [token[:-len(enclitic)]] + ['-' + enclitic]\n is_enclitic = True\n break\n if not is_enclitic:\n specific_tokens.append(token)\n\n return specific_tokens\n\n\ndef tokenize_old_norse_words(text):\n \"\"\"\n\n :param text: a text or a sentence\n :return:\n \"\"\"\n assert isinstance(text, str)\n\n # punctuation\n text = re.sub(r\"\\'\", r\"' \", text)\n text = re.sub(\"(?<=.)(?=[.!?)(\\\";:,\u00ab\u00bb\\-])\", \" \", text)\n\n # TODO dealing with merges between verbs at the second person of the present tense and \u00fe\u00fa\n # -> -tu, -\u00f0u, -du, -u : question\n\n # TODO dealing with merges between verbs and sik -> st : middle voice\n\n results = str.split(text)\n return results\n", "path": "cltk/tokenize/word.py"}]}
| 3,945 | 551 |
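A quick way to surface candidates like the NLTK imports removed above is a small AST pass that lists imported names never referenced by name. The helper below was written for this note and is only a heuristic (it ignores `__all__`, re-exports, and references inside strings), so treat its output as suggestions to review rather than a definitive list.

```python
import ast
import sys


def unused_imports(path):
    """Return (lineno, name) pairs for imports never referenced by name."""
    with open(path, encoding="utf-8") as f:
        tree = ast.parse(f.read())
    imported = {}  # name bound by an import -> line number of that import
    used = set()   # every bare name referenced anywhere in the module
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                imported[alias.asname or alias.name.split(".")[0]] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                if alias.name != "*":
                    imported[alias.asname or alias.name] = node.lineno
        elif isinstance(node, ast.Name):
            used.add(node.id)
    return sorted((line, name) for name, line in imported.items() if name not in used)


if __name__ == "__main__":
    for lineno, name in unused_imports(sys.argv[1]):
        print("line {}: '{}' looks unused".format(lineno, name))
```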
gh_patches_debug_9821
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1309
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: clip GeoSeries by non-overlapping mask raises error
Clipping a GeoSeries by a non-overlapping `mask` raises an error, because we try to return a GeoDataFrame with the original columns here: https://github.com/geopandas/geopandas/blob/7350b49688f51b281f69638c8c51a448a0115fb0/geopandas/tools/clip.py#L155
In the case of a GeoSeries we should instead return an empty GeoSeries.
MRE:
```py
import geopandas as gpd
from shapely.geometry import Point
s = gpd.GeoSeries([Point(0,0), Point(1,1)])
mask = Point(10, 10).buffer(2)
gpd.clip(s, mask)
```
<details>
```
~/Dropbox/Python/geopandas/martinfleis/geopandas/tools/clip.py in clip(gdf, mask, keep_geom_type)
153 and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
154 ):
--> 155 return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)
156
157 if isinstance(mask, (GeoDataFrame, GeoSeries)):
~/anaconda3/envs/geo_dev/lib/python3.8/site-packages/pandas/core/generic.py in __getattr__(self, name)
5173 or name in self._accessors
5174 ):
-> 5175 return object.__getattribute__(self, name)
5176 else:
5177 if self._info_axis._can_hold_identifiers_and_holds_name(name):
AttributeError: 'GeoSeries' object has no attribute 'columns'
```
</details>
</issue>
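The issue suggests mirroring the input type on the early return. The sketch below applies the same bounding-box test used in `clip` (see the code that follows) and returns `gdf.iloc[:0]` so a GeoSeries comes back as an empty GeoSeries; the helper name and the `iloc[:0]` choice are illustrative assumptions, not necessarily the merged fix.

```python
import geopandas as gpd
from shapely.geometry import Point


def clip_disjoint_returns_empty(gdf, mask):
    """Return an empty object of gdf's own type when the (Multi)Polygon mask
    does not overlap gdf's bounds; otherwise return None so the caller can
    continue with the normal clipping path."""
    box_gdf = gdf.total_bounds
    box_mask = mask.bounds
    overlap = (
        (box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2])
        and (box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3])
    )
    if not overlap:
        # iloc[:0] keeps the type (GeoSeries or GeoDataFrame), the CRS
        # and, for a GeoDataFrame, the columns.
        return gdf.iloc[:0]
    return None


s = gpd.GeoSeries([Point(0, 0), Point(1, 1)])
print(clip_disjoint_returns_empty(s, Point(10, 10).buffer(2)))  # empty GeoSeries
```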
<code>
[start of geopandas/tools/clip.py]
1 """
2 geopandas.clip
3 ==============
4
5 A module to clip vector data using GeoPandas.
6
7 """
8 import warnings
9
10 import numpy as np
11 import pandas as pd
12
13 from shapely.geometry import Polygon, MultiPolygon
14
15 from geopandas import GeoDataFrame, GeoSeries
16
17
18 def _clip_points(gdf, poly):
19 """Clip point geometry to the polygon extent.
20
21 Clip an input point GeoDataFrame to the polygon extent of the poly
22 parameter. Points that intersect the poly geometry are extracted with
23 associated attributes and returned.
24
25 Parameters
26 ----------
27 gdf : GeoDataFrame, GeoSeries
28 Composed of point geometry that will be clipped to the poly.
29
30 poly : (Multi)Polygon
31 Reference geometry used to spatially clip the data.
32
33 Returns
34 -------
35 GeoDataFrame
36 The returned GeoDataFrame is a subset of gdf that intersects
37 with poly.
38 """
39 spatial_index = gdf.sindex
40 bbox = poly.bounds
41 sidx = list(spatial_index.intersection(bbox))
42 gdf_sub = gdf.iloc[sidx]
43
44 return gdf_sub[gdf_sub.geometry.intersects(poly)]
45
46
47 def _clip_line_poly(gdf, poly):
48 """Clip line and polygon geometry to the polygon extent.
49
50 Clip an input line or polygon to the polygon extent of the poly
51 parameter. Parts of Lines or Polygons that intersect the poly geometry are
52 extracted with associated attributes and returned.
53
54 Parameters
55 ----------
56 gdf : GeoDataFrame, GeoSeries
57 Line or polygon geometry that is clipped to poly.
58
59 poly : (Multi)Polygon
60 Reference polygon for clipping.
61
62 Returns
63 -------
64 GeoDataFrame
65 The returned GeoDataFrame is a clipped subset of gdf
66 that intersects with poly.
67 """
68 spatial_index = gdf.sindex
69
70 # Create a box for the initial intersection
71 bbox = poly.bounds
72 # Get a list of id's for each object that overlaps the bounding box and
73 # subset the data to just those lines
74 sidx = list(spatial_index.intersection(bbox))
75 gdf_sub = gdf.iloc[sidx]
76
77 # Clip the data with the polygon
78 if isinstance(gdf_sub, GeoDataFrame):
79 clipped = gdf_sub.copy()
80 clipped["geometry"] = gdf_sub.intersection(poly)
81
82 # Return the clipped layer with no null geometry values or empty geometries
83 return clipped[~clipped.geometry.is_empty & clipped.geometry.notnull()]
84 else:
85 # GeoSeries
86 clipped = gdf_sub.intersection(poly)
87 return clipped[~clipped.is_empty & clipped.notnull()]
88
89
90 def clip(gdf, mask, keep_geom_type=False):
91 """Clip points, lines, or polygon geometries to the mask extent.
92
93 Both layers must be in the same Coordinate Reference System (CRS).
94 The `gdf` will be clipped to the full extent of the clip object.
95
96 If there are multiple polygons in mask, data from `gdf` will be
97 clipped to the total boundary of all polygons in mask.
98
99 Parameters
100 ----------
101 gdf : GeoDataFrame or GeoSeries
102 Vector layer (point, line, polygon) to be clipped to mask.
103 mask : GeoDataFrame, GeoSeries, (Multi)Polygon
104 Polygon vector layer used to clip `gdf`.
105 The mask's geometry is dissolved into one geometric feature
106 and intersected with `gdf`.
107 keep_geom_type : boolean, default False
108 If True, return only geometries of original type in case of intersection
109 resulting in multiple geometry types or GeometryCollections.
110 If False, return all resulting geometries (potentially mixed-types).
111
112 Returns
113 -------
114 GeoDataFrame or GeoSeries
115 Vector data (points, lines, polygons) from `gdf` clipped to
116 polygon boundary from mask.
117
118 Examples
119 --------
120 Clip points (global cities) with a polygon (the South American continent):
121
122 >>> import geopandas
123 >>> path =
124 >>> world = geopandas.read_file(
125 ... geopandas.datasets.get_path('naturalearth_lowres'))
126 >>> south_america = world[world['continent'] == "South America"]
127 >>> capitals = geopandas.read_file(
128 ... geopandas.datasets.get_path('naturalearth_cities'))
129 >>> capitals.shape
130 (202, 2)
131 >>> sa_capitals = geopandas.clip(capitals, south_america)
132 >>> sa_capitals.shape
133 (12, 2)
134 """
135 if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
136 raise TypeError(
137 "'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
138 )
139
140 if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):
141 raise TypeError(
142 "'mask' should be GeoDataFrame, GeoSeries or"
143 "(Multi)Polygon, got {}".format(type(gdf))
144 )
145
146 if isinstance(mask, (GeoDataFrame, GeoSeries)):
147 box_mask = mask.total_bounds
148 else:
149 box_mask = mask.bounds
150 box_gdf = gdf.total_bounds
151 if not (
152 ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
153 and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
154 ):
155 return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)
156
157 if isinstance(mask, (GeoDataFrame, GeoSeries)):
158 poly = mask.geometry.unary_union
159 else:
160 poly = mask
161
162 geom_types = gdf.geometry.type
163 poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
164 line_idx = np.asarray(
165 (geom_types == "LineString")
166 | (geom_types == "LinearRing")
167 | (geom_types == "MultiLineString")
168 )
169 point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
170 geomcoll_idx = np.asarray((geom_types == "GeometryCollection"))
171
172 if point_idx.any():
173 point_gdf = _clip_points(gdf[point_idx], poly)
174 else:
175 point_gdf = None
176
177 if poly_idx.any():
178 poly_gdf = _clip_line_poly(gdf[poly_idx], poly)
179 else:
180 poly_gdf = None
181
182 if line_idx.any():
183 line_gdf = _clip_line_poly(gdf[line_idx], poly)
184 else:
185 line_gdf = None
186
187 if geomcoll_idx.any():
188 geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)
189 else:
190 geomcoll_gdf = None
191
192 order = pd.Series(range(len(gdf)), index=gdf.index)
193 concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])
194
195 if keep_geom_type:
196 geomcoll_concat = (concat.geom_type == "GeometryCollection").any()
197 geomcoll_orig = geomcoll_idx.any()
198
199 new_collection = geomcoll_concat and not geomcoll_orig
200
201 if geomcoll_orig:
202 warnings.warn(
203 "keep_geom_type can not be called on a "
204 "GeoDataFrame with GeometryCollection."
205 )
206 else:
207 polys = ["Polygon", "MultiPolygon"]
208 lines = ["LineString", "MultiLineString", "LinearRing"]
209 points = ["Point", "MultiPoint"]
210
211 # Check that the gdf for multiple geom types (points, lines and/or polys)
212 orig_types_total = sum(
213 [
214 gdf.geom_type.isin(polys).any(),
215 gdf.geom_type.isin(lines).any(),
216 gdf.geom_type.isin(points).any(),
217 ]
218 )
219
220 # Check how many geometry types are in the clipped GeoDataFrame
221 clip_types_total = sum(
222 [
223 concat.geom_type.isin(polys).any(),
224 concat.geom_type.isin(lines).any(),
225 concat.geom_type.isin(points).any(),
226 ]
227 )
228
229 # Check there aren't any new geom types in the clipped GeoDataFrame
230 more_types = orig_types_total < clip_types_total
231
232 if orig_types_total > 1:
233 warnings.warn(
234 "keep_geom_type can not be called on a mixed type GeoDataFrame."
235 )
236 elif new_collection or more_types:
237 orig_type = gdf.geom_type.iloc[0]
238 if new_collection:
239 concat = concat.explode()
240 if orig_type in polys:
241 concat = concat.loc[concat.geom_type.isin(polys)]
242 elif orig_type in lines:
243 concat = concat.loc[concat.geom_type.isin(lines)]
244
245 # preserve the original order of the input
246 if isinstance(concat, GeoDataFrame):
247 concat["_order"] = order
248 return concat.sort_values(by="_order").drop(columns="_order")
249 else:
250 concat = GeoDataFrame(geometry=concat)
251 concat["_order"] = order
252 return concat.sort_values(by="_order").geometry
253
[end of geopandas/tools/clip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/tools/clip.py b/geopandas/tools/clip.py
--- a/geopandas/tools/clip.py
+++ b/geopandas/tools/clip.py
@@ -152,7 +152,11 @@
((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
):
- return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)
+ return (
+ GeoDataFrame(columns=gdf.columns, crs=gdf.crs)
+ if isinstance(gdf, GeoDataFrame)
+ else GeoSeries(crs=gdf.crs)
+ )
if isinstance(mask, (GeoDataFrame, GeoSeries)):
poly = mask.geometry.unary_union
|
{"golden_diff": "diff --git a/geopandas/tools/clip.py b/geopandas/tools/clip.py\n--- a/geopandas/tools/clip.py\n+++ b/geopandas/tools/clip.py\n@@ -152,7 +152,11 @@\n ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))\n and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))\n ):\n- return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)\n+ return (\n+ GeoDataFrame(columns=gdf.columns, crs=gdf.crs)\n+ if isinstance(gdf, GeoDataFrame)\n+ else GeoSeries(crs=gdf.crs)\n+ )\n \n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n poly = mask.geometry.unary_union\n", "issue": "BUG: clip GeoSeries by non-overlapping mask raises error\nClipping GeoSeries by non-overlapping `mask` raises error as we try to return GeoDataFrame with original columns in here: https://github.com/geopandas/geopandas/blob/7350b49688f51b281f69638c8c51a448a0115fb0/geopandas/tools/clip.py#L155\r\n\r\nIn case of GeoSeries we should return empty GeoSeries I would say.\r\nMRE:\r\n\r\n```py\r\nimport geopandas as gpd\r\nfrom shapely.geometry import Point\r\n\r\ns = gpd.GeoSeries([Point(0,0), Point(1,1)])\r\nmask = Point(10, 10).buffer(2)\r\ngpd.clip(s, mask)\r\n```\r\n\r\n\r\n<details>\r\n\r\n```\r\n~/Dropbox/Python/geopandas/martinfleis/geopandas/tools/clip.py in clip(gdf, mask, keep_geom_type)\r\n 153 and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))\r\n 154 ):\r\n--> 155 return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)\r\n 156 \r\n 157 if isinstance(mask, (GeoDataFrame, GeoSeries)):\r\n\r\n~/anaconda3/envs/geo_dev/lib/python3.8/site-packages/pandas/core/generic.py in __getattr__(self, name)\r\n 5173 or name in self._accessors\r\n 5174 ):\r\n-> 5175 return object.__getattribute__(self, name)\r\n 5176 else:\r\n 5177 if self._info_axis._can_hold_identifiers_and_holds_name(name):\r\n\r\nAttributeError: 'GeoSeries' object has no attribute 'columns'\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "\"\"\"\ngeopandas.clip\n==============\n\nA module to clip vector data using GeoPandas.\n\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely.geometry import Polygon, MultiPolygon\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n\ndef _clip_points(gdf, poly):\n \"\"\"Clip point geometry to the polygon extent.\n\n Clip an input point GeoDataFrame to the polygon extent of the poly\n parameter. Points that intersect the poly geometry are extracted with\n associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Composed of point geometry that will be clipped to the poly.\n\n poly : (Multi)Polygon\n Reference geometry used to spatially clip the data.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a subset of gdf that intersects\n with poly.\n \"\"\"\n spatial_index = gdf.sindex\n bbox = poly.bounds\n sidx = list(spatial_index.intersection(bbox))\n gdf_sub = gdf.iloc[sidx]\n\n return gdf_sub[gdf_sub.geometry.intersects(poly)]\n\n\ndef _clip_line_poly(gdf, poly):\n \"\"\"Clip line and polygon geometry to the polygon extent.\n\n Clip an input line or polygon to the polygon extent of the poly\n parameter. 
Parts of Lines or Polygons that intersect the poly geometry are\n extracted with associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Line or polygon geometry that is clipped to poly.\n\n poly : (Multi)Polygon\n Reference polygon for clipping.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a clipped subset of gdf\n that intersects with poly.\n \"\"\"\n spatial_index = gdf.sindex\n\n # Create a box for the initial intersection\n bbox = poly.bounds\n # Get a list of id's for each object that overlaps the bounding box and\n # subset the data to just those lines\n sidx = list(spatial_index.intersection(bbox))\n gdf_sub = gdf.iloc[sidx]\n\n # Clip the data with the polygon\n if isinstance(gdf_sub, GeoDataFrame):\n clipped = gdf_sub.copy()\n clipped[\"geometry\"] = gdf_sub.intersection(poly)\n\n # Return the clipped layer with no null geometry values or empty geometries\n return clipped[~clipped.geometry.is_empty & clipped.geometry.notnull()]\n else:\n # GeoSeries\n clipped = gdf_sub.intersection(poly)\n return clipped[~clipped.is_empty & clipped.notnull()]\n\n\ndef clip(gdf, mask, keep_geom_type=False):\n \"\"\"Clip points, lines, or polygon geometries to the mask extent.\n\n Both layers must be in the same Coordinate Reference System (CRS).\n The `gdf` will be clipped to the full extent of the clip object.\n\n If there are multiple polygons in mask, data from `gdf` will be\n clipped to the total boundary of all polygons in mask.\n\n Parameters\n ----------\n gdf : GeoDataFrame or GeoSeries\n Vector layer (point, line, polygon) to be clipped to mask.\n mask : GeoDataFrame, GeoSeries, (Multi)Polygon\n Polygon vector layer used to clip `gdf`.\n The mask's geometry is dissolved into one geometric feature\n and intersected with `gdf`.\n keep_geom_type : boolean, default False\n If True, return only geometries of original type in case of intersection\n resulting in multiple geometry types or GeometryCollections.\n If False, return all resulting geometries (potentially mixed-types).\n\n Returns\n -------\n GeoDataFrame or GeoSeries\n Vector data (points, lines, polygons) from `gdf` clipped to\n polygon boundary from mask.\n\n Examples\n --------\n Clip points (global cities) with a polygon (the South American continent):\n\n >>> import geopandas\n >>> path =\n >>> world = geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_lowres'))\n >>> south_america = world[world['continent'] == \"South America\"]\n >>> capitals = geopandas.read_file(\n ... 
geopandas.datasets.get_path('naturalearth_cities'))\n >>> capitals.shape\n (202, 2)\n >>> sa_capitals = geopandas.clip(capitals, south_america)\n >>> sa_capitals.shape\n (12, 2)\n \"\"\"\n if not isinstance(gdf, (GeoDataFrame, GeoSeries)):\n raise TypeError(\n \"'gdf' should be GeoDataFrame or GeoSeries, got {}\".format(type(gdf))\n )\n\n if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):\n raise TypeError(\n \"'mask' should be GeoDataFrame, GeoSeries or\"\n \"(Multi)Polygon, got {}\".format(type(gdf))\n )\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n box_mask = mask.total_bounds\n else:\n box_mask = mask.bounds\n box_gdf = gdf.total_bounds\n if not (\n ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))\n and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))\n ):\n return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n poly = mask.geometry.unary_union\n else:\n poly = mask\n\n geom_types = gdf.geometry.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"LinearRing\")\n | (geom_types == \"MultiLineString\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n geomcoll_idx = np.asarray((geom_types == \"GeometryCollection\"))\n\n if point_idx.any():\n point_gdf = _clip_points(gdf[point_idx], poly)\n else:\n point_gdf = None\n\n if poly_idx.any():\n poly_gdf = _clip_line_poly(gdf[poly_idx], poly)\n else:\n poly_gdf = None\n\n if line_idx.any():\n line_gdf = _clip_line_poly(gdf[line_idx], poly)\n else:\n line_gdf = None\n\n if geomcoll_idx.any():\n geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)\n else:\n geomcoll_gdf = None\n\n order = pd.Series(range(len(gdf)), index=gdf.index)\n concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])\n\n if keep_geom_type:\n geomcoll_concat = (concat.geom_type == \"GeometryCollection\").any()\n geomcoll_orig = geomcoll_idx.any()\n\n new_collection = geomcoll_concat and not geomcoll_orig\n\n if geomcoll_orig:\n warnings.warn(\n \"keep_geom_type can not be called on a \"\n \"GeoDataFrame with GeometryCollection.\"\n )\n else:\n polys = [\"Polygon\", \"MultiPolygon\"]\n lines = [\"LineString\", \"MultiLineString\", \"LinearRing\"]\n points = [\"Point\", \"MultiPoint\"]\n\n # Check that the gdf for multiple geom types (points, lines and/or polys)\n orig_types_total = sum(\n [\n gdf.geom_type.isin(polys).any(),\n gdf.geom_type.isin(lines).any(),\n gdf.geom_type.isin(points).any(),\n ]\n )\n\n # Check how many geometry types are in the clipped GeoDataFrame\n clip_types_total = sum(\n [\n concat.geom_type.isin(polys).any(),\n concat.geom_type.isin(lines).any(),\n concat.geom_type.isin(points).any(),\n ]\n )\n\n # Check there aren't any new geom types in the clipped GeoDataFrame\n more_types = orig_types_total < clip_types_total\n\n if orig_types_total > 1:\n warnings.warn(\n \"keep_geom_type can not be called on a mixed type GeoDataFrame.\"\n )\n elif new_collection or more_types:\n orig_type = gdf.geom_type.iloc[0]\n if new_collection:\n concat = concat.explode()\n if orig_type in polys:\n concat = concat.loc[concat.geom_type.isin(polys)]\n elif orig_type in lines:\n concat = concat.loc[concat.geom_type.isin(lines)]\n\n # preserve the original order of the input\n if isinstance(concat, GeoDataFrame):\n concat[\"_order\"] = order\n return 
concat.sort_values(by=\"_order\").drop(columns=\"_order\")\n else:\n concat = GeoDataFrame(geometry=concat)\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").geometry\n", "path": "geopandas/tools/clip.py"}]}
| 3,579 | 198 |
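The fix recorded above comes down to returning an empty result of the same type as the input whenever the mask and the data do not overlap at all. A minimal sketch of that type-preserving early return, assuming geopandas is installed and using a hypothetical helper name `_empty_like` (not part of the patch), could look like this:

```python
from geopandas import GeoDataFrame, GeoSeries


def _empty_like(gdf):
    """Return an empty clip result matching the input type, keeping its CRS."""
    if isinstance(gdf, GeoDataFrame):
        return GeoDataFrame(columns=gdf.columns, crs=gdf.crs)
    # A GeoSeries has no .columns attribute, which is what raised the
    # AttributeError reported in this record's issue.
    return GeoSeries(crs=gdf.crs)
```

Keeping the return type aligned with the input is the whole point of the patch: `gdf.columns` only exists on a `GeoDataFrame`, so the original early return could not work for a `GeoSeries`.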
gh_patches_debug_16314
|
rasdani/github-patches
|
git_diff
|
sotetsuk__pgx-926
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Termination in Leduc Holdem
Hello, in Leduc Holdem I don't quite understand why doing the `FOLD` action in the first round of betting doesn't terminate the episode.
` terminated = round_over & (state._round == 1)`
This seems like faulty behavior; please tell me if I'm missing something. A possible fix could be:
` terminated = (round_over & (state._round == 1)) | (action==FOLD) `
</issue>
<code>
[start of pgx/leduc_holdem.py]
1 # Copyright 2023 The Pgx Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import jax
16 import jax.numpy as jnp
17
18 import pgx.v1 as v1
19 from pgx._src.struct import dataclass
20
21 FALSE = jnp.bool_(False)
22 TRUE = jnp.bool_(True)
23
24 INVALID_ACTION = jnp.int8(-1)
25 CALL = jnp.int8(0)
26 RAISE = jnp.int8(1)
27 FOLD = jnp.int8(2)
28
29 MAX_RAISE = jnp.int8(2)
30
31
32 @dataclass
33 class State(v1.State):
34 current_player: jnp.ndarray = jnp.int8(0)
35 observation: jnp.ndarray = jnp.zeros((8, 8, 2), dtype=jnp.bool_)
36 rewards: jnp.ndarray = jnp.float32([0.0, 0.0])
37 terminated: jnp.ndarray = FALSE
38 truncated: jnp.ndarray = FALSE
39 legal_action_mask: jnp.ndarray = jnp.ones(3, dtype=jnp.bool_)
40 _rng_key: jax.random.KeyArray = jax.random.PRNGKey(0)
41 _step_count: jnp.ndarray = jnp.int32(0)
42 # --- Leduc Hold'Em specific ---
43 _first_player: jnp.ndarray = jnp.int8(0)
44 # [(player 0), (player 1), (public)]
45 _cards: jnp.ndarray = jnp.int8([-1, -1, -1])
46 # 0(Call) 1(Bet) 2(Fold) 3(Check)
47 _last_action: jnp.ndarray = INVALID_ACTION
48 _chips: jnp.ndarray = jnp.ones(2, dtype=jnp.int8)
49 _round: jnp.ndarray = jnp.int8(0)
50 _raise_count: jnp.ndarray = jnp.int8(0)
51
52 @property
53 def env_id(self) -> v1.EnvId:
54 return "leduc_holdem"
55
56
57 class LeducHoldem(v1.Env):
58 def __init__(self):
59 super().__init__()
60
61 def _init(self, key: jax.random.KeyArray) -> State:
62 return _init(key)
63
64 def _step(self, state: v1.State, action: jnp.ndarray) -> State:
65 assert isinstance(state, State)
66 return _step(state, action)
67
68 def _observe(self, state: v1.State, player_id: jnp.ndarray) -> jnp.ndarray:
69 assert isinstance(state, State)
70 return _observe(state, player_id)
71
72 @property
73 def id(self) -> v1.EnvId:
74 return "leduc_holdem"
75
76 @property
77 def version(self) -> str:
78 return "beta"
79
80 @property
81 def num_players(self) -> int:
82 return 2
83
84
85 def _init(rng: jax.random.KeyArray) -> State:
86 rng1, rng2, rng3 = jax.random.split(rng, 3)
87 current_player = jnp.int8(jax.random.bernoulli(rng1))
88 init_card = jax.random.permutation(
89 rng2, jnp.int8([0, 0, 1, 1, 2, 2]), independent=True
90 )
91 return State( # type:ignore
92 _rng_key=rng3,
93 _first_player=current_player,
94 current_player=current_player,
95 _cards=init_card[:3],
96 legal_action_mask=jnp.bool_([1, 1, 0]),
97 _chips=jnp.ones(2, dtype=jnp.int8),
98 )
99
100
101 def _step(state: State, action):
102 action = jnp.int8(action)
103 chips = jax.lax.switch(
104 action,
105 [
106 lambda: state._chips.at[state.current_player].set(
107 state._chips[1 - state.current_player]
108 ), # CALL
109 lambda: state._chips.at[state.current_player].set(
110 jnp.max(state._chips) + _raise_chips(state)
111 ), # RAISE
112 lambda: state._chips, # FOLD
113 ],
114 )
115
116 round_over, terminated, reward = _check_round_over(state, action)
117 last_action = jax.lax.select(round_over, INVALID_ACTION, action)
118 current_player = jax.lax.select(
119 round_over, state._first_player, 1 - state.current_player
120 )
121 raise_count = jax.lax.select(
122 round_over, jnp.int8(0), state._raise_count + jnp.int8(action == RAISE)
123 )
124
125 reward *= jnp.min(chips)
126
127 legal_action = jax.lax.switch(
128 action,
129 [
130 lambda: jnp.bool_([1, 1, 0]), # CALL
131 lambda: jnp.bool_([1, 1, 1]), # RAISE
132 lambda: jnp.bool_([0, 0, 0]), # FOLD
133 ],
134 )
135 legal_action = legal_action.at[RAISE].set(raise_count < MAX_RAISE)
136
137 return state.replace( # type:ignore
138 current_player=current_player,
139 _last_action=last_action,
140 legal_action_mask=legal_action,
141 terminated=terminated,
142 rewards=reward,
143 _round=state._round + jnp.int8(round_over),
144 _chips=chips,
145 _raise_count=raise_count,
146 )
147
148
149 def _check_round_over(state, action):
150 round_over = (action == FOLD) | (
151 (state._last_action != INVALID_ACTION) & (action == CALL)
152 )
153 terminated = round_over & (state._round == 1)
154
155 reward = jax.lax.select(
156 terminated & (action == FOLD),
157 jnp.float32([-1, -1]).at[1 - state.current_player].set(1),
158 jnp.float32([0, 0]),
159 )
160 reward = jax.lax.select(
161 terminated & (action != FOLD),
162 _get_unit_reward(state),
163 reward,
164 )
165 return round_over, terminated, reward
166
167
168 def _get_unit_reward(state: State):
169 win_by_one_pair = state._cards[state.current_player] == state._cards[2]
170 lose_by_one_pair = (
171 state._cards[1 - state.current_player] == state._cards[2]
172 )
173 win = win_by_one_pair | (
174 ~lose_by_one_pair
175 & (
176 state._cards[state.current_player]
177 > state._cards[1 - state.current_player]
178 )
179 )
180 reward = jax.lax.select(
181 win,
182 jnp.float32([-1, -1]).at[state.current_player].set(1),
183 jnp.float32([-1, -1]).at[1 - state.current_player].set(1),
184 )
185 return jax.lax.select(
186 state._cards[state.current_player]
187 == state._cards[1 - state.current_player], # Draw
188 jnp.float32([0, 0]),
189 reward,
190 )
191
192
193 def _raise_chips(state):
194 """raise amounts is 2 in the first round and 4 in the second round."""
195 return (state._round + 1) * 2
196
197
198 def _observe(state: State, player_id) -> jnp.ndarray:
199 """
200 Index Meaning
201 0~2 J ~ K in hand
202 3~5 J ~ K as public card
203 6~19 0 ~ 13 chips for the current player
204 20~33 0 ~ 13 chips for the opponent
205 """
206 obs = jnp.zeros(34, dtype=jnp.bool_)
207 obs = obs.at[state._cards[player_id]].set(TRUE)
208 obs = jax.lax.select(
209 state._round == 1, obs.at[3 + state._cards[2]].set(TRUE), obs
210 )
211 obs = obs.at[6 + state._chips[player_id]].set(TRUE)
212 obs = obs.at[20 + state._chips[1 - player_id]].set(TRUE)
213
214 return obs
215
[end of pgx/leduc_holdem.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pgx/leduc_holdem.py b/pgx/leduc_holdem.py
--- a/pgx/leduc_holdem.py
+++ b/pgx/leduc_holdem.py
@@ -147,18 +147,20 @@
def _check_round_over(state, action):
- round_over = (action == FOLD) | (
- (state._last_action != INVALID_ACTION) & (action == CALL)
- )
- terminated = round_over & (state._round == 1)
+ fold = action == FOLD
+ call = (state._last_action != INVALID_ACTION) & (action == CALL)
+ _continue = (state._round == 0) & call
+
+ round_over = fold | call
+ terminated = round_over & (~_continue)
reward = jax.lax.select(
- terminated & (action == FOLD),
+ fold,
jnp.float32([-1, -1]).at[1 - state.current_player].set(1),
jnp.float32([0, 0]),
)
reward = jax.lax.select(
- terminated & (action != FOLD),
+ terminated & call,
_get_unit_reward(state),
reward,
)
|
{"golden_diff": "diff --git a/pgx/leduc_holdem.py b/pgx/leduc_holdem.py\n--- a/pgx/leduc_holdem.py\n+++ b/pgx/leduc_holdem.py\n@@ -147,18 +147,20 @@\n \n \n def _check_round_over(state, action):\n- round_over = (action == FOLD) | (\n- (state._last_action != INVALID_ACTION) & (action == CALL)\n- )\n- terminated = round_over & (state._round == 1)\n+ fold = action == FOLD\n+ call = (state._last_action != INVALID_ACTION) & (action == CALL)\n+ _continue = (state._round == 0) & call\n+\n+ round_over = fold | call\n+ terminated = round_over & (~_continue)\n \n reward = jax.lax.select(\n- terminated & (action == FOLD),\n+ fold,\n jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n jnp.float32([0, 0]),\n )\n reward = jax.lax.select(\n- terminated & (action != FOLD),\n+ terminated & call,\n _get_unit_reward(state),\n reward,\n )\n", "issue": "Termination in Leduc Holdem\nHello, in Leduc Holdem I don't quite understand why doing the `FOLD` action in the first round of betting doesn't terminate the episode.\r\n` terminated = round_over & (state._round == 1)`\r\nSeems to me that's a faulty behavior, please tell me if I miss something. A possible fix could be:\r\n` terminated = (round_over & (state._round == 1)) | (action==FOLD) `\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2023 The Pgx Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport jax\nimport jax.numpy as jnp\n\nimport pgx.v1 as v1\nfrom pgx._src.struct import dataclass\n\nFALSE = jnp.bool_(False)\nTRUE = jnp.bool_(True)\n\nINVALID_ACTION = jnp.int8(-1)\nCALL = jnp.int8(0)\nRAISE = jnp.int8(1)\nFOLD = jnp.int8(2)\n\nMAX_RAISE = jnp.int8(2)\n\n\n@dataclass\nclass State(v1.State):\n current_player: jnp.ndarray = jnp.int8(0)\n observation: jnp.ndarray = jnp.zeros((8, 8, 2), dtype=jnp.bool_)\n rewards: jnp.ndarray = jnp.float32([0.0, 0.0])\n terminated: jnp.ndarray = FALSE\n truncated: jnp.ndarray = FALSE\n legal_action_mask: jnp.ndarray = jnp.ones(3, dtype=jnp.bool_)\n _rng_key: jax.random.KeyArray = jax.random.PRNGKey(0)\n _step_count: jnp.ndarray = jnp.int32(0)\n # --- Leduc Hold'Em specific ---\n _first_player: jnp.ndarray = jnp.int8(0)\n # [(player 0), (player 1), (public)]\n _cards: jnp.ndarray = jnp.int8([-1, -1, -1])\n # 0(Call) 1(Bet) 2(Fold) 3(Check)\n _last_action: jnp.ndarray = INVALID_ACTION\n _chips: jnp.ndarray = jnp.ones(2, dtype=jnp.int8)\n _round: jnp.ndarray = jnp.int8(0)\n _raise_count: jnp.ndarray = jnp.int8(0)\n\n @property\n def env_id(self) -> v1.EnvId:\n return \"leduc_holdem\"\n\n\nclass LeducHoldem(v1.Env):\n def __init__(self):\n super().__init__()\n\n def _init(self, key: jax.random.KeyArray) -> State:\n return _init(key)\n\n def _step(self, state: v1.State, action: jnp.ndarray) -> State:\n assert isinstance(state, State)\n return _step(state, action)\n\n def _observe(self, state: v1.State, player_id: jnp.ndarray) -> jnp.ndarray:\n assert isinstance(state, State)\n return _observe(state, player_id)\n\n @property\n def id(self) -> v1.EnvId:\n return 
\"leduc_holdem\"\n\n @property\n def version(self) -> str:\n return \"beta\"\n\n @property\n def num_players(self) -> int:\n return 2\n\n\ndef _init(rng: jax.random.KeyArray) -> State:\n rng1, rng2, rng3 = jax.random.split(rng, 3)\n current_player = jnp.int8(jax.random.bernoulli(rng1))\n init_card = jax.random.permutation(\n rng2, jnp.int8([0, 0, 1, 1, 2, 2]), independent=True\n )\n return State( # type:ignore\n _rng_key=rng3,\n _first_player=current_player,\n current_player=current_player,\n _cards=init_card[:3],\n legal_action_mask=jnp.bool_([1, 1, 0]),\n _chips=jnp.ones(2, dtype=jnp.int8),\n )\n\n\ndef _step(state: State, action):\n action = jnp.int8(action)\n chips = jax.lax.switch(\n action,\n [\n lambda: state._chips.at[state.current_player].set(\n state._chips[1 - state.current_player]\n ), # CALL\n lambda: state._chips.at[state.current_player].set(\n jnp.max(state._chips) + _raise_chips(state)\n ), # RAISE\n lambda: state._chips, # FOLD\n ],\n )\n\n round_over, terminated, reward = _check_round_over(state, action)\n last_action = jax.lax.select(round_over, INVALID_ACTION, action)\n current_player = jax.lax.select(\n round_over, state._first_player, 1 - state.current_player\n )\n raise_count = jax.lax.select(\n round_over, jnp.int8(0), state._raise_count + jnp.int8(action == RAISE)\n )\n\n reward *= jnp.min(chips)\n\n legal_action = jax.lax.switch(\n action,\n [\n lambda: jnp.bool_([1, 1, 0]), # CALL\n lambda: jnp.bool_([1, 1, 1]), # RAISE\n lambda: jnp.bool_([0, 0, 0]), # FOLD\n ],\n )\n legal_action = legal_action.at[RAISE].set(raise_count < MAX_RAISE)\n\n return state.replace( # type:ignore\n current_player=current_player,\n _last_action=last_action,\n legal_action_mask=legal_action,\n terminated=terminated,\n rewards=reward,\n _round=state._round + jnp.int8(round_over),\n _chips=chips,\n _raise_count=raise_count,\n )\n\n\ndef _check_round_over(state, action):\n round_over = (action == FOLD) | (\n (state._last_action != INVALID_ACTION) & (action == CALL)\n )\n terminated = round_over & (state._round == 1)\n\n reward = jax.lax.select(\n terminated & (action == FOLD),\n jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n jnp.float32([0, 0]),\n )\n reward = jax.lax.select(\n terminated & (action != FOLD),\n _get_unit_reward(state),\n reward,\n )\n return round_over, terminated, reward\n\n\ndef _get_unit_reward(state: State):\n win_by_one_pair = state._cards[state.current_player] == state._cards[2]\n lose_by_one_pair = (\n state._cards[1 - state.current_player] == state._cards[2]\n )\n win = win_by_one_pair | (\n ~lose_by_one_pair\n & (\n state._cards[state.current_player]\n > state._cards[1 - state.current_player]\n )\n )\n reward = jax.lax.select(\n win,\n jnp.float32([-1, -1]).at[state.current_player].set(1),\n jnp.float32([-1, -1]).at[1 - state.current_player].set(1),\n )\n return jax.lax.select(\n state._cards[state.current_player]\n == state._cards[1 - state.current_player], # Draw\n jnp.float32([0, 0]),\n reward,\n )\n\n\ndef _raise_chips(state):\n \"\"\"raise amounts is 2 in the first round and 4 in the second round.\"\"\"\n return (state._round + 1) * 2\n\n\ndef _observe(state: State, player_id) -> jnp.ndarray:\n \"\"\"\n Index Meaning\n 0~2 J ~ K in hand\n 3~5 J ~ K as public card\n 6~19 0 ~ 13 chips for the current player\n 20~33 0 ~ 13 chips for the opponent\n \"\"\"\n obs = jnp.zeros(34, dtype=jnp.bool_)\n obs = obs.at[state._cards[player_id]].set(TRUE)\n obs = jax.lax.select(\n state._round == 1, obs.at[3 + state._cards[2]].set(TRUE), obs\n )\n obs = obs.at[6 + 
state._chips[player_id]].set(TRUE)\n obs = obs.at[20 + state._chips[1 - player_id]].set(TRUE)\n\n return obs\n", "path": "pgx/leduc_holdem.py"}]}
| 3,109 | 282 |
gh_patches_debug_28059
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1058
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IntegrityError with PostgreSQL cache backend
I would like to update the current query to use one of the approaches from http://stackoverflow.com/a/1109198/186202
</issue>
<code>
[start of kinto/core/cache/postgresql/__init__.py]
1 from __future__ import absolute_import
2 from functools import wraps
3
4 import os
5 import time
6
7 from kinto.core import logger
8 from kinto.core.cache import CacheBase
9 from kinto.core.storage.postgresql.client import create_from_config
10 from kinto.core.storage.exceptions import BackendError
11 from kinto.core.utils import json
12
13
14 DELAY_BETWEEN_RETRIES_IN_SECONDS = 0.005
15 MAX_RETRIES = 10
16
17
18 def retry_on_failure(func):
19 @wraps(func)
20 def wraps_func(self, *args, **kwargs):
21 tries = kwargs.pop('tries', 0)
22 try:
23 return func(self, *args, **kwargs)
24 except BackendError as e:
25 if tries < MAX_RETRIES:
26 # Skip delay the 2 first times.
27 delay = max(0, tries - 1) * DELAY_BETWEEN_RETRIES_IN_SECONDS
28 time.sleep(delay)
29 return wraps_func(self, tries=(tries + 1), *args, **kwargs)
30 raise e
31 return wraps_func
32
33
34 class Cache(CacheBase):
35 """Cache backend using PostgreSQL.
36
37 Enable in configuration::
38
39 kinto.cache_backend = kinto.core.cache.postgresql
40
41 Database location URI can be customized::
42
43 kinto.cache_url = postgres://user:[email protected]:5432/dbname
44
45 Alternatively, username and password could also rely on system user ident
46 or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).
47
48 .. note::
49
50 Some tables and indices are created when ``kinto migrate`` is run.
51 This requires some privileges on the database, or some error will
52 be raised.
53
54 **Alternatively**, the schema can be initialized outside the
55 python application, using the SQL file located in
56 :file:`kinto/core/cache/postgresql/schema.sql`. This allows to
57 distinguish schema manipulation privileges from schema usage.
58
59
60 A connection pool is enabled by default::
61
62 kinto.cache_pool_size = 10
63 kinto.cache_maxoverflow = 10
64 kinto.cache_max_backlog = -1
65 kinto.cache_pool_recycle = -1
66 kinto.cache_pool_timeout = 30
67 kinto.cache_poolclass =
68 kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog
69
70 The ``max_backlog`` limits the number of threads that can be in the queue
71 waiting for a connection. Once this limit has been reached, any further
72 attempts to acquire a connection will be rejected immediately, instead of
73 locking up all threads by keeping them waiting in the queue.
74
75 See `dedicated section in SQLAlchemy documentation
76 <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
77 for default values and behaviour.
78
79 .. note::
80
81 Using a `dedicated connection pool <http://pgpool.net>`_ is still
82 recommended to allow load balancing, replication or limit the number
83 of connections used in a multi-process deployment.
84
85 :noindex:
86 """ # NOQA
87 def __init__(self, client, *args, **kwargs):
88 super(Cache, self).__init__(*args, **kwargs)
89 self.client = client
90
91 def initialize_schema(self, dry_run=False):
92 # Check if cache table exists.
93 query = """
94 SELECT 1
95 FROM information_schema.tables
96 WHERE table_name = 'cache';
97 """
98 with self.client.connect(readonly=True) as conn:
99 result = conn.execute(query)
100 if result.rowcount > 0:
101 logger.info("PostgreSQL cache schema is up-to-date.")
102 return
103
104 # Create schema
105 here = os.path.abspath(os.path.dirname(__file__))
106 sql_file = os.path.join(here, 'schema.sql')
107
108 if dry_run:
109 logger.info("Create cache schema from %s" % sql_file)
110 return
111
112 # Since called outside request, force commit.
113 schema = open(sql_file).read()
114 with self.client.connect(force_commit=True) as conn:
115 conn.execute(schema)
116 logger.info('Created PostgreSQL cache tables')
117
118 def flush(self):
119 query = """
120 DELETE FROM cache;
121 """
122 # Since called outside request (e.g. tests), force commit.
123 with self.client.connect(force_commit=True) as conn:
124 conn.execute(query)
125 logger.debug('Flushed PostgreSQL cache tables')
126
127 def ttl(self, key):
128 query = """
129 SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl
130 FROM cache
131 WHERE key = :key
132 AND ttl IS NOT NULL;
133 """
134 with self.client.connect(readonly=True) as conn:
135 result = conn.execute(query, dict(key=self.prefix + key))
136 if result.rowcount > 0:
137 return result.fetchone()['ttl']
138 return -1
139
140 def expire(self, key, ttl):
141 query = """
142 UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;
143 """
144 with self.client.connect() as conn:
145 conn.execute(query, dict(ttl=ttl, key=self.prefix + key))
146
147 @retry_on_failure
148 def set(self, key, value, ttl=None):
149 if ttl is None:
150 logger.warning("No TTL for cache key %r" % key)
151 query = """
152 WITH upsert AS (
153 UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)
154 WHERE key=:key
155 RETURNING *)
156 INSERT INTO cache (key, value, ttl)
157 SELECT :key, :value, sec2ttl(:ttl)
158 WHERE NOT EXISTS (SELECT * FROM upsert)
159 """
160 value = json.dumps(value)
161 with self.client.connect() as conn:
162 conn.execute(query, dict(key=self.prefix + key,
163 value=value, ttl=ttl))
164
165 def get(self, key):
166 purge = "DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;"
167 query = "SELECT value FROM cache WHERE key = :key;"
168 with self.client.connect() as conn:
169 conn.execute(purge)
170 result = conn.execute(query, dict(key=self.prefix + key))
171 if result.rowcount > 0:
172 value = result.fetchone()['value']
173 return json.loads(value)
174
175 def delete(self, key):
176 query = "DELETE FROM cache WHERE key = :key"
177 with self.client.connect() as conn:
178 conn.execute(query, dict(key=self.prefix + key))
179
180
181 def load_from_config(config):
182 settings = config.get_settings()
183 client = create_from_config(config, prefix='cache_', with_transaction=False)
184 return Cache(client=client, cache_prefix=settings['cache_prefix'])
185
[end of kinto/core/cache/postgresql/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py
--- a/kinto/core/cache/postgresql/__init__.py
+++ b/kinto/core/cache/postgresql/__init__.py
@@ -148,19 +148,31 @@
def set(self, key, value, ttl=None):
if ttl is None:
logger.warning("No TTL for cache key %r" % key)
+ # Query for PostgreSQL 9.5+
query = """
+ INSERT INTO cache (key, value, ttl)
+ VALUES (:key, :value, sec2ttl(:ttl))
+ ON CONFLICT (key) DO
+ UPDATE SET value = :value, ttl = sec2ttl(:ttl);
+ """
+ # Query for PostgreSQL 9.4
+ query_pg_94 = """
WITH upsert AS (
UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)
- WHERE key=:key
+ WHERE key = :key
RETURNING *)
INSERT INTO cache (key, value, ttl)
SELECT :key, :value, sec2ttl(:ttl)
- WHERE NOT EXISTS (SELECT * FROM upsert)
+ WHERE NOT EXISTS (SELECT * FROM upsert);
"""
value = json.dumps(value)
+ params = dict(key=self.prefix + key, value=value, ttl=ttl)
with self.client.connect() as conn:
- conn.execute(query, dict(key=self.prefix + key,
- value=value, ttl=ttl))
+ server_version = conn.connection().dialect.server_version_info
+ if server_version >= (9, 5):
+ conn.execute(query, params)
+ else:
+ conn.execute(query_pg_94, params)
def get(self, key):
purge = "DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;"
|
{"golden_diff": "diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py\n--- a/kinto/core/cache/postgresql/__init__.py\n+++ b/kinto/core/cache/postgresql/__init__.py\n@@ -148,19 +148,31 @@\n def set(self, key, value, ttl=None):\n if ttl is None:\n logger.warning(\"No TTL for cache key %r\" % key)\n+ # Query for PostgreSQL 9.5+\n query = \"\"\"\n+ INSERT INTO cache (key, value, ttl)\n+ VALUES (:key, :value, sec2ttl(:ttl))\n+ ON CONFLICT (key) DO\n+ UPDATE SET value = :value, ttl = sec2ttl(:ttl);\n+ \"\"\"\n+ # Query for PostgreSQL 9.4\n+ query_pg_94 = \"\"\"\n WITH upsert AS (\n UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)\n- WHERE key=:key\n+ WHERE key = :key\n RETURNING *)\n INSERT INTO cache (key, value, ttl)\n SELECT :key, :value, sec2ttl(:ttl)\n- WHERE NOT EXISTS (SELECT * FROM upsert)\n+ WHERE NOT EXISTS (SELECT * FROM upsert);\n \"\"\"\n value = json.dumps(value)\n+ params = dict(key=self.prefix + key, value=value, ttl=ttl)\n with self.client.connect() as conn:\n- conn.execute(query, dict(key=self.prefix + key,\n- value=value, ttl=ttl))\n+ server_version = conn.connection().dialect.server_version_info\n+ if server_version >= (9, 5):\n+ conn.execute(query, params)\n+ else:\n+ conn.execute(query_pg_94, params)\n \n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n", "issue": "IntegrityError with Posgresql cache backend\nI would like to update the current request with one of http://stackoverflow.com/a/1109198/186202\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom functools import wraps\n\nimport os\nimport time\n\nfrom kinto.core import logger\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.storage.exceptions import BackendError\nfrom kinto.core.utils import json\n\n\nDELAY_BETWEEN_RETRIES_IN_SECONDS = 0.005\nMAX_RETRIES = 10\n\n\ndef retry_on_failure(func):\n @wraps(func)\n def wraps_func(self, *args, **kwargs):\n tries = kwargs.pop('tries', 0)\n try:\n return func(self, *args, **kwargs)\n except BackendError as e:\n if tries < MAX_RETRIES:\n # Skip delay the 2 first times.\n delay = max(0, tries - 1) * DELAY_BETWEEN_RETRIES_IN_SECONDS\n time.sleep(delay)\n return wraps_func(self, tries=(tries + 1), *args, **kwargs)\n raise e\n return wraps_func\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. 
This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super(Cache, self).__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from %s\" % sql_file)\n return\n\n # Since called outside request, force commit.\n schema = open(sql_file).read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. 
tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n @retry_on_failure\n def set(self, key, value, ttl=None):\n if ttl is None:\n logger.warning(\"No TTL for cache key %r\" % key)\n query = \"\"\"\n WITH upsert AS (\n UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)\n WHERE key=:key\n RETURNING *)\n INSERT INTO cache (key, value, ttl)\n SELECT :key, :value, sec2ttl(:ttl)\n WHERE NOT EXISTS (SELECT * FROM upsert)\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n query = \"SELECT value FROM cache WHERE key = :key;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key\"\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key))\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}]}
| 2,475 | 418 |
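The patch above switches the cache upsert to PostgreSQL's native `INSERT ... ON CONFLICT` on servers that support it (9.5 and later), which avoids the duplicate-key race the CTE form can hit under concurrent writers, while keeping the old statement as a 9.4 fallback. The two statements are reproduced from the diff below as plain string constants for readability; the constant names are just for this sketch:

```python
# Upsert used on PostgreSQL 9.5+: a single atomic statement.
QUERY_PG_95 = """
INSERT INTO cache (key, value, ttl)
VALUES (:key, :value, sec2ttl(:ttl))
ON CONFLICT (key) DO
UPDATE SET value = :value, ttl = sec2ttl(:ttl);
"""

# Fallback kept for PostgreSQL 9.4, where ON CONFLICT is not available.
QUERY_PG_94 = """
WITH upsert AS (
    UPDATE cache SET value = :value, ttl = sec2ttl(:ttl)
    WHERE key = :key
    RETURNING *)
INSERT INTO cache (key, value, ttl)
SELECT :key, :value, sec2ttl(:ttl)
WHERE NOT EXISTS (SELECT * FROM upsert);
"""
```

The patched `set()` picks between them at runtime by inspecting `conn.connection().dialect.server_version_info`.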
gh_patches_debug_22662
|
rasdani/github-patches
|
git_diff
|
psf__black-2839
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve documentation for configuration options
Currently, our config options are documented only in a collapsed-by-default text block in https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#command-line-options. This is not very discoverable and makes it hard to give more detailed documentation, such as examples.
Instead, we should have a docs page with a separate section for each option. We can start with the existing descriptions, and extend them as needed for options with more complicated behavior.
</issue>
<code>
[start of scripts/check_version_in_basics_example.py]
1 """
2 Check that the rev value in the example from ``the_basics.md`` matches
3 the latest version of Black. This saves us from forgetting to update that
4 during the release process.
5 """
6
7 import os
8 import sys
9
10 import commonmark
11 from bs4 import BeautifulSoup
12
13
14 def main(changes: str, the_basics: str) -> None:
15 changes_html = commonmark.commonmark(changes)
16 changes_soup = BeautifulSoup(changes_html, "html.parser")
17 headers = changes_soup.find_all("h2")
18 tags = [header.string for header in headers if header.string != "Unreleased"]
19 latest_tag = tags[0]
20
21 the_basics_html = commonmark.commonmark(the_basics)
22 the_basics_soup = BeautifulSoup(the_basics_html, "html.parser")
23 (version_example,) = [
24 code_block.string
25 for code_block in the_basics_soup.find_all(class_="language-console")
26 if "$ black --version" in code_block.string
27 ]
28
29 for tag in tags:
30 if tag in version_example and tag != latest_tag:
31 print(
32 "Please set the version in the ``black --version`` "
33 "example from ``the_basics.md`` to be the latest one.\n"
34 f"Expected {latest_tag}, got {tag}.\n"
35 )
36 sys.exit(1)
37
38
39 if __name__ == "__main__":
40 with open("CHANGES.md", encoding="utf-8") as fd:
41 changes = fd.read()
42 with open(
43 os.path.join("docs", "usage_and_configuration", "the_basics.md"),
44 encoding="utf-8",
45 ) as fd:
46 the_basics = fd.read()
47 main(changes, the_basics)
48
[end of scripts/check_version_in_basics_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py
--- a/scripts/check_version_in_basics_example.py
+++ b/scripts/check_version_in_basics_example.py
@@ -20,20 +20,21 @@
the_basics_html = commonmark.commonmark(the_basics)
the_basics_soup = BeautifulSoup(the_basics_html, "html.parser")
- (version_example,) = [
+ version_examples = [
code_block.string
for code_block in the_basics_soup.find_all(class_="language-console")
if "$ black --version" in code_block.string
]
for tag in tags:
- if tag in version_example and tag != latest_tag:
- print(
- "Please set the version in the ``black --version`` "
- "example from ``the_basics.md`` to be the latest one.\n"
- f"Expected {latest_tag}, got {tag}.\n"
- )
- sys.exit(1)
+ for version_example in version_examples:
+ if tag in version_example and tag != latest_tag:
+ print(
+ "Please set the version in the ``black --version`` "
+ "examples from ``the_basics.md`` to be the latest one.\n"
+ f"Expected {latest_tag}, got {tag}.\n"
+ )
+ sys.exit(1)
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py\n--- a/scripts/check_version_in_basics_example.py\n+++ b/scripts/check_version_in_basics_example.py\n@@ -20,20 +20,21 @@\n \n the_basics_html = commonmark.commonmark(the_basics)\n the_basics_soup = BeautifulSoup(the_basics_html, \"html.parser\")\n- (version_example,) = [\n+ version_examples = [\n code_block.string\n for code_block in the_basics_soup.find_all(class_=\"language-console\")\n if \"$ black --version\" in code_block.string\n ]\n \n for tag in tags:\n- if tag in version_example and tag != latest_tag:\n- print(\n- \"Please set the version in the ``black --version`` \"\n- \"example from ``the_basics.md`` to be the latest one.\\n\"\n- f\"Expected {latest_tag}, got {tag}.\\n\"\n- )\n- sys.exit(1)\n+ for version_example in version_examples:\n+ if tag in version_example and tag != latest_tag:\n+ print(\n+ \"Please set the version in the ``black --version`` \"\n+ \"examples from ``the_basics.md`` to be the latest one.\\n\"\n+ f\"Expected {latest_tag}, got {tag}.\\n\"\n+ )\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\":\n", "issue": "Improve documentation for configuration options\nCurrently, our config options are documented only in a collapsed-by-default text block in https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#command-line-options. This is not very discoverable and makes it hard to give more detailed documentation, such as examples.\r\n\r\nInstead, we should have a docs page with a separate section for each option. We can start with the existing descriptions, and extend them as needed for options with more complicated behavior.\n", "before_files": [{"content": "\"\"\"\nCheck that the rev value in the example from ``the_basics.md`` matches\nthe latest version of Black. This saves us from forgetting to update that\nduring the release process.\n\"\"\"\n\nimport os\nimport sys\n\nimport commonmark\nfrom bs4 import BeautifulSoup\n\n\ndef main(changes: str, the_basics: str) -> None:\n changes_html = commonmark.commonmark(changes)\n changes_soup = BeautifulSoup(changes_html, \"html.parser\")\n headers = changes_soup.find_all(\"h2\")\n tags = [header.string for header in headers if header.string != \"Unreleased\"]\n latest_tag = tags[0]\n\n the_basics_html = commonmark.commonmark(the_basics)\n the_basics_soup = BeautifulSoup(the_basics_html, \"html.parser\")\n (version_example,) = [\n code_block.string\n for code_block in the_basics_soup.find_all(class_=\"language-console\")\n if \"$ black --version\" in code_block.string\n ]\n\n for tag in tags:\n if tag in version_example and tag != latest_tag:\n print(\n \"Please set the version in the ``black --version`` \"\n \"example from ``the_basics.md`` to be the latest one.\\n\"\n f\"Expected {latest_tag}, got {tag}.\\n\"\n )\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n with open(\"CHANGES.md\", encoding=\"utf-8\") as fd:\n changes = fd.read()\n with open(\n os.path.join(\"docs\", \"usage_and_configuration\", \"the_basics.md\"),\n encoding=\"utf-8\",\n ) as fd:\n the_basics = fd.read()\n main(changes, the_basics)\n", "path": "scripts/check_version_in_basics_example.py"}]}
| 1,097 | 318 |
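The script change in this record is small but load-bearing: once the reworked documentation contains more than one `$ black --version` console example, the original one-element destructuring assignment fails. A short illustration of the failure mode and of the patched iteration, using made-up example strings:

```python
# Hypothetical console blocks standing in for the parsed documentation.
code_blocks = [
    "$ black --version\nblack, version 22.1.0",
    "$ black --version\nblack, version 22.1.0",
]
version_examples = [b for b in code_blocks if "$ black --version" in b]

# The original script unpacked exactly one match and would now raise
# "ValueError: too many values to unpack (expected 1)":
#   (version_example,) = version_examples

# The patched script checks every matching example instead:
for version_example in version_examples:
    assert "22.1.0" in version_example
```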
gh_patches_debug_738
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-7766
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Required pyparsing version
I've been experimenting with writing tests using the oldest allowed versions of our Python dependencies. `setup.py` for `letsencrypt-nginx` says it requires `pyparsing>=1.5.5` but when I pin version 1.5.5, I encounter problems. You can see Travis logs of the issue [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739657) and [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739658).
We should determine what version we require and update `setup.py` accordingly.
</issue>
<code>
[start of certbot-nginx/setup.py]
1 import sys
2
3 from setuptools import find_packages
4 from setuptools import setup
5 from setuptools.command.test import test as TestCommand
6
7 version = '1.3.0.dev0'
8
9 # Remember to update local-oldest-requirements.txt when changing the minimum
10 # acme/certbot version.
11 install_requires = [
12 'acme>=1.0.0',
13 'certbot>=1.1.0',
14 'mock',
15 'PyOpenSSL',
16 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
17 'setuptools',
18 'zope.interface',
19 ]
20
21
22 class PyTest(TestCommand):
23 user_options = []
24
25 def initialize_options(self):
26 TestCommand.initialize_options(self)
27 self.pytest_args = ''
28
29 def run_tests(self):
30 import shlex
31 # import here, cause outside the eggs aren't loaded
32 import pytest
33 errno = pytest.main(shlex.split(self.pytest_args))
34 sys.exit(errno)
35
36
37 setup(
38 name='certbot-nginx',
39 version=version,
40 description="Nginx plugin for Certbot",
41 url='https://github.com/letsencrypt/letsencrypt',
42 author="Certbot Project",
43 author_email='[email protected]',
44 license='Apache License 2.0',
45 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
46 classifiers=[
47 'Development Status :: 5 - Production/Stable',
48 'Environment :: Plugins',
49 'Intended Audience :: System Administrators',
50 'License :: OSI Approved :: Apache Software License',
51 'Operating System :: POSIX :: Linux',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 'Programming Language :: Python :: 3.7',
59 'Programming Language :: Python :: 3.8',
60 'Topic :: Internet :: WWW/HTTP',
61 'Topic :: Security',
62 'Topic :: System :: Installation/Setup',
63 'Topic :: System :: Networking',
64 'Topic :: System :: Systems Administration',
65 'Topic :: Utilities',
66 ],
67
68 packages=find_packages(),
69 include_package_data=True,
70 install_requires=install_requires,
71 entry_points={
72 'certbot.plugins': [
73 'nginx = certbot_nginx._internal.configurator:NginxConfigurator',
74 ],
75 },
76 test_suite='certbot_nginx',
77 tests_require=["pytest"],
78 cmdclass={"test": PyTest},
79 )
80
[end of certbot-nginx/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py
--- a/certbot-nginx/setup.py
+++ b/certbot-nginx/setup.py
@@ -13,7 +13,7 @@
'certbot>=1.1.0',
'mock',
'PyOpenSSL',
- 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
+ 'pyparsing>=1.5.5', # Python3 support
'setuptools',
'zope.interface',
]
|
{"golden_diff": "diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py\n--- a/certbot-nginx/setup.py\n+++ b/certbot-nginx/setup.py\n@@ -13,7 +13,7 @@\n 'certbot>=1.1.0',\n 'mock',\n 'PyOpenSSL',\n- 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n+ 'pyparsing>=1.5.5', # Python3 support\n 'setuptools',\n 'zope.interface',\n ]\n", "issue": "Required pyparsing version\nI've been experimenting with writing tests using the oldest allowed versions of our Python dependencies. `setup.py` for `letsencrypt-nginx` says it requires `pyparsing>=1.5.5` but when I pin version 1.5.5, I encounter problems. You can see Travis logs of the issue [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739657) and [here](https://travis-ci.org/letsencrypt/letsencrypt/jobs/100739658).\n\nWe should determine what version we require and update `setup.py` accordingly.\n\n", "before_files": [{"content": "import sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\nversion = '1.3.0.dev0'\n\n# Remember to update local-oldest-requirements.txt when changing the minimum\n# acme/certbot version.\ninstall_requires = [\n 'acme>=1.0.0',\n 'certbot>=1.1.0',\n 'mock',\n 'PyOpenSSL',\n 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n 'setuptools',\n 'zope.interface',\n]\n\n\nclass PyTest(TestCommand):\n user_options = []\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = ''\n\n def run_tests(self):\n import shlex\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\nsetup(\n name='certbot-nginx',\n version=version,\n description=\"Nginx plugin for Certbot\",\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n entry_points={\n 'certbot.plugins': [\n 'nginx = certbot_nginx._internal.configurator:NginxConfigurator',\n ],\n },\n test_suite='certbot_nginx',\n tests_require=[\"pytest\"],\n cmdclass={\"test\": PyTest},\n)\n", "path": "certbot-nginx/setup.py"}]}
| 1,415 | 126 |
gh_patches_debug_10738
|
rasdani/github-patches
|
git_diff
|
piskvorky__gensim-1071
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unpredictable ZeroDivisionErrors in direct_confirmation_measure
Hi guys,
first of all a big thank you for your tremendous work.
I am trying to train a LdaMulticore model guided by a coherence measure (UMASS). However I face the following problem:
- A ZeroDivisionError is thrown sporadically (both with the training and the test set, but definitely more frequently with the latter one). This makes it impossible to compare the coherence measure across a range of topic counts.
```
num_docs = len(corpus)
split = train_perc * num_docs // 100
indices = list(range(num_docs))
random.shuffle(indices)
# Split into two sets and DISCARD empty documents
train_corpus = [corpus[i] for i in indices[:split] if corpus[i]]
test_corpus = [corpus[i] for i in indices[split:] if corpus[i]]
# Number of passes (heuristic) (e.g. min_runs = 10)
num_passes = max(num_topics, min_runs)
lda_model = models.LdaMulticore(train_corpus,
id2word=dictionary,
batch=True,
num_topics=num_topics,
alpha="asymmetric",
passes=num_passes)
if metric == 'coherence':
left_score = CoherenceModel(model=lda_model,
corpus=test_corpus,
dictionary=dictionary,
coherence="u_mass").get_coherence()
```
Error log:
```
Traceback (most recent call last):
File "DataGenerator.py", line 198, in <module>
main()
File "DataGenerator.py", line 45, in main
lda_model_file=OUTPUT_LDA_MODEL)
File "/cluster/home/lennartv/ma/python/deep/preprocessing/Projector.py", line 217, in create_projection
num_topics, lda_model = optimize_lda(corpus, dictionary)
File "/cluster/home/lennartv/ma/python/deep/preprocessing/Projector.py", line 107, in optimize_lda
coherence="u_mass").get_coherence()
File "/cluster/home/lennartv/.local/lib64/python3.4/site-packages/gensim/models/coherencemodel.py", line 202, in get_coherence
confirmed_measures = measure.conf(segmented_topics, per_topic_postings, num_docs)
File "/cluster/home/lennartv/.local/lib64/python3.4/site-packages/gensim/topic_coherence/direct_confirmation_measure.py", line 40, in log_conditional_probability
m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))
ZeroDivisionError: float division by zero
```
I tried to assess the problem and I noticed the following things:
- There seems to be a connection to how well the LDA algorithm converged. Varying the number of passes can mitigate the problem.
I don't have enough insight into the algorithm, but the bug seems to stem from the fact that `w_star_docs` in the following code block can become an empty set under certain circumstances:
```
m_lc = []
for s_i in segmented_topics:
for w_prime, w_star in s_i:
w_prime_docs = per_topic_postings[w_prime]
w_star_docs = per_topic_postings[w_star]
co_docs = w_prime_docs.intersection(w_star_docs)
m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))
m_lc.append(m_lc_i)
return m_lc
```
Thanks in advance for your efforts,
Lennart
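
The failure is easy to trigger in isolation. The Python sketch below uses toy posting lists (made-up values, not taken from the report) and hits the same `float division by zero` as soon as the posting set for `w_star` is empty:

```
# Minimal reproduction: an empty posting set for w_star makes the denominator
# len(w_star_docs) / float(num_docs) equal to 0.0, so the division raises
# before np.log is ever evaluated.
import numpy as np

EPSILON = 1e-12
num_docs = 10
w_prime_docs = {0, 3, 7}   # documents containing w_prime
w_star_docs = set()        # w_star occurs in no document

co_docs = w_prime_docs.intersection(w_star_docs)
try:
    np.log(((len(co_docs) / float(num_docs)) + EPSILON)
           / (len(w_star_docs) / float(num_docs)))
except ZeroDivisionError as err:
    print("reproduced:", err)
```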
</issue>
<code>
[start of gensim/topic_coherence/direct_confirmation_measure.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2013 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 This module contains functions to compute direct confirmation on a pair of words or word subsets.
9 """
10
11 import logging
12 import numpy as np
13
14 logger = logging.getLogger(__name__)
15
16 EPSILON = 1e-12 # Should be small. Value as suggested in paper.
17
18 def log_conditional_probability(segmented_topics, per_topic_postings, num_docs):
19 """
20 This function calculates the log-conditional-probability measure
21 which is used by coherence measures such as U_mass.
22 This is defined as: m_lc(S_i) = log[(P(W', W*) + e) / P(W*)]
23
24 Args:
25 ----
26 segmented_topics : Output from the segmentation module of the segmented topics. Is a list of list of tuples.
27 per_topic_postings : Output from the probability_estimation module. Is a dictionary of the posting list of all topics.
28 num_docs : Total number of documents in corresponding corpus.
29
30 Returns:
31 -------
32 m_lc : List of log conditional probability measure on each set in segmented topics.
33 """
34 m_lc = []
35 for s_i in segmented_topics:
36 for w_prime, w_star in s_i:
37 w_prime_docs = per_topic_postings[w_prime]
38 w_star_docs = per_topic_postings[w_star]
39 co_docs = w_prime_docs.intersection(w_star_docs)
40 m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))
41 m_lc.append(m_lc_i)
42
43 return m_lc
44
45 def log_ratio_measure(segmented_topics, per_topic_postings, num_docs, normalize=False):
46 """
47 If normalize=False:
48 Popularly known as PMI.
49 This function calculates the log-ratio-measure which is used by
50 coherence measures such as c_v.
51 This is defined as: m_lr(S_i) = log[(P(W', W*) + e) / (P(W') * P(W*))]
52
53 If normalize=True:
54 This function calculates the normalized-log-ratio-measure, popularly knowns as
55 NPMI which is used by coherence measures such as c_v.
56 This is defined as: m_nlr(S_i) = m_lr(S_i) / -log[P(W', W*) + e]
57
58 Args:
59 ----
60 segmented topics : Output from the segmentation module of the segmented topics. Is a list of list of tuples.
61 per_topic_postings : Output from the probability_estimation module. Is a dictionary of the posting list of all topics
62 num_docs : Total number of documents in corpus. Used for calculating probability.
63
64 Returns:
65 -------
66 m_lr : List of log ratio measures on each set in segmented topics.
67 """
68 m_lr = []
69 for s_i in segmented_topics:
70 for w_prime, w_star in s_i:
71 w_prime_docs = per_topic_postings[w_prime]
72 w_star_docs = per_topic_postings[w_star]
73 co_docs = w_prime_docs.intersection(w_star_docs)
74 if normalize:
75 # For normalized log ratio measure
76 numerator = log_ratio_measure([[(w_prime, w_star)]], per_topic_postings, num_docs)[0]
77 co_doc_prob = len(co_docs) / float(num_docs)
78 m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON))
79 else:
80 # For log ratio measure without normalization
81 numerator = (len(co_docs) / float(num_docs)) + EPSILON
82 denominator = (len(w_prime_docs) / float(num_docs)) * (len(w_star_docs) / float(num_docs))
83 m_lr_i = np.log(numerator / denominator)
84 m_lr.append(m_lr_i)
85
86 return m_lr
87
[end of gensim/topic_coherence/direct_confirmation_measure.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gensim/topic_coherence/direct_confirmation_measure.py b/gensim/topic_coherence/direct_confirmation_measure.py
--- a/gensim/topic_coherence/direct_confirmation_measure.py
+++ b/gensim/topic_coherence/direct_confirmation_measure.py
@@ -37,7 +37,10 @@
w_prime_docs = per_topic_postings[w_prime]
w_star_docs = per_topic_postings[w_star]
co_docs = w_prime_docs.intersection(w_star_docs)
- m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))
+ if w_star_docs:
+ m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))
+ else:
+ m_lc_i = 0
m_lc.append(m_lc_i)
return m_lc
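
With the guard in place, a pair whose `w_star` posting set is empty contributes 0 instead of raising. A small usage sketch against the patched module (toy inputs, shaped like the real segmented-topics and posting-list structures):

```
# Assumes the patch above has been applied to the installed gensim.
from gensim.topic_coherence.direct_confirmation_measure import (
    log_conditional_probability,
)

segmented_topics = [[(1, 2)]]                    # a single (w_prime, w_star) pair
per_topic_postings = {1: {0, 3, 7}, 2: set()}    # w_star has an empty posting set
print(log_conditional_probability(segmented_topics, per_topic_postings, 10))
# expected: [0]
```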
|
{"golden_diff": "diff --git a/gensim/topic_coherence/direct_confirmation_measure.py b/gensim/topic_coherence/direct_confirmation_measure.py\n--- a/gensim/topic_coherence/direct_confirmation_measure.py\n+++ b/gensim/topic_coherence/direct_confirmation_measure.py\n@@ -37,7 +37,10 @@\n w_prime_docs = per_topic_postings[w_prime]\n w_star_docs = per_topic_postings[w_star]\n co_docs = w_prime_docs.intersection(w_star_docs)\n- m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))\n+ if w_star_docs:\n+ m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))\n+ else:\n+ m_lc_i = 0\n m_lc.append(m_lc_i)\n \n return m_lc\n", "issue": "Unpredictable ZeroDivisionErrors in direct_confirmation_measure\nHi guys,\r\n\r\nfirst of all a big thank you for your tremendous work.\r\nI am trying to train a LdaMulticore model guided by a coherence measure (UMASS). However I face the following problem:\r\n\r\n- A ZeroDivisionError is thrown sporadically (both with the training and test set set, but definitely more frequently with the latter one). This makes it impossible to compare the coherence measure over a range of number of topics.\r\n\r\n```\r\n num_docs = len(corpus)\r\n split = train_perc * num_docs // 100\r\n indices = list(range(num_docs))\r\n random.shuffle(indices)\r\n \r\n # Split into two sets and DISCARD empty documents\r\n train_corpus = [corpus[i] for i in indices[:split] if corpus[i]]\r\n test_corpus = [corpus[i] for i in indices[split:] if corpus[i]]\r\n\r\n # Number of passes (heuristic) (e.g. min_runs = 10)\r\n num_passes = max(num_topics, min_runs)\r\n\r\n lda_model = models.LdaMulticore(train_corpus,\r\n id2word=dictionary,\r\n batch=True,\r\n num_topics=num_topics,\r\n alpha=\"asymmetric\",\r\n passes=num_passes)\r\n\r\n if metric == 'coherence':\r\n left_score = CoherenceModel(model=lda_model,\r\n corpus=test_corpus,\r\n dictionary=dictionary,\r\n coherence=\"u_mass\").get_coherence()\r\n\r\n\r\n\r\n```\r\n\r\nError log:\r\n```\r\nTraceback (most recent call last):\r\nFile \"DataGenerator.py\", line 198, in <module>\r\nmain()\r\nFile \"DataGenerator.py\", line 45, in main\r\nlda_model_file=OUTPUT_LDA_MODEL)\r\nFile \"/cluster/home/lennartv/ma/python/deep/preprocessing/Projector.py\", line 217, in create_projection\r\nnum_topics, lda_model = optimize_lda(corpus, dictionary)\r\nFile \"/cluster/home/lennartv/ma/python/deep/preprocessing/Projector.py\", line 107, in optimize_lda\r\ncoherence=\"u_mass\").get_coherence()\r\nFile \"/cluster/home/lennartv/.local/lib64/python3.4/site-packages/gensim/models/coherencemodel.py\", line 202, in get_coherence\r\nconfirmed_measures = measure.conf(segmented_topics, per_topic_postings, num_docs)\r\nFile \"/cluster/home/lennartv/.local/lib64/python3.4/site-packages/gensim/topic_coherence/direct_confirmation_measure.py\", line 40, in log_conditional_probability\r\nm_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))\r\nZeroDivisionError: float division by zero\r\n```\r\n\r\nI tried to assess the problem and I noticed the following things:\r\n\r\n- There seems to be a connection to how well the LDA algorithm converged. 
Varying the number of passes can mitigate the problem.\r\n\r\nI don't have enough insight in the algorithm but the bug seems to stem from the fact that `w_star_docs` in the following code block can become an empty set under certain circumstances:\r\n\r\n```\r\n m_lc = []\r\n for s_i in segmented_topics:\r\n for w_prime, w_star in s_i:\r\n w_prime_docs = per_topic_postings[w_prime]\r\n w_star_docs = per_topic_postings[w_star]\r\n co_docs = w_prime_docs.intersection(w_star_docs)\r\n m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))\r\n m_lc.append(m_lc_i)\r\n\r\n return m_lc\r\n```\r\n\r\nThanks in advance for your efforts,\r\n Lennart\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module contains functions to compute direct confirmation on a pair of words or word subsets.\n\"\"\"\n\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nEPSILON = 1e-12 # Should be small. Value as suggested in paper.\n\ndef log_conditional_probability(segmented_topics, per_topic_postings, num_docs):\n \"\"\"\n This function calculates the log-conditional-probability measure\n which is used by coherence measures such as U_mass.\n This is defined as: m_lc(S_i) = log[(P(W', W*) + e) / P(W*)]\n\n Args:\n ----\n segmented_topics : Output from the segmentation module of the segmented topics. Is a list of list of tuples.\n per_topic_postings : Output from the probability_estimation module. Is a dictionary of the posting list of all topics.\n num_docs : Total number of documents in corresponding corpus.\n\n Returns:\n -------\n m_lc : List of log conditional probability measure on each set in segmented topics.\n \"\"\"\n m_lc = []\n for s_i in segmented_topics:\n for w_prime, w_star in s_i:\n w_prime_docs = per_topic_postings[w_prime]\n w_star_docs = per_topic_postings[w_star]\n co_docs = w_prime_docs.intersection(w_star_docs)\n m_lc_i = np.log(((len(co_docs) / float(num_docs)) + EPSILON) / (len(w_star_docs) / float(num_docs)))\n m_lc.append(m_lc_i)\n\n return m_lc\n\ndef log_ratio_measure(segmented_topics, per_topic_postings, num_docs, normalize=False):\n \"\"\"\n If normalize=False:\n Popularly known as PMI.\n This function calculates the log-ratio-measure which is used by\n coherence measures such as c_v.\n This is defined as: m_lr(S_i) = log[(P(W', W*) + e) / (P(W') * P(W*))]\n\n If normalize=True:\n This function calculates the normalized-log-ratio-measure, popularly knowns as\n NPMI which is used by coherence measures such as c_v.\n This is defined as: m_nlr(S_i) = m_lr(S_i) / -log[P(W', W*) + e]\n\n Args:\n ----\n segmented topics : Output from the segmentation module of the segmented topics. Is a list of list of tuples.\n per_topic_postings : Output from the probability_estimation module. Is a dictionary of the posting list of all topics\n num_docs : Total number of documents in corpus. 
Used for calculating probability.\n\n Returns:\n -------\n m_lr : List of log ratio measures on each set in segmented topics.\n \"\"\"\n m_lr = []\n for s_i in segmented_topics:\n for w_prime, w_star in s_i:\n w_prime_docs = per_topic_postings[w_prime]\n w_star_docs = per_topic_postings[w_star]\n co_docs = w_prime_docs.intersection(w_star_docs)\n if normalize:\n # For normalized log ratio measure\n numerator = log_ratio_measure([[(w_prime, w_star)]], per_topic_postings, num_docs)[0]\n co_doc_prob = len(co_docs) / float(num_docs)\n m_lr_i = numerator / (-np.log(co_doc_prob + EPSILON))\n else:\n # For log ratio measure without normalization\n numerator = (len(co_docs) / float(num_docs)) + EPSILON\n denominator = (len(w_prime_docs) / float(num_docs)) * (len(w_star_docs) / float(num_docs))\n m_lr_i = np.log(numerator / denominator)\n m_lr.append(m_lr_i)\n\n return m_lr\n", "path": "gensim/topic_coherence/direct_confirmation_measure.py"}]}
| 2,354 | 208 |
gh_patches_debug_36846
|
rasdani/github-patches
|
git_diff
|
NVIDIA__NVFlare-1224
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Potential bug in SVT?
Hi all,
is it intended that in [line 103](https://github.com/NVIDIA/NVFlare/blob/e5e8c15a3455c81cb16268fbaafeef3f26d2b195/nvflare/app_common/filters/svt_privacy.py#L103) of the SVTPrivacy class, only a random choice of the accepted parameters is taken, with replacement? This leads to a lower number of accepted and candidate parameters ([line 108](https://github.com/NVIDIA/NVFlare/blob/e5e8c15a3455c81cb16268fbaafeef3f26d2b195/nvflare/app_common/filters/svt_privacy.py#L108)) than originally computed.
So even if I run SVT with a fraction of 1.0, not all of my noisy values (3,491,530) are transferred; around 1,284,489 of them are set to 0.
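
The numbers above are consistent with sampling with replacement: drawing `n` indices from a pool of `n` with replacement keeps only about 63% of them unique, so roughly 37% of the parameters end up zeroed. A standalone NumPy illustration (not NVFlare code):

```
# replace=True (the current behaviour at line 103) produces duplicate indices,
# so far fewer than n_upload distinct parameters survive; replace=False keeps all.
import numpy as np

accepted = np.arange(3_491_530)                            # pretend every parameter was accepted
picked = np.random.choice(accepted, size=accepted.size)    # replace defaults to True
kept = np.unique(picked).size
print(kept, accepted.size - kept)                          # roughly 2.2M kept, about 1.28M zeroed
print(np.unique(np.random.choice(accepted, size=accepted.size, replace=False)).size)
```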
</issue>
<code>
[start of nvflare/app_common/filters/svt_privacy.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Union
16
17 import numpy as np
18
19 from nvflare.apis.dxo import DXO, DataKind, MetaKey
20 from nvflare.apis.dxo_filter import DXOFilter
21 from nvflare.apis.fl_context import FLContext
22 from nvflare.apis.shareable import Shareable
23
24
25 class SVTPrivacy(DXOFilter):
26 def __init__(self, fraction=0.1, epsilon=0.1, noise_var=0.1, gamma=1e-5, tau=1e-6, data_kinds: [str] = None):
27 """Implementation of the standard Sparse Vector Technique (SVT) differential privacy algorithm.
28
29 lambda_rho = gamma * 2.0 / epsilon
30 threshold = tau + np.random.laplace(scale=lambda_rho)
31
32 Args:
33 fraction (float, optional): used to determine dataset threshold. Defaults to 0.1.
34 epsilon (float, optional): Defaults to 0.1.
35 noise_var (float, optional): additive noise. Defaults to 0.1.
36 gamma (float, optional): Defaults to 1e-5.
37 tau (float, optional): Defaults to 1e-6.
38 """
39 if not data_kinds:
40 data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]
41
42 super().__init__(supported_data_kinds=[DataKind.WEIGHTS, DataKind.WEIGHT_DIFF], data_kinds_to_filter=data_kinds)
43
44 self.frac = fraction # fraction of the model to upload
45 self.eps_1 = epsilon
46 self.eps_2 = None # to be derived from eps_1
47 self.eps_3 = noise_var
48 self.gamma = gamma
49 self.tau = tau
50
51 def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
52 """Compute the differentially private SVT.
53
54 Args:
55 dxo: information from client
56 shareable: that the dxo belongs to
57 fl_ctx: context provided by workflow
58
59 Returns: filtered result.
60 """
61 self.log_debug(fl_ctx, "inside filter")
62 model_diff = dxo.data
63 total_steps = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, 1)
64
65 delta_w = np.concatenate([model_diff[name].ravel() / np.float(total_steps) for name in sorted(model_diff)])
66 self.log_info(
67 fl_ctx,
68 "Delta_w: Max abs: {}, Min abs: {}, Median abs: {}.".format(
69 np.max(np.abs(delta_w)), np.min(np.abs(delta_w)), np.median(np.abs(delta_w))
70 ),
71 )
72
73 # precompute thresholds
74 n_upload = np.minimum(np.ceil(np.float(delta_w.size) * self.frac), np.float(delta_w.size))
75
76 # eps_1: threshold with noise
77 lambda_rho = self.gamma * 2.0 / self.eps_1
78 threshold = self.tau + np.random.laplace(scale=lambda_rho)
79 # eps_2: query with noise
80 self.eps_2 = self.eps_1 * (2.0 * n_upload) ** (2.0 / 3.0)
81 lambda_nu = self.gamma * 4.0 * n_upload / self.eps_2
82 self.logger.info(
83 "total params: %s, epsilon: %s, "
84 "perparam budget %s, threshold tau: %s + f(eps_1) = %s, "
85 "clip gamma: %s",
86 delta_w.size,
87 self.eps_1,
88 self.eps_1 / n_upload,
89 self.tau,
90 threshold,
91 self.gamma,
92 )
93
94 # selecting weights with additive noise
95 accepted, candidate_idx = [], np.arange(delta_w.size)
96 _clipped_w = np.abs(np.clip(delta_w, a_min=-self.gamma, a_max=self.gamma))
97 while len(accepted) < n_upload:
98 nu_i = np.random.laplace(scale=lambda_nu, size=candidate_idx.shape)
99 above_threshold = (_clipped_w[candidate_idx] + nu_i) >= threshold
100 accepted += candidate_idx[above_threshold].tolist()
101 candidate_idx = candidate_idx[~above_threshold]
102 self.log_info(fl_ctx, "selected {} responses, requested {}".format(len(accepted), n_upload))
103 accepted = np.random.choice(accepted, size=np.int64(n_upload))
104 # eps_3 return with noise
105 noise = np.random.laplace(scale=self.gamma * 2.0 / self.eps_3, size=accepted.shape)
106 self.log_info(fl_ctx, "noise max: {}, median {}".format(np.max(np.abs(noise)), np.median(np.abs(noise))))
107 delta_w[accepted] = np.clip(delta_w[accepted] + noise, a_min=-self.gamma, a_max=self.gamma)
108 candidate_idx = list(set(np.arange(delta_w.size)) - set(accepted))
109 delta_w[candidate_idx] = 0.0
110
111 # resume original format
112 dp_w, _start = {}, 0
113 for name in sorted(model_diff):
114 if np.ndim(model_diff[name]) == 0:
115 dp_w[name] = model_diff[name]
116 _start += 1
117 continue
118 value = delta_w[_start : (_start + model_diff[name].size)]
119 dp_w[name] = value.reshape(model_diff[name].shape) * np.float(total_steps)
120 _start += model_diff[name].size
121
122 # We update the shareable weights only. Headers are unchanged.
123 dxo.data = dp_w
124 return dxo
125
[end of nvflare/app_common/filters/svt_privacy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nvflare/app_common/filters/svt_privacy.py b/nvflare/app_common/filters/svt_privacy.py
--- a/nvflare/app_common/filters/svt_privacy.py
+++ b/nvflare/app_common/filters/svt_privacy.py
@@ -23,7 +23,9 @@
class SVTPrivacy(DXOFilter):
- def __init__(self, fraction=0.1, epsilon=0.1, noise_var=0.1, gamma=1e-5, tau=1e-6, data_kinds: [str] = None):
+ def __init__(
+ self, fraction=0.1, epsilon=0.1, noise_var=0.1, gamma=1e-5, tau=1e-6, data_kinds: [str] = None, replace=True
+ ):
"""Implementation of the standard Sparse Vector Technique (SVT) differential privacy algorithm.
lambda_rho = gamma * 2.0 / epsilon
@@ -35,6 +37,8 @@
noise_var (float, optional): additive noise. Defaults to 0.1.
gamma (float, optional): Defaults to 1e-5.
tau (float, optional): Defaults to 1e-6.
+ data_kinds (str, optional): Defaults to None.
+ replace (bool): whether to sample with replacement. Defaults to True.
"""
if not data_kinds:
data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]
@@ -47,6 +51,7 @@
self.eps_3 = noise_var
self.gamma = gamma
self.tau = tau
+ self.replace = replace
def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:
"""Compute the differentially private SVT.
@@ -100,7 +105,7 @@
accepted += candidate_idx[above_threshold].tolist()
candidate_idx = candidate_idx[~above_threshold]
self.log_info(fl_ctx, "selected {} responses, requested {}".format(len(accepted), n_upload))
- accepted = np.random.choice(accepted, size=np.int64(n_upload))
+ accepted = np.random.choice(accepted, size=np.int64(n_upload), replace=self.replace)
# eps_3 return with noise
noise = np.random.laplace(scale=self.gamma * 2.0 / self.eps_3, size=accepted.shape)
self.log_info(fl_ctx, "noise max: {}, median {}".format(np.max(np.abs(noise)), np.median(np.abs(noise))))
|
{"golden_diff": "diff --git a/nvflare/app_common/filters/svt_privacy.py b/nvflare/app_common/filters/svt_privacy.py\n--- a/nvflare/app_common/filters/svt_privacy.py\n+++ b/nvflare/app_common/filters/svt_privacy.py\n@@ -23,7 +23,9 @@\n \n \n class SVTPrivacy(DXOFilter):\n- def __init__(self, fraction=0.1, epsilon=0.1, noise_var=0.1, gamma=1e-5, tau=1e-6, data_kinds: [str] = None):\n+ def __init__(\n+ self, fraction=0.1, epsilon=0.1, noise_var=0.1, gamma=1e-5, tau=1e-6, data_kinds: [str] = None, replace=True\n+ ):\n \"\"\"Implementation of the standard Sparse Vector Technique (SVT) differential privacy algorithm.\n \n lambda_rho = gamma * 2.0 / epsilon\n@@ -35,6 +37,8 @@\n noise_var (float, optional): additive noise. Defaults to 0.1.\n gamma (float, optional): Defaults to 1e-5.\n tau (float, optional): Defaults to 1e-6.\n+ data_kinds (str, optional): Defaults to None.\n+ replace (bool): whether to sample with replacement. Defaults to True.\n \"\"\"\n if not data_kinds:\n data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]\n@@ -47,6 +51,7 @@\n self.eps_3 = noise_var\n self.gamma = gamma\n self.tau = tau\n+ self.replace = replace\n \n def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:\n \"\"\"Compute the differentially private SVT.\n@@ -100,7 +105,7 @@\n accepted += candidate_idx[above_threshold].tolist()\n candidate_idx = candidate_idx[~above_threshold]\n self.log_info(fl_ctx, \"selected {} responses, requested {}\".format(len(accepted), n_upload))\n- accepted = np.random.choice(accepted, size=np.int64(n_upload))\n+ accepted = np.random.choice(accepted, size=np.int64(n_upload), replace=self.replace)\n # eps_3 return with noise\n noise = np.random.laplace(scale=self.gamma * 2.0 / self.eps_3, size=accepted.shape)\n self.log_info(fl_ctx, \"noise max: {}, median {}\".format(np.max(np.abs(noise)), np.median(np.abs(noise))))\n", "issue": "Potential bug in SVT?\nHi all, \r\nis this intended that in [line 103](https://github.com/NVIDIA/NVFlare/blob/e5e8c15a3455c81cb16268fbaafeef3f26d2b195/nvflare/app_common/filters/svt_privacy.py#L103) of the SVTPrivacy class, only a random choice of the accepted parameters is taken with replacement? This will lead to a lower number of accepted and candidate parameters ([Line 108](https://github.com/NVIDIA/NVFlare/blob/e5e8c15a3455c81cb16268fbaafeef3f26d2b195/nvflare/app_common/filters/svt_privacy.py#L108)) than originally computed. \r\n\r\nSo even if I am running SVT it with a fraction of 1.0, not all of my noisy values (3.491.530) are transferred, but around 1.284.489 of them are set to 0. \r\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union\n\nimport numpy as np\n\nfrom nvflare.apis.dxo import DXO, DataKind, MetaKey\nfrom nvflare.apis.dxo_filter import DXOFilter\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.shareable import Shareable\n\n\nclass SVTPrivacy(DXOFilter):\n def __init__(self, fraction=0.1, epsilon=0.1, noise_var=0.1, gamma=1e-5, tau=1e-6, data_kinds: [str] = None):\n \"\"\"Implementation of the standard Sparse Vector Technique (SVT) differential privacy algorithm.\n\n lambda_rho = gamma * 2.0 / epsilon\n threshold = tau + np.random.laplace(scale=lambda_rho)\n\n Args:\n fraction (float, optional): used to determine dataset threshold. Defaults to 0.1.\n epsilon (float, optional): Defaults to 0.1.\n noise_var (float, optional): additive noise. Defaults to 0.1.\n gamma (float, optional): Defaults to 1e-5.\n tau (float, optional): Defaults to 1e-6.\n \"\"\"\n if not data_kinds:\n data_kinds = [DataKind.WEIGHT_DIFF, DataKind.WEIGHTS]\n\n super().__init__(supported_data_kinds=[DataKind.WEIGHTS, DataKind.WEIGHT_DIFF], data_kinds_to_filter=data_kinds)\n\n self.frac = fraction # fraction of the model to upload\n self.eps_1 = epsilon\n self.eps_2 = None # to be derived from eps_1\n self.eps_3 = noise_var\n self.gamma = gamma\n self.tau = tau\n\n def process_dxo(self, dxo: DXO, shareable: Shareable, fl_ctx: FLContext) -> Union[None, DXO]:\n \"\"\"Compute the differentially private SVT.\n\n Args:\n dxo: information from client\n shareable: that the dxo belongs to\n fl_ctx: context provided by workflow\n\n Returns: filtered result.\n \"\"\"\n self.log_debug(fl_ctx, \"inside filter\")\n model_diff = dxo.data\n total_steps = dxo.get_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, 1)\n\n delta_w = np.concatenate([model_diff[name].ravel() / np.float(total_steps) for name in sorted(model_diff)])\n self.log_info(\n fl_ctx,\n \"Delta_w: Max abs: {}, Min abs: {}, Median abs: {}.\".format(\n np.max(np.abs(delta_w)), np.min(np.abs(delta_w)), np.median(np.abs(delta_w))\n ),\n )\n\n # precompute thresholds\n n_upload = np.minimum(np.ceil(np.float(delta_w.size) * self.frac), np.float(delta_w.size))\n\n # eps_1: threshold with noise\n lambda_rho = self.gamma * 2.0 / self.eps_1\n threshold = self.tau + np.random.laplace(scale=lambda_rho)\n # eps_2: query with noise\n self.eps_2 = self.eps_1 * (2.0 * n_upload) ** (2.0 / 3.0)\n lambda_nu = self.gamma * 4.0 * n_upload / self.eps_2\n self.logger.info(\n \"total params: %s, epsilon: %s, \"\n \"perparam budget %s, threshold tau: %s + f(eps_1) = %s, \"\n \"clip gamma: %s\",\n delta_w.size,\n self.eps_1,\n self.eps_1 / n_upload,\n self.tau,\n threshold,\n self.gamma,\n )\n\n # selecting weights with additive noise\n accepted, candidate_idx = [], np.arange(delta_w.size)\n _clipped_w = np.abs(np.clip(delta_w, a_min=-self.gamma, a_max=self.gamma))\n while len(accepted) < n_upload:\n nu_i = np.random.laplace(scale=lambda_nu, size=candidate_idx.shape)\n above_threshold = 
(_clipped_w[candidate_idx] + nu_i) >= threshold\n accepted += candidate_idx[above_threshold].tolist()\n candidate_idx = candidate_idx[~above_threshold]\n self.log_info(fl_ctx, \"selected {} responses, requested {}\".format(len(accepted), n_upload))\n accepted = np.random.choice(accepted, size=np.int64(n_upload))\n # eps_3 return with noise\n noise = np.random.laplace(scale=self.gamma * 2.0 / self.eps_3, size=accepted.shape)\n self.log_info(fl_ctx, \"noise max: {}, median {}\".format(np.max(np.abs(noise)), np.median(np.abs(noise))))\n delta_w[accepted] = np.clip(delta_w[accepted] + noise, a_min=-self.gamma, a_max=self.gamma)\n candidate_idx = list(set(np.arange(delta_w.size)) - set(accepted))\n delta_w[candidate_idx] = 0.0\n\n # resume original format\n dp_w, _start = {}, 0\n for name in sorted(model_diff):\n if np.ndim(model_diff[name]) == 0:\n dp_w[name] = model_diff[name]\n _start += 1\n continue\n value = delta_w[_start : (_start + model_diff[name].size)]\n dp_w[name] = value.reshape(model_diff[name].shape) * np.float(total_steps)\n _start += model_diff[name].size\n\n # We update the shareable weights only. Headers are unchanged.\n dxo.data = dp_w\n return dxo\n", "path": "nvflare/app_common/filters/svt_privacy.py"}]}
| 2,440 | 589 |
gh_patches_debug_30345
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-12435
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Uploading an image with a really long name to a product generates only one thumbnail size.
### What are you trying to achieve?
Thumbnails of different sizes should be generated regardless of the file name
### Steps to reproduce the problem
1. Go to the product page and upload an image with a long name `2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02`
2. Query thumbnails for the product:
```
query Product{
product(id: "UHJvZHVjdDo4Nzk="){
id
thumbnail{
alt
url
}
thumbnailx2: thumbnail(size: 1024){
alt
url
}
media{
url
}
}
}
```
Current result:
In the response, it looks like thumbnails were generated
```
{
"data": {
"product": {
"id": "UHJvZHVjdDo2MDk=",
"thumbnail": {
"alt": "",
"url": "https://*.staging.saleor.cloud/thumbnail/UHJvZHVjdE1lZGlhOjE3NzI=/256/"
},
"thumbnailx2": {
"alt": "",
"url": "https://*.staging.saleor.cloud/thumbnail/UHJvZHVjdE1lZGlhOjE3NzI=/1024/"
},
"media": [
{
"url": "https://*.staging.saleor.cloud/media/thumbnails/products/2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02.webp"
}
]
}
},
```
but when entering the URL `https://*.staging.saleor.cloud/thumbnail/UHJvZHVjdE1lZGlhOjE3NzI=/256/` it changed to the original media URL `https://*.staging.saleor.cloud/media/thumbnails/products/2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02.webp`
https://saleorcommerce.slack.com/archives/C039MGJDWU8/p1678795547580359
### What did you expect to happen?
Thumbnails are generated in proper size
### Logs
_No response_
### Environment
Saleor version: core v3.13.0-a (3.7.52 and 3.9.12)
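
For context, the current `add_hash_to_file_name` helper quoted in the code section below keeps the uploaded stem in full and only appends a short hash, so a name like the one in step 1 is carried through unchanged; that is presumably what trips the thumbnail pipeline for very long names. A standalone Python sketch of what the helper produces (plain string instead of a Django file object; the hash differs on every run):

```
# Mirrors the logic of add_hash_to_file_name for the reported upload name.
import os
import secrets

uploaded = "2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02.webp"
stem, ext = os.path.splitext(uploaded)
stored = f"{stem}_{secrets.token_hex(nbytes=4)}{ext}"
print(len(stem), stored)   # the long stem is preserved in full
```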
</issue>
<code>
[start of saleor/graphql/core/utils/__init__.py]
1 import binascii
2 import os
3 import secrets
4 from typing import Literal, Tuple, Type, Union, overload
5
6 import graphene
7 from django.core.exceptions import ValidationError
8 from graphene import ObjectType
9 from graphql.error import GraphQLError
10
11 from ....plugins.const import APP_ID_PREFIX
12 from ..validators import validate_if_int_or_uuid
13
14
15 def snake_to_camel_case(name):
16 """Convert snake_case variable name to camelCase."""
17 if isinstance(name, str):
18 split_name = name.split("_")
19 return split_name[0] + "".join(map(str.capitalize, split_name[1:]))
20 return name
21
22
23 def str_to_enum(name):
24 """Create an enum value from a string."""
25 return name.replace(" ", "_").replace("-", "_").upper()
26
27
28 def get_duplicates_items(first_list, second_list):
29 """Return items that appear on both provided lists."""
30 if first_list and second_list:
31 return set(first_list) & set(second_list)
32 return []
33
34
35 def get_duplicated_values(values):
36 """Return set of duplicated values."""
37 return {value for value in values if values.count(value) > 1}
38
39
40 @overload
41 def from_global_id_or_error(
42 global_id: str,
43 only_type: Union[ObjectType, str, None] = None,
44 raise_error: Literal[True] = True,
45 ) -> Tuple[str, str]:
46 ...
47
48
49 @overload
50 def from_global_id_or_error(
51 global_id: str,
52 only_type: Union[Type[ObjectType], str, None] = None,
53 raise_error: bool = False,
54 ) -> Union[Tuple[str, str], Tuple[str, None]]:
55 ...
56
57
58 def from_global_id_or_error(
59 global_id: str,
60 only_type: Union[Type[ObjectType], str, None] = None,
61 raise_error: bool = False,
62 ):
63 """Resolve global ID or raise GraphQLError.
64
65 Validates if given ID is a proper ID handled by Saleor.
66 Valid IDs formats, base64 encoded:
67 'app:<int>:<str>' : External app ID with 'app' prefix
68 '<type>:<int>' : Internal ID containing object type and ID as integer
69 '<type>:<UUID>' : Internal ID containing object type and UUID
70 Optionally validate the object type, if `only_type` is provided,
71 raise GraphQLError when `raise_error` is set to True.
72
73 Returns tuple: (type, id).
74 """
75 try:
76 type_, id_ = graphene.Node.from_global_id(global_id)
77 except (binascii.Error, UnicodeDecodeError, ValueError):
78 raise GraphQLError(f"Couldn't resolve id: {global_id}.")
79 if type_ == APP_ID_PREFIX:
80 id_ = global_id
81 else:
82 if not validate_if_int_or_uuid(id_):
83 raise GraphQLError(f"Error occurred during ID - {global_id} validation.")
84
85 if only_type and str(type_) != str(only_type):
86 if not raise_error:
87 return type_, None
88 raise GraphQLError(f"Must receive a {only_type} id.")
89 return type_, id_
90
91
92 def from_global_id_or_none(
93 global_id, only_type: Union[ObjectType, str, None] = None, raise_error: bool = False
94 ):
95 if not global_id:
96 return None
97
98 return from_global_id_or_error(global_id, only_type, raise_error)[1]
99
100
101 def to_global_id_or_none(instance):
102 class_name = instance.__class__.__name__
103 if instance is None or instance.pk is None:
104 return None
105 return graphene.Node.to_global_id(class_name, instance.pk)
106
107
108 def add_hash_to_file_name(file):
109 """Add unique text fragment to the file name to prevent file overriding."""
110 file_name, format = os.path.splitext(file._name)
111 hash = secrets.token_hex(nbytes=4)
112 new_name = f"{file_name}_{hash}{format}"
113 file._name = new_name
114
115
116 def raise_validation_error(field=None, message=None, code=None):
117 raise ValidationError({field: ValidationError(message, code=code)})
118
119
120 def ext_ref_to_global_id_or_error(model, external_reference):
121 """Convert external reference to graphen global id."""
122 internal_id = (
123 model.objects.filter(external_reference=external_reference)
124 .values_list("id", flat=True)
125 .first()
126 )
127 if internal_id:
128 return graphene.Node.to_global_id(model.__name__, internal_id)
129 else:
130 raise_validation_error(
131 field="externalReference",
132 message=f"Couldn't resolve to a node: {external_reference}",
133 code="not_found",
134 )
135
[end of saleor/graphql/core/utils/__init__.py]
[start of saleor/graphql/core/validators/file.py]
1 import mimetypes
2 import os
3 import secrets
4
5 import requests
6 from django.core.exceptions import ValidationError
7 from PIL import Image, UnidentifiedImageError
8
9 from ....thumbnail import MIME_TYPE_TO_PIL_IDENTIFIER
10 from ..utils import add_hash_to_file_name
11
12 Image.init()
13
14
15 def is_image_mimetype(mimetype: str) -> bool:
16 """Check if mimetype is image."""
17 if mimetype is None:
18 return False
19 return mimetype.startswith("image/")
20
21
22 def is_supported_image_mimetype(mimetype: str) -> bool:
23 """Check if mimetype is a mimetype that thumbnails support."""
24 if mimetype is None:
25 return False
26 return mimetype in MIME_TYPE_TO_PIL_IDENTIFIER.keys()
27
28
29 def is_image_url(url: str) -> bool:
30 """Check if file URL seems to be an image."""
31 if url.endswith(".webp"):
32 # webp is not recognized by mimetypes as image
33 # https://bugs.python.org/issue38902
34 return True
35 filetype = mimetypes.guess_type(url)[0]
36 return filetype is not None and is_image_mimetype(filetype)
37
38
39 def validate_image_url(url: str, field_name: str, error_code: str) -> None:
40 """Check if remote file has content type of image.
41
42 Instead of the whole file, only the headers are fetched.
43 """
44 head = requests.head(url)
45 header = head.headers
46 content_type = header.get("content-type")
47 if content_type is None or not is_supported_image_mimetype(content_type):
48 raise ValidationError(
49 {field_name: ValidationError("Invalid file type.", code=error_code)}
50 )
51
52
53 def get_filename_from_url(url: str) -> str:
54 """Prepare unique filename for file from URL to avoid overwritting."""
55 file_name = os.path.basename(url)
56 name, format = os.path.splitext(file_name)
57 hash = secrets.token_hex(nbytes=4)
58 return f"{name}_{hash}{format}"
59
60
61 def clean_image_file(cleaned_input, img_field_name, error_class):
62 """Extract and clean uploaded image file.
63
64 Validate if the file is an image supported by thumbnails.
65 """
66 img_file = cleaned_input.get(img_field_name)
67 if not img_file:
68 raise ValidationError(
69 {
70 img_field_name: ValidationError(
71 "File is required.", code=error_class.REQUIRED
72 )
73 }
74 )
75 if not is_supported_image_mimetype(img_file.content_type):
76 raise ValidationError(
77 {
78 img_field_name: ValidationError(
79 "Invalid file type.", code=error_class.INVALID
80 )
81 }
82 )
83
84 _validate_image_format(img_file, img_field_name, error_class)
85 try:
86 with Image.open(img_file) as image:
87 _validate_image_exif(image, img_field_name, error_class)
88 except (SyntaxError, TypeError, UnidentifiedImageError) as e:
89 raise ValidationError(
90 {
91 img_field_name: ValidationError(
92 "Invalid file. The following error was raised during the attempt "
93 f"of opening the file: {str(e)}",
94 code=error_class.INVALID.value,
95 )
96 }
97 )
98
99 add_hash_to_file_name(img_file)
100 return img_file
101
102
103 def _validate_image_format(file, field_name, error_class):
104 """Validate image file format."""
105 allowed_extensions = _get_allowed_extensions()
106 _file_name, format = os.path.splitext(file._name)
107 if not format:
108 raise ValidationError(
109 {
110 field_name: ValidationError(
111 "Lack of file extension.", code=error_class.INVALID
112 )
113 }
114 )
115 elif format not in allowed_extensions:
116 raise ValidationError(
117 {
118 field_name: ValidationError(
119 "Invalid file extension. Image file required.",
120 code=error_class.INVALID,
121 )
122 }
123 )
124
125
126 def _get_allowed_extensions():
127 """Return image extension lists that are supported by thumbnails."""
128 return [
129 ext.lower()
130 for ext, file_type in Image.EXTENSION.items()
131 if file_type.upper() in MIME_TYPE_TO_PIL_IDENTIFIER.values()
132 ]
133
134
135 def _validate_image_exif(img, field_name, error_class):
136 try:
137 img.getexif()
138 except (SyntaxError, TypeError, UnidentifiedImageError) as e:
139 raise ValidationError(
140 {
141 field_name: ValidationError(
142 "Invalid file. The following error was raised during the attempt "
143 f"of getting the exchangeable image file data: {str(e)}.",
144 code=error_class.INVALID.value,
145 )
146 }
147 )
148
[end of saleor/graphql/core/validators/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/graphql/core/utils/__init__.py b/saleor/graphql/core/utils/__init__.py
--- a/saleor/graphql/core/utils/__init__.py
+++ b/saleor/graphql/core/utils/__init__.py
@@ -11,6 +11,8 @@
from ....plugins.webhook.utils import APP_ID_PREFIX
from ..validators import validate_if_int_or_uuid
+FILE_NAME_MAX_LENGTH = 55
+
def snake_to_camel_case(name):
"""Convert snake_case variable name to camelCase."""
@@ -108,6 +110,7 @@
def add_hash_to_file_name(file):
"""Add unique text fragment to the file name to prevent file overriding."""
file_name, format = os.path.splitext(file._name)
+ file_name = file_name[:FILE_NAME_MAX_LENGTH]
hash = secrets.token_hex(nbytes=4)
new_name = f"{file_name}_{hash}{format}"
file._name = new_name
diff --git a/saleor/graphql/core/validators/file.py b/saleor/graphql/core/validators/file.py
--- a/saleor/graphql/core/validators/file.py
+++ b/saleor/graphql/core/validators/file.py
@@ -7,7 +7,7 @@
from PIL import Image, UnidentifiedImageError
from ....thumbnail import MIME_TYPE_TO_PIL_IDENTIFIER
-from ..utils import add_hash_to_file_name
+from ..utils import FILE_NAME_MAX_LENGTH, add_hash_to_file_name
Image.init()
@@ -54,6 +54,7 @@
"""Prepare unique filename for file from URL to avoid overwritting."""
file_name = os.path.basename(url)
name, format = os.path.splitext(file_name)
+ name = name[:FILE_NAME_MAX_LENGTH]
hash = secrets.token_hex(nbytes=4)
return f"{name}_{hash}{format}"
|
{"golden_diff": "diff --git a/saleor/graphql/core/utils/__init__.py b/saleor/graphql/core/utils/__init__.py\n--- a/saleor/graphql/core/utils/__init__.py\n+++ b/saleor/graphql/core/utils/__init__.py\n@@ -11,6 +11,8 @@\n from ....plugins.webhook.utils import APP_ID_PREFIX\n from ..validators import validate_if_int_or_uuid\n \n+FILE_NAME_MAX_LENGTH = 55\n+\n \n def snake_to_camel_case(name):\n \"\"\"Convert snake_case variable name to camelCase.\"\"\"\n@@ -108,6 +110,7 @@\n def add_hash_to_file_name(file):\n \"\"\"Add unique text fragment to the file name to prevent file overriding.\"\"\"\n file_name, format = os.path.splitext(file._name)\n+ file_name = file_name[:FILE_NAME_MAX_LENGTH]\n hash = secrets.token_hex(nbytes=4)\n new_name = f\"{file_name}_{hash}{format}\"\n file._name = new_name\ndiff --git a/saleor/graphql/core/validators/file.py b/saleor/graphql/core/validators/file.py\n--- a/saleor/graphql/core/validators/file.py\n+++ b/saleor/graphql/core/validators/file.py\n@@ -7,7 +7,7 @@\n from PIL import Image, UnidentifiedImageError\n \n from ....thumbnail import MIME_TYPE_TO_PIL_IDENTIFIER\n-from ..utils import add_hash_to_file_name\n+from ..utils import FILE_NAME_MAX_LENGTH, add_hash_to_file_name\n \n Image.init()\n \n@@ -54,6 +54,7 @@\n \"\"\"Prepare unique filename for file from URL to avoid overwritting.\"\"\"\n file_name = os.path.basename(url)\n name, format = os.path.splitext(file_name)\n+ name = name[:FILE_NAME_MAX_LENGTH]\n hash = secrets.token_hex(nbytes=4)\n return f\"{name}_{hash}{format}\"\n", "issue": "Bug: Upload an image to product with really long name it generates only one thumbnail size.\n### What are you trying to achieve?\r\n\r\nThumbnails of different sizes should be generated regardless of the file name\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Go to the product page and upload an image with a long name `2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02`\r\n2. 
Query thumbnails for the product:\r\n```\r\nquery Product{\r\n product(id: \"UHJvZHVjdDo4Nzk=\"){\r\n id\r\n thumbnail{\r\n alt\r\n url\r\n }\r\n thumbnailx2: thumbnail(size: 1024){\r\n alt\r\n url\r\n }\r\n media{\r\n url\r\n }\r\n }\r\n}\r\n```\r\n\r\nCurrent result: \r\nIn the response, it looks like thumbnails were generated \r\n```\r\n{\r\n \"data\": {\r\n \"product\": {\r\n \"id\": \"UHJvZHVjdDo2MDk=\",\r\n \"thumbnail\": {\r\n \"alt\": \"\",\r\n \"url\": \"https://*.staging.saleor.cloud/thumbnail/UHJvZHVjdE1lZGlhOjE3NzI=/256/\"\r\n },\r\n \"thumbnailx2\": {\r\n \"alt\": \"\",\r\n \"url\": \"https://*.staging.saleor.cloud/thumbnail/UHJvZHVjdE1lZGlhOjE3NzI=/1024/\"\r\n },\r\n \"media\": [\r\n {\r\n \"url\": \"https://*.staging.saleor.cloud/media/thumbnails/products/2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02.webp\"\r\n }\r\n ]\r\n }\r\n },\r\n```\r\nbut when entering the URL `https://*.staging.saleor.cloud/thumbnail/UHJvZHVjdE1lZGlhOjE3NzI=/256/` it changed to the original media URL `https://*.staging.saleor.cloud/media/thumbnails/products/2Fvar2Ffolders2Fbj2F61gtb14j7rz4_4yd15tnkzjh0000gn2FT2Ftmp7yvuo2a1_30519a02.webp`\r\n\r\nhttps://saleorcommerce.slack.com/archives/C039MGJDWU8/p1678795547580359\r\n\r\n### What did you expect to happen?\r\n\r\nThumbnails are generated in proper size\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\nSaleor version: core v3.13.0-a (3.7.52 and 3.9.12)\r\n\r\n\n", "before_files": [{"content": "import binascii\nimport os\nimport secrets\nfrom typing import Literal, Tuple, Type, Union, overload\n\nimport graphene\nfrom django.core.exceptions import ValidationError\nfrom graphene import ObjectType\nfrom graphql.error import GraphQLError\n\nfrom ....plugins.const import APP_ID_PREFIX\nfrom ..validators import validate_if_int_or_uuid\n\n\ndef snake_to_camel_case(name):\n \"\"\"Convert snake_case variable name to camelCase.\"\"\"\n if isinstance(name, str):\n split_name = name.split(\"_\")\n return split_name[0] + \"\".join(map(str.capitalize, split_name[1:]))\n return name\n\n\ndef str_to_enum(name):\n \"\"\"Create an enum value from a string.\"\"\"\n return name.replace(\" \", \"_\").replace(\"-\", \"_\").upper()\n\n\ndef get_duplicates_items(first_list, second_list):\n \"\"\"Return items that appear on both provided lists.\"\"\"\n if first_list and second_list:\n return set(first_list) & set(second_list)\n return []\n\n\ndef get_duplicated_values(values):\n \"\"\"Return set of duplicated values.\"\"\"\n return {value for value in values if values.count(value) > 1}\n\n\n@overload\ndef from_global_id_or_error(\n global_id: str,\n only_type: Union[ObjectType, str, None] = None,\n raise_error: Literal[True] = True,\n) -> Tuple[str, str]:\n ...\n\n\n@overload\ndef from_global_id_or_error(\n global_id: str,\n only_type: Union[Type[ObjectType], str, None] = None,\n raise_error: bool = False,\n) -> Union[Tuple[str, str], Tuple[str, None]]:\n ...\n\n\ndef from_global_id_or_error(\n global_id: str,\n only_type: Union[Type[ObjectType], str, None] = None,\n raise_error: bool = False,\n):\n \"\"\"Resolve global ID or raise GraphQLError.\n\n Validates if given ID is a proper ID handled by Saleor.\n Valid IDs formats, base64 encoded:\n 'app:<int>:<str>' : External app ID with 'app' prefix\n '<type>:<int>' : Internal ID containing object type and ID as integer\n '<type>:<UUID>' : Internal ID containing object type and UUID\n Optionally validate the object type, if `only_type` is provided,\n raise GraphQLError when 
`raise_error` is set to True.\n\n Returns tuple: (type, id).\n \"\"\"\n try:\n type_, id_ = graphene.Node.from_global_id(global_id)\n except (binascii.Error, UnicodeDecodeError, ValueError):\n raise GraphQLError(f\"Couldn't resolve id: {global_id}.\")\n if type_ == APP_ID_PREFIX:\n id_ = global_id\n else:\n if not validate_if_int_or_uuid(id_):\n raise GraphQLError(f\"Error occurred during ID - {global_id} validation.\")\n\n if only_type and str(type_) != str(only_type):\n if not raise_error:\n return type_, None\n raise GraphQLError(f\"Must receive a {only_type} id.\")\n return type_, id_\n\n\ndef from_global_id_or_none(\n global_id, only_type: Union[ObjectType, str, None] = None, raise_error: bool = False\n):\n if not global_id:\n return None\n\n return from_global_id_or_error(global_id, only_type, raise_error)[1]\n\n\ndef to_global_id_or_none(instance):\n class_name = instance.__class__.__name__\n if instance is None or instance.pk is None:\n return None\n return graphene.Node.to_global_id(class_name, instance.pk)\n\n\ndef add_hash_to_file_name(file):\n \"\"\"Add unique text fragment to the file name to prevent file overriding.\"\"\"\n file_name, format = os.path.splitext(file._name)\n hash = secrets.token_hex(nbytes=4)\n new_name = f\"{file_name}_{hash}{format}\"\n file._name = new_name\n\n\ndef raise_validation_error(field=None, message=None, code=None):\n raise ValidationError({field: ValidationError(message, code=code)})\n\n\ndef ext_ref_to_global_id_or_error(model, external_reference):\n \"\"\"Convert external reference to graphen global id.\"\"\"\n internal_id = (\n model.objects.filter(external_reference=external_reference)\n .values_list(\"id\", flat=True)\n .first()\n )\n if internal_id:\n return graphene.Node.to_global_id(model.__name__, internal_id)\n else:\n raise_validation_error(\n field=\"externalReference\",\n message=f\"Couldn't resolve to a node: {external_reference}\",\n code=\"not_found\",\n )\n", "path": "saleor/graphql/core/utils/__init__.py"}, {"content": "import mimetypes\nimport os\nimport secrets\n\nimport requests\nfrom django.core.exceptions import ValidationError\nfrom PIL import Image, UnidentifiedImageError\n\nfrom ....thumbnail import MIME_TYPE_TO_PIL_IDENTIFIER\nfrom ..utils import add_hash_to_file_name\n\nImage.init()\n\n\ndef is_image_mimetype(mimetype: str) -> bool:\n \"\"\"Check if mimetype is image.\"\"\"\n if mimetype is None:\n return False\n return mimetype.startswith(\"image/\")\n\n\ndef is_supported_image_mimetype(mimetype: str) -> bool:\n \"\"\"Check if mimetype is a mimetype that thumbnails support.\"\"\"\n if mimetype is None:\n return False\n return mimetype in MIME_TYPE_TO_PIL_IDENTIFIER.keys()\n\n\ndef is_image_url(url: str) -> bool:\n \"\"\"Check if file URL seems to be an image.\"\"\"\n if url.endswith(\".webp\"):\n # webp is not recognized by mimetypes as image\n # https://bugs.python.org/issue38902\n return True\n filetype = mimetypes.guess_type(url)[0]\n return filetype is not None and is_image_mimetype(filetype)\n\n\ndef validate_image_url(url: str, field_name: str, error_code: str) -> None:\n \"\"\"Check if remote file has content type of image.\n\n Instead of the whole file, only the headers are fetched.\n \"\"\"\n head = requests.head(url)\n header = head.headers\n content_type = header.get(\"content-type\")\n if content_type is None or not is_supported_image_mimetype(content_type):\n raise ValidationError(\n {field_name: ValidationError(\"Invalid file type.\", code=error_code)}\n )\n\n\ndef get_filename_from_url(url: str) -> 
str:\n \"\"\"Prepare unique filename for file from URL to avoid overwritting.\"\"\"\n file_name = os.path.basename(url)\n name, format = os.path.splitext(file_name)\n hash = secrets.token_hex(nbytes=4)\n return f\"{name}_{hash}{format}\"\n\n\ndef clean_image_file(cleaned_input, img_field_name, error_class):\n \"\"\"Extract and clean uploaded image file.\n\n Validate if the file is an image supported by thumbnails.\n \"\"\"\n img_file = cleaned_input.get(img_field_name)\n if not img_file:\n raise ValidationError(\n {\n img_field_name: ValidationError(\n \"File is required.\", code=error_class.REQUIRED\n )\n }\n )\n if not is_supported_image_mimetype(img_file.content_type):\n raise ValidationError(\n {\n img_field_name: ValidationError(\n \"Invalid file type.\", code=error_class.INVALID\n )\n }\n )\n\n _validate_image_format(img_file, img_field_name, error_class)\n try:\n with Image.open(img_file) as image:\n _validate_image_exif(image, img_field_name, error_class)\n except (SyntaxError, TypeError, UnidentifiedImageError) as e:\n raise ValidationError(\n {\n img_field_name: ValidationError(\n \"Invalid file. The following error was raised during the attempt \"\n f\"of opening the file: {str(e)}\",\n code=error_class.INVALID.value,\n )\n }\n )\n\n add_hash_to_file_name(img_file)\n return img_file\n\n\ndef _validate_image_format(file, field_name, error_class):\n \"\"\"Validate image file format.\"\"\"\n allowed_extensions = _get_allowed_extensions()\n _file_name, format = os.path.splitext(file._name)\n if not format:\n raise ValidationError(\n {\n field_name: ValidationError(\n \"Lack of file extension.\", code=error_class.INVALID\n )\n }\n )\n elif format not in allowed_extensions:\n raise ValidationError(\n {\n field_name: ValidationError(\n \"Invalid file extension. Image file required.\",\n code=error_class.INVALID,\n )\n }\n )\n\n\ndef _get_allowed_extensions():\n \"\"\"Return image extension lists that are supported by thumbnails.\"\"\"\n return [\n ext.lower()\n for ext, file_type in Image.EXTENSION.items()\n if file_type.upper() in MIME_TYPE_TO_PIL_IDENTIFIER.values()\n ]\n\n\ndef _validate_image_exif(img, field_name, error_class):\n try:\n img.getexif()\n except (SyntaxError, TypeError, UnidentifiedImageError) as e:\n raise ValidationError(\n {\n field_name: ValidationError(\n \"Invalid file. The following error was raised during the attempt \"\n f\"of getting the exchangeable image file data: {str(e)}.\",\n code=error_class.INVALID.value,\n )\n }\n )\n", "path": "saleor/graphql/core/validators/file.py"}]}
| 3,822 | 400 |
gh_patches_debug_4844
|
rasdani/github-patches
|
git_diff
|
twisted__twisted-11722
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
.hypothesis should be in .gitignore
**Describe the incorrect behavior you saw**
`git diff` shows me an untracked `.hypothesis` directory.
**Describe how to cause this behavior**
I ran the tests.
**Describe the correct behavior you'd like to see**
`.hypothesis` [shouldn't be checked in](https://hypothesis.readthedocs.io/en/latest/database.html#the-hypothesis-example-database), so it should be ignored by `git`.
</issue>
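Since the Hypothesis example database lives in a local `.hypothesis/` directory created when the test suite runs, the usual remedy is a one-line ignore entry. A minimal sketch of such an entry (its exact placement within the repository's `.gitignore` is an assumption):

```
# Hypothesis example database, generated when running the test suite
.hypothesis/
```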
<code>
[start of .github/scripts/check-pr-text.py]
1 #
2 # This script is designed to be called by the GHA workflow.
3 #
4 # It is designed to check that the PR text complies to our dev standards.
5 #
6 # The input is received via the environmet variables:
7 # * PR_TITLE - title of the PR
8 # * PR_BODY - the description of the PR
9 #
10 # To test it run
11 #
12 # $ export PR_TITLE='#1234 Test Title'
13 # $ export PR_BODY='some lines
14 # > Fixes #12345
15 # > more lines'
16 # $ python3 .github/scripts/check-pr-text.py
17 #
18 import os
19 import re
20 import sys
21
22 pr_title = os.environ.get("PR_TITLE", "")
23 pr_body = os.environ.get("PR_BODY", "")
24
25 print("--- DEBUG ---")
26 print(f"Title: {pr_title}")
27 print(f"Body:\n {pr_body}")
28 print("-------------")
29
30
31 def fail(message):
32 print(message)
33 print("Fix the title and then trigger a new push.")
34 print("A re-run for this job will not work.")
35 sys.exit(1)
36
37
38 if not pr_title:
39 fail("Title for the PR not found. " "Maybe missing PR_TITLE env var.")
40
41 if not pr_body:
42 fail("Body for the PR not found. " "Maybe missing PR_BODY env var.")
43
44 title_search = re.search(r"^(#\d+) .+", pr_title)
45 if not title_search:
46 fail(
47 "Title of PR has no issue ID reference. It must look like “#1234 Foo bar baz”."
48 )
49 else:
50 print(f"PR title is complaint for {title_search[1]}. Good job.")
51
52
53 body_search = re.search(r".*Fixes (#\d+).+", pr_body)
54 if not body_search:
55 fail('Body of PR has no "Fixes #12345" issue ID reference.')
56 else:
57 print(f"PR description is complaint for {body_search[1]}. Good job.")
58
59
60 if title_search[1] != body_search[1]:
61 fail("PR title and description have different IDs.")
62
63 # All good.
64 sys.exit(0)
65
[end of .github/scripts/check-pr-text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/.github/scripts/check-pr-text.py b/.github/scripts/check-pr-text.py
--- a/.github/scripts/check-pr-text.py
+++ b/.github/scripts/check-pr-text.py
@@ -41,7 +41,7 @@
if not pr_body:
fail("Body for the PR not found. " "Maybe missing PR_BODY env var.")
-title_search = re.search(r"^(#\d+) .+", pr_title)
+title_search = re.search(r"^(#\d+):? .+", pr_title)
if not title_search:
fail(
"Title of PR has no issue ID reference. It must look like “#1234 Foo bar baz”."
|
{"golden_diff": "diff --git a/.github/scripts/check-pr-text.py b/.github/scripts/check-pr-text.py\n--- a/.github/scripts/check-pr-text.py\n+++ b/.github/scripts/check-pr-text.py\n@@ -41,7 +41,7 @@\n if not pr_body:\n fail(\"Body for the PR not found. \" \"Maybe missing PR_BODY env var.\")\n \n-title_search = re.search(r\"^(#\\d+) .+\", pr_title)\n+title_search = re.search(r\"^(#\\d+):? .+\", pr_title)\n if not title_search:\n fail(\n \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n", "issue": ".hypothesis should be in .gitignore\n**Describe the incorrect behavior you saw**\r\n\r\n`git diff` shows me an untracked `.hypothesis` directory.\r\n\r\n**Describe how to cause this behavior**\r\n\r\nI ran the tests.\r\n\r\n**Describe the correct behavior you'd like to see**\r\n\r\n`.hypothesis` [shouldn't be checked in](https://hypothesis.readthedocs.io/en/latest/database.html#the-hypothesis-example-database), so it should be ignored by `git`.\n", "before_files": [{"content": "#\n# This script is designed to be called by the GHA workflow.\n#\n# It is designed to check that the PR text complies to our dev standards.\n#\n# The input is received via the environmet variables:\n# * PR_TITLE - title of the PR\n# * PR_BODY - the description of the PR\n#\n# To test it run\n#\n# $ export PR_TITLE='#1234 Test Title'\n# $ export PR_BODY='some lines\n# > Fixes #12345\n# > more lines'\n# $ python3 .github/scripts/check-pr-text.py\n#\nimport os\nimport re\nimport sys\n\npr_title = os.environ.get(\"PR_TITLE\", \"\")\npr_body = os.environ.get(\"PR_BODY\", \"\")\n\nprint(\"--- DEBUG ---\")\nprint(f\"Title: {pr_title}\")\nprint(f\"Body:\\n {pr_body}\")\nprint(\"-------------\")\n\n\ndef fail(message):\n print(message)\n print(\"Fix the title and then trigger a new push.\")\n print(\"A re-run for this job will not work.\")\n sys.exit(1)\n\n\nif not pr_title:\n fail(\"Title for the PR not found. \" \"Maybe missing PR_TITLE env var.\")\n\nif not pr_body:\n fail(\"Body for the PR not found. \" \"Maybe missing PR_BODY env var.\")\n\ntitle_search = re.search(r\"^(#\\d+) .+\", pr_title)\nif not title_search:\n fail(\n \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n )\nelse:\n print(f\"PR title is complaint for {title_search[1]}. Good job.\")\n\n\nbody_search = re.search(r\".*Fixes (#\\d+).+\", pr_body)\nif not body_search:\n fail('Body of PR has no \"Fixes #12345\" issue ID reference.')\nelse:\n print(f\"PR description is complaint for {body_search[1]}. Good job.\")\n\n\nif title_search[1] != body_search[1]:\n fail(\"PR title and description have different IDs.\")\n\n# All good.\nsys.exit(0)\n", "path": ".github/scripts/check-pr-text.py"}]}
| 1,221 | 146 |
gh_patches_debug_37758
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-594
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Validate schema early
Currently graphql-core doesn't validate the schema until execution time (to allow for partial schema generation). Since strawberry doesn't support partial generation at the moment it would make sense to validate the schema as soon as it's instantiated so that any issues are raised early.
~~As a bonus it would be nice to be able to specify which validators to run.~~ Actually the schema validation is separate to the validate "rules" that I was thinking about here so this is not the right place to tackle that problem.
</issue>
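One way to surface such problems early is to run graphql-core's `validate_schema` on the freshly built `GraphQLSchema` at the end of `Schema.__init__` and raise if it reports errors. A minimal sketch, assuming a small helper called on `self._schema` right after construction (the helper name and the choice of `ValueError` are illustrative, not Strawberry's actual API):

```python
from graphql import GraphQLSchema, validate_schema


def ensure_valid_schema(schema: GraphQLSchema) -> None:
    # Raise at construction time so schema problems surface immediately
    # instead of on the first executed query.
    errors = validate_schema(schema)
    if errors:
        formatted = "\n\n".join(error.message for error in errors)
        raise ValueError(f"Invalid Schema. Errors:\n\n{formatted}")
```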
<code>
[start of strawberry/schema/schema.py]
1 from typing import Any, Dict, List, Optional, Sequence, Type, Union
2
3 from graphql import GraphQLSchema, get_introspection_query, parse
4 from graphql.subscription import subscribe
5 from graphql.type.directives import specified_directives
6
7 from strawberry.custom_scalar import ScalarDefinition
8 from strawberry.enum import EnumDefinition
9 from strawberry.extensions import Extension
10 from strawberry.types.types import TypeDefinition
11 from strawberry.union import StrawberryUnion
12
13 from ..middleware import DirectivesMiddleware, Middleware
14 from ..printer import print_schema
15 from .base import ExecutionResult
16 from .execute import execute, execute_sync
17 from .types import ConcreteType, get_directive_type, get_object_type
18
19
20 class Schema:
21 def __init__(
22 self,
23 # TODO: can we make sure we only allow to pass something that has been decorated?
24 query: Type,
25 mutation: Optional[Type] = None,
26 subscription: Optional[Type] = None,
27 directives=(),
28 types=(),
29 extensions: Sequence[Type[Extension]] = (),
30 ):
31 self.extensions = extensions
32 self.type_map: Dict[str, ConcreteType] = {}
33
34 query_type = get_object_type(query, self.type_map)
35 mutation_type = get_object_type(mutation, self.type_map) if mutation else None
36 subscription_type = (
37 get_object_type(subscription, self.type_map) if subscription else None
38 )
39
40 self.middleware: List[Middleware] = [DirectivesMiddleware(directives)]
41
42 directives = [
43 get_directive_type(directive, self.type_map) for directive in directives
44 ]
45
46 self._schema = GraphQLSchema(
47 query=query_type,
48 mutation=mutation_type,
49 subscription=subscription_type if subscription else None,
50 directives=specified_directives + directives,
51 types=[get_object_type(type, self.type_map) for type in types],
52 )
53
54 self.query = self.type_map[query_type.name]
55
56 def get_type_by_name(
57 self, name: str
58 ) -> Optional[
59 Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
60 ]:
61 if name in self.type_map:
62 return self.type_map[name].definition
63
64 return None
65
66 async def execute(
67 self,
68 query: str,
69 variable_values: Optional[Dict[str, Any]] = None,
70 context_value: Optional[Any] = None,
71 root_value: Optional[Any] = None,
72 operation_name: Optional[str] = None,
73 ) -> ExecutionResult:
74 result = await execute(
75 self._schema,
76 query,
77 variable_values=variable_values,
78 root_value=root_value,
79 context_value=context_value,
80 operation_name=operation_name,
81 additional_middlewares=self.middleware,
82 extensions=self.extensions,
83 )
84
85 return ExecutionResult(
86 data=result.data,
87 errors=result.errors,
88 extensions=result.extensions,
89 )
90
91 def execute_sync(
92 self,
93 query: str,
94 variable_values: Optional[Dict[str, Any]] = None,
95 context_value: Optional[Any] = None,
96 root_value: Optional[Any] = None,
97 operation_name: Optional[str] = None,
98 ) -> ExecutionResult:
99 result = execute_sync(
100 self._schema,
101 query,
102 variable_values=variable_values,
103 root_value=root_value,
104 context_value=context_value,
105 operation_name=operation_name,
106 additional_middlewares=self.middleware,
107 extensions=self.extensions,
108 )
109
110 return ExecutionResult(
111 data=result.data,
112 errors=result.errors,
113 extensions=result.extensions,
114 )
115
116 async def subscribe(
117 self,
118 query: str,
119 variable_values: Optional[Dict[str, Any]] = None,
120 context_value: Optional[Any] = None,
121 root_value: Optional[Any] = None,
122 operation_name: Optional[str] = None,
123 ):
124 return await subscribe(
125 self._schema,
126 parse(query),
127 root_value=root_value,
128 context_value=context_value,
129 variable_values=variable_values,
130 operation_name=operation_name,
131 )
132
133 def as_str(self) -> str:
134 return print_schema(self)
135
136 __str__ = as_str
137
138 def introspect(self) -> Dict[str, Any]:
139 """Return the introspection query result for the current schema
140
141 Raises:
142 ValueError: If the introspection query fails due to an invalid schema
143 """
144 introspection = self.execute_sync(get_introspection_query())
145 if introspection.errors or not introspection.data:
146 raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}")
147
148 return introspection.data
149
[end of strawberry/schema/schema.py]
[start of strawberry/schema/execute.py]
1 from asyncio import ensure_future
2 from inspect import isawaitable
3 from typing import Any, Awaitable, Dict, List, Sequence, Type, cast
4
5 from graphql import (
6 ExecutionResult as GraphQLExecutionResult,
7 GraphQLError,
8 GraphQLSchema,
9 execute as original_execute,
10 parse,
11 )
12 from graphql.type import validate_schema
13 from graphql.validation import validate
14
15 from strawberry.extensions import Extension
16 from strawberry.extensions.runner import ExtensionsRunner
17 from strawberry.types import ExecutionContext, ExecutionResult
18
19
20 async def execute(
21 schema: GraphQLSchema,
22 query: str,
23 extensions: Sequence[Type[Extension]],
24 root_value: Any = None,
25 context_value: Any = None,
26 variable_values: Dict[str, Any] = None,
27 additional_middlewares: List[Any] = None,
28 operation_name: str = None,
29 ) -> ExecutionResult:
30 execution_context = ExecutionContext(
31 query=query,
32 context=context_value,
33 variables=variable_values,
34 operation_name=operation_name,
35 )
36
37 extensions_runner = ExtensionsRunner(
38 execution_context=execution_context,
39 extensions=[extension() for extension in extensions],
40 )
41
42 additional_middlewares = additional_middlewares or []
43
44 with extensions_runner.request():
45 schema_validation_errors = validate_schema(schema)
46
47 if schema_validation_errors:
48 return ExecutionResult(
49 data=None,
50 errors=schema_validation_errors,
51 extensions=extensions_runner.get_extensions_results(),
52 )
53
54 try:
55 with extensions_runner.parsing():
56 document = parse(query)
57 except GraphQLError as error:
58 return ExecutionResult(
59 data=None,
60 errors=[error],
61 extensions=extensions_runner.get_extensions_results(),
62 )
63
64 except Exception as error: # pragma: no cover
65 error = GraphQLError(str(error), original_error=error)
66
67 return ExecutionResult(
68 data=None,
69 errors=[error],
70 extensions=extensions_runner.get_extensions_results(),
71 )
72
73 with extensions_runner.validation():
74 validation_errors = validate(schema, document)
75
76 if validation_errors:
77 return ExecutionResult(data=None, errors=validation_errors)
78
79 result = original_execute(
80 schema,
81 document,
82 root_value=root_value,
83 middleware=extensions_runner.as_middleware_manager(*additional_middlewares),
84 variable_values=variable_values,
85 operation_name=operation_name,
86 context_value=context_value,
87 )
88
89 if isawaitable(result):
90 result = await cast(Awaitable[GraphQLExecutionResult], result)
91
92 result = cast(GraphQLExecutionResult, result)
93
94 return ExecutionResult(
95 data=result.data,
96 errors=result.errors,
97 extensions=extensions_runner.get_extensions_results(),
98 )
99
100
101 def execute_sync(
102 schema: GraphQLSchema,
103 query: str,
104 extensions: Sequence[Type[Extension]],
105 root_value: Any = None,
106 context_value: Any = None,
107 variable_values: Dict[str, Any] = None,
108 additional_middlewares: List[Any] = None,
109 operation_name: str = None,
110 ) -> ExecutionResult:
111 execution_context = ExecutionContext(
112 query=query,
113 context=context_value,
114 variables=variable_values,
115 operation_name=operation_name,
116 )
117 extensions_runner = ExtensionsRunner(
118 execution_context=execution_context,
119 extensions=[extension() for extension in extensions],
120 )
121
122 additional_middlewares = additional_middlewares or []
123
124 with extensions_runner.request():
125 schema_validation_errors = validate_schema(schema)
126
127 if schema_validation_errors:
128 return ExecutionResult(
129 data=None,
130 errors=schema_validation_errors,
131 extensions=extensions_runner.get_extensions_results(),
132 )
133
134 try:
135 with extensions_runner.parsing():
136 document = parse(query)
137 except GraphQLError as error:
138 return ExecutionResult(
139 data=None,
140 errors=[error],
141 extensions=extensions_runner.get_extensions_results(),
142 )
143
144 except Exception as error: # pragma: no cover
145 error = GraphQLError(str(error), original_error=error)
146
147 return ExecutionResult(
148 data=None,
149 errors=[error],
150 extensions=extensions_runner.get_extensions_results(),
151 )
152
153 with extensions_runner.validation():
154 validation_errors = validate(schema, document)
155
156 if validation_errors:
157 return ExecutionResult(data=None, errors=validation_errors)
158
159 result = original_execute(
160 schema,
161 document,
162 root_value=root_value,
163 middleware=extensions_runner.as_middleware_manager(*additional_middlewares),
164 variable_values=variable_values,
165 operation_name=operation_name,
166 context_value=context_value,
167 )
168
169 if isawaitable(result):
170 ensure_future(cast(Awaitable[GraphQLExecutionResult], result)).cancel()
171 raise RuntimeError("GraphQL execution failed to complete synchronously.")
172
173 result = cast(GraphQLExecutionResult, result)
174
175 return ExecutionResult(
176 data=result.data,
177 errors=result.errors,
178 extensions=extensions_runner.get_extensions_results(),
179 )
180
[end of strawberry/schema/execute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/schema/execute.py b/strawberry/schema/execute.py
--- a/strawberry/schema/execute.py
+++ b/strawberry/schema/execute.py
@@ -9,7 +9,6 @@
execute as original_execute,
parse,
)
-from graphql.type import validate_schema
from graphql.validation import validate
from strawberry.extensions import Extension
@@ -42,14 +41,8 @@
additional_middlewares = additional_middlewares or []
with extensions_runner.request():
- schema_validation_errors = validate_schema(schema)
-
- if schema_validation_errors:
- return ExecutionResult(
- data=None,
- errors=schema_validation_errors,
- extensions=extensions_runner.get_extensions_results(),
- )
+ # Note: In graphql-core the schema would be validated here but in
+ # Strawberry we are validating it at initialisation time instead
try:
with extensions_runner.parsing():
@@ -122,14 +115,8 @@
additional_middlewares = additional_middlewares or []
with extensions_runner.request():
- schema_validation_errors = validate_schema(schema)
-
- if schema_validation_errors:
- return ExecutionResult(
- data=None,
- errors=schema_validation_errors,
- extensions=extensions_runner.get_extensions_results(),
- )
+ # Note: In graphql-core the schema would be validated here but in
+ # Strawberry we are validating it at initialisation time instead
try:
with extensions_runner.parsing():
diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py
--- a/strawberry/schema/schema.py
+++ b/strawberry/schema/schema.py
@@ -1,6 +1,6 @@
from typing import Any, Dict, List, Optional, Sequence, Type, Union
-from graphql import GraphQLSchema, get_introspection_query, parse
+from graphql import GraphQLSchema, get_introspection_query, parse, validate_schema
from graphql.subscription import subscribe
from graphql.type.directives import specified_directives
@@ -51,6 +51,13 @@
types=[get_object_type(type, self.type_map) for type in types],
)
+ # Validate schema early because we want developers to know about
+ # possible issues as soon as possible
+ errors = validate_schema(self._schema)
+ if errors:
+ formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors)
+ raise ValueError(f"Invalid Schema. Errors:\n\n{formatted_errors}")
+
self.query = self.type_map[query_type.name]
def get_type_by_name(
|
{"golden_diff": "diff --git a/strawberry/schema/execute.py b/strawberry/schema/execute.py\n--- a/strawberry/schema/execute.py\n+++ b/strawberry/schema/execute.py\n@@ -9,7 +9,6 @@\n execute as original_execute,\n parse,\n )\n-from graphql.type import validate_schema\n from graphql.validation import validate\n \n from strawberry.extensions import Extension\n@@ -42,14 +41,8 @@\n additional_middlewares = additional_middlewares or []\n \n with extensions_runner.request():\n- schema_validation_errors = validate_schema(schema)\n-\n- if schema_validation_errors:\n- return ExecutionResult(\n- data=None,\n- errors=schema_validation_errors,\n- extensions=extensions_runner.get_extensions_results(),\n- )\n+ # Note: In graphql-core the schema would be validated here but in\n+ # Strawberry we are validating it at initialisation time instead\n \n try:\n with extensions_runner.parsing():\n@@ -122,14 +115,8 @@\n additional_middlewares = additional_middlewares or []\n \n with extensions_runner.request():\n- schema_validation_errors = validate_schema(schema)\n-\n- if schema_validation_errors:\n- return ExecutionResult(\n- data=None,\n- errors=schema_validation_errors,\n- extensions=extensions_runner.get_extensions_results(),\n- )\n+ # Note: In graphql-core the schema would be validated here but in\n+ # Strawberry we are validating it at initialisation time instead\n \n try:\n with extensions_runner.parsing():\ndiff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py\n--- a/strawberry/schema/schema.py\n+++ b/strawberry/schema/schema.py\n@@ -1,6 +1,6 @@\n from typing import Any, Dict, List, Optional, Sequence, Type, Union\n \n-from graphql import GraphQLSchema, get_introspection_query, parse\n+from graphql import GraphQLSchema, get_introspection_query, parse, validate_schema\n from graphql.subscription import subscribe\n from graphql.type.directives import specified_directives\n \n@@ -51,6 +51,13 @@\n types=[get_object_type(type, self.type_map) for type in types],\n )\n \n+ # Validate schema early because we want developers to know about\n+ # possible issues as soon as possible\n+ errors = validate_schema(self._schema)\n+ if errors:\n+ formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n+ raise ValueError(f\"Invalid Schema. Errors:\\n\\n{formatted_errors}\")\n+\n self.query = self.type_map[query_type.name]\n \n def get_type_by_name(\n", "issue": "Validate schema early\nCurrently graphql-core doesn't validate the schema until execution time (to allow for partial schema generation). 
Since strawberry doesn't support partial generation at the moment it would make sense to validate the schema as soon as it's instantiated so that any issues are raised early.\r\n\r\n~~As a bonus it would be nice to be able to specify which validators to run.~~ Actually the schema validation is separate to the validate \"rules\" that I was thinking about here so this is not the right place to tackle that problem.\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Sequence, Type, Union\n\nfrom graphql import GraphQLSchema, get_introspection_query, parse\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.extensions import Extension\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.union import StrawberryUnion\n\nfrom ..middleware import DirectivesMiddleware, Middleware\nfrom ..printer import print_schema\nfrom .base import ExecutionResult\nfrom .execute import execute, execute_sync\nfrom .types import ConcreteType, get_directive_type, get_object_type\n\n\nclass Schema:\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives=(),\n types=(),\n extensions: Sequence[Type[Extension]] = (),\n ):\n self.extensions = extensions\n self.type_map: Dict[str, ConcreteType] = {}\n\n query_type = get_object_type(query, self.type_map)\n mutation_type = get_object_type(mutation, self.type_map) if mutation else None\n subscription_type = (\n get_object_type(subscription, self.type_map) if subscription else None\n )\n\n self.middleware: List[Middleware] = [DirectivesMiddleware(directives)]\n\n directives = [\n get_directive_type(directive, self.type_map) for directive in directives\n ]\n\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + directives,\n types=[get_object_type(type, self.type_map) for type in types],\n )\n\n self.query = self.type_map[query_type.name]\n\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n if name in self.type_map:\n return self.type_map[name].definition\n\n return None\n\n async def execute(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ) -> ExecutionResult:\n result = await execute(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n )\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n def execute_sync(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ) -> ExecutionResult:\n result = execute_sync(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n 
extensions=self.extensions,\n )\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n async def subscribe(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}, {"content": "from asyncio import ensure_future\nfrom inspect import isawaitable\nfrom typing import Any, Awaitable, Dict, List, Sequence, Type, cast\n\nfrom graphql import (\n ExecutionResult as GraphQLExecutionResult,\n GraphQLError,\n GraphQLSchema,\n execute as original_execute,\n parse,\n)\nfrom graphql.type import validate_schema\nfrom graphql.validation import validate\n\nfrom strawberry.extensions import Extension\nfrom strawberry.extensions.runner import ExtensionsRunner\nfrom strawberry.types import ExecutionContext, ExecutionResult\n\n\nasync def execute(\n schema: GraphQLSchema,\n query: str,\n extensions: Sequence[Type[Extension]],\n root_value: Any = None,\n context_value: Any = None,\n variable_values: Dict[str, Any] = None,\n additional_middlewares: List[Any] = None,\n operation_name: str = None,\n) -> ExecutionResult:\n execution_context = ExecutionContext(\n query=query,\n context=context_value,\n variables=variable_values,\n operation_name=operation_name,\n )\n\n extensions_runner = ExtensionsRunner(\n execution_context=execution_context,\n extensions=[extension() for extension in extensions],\n )\n\n additional_middlewares = additional_middlewares or []\n\n with extensions_runner.request():\n schema_validation_errors = validate_schema(schema)\n\n if schema_validation_errors:\n return ExecutionResult(\n data=None,\n errors=schema_validation_errors,\n extensions=extensions_runner.get_extensions_results(),\n )\n\n try:\n with extensions_runner.parsing():\n document = parse(query)\n except GraphQLError as error:\n return ExecutionResult(\n data=None,\n errors=[error],\n extensions=extensions_runner.get_extensions_results(),\n )\n\n except Exception as error: # pragma: no cover\n error = GraphQLError(str(error), original_error=error)\n\n return ExecutionResult(\n data=None,\n errors=[error],\n extensions=extensions_runner.get_extensions_results(),\n )\n\n with extensions_runner.validation():\n validation_errors = validate(schema, document)\n\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n\n result = original_execute(\n schema,\n document,\n root_value=root_value,\n middleware=extensions_runner.as_middleware_manager(*additional_middlewares),\n variable_values=variable_values,\n operation_name=operation_name,\n context_value=context_value,\n )\n\n if isawaitable(result):\n result = await cast(Awaitable[GraphQLExecutionResult], result)\n\n result = 
cast(GraphQLExecutionResult, result)\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=extensions_runner.get_extensions_results(),\n )\n\n\ndef execute_sync(\n schema: GraphQLSchema,\n query: str,\n extensions: Sequence[Type[Extension]],\n root_value: Any = None,\n context_value: Any = None,\n variable_values: Dict[str, Any] = None,\n additional_middlewares: List[Any] = None,\n operation_name: str = None,\n) -> ExecutionResult:\n execution_context = ExecutionContext(\n query=query,\n context=context_value,\n variables=variable_values,\n operation_name=operation_name,\n )\n extensions_runner = ExtensionsRunner(\n execution_context=execution_context,\n extensions=[extension() for extension in extensions],\n )\n\n additional_middlewares = additional_middlewares or []\n\n with extensions_runner.request():\n schema_validation_errors = validate_schema(schema)\n\n if schema_validation_errors:\n return ExecutionResult(\n data=None,\n errors=schema_validation_errors,\n extensions=extensions_runner.get_extensions_results(),\n )\n\n try:\n with extensions_runner.parsing():\n document = parse(query)\n except GraphQLError as error:\n return ExecutionResult(\n data=None,\n errors=[error],\n extensions=extensions_runner.get_extensions_results(),\n )\n\n except Exception as error: # pragma: no cover\n error = GraphQLError(str(error), original_error=error)\n\n return ExecutionResult(\n data=None,\n errors=[error],\n extensions=extensions_runner.get_extensions_results(),\n )\n\n with extensions_runner.validation():\n validation_errors = validate(schema, document)\n\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n\n result = original_execute(\n schema,\n document,\n root_value=root_value,\n middleware=extensions_runner.as_middleware_manager(*additional_middlewares),\n variable_values=variable_values,\n operation_name=operation_name,\n context_value=context_value,\n )\n\n if isawaitable(result):\n ensure_future(cast(Awaitable[GraphQLExecutionResult], result)).cancel()\n raise RuntimeError(\"GraphQL execution failed to complete synchronously.\")\n\n result = cast(GraphQLExecutionResult, result)\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=extensions_runner.get_extensions_results(),\n )\n", "path": "strawberry/schema/execute.py"}]}
| 3,405 | 574 |
gh_patches_debug_41034
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-48228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get hourly event count from Snuba to compare with forecast
## Objective:
The forecasting algorithm generates an hourly threshold that we should compare against. Currently we fetch the entire day's event count from Snuba. This should be the hourly event count
</issue>
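Aligning the Snuba query with the hourly forecast mostly means narrowing the timestamp filter from midnight-to-now down to the top of the current hour. A small sketch of that window (the surrounding `Query`/`Condition` construction stays as in the module below):

```python
from datetime import datetime

# Count events for the current hour only, so the result is comparable
# with the hourly threshold produced by the forecasting algorithm.
now = datetime.now()
current_hour = now.replace(minute=0, second=0, microsecond=0)

# Used as the time bounds of the Snuba count query, e.g.:
#   Condition(Column("timestamp"), Op.GTE, current_hour),
#   Condition(Column("timestamp"), Op.LT, now),
```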
<code>
[start of src/sentry/issues/escalating.py]
1 """This module has the logic for querying Snuba for the hourly event count for a list of groups.
2 This is later used for generating group forecasts for determining when a group may be escalating.
3 """
4
5 import logging
6 from collections import defaultdict
7 from datetime import datetime, timedelta
8 from typing import Dict, List, Sequence, Tuple, TypedDict
9
10 from snuba_sdk import (
11 Column,
12 Condition,
13 Direction,
14 Entity,
15 Function,
16 Limit,
17 Offset,
18 Op,
19 OrderBy,
20 Query,
21 Request,
22 )
23
24 from sentry.issues.escalating_group_forecast import EscalatingGroupForecast
25 from sentry.issues.escalating_issues_alg import GroupCount
26 from sentry.models import Group
27 from sentry.models.group import GroupStatus
28 from sentry.models.groupinbox import GroupInboxReason, add_group_to_inbox
29 from sentry.snuba.dataset import Dataset, EntityKey
30 from sentry.types.group import GroupSubStatus
31 from sentry.utils.cache import cache
32 from sentry.utils.snuba import raw_snql_query
33
34 logger = logging.getLogger(__name__)
35
36 __all__ = ["query_groups_past_counts", "parse_groups_past_counts"]
37
38 REFERRER = "sentry.issues.escalating"
39 ELEMENTS_PER_SNUBA_PAGE = 10000 # This is the maximum value for Snuba
40 # The amount of data needed to generate a group forecast
41 BUCKETS_PER_GROUP = 7 * 24
42 ONE_WEEK_DURATION = 7
43 IS_ESCALATING_REFERRER = "sentry.issues.escalating.is_escalating"
44 GROUP_DAILY_COUNT_TTL = 60
45
46 GroupsCountResponse = TypedDict(
47 "GroupsCountResponse",
48 {"group_id": int, "hourBucket": str, "count()": int, "project_id": int},
49 )
50
51 ParsedGroupsCount = Dict[int, GroupCount]
52
53
54 def query_groups_past_counts(groups: Sequence[Group]) -> List[GroupsCountResponse]:
55 """Query Snuba for the counts for every group bucketed into hours.
56
57 It optimizes the query by guaranteeing that we look at group_ids that are from the same project id.
58 This is important for Snuba as the data is stored in blocks related to the project id.
59
60 We maximize the number of projects and groups to reduce the total number of Snuba queries.
61 Each project may not have enough groups in order to reach the max number of returned
62 elements (ELEMENTS_PER_SNUBA_PAGE), thus, projects with few groups should be grouped together until
63 we get at least a certain number of groups.
64
65 NOTE: Groups with less than the maximum number of buckets (think of groups with just 1 event or less
66 than 7 days old) will skew the optimization since we may only get one page and less elements than the max
67 ELEMENTS_PER_SNUBA_PAGE.
68 """
69 all_results = [] # type: ignore[var-annotated]
70 if not groups:
71 return all_results
72
73 start_date, end_date = _start_and_end_dates()
74 group_ids_by_project = _extract_project_and_group_ids(groups)
75 proj_ids, group_ids = [], []
76 processed_projects = 0
77 total_projects_count = len(group_ids_by_project)
78 organization_id = groups[0].project.organization.id
79
80 # This iteration guarantees that all groups for a project will be queried in the same call
81 # and only one page where the groups could be mixed with groups from another project
82 # Iterating over the sorted keys guarantees results for tests
83 for proj_id in sorted(group_ids_by_project.keys()):
84 _group_ids = group_ids_by_project[proj_id]
85 # Add them to the list of projects and groups to query
86 proj_ids.append(proj_id)
87 group_ids += _group_ids
88 processed_projects += 1
89 potential_num_elements = len(_group_ids) * BUCKETS_PER_GROUP
90 # This is trying to maximize the number of groups on the first page
91 if (
92 processed_projects < total_projects_count
93 and potential_num_elements < ELEMENTS_PER_SNUBA_PAGE
94 ):
95 continue
96
97 # TODO: Write this as a dispatcher type task and fire off a separate task per proj_ids
98 all_results += _query_with_pagination(
99 organization_id, proj_ids, group_ids, start_date, end_date
100 )
101 # We're ready for a new set of projects and ids
102 proj_ids, group_ids = [], []
103
104 return all_results
105
106
107 def _query_with_pagination(
108 organization_id: int,
109 project_ids: Sequence[int],
110 group_ids: Sequence[int],
111 start_date: datetime,
112 end_date: datetime,
113 ) -> List[GroupsCountResponse]:
114 """Query Snuba for event counts for the given list of project ids and groups ids in
115 a time range."""
116 all_results = []
117 offset = 0
118 while True:
119 query = _generate_query(project_ids, group_ids, offset, start_date, end_date)
120 request = Request(
121 dataset=Dataset.Events.value,
122 app_id=REFERRER,
123 query=query,
124 tenant_ids={"referrer": REFERRER, "organization_id": organization_id},
125 )
126 results = raw_snql_query(request, referrer=REFERRER)["data"]
127 all_results += results
128 offset += ELEMENTS_PER_SNUBA_PAGE
129 if not results or len(results) < ELEMENTS_PER_SNUBA_PAGE:
130 break
131
132 return all_results
133
134
135 def parse_groups_past_counts(response: Sequence[GroupsCountResponse]) -> ParsedGroupsCount:
136 """
137 Return the parsed snuba response for groups past counts to be used in generate_issue_forecast.
138 ParsedGroupCount is of the form {<group_id>: {"intervals": [str], "data": [int]}}.
139
140 `response`: Snuba response for group event counts
141 """
142 group_counts: ParsedGroupsCount = {}
143 group_ids_list = group_counts.keys()
144 for data in response:
145 group_id = data["group_id"]
146 if group_id not in group_ids_list:
147 group_counts[group_id] = {
148 "intervals": [data["hourBucket"]],
149 "data": [data["count()"]],
150 }
151 else:
152 group_counts[group_id]["intervals"].append(data["hourBucket"])
153 group_counts[group_id]["data"].append(data["count()"])
154 return group_counts
155
156
157 def _generate_query(
158 project_ids: Sequence[int],
159 group_ids: Sequence[int],
160 offset: int,
161 start_date: datetime,
162 end_date: datetime,
163 ) -> Query:
164 """This simply generates a query based on the passed parameters"""
165 group_id_col = Column("group_id")
166 proj_id_col = Column("project_id")
167 return Query(
168 match=Entity(EntityKey.Events.value),
169 select=[
170 proj_id_col,
171 group_id_col,
172 Function("toStartOfHour", [Column("timestamp")], "hourBucket"),
173 Function("count", []),
174 ],
175 groupby=[proj_id_col, group_id_col, Column("hourBucket")],
176 where=[
177 Condition(proj_id_col, Op.IN, Function("tuple", project_ids)),
178 Condition(Column("group_id"), Op.IN, Function("tuple", group_ids)),
179 Condition(Column("timestamp"), Op.GTE, start_date),
180 Condition(Column("timestamp"), Op.LT, end_date),
181 ],
182 limit=Limit(ELEMENTS_PER_SNUBA_PAGE),
183 offset=Offset(offset),
184 orderby=[
185 OrderBy(proj_id_col, Direction.ASC),
186 OrderBy(group_id_col, Direction.ASC),
187 OrderBy(Column("hourBucket"), Direction.ASC),
188 ],
189 )
190
191
192 def _start_and_end_dates(hours: int = BUCKETS_PER_GROUP) -> Tuple[datetime, datetime]:
193 """Return the start and end date of N hours time range."""
194 end_datetime = datetime.now()
195 return end_datetime - timedelta(hours=hours), end_datetime
196
197
198 def _extract_project_and_group_ids(groups: Sequence[Group]) -> Dict[int, List[int]]:
199 """Return all project and group IDs from a list of Group"""
200 group_ids_by_project: Dict[int, List[int]] = defaultdict(list)
201 for group in groups:
202 group_ids_by_project[group.project_id].append(group.id)
203
204 return group_ids_by_project
205
206
207 def get_group_daily_count(organization_id: int, project_id: int, group_id: int) -> int:
208 """Return the number of events a group has had today"""
209 key = f"daily-group-count:{project_id}:{group_id}"
210 daily_count = cache.get(key)
211
212 if daily_count is None:
213 today = datetime.now().date()
214 midnight = datetime.combine(today, datetime.min.time())
215 now = datetime.now()
216 query = Query(
217 match=Entity(EntityKey.Events.value),
218 select=[
219 Function("count", []),
220 ],
221 where=[
222 Condition(Column("project_id"), Op.EQ, project_id),
223 Condition(Column("group_id"), Op.EQ, group_id),
224 Condition(Column("timestamp"), Op.GTE, midnight),
225 Condition(Column("timestamp"), Op.LT, now),
226 ],
227 )
228 request = Request(
229 dataset=Dataset.Events.value,
230 app_id=IS_ESCALATING_REFERRER,
231 query=query,
232 tenant_ids={"referrer": IS_ESCALATING_REFERRER, "organization_id": organization_id},
233 )
234 daily_count = int(
235 raw_snql_query(request, referrer=IS_ESCALATING_REFERRER)["data"][0]["count()"]
236 )
237 cache.set(key, daily_count, GROUP_DAILY_COUNT_TTL)
238 return int(daily_count)
239
240
241 def is_escalating(group: Group) -> bool:
242 """Return boolean depending on if the group is escalating or not"""
243 group_daily_count = get_group_daily_count(
244 group.project.organization.id, group.project.id, group.id
245 )
246 forecast_today = EscalatingGroupForecast.fetch_todays_forecast(group.project.id, group.id)
247 # Check if current event occurance is greater than forecast for today's date
248 if group_daily_count > forecast_today:
249 group.substatus = GroupSubStatus.ESCALATING
250 group.status = GroupStatus.UNRESOLVED
251 add_group_to_inbox(group, GroupInboxReason.ESCALATING)
252 return True
253 return False
254
[end of src/sentry/issues/escalating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/issues/escalating.py b/src/sentry/issues/escalating.py
--- a/src/sentry/issues/escalating.py
+++ b/src/sentry/issues/escalating.py
@@ -41,7 +41,7 @@
BUCKETS_PER_GROUP = 7 * 24
ONE_WEEK_DURATION = 7
IS_ESCALATING_REFERRER = "sentry.issues.escalating.is_escalating"
-GROUP_DAILY_COUNT_TTL = 60
+GROUP_HOURLY_COUNT_TTL = 60
GroupsCountResponse = TypedDict(
"GroupsCountResponse",
@@ -204,15 +204,14 @@
return group_ids_by_project
-def get_group_daily_count(organization_id: int, project_id: int, group_id: int) -> int:
- """Return the number of events a group has had today"""
- key = f"daily-group-count:{project_id}:{group_id}"
- daily_count = cache.get(key)
+def get_group_hourly_count(organization_id: int, project_id: int, group_id: int) -> int:
+ """Return the number of events a group has had today in the last hour"""
+ key = f"hourly-group-count:{project_id}:{group_id}"
+ hourly_count = cache.get(key)
- if daily_count is None:
- today = datetime.now().date()
- midnight = datetime.combine(today, datetime.min.time())
+ if hourly_count is None:
now = datetime.now()
+ current_hour = now.replace(minute=0, second=0, microsecond=0)
query = Query(
match=Entity(EntityKey.Events.value),
select=[
@@ -221,7 +220,7 @@
where=[
Condition(Column("project_id"), Op.EQ, project_id),
Condition(Column("group_id"), Op.EQ, group_id),
- Condition(Column("timestamp"), Op.GTE, midnight),
+ Condition(Column("timestamp"), Op.GTE, current_hour),
Condition(Column("timestamp"), Op.LT, now),
],
)
@@ -231,21 +230,21 @@
query=query,
tenant_ids={"referrer": IS_ESCALATING_REFERRER, "organization_id": organization_id},
)
- daily_count = int(
+ hourly_count = int(
raw_snql_query(request, referrer=IS_ESCALATING_REFERRER)["data"][0]["count()"]
)
- cache.set(key, daily_count, GROUP_DAILY_COUNT_TTL)
- return int(daily_count)
+ cache.set(key, hourly_count, GROUP_HOURLY_COUNT_TTL)
+ return int(hourly_count)
def is_escalating(group: Group) -> bool:
"""Return boolean depending on if the group is escalating or not"""
- group_daily_count = get_group_daily_count(
+ group_hourly_count = get_group_hourly_count(
group.project.organization.id, group.project.id, group.id
)
forecast_today = EscalatingGroupForecast.fetch_todays_forecast(group.project.id, group.id)
# Check if current event occurance is greater than forecast for today's date
- if group_daily_count > forecast_today:
+ if group_hourly_count > forecast_today:
group.substatus = GroupSubStatus.ESCALATING
group.status = GroupStatus.UNRESOLVED
add_group_to_inbox(group, GroupInboxReason.ESCALATING)
|
{"golden_diff": "diff --git a/src/sentry/issues/escalating.py b/src/sentry/issues/escalating.py\n--- a/src/sentry/issues/escalating.py\n+++ b/src/sentry/issues/escalating.py\n@@ -41,7 +41,7 @@\n BUCKETS_PER_GROUP = 7 * 24\n ONE_WEEK_DURATION = 7\n IS_ESCALATING_REFERRER = \"sentry.issues.escalating.is_escalating\"\n-GROUP_DAILY_COUNT_TTL = 60\n+GROUP_HOURLY_COUNT_TTL = 60\n \n GroupsCountResponse = TypedDict(\n \"GroupsCountResponse\",\n@@ -204,15 +204,14 @@\n return group_ids_by_project\n \n \n-def get_group_daily_count(organization_id: int, project_id: int, group_id: int) -> int:\n- \"\"\"Return the number of events a group has had today\"\"\"\n- key = f\"daily-group-count:{project_id}:{group_id}\"\n- daily_count = cache.get(key)\n+def get_group_hourly_count(organization_id: int, project_id: int, group_id: int) -> int:\n+ \"\"\"Return the number of events a group has had today in the last hour\"\"\"\n+ key = f\"hourly-group-count:{project_id}:{group_id}\"\n+ hourly_count = cache.get(key)\n \n- if daily_count is None:\n- today = datetime.now().date()\n- midnight = datetime.combine(today, datetime.min.time())\n+ if hourly_count is None:\n now = datetime.now()\n+ current_hour = now.replace(minute=0, second=0, microsecond=0)\n query = Query(\n match=Entity(EntityKey.Events.value),\n select=[\n@@ -221,7 +220,7 @@\n where=[\n Condition(Column(\"project_id\"), Op.EQ, project_id),\n Condition(Column(\"group_id\"), Op.EQ, group_id),\n- Condition(Column(\"timestamp\"), Op.GTE, midnight),\n+ Condition(Column(\"timestamp\"), Op.GTE, current_hour),\n Condition(Column(\"timestamp\"), Op.LT, now),\n ],\n )\n@@ -231,21 +230,21 @@\n query=query,\n tenant_ids={\"referrer\": IS_ESCALATING_REFERRER, \"organization_id\": organization_id},\n )\n- daily_count = int(\n+ hourly_count = int(\n raw_snql_query(request, referrer=IS_ESCALATING_REFERRER)[\"data\"][0][\"count()\"]\n )\n- cache.set(key, daily_count, GROUP_DAILY_COUNT_TTL)\n- return int(daily_count)\n+ cache.set(key, hourly_count, GROUP_HOURLY_COUNT_TTL)\n+ return int(hourly_count)\n \n \n def is_escalating(group: Group) -> bool:\n \"\"\"Return boolean depending on if the group is escalating or not\"\"\"\n- group_daily_count = get_group_daily_count(\n+ group_hourly_count = get_group_hourly_count(\n group.project.organization.id, group.project.id, group.id\n )\n forecast_today = EscalatingGroupForecast.fetch_todays_forecast(group.project.id, group.id)\n # Check if current event occurance is greater than forecast for today's date\n- if group_daily_count > forecast_today:\n+ if group_hourly_count > forecast_today:\n group.substatus = GroupSubStatus.ESCALATING\n group.status = GroupStatus.UNRESOLVED\n add_group_to_inbox(group, GroupInboxReason.ESCALATING)\n", "issue": "Get hourly event count from Snuba to compare with forecast\n## Objective:\n\nThe forecasting algorithm generates an hourly threshold that we should compare against. Currently we fetch the entire day's event count from Snuba. 
This should be the hourly event count\n", "before_files": [{"content": "\"\"\"This module has the logic for querying Snuba for the hourly event count for a list of groups.\nThis is later used for generating group forecasts for determining when a group may be escalating.\n\"\"\"\n\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom typing import Dict, List, Sequence, Tuple, TypedDict\n\nfrom snuba_sdk import (\n Column,\n Condition,\n Direction,\n Entity,\n Function,\n Limit,\n Offset,\n Op,\n OrderBy,\n Query,\n Request,\n)\n\nfrom sentry.issues.escalating_group_forecast import EscalatingGroupForecast\nfrom sentry.issues.escalating_issues_alg import GroupCount\nfrom sentry.models import Group\nfrom sentry.models.group import GroupStatus\nfrom sentry.models.groupinbox import GroupInboxReason, add_group_to_inbox\nfrom sentry.snuba.dataset import Dataset, EntityKey\nfrom sentry.types.group import GroupSubStatus\nfrom sentry.utils.cache import cache\nfrom sentry.utils.snuba import raw_snql_query\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"query_groups_past_counts\", \"parse_groups_past_counts\"]\n\nREFERRER = \"sentry.issues.escalating\"\nELEMENTS_PER_SNUBA_PAGE = 10000 # This is the maximum value for Snuba\n# The amount of data needed to generate a group forecast\nBUCKETS_PER_GROUP = 7 * 24\nONE_WEEK_DURATION = 7\nIS_ESCALATING_REFERRER = \"sentry.issues.escalating.is_escalating\"\nGROUP_DAILY_COUNT_TTL = 60\n\nGroupsCountResponse = TypedDict(\n \"GroupsCountResponse\",\n {\"group_id\": int, \"hourBucket\": str, \"count()\": int, \"project_id\": int},\n)\n\nParsedGroupsCount = Dict[int, GroupCount]\n\n\ndef query_groups_past_counts(groups: Sequence[Group]) -> List[GroupsCountResponse]:\n \"\"\"Query Snuba for the counts for every group bucketed into hours.\n\n It optimizes the query by guaranteeing that we look at group_ids that are from the same project id.\n This is important for Snuba as the data is stored in blocks related to the project id.\n\n We maximize the number of projects and groups to reduce the total number of Snuba queries.\n Each project may not have enough groups in order to reach the max number of returned\n elements (ELEMENTS_PER_SNUBA_PAGE), thus, projects with few groups should be grouped together until\n we get at least a certain number of groups.\n\n NOTE: Groups with less than the maximum number of buckets (think of groups with just 1 event or less\n than 7 days old) will skew the optimization since we may only get one page and less elements than the max\n ELEMENTS_PER_SNUBA_PAGE.\n \"\"\"\n all_results = [] # type: ignore[var-annotated]\n if not groups:\n return all_results\n\n start_date, end_date = _start_and_end_dates()\n group_ids_by_project = _extract_project_and_group_ids(groups)\n proj_ids, group_ids = [], []\n processed_projects = 0\n total_projects_count = len(group_ids_by_project)\n organization_id = groups[0].project.organization.id\n\n # This iteration guarantees that all groups for a project will be queried in the same call\n # and only one page where the groups could be mixed with groups from another project\n # Iterating over the sorted keys guarantees results for tests\n for proj_id in sorted(group_ids_by_project.keys()):\n _group_ids = group_ids_by_project[proj_id]\n # Add them to the list of projects and groups to query\n proj_ids.append(proj_id)\n group_ids += _group_ids\n processed_projects += 1\n potential_num_elements = len(_group_ids) * BUCKETS_PER_GROUP\n # This is trying to maximize 
the number of groups on the first page\n if (\n processed_projects < total_projects_count\n and potential_num_elements < ELEMENTS_PER_SNUBA_PAGE\n ):\n continue\n\n # TODO: Write this as a dispatcher type task and fire off a separate task per proj_ids\n all_results += _query_with_pagination(\n organization_id, proj_ids, group_ids, start_date, end_date\n )\n # We're ready for a new set of projects and ids\n proj_ids, group_ids = [], []\n\n return all_results\n\n\ndef _query_with_pagination(\n organization_id: int,\n project_ids: Sequence[int],\n group_ids: Sequence[int],\n start_date: datetime,\n end_date: datetime,\n) -> List[GroupsCountResponse]:\n \"\"\"Query Snuba for event counts for the given list of project ids and groups ids in\n a time range.\"\"\"\n all_results = []\n offset = 0\n while True:\n query = _generate_query(project_ids, group_ids, offset, start_date, end_date)\n request = Request(\n dataset=Dataset.Events.value,\n app_id=REFERRER,\n query=query,\n tenant_ids={\"referrer\": REFERRER, \"organization_id\": organization_id},\n )\n results = raw_snql_query(request, referrer=REFERRER)[\"data\"]\n all_results += results\n offset += ELEMENTS_PER_SNUBA_PAGE\n if not results or len(results) < ELEMENTS_PER_SNUBA_PAGE:\n break\n\n return all_results\n\n\ndef parse_groups_past_counts(response: Sequence[GroupsCountResponse]) -> ParsedGroupsCount:\n \"\"\"\n Return the parsed snuba response for groups past counts to be used in generate_issue_forecast.\n ParsedGroupCount is of the form {<group_id>: {\"intervals\": [str], \"data\": [int]}}.\n\n `response`: Snuba response for group event counts\n \"\"\"\n group_counts: ParsedGroupsCount = {}\n group_ids_list = group_counts.keys()\n for data in response:\n group_id = data[\"group_id\"]\n if group_id not in group_ids_list:\n group_counts[group_id] = {\n \"intervals\": [data[\"hourBucket\"]],\n \"data\": [data[\"count()\"]],\n }\n else:\n group_counts[group_id][\"intervals\"].append(data[\"hourBucket\"])\n group_counts[group_id][\"data\"].append(data[\"count()\"])\n return group_counts\n\n\ndef _generate_query(\n project_ids: Sequence[int],\n group_ids: Sequence[int],\n offset: int,\n start_date: datetime,\n end_date: datetime,\n) -> Query:\n \"\"\"This simply generates a query based on the passed parameters\"\"\"\n group_id_col = Column(\"group_id\")\n proj_id_col = Column(\"project_id\")\n return Query(\n match=Entity(EntityKey.Events.value),\n select=[\n proj_id_col,\n group_id_col,\n Function(\"toStartOfHour\", [Column(\"timestamp\")], \"hourBucket\"),\n Function(\"count\", []),\n ],\n groupby=[proj_id_col, group_id_col, Column(\"hourBucket\")],\n where=[\n Condition(proj_id_col, Op.IN, Function(\"tuple\", project_ids)),\n Condition(Column(\"group_id\"), Op.IN, Function(\"tuple\", group_ids)),\n Condition(Column(\"timestamp\"), Op.GTE, start_date),\n Condition(Column(\"timestamp\"), Op.LT, end_date),\n ],\n limit=Limit(ELEMENTS_PER_SNUBA_PAGE),\n offset=Offset(offset),\n orderby=[\n OrderBy(proj_id_col, Direction.ASC),\n OrderBy(group_id_col, Direction.ASC),\n OrderBy(Column(\"hourBucket\"), Direction.ASC),\n ],\n )\n\n\ndef _start_and_end_dates(hours: int = BUCKETS_PER_GROUP) -> Tuple[datetime, datetime]:\n \"\"\"Return the start and end date of N hours time range.\"\"\"\n end_datetime = datetime.now()\n return end_datetime - timedelta(hours=hours), end_datetime\n\n\ndef _extract_project_and_group_ids(groups: Sequence[Group]) -> Dict[int, List[int]]:\n \"\"\"Return all project and group IDs from a list of Group\"\"\"\n 
group_ids_by_project: Dict[int, List[int]] = defaultdict(list)\n for group in groups:\n group_ids_by_project[group.project_id].append(group.id)\n\n return group_ids_by_project\n\n\ndef get_group_daily_count(organization_id: int, project_id: int, group_id: int) -> int:\n \"\"\"Return the number of events a group has had today\"\"\"\n key = f\"daily-group-count:{project_id}:{group_id}\"\n daily_count = cache.get(key)\n\n if daily_count is None:\n today = datetime.now().date()\n midnight = datetime.combine(today, datetime.min.time())\n now = datetime.now()\n query = Query(\n match=Entity(EntityKey.Events.value),\n select=[\n Function(\"count\", []),\n ],\n where=[\n Condition(Column(\"project_id\"), Op.EQ, project_id),\n Condition(Column(\"group_id\"), Op.EQ, group_id),\n Condition(Column(\"timestamp\"), Op.GTE, midnight),\n Condition(Column(\"timestamp\"), Op.LT, now),\n ],\n )\n request = Request(\n dataset=Dataset.Events.value,\n app_id=IS_ESCALATING_REFERRER,\n query=query,\n tenant_ids={\"referrer\": IS_ESCALATING_REFERRER, \"organization_id\": organization_id},\n )\n daily_count = int(\n raw_snql_query(request, referrer=IS_ESCALATING_REFERRER)[\"data\"][0][\"count()\"]\n )\n cache.set(key, daily_count, GROUP_DAILY_COUNT_TTL)\n return int(daily_count)\n\n\ndef is_escalating(group: Group) -> bool:\n \"\"\"Return boolean depending on if the group is escalating or not\"\"\"\n group_daily_count = get_group_daily_count(\n group.project.organization.id, group.project.id, group.id\n )\n forecast_today = EscalatingGroupForecast.fetch_todays_forecast(group.project.id, group.id)\n # Check if current event occurance is greater than forecast for today's date\n if group_daily_count > forecast_today:\n group.substatus = GroupSubStatus.ESCALATING\n group.status = GroupStatus.UNRESOLVED\n add_group_to_inbox(group, GroupInboxReason.ESCALATING)\n return True\n return False\n", "path": "src/sentry/issues/escalating.py"}]}
| 3,455 | 764 |
gh_patches_debug_32604
|
rasdani/github-patches
|
git_diff
|
ansible-collections__amazon.aws-2019
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The cloudwatchlogs_log_group_info module generates throttling exceptions
### Summary
When running the amazon.aws.cloudwatchlogs_log_group_info module on AWS accounts with more than (roughly) 50 log groups, we get ThrottlingExceptions once every 20 calls or so. I noticed that both the describe and the list-tags boto calls in the cloudwatchlogs_log_group_info module have no throttling handling configured and use the default throttling policy, i.e. none.
### Issue Type
Bug Report
### Component Name
amazon.aws.cloudwatchlogs_log_group_info
### Ansible Version
```console (paste below)
ansible [core 2.13.13]
config file = None
configured module search path = ['/home/rundeck/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /home/rundeck/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0]
jinja version = 3.1.3
libyaml = True
```
### Collection Versions
```console (paste below)
Collection Version
--------------------- -------
amazon.aws 7.3.0
community.general 8.3.0
nutanix.ncp 1.7.0
servicenow.servicenow 1.0.6
# /usr/local/lib/python3.8/dist-packages/ansible_collections
Collection Version
----------------------------- -------
amazon.aws 3.5.0
ansible.netcommon 3.1.3
ansible.posix 1.4.0
ansible.utils 2.8.0
ansible.windows 1.12.0
arista.eos 5.0.1
awx.awx 21.10.0
azure.azcollection 1.14.0
check_point.mgmt 2.3.0
chocolatey.chocolatey 1.3.1
cisco.aci 2.3.0
cisco.asa 3.1.0
cisco.dnac 6.6.1
cisco.intersight 1.0.22
cisco.ios 3.3.2
cisco.iosxr 3.3.1
cisco.ise 2.5.9
cisco.meraki 2.13.0
cisco.mso 2.1.0
cisco.nso 1.0.3
cisco.nxos 3.2.0
cisco.ucs 1.8.0
cloud.common 2.1.2
cloudscale_ch.cloud 2.2.3
community.aws 3.6.0
[...]
```
### AWS SDK versions
```console (paste below)
WARNING: Package(s) not found: boto
Name: boto3
Version: 1.34.45
Summary: The AWS SDK for Python
Home-page: https://github.com/boto/boto3
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.8/dist-packages
Requires: botocore, jmespath, s3transfer
Required-by:
---
Name: botocore
Version: 1.34.45
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services
Author-email: None
License: Apache License 2.0
Location: /usr/local/lib/python3.8/dist-packages
Requires: python-dateutil, jmespath, urllib3
Required-by: s3transfer, boto3, awscli
```
### Configuration
```console (paste below)
(no Ansible configuration)
```
### OS / Environment
NAME="Ubuntu"
VERSION="20.04.6 LTS (Focal Fossa)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 20.04.6 LTS"
### Steps to Reproduce
```yaml
- name: "Role based get all log groups in {{ selected_region }}"
amazon.aws.cloudwatchlogs_log_group_info:
region: "{{ selected_region }}"
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
aws_session_token: "{{ aws_session_token }}"
log_group_name: "{{ log_group_prefix }}"
register: log_groups
```
### Expected Results
Should get all log group information and tags without error. Optionally, adding an option to skip fetching the tags for each log group would allow engineers to work around this issue.
### Actual Results
```console (paste below)
fatal: [127.0.0.1]: FAILED! => {"boto3_version": "1.34.45", "botocore_version": "1.34.45", "changed": false, "error": {"code": "ThrottlingException", "message": "Rate exceeded"}, "msg": "Unable to describe tags for log group /aws/codebuild/tankmaintenanceplanning-pipeline-tsa: An error occurred (ThrottlingException) when calling the ListTagsLogGroup operation (reached max retries: 4): Rate exceeded"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
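
Illustrative note (not part of the original report): the root cause is that each per-log-group `ListTagsLogGroup` call goes out with no backoff, so accounts with many log groups hit the API rate limit. Below is a minimal standalone sketch of the same workload using a client-level adaptive retry configuration, which is one generic way to absorb `ThrottlingException` responses; the prefix value is only an example and this is not the change made to the module itself.

```python
# Sketch only: give the boto3 CloudWatch Logs client an adaptive retry policy
# so throttled calls are retried with backoff instead of failing immediately.
import boto3
from botocore.config import Config

retry_config = Config(retries={"max_attempts": 10, "mode": "adaptive"})
logs = boto3.client("logs", config=retry_config)

paginator = logs.get_paginator("describe_log_groups")
for page in paginator.paginate(logGroupNamePrefix="/aws/codebuild/"):  # example prefix
    for group in page["logGroups"]:
        # The per-group tag lookup is what throttles first, since it is one call per group.
        tags = logs.list_tags_log_group(logGroupName=group["logGroupName"])
        print(group["logGroupName"], tags.get("tags", {}))
```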
<code>
[start of plugins/modules/cloudwatchlogs_log_group_info.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: Ansible Project
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 DOCUMENTATION = r"""
8 ---
9 module: cloudwatchlogs_log_group_info
10 version_added: 5.0.0
11 short_description: Get information about log_group in CloudWatchLogs
12 description:
13 - Lists the specified log groups. You can list all your log groups or filter the results by prefix.
14 - This module was originally added to C(community.aws) in release 1.0.0.
15 author:
16 - Willian Ricardo (@willricardo) <[email protected]>
17 options:
18 log_group_name:
19 description:
20 - The name or prefix of the log group to filter by.
21 type: str
22 extends_documentation_fragment:
23 - amazon.aws.common.modules
24 - amazon.aws.region.modules
25 - amazon.aws.boto3
26 """
27
28 EXAMPLES = r"""
29 # Note: These examples do not set authentication details, see the AWS Guide for details.
30 - amazon.aws.cloudwatchlogs_log_group_info:
31 log_group_name: test-log-group
32 """
33
34 RETURN = r"""
35 log_groups:
36 description: Return the list of complex objects representing log groups
37 returned: success
38 type: complex
39 contains:
40 log_group_name:
41 description: The name of the log group.
42 returned: always
43 type: str
44 creation_time:
45 description: The creation time of the log group.
46 returned: always
47 type: int
48 retention_in_days:
49 description: The number of days to retain the log events in the specified log group.
50 returned: always
51 type: int
52 metric_filter_count:
53 description: The number of metric filters.
54 returned: always
55 type: int
56 arn:
57 description: The Amazon Resource Name (ARN) of the log group.
58 returned: always
59 type: str
60 stored_bytes:
61 description: The number of bytes stored.
62 returned: always
63 type: str
64 kms_key_id:
65 description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
66 returned: always
67 type: str
68 tags:
69 description: A dictionary representing the tags on the log group.
70 returned: always
71 type: dict
72 version_added: 4.0.0
73 version_added_collection: community.aws
74 """
75
76 try:
77 import botocore
78 except ImportError:
79 pass # Handled by AnsibleAWSModule
80
81 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
82
83 from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
84 from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
85
86
87 def describe_log_group(client, log_group_name, module):
88 params = {}
89 if log_group_name:
90 params["logGroupNamePrefix"] = log_group_name
91 try:
92 paginator = client.get_paginator("describe_log_groups")
93 desc_log_group = paginator.paginate(**params).build_full_result()
94 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
95 module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}")
96
97 for log_group in desc_log_group["logGroups"]:
98 log_group_name = log_group["logGroupName"]
99 try:
100 tags = client.list_tags_log_group(logGroupName=log_group_name)
101 except is_boto3_error_code("AccessDeniedException"):
102 tags = {}
103 module.warn(f"Permission denied listing tags for log group {log_group_name}")
104 except (
105 botocore.exceptions.ClientError,
106 botocore.exceptions.BotoCoreError,
107 ) as e: # pylint: disable=duplicate-except
108 module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}")
109 log_group["tags"] = tags.get("tags", {})
110
111 return desc_log_group
112
113
114 def main():
115 argument_spec = dict(
116 log_group_name=dict(),
117 )
118
119 module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
120
121 try:
122 logs = module.client("logs")
123 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
124 module.fail_json_aws(e, msg="Failed to connect to AWS")
125
126 desc_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module)
127 final_log_group_snake = []
128
129 for log_group in desc_log_group["logGroups"]:
130 final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=["tags"]))
131
132 desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
133 module.exit_json(**desc_log_group_result)
134
135
136 if __name__ == "__main__":
137 main()
138
[end of plugins/modules/cloudwatchlogs_log_group_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py
--- a/plugins/modules/cloudwatchlogs_log_group_info.py
+++ b/plugins/modules/cloudwatchlogs_log_group_info.py
@@ -82,6 +82,18 @@
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
[email protected]_backoff()
+def list_tags_log_group_with_backoff(client, log_group_name):
+ return client.list_tags_log_group(logGroupName=log_group_name)
+
+
[email protected]_backoff()
+def describe_log_groups_with_backoff(client, **kwargs):
+ paginator = client.get_paginator("describe_log_groups")
+ return paginator.paginate(**kwargs).build_full_result()
def describe_log_group(client, log_group_name, module):
@@ -89,15 +101,14 @@
if log_group_name:
params["logGroupNamePrefix"] = log_group_name
try:
- paginator = client.get_paginator("describe_log_groups")
- desc_log_group = paginator.paginate(**params).build_full_result()
+ desc_log_group = describe_log_groups_with_backoff(client, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}")
for log_group in desc_log_group["logGroups"]:
log_group_name = log_group["logGroupName"]
try:
- tags = client.list_tags_log_group(logGroupName=log_group_name)
+ tags = list_tags_log_group_with_backoff(client, log_group_name)
except is_boto3_error_code("AccessDeniedException"):
tags = {}
module.warn(f"Permission denied listing tags for log group {log_group_name}")
|
{"golden_diff": "diff --git a/plugins/modules/cloudwatchlogs_log_group_info.py b/plugins/modules/cloudwatchlogs_log_group_info.py\n--- a/plugins/modules/cloudwatchlogs_log_group_info.py\n+++ b/plugins/modules/cloudwatchlogs_log_group_info.py\n@@ -82,6 +82,18 @@\n \n from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\n from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\n+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry\n+\n+\[email protected]_backoff()\n+def list_tags_log_group_with_backoff(client, log_group_name):\n+ return client.list_tags_log_group(logGroupName=log_group_name)\n+\n+\[email protected]_backoff()\n+def describe_log_groups_with_backoff(client, **kwargs):\n+ paginator = client.get_paginator(\"describe_log_groups\")\n+ return paginator.paginate(**kwargs).build_full_result()\n \n \n def describe_log_group(client, log_group_name, module):\n@@ -89,15 +101,14 @@\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n- paginator = client.get_paginator(\"describe_log_groups\")\n- desc_log_group = paginator.paginate(**params).build_full_result()\n+ desc_log_group = describe_log_groups_with_backoff(client, **params)\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n \n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n- tags = client.list_tags_log_group(logGroupName=log_group_name)\n+ tags = list_tags_log_group_with_backoff(client, log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n", "issue": "The cloudwatchlogs_log_group_info module generates throttling exceptions\n### Summary\n\nWhen running the amazon.aws.cloudwatchlogs_log_group_info module on AWS accounts having more than (roughly) 50 log groups we get ThrottlingExceptions once every 20 calls or so. I noticed that both the describe and the list-tags boto calls in the cloudwatchlogs_log_group_info module have no throttling handling configured and use the default throttling handling policy, ie. none. 
\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\namazon.aws.cloudwatchlogs_log_group_info\n\n### Ansible Version\n\n```console (paste below)\r\nansible [core 2.13.13]\r\n config file = None\r\n configured module search path = ['/home/rundeck/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible\r\n ansible collection location = /home/rundeck/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.8.10 (default, Nov 22 2023, 10:22:35) [GCC 9.4.0]\r\n jinja version = 3.1.3\r\n libyaml = True\r\n\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\nCollection Version\r\n--------------------- -------\r\namazon.aws 7.3.0\r\ncommunity.general 8.3.0\r\nnutanix.ncp 1.7.0\r\nservicenow.servicenow 1.0.6\r\n\r\n# /usr/local/lib/python3.8/dist-packages/ansible_collections\r\nCollection Version\r\n----------------------------- -------\r\namazon.aws 3.5.0\r\nansible.netcommon 3.1.3\r\nansible.posix 1.4.0\r\nansible.utils 2.8.0\r\nansible.windows 1.12.0\r\narista.eos 5.0.1\r\nawx.awx 21.10.0\r\nazure.azcollection 1.14.0\r\ncheck_point.mgmt 2.3.0\r\nchocolatey.chocolatey 1.3.1\r\ncisco.aci 2.3.0\r\ncisco.asa 3.1.0\r\ncisco.dnac 6.6.1\r\ncisco.intersight 1.0.22\r\ncisco.ios 3.3.2\r\ncisco.iosxr 3.3.1\r\ncisco.ise 2.5.9\r\ncisco.meraki 2.13.0\r\ncisco.mso 2.1.0\r\ncisco.nso 1.0.3\r\ncisco.nxos 3.2.0\r\ncisco.ucs 1.8.0\r\ncloud.common 2.1.2\r\ncloudscale_ch.cloud 2.2.3\r\ncommunity.aws 3.6.0\r\n[...]\r\n```\r\n\n\n### AWS SDK versions\n\n```console (paste below)\r\nWARNING: Package(s) not found: boto\r\nName: boto3\r\nVersion: 1.34.45\r\nSummary: The AWS SDK for Python\r\nHome-page: https://github.com/boto/boto3\r\nAuthor: Amazon Web Services\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: /usr/local/lib/python3.8/dist-packages\r\nRequires: botocore, jmespath, s3transfer\r\nRequired-by:\r\n---\r\nName: botocore\r\nVersion: 1.34.45\r\nSummary: Low-level, data-driven core of boto 3.\r\nHome-page: https://github.com/boto/botocore\r\nAuthor: Amazon Web Services\r\nAuthor-email: None\r\nLicense: Apache License 2.0\r\nLocation: /usr/local/lib/python3.8/dist-packages\r\nRequires: python-dateutil, jmespath, urllib3\r\nRequired-by: s3transfer, boto3, awscli\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n(no Ansible configuration)\r\n```\r\n\n\n### OS / Environment\n\nNAME=\"Ubuntu\"\r\nVERSION=\"20.04.6 LTS (Focal Fossa)\"\r\nID=ubuntu\r\nID_LIKE=debian\r\nPRETTY_NAME=\"Ubuntu 20.04.6 LTS\"\n\n### Steps to Reproduce\n\n```yaml \r\n- name: \"Role based get all log groups in {{ selected_region }}\"\r\n amazon.aws.cloudwatchlogs_log_group_info:\r\n region: \"{{ selected_region }}\"\r\n aws_access_key: \"{{ aws_access_key }}\"\r\n aws_secret_key: \"{{ aws_secret_key }}\"\r\n aws_session_token: \"{{ aws_session_token }}\"\r\n log_group_name: \"{{ log_group_prefix }}\"\r\n register: log_groups\r\n```\r\n\n\n### Expected Results\n\nShould get all log group information and tags without error. Optionally, allow us to add an option to skip getting the tags for each log group would allow engineers to work around this issue. \n\n### Actual Results\n\n```console (paste below)\r\nfatal: [127.0.0.1]: FAILED! 
=> {\"boto3_version\": \"1.34.45\", \"botocore_version\": \"1.34.45\", \"changed\": false, \"error\": {\"code\": \"ThrottlingException\", \"message\": \"Rate exceeded\"}, \"msg\": \"Unable to describe tags for log group /aws/codebuild/tankmaintenanceplanning-pipeline-tsa: An error occurred (ThrottlingException) when calling the ListTagsLogGroup operation (reached max retries: 4): Rate exceeded\"\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cloudwatchlogs_log_group_info\nversion_added: 5.0.0\nshort_description: Get information about log_group in CloudWatchLogs\ndescription:\n - Lists the specified log groups. You can list all your log groups or filter the results by prefix.\n - This module was originally added to C(community.aws) in release 1.0.0.\nauthor:\n - Willian Ricardo (@willricardo) <[email protected]>\noptions:\n log_group_name:\n description:\n - The name or prefix of the log group to filter by.\n type: str\nextends_documentation_fragment:\n - amazon.aws.common.modules\n - amazon.aws.region.modules\n - amazon.aws.boto3\n\"\"\"\n\nEXAMPLES = r\"\"\"\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n- amazon.aws.cloudwatchlogs_log_group_info:\n log_group_name: test-log-group\n\"\"\"\n\nRETURN = r\"\"\"\nlog_groups:\n description: Return the list of complex objects representing log groups\n returned: success\n type: complex\n contains:\n log_group_name:\n description: The name of the log group.\n returned: always\n type: str\n creation_time:\n description: The creation time of the log group.\n returned: always\n type: int\n retention_in_days:\n description: The number of days to retain the log events in the specified log group.\n returned: always\n type: int\n metric_filter_count:\n description: The number of metric filters.\n returned: always\n type: int\n arn:\n description: The Amazon Resource Name (ARN) of the log group.\n returned: always\n type: str\n stored_bytes:\n description: The number of bytes stored.\n returned: always\n type: str\n kms_key_id:\n description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.\n returned: always\n type: str\n tags:\n description: A dictionary representing the tags on the log group.\n returned: always\n type: dict\n version_added: 4.0.0\n version_added_collection: community.aws\n\"\"\"\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule\n\n\ndef describe_log_group(client, log_group_name, module):\n params = {}\n if log_group_name:\n params[\"logGroupNamePrefix\"] = log_group_name\n try:\n paginator = client.get_paginator(\"describe_log_groups\")\n desc_log_group = paginator.paginate(**params).build_full_result()\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=f\"Unable to describe log group {log_group_name}\")\n\n for log_group in desc_log_group[\"logGroups\"]:\n log_group_name = log_group[\"logGroupName\"]\n try:\n tags = 
client.list_tags_log_group(logGroupName=log_group_name)\n except is_boto3_error_code(\"AccessDeniedException\"):\n tags = {}\n module.warn(f\"Permission denied listing tags for log group {log_group_name}\")\n except (\n botocore.exceptions.ClientError,\n botocore.exceptions.BotoCoreError,\n ) as e: # pylint: disable=duplicate-except\n module.fail_json_aws(e, msg=f\"Unable to describe tags for log group {log_group_name}\")\n log_group[\"tags\"] = tags.get(\"tags\", {})\n\n return desc_log_group\n\n\ndef main():\n argument_spec = dict(\n log_group_name=dict(),\n )\n\n module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)\n\n try:\n logs = module.client(\"logs\")\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg=\"Failed to connect to AWS\")\n\n desc_log_group = describe_log_group(client=logs, log_group_name=module.params[\"log_group_name\"], module=module)\n final_log_group_snake = []\n\n for log_group in desc_log_group[\"logGroups\"]:\n final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=[\"tags\"]))\n\n desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)\n module.exit_json(**desc_log_group_result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cloudwatchlogs_log_group_info.py"}]}
| 3,182 | 432 |
gh_patches_debug_5728
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-6217
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`CachedStFunctionWarning` incorrectly asks users to set `suppress_st_warning=True` when widget replay is disabled
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
When you include Streamlit widgets in a cache decorated function, a `CachedStFunctionWarning` is raised because widget replay is disabled by default. That `CachedStFunctionWarning` message asks users to set `suppress_st_warning=True` in the caching decorator to suppress the warning. However, the new cache primitives do not support `suppress_st_warning` as a param.
The warning needs to be edited to replace the mention of `suppress_st_warning` with `experimental_allow_widgets`.
### Reproducible Code Example
```Python
import streamlit as st
@st.cache_resource
def foo():
st.text_input("foo")
foo()
st.button("Re-run")
```
### Steps To Reproduce
1. Run the app
2. You see the following:

3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`
4. You see the following:

### Expected Behavior
1. Run the app
2. Expect to see:

3. Follow the suggestion and rewrite to `@st.cache_resource(experimental_allow_widgets=True)`
4. Expect to see:

### Current Behavior
1. Run the app
2. You see the following:

3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`
4. You see the following:

### Is this a regression?
- [ ] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.19.0
- Python version: 3.9
- Operating System: macOS
- Browser: Chrome
- Virtual environment: None
### Additional Information
_No response_
### Are you willing to submit a PR?
- [X] Yes, I am willing to submit a PR!
`CachedStFunctionWarning` incorrectly asks users to set `suppress_st_warning=True` when widget replay is disabled
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
When you include Streamlit widgets in a cache decorated function, a `CachedStFunctionWarning` is raised because widget replay is disabled by default. That `CachedStFunctionWarning` message asks users to set `suppress_st_warning=True` in the caching decorator to suppress the warning. However, the new cache primitives do not support `suppress_st_warning` as a param.
The warning needs to be edited to replace the mention of `suppress_st_warning` with `experimental_allow_widgets`.
### Reproducible Code Example
```Python
import streamlit as st
@st.cache_resource
def foo():
st.text_input("foo")
foo()
st.button("Re-run")
```
### Steps To Reproduce
1. Run the app
2. You see the following:

3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`
4. You see the following:

### Expected Behavior
1. Run the app
2. Expect to see:

3. Follow the suggestion and rewrite to `@st.cache_resource(experimental_allow_widgets=True)`
4. Expect to see:

### Current Behavior
1. Run the app
2. You see the following:

3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`
4. You see the following:

### Is this a regression?
- [ ] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.19.0
- Python version: 3.9
- Operating System: macOS
- Browser: Chrome
- Virtual environment: None
### Additional Information
_No response_
### Are you willing to submit a PR?
- [X] Yes, I am willing to submit a PR!
</issue>
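
For reference, the decorator usage that the corrected warning text should steer users toward is the one already shown in the expected behaviour above; spelled out as a runnable snippet (the widget labels are placeholders):

```python
import streamlit as st

# The new caching primitives accept experimental_allow_widgets, not suppress_st_warning:
# with it set, widgets used inside the cached function are replayed on cache hits
# and the CachedStFunctionWarning is no longer raised.
@st.cache_resource(experimental_allow_widgets=True)
def foo():
    st.text_input("foo")

foo()
st.button("Re-run")
```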
<code>
[start of lib/streamlit/runtime/caching/cache_errors.py]
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import types
16 from typing import Any, Optional
17
18 from streamlit import type_util
19 from streamlit.errors import (
20 MarkdownFormattedException,
21 StreamlitAPIException,
22 StreamlitAPIWarning,
23 )
24 from streamlit.runtime.caching.cache_type import CacheType, get_decorator_api_name
25
26 CACHE_DOCS_URL = "https://docs.streamlit.io/library/advanced-features/caching"
27
28
29 def get_cached_func_name_md(func: Any) -> str:
30 """Get markdown representation of the function name."""
31 if hasattr(func, "__name__"):
32 return "`%s()`" % func.__name__
33 elif hasattr(type(func), "__name__"):
34 return f"`{type(func).__name__}`"
35 return f"`{type(func)}`"
36
37
38 def get_return_value_type(return_value: Any) -> str:
39 if hasattr(return_value, "__module__") and hasattr(type(return_value), "__name__"):
40 return f"`{return_value.__module__}.{type(return_value).__name__}`"
41 return get_cached_func_name_md(return_value)
42
43
44 class UnhashableTypeError(Exception):
45 pass
46
47
48 class UnhashableParamError(StreamlitAPIException):
49 def __init__(
50 self,
51 cache_type: CacheType,
52 func: types.FunctionType,
53 arg_name: Optional[str],
54 arg_value: Any,
55 orig_exc: BaseException,
56 ):
57 msg = self._create_message(cache_type, func, arg_name, arg_value)
58 super().__init__(msg)
59 self.with_traceback(orig_exc.__traceback__)
60
61 @staticmethod
62 def _create_message(
63 cache_type: CacheType,
64 func: types.FunctionType,
65 arg_name: Optional[str],
66 arg_value: Any,
67 ) -> str:
68 arg_name_str = arg_name if arg_name is not None else "(unnamed)"
69 arg_type = type_util.get_fqn_type(arg_value)
70 func_name = func.__name__
71 arg_replacement_name = f"_{arg_name}" if arg_name is not None else "_arg"
72
73 return (
74 f"""
75 Cannot hash argument '{arg_name_str}' (of type `{arg_type}`) in '{func_name}'.
76
77 To address this, you can tell Streamlit not to hash this argument by adding a
78 leading underscore to the argument's name in the function signature:
79
80 ```
81 @st.{get_decorator_api_name(cache_type)}
82 def {func_name}({arg_replacement_name}, ...):
83 ...
84 ```
85 """
86 ).strip("\n")
87
88
89 class CacheKeyNotFoundError(Exception):
90 pass
91
92
93 class CacheError(Exception):
94 pass
95
96
97 class CachedStFunctionWarning(StreamlitAPIWarning):
98 def __init__(
99 self,
100 cache_type: CacheType,
101 st_func_name: str,
102 cached_func: types.FunctionType,
103 ):
104 args = {
105 "st_func_name": f"`st.{st_func_name}()`",
106 "func_name": self._get_cached_func_name_md(cached_func),
107 "decorator_name": get_decorator_api_name(cache_type),
108 }
109
110 msg = (
111 """
112 Your script uses %(st_func_name)s to write to your Streamlit app from within
113 some cached code at %(func_name)s. This code will only be called when we detect
114 a cache "miss", which can lead to unexpected results.
115
116 How to fix this:
117 * Move the %(st_func_name)s call outside %(func_name)s.
118 * Or, if you know what you're doing, use `@st.%(decorator_name)s(suppress_st_warning=True)`
119 to suppress the warning.
120 """
121 % args
122 ).strip("\n")
123
124 super().__init__(msg)
125
126 @staticmethod
127 def _get_cached_func_name_md(func: types.FunctionType) -> str:
128 """Get markdown representation of the function name."""
129 if hasattr(func, "__name__"):
130 return "`%s()`" % func.__name__
131 else:
132 return "a cached function"
133
134
135 class CacheReplayClosureError(StreamlitAPIException):
136 def __init__(
137 self,
138 cache_type: CacheType,
139 cached_func: types.FunctionType,
140 ):
141 func_name = get_cached_func_name_md(cached_func)
142 decorator_name = get_decorator_api_name(cache_type)
143
144 msg = (
145 f"""
146 While running {func_name}, a streamlit element is called on some layout block created outside the function.
147 This is incompatible with replaying the cached effect of that element, because the
148 the referenced block might not exist when the replay happens.
149
150 How to fix this:
151 * Move the creation of $THING inside {func_name}.
152 * Move the call to the streamlit element outside of {func_name}.
153 * Remove the `@st.{decorator_name}` decorator from {func_name}.
154 """
155 ).strip("\n")
156
157 super().__init__(msg)
158
159
160 class UnserializableReturnValueError(MarkdownFormattedException):
161 def __init__(self, func: types.FunctionType, return_value: types.FunctionType):
162 MarkdownFormattedException.__init__(
163 self,
164 f"""
165 Cannot serialize the return value (of type {get_return_value_type(return_value)}) in {get_cached_func_name_md(func)}.
166 `st.cache_data` uses [pickle](https://docs.python.org/3/library/pickle.html) to
167 serialize the function’s return value and safely store it in the cache without mutating the original object. Please convert the return value to a pickle-serializable type.
168 If you want to cache unserializable objects such as database connections or Tensorflow
169 sessions, use `st.cache_resource` instead (see [our docs]({CACHE_DOCS_URL}) for differences).""",
170 )
171
172
173 class UnevaluatedDataFrameError(StreamlitAPIException):
174 """Used to display a message about uncollected dataframe being used"""
175
176 pass
177
[end of lib/streamlit/runtime/caching/cache_errors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/streamlit/runtime/caching/cache_errors.py b/lib/streamlit/runtime/caching/cache_errors.py
--- a/lib/streamlit/runtime/caching/cache_errors.py
+++ b/lib/streamlit/runtime/caching/cache_errors.py
@@ -115,8 +115,8 @@
How to fix this:
* Move the %(st_func_name)s call outside %(func_name)s.
-* Or, if you know what you're doing, use `@st.%(decorator_name)s(suppress_st_warning=True)`
-to suppress the warning.
+* Or, if you know what you're doing, use `@st.%(decorator_name)s(experimental_allow_widgets=True)`
+to enable widget replay and suppress this warning.
"""
% args
).strip("\n")
|
{"golden_diff": "diff --git a/lib/streamlit/runtime/caching/cache_errors.py b/lib/streamlit/runtime/caching/cache_errors.py\n--- a/lib/streamlit/runtime/caching/cache_errors.py\n+++ b/lib/streamlit/runtime/caching/cache_errors.py\n@@ -115,8 +115,8 @@\n \n How to fix this:\n * Move the %(st_func_name)s call outside %(func_name)s.\n-* Or, if you know what you're doing, use `@st.%(decorator_name)s(suppress_st_warning=True)`\n-to suppress the warning.\n+* Or, if you know what you're doing, use `@st.%(decorator_name)s(experimental_allow_widgets=True)`\n+to enable widget replay and suppress this warning.\n \"\"\"\n % args\n ).strip(\"\\n\")\n", "issue": "`CachedStFunctionWarning` incorrectly asks users to set `suppress_st_warning=True` when widget replay is disabled\n### Checklist\n\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\n- [X] I added a very descriptive title to this issue.\n- [X] I have provided sufficient information below to help reproduce this issue.\n\n### Summary\n\nWhen you include Streamlit widgets in a cache decorated function, a `CachedStFunctionWarning` is raised because widget replay is disabled by default. That `CachedStFunctionWarning` message asks users to set `suppress_st_warning=True` in the caching decorator to suppress the warning. However, the new cache primitives do not support `suppress_st_warning` as a param.\r\n\r\nThe warning needs to be edited to replace the mention of `suppress_st_warning` with `experimental_allow_widgets`.\n\n### Reproducible Code Example\n\n```Python\nimport streamlit as st\r\n\r\[email protected]_resource\r\ndef foo():\r\n st.text_input(\"foo\")\r\n\r\nfoo()\r\n\r\nst.button(\"Re-run\")\n```\n\n\n### Steps To Reproduce\n\n1. Run the app\r\n2. You see the following:\r\n\r\n3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`\r\n4. You see the following:\r\n\n\n### Expected Behavior\n\n1. Run the app\r\n2. Expect to see:\r\n\r\n3. Follow the suggestion and rewrite to `@st.cache_resource(experimental_allow_widgets=True)`\r\n4. Expect to see:\r\n\r\n\n\n### Current Behavior\n\n1. Run the app\r\n2. You see the following:\r\n\r\n3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`\r\n4. You see the following:\r\n\n\n### Is this a regression?\n\n- [ ] Yes, this used to work in a previous version.\n\n### Debug info\n\n- Streamlit version: 1.19.0\r\n- Python version: 3.9\r\n- Operating System: macOS\r\n- Browser: Chrome\r\n- Virtual environment: None\r\n\n\n### Additional Information\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [X] Yes, I am willing to submit a PR!\n`CachedStFunctionWarning` incorrectly asks users to set `suppress_st_warning=True` when widget replay is disabled\n### Checklist\n\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\n- [X] I added a very descriptive title to this issue.\n- [X] I have provided sufficient information below to help reproduce this issue.\n\n### Summary\n\nWhen you include Streamlit widgets in a cache decorated function, a `CachedStFunctionWarning` is raised because widget replay is disabled by default. That `CachedStFunctionWarning` message asks users to set `suppress_st_warning=True` in the caching decorator to suppress the warning. 
However, the new cache primitives do not support `suppress_st_warning` as a param.\r\n\r\nThe warning needs to be edited to replace the mention of `suppress_st_warning` with `experimental_allow_widgets`.\n\n### Reproducible Code Example\n\n```Python\nimport streamlit as st\r\n\r\[email protected]_resource\r\ndef foo():\r\n st.text_input(\"foo\")\r\n\r\nfoo()\r\n\r\nst.button(\"Re-run\")\n```\n\n\n### Steps To Reproduce\n\n1. Run the app\r\n2. You see the following:\r\n\r\n3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`\r\n4. You see the following:\r\n\n\n### Expected Behavior\n\n1. Run the app\r\n2. Expect to see:\r\n\r\n3. Follow the suggestion and rewrite to `@st.cache_resource(experimental_allow_widgets=True)`\r\n4. Expect to see:\r\n\r\n\n\n### Current Behavior\n\n1. Run the app\r\n2. You see the following:\r\n\r\n3. Follow the suggestion and rewrite to `@st.cache_resource(suppress_st_warning=True)`\r\n4. You see the following:\r\n\n\n### Is this a regression?\n\n- [ ] Yes, this used to work in a previous version.\n\n### Debug info\n\n- Streamlit version: 1.19.0\r\n- Python version: 3.9\r\n- Operating System: macOS\r\n- Browser: Chrome\r\n- Virtual environment: None\r\n\n\n### Additional Information\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [X] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport types\nfrom typing import Any, Optional\n\nfrom streamlit import type_util\nfrom streamlit.errors import (\n MarkdownFormattedException,\n StreamlitAPIException,\n StreamlitAPIWarning,\n)\nfrom streamlit.runtime.caching.cache_type import CacheType, get_decorator_api_name\n\nCACHE_DOCS_URL = \"https://docs.streamlit.io/library/advanced-features/caching\"\n\n\ndef get_cached_func_name_md(func: Any) -> str:\n \"\"\"Get markdown representation of the function name.\"\"\"\n if hasattr(func, \"__name__\"):\n return \"`%s()`\" % func.__name__\n elif hasattr(type(func), \"__name__\"):\n return f\"`{type(func).__name__}`\"\n return f\"`{type(func)}`\"\n\n\ndef get_return_value_type(return_value: Any) -> str:\n if hasattr(return_value, \"__module__\") and hasattr(type(return_value), \"__name__\"):\n return f\"`{return_value.__module__}.{type(return_value).__name__}`\"\n return get_cached_func_name_md(return_value)\n\n\nclass UnhashableTypeError(Exception):\n pass\n\n\nclass UnhashableParamError(StreamlitAPIException):\n def __init__(\n self,\n cache_type: CacheType,\n func: types.FunctionType,\n arg_name: Optional[str],\n arg_value: Any,\n orig_exc: BaseException,\n ):\n msg = self._create_message(cache_type, func, arg_name, arg_value)\n super().__init__(msg)\n self.with_traceback(orig_exc.__traceback__)\n\n @staticmethod\n def _create_message(\n cache_type: CacheType,\n func: types.FunctionType,\n arg_name: Optional[str],\n arg_value: Any,\n ) -> str:\n arg_name_str = arg_name if arg_name is not None else 
\"(unnamed)\"\n arg_type = type_util.get_fqn_type(arg_value)\n func_name = func.__name__\n arg_replacement_name = f\"_{arg_name}\" if arg_name is not None else \"_arg\"\n\n return (\n f\"\"\"\nCannot hash argument '{arg_name_str}' (of type `{arg_type}`) in '{func_name}'.\n\nTo address this, you can tell Streamlit not to hash this argument by adding a\nleading underscore to the argument's name in the function signature:\n\n```\n@st.{get_decorator_api_name(cache_type)}\ndef {func_name}({arg_replacement_name}, ...):\n ...\n```\n \"\"\"\n ).strip(\"\\n\")\n\n\nclass CacheKeyNotFoundError(Exception):\n pass\n\n\nclass CacheError(Exception):\n pass\n\n\nclass CachedStFunctionWarning(StreamlitAPIWarning):\n def __init__(\n self,\n cache_type: CacheType,\n st_func_name: str,\n cached_func: types.FunctionType,\n ):\n args = {\n \"st_func_name\": f\"`st.{st_func_name}()`\",\n \"func_name\": self._get_cached_func_name_md(cached_func),\n \"decorator_name\": get_decorator_api_name(cache_type),\n }\n\n msg = (\n \"\"\"\nYour script uses %(st_func_name)s to write to your Streamlit app from within\nsome cached code at %(func_name)s. This code will only be called when we detect\na cache \"miss\", which can lead to unexpected results.\n\nHow to fix this:\n* Move the %(st_func_name)s call outside %(func_name)s.\n* Or, if you know what you're doing, use `@st.%(decorator_name)s(suppress_st_warning=True)`\nto suppress the warning.\n \"\"\"\n % args\n ).strip(\"\\n\")\n\n super().__init__(msg)\n\n @staticmethod\n def _get_cached_func_name_md(func: types.FunctionType) -> str:\n \"\"\"Get markdown representation of the function name.\"\"\"\n if hasattr(func, \"__name__\"):\n return \"`%s()`\" % func.__name__\n else:\n return \"a cached function\"\n\n\nclass CacheReplayClosureError(StreamlitAPIException):\n def __init__(\n self,\n cache_type: CacheType,\n cached_func: types.FunctionType,\n ):\n func_name = get_cached_func_name_md(cached_func)\n decorator_name = get_decorator_api_name(cache_type)\n\n msg = (\n f\"\"\"\nWhile running {func_name}, a streamlit element is called on some layout block created outside the function.\nThis is incompatible with replaying the cached effect of that element, because the\nthe referenced block might not exist when the replay happens.\n\nHow to fix this:\n* Move the creation of $THING inside {func_name}.\n* Move the call to the streamlit element outside of {func_name}.\n* Remove the `@st.{decorator_name}` decorator from {func_name}.\n \"\"\"\n ).strip(\"\\n\")\n\n super().__init__(msg)\n\n\nclass UnserializableReturnValueError(MarkdownFormattedException):\n def __init__(self, func: types.FunctionType, return_value: types.FunctionType):\n MarkdownFormattedException.__init__(\n self,\n f\"\"\"\n Cannot serialize the return value (of type {get_return_value_type(return_value)}) in {get_cached_func_name_md(func)}.\n `st.cache_data` uses [pickle](https://docs.python.org/3/library/pickle.html) to\n serialize the function\u2019s return value and safely store it in the cache without mutating the original object. Please convert the return value to a pickle-serializable type.\n If you want to cache unserializable objects such as database connections or Tensorflow\n sessions, use `st.cache_resource` instead (see [our docs]({CACHE_DOCS_URL}) for differences).\"\"\",\n )\n\n\nclass UnevaluatedDataFrameError(StreamlitAPIException):\n \"\"\"Used to display a message about uncollected dataframe being used\"\"\"\n\n pass\n", "path": "lib/streamlit/runtime/caching/cache_errors.py"}]}
| 4,022 | 166 |
gh_patches_debug_391
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-1992
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace pyaml dependency with PyYAML
There is a dependency on pyaml in setup.py:
https://github.com/spulec/moto/blob/master/setup.py#L18
I think that this is intended to be PyYAML (which pyaml depends on), and I do not see any usages of pyaml itself in this codebase.
pyaml uses WTFPL (https://github.com/mk-fg/pretty-yaml/blob/master/COPYING) which is not approved by the OSI (https://opensource.org/minutes20090304)
</issue>
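
A side note to make the distinction concrete (general packaging knowledge, not something stated in the repository): the two distributions expose different import names, so a quick grep for the import is usually enough to confirm which one the code actually uses. The snippet below assumes only PyYAML is installed.

```python
# PyYAML is imported as "yaml"; the separate pyaml package (a pretty-printing
# wrapper that itself depends on PyYAML) would be imported as "pyaml".
import yaml  # provided by the PyYAML distribution

doc = yaml.safe_load("Resources:\n  Bucket:\n    Type: AWS::S3::Bucket\n")
print(yaml.safe_dump(doc, default_flow_style=False))
```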
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 import setuptools
4 from setuptools import setup, find_packages
5 import sys
6
7
8 install_requires = [
9 "Jinja2>=2.7.3",
10 "boto>=2.36.0",
11 "boto3>=1.6.16",
12 "botocore>=1.12.13",
13 "cryptography>=2.3.0",
14 "requests>=2.5",
15 "xmltodict",
16 "six>1.9",
17 "werkzeug",
18 "pyaml",
19 "pytz",
20 "python-dateutil<3.0.0,>=2.1",
21 "python-jose<3.0.0",
22 "mock",
23 "docker>=2.5.1",
24 "jsondiff==1.1.1",
25 "aws-xray-sdk!=0.96,>=0.93",
26 "responses>=0.9.0",
27 ]
28
29 extras_require = {
30 'server': ['flask'],
31 }
32
33 # https://hynek.me/articles/conditional-python-dependencies/
34 if int(setuptools.__version__.split(".", 1)[0]) < 18:
35 if sys.version_info[0:2] < (3, 3):
36 install_requires.append("backports.tempfile")
37 else:
38 extras_require[":python_version<'3.3'"] = ["backports.tempfile"]
39
40
41 setup(
42 name='moto',
43 version='1.3.7',
44 description='A library that allows your python tests to easily'
45 ' mock out the boto library',
46 author='Steve Pulec',
47 author_email='[email protected]',
48 url='https://github.com/spulec/moto',
49 entry_points={
50 'console_scripts': [
51 'moto_server = moto.server:main',
52 ],
53 },
54 packages=find_packages(exclude=("tests", "tests.*")),
55 install_requires=install_requires,
56 extras_require=extras_require,
57 include_package_data=True,
58 license="Apache",
59 test_suite="tests",
60 classifiers=[
61 "Programming Language :: Python :: 2",
62 "Programming Language :: Python :: 2.7",
63 "Programming Language :: Python :: 3",
64 "Programming Language :: Python :: 3.3",
65 "Programming Language :: Python :: 3.4",
66 "Programming Language :: Python :: 3.5",
67 "Programming Language :: Python :: 3.6",
68 "License :: OSI Approved :: Apache Software License",
69 "Topic :: Software Development :: Testing",
70 ],
71 )
72
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"xmltodict",
"six>1.9",
"werkzeug",
- "pyaml",
+ "PyYAML",
"pytz",
"python-dateutil<3.0.0,>=2.1",
"python-jose<3.0.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n- \"pyaml\",\n+ \"PyYAML\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n", "issue": "Replace pyaml dependency with PyYAML\nThere is a dependency on pyaml in setup.py:\r\n\r\nhttps://github.com/spulec/moto/blob/master/setup.py#L18\r\n\r\nI think that this is intended to be PyYAML (which pyaml depends on), and I do not see any usages of pyaml itself in this codebase.\r\n\r\npyaml uses WTFPL (https://github.com/mk-fg/pretty-yaml/blob/master/COPYING) which is not approved by the OSI (https://opensource.org/minutes20090304)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}]}
| 1,343 | 98 |
gh_patches_debug_24229
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-2160
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Skai plugin broken
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
The Skai plugin has been broken since yesterday, but it is actually no longer needed because they now provide a much more stable stream (they no longer change the stream three or so times a day). **Imho it can be removed.**
The new live URL is as follows:
http://www.skaitv.gr/live
</issue>
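
For context (a sketch, not part of the report): the patch later in this record resolves the stream through skaitv.gr's JSON endpoint instead of scraping the page. Outside of Streamlink's plugin machinery, with error handling omitted, that lookup amounts to roughly:

```python
# Fetch the station's "live" JSON document and pull the YouTube livestream URL
# out of its "now" section; the endpoint and keys mirror the patch shown further down.
import requests

API_URL = "http://www.skaitv.gr/json/live.php"
payload = requests.get(API_URL, timeout=10).json()
yt_url = payload["now"]["livestream"]  # a regular YouTube watch URL
print(yt_url)
```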
<code>
[start of src/streamlink/plugins/skai.py]
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import validate
5
6 YOUTUBE_URL = "https://www.youtube.com/watch?v={0}"
7 _url_re = re.compile(r'http(s)?://www\.skai.gr/.*')
8 _youtube_id = re.compile(r'<span\s+itemprop="contentUrl"\s+href="(.*)"></span>', re.MULTILINE)
9 _youtube_url_schema = validate.Schema(
10 validate.all(
11 validate.transform(_youtube_id.search),
12 validate.any(
13 None,
14 validate.all(
15 validate.get(1),
16 validate.text
17 )
18 )
19 )
20 )
21
22
23 class Skai(Plugin):
24 @classmethod
25 def can_handle_url(cls, url):
26 return _url_re.match(url)
27
28 def _get_streams(self):
29 channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)
30 if channel_id:
31 return self.session.streams(YOUTUBE_URL.format(channel_id))
32
33
34 __plugin__ = Skai
35
[end of src/streamlink/plugins/skai.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/skai.py b/src/streamlink/plugins/skai.py
--- a/src/streamlink/plugins/skai.py
+++ b/src/streamlink/plugins/skai.py
@@ -3,20 +3,15 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
-YOUTUBE_URL = "https://www.youtube.com/watch?v={0}"
-_url_re = re.compile(r'http(s)?://www\.skai.gr/.*')
-_youtube_id = re.compile(r'<span\s+itemprop="contentUrl"\s+href="(.*)"></span>', re.MULTILINE)
-_youtube_url_schema = validate.Schema(
- validate.all(
- validate.transform(_youtube_id.search),
- validate.any(
- None,
- validate.all(
- validate.get(1),
- validate.text
- )
- )
- )
+
+_url_re = re.compile(r'http(s)?://www\.skai(?:tv)?.gr/.*')
+_api_url = "http://www.skaitv.gr/json/live.php"
+_api_res_schema = validate.Schema(validate.all(
+ validate.get("now"),
+ {
+ "livestream": validate.url()
+ },
+ validate.get("livestream"))
)
@@ -26,9 +21,10 @@
return _url_re.match(url)
def _get_streams(self):
- channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)
- if channel_id:
- return self.session.streams(YOUTUBE_URL.format(channel_id))
+ api_res = self.session.http.get(_api_url)
+ yt_url = self.session.http.json(api_res, schema=_api_res_schema)
+ if yt_url:
+ return self.session.streams(yt_url)
__plugin__ = Skai
|
{"golden_diff": "diff --git a/src/streamlink/plugins/skai.py b/src/streamlink/plugins/skai.py\n--- a/src/streamlink/plugins/skai.py\n+++ b/src/streamlink/plugins/skai.py\n@@ -3,20 +3,15 @@\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import validate\n \n-YOUTUBE_URL = \"https://www.youtube.com/watch?v={0}\"\n-_url_re = re.compile(r'http(s)?://www\\.skai.gr/.*')\n-_youtube_id = re.compile(r'<span\\s+itemprop=\"contentUrl\"\\s+href=\"(.*)\"></span>', re.MULTILINE)\n-_youtube_url_schema = validate.Schema(\n- validate.all(\n- validate.transform(_youtube_id.search),\n- validate.any(\n- None,\n- validate.all(\n- validate.get(1),\n- validate.text\n- )\n- )\n- )\n+\n+_url_re = re.compile(r'http(s)?://www\\.skai(?:tv)?.gr/.*')\n+_api_url = \"http://www.skaitv.gr/json/live.php\"\n+_api_res_schema = validate.Schema(validate.all(\n+ validate.get(\"now\"),\n+ {\n+ \"livestream\": validate.url()\n+ },\n+ validate.get(\"livestream\"))\n )\n \n \n@@ -26,9 +21,10 @@\n return _url_re.match(url)\n \n def _get_streams(self):\n- channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)\n- if channel_id:\n- return self.session.streams(YOUTUBE_URL.format(channel_id))\n+ api_res = self.session.http.get(_api_url)\n+ yt_url = self.session.http.json(api_res, schema=_api_res_schema)\n+ if yt_url:\n+ return self.session.streams(yt_url)\n \n \n __plugin__ = Skai\n", "issue": "Skai plugin broken\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\nSkai plugin is broken since yesterday, but actually it is no longer needed because they provide a lot more stable stream (they don't change stream three or so times a day). **Imho it can be removed.**\r\n\r\nNew live url as follows:\r\n\r\nhttp://www.skaitv.gr/live\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\n\nYOUTUBE_URL = \"https://www.youtube.com/watch?v={0}\"\n_url_re = re.compile(r'http(s)?://www\\.skai.gr/.*')\n_youtube_id = re.compile(r'<span\\s+itemprop=\"contentUrl\"\\s+href=\"(.*)\"></span>', re.MULTILINE)\n_youtube_url_schema = validate.Schema(\n validate.all(\n validate.transform(_youtube_id.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.text\n )\n )\n )\n)\n\n\nclass Skai(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n channel_id = self.session.http.get(self.url, schema=_youtube_url_schema)\n if channel_id:\n return self.session.streams(YOUTUBE_URL.format(channel_id))\n\n\n__plugin__ = Skai\n", "path": "src/streamlink/plugins/skai.py"}]}
| 1,014 | 400 |
gh_patches_debug_48579
|
rasdani/github-patches
|
git_diff
|
openai__gym-1730
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sampling Bug
Gym Version: 0.15.3
issue: Box samples numbers above the `high` parameter.
```
from gym.spaces import Box
observation_space = Box(low=-3, high=-1, shape=(9,), dtype='int')
print(observation_space.sample())
>> [ 0 -2 0 -2 0 -1 0 -2 0]
```
The current implementation samples float numbers from uniform distribution of [`low`, `high`] and then converts the resulting samples to desired `dtype`. This runs into the problem of sampling `low` parameter very rarely(and not uniformly) when `dtype` is `int`(as we are converting the floats back to int which results in ceil operation in case of negative numbers) i.e in the above example -3 is almost never sampled as most of the low sampled floats like -2.85, -2.9 get converted to -2.
https://github.com/openai/gym/blob/0cd9266d986d470ed9c0dd87a41cd680b65cfe1c/gym/spaces/box.py#L93-L97
</issue>
<code>
[start of gym/spaces/box.py]
1 import numpy as np
2
3 from .space import Space
4
5
6 class Box(Space):
7 """
8 A (possibly unbounded) box in R^n. Specifically, a Box represents the
9 Cartesian product of n closed intervals. Each interval has the form of one
10 of [a, b], (-oo, b], [a, oo), or (-oo, oo).
11
12 There are two common use cases:
13
14 * Identical bound for each dimension::
15 >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
16 Box(3, 4)
17
18 * Independent bound for each dimension::
19 >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
20 Box(2,)
21
22 """
23 def __init__(self, low, high, shape=None, dtype=np.float32):
24 assert dtype is not None, 'dtype must be explicitly provided. '
25 self.dtype = np.dtype(dtype)
26
27 if shape is None:
28 assert low.shape == high.shape, 'box dimension mismatch. '
29 self.shape = low.shape
30 self.low = low
31 self.high = high
32 else:
33 assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '
34 self.shape = tuple(shape)
35 self.low = np.full(self.shape, low)
36 self.high = np.full(self.shape, high)
37
38 self.low = self.low.astype(self.dtype)
39 self.high = self.high.astype(self.dtype)
40
41 # Boolean arrays which indicate the interval type for each coordinate
42 self.bounded_below = -np.inf < self.low
43 self.bounded_above = np.inf > self.high
44
45 super(Box, self).__init__(self.shape, self.dtype)
46
47 def is_bounded(self, manner="both"):
48 below = np.all(self.bounded_below)
49 above = np.all(self.bounded_above)
50 if manner == "both":
51 return below and above
52 elif manner == "below":
53 return below
54 elif manner == "above":
55 return above
56 else:
57 raise ValueError("manner is not in {'below', 'above', 'both'}")
58
59 def sample(self):
60 """
61 Generates a single random sample inside of the Box.
62
63 In creating a sample of the box, each coordinate is sampled according to
64 the form of the interval:
65
66 * [a, b] : uniform distribution
67 * [a, oo) : shifted exponential distribution
68 * (-oo, b] : shifted negative exponential distribution
69 * (-oo, oo) : normal distribution
70 """
71 high = self.high if self.dtype.kind == 'f' \
72 else self.high.astype('int64') + 1
73 sample = np.empty(self.shape)
74
75 # Masking arrays which classify the coordinates according to interval
76 # type
77 unbounded = ~self.bounded_below & ~self.bounded_above
78 upp_bounded = ~self.bounded_below & self.bounded_above
79 low_bounded = self.bounded_below & ~self.bounded_above
80 bounded = self.bounded_below & self.bounded_above
81
82
83 # Vectorized sampling by interval type
84 sample[unbounded] = self.np_random.normal(
85 size=unbounded[unbounded].shape)
86
87 sample[low_bounded] = self.np_random.exponential(
88 size=low_bounded[low_bounded].shape) + self.low[low_bounded]
89
90 sample[upp_bounded] = -self.np_random.exponential(
91 size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]
92
93 sample[bounded] = self.np_random.uniform(low=self.low[bounded],
94 high=high[bounded],
95 size=bounded[bounded].shape)
96
97 return sample.astype(self.dtype)
98
99 def contains(self, x):
100 if isinstance(x, list):
101 x = np.array(x) # Promote list to array for contains check
102 return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
103
104 def to_jsonable(self, sample_n):
105 return np.array(sample_n).tolist()
106
107 def from_jsonable(self, sample_n):
108 return [np.asarray(sample) for sample in sample_n]
109
110 def __repr__(self):
111 return "Box" + str(self.shape)
112
113 def __eq__(self, other):
114 return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
115
[end of gym/spaces/box.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gym/spaces/box.py b/gym/spaces/box.py
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -93,6 +93,8 @@
sample[bounded] = self.np_random.uniform(low=self.low[bounded],
high=high[bounded],
size=bounded[bounded].shape)
+ if self.dtype.kind == 'i':
+ sample = np.floor(sample)
return sample.astype(self.dtype)
|
{"golden_diff": "diff --git a/gym/spaces/box.py b/gym/spaces/box.py\n--- a/gym/spaces/box.py\n+++ b/gym/spaces/box.py\n@@ -93,6 +93,8 @@\n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n+ if self.dtype.kind == 'i':\n+ sample = np.floor(sample)\n \n return sample.astype(self.dtype)\n", "issue": "Sampling Bug\nGym Version: 0.15.3\r\nissue: Box samples numbers above the `high` parameter.\r\n\r\n```\r\nfrom gym.spaces import Box\r\nobservation_space = Box(low=-3, high=-1, shape=(9,), dtype='int')\r\nprint(observation_space.sample())\r\n>> [ 0 -2 0 -2 0 -1 0 -2 0]\r\n```\r\nThe current implementation samples float numbers from uniform distribution of [`low`, `high`] and then converts the resulting samples to desired `dtype`. This runs into the problem of sampling `low` parameter very rarely(and not uniformly) when `dtype` is `int`(as we are converting the floats back to int which results in ceil operation in case of negative numbers) i.e in the above example -3 is almost never sampled as most of the low sampled floats like -2.85, -2.9 get converted to -2.\r\nhttps://github.com/openai/gym/blob/0cd9266d986d470ed9c0dd87a41cd680b65cfe1c/gym/spaces/box.py#L93-L97\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .space import Space\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n \n There are two common use cases:\n \n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n \n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n def __init__(self, low, high, shape=None, dtype=np.float32):\n assert dtype is not None, 'dtype must be explicitly provided. '\n self.dtype = np.dtype(dtype)\n\n if shape is None:\n assert low.shape == high.shape, 'box dimension mismatch. '\n self.shape = low.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high), 'box requires scalar bounds. '\n self.shape = tuple(shape)\n self.low = np.full(self.shape, low)\n self.high = np.full(self.shape, high)\n\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super(Box, self).__init__(self.shape, self.dtype)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box. 
\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n \n * [a, b] : uniform distribution \n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == 'f' \\\n else self.high.astype('int64') + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n \n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(\n size=unbounded[unbounded].shape)\n\n sample[low_bounded] = self.np_random.exponential(\n size=low_bounded[low_bounded].shape) + self.low[low_bounded]\n \n sample[upp_bounded] = -self.np_random.exponential(\n size=upp_bounded[upp_bounded].shape) - self.high[upp_bounded]\n \n sample[bounded] = self.np_random.uniform(low=self.low[bounded], \n high=high[bounded],\n size=bounded[bounded].shape)\n\n return sample.astype(self.dtype)\n \n def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and (self.shape == other.shape) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high)\n", "path": "gym/spaces/box.py"}]}
| 2,050 | 112 |
gh_patches_debug_26366
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-510
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
numerical_grad caution
I was implementing a differentiable Transpose function.
```
class Transpose(Function):
def forward(self, inputs):
x = inputs[0]
return x.transpose(),
def backward(self, inputs, grads):
return grads[0].transpose(),
```
While testing the gradient with numerical_grad,
```
def test_numerical_grad_cpu(self):
x = np.random.randn(1, 10)
x_var = Variable(x)
y_var = self.f(x_var)
y_var.grad = np.random.rand(10, 1)
y_var.backward()
cl = lambda: self.f.forward((x,))
gx, = gradient_check.numerical_grad(cl, (x,), (y_var.grad,))
gradient_check.assert_allclose(gx, x_var.grad)
```
(here `self.f = Transpose()`)
the numerical gradient `gx` keeps coming back as 0. After much frustration, I finally figured out that I was returning a view of `x` in the above code, and in `numerical_grad_cpu`,
```
flat_x[i] = orig + eps
ys1 = f()
flat_x[i] = orig - eps
ys2 = f()
flat_x[i] = orig
```
`ys1` and `ys2` end up being equal after the last line resetting `flat_x[i]` to the original value. I solved my problem by changing `cl = lambda: self.f.forward((x,))` to `cl = lambda: np.copy(self.f.forward((x,)))`.
I'm not sure how frequent this phenomenon could occur outside of transpose, but I just wanted to put this out here so that there could be a discussion. Perhaps a passing note in the documentation suffices here. Or doing `ys1 = np.copy(f())` instead might work as well.
</issue>
<code>
[start of chainer/gradient_check.py]
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import utils
6
7
8 def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):
9 grads = tuple(numpy.zeros_like(x) for x in inputs)
10 for x, gx in zip(inputs, grads):
11 flat_x = x.ravel()
12 flat_gx = gx.ravel()
13 for i in six.moves.range(flat_x.size):
14 orig = flat_x[i]
15 flat_x[i] = orig + eps
16 ys1 = f()
17 flat_x[i] = orig - eps
18 ys2 = f()
19 flat_x[i] = orig
20
21 for y1, y2, gy in zip(ys1, ys2, grad_outputs):
22 if gy is not None:
23 dot = float(sum(((y1 - y2) * gy).ravel()))
24 flat_gx[i] += dot / (2 * eps)
25
26 return grads
27
28
29 def numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):
30 grads = tuple(cuda.zeros_like(x) for x in inputs)
31 for x, gx in zip(inputs, grads):
32 x = x.ravel()
33 gx = gx.ravel()
34 x_cpu = x.get()
35 gx_cpu = gx.get()
36 for i in six.moves.range(x_cpu.size):
37 orig = x_cpu[i]
38 x_cpu[i] = orig + eps
39 x.set(x_cpu)
40 ys1 = f()
41 x_cpu[i] = orig - eps
42 x.set(x_cpu)
43 ys2 = f()
44 x_cpu[i] = orig
45 x.set(x_cpu)
46
47 for y1, y2, gy in zip(ys1, ys2, grad_outputs):
48 if gy is not None:
49 dot = sum(((y1 - y2) * gy).ravel()).get()
50 gx_cpu[i] += dot / (2 * eps)
51 gx.set(gx_cpu)
52
53 return grads
54
55
56 def numerical_grad(f, inputs, grad_outputs, eps=1e-3):
57 """Computes numerical gradient by finite differences.
58
59 This function is used to implement gradient check. For usage example, see
60 unit tests of :mod:`chainer.functions`.
61
62 Args:
63 f (function): Python function with no arguments that runs forward
64 computation and returns the result.
65 inputs (tuple of arrays): Tuple of arrays that should be treated as
66 inputs. Each element of them is slightly modified to realize
67 numerical gradient by finite differences.
68 grad_outputs (tuple of arrays): Tuple of arrays that are treated as
69 output gradients.
70 eps (float): Epsilon value of finite differences.
71
72 Returns:
73 tuple: Numerical gradient arrays corresponding to ``inputs``.
74
75 """
76 assert eps > 0
77 inputs = tuple(inputs)
78 grad_outputs = tuple(grad_outputs)
79 gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)
80
81 cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)
82
83 if gpu and cpu:
84 raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
85 elif gpu:
86 return numerical_grad_gpu(f, inputs, grad_outputs, eps)
87 else:
88 return numerical_grad_cpu(f, inputs, grad_outputs, eps)
89
90
91 def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
92 """Asserts if some corresponding element of x and y differs too much.
93
94 This function can handle both CPU and GPU arrays simultaneously.
95
96 Args:
97 x: Left-hand-side array.
98 y: Right-hand-side array.
99 atol (float): Absolute tolerance.
100 rtol (float): Relative tolerance.
101 verbose (bool): If True, it outputs verbose messages on error.
102
103 """
104 x = cuda.to_cpu(utils.force_array(x))
105 y = cuda.to_cpu(utils.force_array(y))
106 try:
107 numpy.testing.assert_allclose(
108 x, y, atol=atol, rtol=rtol, verbose=verbose)
109 except Exception:
110 print('error:', numpy.abs(x - y).max())
111 raise
112
[end of chainer/gradient_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/chainer/gradient_check.py b/chainer/gradient_check.py
--- a/chainer/gradient_check.py
+++ b/chainer/gradient_check.py
@@ -5,6 +5,11 @@
from chainer import utils
+def _copy_arrays(xs):
+ xp = cuda.get_array_module(*xs)
+ return tuple(xp.copy(x) for x in xs)
+
+
def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):
grads = tuple(numpy.zeros_like(x) for x in inputs)
for x, gx in zip(inputs, grads):
@@ -13,9 +18,9 @@
for i in six.moves.range(flat_x.size):
orig = flat_x[i]
flat_x[i] = orig + eps
- ys1 = f()
+ ys1 = _copy_arrays(f())
flat_x[i] = orig - eps
- ys2 = f()
+ ys2 = _copy_arrays(f())
flat_x[i] = orig
for y1, y2, gy in zip(ys1, ys2, grad_outputs):
@@ -37,10 +42,10 @@
orig = x_cpu[i]
x_cpu[i] = orig + eps
x.set(x_cpu)
- ys1 = f()
+ ys1 = _copy_arrays(f())
x_cpu[i] = orig - eps
x.set(x_cpu)
- ys2 = f()
+ ys2 = _copy_arrays(f())
x_cpu[i] = orig
x.set(x_cpu)
|
{"golden_diff": "diff --git a/chainer/gradient_check.py b/chainer/gradient_check.py\n--- a/chainer/gradient_check.py\n+++ b/chainer/gradient_check.py\n@@ -5,6 +5,11 @@\n from chainer import utils\n \n \n+def _copy_arrays(xs):\n+ xp = cuda.get_array_module(*xs)\n+ return tuple(xp.copy(x) for x in xs)\n+\n+\n def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n@@ -13,9 +18,9 @@\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n- ys1 = f()\n+ ys1 = _copy_arrays(f())\n flat_x[i] = orig - eps\n- ys2 = f()\n+ ys2 = _copy_arrays(f())\n flat_x[i] = orig\n \n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n@@ -37,10 +42,10 @@\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n- ys1 = f()\n+ ys1 = _copy_arrays(f())\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n- ys2 = f()\n+ ys2 = _copy_arrays(f())\n x_cpu[i] = orig\n x.set(x_cpu)\n", "issue": "numerical_grad caution\nI was implementing a differentiable Transpose function. \n\n```\nclass Transpose(Function):\n def forward(self, inputs):\n x = inputs[0]\n return x.transpose(),\n def backward(self, inputs, grads):\n return grads[0].transpose(),\n```\n\nWhile testing the gradient with numerical_grad, \n\n```\n def test_numerical_grad_cpu(self):\n x = np.random.randn(1, 10)\n x_var = Variable(x)\n y_var = self.f(x_var)\n y_var.grad = np.random.rand(10, 1)\n y_var.backward()\n cl = lambda: self.f.forward((x,))\n gx, = gradient_check.numerical_grad(cl, (x,), (y_var.grad,))\n gradient_check.assert_allclose(gx, x_var.grad)\n```\n\n(here `self.f = Transpose()`)\nthe numerical gradient `gx` keeps coming back as 0. After much frustration, I finally figured out that I was returning a view of `x` in the above code, and in `numerical_grad_cpu`,\n\n```\n flat_x[i] = orig + eps\n ys1 = f()\n flat_x[i] = orig - eps\n ys2 = f()\n flat_x[i] = orig\n```\n\n`ys1` and `ys2` end up being equal after the last line resetting `flat_x[i]` to the original value. I solved my problem by changing `cl = lambda: self.f.forward((x,))` to `cl = lambda: np.copy(self.f.forward((x,)))`.\n\nI'm not sure how frequent this phenomenon could occur outside of transpose, but I just wanted to put this out here so that there could be a discussion. Perhaps a passing note in the documentation suffices here. 
Or doing `ys1 = np.copy(f())` instead might work as well.\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import utils\n\n\ndef numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n flat_x = x.ravel()\n flat_gx = gx.ravel()\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n ys1 = f()\n flat_x[i] = orig - eps\n ys2 = f()\n flat_x[i] = orig\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = float(sum(((y1 - y2) * gy).ravel()))\n flat_gx[i] += dot / (2 * eps)\n\n return grads\n\n\ndef numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(cuda.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n x = x.ravel()\n gx = gx.ravel()\n x_cpu = x.get()\n gx_cpu = gx.get()\n for i in six.moves.range(x_cpu.size):\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n ys1 = f()\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n ys2 = f()\n x_cpu[i] = orig\n x.set(x_cpu)\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = sum(((y1 - y2) * gy).ravel()).get()\n gx_cpu[i] += dot / (2 * eps)\n gx.set(gx_cpu)\n\n return grads\n\n\ndef numerical_grad(f, inputs, grad_outputs, eps=1e-3):\n \"\"\"Computes numerical gradient by finite differences.\n\n This function is used to implement gradient check. For usage example, see\n unit tests of :mod:`chainer.functions`.\n\n Args:\n f (function): Python function with no arguments that runs forward\n computation and returns the result.\n inputs (tuple of arrays): Tuple of arrays that should be treated as\n inputs. Each element of them is slightly modified to realize\n numerical gradient by finite differences.\n grad_outputs (tuple of arrays): Tuple of arrays that are treated as\n output gradients.\n eps (float): Epsilon value of finite differences.\n\n Returns:\n tuple: Numerical gradient arrays corresponding to ``inputs``.\n\n \"\"\"\n assert eps > 0\n inputs = tuple(inputs)\n grad_outputs = tuple(grad_outputs)\n gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)\n\n cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)\n\n if gpu and cpu:\n raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')\n elif gpu:\n return numerical_grad_gpu(f, inputs, grad_outputs, eps)\n else:\n return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n\n\ndef assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n \"\"\"Asserts if some corresponding element of x and y differs too much.\n\n This function can handle both CPU and GPU arrays simultaneously.\n\n Args:\n x: Left-hand-side array.\n y: Right-hand-side array.\n atol (float): Absolute tolerance.\n rtol (float): Relative tolerance.\n verbose (bool): If True, it outputs verbose messages on error.\n\n \"\"\"\n x = cuda.to_cpu(utils.force_array(x))\n y = cuda.to_cpu(utils.force_array(y))\n try:\n numpy.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n except Exception:\n print('error:', numpy.abs(x - y).max())\n raise\n", "path": "chainer/gradient_check.py"}]}
| 2,059 | 346 |
gh_patches_debug_6369
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-18211
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
selu
#14951
</issue>
<code>
[start of ivy/functional/frontends/mindspore/ops/function/nn_func.py]
1 """Includes Mindspore Frontend functions listed in the TODO list
2 https://github.com/unifyai/ivy/issues/14951."""
3
4 # local
5 import ivy
6 from ivy.func_wrapper import with_supported_dtypes
7 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
8
9
10 @with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
11 @to_ivy_arrays_and_back
12 def softsign(x):
13 return ivy.divide(x, ivy.add(1, ivy.abs(x)))
14
[end of ivy/functional/frontends/mindspore/ops/function/nn_func.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py
+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
@@ -7,6 +7,12 @@
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
+@with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
+@to_ivy_arrays_and_back
+def selu(input_x):
+ return ivy.selu(input_x)
+
+
@with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
def softsign(x):
|
{"golden_diff": "diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n@@ -7,6 +7,12 @@\n from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n \n \n+@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n+@to_ivy_arrays_and_back\n+def selu(input_x):\n+ return ivy.selu(input_x)\n+\n+ \n @with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n @to_ivy_arrays_and_back\n def softsign(x):\n", "issue": "selu\n#14951 \n", "before_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}]}
| 717 | 211 |
gh_patches_debug_5837
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-1639
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: 404 error while accessing contribution guide
When I was trying to access the contribution guide mentioned in `CONTRIBUTING.rst`, I am getting 404 error - https://googleapis.github.io/google-api-python-client/contributing.html
</issue>
<code>
[start of owlbot.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import synthtool as s
16 from synthtool import gcp
17
18 from synthtool.languages import python
19
20 common = gcp.CommonTemplates()
21
22 # ----------------------------------------------------------------------------
23 # Add templated files
24 # ----------------------------------------------------------------------------
25 templated_files = common.py_library()
26
27 # Copy kokoro configs.
28 # Docs are excluded as repo docs cannot currently be generated using sphinx.
29 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])
30 s.move(templated_files / '.trampolinerc') # config file for trampoline_v2
31
32 # Also move issue templates
33 s.move(templated_files / '.github', excludes=['CODEOWNERS'])
34
35 # Move scripts folder needed for samples CI
36 s.move(templated_files / 'scripts')
37
38 # ----------------------------------------------------------------------------
39 # Samples templates
40 # ----------------------------------------------------------------------------
41
42 python.py_samples(skip_readmes=True)
43
[end of owlbot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/owlbot.py b/owlbot.py
--- a/owlbot.py
+++ b/owlbot.py
@@ -35,6 +35,9 @@
# Move scripts folder needed for samples CI
s.move(templated_files / 'scripts')
+# Copy CONTRIBUTING.rst
+s.move(templated_files / 'CONTRIBUTING.rst')
+
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
|
{"golden_diff": "diff --git a/owlbot.py b/owlbot.py\n--- a/owlbot.py\n+++ b/owlbot.py\n@@ -35,6 +35,9 @@\n # Move scripts folder needed for samples CI\n s.move(templated_files / 'scripts')\n \n+# Copy CONTRIBUTING.rst\n+s.move(templated_files / 'CONTRIBUTING.rst')\n+\n # ----------------------------------------------------------------------------\n # Samples templates\n # ----------------------------------------------------------------------------\n", "issue": "docs: 404 error while accessing contribution guide\nWhen I was trying to access the contribution guide mentioned in `CONTRIBUTING.rst`, I am getting 404 error - https://googleapis.github.io/google-api-python-client/contributing.html\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\nfrom synthtool.languages import python\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\ns.move(templated_files / '.trampolinerc') # config file for trampoline_v2\n\n# Also move issue templates\ns.move(templated_files / '.github', excludes=['CODEOWNERS'])\n\n# Move scripts folder needed for samples CI\ns.move(templated_files / 'scripts')\n\n# ----------------------------------------------------------------------------\n# Samples templates\n# ----------------------------------------------------------------------------\n\npython.py_samples(skip_readmes=True)\n", "path": "owlbot.py"}]}
| 1,104 | 87 |
gh_patches_debug_12483
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-2929
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bump Ansible to 2.4
## Description
The current version of Ansible in the admin workstation uses PyCrypto==2.6.1 as a dependency, which is causing CI safety failures when checking the admin pip requirements due to the fact that there is a CVE in PyCrypto 2.6.1. See upstream discussion in https://github.com/ansible/ansible/issues/23179.
We should bump to a more recent version of Ansible in the admin workstations that does not have PyCrypto as a dependency
## User Stories
As a SecureDrop administrator, I don't want to run software relying on unmaintained dependencies.
Temporarily disable safety check
## Description
We'll need to temporarily disable safety in order to merge until #2926 is resolved (and we'll need to cherry pick the disabling of safety into the 0.5.2 release branch).
## User Stories
As a SecureDrop maintainer, I don't want to merge with failing CI.
</issue>
<code>
[start of install_files/ansible-base/callback_plugins/ansible_version_check.py]
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import sys
5
6 import ansible
7
8 try:
9 # Version 2.0+
10 from ansible.plugins.callback import CallbackBase
11 except ImportError:
12 CallbackBase = object
13
14
15 def print_red_bold(text):
16 print('\x1b[31;1m' + text + '\x1b[0m')
17
18
19 class CallbackModule(CallbackBase):
20 def __init__(self):
21 # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+
22 required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml
23 if not ansible.__version__.startswith(required_version):
24 print_red_bold(
25 "SecureDrop restriction: only Ansible {version}.* is supported. "
26 .format(version=required_version)
27 )
28 sys.exit(1)
29
[end of install_files/ansible-base/callback_plugins/ansible_version_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -19,7 +19,7 @@
class CallbackModule(CallbackBase):
def __init__(self):
# Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+
- required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml
+ required_version = '2.4.2' # Keep synchronized with requirements files
if not ansible.__version__.startswith(required_version):
print_red_bold(
"SecureDrop restriction: only Ansible {version}.* is supported. "
|
{"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -19,7 +19,7 @@\n class CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+\n- required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml\n+ required_version = '2.4.2' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.* is supported. \"\n", "issue": "Bump Ansible to 2.4\n## Description\r\n\r\nThe current version of Ansible in the admin workstation uses PyCrypto==2.6.1 as a dependency, which is causing CI safety failures when checking the admin pip requirements due to the fact that there is a CVE in PyCrypto 2.6.1. See upstream discussion in https://github.com/ansible/ansible/issues/23179. \r\n\r\nWe should bump to a more recent version of Ansible in the admin workstations that does not have PyCrypto as a dependency\r\n\r\n## User Stories\r\n\r\nAs a SecureDrop administrator, I don't want to run software relying on unmaintained dependencies.\nTemporarily disable safety check\n## Description\r\n\r\nWe'll need to temporarily disable safety in order to merge until #2926 is resolved (and we'll need to cherry pick the disabling of safety into the 0.5.2 release branch). \r\n\r\n## User Stories\r\n\r\nAs a SecureDrop maintainer, I don't want to merge with failing CI. \n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible with Ansible 2.0+\n required_version = '2.3.2' # Keep synchronized with group_vars/all/main.yml\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.* is supported. \"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]}
| 1,012 | 185 |
gh_patches_debug_6736
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-3490
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
I get the same profit / loss report in all time frames
<!--
Have you searched for similar issues before posting it? Yes
If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue).
If it hasn't been reported, please create a new issue.
Please do not use bug reports to request new features.
-->
## Describe your environment
* Operating system: ____Ubuntu 18.04.4 LTS
* Python Version: _____Python 3.6.9
* CCXT version: _____ ccxt==1.29.5
* Freqtrade Version: ____ freqtrade develop-761407f7
Today, I updated with the method below.
cd freqtrade
git pull
python3 -m pip install -r requirements.txt --user
python3 -m pip install -r requirements-hyperopt.txt --user
python3 -m pip install -r requirements-plot.txt --user
and
freqtrade download-data --days 365 --timeframes 5m 15m 30m 1h 4h 1d
https://github.com/freqtrade/freqtrade/issues/3104 (I keep getting this error on 1 and 5 minute candles)
I use StaticPairList
I did backtest as below
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 5m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 15m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 30m
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 4h
freqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1d
The problem I encountered:
I get the same profit / loss report in all time frames
</issue>
<code>
[start of freqtrade/configuration/deprecated_settings.py]
1 """
2 Functions to handle deprecated settings
3 """
4
5 import logging
6 from typing import Any, Dict
7
8 from freqtrade.exceptions import OperationalException
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def check_conflicting_settings(config: Dict[str, Any],
15 section1: str, name1: str,
16 section2: str, name2: str) -> None:
17 section1_config = config.get(section1, {})
18 section2_config = config.get(section2, {})
19 if name1 in section1_config and name2 in section2_config:
20 raise OperationalException(
21 f"Conflicting settings `{section1}.{name1}` and `{section2}.{name2}` "
22 "(DEPRECATED) detected in the configuration file. "
23 "This deprecated setting will be removed in the next versions of Freqtrade. "
24 f"Please delete it from your configuration and use the `{section1}.{name1}` "
25 "setting instead."
26 )
27
28
29 def process_deprecated_setting(config: Dict[str, Any],
30 section1: str, name1: str,
31 section2: str, name2: str) -> None:
32 section2_config = config.get(section2, {})
33
34 if name2 in section2_config:
35 logger.warning(
36 "DEPRECATED: "
37 f"The `{section2}.{name2}` setting is deprecated and "
38 "will be removed in the next versions of Freqtrade. "
39 f"Please use the `{section1}.{name1}` setting in your configuration instead."
40 )
41 section1_config = config.get(section1, {})
42 section1_config[name1] = section2_config[name2]
43
44
45 def process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:
46
47 check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal',
48 'experimental', 'use_sell_signal')
49 check_conflicting_settings(config, 'ask_strategy', 'sell_profit_only',
50 'experimental', 'sell_profit_only')
51 check_conflicting_settings(config, 'ask_strategy', 'ignore_roi_if_buy_signal',
52 'experimental', 'ignore_roi_if_buy_signal')
53
54 process_deprecated_setting(config, 'ask_strategy', 'use_sell_signal',
55 'experimental', 'use_sell_signal')
56 process_deprecated_setting(config, 'ask_strategy', 'sell_profit_only',
57 'experimental', 'sell_profit_only')
58 process_deprecated_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',
59 'experimental', 'ignore_roi_if_buy_signal')
60
61 if (config.get('edge', {}).get('enabled', False)
62 and 'capital_available_percentage' in config.get('edge', {})):
63 raise OperationalException(
64 "DEPRECATED: "
65 "Using 'edge.capital_available_percentage' has been deprecated in favor of "
66 "'tradable_balance_ratio'. Please migrate your configuration to "
67 "'tradable_balance_ratio' and remove 'capital_available_percentage' "
68 "from the edge configuration."
69 )
70 if 'ticker_interval' in config:
71 logger.warning(
72 "DEPRECATED: "
73 "Please use 'timeframe' instead of 'ticker_interval."
74 )
75 config['timeframe'] = config['ticker_interval']
76
[end of freqtrade/configuration/deprecated_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/freqtrade/configuration/deprecated_settings.py b/freqtrade/configuration/deprecated_settings.py
--- a/freqtrade/configuration/deprecated_settings.py
+++ b/freqtrade/configuration/deprecated_settings.py
@@ -72,4 +72,9 @@
"DEPRECATED: "
"Please use 'timeframe' instead of 'ticker_interval."
)
+ if 'timeframe' in config:
+ raise OperationalException(
+ "Both 'timeframe' and 'ticker_interval' detected."
+ "Please remove 'ticker_interval' from your configuration to continue operating."
+ )
config['timeframe'] = config['ticker_interval']
|
{"golden_diff": "diff --git a/freqtrade/configuration/deprecated_settings.py b/freqtrade/configuration/deprecated_settings.py\n--- a/freqtrade/configuration/deprecated_settings.py\n+++ b/freqtrade/configuration/deprecated_settings.py\n@@ -72,4 +72,9 @@\n \"DEPRECATED: \"\n \"Please use 'timeframe' instead of 'ticker_interval.\"\n )\n+ if 'timeframe' in config:\n+ raise OperationalException(\n+ \"Both 'timeframe' and 'ticker_interval' detected.\"\n+ \"Please remove 'ticker_interval' from your configuration to continue operating.\"\n+ )\n config['timeframe'] = config['ticker_interval']\n", "issue": "I get the same profit / loss report in all time frames\n<!-- \r\nHave you searched for similar issues before posting it? Yes\r\n\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\nPlease do not use bug reports to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: ____Ubuntu 18.04.4 LTS\r\n * Python Version: _____Python 3.6.9\r\n * CCXT version: _____ ccxt==1.29.5\r\n * Freqtrade Version: ____ freqtrade develop-761407f7\r\n \r\nToday, I updated with the method below.\r\ncd freqtrade\r\ngit pull\r\npython3 -m pip install -r requirements.txt --user\r\npython3 -m pip install -r requirements-hyperopt.txt --user\r\npython3 -m pip install -r requirements-plot.txt --user\r\n\r\nand\r\nfreqtrade download-data --days 365 --timeframes 5m 15m 30m 1h 4h 1d\r\nhttps://github.com/freqtrade/freqtrade/issues/3104 (I keep getting this error on 1 and 5 minute candles)\r\n\r\nI use StaticPairList\r\n\r\nI did backtest as below\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 5m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 15m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 30m\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 4h\r\nfreqtrade backtesting --strategy strateji --timerange=20200101- --ticker-interval 1d\r\n\r\nThe problem I encountered:\r\nI get the same profit / loss report in all time frames\n", "before_files": [{"content": "\"\"\"\nFunctions to handle deprecated settings\n\"\"\"\n\nimport logging\nfrom typing import Any, Dict\n\nfrom freqtrade.exceptions import OperationalException\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_conflicting_settings(config: Dict[str, Any],\n section1: str, name1: str,\n section2: str, name2: str) -> None:\n section1_config = config.get(section1, {})\n section2_config = config.get(section2, {})\n if name1 in section1_config and name2 in section2_config:\n raise OperationalException(\n f\"Conflicting settings `{section1}.{name1}` and `{section2}.{name2}` \"\n \"(DEPRECATED) detected in the configuration file. \"\n \"This deprecated setting will be removed in the next versions of Freqtrade. 
\"\n f\"Please delete it from your configuration and use the `{section1}.{name1}` \"\n \"setting instead.\"\n )\n\n\ndef process_deprecated_setting(config: Dict[str, Any],\n section1: str, name1: str,\n section2: str, name2: str) -> None:\n section2_config = config.get(section2, {})\n\n if name2 in section2_config:\n logger.warning(\n \"DEPRECATED: \"\n f\"The `{section2}.{name2}` setting is deprecated and \"\n \"will be removed in the next versions of Freqtrade. \"\n f\"Please use the `{section1}.{name1}` setting in your configuration instead.\"\n )\n section1_config = config.get(section1, {})\n section1_config[name1] = section2_config[name2]\n\n\ndef process_temporary_deprecated_settings(config: Dict[str, Any]) -> None:\n\n check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal',\n 'experimental', 'use_sell_signal')\n check_conflicting_settings(config, 'ask_strategy', 'sell_profit_only',\n 'experimental', 'sell_profit_only')\n check_conflicting_settings(config, 'ask_strategy', 'ignore_roi_if_buy_signal',\n 'experimental', 'ignore_roi_if_buy_signal')\n\n process_deprecated_setting(config, 'ask_strategy', 'use_sell_signal',\n 'experimental', 'use_sell_signal')\n process_deprecated_setting(config, 'ask_strategy', 'sell_profit_only',\n 'experimental', 'sell_profit_only')\n process_deprecated_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal',\n 'experimental', 'ignore_roi_if_buy_signal')\n\n if (config.get('edge', {}).get('enabled', False)\n and 'capital_available_percentage' in config.get('edge', {})):\n raise OperationalException(\n \"DEPRECATED: \"\n \"Using 'edge.capital_available_percentage' has been deprecated in favor of \"\n \"'tradable_balance_ratio'. Please migrate your configuration to \"\n \"'tradable_balance_ratio' and remove 'capital_available_percentage' \"\n \"from the edge configuration.\"\n )\n if 'ticker_interval' in config:\n logger.warning(\n \"DEPRECATED: \"\n \"Please use 'timeframe' instead of 'ticker_interval.\"\n )\n config['timeframe'] = config['ticker_interval']\n", "path": "freqtrade/configuration/deprecated_settings.py"}]}
| 1,853 | 140 |
gh_patches_debug_8822
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-4588
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
<Improvement> Not showing user is exist notification on UI.
### Observed behavior
We are able to create user using CREATE AN ACCOUNT option which is on login page of kolibri. But when someone uses existing username to create account, it will not show any kind of existing user notification on UI.
Not able to distinguish whether account is exist or not.
### Expected behavior
It must show existing username notification on UI if user is exist.
### Steps to reproduce
1. Login with Admin and go to the facility.
2. Click on settings.
3. Select Allow learners to create accounts.
4. Logout and click on CREATE AN ACCOUNT button and use existing username to create account.
### Context
* Kolibri version : Kolibri 0.11.0
* Operating system : ubuntu 14.04
* Browser : Chrome
### Screenshots:
</issue>
<code>
[start of kolibri/core/auth/serializers.py]
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 from rest_framework import serializers
6 from rest_framework.validators import UniqueTogetherValidator
7
8 from .constants.collection_kinds import LEARNERGROUP
9 from .models import Classroom
10 from .models import Facility
11 from .models import FacilityDataset
12 from .models import FacilityUser
13 from .models import LearnerGroup
14 from .models import Membership
15 from .models import Role
16 from kolibri.core import error_constants
17
18
19 class RoleSerializer(serializers.ModelSerializer):
20 collection_parent = serializers.SerializerMethodField()
21
22 class Meta:
23 model = Role
24 fields = ('id', 'kind', 'collection', 'user', 'collection_parent',)
25
26 def get_collection_parent(self, instance):
27 if instance.collection.parent is not None:
28 return instance.collection.parent.id
29 else:
30 return None
31
32
33 class FacilityUserSerializer(serializers.ModelSerializer):
34 roles = RoleSerializer(many=True, read_only=True)
35
36 class Meta:
37 model = FacilityUser
38 extra_kwargs = {'password': {'write_only': True}}
39 fields = ('id', 'username', 'full_name', 'password', 'facility', 'roles', 'is_superuser')
40
41 def create(self, validated_data):
42 if FacilityUser.objects.filter(username__iexact=validated_data['username']).exists():
43 raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
44 code=error_constants.USERNAME_ALREADY_EXISTS)
45 return super(FacilityUserSerializer, self).create(validated_data)
46
47 def update(self, instance, validated_data):
48 if validated_data.get('username') and FacilityUser.objects.exclude(id__exact=instance.id).filter(username__iexact=validated_data['username']).exists():
49 raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
50 code=error_constants.USERNAME_ALREADY_EXISTS)
51 return super(FacilityUserSerializer, self).update(instance, validated_data)
52
53
54 class FacilityUserSignupSerializer(FacilityUserSerializer):
55
56 def validate_username(self, value):
57 if FacilityUser.objects.filter(username__iexact=value).exists():
58 raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
59 code=error_constants.USERNAME_ALREADY_EXISTS)
60 return value
61
62
63 class FacilityUsernameSerializer(serializers.ModelSerializer):
64
65 class Meta:
66 model = FacilityUser
67 fields = ('username', )
68
69
70 class MembershipSerializer(serializers.ModelSerializer):
71
72 class Meta:
73 model = Membership
74 fields = ('id', 'collection', 'user')
75
76 def create(self, validated_data):
77 user = validated_data["user"]
78 collection = validated_data["collection"]
79 if collection.kind == LEARNERGROUP and user.memberships.filter(collection__parent=collection.parent).exists():
80 # We are trying to create a membership for a user in a group, but they already belong to a group
81 # in the same class as this group. We may want to allow this, but the frontend does not currently
82 # support this. Error!
83 raise serializers.ValidationError(detail={'classroom': 'This user is already in a group in this class'},
84 code=error_constants.USER_ALREADY_IN_GROUP_IN_CLASS)
85 return super(MembershipSerializer, self).create(validated_data)
86
87
88 class FacilityDatasetSerializer(serializers.ModelSerializer):
89
90 class Meta:
91 model = FacilityDataset
92 fields = ('id', 'learner_can_edit_username', 'learner_can_edit_name', 'learner_can_edit_password',
93 'learner_can_sign_up', 'learner_can_delete_account', 'learner_can_login_with_no_password',
94 'show_download_button_in_learn', 'description', 'location', 'allow_guest_access')
95
96
97 class FacilitySerializer(serializers.ModelSerializer):
98 dataset = FacilityDatasetSerializer(read_only=True)
99 default = serializers.SerializerMethodField()
100
101 class Meta:
102 model = Facility
103 extra_kwargs = {'id': {'read_only': True}, 'dataset': {'read_only': True}}
104 fields = ('id', 'name', 'dataset', 'default')
105
106 def get_default(self, instance):
107 return instance == Facility.get_default_facility()
108
109
110 class PublicFacilitySerializer(serializers.ModelSerializer):
111
112 class Meta:
113 model = Facility
114 fields = ('dataset', 'name')
115
116
117 class ClassroomSerializer(serializers.ModelSerializer):
118 learner_count = serializers.SerializerMethodField()
119 coaches = serializers.SerializerMethodField()
120
121 def get_learner_count(self, instance):
122 return instance.get_members().count()
123
124 def get_coaches(self, instance):
125 return FacilityUserSerializer(instance.get_coaches(), many=True).data
126
127 class Meta:
128 model = Classroom
129 fields = (
130 'id',
131 'name',
132 'parent',
133 'learner_count',
134 'coaches',
135 )
136
137 validators = [
138 UniqueTogetherValidator(
139 queryset=Classroom.objects.all(),
140 fields=('parent', 'name')
141 )
142 ]
143
144
145 class LearnerGroupSerializer(serializers.ModelSerializer):
146
147 user_ids = serializers.SerializerMethodField()
148
149 def get_user_ids(self, group):
150 return [str(user_id['id']) for user_id in group.get_members().values('id')]
151
152 class Meta:
153 model = LearnerGroup
154 fields = ('id', 'name', 'parent', 'user_ids')
155
156 validators = [
157 UniqueTogetherValidator(
158 queryset=Classroom.objects.all(),
159 fields=('parent', 'name')
160 )
161 ]
162
[end of kolibri/core/auth/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/core/auth/serializers.py b/kolibri/core/auth/serializers.py
--- a/kolibri/core/auth/serializers.py
+++ b/kolibri/core/auth/serializers.py
@@ -55,8 +55,10 @@
def validate_username(self, value):
if FacilityUser.objects.filter(username__iexact=value).exists():
- raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},
- code=error_constants.USERNAME_ALREADY_EXISTS)
+ raise serializers.ValidationError(
+ detail='An account with that username already exists.',
+ code=error_constants.USERNAME_ALREADY_EXISTS
+ )
return value
|
{"golden_diff": "diff --git a/kolibri/core/auth/serializers.py b/kolibri/core/auth/serializers.py\n--- a/kolibri/core/auth/serializers.py\n+++ b/kolibri/core/auth/serializers.py\n@@ -55,8 +55,10 @@\n \n def validate_username(self, value):\n if FacilityUser.objects.filter(username__iexact=value).exists():\n- raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n- code=error_constants.USERNAME_ALREADY_EXISTS)\n+ raise serializers.ValidationError(\n+ detail='An account with that username already exists.',\n+ code=error_constants.USERNAME_ALREADY_EXISTS\n+ )\n return value\n", "issue": "<Improvement> Not showing user is exist notification on UI.\n\r\n### Observed behavior\r\nWe are able to create user using CREATE AN ACCOUNT option which is on login page of kolibri. But when someone uses existing username to create account, it will not show any kind of existing user notification on UI.\r\nNot able to distinguish whether account is exist or not.\r\n### Expected behavior\r\nIt must show existing username notification on UI if user is exist.\r\n\r\n\r\n### Steps to reproduce\r\n1. Login with Admin and go to the facility.\r\n2. Click on settings.\r\n3. Select Allow learners to create accounts.\r\n4. Logout and click on CREATE AN ACCOUNT button and use existing username to create account.\r\n\r\n### Context\r\n\r\n * Kolibri version : Kolibri 0.11.0 \r\n * Operating system : ubuntu 14.04\r\n * Browser : Chrome\r\n\r\n### Screenshots:\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom .constants.collection_kinds import LEARNERGROUP\nfrom .models import Classroom\nfrom .models import Facility\nfrom .models import FacilityDataset\nfrom .models import FacilityUser\nfrom .models import LearnerGroup\nfrom .models import Membership\nfrom .models import Role\nfrom kolibri.core import error_constants\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n collection_parent = serializers.SerializerMethodField()\n\n class Meta:\n model = Role\n fields = ('id', 'kind', 'collection', 'user', 'collection_parent',)\n\n def get_collection_parent(self, instance):\n if instance.collection.parent is not None:\n return instance.collection.parent.id\n else:\n return None\n\n\nclass FacilityUserSerializer(serializers.ModelSerializer):\n roles = RoleSerializer(many=True, read_only=True)\n\n class Meta:\n model = FacilityUser\n extra_kwargs = {'password': {'write_only': True}}\n fields = ('id', 'username', 'full_name', 'password', 'facility', 'roles', 'is_superuser')\n\n def create(self, validated_data):\n if FacilityUser.objects.filter(username__iexact=validated_data['username']).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return super(FacilityUserSerializer, self).create(validated_data)\n\n def update(self, instance, validated_data):\n if validated_data.get('username') and FacilityUser.objects.exclude(id__exact=instance.id).filter(username__iexact=validated_data['username']).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return super(FacilityUserSerializer, self).update(instance, 
validated_data)\n\n\nclass FacilityUserSignupSerializer(FacilityUserSerializer):\n\n def validate_username(self, value):\n if FacilityUser.objects.filter(username__iexact=value).exists():\n raise serializers.ValidationError(detail={'username': ['An account with that username already exists.']},\n code=error_constants.USERNAME_ALREADY_EXISTS)\n return value\n\n\nclass FacilityUsernameSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FacilityUser\n fields = ('username', )\n\n\nclass MembershipSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Membership\n fields = ('id', 'collection', 'user')\n\n def create(self, validated_data):\n user = validated_data[\"user\"]\n collection = validated_data[\"collection\"]\n if collection.kind == LEARNERGROUP and user.memberships.filter(collection__parent=collection.parent).exists():\n # We are trying to create a membership for a user in a group, but they already belong to a group\n # in the same class as this group. We may want to allow this, but the frontend does not currently\n # support this. Error!\n raise serializers.ValidationError(detail={'classroom': 'This user is already in a group in this class'},\n code=error_constants.USER_ALREADY_IN_GROUP_IN_CLASS)\n return super(MembershipSerializer, self).create(validated_data)\n\n\nclass FacilityDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FacilityDataset\n fields = ('id', 'learner_can_edit_username', 'learner_can_edit_name', 'learner_can_edit_password',\n 'learner_can_sign_up', 'learner_can_delete_account', 'learner_can_login_with_no_password',\n 'show_download_button_in_learn', 'description', 'location', 'allow_guest_access')\n\n\nclass FacilitySerializer(serializers.ModelSerializer):\n dataset = FacilityDatasetSerializer(read_only=True)\n default = serializers.SerializerMethodField()\n\n class Meta:\n model = Facility\n extra_kwargs = {'id': {'read_only': True}, 'dataset': {'read_only': True}}\n fields = ('id', 'name', 'dataset', 'default')\n\n def get_default(self, instance):\n return instance == Facility.get_default_facility()\n\n\nclass PublicFacilitySerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Facility\n fields = ('dataset', 'name')\n\n\nclass ClassroomSerializer(serializers.ModelSerializer):\n learner_count = serializers.SerializerMethodField()\n coaches = serializers.SerializerMethodField()\n\n def get_learner_count(self, instance):\n return instance.get_members().count()\n\n def get_coaches(self, instance):\n return FacilityUserSerializer(instance.get_coaches(), many=True).data\n\n class Meta:\n model = Classroom\n fields = (\n 'id',\n 'name',\n 'parent',\n 'learner_count',\n 'coaches',\n )\n\n validators = [\n UniqueTogetherValidator(\n queryset=Classroom.objects.all(),\n fields=('parent', 'name')\n )\n ]\n\n\nclass LearnerGroupSerializer(serializers.ModelSerializer):\n\n user_ids = serializers.SerializerMethodField()\n\n def get_user_ids(self, group):\n return [str(user_id['id']) for user_id in group.get_members().values('id')]\n\n class Meta:\n model = LearnerGroup\n fields = ('id', 'name', 'parent', 'user_ids')\n\n validators = [\n UniqueTogetherValidator(\n queryset=Classroom.objects.all(),\n fields=('parent', 'name')\n )\n ]\n", "path": "kolibri/core/auth/serializers.py"}]}
| 2,307 | 150 |
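
For the record above, a minimal sketch of the error-payload shapes behind the patch, assuming DRF collects whatever detail a field-level `validate_username` hook raises under the `username` key of `serializer.errors`; a dict detail therefore nests the field name twice, while the plain string from the patch yields the flat shape the signup UI can presumably render:

```python
# Shapes of serializer.errors for the signup serializer, assuming DRF keys the
# detail raised in validate_username() under the "username" field name.

# Old code passed a dict detail, so the field name ends up nested twice:
nested = {"username": {"username": ["An account with that username already exists."]}}

# Patched code passes a plain string, giving the flat shape the UI expects:
flat = {"username": ["An account with that username already exists."]}

for label, errors in (("old", nested), ("patched", flat)):
    print(label, errors["username"])
```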
gh_patches_debug_14756
|
rasdani/github-patches
|
git_diff
|
translate__pootle-4277
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`sync_stores` doesn't handle disabled projects
We addressed a similar issue for `update_stores` in #4198.
`sync_stores` should work for disabled projects as well https://github.com/translate/pootle/issues/4198#issuecomment-161717337.
</issue>
<code>
[start of pootle/apps/pootle_app/management/commands/sync_stores.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import os
11 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
12 from optparse import make_option
13
14 from pootle_app.management.commands import PootleCommand
15
16
17 class Command(PootleCommand):
18 option_list = PootleCommand.option_list + (
19 make_option(
20 '--overwrite',
21 action='store_true',
22 dest='overwrite',
23 default=False,
24 help="Don't just save translations, but "
25 "overwrite files to reflect state in database",
26 ),
27 make_option(
28 '--skip-missing',
29 action='store_true',
30 dest='skip_missing',
31 default=False,
32 help="Ignore missing files on disk",
33 ),
34 make_option(
35 '--force',
36 action='store_true',
37 dest='force',
38 default=False,
39 help="Don't ignore stores synced after last change",
40 ),
41 )
42 help = "Save new translations to disk manually."
43
44 def handle_all_stores(self, translation_project, **options):
45 translation_project.sync(
46 conservative=not options['overwrite'],
47 skip_missing=options['skip_missing'],
48 only_newer=not options['force']
49 )
50
51 def handle_store(self, store, **options):
52 store.sync(
53 conservative=not options['overwrite'],
54 update_structure=options['overwrite'],
55 skip_missing=options['skip_missing'],
56 only_newer=not options['force']
57 )
58
[end of pootle/apps/pootle_app/management/commands/sync_stores.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_app/management/commands/sync_stores.py b/pootle/apps/pootle_app/management/commands/sync_stores.py
--- a/pootle/apps/pootle_app/management/commands/sync_stores.py
+++ b/pootle/apps/pootle_app/management/commands/sync_stores.py
@@ -40,13 +40,15 @@
),
)
help = "Save new translations to disk manually."
+ process_disabled_projects = True
def handle_all_stores(self, translation_project, **options):
- translation_project.sync(
- conservative=not options['overwrite'],
- skip_missing=options['skip_missing'],
- only_newer=not options['force']
- )
+ if translation_project.directory_exists_on_disk():
+ translation_project.sync(
+ conservative=not options['overwrite'],
+ skip_missing=options['skip_missing'],
+ only_newer=not options['force']
+ )
def handle_store(self, store, **options):
store.sync(
|
{"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/sync_stores.py b/pootle/apps/pootle_app/management/commands/sync_stores.py\n--- a/pootle/apps/pootle_app/management/commands/sync_stores.py\n+++ b/pootle/apps/pootle_app/management/commands/sync_stores.py\n@@ -40,13 +40,15 @@\n ),\n )\n help = \"Save new translations to disk manually.\"\n+ process_disabled_projects = True\n \n def handle_all_stores(self, translation_project, **options):\n- translation_project.sync(\n- conservative=not options['overwrite'],\n- skip_missing=options['skip_missing'],\n- only_newer=not options['force']\n- )\n+ if translation_project.directory_exists_on_disk():\n+ translation_project.sync(\n+ conservative=not options['overwrite'],\n+ skip_missing=options['skip_missing'],\n+ only_newer=not options['force']\n+ )\n \n def handle_store(self, store, **options):\n store.sync(\n", "issue": "`sync_stores` doesn't handle disabled projects\nWe addressed the similar issue for `update_stores` #4198.\n`sync_stores` should work for disabled projects as well https://github.com/translate/pootle/issues/4198#issuecomment-161717337.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\nfrom optparse import make_option\n\nfrom pootle_app.management.commands import PootleCommand\n\n\nclass Command(PootleCommand):\n option_list = PootleCommand.option_list + (\n make_option(\n '--overwrite',\n action='store_true',\n dest='overwrite',\n default=False,\n help=\"Don't just save translations, but \"\n \"overwrite files to reflect state in database\",\n ),\n make_option(\n '--skip-missing',\n action='store_true',\n dest='skip_missing',\n default=False,\n help=\"Ignore missing files on disk\",\n ),\n make_option(\n '--force',\n action='store_true',\n dest='force',\n default=False,\n help=\"Don't ignore stores synced after last change\",\n ),\n )\n help = \"Save new translations to disk manually.\"\n\n def handle_all_stores(self, translation_project, **options):\n translation_project.sync(\n conservative=not options['overwrite'],\n skip_missing=options['skip_missing'],\n only_newer=not options['force']\n )\n\n def handle_store(self, store, **options):\n store.sync(\n conservative=not options['overwrite'],\n update_structure=options['overwrite'],\n skip_missing=options['skip_missing'],\n only_newer=not options['force']\n )\n", "path": "pootle/apps/pootle_app/management/commands/sync_stores.py"}]}
| 1,104 | 233 |
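
For the record above, a minimal standalone sketch of the guarded sync call, assuming a `translation_project` object exposing the `directory_exists_on_disk()` and `sync()` methods used in the patch:

```python
def sync_translation_project(translation_project, overwrite=False,
                             skip_missing=False, force=False):
    """Sync a project to disk, skipping projects whose directory was never
    created on disk (e.g. disabled projects)."""
    if not translation_project.directory_exists_on_disk():
        return
    translation_project.sync(
        conservative=not overwrite,
        skip_missing=skip_missing,
        only_newer=not force,
    )
```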
gh_patches_debug_16297
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-7300
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Debugging history saves both python commands and pdb commands
<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->
### Issue Report Checklist
* [x] Searched the [issues page](https://github.com/spyder-ide/spyder/issues?q=is%3Aissue) for similar reports
* [x] Read the relevant sections of the [Spyder Troubleshooting Guide](https://github.com/spyder-ide/spyder/wiki/Troubleshooting-Guide-and-FAQ) and followed its advice
* [x] Reproduced the issue after updating with ``conda update spyder`` (or ``pip``, if not using Anaconda)
* [x] Could not reproduce inside ``jupyter qtconsole`` (if console-related)
* [ ] Tried basic troubleshooting (if a bug/error)
* [ ] Restarted Spyder
* [ ] Reset preferences with ``spyder --reset``
* [ ] Reinstalled the latest version of [Anaconda](https://www.anaconda.com/download/)
* [ ] Tried the other applicable steps from the Troubleshooting Guide
* [x] Completed the **Problem Description**, **Steps to Reproduce** and **Version** sections below
## Problem Description
When debugging, I end up with many "s" and "n" in my debugging history, preventing me from finding what I am looking for. It would be nicer to only save python commands, or at least to have an option to do so.
</issue>
<code>
[start of spyder/widgets/ipythonconsole/debugging.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Widget that handles communications between a console in debugging
9 mode and Spyder
10 """
11
12 import ast
13 import pickle
14
15 from qtpy.QtCore import Qt
16 from qtconsole.rich_jupyter_widget import RichJupyterWidget
17
18 from spyder.config.base import PICKLE_PROTOCOL
19 from spyder.config.main import CONF
20 from spyder.py3compat import to_text_string
21
22
23 class DebuggingWidget(RichJupyterWidget):
24 """
25 Widget with the necessary attributes and methods to handle
26 communications between a console in debugging mode and
27 Spyder
28 """
29
30 # --- Public API --------------------------------------------------
31 def write_to_stdin(self, line):
32 """Send raw characters to the IPython kernel through stdin"""
33 self.kernel_client.input(line)
34
35 def set_spyder_breakpoints(self, force=False):
36 """Set Spyder breakpoints into a debugging session"""
37 if self._reading or force:
38 breakpoints_dict = CONF.get('run', 'breakpoints', {})
39
40 # We need to enclose pickled values in a list to be able to
41 # send them to the kernel in Python 2
42 serialiazed_breakpoints = [pickle.dumps(breakpoints_dict,
43 protocol=PICKLE_PROTOCOL)]
44 breakpoints = to_text_string(serialiazed_breakpoints)
45
46 cmd = u"!get_ipython().kernel._set_spyder_breakpoints({})"
47 self.kernel_client.input(cmd.format(breakpoints))
48
49 def dbg_exec_magic(self, magic, args=''):
50 """Run an IPython magic while debugging."""
51 code = "!get_ipython().kernel.shell.run_line_magic('{}', '{}')".format(
52 magic, args)
53 self.kernel_client.input(code)
54
55 def refresh_from_pdb(self, pdb_state):
56 """
57 Refresh Variable Explorer and Editor from a Pdb session,
58 after running any pdb command.
59
60 See publish_pdb_state and notify_spyder in spyder_kernels
61 """
62 if 'step' in pdb_state and 'fname' in pdb_state['step']:
63 fname = pdb_state['step']['fname']
64 lineno = pdb_state['step']['lineno']
65 self.sig_pdb_step.emit(fname, lineno)
66
67 if 'namespace_view' in pdb_state:
68 self.sig_namespace_view.emit(ast.literal_eval(
69 pdb_state['namespace_view']))
70
71 if 'var_properties' in pdb_state:
72 self.sig_var_properties.emit(ast.literal_eval(
73 pdb_state['var_properties']))
74
75 # ---- Private API (overrode by us) ----------------------------
76 def _handle_input_request(self, msg):
77 """Save history and add a %plot magic."""
78 if self._hidden:
79 raise RuntimeError('Request for raw input during hidden execution.')
80
81 # Make sure that all output from the SUB channel has been processed
82 # before entering readline mode.
83 self.kernel_client.iopub_channel.flush()
84
85 def callback(line):
86 # Save history to browse it later
87 if not (len(self._control.history) > 0
88 and self._control.history[-1] == line):
89 self._control.history.append(line)
90
91 # This is the Spyder addition: add a %plot magic to display
92 # plots while debugging
93 if line.startswith('%plot '):
94 line = line.split()[-1]
95 code = "__spy_code__ = get_ipython().run_cell('%s')" % line
96 self.kernel_client.input(code)
97 else:
98 self.kernel_client.input(line)
99 if self._reading:
100 self._reading = False
101 self._readline(msg['content']['prompt'], callback=callback,
102 password=msg['content']['password'])
103
104 def _event_filter_console_keypress(self, event):
105 """Handle Key_Up/Key_Down while debugging."""
106 key = event.key()
107 if self._reading:
108 self._control.current_prompt_pos = self._prompt_pos
109 if key == Qt.Key_Up:
110 self._control.browse_history(backward=True)
111 return True
112 elif key == Qt.Key_Down:
113 self._control.browse_history(backward=False)
114 return True
115 elif key in (Qt.Key_Return, Qt.Key_Enter):
116 self._control.reset_search_pos()
117 else:
118 self._control.hist_wholeline = False
119 return super(DebuggingWidget,
120 self)._event_filter_console_keypress(event)
121 else:
122 return super(DebuggingWidget,
123 self)._event_filter_console_keypress(event)
124
[end of spyder/widgets/ipythonconsole/debugging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spyder/widgets/ipythonconsole/debugging.py b/spyder/widgets/ipythonconsole/debugging.py
--- a/spyder/widgets/ipythonconsole/debugging.py
+++ b/spyder/widgets/ipythonconsole/debugging.py
@@ -10,6 +10,7 @@
"""
import ast
+import pdb
import pickle
from qtpy.QtCore import Qt
@@ -86,7 +87,10 @@
# Save history to browse it later
if not (len(self._control.history) > 0
and self._control.history[-1] == line):
- self._control.history.append(line)
+ # do not save pdb commands
+ cmd = line.split(" ")[0]
+ if "do_" + cmd not in dir(pdb.Pdb):
+ self._control.history.append(line)
# This is the Spyder addition: add a %plot magic to display
# plots while debugging
|
{"golden_diff": "diff --git a/spyder/widgets/ipythonconsole/debugging.py b/spyder/widgets/ipythonconsole/debugging.py\n--- a/spyder/widgets/ipythonconsole/debugging.py\n+++ b/spyder/widgets/ipythonconsole/debugging.py\n@@ -10,6 +10,7 @@\n \"\"\"\n \n import ast\n+import pdb\n import pickle\n \n from qtpy.QtCore import Qt\n@@ -86,7 +87,10 @@\n # Save history to browse it later\n if not (len(self._control.history) > 0\n and self._control.history[-1] == line):\n- self._control.history.append(line)\n+ # do not save pdb commands\n+ cmd = line.split(\" \")[0]\n+ if \"do_\" + cmd not in dir(pdb.Pdb):\n+ self._control.history.append(line)\n \n # This is the Spyder addition: add a %plot magic to display\n # plots while debugging\n", "issue": "Debugging history saves both python commands and pdb commands\n<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->\r\n\r\n### Issue Report Checklist\r\n\r\n* [x] Searched the [issues page](https://github.com/spyder-ide/spyder/issues?q=is%3Aissue) for similar reports\r\n* [x] Read the relevant sections of the [Spyder Troubleshooting Guide](https://github.com/spyder-ide/spyder/wiki/Troubleshooting-Guide-and-FAQ) and followed its advice\r\n* [x] Reproduced the issue after updating with ``conda update spyder`` (or ``pip``, if not using Anaconda)\r\n* [x] Could not reproduce inside ``jupyter qtconsole`` (if console-related)\r\n* [ ] Tried basic troubleshooting (if a bug/error)\r\n * [ ] Restarted Spyder\r\n * [ ] Reset preferences with ``spyder --reset``\r\n * [ ] Reinstalled the latest version of [Anaconda](https://www.anaconda.com/download/)\r\n * [ ] Tried the other applicable steps from the Troubleshooting Guide\r\n* [x] Completed the **Problem Description**, **Steps to Reproduce** and **Version** sections below\r\n\r\n\r\n## Problem Description\r\n\r\nWhen debugging, I end up with many \"s\" and \"n\" in my debugging history, preventing me from finding what I am looking for. 
It would be nicer to only save python commands, or at least to have an option to do so.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nWidget that handles communications between a console in debugging\nmode and Spyder\n\"\"\"\n\nimport ast\nimport pickle\n\nfrom qtpy.QtCore import Qt\nfrom qtconsole.rich_jupyter_widget import RichJupyterWidget\n\nfrom spyder.config.base import PICKLE_PROTOCOL\nfrom spyder.config.main import CONF\nfrom spyder.py3compat import to_text_string\n\n\nclass DebuggingWidget(RichJupyterWidget):\n \"\"\"\n Widget with the necessary attributes and methods to handle\n communications between a console in debugging mode and\n Spyder\n \"\"\"\n\n # --- Public API --------------------------------------------------\n def write_to_stdin(self, line):\n \"\"\"Send raw characters to the IPython kernel through stdin\"\"\"\n self.kernel_client.input(line)\n\n def set_spyder_breakpoints(self, force=False):\n \"\"\"Set Spyder breakpoints into a debugging session\"\"\"\n if self._reading or force:\n breakpoints_dict = CONF.get('run', 'breakpoints', {})\n\n # We need to enclose pickled values in a list to be able to\n # send them to the kernel in Python 2\n serialiazed_breakpoints = [pickle.dumps(breakpoints_dict,\n protocol=PICKLE_PROTOCOL)]\n breakpoints = to_text_string(serialiazed_breakpoints)\n\n cmd = u\"!get_ipython().kernel._set_spyder_breakpoints({})\"\n self.kernel_client.input(cmd.format(breakpoints))\n\n def dbg_exec_magic(self, magic, args=''):\n \"\"\"Run an IPython magic while debugging.\"\"\"\n code = \"!get_ipython().kernel.shell.run_line_magic('{}', '{}')\".format(\n magic, args)\n self.kernel_client.input(code)\n\n def refresh_from_pdb(self, pdb_state):\n \"\"\"\n Refresh Variable Explorer and Editor from a Pdb session,\n after running any pdb command.\n\n See publish_pdb_state and notify_spyder in spyder_kernels\n \"\"\"\n if 'step' in pdb_state and 'fname' in pdb_state['step']:\n fname = pdb_state['step']['fname']\n lineno = pdb_state['step']['lineno']\n self.sig_pdb_step.emit(fname, lineno)\n\n if 'namespace_view' in pdb_state:\n self.sig_namespace_view.emit(ast.literal_eval(\n pdb_state['namespace_view']))\n\n if 'var_properties' in pdb_state:\n self.sig_var_properties.emit(ast.literal_eval(\n pdb_state['var_properties']))\n\n # ---- Private API (overrode by us) ----------------------------\n def _handle_input_request(self, msg):\n \"\"\"Save history and add a %plot magic.\"\"\"\n if self._hidden:\n raise RuntimeError('Request for raw input during hidden execution.')\n\n # Make sure that all output from the SUB channel has been processed\n # before entering readline mode.\n self.kernel_client.iopub_channel.flush()\n\n def callback(line):\n # Save history to browse it later\n if not (len(self._control.history) > 0\n and self._control.history[-1] == line):\n self._control.history.append(line)\n\n # This is the Spyder addition: add a %plot magic to display\n # plots while debugging\n if line.startswith('%plot '):\n line = line.split()[-1]\n code = \"__spy_code__ = get_ipython().run_cell('%s')\" % line\n self.kernel_client.input(code)\n else:\n self.kernel_client.input(line)\n if self._reading:\n self._reading = False\n self._readline(msg['content']['prompt'], callback=callback,\n password=msg['content']['password'])\n\n def _event_filter_console_keypress(self, event):\n \"\"\"Handle Key_Up/Key_Down while 
debugging.\"\"\"\n key = event.key()\n if self._reading:\n self._control.current_prompt_pos = self._prompt_pos\n if key == Qt.Key_Up:\n self._control.browse_history(backward=True)\n return True\n elif key == Qt.Key_Down:\n self._control.browse_history(backward=False)\n return True\n elif key in (Qt.Key_Return, Qt.Key_Enter):\n self._control.reset_search_pos()\n else:\n self._control.hist_wholeline = False\n return super(DebuggingWidget,\n self)._event_filter_console_keypress(event)\n else:\n return super(DebuggingWidget,\n self)._event_filter_console_keypress(event)\n", "path": "spyder/widgets/ipythonconsole/debugging.py"}]}
| 2,116 | 214 |
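
For the record above, a standalone sketch of the history filter the patch uses: a line counts as a pdb command when `pdb.Pdb` defines a matching `do_*` attribute, which also covers single-letter aliases such as `do_n` and `do_s`:

```python
import pdb


def is_pdb_command(line):
    """Return True when the first word of `line` is a pdb command such as 'n' or 's'."""
    cmd = line.split(" ")[0]
    return "do_" + cmd in dir(pdb.Pdb)


history = []
for line in ["n", "s", "next", "x = f(x)", "print(x)"]:
    if not is_pdb_command(line):
        history.append(line)

print(history)  # ['x = f(x)', 'print(x)']
```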
gh_patches_debug_26800
|
rasdani/github-patches
|
git_diff
|
gammapy__gammapy-4657
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Models are lost in FluxProfileEstimator
**Gammapy version**
gammapy 1.0.1
**Bug description**
All models attached to my datasets are discarded when I calculate a flux profile with `FluxProfileEstimator`.
**Expected behavior**
The predicted counts from the models should become part of the background when `to_spectrum_datasets` is called in `FluxProfileEstimator.run()`
</issue>
<code>
[start of gammapy/estimators/points/profile.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 """Tools to create profiles (i.e. 1D "slices" from 2D images)."""
3 from astropy import units as u
4 from regions import CircleAnnulusSkyRegion
5 from gammapy.datasets import Datasets
6 from gammapy.maps import MapAxis
7 from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
8 from .core import FluxPoints
9 from .sed import FluxPointsEstimator
10
11 __all__ = ["FluxProfileEstimator"]
12
13
14 class FluxProfileEstimator(FluxPointsEstimator):
15 """Estimate flux profiles
16
17 Parameters
18 ----------
19 regions : list of `~regions.SkyRegion`
20 regions to use
21 spectrum : `~gammapy.modeling.models.SpectralModel` (optional)
22 Spectral model to compute the fluxes or brightness.
23 Default is power-law with spectral index of 2.
24 **kwargs : dict
25 Keywords forwarded to the `FluxPointsEstimator` (see documentation
26 there for further description of valid keywords)
27
28 Examples
29 --------
30 This example shows how to compute a counts profile for the Fermi galactic
31 center region::
32
33 >>> from astropy import units as u
34 >>> from astropy.coordinates import SkyCoord
35 >>> from gammapy.data import GTI
36 >>> from gammapy.estimators import FluxProfileEstimator
37 >>> from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions
38 >>> from gammapy.datasets import MapDataset
39 >>> from gammapy.maps import RegionGeom
40
41 >>> # load example data
42 >>> filename = "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc.fits.gz"
43 >>> dataset = MapDataset.read(filename, name="fermi-dataset")
44
45 >>> # configuration
46 >>> dataset.gti = GTI.create("0s", "1e7s", "2010-01-01")
47
48 >>> # creation of the boxes and axis
49 >>> start_pos = SkyCoord("-1d", "0d", frame='galactic')
50 >>> end_pos = SkyCoord("1d", "0d", frame='galactic')
51
52 >>> regions = make_orthogonal_rectangle_sky_regions(
53 start_pos=start_pos,
54 end_pos=end_pos,
55 wcs=dataset.counts.geom.wcs,
56 height=2 * u.deg,
57 nbin=21
58 )
59
60 >>> # set up profile estimator and run
61 >>> prof_maker = FluxProfileEstimator(regions=regions, energy_edges=[10, 2000] * u.GeV)
62 >>> fermi_prof = prof_maker.run(dataset)
63 >>> print(fermi_prof)
64 FluxPoints
65 ----------
66 <BLANKLINE>
67 geom : RegionGeom
68 axes : ['lon', 'lat', 'energy', 'projected-distance']
69 shape : (1, 1, 1, 21)
70 quantities : ['norm', 'norm_err', 'ts', 'npred', 'npred_excess', 'stat', 'counts', 'success'] # noqa: E501
71 ref. model : pl
72 n_sigma : 1
73 n_sigma_ul : 2
74 sqrt_ts_threshold_ul : 2
75 sed type init : likelihood
76
77 """
78
79 tag = "FluxProfileEstimator"
80
81 def __init__(self, regions, spectrum=None, **kwargs):
82 if len(regions) <= 1:
83 raise ValueError(
84 "Please provide at least two regions for flux profile estimation."
85 )
86
87 self.regions = regions
88
89 if spectrum is None:
90 spectrum = PowerLawSpectralModel()
91
92 self.spectrum = spectrum
93 super().__init__(**kwargs)
94
95 @property
96 def projected_distance_axis(self):
97 """Get projected distance from the first region.
98
99 For normal region this is defined as the distance from the
100 center of the region. For annulus shaped regions it is the
101 mean between the inner and outer radius.
102
103 Returns
104 -------
105 axis : `MapAxis`
106 Projected distance axis
107 """
108 distances = []
109 center = self.regions[0].center
110
111 for idx, region in enumerate(self.regions):
112 if isinstance(region, CircleAnnulusSkyRegion):
113 distance = (region.inner_radius + region.outer_radius) / 2.0
114 else:
115 distance = center.separation(region.center)
116
117 distances.append(distance)
118
119 return MapAxis.from_nodes(
120 u.Quantity(distances, "deg"), name="projected-distance"
121 )
122
123 def run(self, datasets):
124 """Run flux profile estimation
125
126 Parameters
127 ----------
128 datasets : list of `~gammapy.datasets.MapDataset`
129 Map datasets.
130
131 Returns
132 -------
133 profile : `~gammapy.estimators.FluxPoints`
134 Profile flux points.
135 """
136 datasets = Datasets(datasets=datasets)
137
138 maps = []
139
140 for region in self.regions:
141 datasets_to_fit = datasets.to_spectrum_datasets(region=region)
142 datasets_to_fit.models = SkyModel(self.spectrum, name="test-source")
143 fp = super().run(datasets_to_fit)
144 maps.append(fp)
145
146 return FluxPoints.from_stack(
147 maps=maps,
148 axis=self.projected_distance_axis,
149 )
150
151 @property
152 def config_parameters(self):
153 """Config parameters"""
154 pars = self.__dict__.copy()
155 pars = {key.strip("_"): value for key, value in pars.items()}
156 pars.pop("regions")
157 return pars
158
[end of gammapy/estimators/points/profile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gammapy/estimators/points/profile.py b/gammapy/estimators/points/profile.py
--- a/gammapy/estimators/points/profile.py
+++ b/gammapy/estimators/points/profile.py
@@ -1,5 +1,6 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tools to create profiles (i.e. 1D "slices" from 2D images)."""
+import numpy as np
from astropy import units as u
from regions import CircleAnnulusSkyRegion
from gammapy.datasets import Datasets
@@ -133,12 +134,20 @@
profile : `~gammapy.estimators.FluxPoints`
Profile flux points.
"""
+
datasets = Datasets(datasets=datasets)
maps = []
-
for region in self.regions:
datasets_to_fit = datasets.to_spectrum_datasets(region=region)
+ for dataset_spec, dataset_map in zip(datasets_to_fit, datasets):
+ dataset_spec.background.data = (
+ dataset_map.npred()
+ .to_region_nd_map(
+ region, func=np.sum, weights=dataset_map.mask_safe
+ )
+ .data
+ )
datasets_to_fit.models = SkyModel(self.spectrum, name="test-source")
fp = super().run(datasets_to_fit)
maps.append(fp)
|
{"golden_diff": "diff --git a/gammapy/estimators/points/profile.py b/gammapy/estimators/points/profile.py\n--- a/gammapy/estimators/points/profile.py\n+++ b/gammapy/estimators/points/profile.py\n@@ -1,5 +1,6 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n \"\"\"Tools to create profiles (i.e. 1D \"slices\" from 2D images).\"\"\"\n+import numpy as np\n from astropy import units as u\n from regions import CircleAnnulusSkyRegion\n from gammapy.datasets import Datasets\n@@ -133,12 +134,20 @@\n profile : `~gammapy.estimators.FluxPoints`\n Profile flux points.\n \"\"\"\n+\n datasets = Datasets(datasets=datasets)\n \n maps = []\n-\n for region in self.regions:\n datasets_to_fit = datasets.to_spectrum_datasets(region=region)\n+ for dataset_spec, dataset_map in zip(datasets_to_fit, datasets):\n+ dataset_spec.background.data = (\n+ dataset_map.npred()\n+ .to_region_nd_map(\n+ region, func=np.sum, weights=dataset_map.mask_safe\n+ )\n+ .data\n+ )\n datasets_to_fit.models = SkyModel(self.spectrum, name=\"test-source\")\n fp = super().run(datasets_to_fit)\n maps.append(fp)\n", "issue": "Models are lost in FluxProfileEstimator\n**Gammapy version**\r\ngammapy 1.0.1\r\n\r\n**Bug description**\r\nAll models attached to my datasets are discarded when I calculate a flux profile with `FluxProfileEstimator`.\r\n\r\n**Expected behavior**\r\nThe predicted counts from the models should become part of the background when `to_spectrum_datasets` is called in `FluxProfileEstimator.run()`\r\n\r\n\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Tools to create profiles (i.e. 1D \"slices\" from 2D images).\"\"\"\nfrom astropy import units as u\nfrom regions import CircleAnnulusSkyRegion\nfrom gammapy.datasets import Datasets\nfrom gammapy.maps import MapAxis\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom .core import FluxPoints\nfrom .sed import FluxPointsEstimator\n\n__all__ = [\"FluxProfileEstimator\"]\n\n\nclass FluxProfileEstimator(FluxPointsEstimator):\n \"\"\"Estimate flux profiles\n\n Parameters\n ----------\n regions : list of `~regions.SkyRegion`\n regions to use\n spectrum : `~gammapy.modeling.models.SpectralModel` (optional)\n Spectral model to compute the fluxes or brightness.\n Default is power-law with spectral index of 2.\n **kwargs : dict\n Keywords forwarded to the `FluxPointsEstimator` (see documentation\n there for further description of valid keywords)\n\n Examples\n --------\n This example shows how to compute a counts profile for the Fermi galactic\n center region::\n\n >>> from astropy import units as u\n >>> from astropy.coordinates import SkyCoord\n >>> from gammapy.data import GTI\n >>> from gammapy.estimators import FluxProfileEstimator\n >>> from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions\n >>> from gammapy.datasets import MapDataset\n >>> from gammapy.maps import RegionGeom\n\n >>> # load example data\n >>> filename = \"$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc.fits.gz\"\n >>> dataset = MapDataset.read(filename, name=\"fermi-dataset\")\n\n >>> # configuration\n >>> dataset.gti = GTI.create(\"0s\", \"1e7s\", \"2010-01-01\")\n\n >>> # creation of the boxes and axis\n >>> start_pos = SkyCoord(\"-1d\", \"0d\", frame='galactic')\n >>> end_pos = SkyCoord(\"1d\", \"0d\", frame='galactic')\n\n >>> regions = make_orthogonal_rectangle_sky_regions(\n start_pos=start_pos,\n end_pos=end_pos,\n wcs=dataset.counts.geom.wcs,\n height=2 * u.deg,\n nbin=21\n )\n\n >>> # set up 
profile estimator and run\n >>> prof_maker = FluxProfileEstimator(regions=regions, energy_edges=[10, 2000] * u.GeV)\n >>> fermi_prof = prof_maker.run(dataset)\n >>> print(fermi_prof)\n FluxPoints\n ----------\n <BLANKLINE>\n geom : RegionGeom\n axes : ['lon', 'lat', 'energy', 'projected-distance']\n shape : (1, 1, 1, 21)\n quantities : ['norm', 'norm_err', 'ts', 'npred', 'npred_excess', 'stat', 'counts', 'success'] # noqa: E501\n ref. model : pl\n n_sigma : 1\n n_sigma_ul : 2\n sqrt_ts_threshold_ul : 2\n sed type init : likelihood\n\n \"\"\"\n\n tag = \"FluxProfileEstimator\"\n\n def __init__(self, regions, spectrum=None, **kwargs):\n if len(regions) <= 1:\n raise ValueError(\n \"Please provide at least two regions for flux profile estimation.\"\n )\n\n self.regions = regions\n\n if spectrum is None:\n spectrum = PowerLawSpectralModel()\n\n self.spectrum = spectrum\n super().__init__(**kwargs)\n\n @property\n def projected_distance_axis(self):\n \"\"\"Get projected distance from the first region.\n\n For normal region this is defined as the distance from the\n center of the region. For annulus shaped regions it is the\n mean between the inner and outer radius.\n\n Returns\n -------\n axis : `MapAxis`\n Projected distance axis\n \"\"\"\n distances = []\n center = self.regions[0].center\n\n for idx, region in enumerate(self.regions):\n if isinstance(region, CircleAnnulusSkyRegion):\n distance = (region.inner_radius + region.outer_radius) / 2.0\n else:\n distance = center.separation(region.center)\n\n distances.append(distance)\n\n return MapAxis.from_nodes(\n u.Quantity(distances, \"deg\"), name=\"projected-distance\"\n )\n\n def run(self, datasets):\n \"\"\"Run flux profile estimation\n\n Parameters\n ----------\n datasets : list of `~gammapy.datasets.MapDataset`\n Map datasets.\n\n Returns\n -------\n profile : `~gammapy.estimators.FluxPoints`\n Profile flux points.\n \"\"\"\n datasets = Datasets(datasets=datasets)\n\n maps = []\n\n for region in self.regions:\n datasets_to_fit = datasets.to_spectrum_datasets(region=region)\n datasets_to_fit.models = SkyModel(self.spectrum, name=\"test-source\")\n fp = super().run(datasets_to_fit)\n maps.append(fp)\n\n return FluxPoints.from_stack(\n maps=maps,\n axis=self.projected_distance_axis,\n )\n\n @property\n def config_parameters(self):\n \"\"\"Config parameters\"\"\"\n pars = self.__dict__.copy()\n pars = {key.strip(\"_\"): value for key, value in pars.items()}\n pars.pop(\"regions\")\n return pars\n", "path": "gammapy/estimators/points/profile.py"}]}
| 2,235 | 312 |
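
For the record above, a minimal sketch of the background-folding step added by the patch, pulled into a helper; it assumes gammapy datasets exposing the `npred()`, `to_region_nd_map()`, `mask_safe` and `background` members used in `profile.py`:

```python
import numpy as np


def fold_npred_into_background(map_datasets, spectrum_datasets, region):
    """Add each map dataset's predicted counts inside `region` to the background
    of the corresponding reduced spectrum dataset, so fitted models still
    contribute to the profile estimate."""
    for dataset_spec, dataset_map in zip(spectrum_datasets, map_datasets):
        npred_in_region = dataset_map.npred().to_region_nd_map(
            region, func=np.sum, weights=dataset_map.mask_safe
        )
        dataset_spec.background.data = npred_in_region.data
```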
gh_patches_debug_19013
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-732
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Celery: Unhandled exception in `task_prerun_callback`
Using celery-batches 0.2.0, celery 4.4.7, python 3.10, redis backend
What happens: A request with a missing `delivery_info` raises an error. This bubbles up to Sentry, another tool.
Expected behavior: Drop the trace unless error reporting is enabled on scout-apm.
```
AttributeError: 'NoneType' object has no attribute 'get'
File "celery/utils/dispatch/signal.py", line 288, in send
response = receiver(signal=self, sender=sender, **named)
File "scout_apm/celery.py", line 60, in task_prerun_callback
tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
```
```
{
args: [
[
<celery_batches.SimpleRequest object at 0x7fbd1f2f9720>,
<celery_batches.SimpleRequest object at 0x7fcd15dd65e0>,
<celery_batches.SimpleRequest object at 0x7fad18d6fa30>,
<celery_batches.SimpleRequest object at 0x7fed14d6e1a0>,
<celery_batches.SimpleRequest object at 0x7fad13d6e230>
]
],
kwargs: {},
sender: <@task: project.app.jobs.track_happening of project at 0x2faa2c2713f0 (v2 compatible)>,
signal: <Signal: task_prerun providing_args={'args', 'kwargs', 'task', 'task_id'}>,
task_id: '684c6b14-4349-42e7-bc8d-4d410bceb1c9'
}
```
</issue>
<code>
[start of src/scout_apm/celery.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import logging
6
7 from celery.signals import before_task_publish, task_failure, task_postrun, task_prerun
8
9 try:
10 import django
11
12 if django.VERSION < (3, 1):
13 from django.views.debug import get_safe_settings
14 else:
15 from django.views.debug import SafeExceptionReporterFilter
16
17 def get_safe_settings():
18 return SafeExceptionReporterFilter().get_safe_settings()
19
20 except ImportError:
21 # Django not installed
22 get_safe_settings = None
23
24 import scout_apm.core
25 from scout_apm.compat import datetime_to_timestamp, string_type
26 from scout_apm.core.config import scout_config
27 from scout_apm.core.error import ErrorMonitor
28 from scout_apm.core.tracked_request import TrackedRequest
29
30 logger = logging.getLogger(__name__)
31
32
33 def before_task_publish_callback(headers=None, properties=None, **kwargs):
34 if "scout_task_start" not in headers:
35 headers["scout_task_start"] = datetime_to_timestamp(dt.datetime.utcnow())
36
37
38 def task_prerun_callback(task=None, **kwargs):
39 tracked_request = TrackedRequest.instance()
40 tracked_request.is_real_request = True
41
42 start = getattr(task.request, "scout_task_start", None)
43 if start is not None:
44 now = datetime_to_timestamp(dt.datetime.utcnow())
45 try:
46 queue_time = now - start
47 except TypeError:
48 pass
49 else:
50 tracked_request.tag("queue_time", queue_time)
51
52 task_id = getattr(task.request, "id", None)
53 if task_id:
54 tracked_request.tag("task_id", task_id)
55 parent_task_id = getattr(task.request, "parent_id", None)
56 if parent_task_id:
57 tracked_request.tag("parent_task_id", parent_task_id)
58
59 delivery_info = task.request.delivery_info
60 tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
61 tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
62 tracked_request.tag("priority", delivery_info.get("priority", "unknown"))
63 tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown"))
64 tracked_request.tag("queue", delivery_info.get("queue", "unknown"))
65
66 tracked_request.start_span(operation=("Job/" + task.name))
67
68
69 def task_postrun_callback(task=None, **kwargs):
70 tracked_request = TrackedRequest.instance()
71 tracked_request.stop_span()
72
73
74 def task_failure_callback(
75 sender,
76 task_id=None,
77 exception=None,
78 args=None,
79 kwargs=None,
80 traceback=None,
81 einfo=None,
82 **remaining
83 ):
84 tracked_request = TrackedRequest.instance()
85 tracked_request.tag("error", "true")
86
87 custom_controller = sender.name
88 custom_params = {
89 "celery": {
90 "task_id": task_id,
91 "args": args,
92 "kwargs": kwargs,
93 }
94 }
95
96 # Look up the django settings if populated.
97 environment = None
98 if get_safe_settings:
99 try:
100 environment = get_safe_settings()
101 except django.core.exceptions.ImproperlyConfigured as exc:
102 # Django not setup correctly
103 logger.debug(
104 "Celery integration does not have django configured properly: %r", exc
105 )
106 pass
107 except Exception as exc:
108 logger.debug(
109 "Celery task_failure callback exception: %r", exc, exc_info=exc
110 )
111 pass
112
113 # Celery occassionally will send the traceback as a string rather
114 # than a Stack trace object as the docs indicate. In that case,
115 # fall back to the billiard ExceptionInfo instance
116 traceback = (
117 traceback if traceback and not isinstance(traceback, string_type) else einfo.tb
118 )
119 exc_info = (exception.__class__, exception, traceback)
120 ErrorMonitor.send(
121 exc_info,
122 environment=environment,
123 custom_params=custom_params,
124 custom_controller=custom_controller,
125 )
126
127
128 def install(app=None):
129 if app is not None:
130 copy_configuration(app)
131
132 installed = scout_apm.core.install()
133 if not installed:
134 return
135
136 before_task_publish.connect(before_task_publish_callback)
137 task_prerun.connect(task_prerun_callback)
138 task_failure.connect(task_failure_callback)
139 task_postrun.connect(task_postrun_callback)
140
141
142 def copy_configuration(app):
143 prefix = "scout_"
144 prefix_len = len(prefix)
145
146 to_set = {}
147 for key, value in app.conf.items():
148 key_lower = key.lower()
149 if key_lower.startswith(prefix) and len(key_lower) > prefix_len:
150 scout_key = key_lower[prefix_len:]
151 to_set[scout_key] = value
152
153 scout_config.set(**to_set)
154
155
156 def uninstall():
157 before_task_publish.disconnect(before_task_publish_callback)
158 task_prerun.disconnect(task_prerun_callback)
159 task_postrun.disconnect(task_postrun_callback)
160 task_failure.disconnect(task_failure_callback)
161
[end of src/scout_apm/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py
--- a/src/scout_apm/celery.py
+++ b/src/scout_apm/celery.py
@@ -56,12 +56,13 @@
if parent_task_id:
tracked_request.tag("parent_task_id", parent_task_id)
- delivery_info = task.request.delivery_info
- tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
- tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
- tracked_request.tag("priority", delivery_info.get("priority", "unknown"))
- tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown"))
- tracked_request.tag("queue", delivery_info.get("queue", "unknown"))
+ delivery_info = getattr(task.request, "delivery_info", None)
+ if delivery_info:
+ tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
+ tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
+ tracked_request.tag("priority", delivery_info.get("priority", "unknown"))
+ tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown"))
+ tracked_request.tag("queue", delivery_info.get("queue", "unknown"))
tracked_request.start_span(operation=("Job/" + task.name))
|
{"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -56,12 +56,13 @@\n if parent_task_id:\n tracked_request.tag(\"parent_task_id\", parent_task_id)\n \n- delivery_info = task.request.delivery_info\n- tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n- tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n- tracked_request.tag(\"priority\", delivery_info.get(\"priority\", \"unknown\"))\n- tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n- tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n+ delivery_info = getattr(task.request, \"delivery_info\", None)\n+ if delivery_info:\n+ tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n+ tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n+ tracked_request.tag(\"priority\", delivery_info.get(\"priority\", \"unknown\"))\n+ tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n+ tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n \n tracked_request.start_span(operation=(\"Job/\" + task.name))\n", "issue": "Celery: Unhanded exception in `task_prerun_callback`\nUsing celery-batches 0.2.0, celery 4.4.7, python 3.10, redis backend\r\n\r\nWhat happens: Request with missing `delivery_info` raises error. This bubbles up to sentry, another tool.\r\n\r\nExpected behavior: Drop the trace is unless error reporting is enabled on scout-apm.\r\n\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'get'\r\n File \"celery/utils/dispatch/signal.py\", line 288, in send\r\n response = receiver(signal=self, sender=sender, **named)\r\n File \"scout_apm/celery.py\", line 60, in task_prerun_callback\r\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\r\n```\r\n\r\n```\r\n{\r\nargs: [\r\n[\r\n<celery_batches.SimpleRequest object at 0x7fbd1f2f9720>, \r\n<celery_batches.SimpleRequest object at 0x7fcd15dd65e0>, \r\n<celery_batches.SimpleRequest object at 0x7fad18d6fa30>, \r\n<celery_batches.SimpleRequest object at 0x7fed14d6e1a0>, \r\n<celery_batches.SimpleRequest object at 0x7fad13d6e230>\r\n]\r\n], \r\nkwargs: {}, \r\nsender: <@task: project.app.jobs.track_happening of project at 0x2faa2c2713f0 (v2 compatible)>, \r\nsignal: <Signal: task_prerun providing_args={'args', 'kwargs', 'task', 'task_id'}>, \r\ntask_id: '684c6b14-4349-42e7-bc8d-4d410bceb1c9'\r\n}\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\n\nfrom celery.signals import before_task_publish, task_failure, task_postrun, task_prerun\n\ntry:\n import django\n\n if django.VERSION < (3, 1):\n from django.views.debug import get_safe_settings\n else:\n from django.views.debug import SafeExceptionReporterFilter\n\n def get_safe_settings():\n return SafeExceptionReporterFilter().get_safe_settings()\n\nexcept ImportError:\n # Django not installed\n get_safe_settings = None\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp, string_type\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.error import ErrorMonitor\nfrom scout_apm.core.tracked_request import TrackedRequest\n\nlogger = logging.getLogger(__name__)\n\n\ndef before_task_publish_callback(headers=None, properties=None, **kwargs):\n if 
\"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef task_prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n\n start = getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n task_id = getattr(task.request, \"id\", None)\n if task_id:\n tracked_request.tag(\"task_id\", task_id)\n parent_task_id = getattr(task.request, \"parent_id\", None)\n if parent_task_id:\n tracked_request.tag(\"parent_task_id\", parent_task_id)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"priority\", delivery_info.get(\"priority\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef task_postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef task_failure_callback(\n sender,\n task_id=None,\n exception=None,\n args=None,\n kwargs=None,\n traceback=None,\n einfo=None,\n **remaining\n):\n tracked_request = TrackedRequest.instance()\n tracked_request.tag(\"error\", \"true\")\n\n custom_controller = sender.name\n custom_params = {\n \"celery\": {\n \"task_id\": task_id,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n }\n\n # Look up the django settings if populated.\n environment = None\n if get_safe_settings:\n try:\n environment = get_safe_settings()\n except django.core.exceptions.ImproperlyConfigured as exc:\n # Django not setup correctly\n logger.debug(\n \"Celery integration does not have django configured properly: %r\", exc\n )\n pass\n except Exception as exc:\n logger.debug(\n \"Celery task_failure callback exception: %r\", exc, exc_info=exc\n )\n pass\n\n # Celery occassionally will send the traceback as a string rather\n # than a Stack trace object as the docs indicate. 
In that case,\n # fall back to the billiard ExceptionInfo instance\n traceback = (\n traceback if traceback and not isinstance(traceback, string_type) else einfo.tb\n )\n exc_info = (exception.__class__, exception, traceback)\n ErrorMonitor.send(\n exc_info,\n environment=environment,\n custom_params=custom_params,\n custom_controller=custom_controller,\n )\n\n\ndef install(app=None):\n if app is not None:\n copy_configuration(app)\n\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_task_publish_callback)\n task_prerun.connect(task_prerun_callback)\n task_failure.connect(task_failure_callback)\n task_postrun.connect(task_postrun_callback)\n\n\ndef copy_configuration(app):\n prefix = \"scout_\"\n prefix_len = len(prefix)\n\n to_set = {}\n for key, value in app.conf.items():\n key_lower = key.lower()\n if key_lower.startswith(prefix) and len(key_lower) > prefix_len:\n scout_key = key_lower[prefix_len:]\n to_set[scout_key] = value\n\n scout_config.set(**to_set)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_task_publish_callback)\n task_prerun.disconnect(task_prerun_callback)\n task_postrun.disconnect(task_postrun_callback)\n task_failure.disconnect(task_failure_callback)\n", "path": "src/scout_apm/celery.py"}]}
| 2,413 | 307 |
gh_patches_debug_11127
|
rasdani/github-patches
|
git_diff
|
OpenNMT__OpenNMT-tf-702
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug with multiple source files and data_dir configuration
Sample: when you add export_vocabulary_assets with multiple files
```
model_dir: model
data:
# (optional) During export save the vocabularies as model assets, otherwise embed
# them in the graph itself (default: True).
export_vocabulary_assets: true
train_features_file:
- train/src.txt
- train/ner.txt
```
the crash:
```
File "/usr/local/lib/python3.6/dist-packages/opennmt/bin/main.py", line 200, in main
config["data"] = _prefix_paths(args.data_dir, config["data"])
File "/usr/local/lib/python3.6/dist-packages/opennmt/bin/main.py", line 42, in _prefix_paths
paths[key] = _prefix_paths(prefix, path)
File "/usr/local/lib/python3.6/dist-packages/opennmt/bin/main.py", line 50, in _prefix_paths
new_path = os.path.join(prefix, path)
File "/usr/lib/python3.6/posixpath.py", line 94, in join
genericpath._check_arg_types('join', a, *p)
File "/usr/lib/python3.6/genericpath.py", line 149, in _check_arg_types
(funcname, s.__class__.__name__)) from None
TypeError: join() argument must be str or bytes, not 'bool'
```
</issue>
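(A minimal sketch, not from the repository, of why the traceback above ends in `os.path.join` and how a type guard avoids it; the helper name `prefix_paths` is illustrative, and it skips the `tf.io.gfile.exists` fallback the real `_prefix_paths` performs.)

```python
import os


def prefix_paths(prefix, paths):
    # Recurse through dicts/lists and prefix only strings; non-path values such as the
    # boolean export_vocabulary_assets flag pass through untouched.
    if isinstance(paths, dict):
        return {key: prefix_paths(prefix, value) for key, value in paths.items()}
    if isinstance(paths, list):
        return [prefix_paths(prefix, value) for value in paths]
    if isinstance(paths, str):
        return os.path.join(prefix, paths)
    return paths  # bool/int/None: joining these is what raised the TypeError


data = {
    "export_vocabulary_assets": True,
    "train_features_file": ["train/src.txt", "train/ner.txt"],
}
print(prefix_paths("/data", data))  # the bool survives; only the file paths get prefixed
```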
<code>
[start of opennmt/bin/main.py]
1 """Main script."""
2
3 import argparse
4 import logging
5 import os
6 import sys
7
8 import tensorflow as tf
9
10 from opennmt import __version__
11 from opennmt.models import catalog
12 from opennmt.runner import Runner
13 from opennmt.config import load_model, load_config
14 from opennmt.utils import exporters
15
16
17 _PYTHON_TO_TENSORFLOW_LOGGING_LEVEL = {
18 logging.CRITICAL: 3,
19 logging.ERROR: 2,
20 logging.WARNING: 1,
21 logging.INFO: 0,
22 logging.DEBUG: 0,
23 logging.NOTSET: 0,
24 }
25
26 def _set_log_level(log_level):
27 tf.get_logger().setLevel(log_level)
28 os.environ["TF_CPP_MIN_LOG_LEVEL"] = str(_PYTHON_TO_TENSORFLOW_LOGGING_LEVEL[log_level])
29
30 def _prefix_paths(prefix, paths):
31 """Recursively prefix paths.
32
33 Args:
34 prefix: The prefix to apply.
35 data: A dict of relative paths.
36
37 Returns:
38 The updated dict.
39 """
40 if isinstance(paths, dict):
41 for key, path in paths.items():
42 paths[key] = _prefix_paths(prefix, path)
43 return paths
44 elif isinstance(paths, list):
45 for i, path in enumerate(paths):
46 paths[i] = _prefix_paths(prefix, path)
47 return paths
48 else:
49 path = paths
50 new_path = os.path.join(prefix, path)
51 if tf.io.gfile.exists(new_path):
52 return new_path
53 else:
54 return path
55
56 def main():
57 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
58 parser.add_argument("-v", "--version", action="version", version="OpenNMT-tf %s" % __version__)
59 parser.add_argument("--config", required=True, nargs="+",
60 help="List of configuration files.")
61 parser.add_argument("--auto_config", default=False, action="store_true",
62 help="Enable automatic configuration values.")
63 parser.add_argument("--model_type", default="",
64 choices=list(sorted(catalog.list_model_names_from_catalog())),
65 help="Model type from the catalog.")
66 parser.add_argument("--model", default="",
67 help="Custom model configuration file.")
68 parser.add_argument("--run_dir", default="",
69 help="If set, model_dir will be created relative to this location.")
70 parser.add_argument("--data_dir", default="",
71 help="If set, data files are expected to be relative to this location.")
72 parser.add_argument("--checkpoint_path", default=None,
73 help=("Specific checkpoint or model directory to load "
74 "(when a directory is set, the latest checkpoint is used)."))
75 parser.add_argument("--log_level", default="INFO",
76 choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"],
77 help="Logs verbosity.")
78 parser.add_argument("--seed", type=int, default=None,
79 help="Random seed.")
80 parser.add_argument("--gpu_allow_growth", default=False, action="store_true",
81 help="Allocate GPU memory dynamically.")
82 parser.add_argument("--intra_op_parallelism_threads", type=int, default=0,
83 help=("Number of intra op threads (0 means the system picks "
84 "an appropriate number)."))
85 parser.add_argument("--inter_op_parallelism_threads", type=int, default=0,
86 help=("Number of inter op threads (0 means the system picks "
87 "an appropriate number)."))
88 parser.add_argument("--mixed_precision", default=False, action="store_true",
89 help="Enable mixed precision.")
90
91 subparsers = parser.add_subparsers(help="Run type.", dest="run_type")
92 subparsers.required = True
93 parser_train = subparsers.add_parser("train", help="Training.")
94 parser_train.add_argument(
95 "--with_eval", default=False, action="store_true",
96 help="Enable automatic evaluation.")
97 parser_train.add_argument(
98 "--num_gpus", type=int, default=1,
99 help="Number of GPUs to use for in-graph replication.")
100 parser_train.add_argument(
101 "--horovod", default=False, action="store_true",
102 help="Enable Horovod training mode.")
103
104 parser_eval = subparsers.add_parser("eval", help="Evaluation.")
105 parser_eval.add_argument(
106 "--features_file", nargs="+", default=None,
107 help="Input features files.")
108 parser_eval.add_argument(
109 "--labels_file", default=None,
110 help="Output labels files.")
111
112 parser_infer = subparsers.add_parser("infer", help="Inference.")
113 parser_infer.add_argument(
114 "--features_file", nargs="+", required=True,
115 help="Run inference on this file.")
116 parser_infer.add_argument(
117 "--predictions_file", default="",
118 help=("File used to save predictions. If not set, predictions are printed "
119 "on the standard output."))
120 parser_infer.add_argument(
121 "--log_prediction_time", default=False, action="store_true",
122 help="Logs some prediction time metrics.")
123
124 parser_export = subparsers.add_parser("export", help="Model export.")
125 parser_export.add_argument(
126 "--export_dir", required=True,
127 help="The directory of the exported model.")
128 parser_export.add_argument(
129 "--export_format", choices=exporters.list_exporters(), default="saved_model",
130 help="Format of the exported model.")
131
132 parser_score = subparsers.add_parser("score", help="Scoring.")
133 parser_score.add_argument("--features_file", nargs="+", required=True,
134 help="Features file.")
135 parser_score.add_argument("--predictions_file", default=None,
136 help="Predictions to score.")
137
138 parser_average_checkpoints = subparsers.add_parser(
139 "average_checkpoints", help="Checkpoint averaging.")
140 parser_average_checkpoints.add_argument(
141 "--output_dir", required=True,
142 help="The output directory for the averaged checkpoint.")
143 parser_average_checkpoints.add_argument(
144 "--max_count", type=int, default=8,
145 help="The maximal number of checkpoints to average.")
146
147 parser_update_vocab = subparsers.add_parser(
148 "update_vocab", help="Update model vocabularies in checkpoint.")
149 parser_update_vocab.add_argument(
150 "--output_dir", required=True,
151 help="The output directory for the updated checkpoint.")
152 parser_update_vocab.add_argument(
153 "--src_vocab", default=None,
154 help="Path to the new source vocabulary.")
155 parser_update_vocab.add_argument(
156 "--tgt_vocab", default=None,
157 help="Path to the new target vocabulary.")
158
159 # When using an option that takes multiple values just before the run type,
160 # the run type is treated as a value of this option. To fix this issue, we
161 # inject a placeholder option just before the run type to clearly separate it.
162 parser.add_argument("--placeholder", action="store_true", help=argparse.SUPPRESS)
163 run_types = set(subparsers.choices.keys())
164 args = sys.argv[1:]
165 for i, arg in enumerate(args):
166 if arg in run_types:
167 args.insert(i, "--placeholder")
168 break
169
170 args = parser.parse_args(args)
171 if hasattr(args, "features_file") and args.features_file and len(args.features_file) == 1:
172 args.features_file = args.features_file[0]
173
174 _set_log_level(getattr(logging, args.log_level))
175 tf.config.threading.set_intra_op_parallelism_threads(args.intra_op_parallelism_threads)
176 tf.config.threading.set_inter_op_parallelism_threads(args.inter_op_parallelism_threads)
177
178 gpus = tf.config.list_physical_devices(device_type="GPU")
179 if hasattr(args, "horovod") and args.horovod:
180 import horovod.tensorflow as hvd # pylint: disable=import-outside-toplevel
181 hvd.init()
182 is_master = hvd.rank() == 0
183 if gpus:
184 local_gpu = gpus[hvd.local_rank()]
185 tf.config.experimental.set_visible_devices(local_gpu, device_type="GPU")
186 gpus = [local_gpu]
187 else:
188 hvd = None
189 is_master = True
190
191 if args.gpu_allow_growth:
192 for device in gpus:
193 tf.config.experimental.set_memory_growth(device, enable=True)
194
195 # Load and merge run configurations.
196 config = load_config(args.config)
197 if args.run_dir:
198 config["model_dir"] = os.path.join(args.run_dir, config["model_dir"])
199 if args.data_dir:
200 config["data"] = _prefix_paths(args.data_dir, config["data"])
201
202 if is_master and not tf.io.gfile.exists(config["model_dir"]):
203 tf.get_logger().info("Creating model directory %s", config["model_dir"])
204 tf.io.gfile.makedirs(config["model_dir"])
205
206 model = load_model(
207 config["model_dir"],
208 model_file=args.model,
209 model_name=args.model_type,
210 serialize_model=is_master,
211 as_builder=True)
212 runner = Runner(
213 model,
214 config,
215 auto_config=args.auto_config,
216 mixed_precision=args.mixed_precision,
217 seed=args.seed)
218
219 if args.run_type == "train":
220 runner.train(
221 num_devices=args.num_gpus,
222 with_eval=args.with_eval,
223 checkpoint_path=args.checkpoint_path,
224 hvd=hvd)
225 elif args.run_type == "eval":
226 metrics = runner.evaluate(
227 checkpoint_path=args.checkpoint_path,
228 features_file=args.features_file,
229 labels_file=args.labels_file)
230 print(metrics)
231 elif args.run_type == "infer":
232 runner.infer(
233 args.features_file,
234 predictions_file=args.predictions_file,
235 checkpoint_path=args.checkpoint_path,
236 log_time=args.log_prediction_time)
237 elif args.run_type == "export":
238 runner.export(
239 args.export_dir,
240 checkpoint_path=args.checkpoint_path,
241 exporter=exporters.make_exporter(args.export_format))
242 elif args.run_type == "score":
243 runner.score(
244 args.features_file,
245 args.predictions_file,
246 checkpoint_path=args.checkpoint_path)
247 elif args.run_type == "average_checkpoints":
248 runner.average_checkpoints(args.output_dir, max_count=args.max_count)
249 elif args.run_type == "update_vocab":
250 runner.update_vocab(
251 args.output_dir,
252 src_vocab=args.src_vocab,
253 tgt_vocab=args.tgt_vocab)
254
255
256 if __name__ == "__main__":
257 main()
258
[end of opennmt/bin/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opennmt/bin/main.py b/opennmt/bin/main.py
--- a/opennmt/bin/main.py
+++ b/opennmt/bin/main.py
@@ -45,13 +45,15 @@
for i, path in enumerate(paths):
paths[i] = _prefix_paths(prefix, path)
return paths
- else:
+ elif isinstance(paths, str):
path = paths
new_path = os.path.join(prefix, path)
if tf.io.gfile.exists(new_path):
return new_path
else:
return path
+ else:
+ return paths
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
|
{"golden_diff": "diff --git a/opennmt/bin/main.py b/opennmt/bin/main.py\n--- a/opennmt/bin/main.py\n+++ b/opennmt/bin/main.py\n@@ -45,13 +45,15 @@\n for i, path in enumerate(paths):\n paths[i] = _prefix_paths(prefix, path)\n return paths\n- else:\n+ elif isinstance(paths, str):\n path = paths\n new_path = os.path.join(prefix, path)\n if tf.io.gfile.exists(new_path):\n return new_path\n else:\n return path\n+ else:\n+ return paths\n \n def main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", "issue": "Bug with multiple source files and data_dir configuration\nSample When you add the export_vocabulary_assets with multiple files\r\n```\r\nmodel_dir: model\r\n\r\ndata:\r\n # (optional) During export save the vocabularies as model assets, otherwise embed\r\n # them in the graph itself (default: True).\r\n export_vocabulary_assets: true\r\n\r\n train_features_file:\r\n - train/src.txt\r\n - train/ner.txt\r\n```\r\n\r\nthe crash:\r\n```\r\n File \"/usr/local/lib/python3.6/dist-packages/opennmt/bin/main.py\", line 200, in main\r\n config[\"data\"] = _prefix_paths(args.data_dir, config[\"data\"])\r\n File \"/usr/local/lib/python3.6/dist-packages/opennmt/bin/main.py\", line 42, in _prefix_paths\r\n paths[key] = _prefix_paths(prefix, path)\r\n File \"/usr/local/lib/python3.6/dist-packages/opennmt/bin/main.py\", line 50, in _prefix_paths\r\n new_path = os.path.join(prefix, path)\r\n File \"/usr/lib/python3.6/posixpath.py\", line 94, in join\r\n genericpath._check_arg_types('join', a, *p)\r\n File \"/usr/lib/python3.6/genericpath.py\", line 149, in _check_arg_types\r\n (funcname, s.__class__.__name__)) from None\r\nTypeError: join() argument must be str or bytes, not 'bool'\r\n```\n", "before_files": [{"content": "\"\"\"Main script.\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport tensorflow as tf\n\nfrom opennmt import __version__\nfrom opennmt.models import catalog\nfrom opennmt.runner import Runner\nfrom opennmt.config import load_model, load_config\nfrom opennmt.utils import exporters\n\n\n_PYTHON_TO_TENSORFLOW_LOGGING_LEVEL = {\n logging.CRITICAL: 3,\n logging.ERROR: 2,\n logging.WARNING: 1,\n logging.INFO: 0,\n logging.DEBUG: 0,\n logging.NOTSET: 0,\n}\n\ndef _set_log_level(log_level):\n tf.get_logger().setLevel(log_level)\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = str(_PYTHON_TO_TENSORFLOW_LOGGING_LEVEL[log_level])\n\ndef _prefix_paths(prefix, paths):\n \"\"\"Recursively prefix paths.\n\n Args:\n prefix: The prefix to apply.\n data: A dict of relative paths.\n\n Returns:\n The updated dict.\n \"\"\"\n if isinstance(paths, dict):\n for key, path in paths.items():\n paths[key] = _prefix_paths(prefix, path)\n return paths\n elif isinstance(paths, list):\n for i, path in enumerate(paths):\n paths[i] = _prefix_paths(prefix, path)\n return paths\n else:\n path = paths\n new_path = os.path.join(prefix, path)\n if tf.io.gfile.exists(new_path):\n return new_path\n else:\n return path\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-v\", \"--version\", action=\"version\", version=\"OpenNMT-tf %s\" % __version__)\n parser.add_argument(\"--config\", required=True, nargs=\"+\",\n help=\"List of configuration files.\")\n parser.add_argument(\"--auto_config\", default=False, action=\"store_true\",\n help=\"Enable automatic configuration values.\")\n parser.add_argument(\"--model_type\", default=\"\",\n 
choices=list(sorted(catalog.list_model_names_from_catalog())),\n help=\"Model type from the catalog.\")\n parser.add_argument(\"--model\", default=\"\",\n help=\"Custom model configuration file.\")\n parser.add_argument(\"--run_dir\", default=\"\",\n help=\"If set, model_dir will be created relative to this location.\")\n parser.add_argument(\"--data_dir\", default=\"\",\n help=\"If set, data files are expected to be relative to this location.\")\n parser.add_argument(\"--checkpoint_path\", default=None,\n help=(\"Specific checkpoint or model directory to load \"\n \"(when a directory is set, the latest checkpoint is used).\"))\n parser.add_argument(\"--log_level\", default=\"INFO\",\n choices=[\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"],\n help=\"Logs verbosity.\")\n parser.add_argument(\"--seed\", type=int, default=None,\n help=\"Random seed.\")\n parser.add_argument(\"--gpu_allow_growth\", default=False, action=\"store_true\",\n help=\"Allocate GPU memory dynamically.\")\n parser.add_argument(\"--intra_op_parallelism_threads\", type=int, default=0,\n help=(\"Number of intra op threads (0 means the system picks \"\n \"an appropriate number).\"))\n parser.add_argument(\"--inter_op_parallelism_threads\", type=int, default=0,\n help=(\"Number of inter op threads (0 means the system picks \"\n \"an appropriate number).\"))\n parser.add_argument(\"--mixed_precision\", default=False, action=\"store_true\",\n help=\"Enable mixed precision.\")\n\n subparsers = parser.add_subparsers(help=\"Run type.\", dest=\"run_type\")\n subparsers.required = True\n parser_train = subparsers.add_parser(\"train\", help=\"Training.\")\n parser_train.add_argument(\n \"--with_eval\", default=False, action=\"store_true\",\n help=\"Enable automatic evaluation.\")\n parser_train.add_argument(\n \"--num_gpus\", type=int, default=1,\n help=\"Number of GPUs to use for in-graph replication.\")\n parser_train.add_argument(\n \"--horovod\", default=False, action=\"store_true\",\n help=\"Enable Horovod training mode.\")\n\n parser_eval = subparsers.add_parser(\"eval\", help=\"Evaluation.\")\n parser_eval.add_argument(\n \"--features_file\", nargs=\"+\", default=None,\n help=\"Input features files.\")\n parser_eval.add_argument(\n \"--labels_file\", default=None,\n help=\"Output labels files.\")\n\n parser_infer = subparsers.add_parser(\"infer\", help=\"Inference.\")\n parser_infer.add_argument(\n \"--features_file\", nargs=\"+\", required=True,\n help=\"Run inference on this file.\")\n parser_infer.add_argument(\n \"--predictions_file\", default=\"\",\n help=(\"File used to save predictions. 
If not set, predictions are printed \"\n \"on the standard output.\"))\n parser_infer.add_argument(\n \"--log_prediction_time\", default=False, action=\"store_true\",\n help=\"Logs some prediction time metrics.\")\n\n parser_export = subparsers.add_parser(\"export\", help=\"Model export.\")\n parser_export.add_argument(\n \"--export_dir\", required=True,\n help=\"The directory of the exported model.\")\n parser_export.add_argument(\n \"--export_format\", choices=exporters.list_exporters(), default=\"saved_model\",\n help=\"Format of the exported model.\")\n\n parser_score = subparsers.add_parser(\"score\", help=\"Scoring.\")\n parser_score.add_argument(\"--features_file\", nargs=\"+\", required=True,\n help=\"Features file.\")\n parser_score.add_argument(\"--predictions_file\", default=None,\n help=\"Predictions to score.\")\n\n parser_average_checkpoints = subparsers.add_parser(\n \"average_checkpoints\", help=\"Checkpoint averaging.\")\n parser_average_checkpoints.add_argument(\n \"--output_dir\", required=True,\n help=\"The output directory for the averaged checkpoint.\")\n parser_average_checkpoints.add_argument(\n \"--max_count\", type=int, default=8,\n help=\"The maximal number of checkpoints to average.\")\n\n parser_update_vocab = subparsers.add_parser(\n \"update_vocab\", help=\"Update model vocabularies in checkpoint.\")\n parser_update_vocab.add_argument(\n \"--output_dir\", required=True,\n help=\"The output directory for the updated checkpoint.\")\n parser_update_vocab.add_argument(\n \"--src_vocab\", default=None,\n help=\"Path to the new source vocabulary.\")\n parser_update_vocab.add_argument(\n \"--tgt_vocab\", default=None,\n help=\"Path to the new target vocabulary.\")\n\n # When using an option that takes multiple values just before the run type,\n # the run type is treated as a value of this option. 
To fix this issue, we\n # inject a placeholder option just before the run type to clearly separate it.\n parser.add_argument(\"--placeholder\", action=\"store_true\", help=argparse.SUPPRESS)\n run_types = set(subparsers.choices.keys())\n args = sys.argv[1:]\n for i, arg in enumerate(args):\n if arg in run_types:\n args.insert(i, \"--placeholder\")\n break\n\n args = parser.parse_args(args)\n if hasattr(args, \"features_file\") and args.features_file and len(args.features_file) == 1:\n args.features_file = args.features_file[0]\n\n _set_log_level(getattr(logging, args.log_level))\n tf.config.threading.set_intra_op_parallelism_threads(args.intra_op_parallelism_threads)\n tf.config.threading.set_inter_op_parallelism_threads(args.inter_op_parallelism_threads)\n\n gpus = tf.config.list_physical_devices(device_type=\"GPU\")\n if hasattr(args, \"horovod\") and args.horovod:\n import horovod.tensorflow as hvd # pylint: disable=import-outside-toplevel\n hvd.init()\n is_master = hvd.rank() == 0\n if gpus:\n local_gpu = gpus[hvd.local_rank()]\n tf.config.experimental.set_visible_devices(local_gpu, device_type=\"GPU\")\n gpus = [local_gpu]\n else:\n hvd = None\n is_master = True\n\n if args.gpu_allow_growth:\n for device in gpus:\n tf.config.experimental.set_memory_growth(device, enable=True)\n\n # Load and merge run configurations.\n config = load_config(args.config)\n if args.run_dir:\n config[\"model_dir\"] = os.path.join(args.run_dir, config[\"model_dir\"])\n if args.data_dir:\n config[\"data\"] = _prefix_paths(args.data_dir, config[\"data\"])\n\n if is_master and not tf.io.gfile.exists(config[\"model_dir\"]):\n tf.get_logger().info(\"Creating model directory %s\", config[\"model_dir\"])\n tf.io.gfile.makedirs(config[\"model_dir\"])\n\n model = load_model(\n config[\"model_dir\"],\n model_file=args.model,\n model_name=args.model_type,\n serialize_model=is_master,\n as_builder=True)\n runner = Runner(\n model,\n config,\n auto_config=args.auto_config,\n mixed_precision=args.mixed_precision,\n seed=args.seed)\n\n if args.run_type == \"train\":\n runner.train(\n num_devices=args.num_gpus,\n with_eval=args.with_eval,\n checkpoint_path=args.checkpoint_path,\n hvd=hvd)\n elif args.run_type == \"eval\":\n metrics = runner.evaluate(\n checkpoint_path=args.checkpoint_path,\n features_file=args.features_file,\n labels_file=args.labels_file)\n print(metrics)\n elif args.run_type == \"infer\":\n runner.infer(\n args.features_file,\n predictions_file=args.predictions_file,\n checkpoint_path=args.checkpoint_path,\n log_time=args.log_prediction_time)\n elif args.run_type == \"export\":\n runner.export(\n args.export_dir,\n checkpoint_path=args.checkpoint_path,\n exporter=exporters.make_exporter(args.export_format))\n elif args.run_type == \"score\":\n runner.score(\n args.features_file,\n args.predictions_file,\n checkpoint_path=args.checkpoint_path)\n elif args.run_type == \"average_checkpoints\":\n runner.average_checkpoints(args.output_dir, max_count=args.max_count)\n elif args.run_type == \"update_vocab\":\n runner.update_vocab(\n args.output_dir,\n src_vocab=args.src_vocab,\n tgt_vocab=args.tgt_vocab)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opennmt/bin/main.py"}]}
| 3,701 | 152 |
gh_patches_debug_37138
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-2936
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No posters for trakt show recommendations
### Before submitting your issue:
Enable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)
**Branch/Commit:**
**OS:**
**What you did:**
**What happened:**
**What you expected:**
**Logs:**
```
2017-06-27 05:58:49 DEBUG Thread-38 :: [b26de9c] Missing poster on TheTVDB, cause: WindowsError(3, 'The system cannot find the path specified')```
</issue>
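(A minimal sketch, with made-up names and paths, of the failure mode described above: `os.listdir` raises — WindowsError(3) in the report — when the poster cache subfolder was never created, so the folder has to be created first. This is illustrative, not Medusa code.)

```python
import os
import tempfile


def check_cache_for_poster(cache_dir, subfolder, tvdb_id):
    poster_dir = os.path.join(cache_dir, "images", subfolder)
    # Without this guard, os.listdir fails on a fresh cache with
    # "The system cannot find the path specified" (errno 3 on Windows).
    if not os.path.exists(poster_dir):
        os.makedirs(poster_dir)
    for name in os.listdir(poster_dir):
        path = os.path.join(poster_dir, name)
        if os.path.isfile(path) and name.split("-")[0] == str(tvdb_id):
            return name
    return False


cache_dir = tempfile.mkdtemp()
print(check_cache_for_poster(cache_dir, "trakt", 304219))  # False, but no exception
```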
<code>
[start of medusa/show/recommendations/trakt.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import logging
6 import os
7 import time
8
9 from medusa import app
10 from medusa.helper.common import try_int
11 from medusa.helper.exceptions import MultipleShowObjectsException
12 from medusa.indexers.indexer_api import indexerApi
13 from medusa.indexers.indexer_config import INDEXER_TVDBV2
14 from medusa.logger.adapters.style import BraceAdapter
15 from medusa.show.recommendations.recommended import RecommendedShow
16
17 import requests
18 from simpleanidb import Anidb
19 from traktor import (TokenExpiredException, TraktApi, TraktException)
20 from tvdbapiv2.exceptions import ApiException
21
22 log = BraceAdapter(logging.getLogger(__name__))
23 log.logger.addHandler(logging.NullHandler())
24
25
26 class MissingPosterList(list):
27 """Smart custom list, with a cache expiration.
28
29 A list used to store the trakt shows that do not have a poster on tvdb. This will prevent searches for posters
30 that have recently been searched using the tvdb's api, and resulted in a 404.
31 """
32
33 def __init__(self, items=None, cache_timeout=3600, implicit_clean=False):
34 """Initialize the MissingPosterList.
35
36 :param items: Provide the initial list.
37 :param cache_timeout: Timeout after which the item expires.
38 :param implicit_clean: If enabled, run the clean() method, to check for expired items. Else you'll have to run
39 this periodically.
40 """
41 list.__init__(self, items or [])
42 self.cache_timeout = cache_timeout
43 self.implicit_clean = implicit_clean
44
45 def append(self, item):
46 """Add new items to the list."""
47 if self.implicit_clean:
48 self.clean()
49 super(MissingPosterList, self).append((int(time.time()), item))
50
51 def clean(self):
52 """Use the cache_timeout to remove expired items."""
53 new_list = [_ for _ in self if _[0] + self.cache_timeout > int(time.time())]
54 self.__init__(new_list, self.cache_timeout, self.implicit_clean)
55
56 def has(self, value):
57 """Check if the value is in the list.
58
59 We need a smarter method to check if an item is already in the list. This will return a list with items that
60 match the value.
61 :param value: The value to check for.
62 :return: A list of tuples with matches. For example: (141234234, '12342').
63 """
64 if self.implicit_clean:
65 self.clean()
66 return [_ for _ in self if _[1] == value]
67
68
69 missing_posters = MissingPosterList(cache_timeout=3600 * 24 * 3) # Cache 3 days
70
71
72 class TraktPopular(object):
73 """This class retrieves a speficed recommended show list from Trakt.
74
75 The list of returned shows is mapped to a RecommendedShow object
76 """
77
78 def __init__(self):
79 """Initialize the trakt recommended list object."""
80 self.cache_subfolder = __name__.split('.')[-1] if '.' in __name__ else __name__
81 self.session = requests.Session()
82 self.recommender = "Trakt Popular"
83 self.default_img_src = 'trakt-default.png'
84 self.anidb = Anidb(cache_dir=app.CACHE_DIR)
85 self.tvdb_api_v2 = indexerApi(INDEXER_TVDBV2).indexer()
86
87 def _create_recommended_show(self, show_obj):
88 """Create the RecommendedShow object from the returned showobj."""
89 rec_show = RecommendedShow(self,
90 show_obj['show']['ids'], show_obj['show']['title'],
91 INDEXER_TVDBV2, # indexer
92 show_obj['show']['ids']['tvdb'],
93 **{'rating': show_obj['show']['rating'],
94 'votes': try_int(show_obj['show']['votes'], '0'),
95 'image_href': 'http://www.trakt.tv/shows/{0}'.format(show_obj['show']['ids']['slug']),
96 # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',
97 # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}
98 'ids': show_obj['show']['ids']
99 }
100 )
101
102 use_default = None
103 image = None
104 try:
105 if not missing_posters.has(show_obj['show']['ids']['tvdb']):
106 image = self.check_cache_for_poster(show_obj['show']['ids']['tvdb']) or \
107 self.tvdb_api_v2.series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name
108 else:
109 log.info('CACHE: Missing poster on TVDB for show {0}', show_obj['show']['title'])
110 use_default = self.default_img_src
111 except ApiException as error:
112 use_default = self.default_img_src
113 if getattr(error, 'status', None) == 404:
114 log.info('Missing poster on TheTVDB for show {0}', show_obj['show']['title'])
115 missing_posters.append(show_obj['show']['ids']['tvdb'])
116 except Exception as error:
117 use_default = self.default_img_src
118 log.debug('Missing poster on TheTVDB, cause: {0!r}', error)
119
120 rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)
121 # As the method below requires allot of resources, i've only enabled it when
122 # the shows language or country is 'jp' (japanese). Looks a litle bit akward,
123 # but alternative is allot of resource used
124 if 'jp' in [show_obj['show']['country'], show_obj['show']['language']]:
125 rec_show.check_if_anime(self.anidb, show_obj['show']['ids']['tvdb'])
126
127 return rec_show
128
129 @staticmethod
130 def fetch_and_refresh_token(trakt_api, path):
131 """Fetch shows from trakt and store the refresh token when needed."""
132 try:
133 library_shows = trakt_api.request(path) or []
134 if trakt_api.access_token_refreshed:
135 app.TRAKT_ACCESS_TOKEN = trakt_api.access_token
136 app.TRAKT_REFRESH_TOKEN = trakt_api.refresh_token
137 app.instance.save_config()
138 except TokenExpiredException:
139 app.TRAKT_ACCESS_TOKEN = ''
140 raise
141
142 return library_shows
143
144 def fetch_popular_shows(self, page_url=None, trakt_list=None): # pylint: disable=too-many-nested-blocks,too-many-branches
145 """Get a list of popular shows from different Trakt lists based on a provided trakt_list.
146
147 :param page_url: the page url opened to the base api url, for retreiving a specific list
148 :param trakt_list: a description of the trakt list
149 :return: A list of RecommendedShow objects, an empty list of none returned
150 :throw: ``Exception`` if an Exception is thrown not handled by the libtrats exceptions
151 """
152 trending_shows = []
153 removed_from_medusa = []
154
155 # Create a trakt settings dict
156 trakt_settings = {'trakt_api_secret': app.TRAKT_API_SECRET,
157 'trakt_api_key': app.TRAKT_API_KEY,
158 'trakt_access_token': app.TRAKT_ACCESS_TOKEN,
159 'trakt_refresh_token': app.TRAKT_REFRESH_TOKEN}
160
161 trakt_api = TraktApi(timeout=app.TRAKT_TIMEOUT, ssl_verify=app.SSL_VERIFY, **trakt_settings)
162
163 try: # pylint: disable=too-many-nested-blocks
164 not_liked_show = ''
165 if app.TRAKT_ACCESS_TOKEN != '':
166 library_shows = self.fetch_and_refresh_token(trakt_api, 'sync/watched/shows?extended=noseasons') + \
167 self.fetch_and_refresh_token(trakt_api, 'sync/collection/shows?extended=full')
168
169 medusa_shows = [show.indexerid for show in app.showList if show.indexerid]
170 removed_from_medusa = [lshow['show']['ids']['tvdb'] for lshow in library_shows if lshow['show']['ids']['tvdb'] not in medusa_shows]
171
172 if app.TRAKT_BLACKLIST_NAME is not None and app.TRAKT_BLACKLIST_NAME:
173 not_liked_show = trakt_api.request('users/' + app.TRAKT_USERNAME + '/lists/' +
174 app.TRAKT_BLACKLIST_NAME + '/items') or []
175 else:
176 log.debug('Trakt blacklist name is empty')
177
178 if trakt_list not in ['recommended', 'newshow', 'newseason']:
179 limit_show = '?limit=' + str(100 + len(not_liked_show)) + '&'
180 else:
181 limit_show = '?'
182
183 shows = self.fetch_and_refresh_token(trakt_api, page_url + limit_show + 'extended=full,images') or []
184
185 # Let's trigger a cache cleanup.
186 missing_posters.clean()
187
188 for show in shows:
189 try:
190 if 'show' not in show:
191 show['show'] = show
192
193 if not_liked_show:
194 if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb']
195 for show in not_liked_show if show['type'] == 'show'):
196 trending_shows.append(self._create_recommended_show(show))
197 else:
198 trending_shows.append(self._create_recommended_show(show))
199
200 except MultipleShowObjectsException:
201 continue
202
203 blacklist = app.TRAKT_BLACKLIST_NAME not in ''
204
205 except TraktException as error:
206 log.warning('Could not connect to Trakt service: {0}', error)
207 raise
208
209 return blacklist, trending_shows, removed_from_medusa
210
211 def check_cache_for_poster(self, tvdb_id):
212 """Verify if we already have a poster downloaded for this show."""
213 for image_file_name in os.listdir(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder))):
214 if os.path.isfile(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder, image_file_name))):
215 if str(tvdb_id) == image_file_name.split('-')[0]:
216 return image_file_name
217 return False
218
[end of medusa/show/recommendations/trakt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/show/recommendations/trakt.py b/medusa/show/recommendations/trakt.py
--- a/medusa/show/recommendations/trakt.py
+++ b/medusa/show/recommendations/trakt.py
@@ -104,7 +104,8 @@
try:
if not missing_posters.has(show_obj['show']['ids']['tvdb']):
image = self.check_cache_for_poster(show_obj['show']['ids']['tvdb']) or \
- self.tvdb_api_v2.series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name
+ self.tvdb_api_v2.config['session'].series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'],
+ key_type='poster').data[0].file_name
else:
log.info('CACHE: Missing poster on TVDB for show {0}', show_obj['show']['title'])
use_default = self.default_img_src
@@ -117,7 +118,11 @@
use_default = self.default_img_src
log.debug('Missing poster on TheTVDB, cause: {0!r}', error)
- rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)
+ if image:
+ rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)
+ else:
+ rec_show.cache_image('', default=use_default)
+
# As the method below requires allot of resources, i've only enabled it when
# the shows language or country is 'jp' (japanese). Looks a litle bit akward,
# but alternative is allot of resource used
@@ -210,6 +215,9 @@
def check_cache_for_poster(self, tvdb_id):
"""Verify if we already have a poster downloaded for this show."""
+ if not os.path.exists(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder)):
+ os.makedirs(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder))
+
for image_file_name in os.listdir(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder))):
if os.path.isfile(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder, image_file_name))):
if str(tvdb_id) == image_file_name.split('-')[0]:
|
{"golden_diff": "diff --git a/medusa/show/recommendations/trakt.py b/medusa/show/recommendations/trakt.py\n--- a/medusa/show/recommendations/trakt.py\n+++ b/medusa/show/recommendations/trakt.py\n@@ -104,7 +104,8 @@\n try:\n if not missing_posters.has(show_obj['show']['ids']['tvdb']):\n image = self.check_cache_for_poster(show_obj['show']['ids']['tvdb']) or \\\n- self.tvdb_api_v2.series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name\n+ self.tvdb_api_v2.config['session'].series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'],\n+ key_type='poster').data[0].file_name\n else:\n log.info('CACHE: Missing poster on TVDB for show {0}', show_obj['show']['title'])\n use_default = self.default_img_src\n@@ -117,7 +118,11 @@\n use_default = self.default_img_src\n log.debug('Missing poster on TheTVDB, cause: {0!r}', error)\n \n- rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)\n+ if image:\n+ rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)\n+ else:\n+ rec_show.cache_image('', default=use_default)\n+\n # As the method below requires allot of resources, i've only enabled it when\n # the shows language or country is 'jp' (japanese). Looks a litle bit akward,\n # but alternative is allot of resource used\n@@ -210,6 +215,9 @@\n \n def check_cache_for_poster(self, tvdb_id):\n \"\"\"Verify if we already have a poster downloaded for this show.\"\"\"\n+ if not os.path.exists(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder)):\n+ os.makedirs(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder))\n+\n for image_file_name in os.listdir(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder))):\n if os.path.isfile(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder, image_file_name))):\n if str(tvdb_id) == image_file_name.split('-')[0]:\n", "issue": "No posters for trakt show recommendations\n### Before submitting your issue:\r\n\r\nEnable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)\r\n\r\n**Branch/Commit:**\r\n**OS:**\r\n**What you did:**\r\n**What happened:**\r\n**What you expected:**\r\n**Logs:**\r\n```\r\n2017-06-27 05:58:49 DEBUG Thread-38 :: [b26de9c] Missing poster on TheTVDB, cause: WindowsError(3, 'The system cannot find the path specified')```\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport time\n\nfrom medusa import app\nfrom medusa.helper.common import try_int\nfrom medusa.helper.exceptions import MultipleShowObjectsException\nfrom medusa.indexers.indexer_api import indexerApi\nfrom medusa.indexers.indexer_config import INDEXER_TVDBV2\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.show.recommendations.recommended import RecommendedShow\n\nimport requests\nfrom simpleanidb import Anidb\nfrom traktor import (TokenExpiredException, TraktApi, TraktException)\nfrom tvdbapiv2.exceptions import ApiException\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass MissingPosterList(list):\n \"\"\"Smart custom list, with a cache expiration.\n\n A list used to store the trakt shows that do not have a poster on tvdb. 
This will prevent searches for posters\n that have recently been searched using the tvdb's api, and resulted in a 404.\n \"\"\"\n\n def __init__(self, items=None, cache_timeout=3600, implicit_clean=False):\n \"\"\"Initialize the MissingPosterList.\n\n :param items: Provide the initial list.\n :param cache_timeout: Timeout after which the item expires.\n :param implicit_clean: If enabled, run the clean() method, to check for expired items. Else you'll have to run\n this periodically.\n \"\"\"\n list.__init__(self, items or [])\n self.cache_timeout = cache_timeout\n self.implicit_clean = implicit_clean\n\n def append(self, item):\n \"\"\"Add new items to the list.\"\"\"\n if self.implicit_clean:\n self.clean()\n super(MissingPosterList, self).append((int(time.time()), item))\n\n def clean(self):\n \"\"\"Use the cache_timeout to remove expired items.\"\"\"\n new_list = [_ for _ in self if _[0] + self.cache_timeout > int(time.time())]\n self.__init__(new_list, self.cache_timeout, self.implicit_clean)\n\n def has(self, value):\n \"\"\"Check if the value is in the list.\n\n We need a smarter method to check if an item is already in the list. This will return a list with items that\n match the value.\n :param value: The value to check for.\n :return: A list of tuples with matches. For example: (141234234, '12342').\n \"\"\"\n if self.implicit_clean:\n self.clean()\n return [_ for _ in self if _[1] == value]\n\n\nmissing_posters = MissingPosterList(cache_timeout=3600 * 24 * 3) # Cache 3 days\n\n\nclass TraktPopular(object):\n \"\"\"This class retrieves a speficed recommended show list from Trakt.\n\n The list of returned shows is mapped to a RecommendedShow object\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize the trakt recommended list object.\"\"\"\n self.cache_subfolder = __name__.split('.')[-1] if '.' 
in __name__ else __name__\n self.session = requests.Session()\n self.recommender = \"Trakt Popular\"\n self.default_img_src = 'trakt-default.png'\n self.anidb = Anidb(cache_dir=app.CACHE_DIR)\n self.tvdb_api_v2 = indexerApi(INDEXER_TVDBV2).indexer()\n\n def _create_recommended_show(self, show_obj):\n \"\"\"Create the RecommendedShow object from the returned showobj.\"\"\"\n rec_show = RecommendedShow(self,\n show_obj['show']['ids'], show_obj['show']['title'],\n INDEXER_TVDBV2, # indexer\n show_obj['show']['ids']['tvdb'],\n **{'rating': show_obj['show']['rating'],\n 'votes': try_int(show_obj['show']['votes'], '0'),\n 'image_href': 'http://www.trakt.tv/shows/{0}'.format(show_obj['show']['ids']['slug']),\n # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',\n # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}\n 'ids': show_obj['show']['ids']\n }\n )\n\n use_default = None\n image = None\n try:\n if not missing_posters.has(show_obj['show']['ids']['tvdb']):\n image = self.check_cache_for_poster(show_obj['show']['ids']['tvdb']) or \\\n self.tvdb_api_v2.series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'], key_type='poster').data[0].file_name\n else:\n log.info('CACHE: Missing poster on TVDB for show {0}', show_obj['show']['title'])\n use_default = self.default_img_src\n except ApiException as error:\n use_default = self.default_img_src\n if getattr(error, 'status', None) == 404:\n log.info('Missing poster on TheTVDB for show {0}', show_obj['show']['title'])\n missing_posters.append(show_obj['show']['ids']['tvdb'])\n except Exception as error:\n use_default = self.default_img_src\n log.debug('Missing poster on TheTVDB, cause: {0!r}', error)\n\n rec_show.cache_image('http://thetvdb.com/banners/{0}'.format(image), default=use_default)\n # As the method below requires allot of resources, i've only enabled it when\n # the shows language or country is 'jp' (japanese). 
Looks a litle bit akward,\n # but alternative is allot of resource used\n if 'jp' in [show_obj['show']['country'], show_obj['show']['language']]:\n rec_show.check_if_anime(self.anidb, show_obj['show']['ids']['tvdb'])\n\n return rec_show\n\n @staticmethod\n def fetch_and_refresh_token(trakt_api, path):\n \"\"\"Fetch shows from trakt and store the refresh token when needed.\"\"\"\n try:\n library_shows = trakt_api.request(path) or []\n if trakt_api.access_token_refreshed:\n app.TRAKT_ACCESS_TOKEN = trakt_api.access_token\n app.TRAKT_REFRESH_TOKEN = trakt_api.refresh_token\n app.instance.save_config()\n except TokenExpiredException:\n app.TRAKT_ACCESS_TOKEN = ''\n raise\n\n return library_shows\n\n def fetch_popular_shows(self, page_url=None, trakt_list=None): # pylint: disable=too-many-nested-blocks,too-many-branches\n \"\"\"Get a list of popular shows from different Trakt lists based on a provided trakt_list.\n\n :param page_url: the page url opened to the base api url, for retreiving a specific list\n :param trakt_list: a description of the trakt list\n :return: A list of RecommendedShow objects, an empty list of none returned\n :throw: ``Exception`` if an Exception is thrown not handled by the libtrats exceptions\n \"\"\"\n trending_shows = []\n removed_from_medusa = []\n\n # Create a trakt settings dict\n trakt_settings = {'trakt_api_secret': app.TRAKT_API_SECRET,\n 'trakt_api_key': app.TRAKT_API_KEY,\n 'trakt_access_token': app.TRAKT_ACCESS_TOKEN,\n 'trakt_refresh_token': app.TRAKT_REFRESH_TOKEN}\n\n trakt_api = TraktApi(timeout=app.TRAKT_TIMEOUT, ssl_verify=app.SSL_VERIFY, **trakt_settings)\n\n try: # pylint: disable=too-many-nested-blocks\n not_liked_show = ''\n if app.TRAKT_ACCESS_TOKEN != '':\n library_shows = self.fetch_and_refresh_token(trakt_api, 'sync/watched/shows?extended=noseasons') + \\\n self.fetch_and_refresh_token(trakt_api, 'sync/collection/shows?extended=full')\n\n medusa_shows = [show.indexerid for show in app.showList if show.indexerid]\n removed_from_medusa = [lshow['show']['ids']['tvdb'] for lshow in library_shows if lshow['show']['ids']['tvdb'] not in medusa_shows]\n\n if app.TRAKT_BLACKLIST_NAME is not None and app.TRAKT_BLACKLIST_NAME:\n not_liked_show = trakt_api.request('users/' + app.TRAKT_USERNAME + '/lists/' +\n app.TRAKT_BLACKLIST_NAME + '/items') or []\n else:\n log.debug('Trakt blacklist name is empty')\n\n if trakt_list not in ['recommended', 'newshow', 'newseason']:\n limit_show = '?limit=' + str(100 + len(not_liked_show)) + '&'\n else:\n limit_show = '?'\n\n shows = self.fetch_and_refresh_token(trakt_api, page_url + limit_show + 'extended=full,images') or []\n\n # Let's trigger a cache cleanup.\n missing_posters.clean()\n\n for show in shows:\n try:\n if 'show' not in show:\n show['show'] = show\n\n if not_liked_show:\n if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb']\n for show in not_liked_show if show['type'] == 'show'):\n trending_shows.append(self._create_recommended_show(show))\n else:\n trending_shows.append(self._create_recommended_show(show))\n\n except MultipleShowObjectsException:\n continue\n\n blacklist = app.TRAKT_BLACKLIST_NAME not in ''\n\n except TraktException as error:\n log.warning('Could not connect to Trakt service: {0}', error)\n raise\n\n return blacklist, trending_shows, removed_from_medusa\n\n def check_cache_for_poster(self, tvdb_id):\n \"\"\"Verify if we already have a poster downloaded for this show.\"\"\"\n for image_file_name in os.listdir(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', 
self.cache_subfolder))):\n if os.path.isfile(os.path.abspath(os.path.join(app.CACHE_DIR, 'images', self.cache_subfolder, image_file_name))):\n if str(tvdb_id) == image_file_name.split('-')[0]:\n return image_file_name\n return False\n", "path": "medusa/show/recommendations/trakt.py"}]}
| 3,499 | 552 |
gh_patches_debug_36235
|
rasdani/github-patches
|
git_diff
|
pytorch__audio-1823
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
torchaudio: torch.quantization -> torch.ao.quantization
Summary:
This changes the imports in `torchaudio` to use the new import locations.
```
codemod -d pytorch/audio --extensions py 'torch.quantization' 'torch.ao.quantization'
```
Differential Revision: D31302450
</issue>
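(For illustration only — the codemod above is a plain string rewrite; this sketch just shows the resulting call site, with a fallback import for older PyTorch builds that predate `torch.ao.quantization`. It is not part of the referenced diff.)

```python
import torch

try:
    from torch.ao.quantization import quantize_dynamic  # new location
except ImportError:
    from torch.quantization import quantize_dynamic  # pre-torch.ao fallback

model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
quantized = quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
print(quantized)
```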
<code>
[start of examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py]
1 #!/usr/bin/env python3
2 import argparse
3 import logging
4 import os
5
6 import torch
7 import torchaudio
8 from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
9 from greedy_decoder import Decoder
10
11 _LG = logging.getLogger(__name__)
12
13
14 def _parse_args():
15 parser = argparse.ArgumentParser(
16 description=__doc__,
17 )
18 parser.add_argument(
19 '--model',
20 required=True,
21 help='Path to the input pretrained weight file.'
22 )
23 parser.add_argument(
24 '--output-path',
25 help='Path to the directory, where the Torchscript-ed pipelines are saved.',
26 )
27 parser.add_argument(
28 '--test-file',
29 help='Path to a test audio file.',
30 )
31 parser.add_argument(
32 '--quantize',
33 action='store_true',
34 help='Quantize the model.',
35 )
36 parser.add_argument(
37 '--debug',
38 action='store_true',
39 help=(
40 'When enabled, individual components are separately tested '
41 'for the numerical compatibility and TorchScript compatibility.'
42 )
43 )
44 return parser.parse_args()
45
46
47 class Loader(torch.nn.Module):
48 def forward(self, audio_path: str) -> torch.Tensor:
49 waveform, sample_rate = torchaudio.load(audio_path)
50 if sample_rate != 16000:
51 waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
52 return waveform
53
54
55 class Encoder(torch.nn.Module):
56 def __init__(self, encoder: torch.nn.Module):
57 super().__init__()
58 self.encoder = encoder
59
60 def forward(self, waveform: torch.Tensor) -> torch.Tensor:
61 result, _ = self.encoder(waveform)
62 return result[0]
63
64
65 def _get_model(model_id):
66 from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
67 tokenizer = Wav2Vec2Processor.from_pretrained(model_id).tokenizer
68 labels = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda kv: kv[1])]
69 original = Wav2Vec2ForCTC.from_pretrained(model_id)
70 model = import_huggingface_model(original)
71 return model.eval(), labels
72
73
74 def _get_decoder(labels):
75 return Decoder(labels)
76
77
78 def _main():
79 args = _parse_args()
80 _init_logging(args.debug)
81 _LG.info('Loading model: %s', args.model)
82 model, labels = _get_model(args.model)
83 _LG.info('Labels: %s', labels)
84 _LG.info('Building pipeline')
85 loader = Loader()
86 encoder = Encoder(model)
87 decoder = _get_decoder(labels)
88 _LG.info(encoder)
89
90 if args.quantize:
91 _LG.info('Quantizing the model')
92 model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
93 encoder = torch.quantization.quantize_dynamic(
94 encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
95 _LG.info(encoder)
96
97 # test
98 if args.test_file:
99 _LG.info('Testing with %s', args.test_file)
100 waveform = loader(args.test_file)
101 emission = encoder(waveform)
102 transcript = decoder(emission)
103 _LG.info(transcript)
104
105 torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
106 torch.jit.script(encoder).save(os.path.join(args.output_path, 'encoder.zip'))
107 torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
108
109
110 def _init_logging(debug=False):
111 level = logging.DEBUG if debug else logging.INFO
112 format_ = (
113 '%(message)s' if not debug else
114 '%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
115 )
116 logging.basicConfig(level=level, format=format_)
117
118
119 if __name__ == '__main__':
120 _main()
121
[end of examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py]
[start of examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py]
1 #!/usr/bin/evn python3
2 """Build Speech Recognition pipeline based on fairseq's wav2vec2.0 and dump it to TorchScript file.
3
4 To use this script, you need `fairseq`.
5 """
6 import os
7 import argparse
8 import logging
9
10 import torch
11 from torch.utils.mobile_optimizer import optimize_for_mobile
12 import torchaudio
13 from torchaudio.models.wav2vec2.utils.import_fairseq import import_fairseq_model
14 import fairseq
15
16 from greedy_decoder import Decoder
17
18 _LG = logging.getLogger(__name__)
19
20
21 def _parse_args():
22 parser = argparse.ArgumentParser(
23 description=__doc__,
24 )
25 parser.add_argument(
26 '--model-file',
27 required=True,
28 help='Path to the input pretrained weight file.'
29 )
30 parser.add_argument(
31 '--dict-dir',
32 help=(
33 'Path to the directory in which `dict.ltr.txt` file is found. '
34 'Required only when the model is finetuned.'
35 )
36 )
37 parser.add_argument(
38 '--output-path',
39 help='Path to the directory, where the TorchScript-ed pipelines are saved.',
40 )
41 parser.add_argument(
42 '--test-file',
43 help='Path to a test audio file.',
44 )
45 parser.add_argument(
46 '--debug',
47 action='store_true',
48 help=(
49 'When enabled, individual components are separately tested '
50 'for the numerical compatibility and TorchScript compatibility.'
51 )
52 )
53 parser.add_argument(
54 '--quantize',
55 action='store_true',
56 help='Apply quantization to model.'
57 )
58 parser.add_argument(
59 '--optimize-for-mobile',
60 action='store_true',
61 help='Apply optmization for mobile.'
62 )
63 return parser.parse_args()
64
65
66 class Loader(torch.nn.Module):
67 def forward(self, audio_path: str) -> torch.Tensor:
68 waveform, sample_rate = torchaudio.load(audio_path)
69 if sample_rate != 16000:
70 waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
71 return waveform
72
73
74 class Encoder(torch.nn.Module):
75 def __init__(self, encoder: torch.nn.Module):
76 super().__init__()
77 self.encoder = encoder
78
79 def forward(self, waveform: torch.Tensor) -> torch.Tensor:
80 result, _ = self.encoder(waveform)
81 return result[0]
82
83
84 def _get_decoder():
85 labels = [
86 "<s>",
87 "<pad>",
88 "</s>",
89 "<unk>",
90 "|",
91 "E",
92 "T",
93 "A",
94 "O",
95 "N",
96 "I",
97 "H",
98 "S",
99 "R",
100 "D",
101 "L",
102 "U",
103 "M",
104 "W",
105 "C",
106 "F",
107 "G",
108 "Y",
109 "P",
110 "B",
111 "V",
112 "K",
113 "'",
114 "X",
115 "J",
116 "Q",
117 "Z",
118 ]
119 return Decoder(labels)
120
121
122 def _load_fairseq_model(input_file, data_dir=None):
123 overrides = {}
124 if data_dir:
125 overrides['data'] = data_dir
126
127 model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
128 [input_file], arg_overrides=overrides
129 )
130 model = model[0]
131 return model
132
133
134 def _get_model(model_file, dict_dir):
135 original = _load_fairseq_model(model_file, dict_dir)
136 model = import_fairseq_model(original.w2v_encoder)
137 return model
138
139
140 def _main():
141 args = _parse_args()
142 _init_logging(args.debug)
143 loader = Loader()
144 model = _get_model(args.model_file, args.dict_dir).eval()
145 encoder = Encoder(model)
146 decoder = _get_decoder()
147 _LG.info(encoder)
148
149 if args.quantize:
150 _LG.info('Quantizing the model')
151 model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
152 encoder = torch.quantization.quantize_dynamic(
153 encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
154 _LG.info(encoder)
155
156 # test
157 if args.test_file:
158 _LG.info('Testing with %s', args.test_file)
159 waveform = loader(args.test_file)
160 emission = encoder(waveform)
161 transcript = decoder(emission)
162 _LG.info(transcript)
163
164 torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
165 torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
166 scripted = torch.jit.script(encoder)
167 if args.optimize_for_mobile:
168 scripted = optimize_for_mobile(scripted)
169 scripted.save(os.path.join(args.output_path, 'encoder.zip'))
170
171
172 def _init_logging(debug=False):
173 level = logging.DEBUG if debug else logging.INFO
174 format_ = (
175 '%(message)s' if not debug else
176 '%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
177 )
178 logging.basicConfig(level=level, format=format_)
179
180
181 if __name__ == '__main__':
182 _main()
183
[end of examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py b/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py
--- a/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py
+++ b/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py
@@ -6,6 +6,7 @@
import os
import argparse
import logging
+from typing import Tuple
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
@@ -15,6 +16,12 @@
from greedy_decoder import Decoder
+TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
+if TORCH_VERSION >= (1, 10):
+ import torch.ao.quantization as tq
+else:
+ import torch.quantization as tq
+
_LG = logging.getLogger(__name__)
@@ -149,7 +156,7 @@
if args.quantize:
_LG.info('Quantizing the model')
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
- encoder = torch.quantization.quantize_dynamic(
+ encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
diff --git a/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py b/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py
--- a/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py
+++ b/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py
@@ -2,12 +2,19 @@
import argparse
import logging
import os
+from typing import Tuple
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from greedy_decoder import Decoder
+TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
+if TORCH_VERSION >= (1, 10):
+ import torch.ao.quantization as tq
+else:
+ import torch.quantization as tq
+
_LG = logging.getLogger(__name__)
@@ -90,7 +97,7 @@
if args.quantize:
_LG.info('Quantizing the model')
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
- encoder = torch.quantization.quantize_dynamic(
+ encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
|
{"golden_diff": "diff --git a/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py b/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py\n--- a/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py\n+++ b/examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py\n@@ -6,6 +6,7 @@\n import os\n import argparse\n import logging\n+from typing import Tuple\n \n import torch\n from torch.utils.mobile_optimizer import optimize_for_mobile\n@@ -15,6 +16,12 @@\n \n from greedy_decoder import Decoder\n \n+TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(\".\")[:2])\n+if TORCH_VERSION >= (1, 10):\n+ import torch.ao.quantization as tq\n+else:\n+ import torch.quantization as tq\n+\n _LG = logging.getLogger(__name__)\n \n \n@@ -149,7 +156,7 @@\n if args.quantize:\n _LG.info('Quantizing the model')\n model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()\n- encoder = torch.quantization.quantize_dynamic(\n+ encoder = tq.quantize_dynamic(\n encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)\n _LG.info(encoder)\n \ndiff --git a/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py b/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py\n--- a/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py\n+++ b/examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py\n@@ -2,12 +2,19 @@\n import argparse\n import logging\n import os\n+from typing import Tuple\n \n import torch\n import torchaudio\n from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model\n from greedy_decoder import Decoder\n \n+TORCH_VERSION: Tuple[int, ...] 
= tuple(int(x) for x in torch.__version__.split(\".\")[:2])\n+if TORCH_VERSION >= (1, 10):\n+ import torch.ao.quantization as tq\n+else:\n+ import torch.quantization as tq\n+\n _LG = logging.getLogger(__name__)\n \n \n@@ -90,7 +97,7 @@\n if args.quantize:\n _LG.info('Quantizing the model')\n model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()\n- encoder = torch.quantization.quantize_dynamic(\n+ encoder = tq.quantize_dynamic(\n encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)\n _LG.info(encoder)\n", "issue": "torchaudio: torch.quantization -> torch.ao.quantization\nSummary:\nThis changes the imports in the `torchaudio` to include the new import locations.\n\n```\ncodemod -d pytorch/audio --extensions py 'torch.quantization' 'torch.ao.quantization'\n```\n\nDifferential Revision: D31302450\n\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport os\n\nimport torch\nimport torchaudio\nfrom torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model\nfrom greedy_decoder import Decoder\n\n_LG = logging.getLogger(__name__)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__,\n )\n parser.add_argument(\n '--model',\n required=True,\n help='Path to the input pretrained weight file.'\n )\n parser.add_argument(\n '--output-path',\n help='Path to the directory, where the Torchscript-ed pipelines are saved.',\n )\n parser.add_argument(\n '--test-file',\n help='Path to a test audio file.',\n )\n parser.add_argument(\n '--quantize',\n action='store_true',\n help='Quantize the model.',\n )\n parser.add_argument(\n '--debug',\n action='store_true',\n help=(\n 'When enabled, individual components are separately tested '\n 'for the numerical compatibility and TorchScript compatibility.'\n )\n )\n return parser.parse_args()\n\n\nclass Loader(torch.nn.Module):\n def forward(self, audio_path: str) -> torch.Tensor:\n waveform, sample_rate = torchaudio.load(audio_path)\n if sample_rate != 16000:\n waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)\n return waveform\n\n\nclass Encoder(torch.nn.Module):\n def __init__(self, encoder: torch.nn.Module):\n super().__init__()\n self.encoder = encoder\n\n def forward(self, waveform: torch.Tensor) -> torch.Tensor:\n result, _ = self.encoder(waveform)\n return result[0]\n\n\ndef _get_model(model_id):\n from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\n tokenizer = Wav2Vec2Processor.from_pretrained(model_id).tokenizer\n labels = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda kv: kv[1])]\n original = Wav2Vec2ForCTC.from_pretrained(model_id)\n model = import_huggingface_model(original)\n return model.eval(), labels\n\n\ndef _get_decoder(labels):\n return Decoder(labels)\n\n\ndef _main():\n args = _parse_args()\n _init_logging(args.debug)\n _LG.info('Loading model: %s', args.model)\n model, labels = _get_model(args.model)\n _LG.info('Labels: %s', labels)\n _LG.info('Building pipeline')\n loader = Loader()\n encoder = Encoder(model)\n decoder = _get_decoder(labels)\n _LG.info(encoder)\n\n if args.quantize:\n _LG.info('Quantizing the model')\n model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()\n encoder = torch.quantization.quantize_dynamic(\n encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)\n _LG.info(encoder)\n\n # test\n if args.test_file:\n _LG.info('Testing with %s', args.test_file)\n waveform = loader(args.test_file)\n emission = encoder(waveform)\n transcript = 
decoder(emission)\n _LG.info(transcript)\n\n torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))\n torch.jit.script(encoder).save(os.path.join(args.output_path, 'encoder.zip'))\n torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))\n\n\ndef _init_logging(debug=False):\n level = logging.DEBUG if debug else logging.INFO\n format_ = (\n '%(message)s' if not debug else\n '%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'\n )\n logging.basicConfig(level=level, format=format_)\n\n\nif __name__ == '__main__':\n _main()\n", "path": "examples/libtorchaudio/speech_recognition/build_pipeline_from_huggingface_transformers.py"}, {"content": "#!/usr/bin/evn python3\n\"\"\"Build Speech Recognition pipeline based on fairseq's wav2vec2.0 and dump it to TorchScript file.\n\nTo use this script, you need `fairseq`.\n\"\"\"\nimport os\nimport argparse\nimport logging\n\nimport torch\nfrom torch.utils.mobile_optimizer import optimize_for_mobile\nimport torchaudio\nfrom torchaudio.models.wav2vec2.utils.import_fairseq import import_fairseq_model\nimport fairseq\n\nfrom greedy_decoder import Decoder\n\n_LG = logging.getLogger(__name__)\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__,\n )\n parser.add_argument(\n '--model-file',\n required=True,\n help='Path to the input pretrained weight file.'\n )\n parser.add_argument(\n '--dict-dir',\n help=(\n 'Path to the directory in which `dict.ltr.txt` file is found. '\n 'Required only when the model is finetuned.'\n )\n )\n parser.add_argument(\n '--output-path',\n help='Path to the directory, where the TorchScript-ed pipelines are saved.',\n )\n parser.add_argument(\n '--test-file',\n help='Path to a test audio file.',\n )\n parser.add_argument(\n '--debug',\n action='store_true',\n help=(\n 'When enabled, individual components are separately tested '\n 'for the numerical compatibility and TorchScript compatibility.'\n )\n )\n parser.add_argument(\n '--quantize',\n action='store_true',\n help='Apply quantization to model.'\n )\n parser.add_argument(\n '--optimize-for-mobile',\n action='store_true',\n help='Apply optmization for mobile.'\n )\n return parser.parse_args()\n\n\nclass Loader(torch.nn.Module):\n def forward(self, audio_path: str) -> torch.Tensor:\n waveform, sample_rate = torchaudio.load(audio_path)\n if sample_rate != 16000:\n waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)\n return waveform\n\n\nclass Encoder(torch.nn.Module):\n def __init__(self, encoder: torch.nn.Module):\n super().__init__()\n self.encoder = encoder\n\n def forward(self, waveform: torch.Tensor) -> torch.Tensor:\n result, _ = self.encoder(waveform)\n return result[0]\n\n\ndef _get_decoder():\n labels = [\n \"<s>\",\n \"<pad>\",\n \"</s>\",\n \"<unk>\",\n \"|\",\n \"E\",\n \"T\",\n \"A\",\n \"O\",\n \"N\",\n \"I\",\n \"H\",\n \"S\",\n \"R\",\n \"D\",\n \"L\",\n \"U\",\n \"M\",\n \"W\",\n \"C\",\n \"F\",\n \"G\",\n \"Y\",\n \"P\",\n \"B\",\n \"V\",\n \"K\",\n \"'\",\n \"X\",\n \"J\",\n \"Q\",\n \"Z\",\n ]\n return Decoder(labels)\n\n\ndef _load_fairseq_model(input_file, data_dir=None):\n overrides = {}\n if data_dir:\n overrides['data'] = data_dir\n\n model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(\n [input_file], arg_overrides=overrides\n )\n model = model[0]\n return model\n\n\ndef _get_model(model_file, dict_dir):\n original = _load_fairseq_model(model_file, dict_dir)\n model = import_fairseq_model(original.w2v_encoder)\n return model\n\n\ndef 
_main():\n args = _parse_args()\n _init_logging(args.debug)\n loader = Loader()\n model = _get_model(args.model_file, args.dict_dir).eval()\n encoder = Encoder(model)\n decoder = _get_decoder()\n _LG.info(encoder)\n\n if args.quantize:\n _LG.info('Quantizing the model')\n model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()\n encoder = torch.quantization.quantize_dynamic(\n encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)\n _LG.info(encoder)\n\n # test\n if args.test_file:\n _LG.info('Testing with %s', args.test_file)\n waveform = loader(args.test_file)\n emission = encoder(waveform)\n transcript = decoder(emission)\n _LG.info(transcript)\n\n torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))\n torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))\n scripted = torch.jit.script(encoder)\n if args.optimize_for_mobile:\n scripted = optimize_for_mobile(scripted)\n scripted.save(os.path.join(args.output_path, 'encoder.zip'))\n\n\ndef _init_logging(debug=False):\n level = logging.DEBUG if debug else logging.INFO\n format_ = (\n '%(message)s' if not debug else\n '%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'\n )\n logging.basicConfig(level=level, format=format_)\n\n\nif __name__ == '__main__':\n _main()\n", "path": "examples/libtorchaudio/speech_recognition/build_pipeline_from_fairseq.py"}]}
| 3,333 | 597 |
gh_patches_debug_13734
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-944
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
History entry fields consistency
We reached a situation where we have `record_id` different than `data.target.id`. This should not be possible oO
```json
{
"action": "update",
"collection_id": "gfx",
"date": "2016-11-21T10:09:00.225241",
"id": "ae7e6831-11ad-49da-a739-a6fc25a913e8",
"last_modified": 1479722940230,
"record_id": "5bbd4751-5616-a3cc-e31c-1d04ade888bf",
"resource_name": "record",
"target": {
"data": {
"blockID": "g1239",
"details": {
"bug": "https://bugzilla.mozilla.org/show_bug.cgi?id=838845",
"created": "2016-10-18T09:29:16Z",
"name": "GFX1-Val",
"who": "test",
"why": "test"
},
"devices": [
"0x4396"
],
"driverVersion": "6.1.7601.18328",
"driverVersionComparator": "LESS_THAN_OR_EQUAL",
"enabled": true,
"feature": "DIRECT3D_10_LAYERS",
"featureStatus": "BLOCKED_DRIVER_VERSION",
"id": "cf217981-2c42-76bf-df19-4324a194a075",
"last_modified": 1479722940206,
"os": "All",
"schema": 1478713951432,
"vendor": "0x1002"
},
"permissions": {
"write": [
"basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f"
]
}
},
"timestamp": 1478713951077,
"uri": "/buckets/staging/collections/gfx/records/cf217981-2c42-76bf-df19-4324a194a075",
"user_id": "basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f"
},
```
History entry fields consistency
We reached a situation where we have `record_id` different than `data.target.id`. This should not be possible oO
```json
{
"action": "update",
"collection_id": "gfx",
"date": "2016-11-21T10:09:00.225241",
"id": "ae7e6831-11ad-49da-a739-a6fc25a913e8",
"last_modified": 1479722940230,
"record_id": "5bbd4751-5616-a3cc-e31c-1d04ade888bf",
"resource_name": "record",
"target": {
"data": {
"blockID": "g1239",
"details": {
"bug": "https://bugzilla.mozilla.org/show_bug.cgi?id=838845",
"created": "2016-10-18T09:29:16Z",
"name": "GFX1-Val",
"who": "test",
"why": "test"
},
"devices": [
"0x4396"
],
"driverVersion": "6.1.7601.18328",
"driverVersionComparator": "LESS_THAN_OR_EQUAL",
"enabled": true,
"feature": "DIRECT3D_10_LAYERS",
"featureStatus": "BLOCKED_DRIVER_VERSION",
"id": "cf217981-2c42-76bf-df19-4324a194a075",
"last_modified": 1479722940206,
"os": "All",
"schema": 1478713951432,
"vendor": "0x1002"
},
"permissions": {
"write": [
"basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f"
]
}
},
"timestamp": 1478713951077,
"uri": "/buckets/staging/collections/gfx/records/cf217981-2c42-76bf-df19-4324a194a075",
"user_id": "basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f"
},
```
</issue>
<code>
[start of kinto/plugins/history/listener.py]
1 from kinto.core.utils import instance_uri
2 from datetime import datetime
3
4
5 def on_resource_changed(event):
6 """
7 Everytime an object is created/changed/deleted, we create an entry in the
8 ``history`` resource. The entries are served as read-only in the
9 :mod:`kinto.plugins.history.views` module.
10 """
11 payload = event.payload
12 resource_name = payload['resource_name']
13 event_uri = payload['uri']
14
15 bucket_id = None
16 bucket_uri = None
17 collection_uri = None
18
19 storage = event.request.registry.storage
20 permission = event.request.registry.permission
21
22 targets = []
23 for impacted in event.impacted_records:
24 target = impacted['new']
25 obj_id = target['id']
26
27 try:
28 bucket_id = payload['bucket_id']
29 except KeyError:
30 # e.g. DELETE /buckets
31 bucket_id = obj_id
32 bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
33
34 if 'collection_id' in payload:
35 collection_id = payload['collection_id']
36 collection_uri = instance_uri(event.request,
37 'collection',
38 bucket_id=bucket_id,
39 id=collection_id)
40
41 # On POST .../records, the URI does not contain the newly created
42 # record id.
43 parts = event_uri.split('/')
44 if resource_name in parts[-1]:
45 parts.append(obj_id)
46 else:
47 # Make sure the id is correct on grouped events.
48 parts[-1] = obj_id
49 uri = '/'.join(parts)
50 targets.append((uri, target))
51
52 # Prepare a list of object ids to be fetched from permission backend,
53 # and fetch them all at once. Use a mapping for later convenience.
54 all_perms_objects_ids = [oid for (oid, _) in targets]
55 all_perms_objects_ids.append(bucket_uri)
56 if collection_uri is not None:
57 all_perms_objects_ids.append(collection_uri)
58 all_perms_objects_ids = list(set(all_perms_objects_ids))
59 all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
60 perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))
61
62 bucket_perms = perms_by_object_id[bucket_uri]
63 collection_perms = {}
64 if collection_uri is not None:
65 collection_perms = perms_by_object_id[collection_uri]
66
67 # The principals allowed to read the bucket and collection.
68 # (Note: ``write`` means ``read``)
69 read_principals = set(bucket_perms.get('read', []))
70 read_principals.update(bucket_perms.get('write', []))
71 read_principals.update(collection_perms.get('read', []))
72 read_principals.update(collection_perms.get('write', []))
73
74 # Create a history entry for each impacted record.
75 for (uri, target) in targets:
76 obj_id = target['id']
77 # Prepare the history entry attributes.
78 perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
79 eventattrs = dict(**payload)
80 eventattrs.pop('bucket_id', None)
81 eventattrs.setdefault('%s_id' % resource_name, obj_id)
82 eventattrs['uri'] = uri
83 attrs = dict(date=datetime.now().isoformat(),
84 target={'data': target, 'permissions': perms},
85 **eventattrs)
86
87 # Create a record for the 'history' resource, whose parent_id is
88 # the bucket URI (c.f. views.py).
89 # Note: this will be rolledback if the transaction is rolledback.
90 entry = storage.create(parent_id=bucket_uri,
91 collection_id='history',
92 record=attrs)
93
94 # The read permission on the newly created history entry is the union
95 # of the record permissions with the one from bucket and collection.
96 entry_principals = set(read_principals)
97 entry_principals.update(perms.get('read', []))
98 entry_principals.update(perms.get('write', []))
99 entry_perms = {'read': list(entry_principals)}
100 # /buckets/{id}/history is the URI for the list of history entries.
101 entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])
102 permission.replace_object_permissions(entry_perm_id, entry_perms)
103
[end of kinto/plugins/history/listener.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py
--- a/kinto/plugins/history/listener.py
+++ b/kinto/plugins/history/listener.py
@@ -77,8 +77,9 @@
# Prepare the history entry attributes.
perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
eventattrs = dict(**payload)
+ eventattrs.pop('timestamp', None) # Already in target `last_modified`.
eventattrs.pop('bucket_id', None)
- eventattrs.setdefault('%s_id' % resource_name, obj_id)
+ eventattrs['%s_id' % resource_name] = obj_id
eventattrs['uri'] = uri
attrs = dict(date=datetime.now().isoformat(),
target={'data': target, 'permissions': perms},
|
{"golden_diff": "diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py\n--- a/kinto/plugins/history/listener.py\n+++ b/kinto/plugins/history/listener.py\n@@ -77,8 +77,9 @@\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n+ eventattrs.pop('timestamp', None) # Already in target `last_modified`.\n eventattrs.pop('bucket_id', None)\n- eventattrs.setdefault('%s_id' % resource_name, obj_id)\n+ eventattrs['%s_id' % resource_name] = obj_id\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n", "issue": "History entry fields consistency\nWe reached a situation where we have `record_id` different than `data.target.id`. This should not be possible oO\r\n\r\n```json\r\n {\r\n \"action\": \"update\",\r\n \"collection_id\": \"gfx\",\r\n \"date\": \"2016-11-21T10:09:00.225241\",\r\n \"id\": \"ae7e6831-11ad-49da-a739-a6fc25a913e8\",\r\n \"last_modified\": 1479722940230,\r\n \"record_id\": \"5bbd4751-5616-a3cc-e31c-1d04ade888bf\",\r\n \"resource_name\": \"record\",\r\n \"target\": {\r\n \"data\": {\r\n \"blockID\": \"g1239\",\r\n \"details\": {\r\n \"bug\": \"https://bugzilla.mozilla.org/show_bug.cgi?id=838845\",\r\n \"created\": \"2016-10-18T09:29:16Z\",\r\n \"name\": \"GFX1-Val\",\r\n \"who\": \"test\",\r\n \"why\": \"test\"\r\n },\r\n \"devices\": [\r\n \"0x4396\"\r\n ],\r\n \"driverVersion\": \"6.1.7601.18328\",\r\n \"driverVersionComparator\": \"LESS_THAN_OR_EQUAL\",\r\n \"enabled\": true,\r\n \"feature\": \"DIRECT3D_10_LAYERS\",\r\n \"featureStatus\": \"BLOCKED_DRIVER_VERSION\",\r\n \"id\": \"cf217981-2c42-76bf-df19-4324a194a075\",\r\n \"last_modified\": 1479722940206,\r\n \"os\": \"All\",\r\n \"schema\": 1478713951432,\r\n \"vendor\": \"0x1002\"\r\n },\r\n \"permissions\": {\r\n \"write\": [\r\n \"basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f\"\r\n ]\r\n }\r\n },\r\n \"timestamp\": 1478713951077,\r\n \"uri\": \"/buckets/staging/collections/gfx/records/cf217981-2c42-76bf-df19-4324a194a075\",\r\n \"user_id\": \"basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f\"\r\n },\r\n\r\n```\nHistory entry fields consistency\nWe reached a situation where we have `record_id` different than `data.target.id`. 
This should not be possible oO\r\n\r\n```json\r\n {\r\n \"action\": \"update\",\r\n \"collection_id\": \"gfx\",\r\n \"date\": \"2016-11-21T10:09:00.225241\",\r\n \"id\": \"ae7e6831-11ad-49da-a739-a6fc25a913e8\",\r\n \"last_modified\": 1479722940230,\r\n \"record_id\": \"5bbd4751-5616-a3cc-e31c-1d04ade888bf\",\r\n \"resource_name\": \"record\",\r\n \"target\": {\r\n \"data\": {\r\n \"blockID\": \"g1239\",\r\n \"details\": {\r\n \"bug\": \"https://bugzilla.mozilla.org/show_bug.cgi?id=838845\",\r\n \"created\": \"2016-10-18T09:29:16Z\",\r\n \"name\": \"GFX1-Val\",\r\n \"who\": \"test\",\r\n \"why\": \"test\"\r\n },\r\n \"devices\": [\r\n \"0x4396\"\r\n ],\r\n \"driverVersion\": \"6.1.7601.18328\",\r\n \"driverVersionComparator\": \"LESS_THAN_OR_EQUAL\",\r\n \"enabled\": true,\r\n \"feature\": \"DIRECT3D_10_LAYERS\",\r\n \"featureStatus\": \"BLOCKED_DRIVER_VERSION\",\r\n \"id\": \"cf217981-2c42-76bf-df19-4324a194a075\",\r\n \"last_modified\": 1479722940206,\r\n \"os\": \"All\",\r\n \"schema\": 1478713951432,\r\n \"vendor\": \"0x1002\"\r\n },\r\n \"permissions\": {\r\n \"write\": [\r\n \"basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f\"\r\n ]\r\n }\r\n },\r\n \"timestamp\": 1478713951077,\r\n \"uri\": \"/buckets/staging/collections/gfx/records/cf217981-2c42-76bf-df19-4324a194a075\",\r\n \"user_id\": \"basicauth:3fa5a813e05061f219048d6b0d0a398ed99868213af0571886b76c2423dc097f\"\r\n },\r\n\r\n```\n", "before_files": [{"content": "from kinto.core.utils import instance_uri\nfrom datetime import datetime\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we create an entry in the\n ``history`` resource. The entries are served as read-only in the\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n bucket_id = None\n bucket_uri = None\n collection_uri = None\n\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new']\n obj_id = target['id']\n\n try:\n bucket_id = payload['bucket_id']\n except KeyError:\n # e.g. DELETE /buckets\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # On POST .../records, the URI does not contain the newly created\n # record id.\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n targets.append((uri, target))\n\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. 
Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n all_perms_objects_ids.append(bucket_uri)\n if collection_uri is not None:\n all_perms_objects_ids.append(collection_uri)\n all_perms_objects_ids = list(set(all_perms_objects_ids))\n all_permissions = permission.get_objects_permissions(all_perms_objects_ids)\n perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))\n\n bucket_perms = perms_by_object_id[bucket_uri]\n collection_perms = {}\n if collection_uri is not None:\n collection_perms = perms_by_object_id[collection_uri]\n\n # The principals allowed to read the bucket and collection.\n # (Note: ``write`` means ``read``)\n read_principals = set(bucket_perms.get('read', []))\n read_principals.update(bucket_perms.get('write', []))\n read_principals.update(collection_perms.get('read', []))\n read_principals.update(collection_perms.get('write', []))\n\n # Create a history entry for each impacted record.\n for (uri, target) in targets:\n obj_id = target['id']\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n eventattrs.pop('bucket_id', None)\n eventattrs.setdefault('%s_id' % resource_name, obj_id)\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n **eventattrs)\n\n # Create a record for the 'history' resource, whose parent_id is\n # the bucket URI (c.f. views.py).\n # Note: this will be rolledback if the transaction is rolledback.\n entry = storage.create(parent_id=bucket_uri,\n collection_id='history',\n record=attrs)\n\n # The read permission on the newly created history entry is the union\n # of the record permissions with the one from bucket and collection.\n entry_principals = set(read_principals)\n entry_principals.update(perms.get('read', []))\n entry_principals.update(perms.get('write', []))\n entry_perms = {'read': list(entry_principals)}\n # /buckets/{id}/history is the URI for the list of history entries.\n entry_perm_id = '/buckets/%s/history/%s' % (bucket_id, entry['id'])\n permission.replace_object_permissions(entry_perm_id, entry_perms)\n", "path": "kinto/plugins/history/listener.py"}]}
| 3,036 | 185 |
gh_patches_debug_1912
|
rasdani/github-patches
|
git_diff
|
aio-libs__aiohttp-4057
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: 'ABCMeta' aiohttp==3.6.0, Python 3.6.9
## Long story short
Cant import aiohttp
pip freeze gives: aiohttp==3.6.0
python3 version: Python 3.6.9
import aiohttp
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/aiohttp/__init__.py", line 6, in <module>
from .client import BaseConnector as BaseConnector
File "/usr/local/lib/python3.6/site-packages/aiohttp/client.py", line 63, in <module>
from .client_reqrep import ClientRequest as ClientRequest
File "/usr/local/lib/python3.6/site-packages/aiohttp/client_reqrep.py", line 29, in <module>
from . import hdrs, helpers, http, multipart, payload
File "/usr/local/lib/python3.6/site-packages/aiohttp/multipart.py", line 703, in <module>
class MultipartWriter(Payload):
File "/usr/local/lib/python3.6/site-packages/aiohttp/multipart.py", line 786, in MultipartWriter
headers: Optional[MultiMapping[str]]=None
TypeError: 'ABCMeta' object is not subscriptable
Any known restriction, what I am missing?
</issue>
<code>
[start of setup.py]
1 import codecs
2 import os
3 import pathlib
4 import re
5 import sys
6 from distutils.command.build_ext import build_ext
7 from distutils.errors import (CCompilerError, DistutilsExecError,
8 DistutilsPlatformError)
9
10 from setuptools import Extension, setup
11
12
13 if sys.version_info < (3, 5, 3):
14 raise RuntimeError("aiohttp 3.x requires Python 3.5.3+")
15
16
17 NO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS')) # type: bool
18
19 if sys.implementation.name != "cpython":
20 NO_EXTENSIONS = True
21
22
23 here = pathlib.Path(__file__).parent
24
25 if (here / '.git').exists() and not (here / 'vendor/http-parser/README.md').exists():
26 print("Install submodules when building from git clone", file=sys.stderr)
27 print("Hint:", file=sys.stderr)
28 print(" git submodule update --init", file=sys.stderr)
29 sys.exit(2)
30
31
32 # NOTE: makefile cythonizes all Cython modules
33
34 extensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket.c']),
35 Extension('aiohttp._http_parser',
36 ['aiohttp/_http_parser.c',
37 'vendor/http-parser/http_parser.c',
38 'aiohttp/_find_header.c'],
39 define_macros=[('HTTP_PARSER_STRICT', 0)],
40 ),
41 Extension('aiohttp._frozenlist',
42 ['aiohttp/_frozenlist.c']),
43 Extension('aiohttp._helpers',
44 ['aiohttp/_helpers.c']),
45 Extension('aiohttp._http_writer',
46 ['aiohttp/_http_writer.c'])]
47
48
49 class BuildFailed(Exception):
50 pass
51
52
53 class ve_build_ext(build_ext):
54 # This class allows C extension building to fail.
55
56 def run(self):
57 try:
58 build_ext.run(self)
59 except (DistutilsPlatformError, FileNotFoundError):
60 raise BuildFailed()
61
62 def build_extension(self, ext):
63 try:
64 build_ext.build_extension(self, ext)
65 except (CCompilerError, DistutilsExecError,
66 DistutilsPlatformError, ValueError):
67 raise BuildFailed()
68
69
70
71 txt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')
72 try:
73 version = re.findall(r"^__version__ = '([^']+)'\r?$",
74 txt, re.M)[0]
75 except IndexError:
76 raise RuntimeError('Unable to determine version.')
77
78 install_requires = [
79 'attrs>=17.3.0',
80 'chardet>=2.0,<4.0',
81 'multidict>=4.0,<5.0',
82 'async_timeout>=3.0,<4.0',
83 'yarl>=1.0,<2.0',
84 'idna-ssl>=1.0; python_version<"3.7"',
85 'typing_extensions>=3.6.5',
86 ]
87
88
89 def read(f):
90 return (here / f).read_text('utf-8').strip()
91
92
93 args = dict(
94 name='aiohttp',
95 version=version,
96 description='Async http client/server framework (asyncio)',
97 long_description='\n\n'.join((read('README.rst'), read('CHANGES.rst'))),
98 long_description_content_type="text/x-rst",
99 classifiers=[
100 'License :: OSI Approved :: Apache Software License',
101 'Intended Audience :: Developers',
102 'Programming Language :: Python',
103 'Programming Language :: Python :: 3',
104 'Programming Language :: Python :: 3.5',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Development Status :: 5 - Production/Stable',
108 'Operating System :: POSIX',
109 'Operating System :: MacOS :: MacOS X',
110 'Operating System :: Microsoft :: Windows',
111 'Topic :: Internet :: WWW/HTTP',
112 'Framework :: AsyncIO',
113 ],
114 author='Nikolay Kim',
115 author_email='[email protected]',
116 maintainer=', '.join(('Nikolay Kim <[email protected]>',
117 'Andrew Svetlov <[email protected]>')),
118 maintainer_email='[email protected]',
119 url='https://github.com/aio-libs/aiohttp',
120 project_urls={
121 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',
122 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',
123 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',
124 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',
125 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',
126 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',
127 'Docs: RTD': 'https://docs.aiohttp.org',
128 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',
129 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',
130 },
131 license='Apache 2',
132 packages=['aiohttp'],
133 python_requires='>=3.5.3',
134 install_requires=install_requires,
135 extras_require={
136 'speedups': [
137 'aiodns',
138 'Brotli',
139 'cchardet',
140 ],
141 },
142 include_package_data=True,
143 )
144
145 if not NO_EXTENSIONS:
146 print("**********************")
147 print("* Accellerated build *")
148 print("**********************")
149 setup(ext_modules=extensions,
150 cmdclass=dict(build_ext=ve_build_ext),
151 **args)
152 else:
153 print("*********************")
154 print("* Pure Python build *")
155 print("*********************")
156 setup(**args)
157
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@
install_requires = [
'attrs>=17.3.0',
'chardet>=2.0,<4.0',
- 'multidict>=4.0,<5.0',
+ 'multidict>=4.5,<5.0',
'async_timeout>=3.0,<4.0',
'yarl>=1.0,<2.0',
'idna-ssl>=1.0; python_version<"3.7"',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,7 +78,7 @@\n install_requires = [\n 'attrs>=17.3.0',\n 'chardet>=2.0,<4.0',\n- 'multidict>=4.0,<5.0',\n+ 'multidict>=4.5,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0',\n 'idna-ssl>=1.0; python_version<\"3.7\"',\n", "issue": "TypeError: 'ABCMeta' aiohttp==3.6.0, Python 3.6.9\n## Long story short\r\nCant import aiohttp\r\n\r\npip freeze gives: aiohttp==3.6.0 \r\npython3 version: Python 3.6.9\r\n\r\n import aiohttp\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.6/site-packages/aiohttp/__init__.py\", line 6, in <module>\r\n from .client import BaseConnector as BaseConnector\r\n File \"/usr/local/lib/python3.6/site-packages/aiohttp/client.py\", line 63, in <module>\r\n from .client_reqrep import ClientRequest as ClientRequest\r\n File \"/usr/local/lib/python3.6/site-packages/aiohttp/client_reqrep.py\", line 29, in <module>\r\n from . import hdrs, helpers, http, multipart, payload\r\n File \"/usr/local/lib/python3.6/site-packages/aiohttp/multipart.py\", line 703, in <module>\r\n class MultipartWriter(Payload):\r\n File \"/usr/local/lib/python3.6/site-packages/aiohttp/multipart.py\", line 786, in MultipartWriter\r\n headers: Optional[MultiMapping[str]]=None\r\nTypeError: 'ABCMeta' object is not subscriptable\r\n\r\n\r\nAny known restriction, what I am missing? \r\n\r\n\n", "before_files": [{"content": "import codecs\nimport os\nimport pathlib\nimport re\nimport sys\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (CCompilerError, DistutilsExecError,\n DistutilsPlatformError)\n\nfrom setuptools import Extension, setup\n\n\nif sys.version_info < (3, 5, 3):\n raise RuntimeError(\"aiohttp 3.x requires Python 3.5.3+\")\n\n\nNO_EXTENSIONS = bool(os.environ.get('AIOHTTP_NO_EXTENSIONS')) # type: bool\n\nif sys.implementation.name != \"cpython\":\n NO_EXTENSIONS = True\n\n\nhere = pathlib.Path(__file__).parent\n\nif (here / '.git').exists() and not (here / 'vendor/http-parser/README.md').exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\n# NOTE: makefile cythonizes all Cython modules\n\nextensions = [Extension('aiohttp._websocket', ['aiohttp/_websocket.c']),\n Extension('aiohttp._http_parser',\n ['aiohttp/_http_parser.c',\n 'vendor/http-parser/http_parser.c',\n 'aiohttp/_find_header.c'],\n define_macros=[('HTTP_PARSER_STRICT', 0)],\n ),\n Extension('aiohttp._frozenlist',\n ['aiohttp/_frozenlist.c']),\n Extension('aiohttp._helpers',\n ['aiohttp/_helpers.c']),\n Extension('aiohttp._http_writer',\n ['aiohttp/_http_writer.c'])]\n\n\nclass BuildFailed(Exception):\n pass\n\n\nclass ve_build_ext(build_ext):\n # This class allows C extension building to fail.\n\n def run(self):\n try:\n build_ext.run(self)\n except (DistutilsPlatformError, FileNotFoundError):\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError,\n DistutilsPlatformError, ValueError):\n raise BuildFailed()\n\n\n\ntxt = (here / 'aiohttp' / '__init__.py').read_text('utf-8')\ntry:\n version = re.findall(r\"^__version__ = '([^']+)'\\r?$\",\n txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError('Unable to determine version.')\n\ninstall_requires = [\n 'attrs>=17.3.0',\n 'chardet>=2.0,<4.0',\n 
'multidict>=4.0,<5.0',\n 'async_timeout>=3.0,<4.0',\n 'yarl>=1.0,<2.0',\n 'idna-ssl>=1.0; python_version<\"3.7\"',\n 'typing_extensions>=3.6.5',\n]\n\n\ndef read(f):\n return (here / f).read_text('utf-8').strip()\n\n\nargs = dict(\n name='aiohttp',\n version=version,\n description='Async http client/server framework (asyncio)',\n long_description='\\n\\n'.join((read('README.rst'), read('CHANGES.rst'))),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Topic :: Internet :: WWW/HTTP',\n 'Framework :: AsyncIO',\n ],\n author='Nikolay Kim',\n author_email='[email protected]',\n maintainer=', '.join(('Nikolay Kim <[email protected]>',\n 'Andrew Svetlov <[email protected]>')),\n maintainer_email='[email protected]',\n url='https://github.com/aio-libs/aiohttp',\n project_urls={\n 'Chat: Gitter': 'https://gitter.im/aio-libs/Lobby',\n 'CI: AppVeyor': 'https://ci.appveyor.com/project/aio-libs/aiohttp',\n 'CI: Circle': 'https://circleci.com/gh/aio-libs/aiohttp',\n 'CI: Shippable': 'https://app.shippable.com/github/aio-libs/aiohttp',\n 'CI: Travis': 'https://travis-ci.com/aio-libs/aiohttp',\n 'Coverage: codecov': 'https://codecov.io/github/aio-libs/aiohttp',\n 'Docs: RTD': 'https://docs.aiohttp.org',\n 'GitHub: issues': 'https://github.com/aio-libs/aiohttp/issues',\n 'GitHub: repo': 'https://github.com/aio-libs/aiohttp',\n },\n license='Apache 2',\n packages=['aiohttp'],\n python_requires='>=3.5.3',\n install_requires=install_requires,\n extras_require={\n 'speedups': [\n 'aiodns',\n 'Brotli',\n 'cchardet',\n ],\n },\n include_package_data=True,\n)\n\nif not NO_EXTENSIONS:\n print(\"**********************\")\n print(\"* Accellerated build *\")\n print(\"**********************\")\n setup(ext_modules=extensions,\n cmdclass=dict(build_ext=ve_build_ext),\n **args)\nelse:\n print(\"*********************\")\n print(\"* Pure Python build *\")\n print(\"*********************\")\n setup(**args)\n", "path": "setup.py"}]}
| 2,486 | 135 |
gh_patches_debug_3819
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-2395
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
single module project without timeline is missing any discription
my single module project [https://meinberlin-dev.liqd.net/projects/schillerkasse-ihre-kiezkasse-fur-den-schillerkiez/](url) which is module Kiezkasse should have a phase information. It is currently running.

</issue>
<code>
[start of meinberlin/apps/contrib/mixins.py]
1 from django import forms
2 from django.db.models import Max
3 from django.db.models import Min
4 from django.db.models import Q
5 from django.urls import resolve
6 from django.utils.translation import ugettext_lazy as _
7 from django.views import generic
8
9 from adhocracy4.modules.models import Module
10
11 RIGHT_OF_USE_LABEL = _('I hereby confirm that the copyrights for this '
12 'photo are with me or that I have received '
13 'rights of use from the author. I also confirm '
14 'that the privacy rights of depicted third persons '
15 'are not violated. ')
16
17
18 class DynamicChoicesMixin(object):
19 """Dynamic choices mixin.
20
21 Add callable functionality to filters that support the ``choices``
22 argument. If the ``choices`` is callable, then it **must** accept the
23 ``view`` object as a single argument.
24 The ``view`` object may be None if the parent FilterSet is not class based.
25
26 This is useful for dymanic ``choices`` determined properties on the
27 ``view`` object.
28 """
29
30 def __init__(self, *args, **kwargs):
31 self.choices = kwargs.pop('choices')
32 super().__init__(*args, **kwargs)
33
34 def get_choices(self, view):
35 choices = self.choices
36
37 if callable(choices):
38 return choices(view)
39 return choices
40
41 @property
42 def field(self):
43 choices = self.get_choices(getattr(self, 'view', None))
44
45 if choices is not None:
46 self.extra['choices'] = choices
47
48 return super(DynamicChoicesMixin, self).field
49
50
51 class ImageRightOfUseMixin(forms.ModelForm):
52 right_of_use = forms.BooleanField(required=False, label=RIGHT_OF_USE_LABEL)
53
54 def __init__(self, *args, **kwargs):
55 super().__init__(*args, **kwargs)
56 if self.instance.image:
57 self.initial['right_of_use'] = True
58
59 def clean(self):
60 cleaned_data = super().clean()
61 image = cleaned_data.get('image')
62 right_of_use = cleaned_data.get('right_of_use')
63 if image and not right_of_use:
64 self.add_error('right_of_use',
65 _("You want to upload an image. "
66 "Please check that you have the "
67 "right of use for the image."))
68
69
70 class ModuleClusterMixin:
71
72 def _get_module_dict(self, count, start_date, end_date):
73 return {
74 'count': count,
75 'date': start_date,
76 'end_date': end_date,
77 'modules': []
78 }
79
80 def get_module_clusters(self, modules):
81 modules = modules\
82 .exclude(Q(start_date=None) | Q(end_date=None))
83 clusters = []
84 try:
85 start_date = modules.first().start_date
86 end_date = modules.first().end_date
87 count = 1
88 first_cluster = self._get_module_dict(
89 count, start_date, end_date)
90 first_cluster['modules'].append(modules.first())
91 current_cluster = first_cluster
92 clusters.append(first_cluster)
93
94 for module in modules[1:]:
95 if module.start_date > end_date:
96 start_date = module.start_date
97 end_date = module.end_date
98 count += 1
99 next_cluster = self._get_module_dict(
100 count, start_date, end_date)
101 next_cluster['modules'].append(module)
102 current_cluster = next_cluster
103 clusters.append(next_cluster)
104 else:
105 current_cluster['modules'].append(module)
106 if module.end_date > end_date:
107 end_date = module.end_date
108 current_cluster['end_date'] = end_date
109 except AttributeError:
110 return clusters
111 if len(clusters) == 1:
112 clusters[0]['title'] = _('Online Participation')
113 return clusters
114
115
116 class DisplayProjectOrModuleMixin(generic.base.ContextMixin,
117 ModuleClusterMixin):
118
119 def module_clusters(self, modules):
120 return super().get_module_clusters(modules)
121
122 @property
123 def url_name(self):
124 return resolve(self.request.path_info).url_name
125
126 @property
127 def other_modules(self):
128 modules = Module.objects.filter(project=self.project)\
129 .annotate(start_date=Min('phase__start_date'))\
130 .annotate(end_date=Max('phase__end_date'))\
131 .order_by('start_date')
132
133 for cluster in self.module_clusters(modules):
134 if self.module in cluster['modules']:
135 idx = cluster['modules'].index(self.module)
136 modules = cluster['modules']
137 return modules, idx
138 return []
139
140 @property
141 def extends(self):
142 if self.url_name == 'module-detail':
143 return 'a4modules/module_detail.html'
144 return 'meinberlin_projects/project_detail.html'
145
146 def get_context_data(self, **kwargs):
147 context = super().get_context_data(**kwargs)
148 context['url_name'] = self.url_name
149 context['extends'] = self.extends
150 if self.url_name == 'module-detail':
151 cluster, idx = self.other_modules
152 next_module = None
153 previous_module = None
154 try:
155 next_module = cluster[idx + 1]
156 except IndexError:
157 pass
158 try:
159 previous_module = cluster[idx - 1]
160 except IndexError:
161 pass
162 context['other_modules'] = cluster
163 context['index'] = idx + 1
164 context['next'] = next_module
165 context['previous'] = previous_module
166 return context
167
[end of meinberlin/apps/contrib/mixins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/apps/contrib/mixins.py b/meinberlin/apps/contrib/mixins.py
--- a/meinberlin/apps/contrib/mixins.py
+++ b/meinberlin/apps/contrib/mixins.py
@@ -156,7 +156,8 @@
except IndexError:
pass
try:
- previous_module = cluster[idx - 1]
+ if idx > 0:
+ previous_module = cluster[idx - 1]
except IndexError:
pass
context['other_modules'] = cluster
|
{"golden_diff": "diff --git a/meinberlin/apps/contrib/mixins.py b/meinberlin/apps/contrib/mixins.py\n--- a/meinberlin/apps/contrib/mixins.py\n+++ b/meinberlin/apps/contrib/mixins.py\n@@ -156,7 +156,8 @@\n except IndexError:\n pass\n try:\n- previous_module = cluster[idx - 1]\n+ if idx > 0:\n+ previous_module = cluster[idx - 1]\n except IndexError:\n pass\n context['other_modules'] = cluster\n", "issue": "single module project without timeline is missing any discription\nmy single module project [https://meinberlin-dev.liqd.net/projects/schillerkasse-ihre-kiezkasse-fur-den-schillerkiez/](url) which is module Kiezkasse should have a phase information. It is currently running.\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.db.models import Max\nfrom django.db.models import Min\nfrom django.db.models import Q\nfrom django.urls import resolve\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.modules.models import Module\n\nRIGHT_OF_USE_LABEL = _('I hereby confirm that the copyrights for this '\n 'photo are with me or that I have received '\n 'rights of use from the author. I also confirm '\n 'that the privacy rights of depicted third persons '\n 'are not violated. ')\n\n\nclass DynamicChoicesMixin(object):\n \"\"\"Dynamic choices mixin.\n\n Add callable functionality to filters that support the ``choices``\n argument. If the ``choices`` is callable, then it **must** accept the\n ``view`` object as a single argument.\n The ``view`` object may be None if the parent FilterSet is not class based.\n\n This is useful for dymanic ``choices`` determined properties on the\n ``view`` object.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.choices = kwargs.pop('choices')\n super().__init__(*args, **kwargs)\n\n def get_choices(self, view):\n choices = self.choices\n\n if callable(choices):\n return choices(view)\n return choices\n\n @property\n def field(self):\n choices = self.get_choices(getattr(self, 'view', None))\n\n if choices is not None:\n self.extra['choices'] = choices\n\n return super(DynamicChoicesMixin, self).field\n\n\nclass ImageRightOfUseMixin(forms.ModelForm):\n right_of_use = forms.BooleanField(required=False, label=RIGHT_OF_USE_LABEL)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.image:\n self.initial['right_of_use'] = True\n\n def clean(self):\n cleaned_data = super().clean()\n image = cleaned_data.get('image')\n right_of_use = cleaned_data.get('right_of_use')\n if image and not right_of_use:\n self.add_error('right_of_use',\n _(\"You want to upload an image. 
\"\n \"Please check that you have the \"\n \"right of use for the image.\"))\n\n\nclass ModuleClusterMixin:\n\n def _get_module_dict(self, count, start_date, end_date):\n return {\n 'count': count,\n 'date': start_date,\n 'end_date': end_date,\n 'modules': []\n }\n\n def get_module_clusters(self, modules):\n modules = modules\\\n .exclude(Q(start_date=None) | Q(end_date=None))\n clusters = []\n try:\n start_date = modules.first().start_date\n end_date = modules.first().end_date\n count = 1\n first_cluster = self._get_module_dict(\n count, start_date, end_date)\n first_cluster['modules'].append(modules.first())\n current_cluster = first_cluster\n clusters.append(first_cluster)\n\n for module in modules[1:]:\n if module.start_date > end_date:\n start_date = module.start_date\n end_date = module.end_date\n count += 1\n next_cluster = self._get_module_dict(\n count, start_date, end_date)\n next_cluster['modules'].append(module)\n current_cluster = next_cluster\n clusters.append(next_cluster)\n else:\n current_cluster['modules'].append(module)\n if module.end_date > end_date:\n end_date = module.end_date\n current_cluster['end_date'] = end_date\n except AttributeError:\n return clusters\n if len(clusters) == 1:\n clusters[0]['title'] = _('Online Participation')\n return clusters\n\n\nclass DisplayProjectOrModuleMixin(generic.base.ContextMixin,\n ModuleClusterMixin):\n\n def module_clusters(self, modules):\n return super().get_module_clusters(modules)\n\n @property\n def url_name(self):\n return resolve(self.request.path_info).url_name\n\n @property\n def other_modules(self):\n modules = Module.objects.filter(project=self.project)\\\n .annotate(start_date=Min('phase__start_date'))\\\n .annotate(end_date=Max('phase__end_date'))\\\n .order_by('start_date')\n\n for cluster in self.module_clusters(modules):\n if self.module in cluster['modules']:\n idx = cluster['modules'].index(self.module)\n modules = cluster['modules']\n return modules, idx\n return []\n\n @property\n def extends(self):\n if self.url_name == 'module-detail':\n return 'a4modules/module_detail.html'\n return 'meinberlin_projects/project_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['url_name'] = self.url_name\n context['extends'] = self.extends\n if self.url_name == 'module-detail':\n cluster, idx = self.other_modules\n next_module = None\n previous_module = None\n try:\n next_module = cluster[idx + 1]\n except IndexError:\n pass\n try:\n previous_module = cluster[idx - 1]\n except IndexError:\n pass\n context['other_modules'] = cluster\n context['index'] = idx + 1\n context['next'] = next_module\n context['previous'] = previous_module\n return context\n", "path": "meinberlin/apps/contrib/mixins.py"}]}
| 2,260 | 126 |
gh_patches_debug_21184
|
rasdani/github-patches
|
git_diff
|
google__mobly-95
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
logging.exception("Failed to create socket connection!") results in a double stack trace
In jsonrpc_client_base:
try:
self._conn = socket.create_connection(('127.0.0.1', self.host_port),
_SOCKET_TIMEOUT)
self._conn.settimeout(_SOCKET_TIMEOUT)
except (socket.timeout, socket.error, IOError):
logging.exception("Failed to create socket connection!")
raise
Would result in the trace being printed twice.
</issue>
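Illustrative only: a few self-contained lines of standard-library Python reproduce the double trace described above, since `logging.exception` prints the active traceback and the bare `raise` lets the interpreter's default handler print it a second time. The error message here is a stand-in for the real socket failure.

```python
import logging

def connect():
    try:
        raise IOError("boom")  # stand-in for socket.create_connection failing
    except IOError:
        # First traceback: emitted by the logging module.
        logging.exception("Failed to create socket connection!")
        # Second traceback: the re-raised exception reaches the default excepthook.
        raise

connect()
```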
<code>
[start of mobly/controllers/android_device_lib/jsonrpc_client_base.py]
1 #/usr/bin/env python3.4
2 #
3 # Copyright 2016 Google Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Base class for clients that communicate with apps over a JSON RPC interface.
18
19 The JSON protocol expected by this module is:
20
21 Request:
22 {
23 "id": <monotonically increasing integer containing the ID of this request>
24 "method": <string containing the name of the method to execute>
25 "params": <JSON array containing the arguments to the method>
26 }
27
28 Response:
29 {
30 "id": <int id of request that this response maps to>,
31 "result": <Arbitrary JSON object containing the result of executing the
32 method. If the method could not be executed or returned void,
33 contains 'null'.>,
34 "error": <String containing the error thrown by executing the method.
35 If no error occurred, contains 'null'.>
36 }
37 """
38
39 from builtins import str
40
41 import json
42 import logging
43 import socket
44 import threading
45 import time
46
47 # Maximum time to wait for the app to start on the device.
48 APP_START_WAIT_TIME = 15
49
50 # UID of the 'unknown' jsonrpc session. Will cause creation of a new session.
51 UNKNOWN_UID = -1
52
53 # Maximum time to wait for the socket to open on the device.
54 _SOCKET_TIMEOUT = 60
55
56
57 class Error(Exception):
58 pass
59
60
61 class AppStartError(Error):
62 """Raised when the app is not able to be started."""
63
64
65 class ApiError(Error):
66 """Raised when remote API reports an error."""
67
68
69 class ProtocolError(Error):
70 """Raised when there is some error in exchanging data with server."""
71 NO_RESPONSE_FROM_HANDSHAKE = "No response from handshake."
72 NO_RESPONSE_FROM_SERVER = "No response from server."
73 MISMATCHED_API_ID = "Mismatched API id."
74
75
76 class JsonRpcCommand(object):
77 """Commands that can be invoked on all jsonrpc clients.
78
79 INIT: Initializes a new session.
80 CONTINUE: Creates a connection.
81 """
82 INIT = 'initiate'
83 CONTINUE = 'continue'
84
85
86 class JsonRpcClientBase(object):
87 """Base class for jsonrpc clients that connect to remote servers.
88
89 Connects to a remote device running a jsonrpc-compatible app. Before opening
90 a connection a port forward must be setup to go over usb. This be done using
91 adb.tcp_forward(). This calls the shell command adb forward <local> remote>.
92 Once the port has been forwarded it can be used in this object as the port
93 of communication.
94
95 Attributes:
96 host_port: (int) The host port of this RPC client.
97 device_port: (int) The device port of this RPC client.
98 app_name: (str) The user-visible name of the app being communicated
99 with.
100 uid: (int) The uid of this session.
101 """
102
103 def __init__(self, host_port, device_port, app_name, adb_proxy):
104 """
105 Args:
106 host_port: (int) The host port of this RPC client.
107 device_port: (int) The device port of this RPC client.
108 app_name: (str) The user-visible name of the app being communicated
109 with.
110 adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.
111 """
112 self.host_port = host_port
113 self.device_port = device_port
114 self.app_name = app_name
115 self.uid = None
116 self._adb = adb_proxy
117 self._client = None # prevent close errors on connect failure
118 self._conn = None
119 self._counter = None
120 self._lock = threading.Lock()
121
122 def __del__(self):
123 self.close()
124
125 # Methods to be implemented by subclasses.
126
127 def _do_start_app(self):
128 """Starts the server app on the android device.
129
130 Must be implemented by subclasses.
131 """
132 raise NotImplementedError()
133
134 def stop_app(self):
135 """Kills any running instance of the app.
136
137 Must be implemented by subclasses.
138 """
139 raise NotImplementedError()
140
141 def _is_app_installed(self):
142 """Checks if app is installed.
143
144 Must be implemented by subclasses.
145
146 Returns:
147 True if installed, False otherwise.
148 """
149 raise NotImplementedError()
150
151 # Rest of the client methods.
152
153 def check_app_installed(self):
154 if not self._is_app_installed():
155 raise AppStartError(
156 '%s is not installed on %s' % (self.app_name, self._adb.serial))
157
158 def start_app(self, wait_time=APP_START_WAIT_TIME):
159 """Starts the server app on the android device.
160
161 Args:
162 wait_time: float, The time to wait for the app to come up before
163 raising an error.
164
165 Raises:
166 AppStartError: When the app was not able to be started.
167 """
168 self.check_app_installed()
169 self._do_start_app()
170 for _ in range(wait_time):
171 time.sleep(1)
172 if self._is_app_running():
173 return
174 raise AppStartError(
175 '%s failed to start on %s.' % (self.app_name, self._adb.serial))
176
177 def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
178 """Opens a connection to a JSON RPC server.
179
180 Opens a connection to a remote client. The connection attempt will time
181 out if it takes longer than _SOCKET_TIMEOUT seconds. Each subsequent
182 operation over this socket will time out after _SOCKET_TIMEOUT seconds
183 as well.
184
185 Args:
186 uid: int, The uid of the session to join, or UNKNOWN_UID to start a
187 new session.
188 cmd: JsonRpcCommand, The command to use for creating the connection.
189
190 Raises:
191 IOError: Raised when the socket times out from io error
192 socket.timeout: Raised when the socket waits to long for connection.
193 ProtocolError: Raised when there is an error in the protocol.
194 """
195 self._counter = self._id_counter()
196 try:
197 self._conn = socket.create_connection(('127.0.0.1', self.host_port),
198 _SOCKET_TIMEOUT)
199 self._conn.settimeout(_SOCKET_TIMEOUT)
200 except (socket.timeout, socket.error, IOError):
201 logging.exception("Failed to create socket connection!")
202 raise
203 self._client = self._conn.makefile(mode="brw")
204
205 resp = self._cmd(cmd, uid)
206 if not resp:
207 raise ProtocolError(ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
208 result = json.loads(str(resp, encoding="utf8"))
209 if result['status']:
210 self.uid = result['uid']
211 else:
212 self.uid = UNKNOWN_UID
213
214 def close(self):
215 """Close the connection to the remote client."""
216 if self._conn:
217 self._conn.close()
218 self._conn = None
219
220 def _cmd(self, command, uid=None):
221 """Send a command to the server.
222
223 Args:
224 command: str, The name of the command to execute.
225 uid: int, the uid of the session to send the command to.
226
227 Returns:
228 The line that was written back.
229 """
230 if not uid:
231 uid = self.uid
232 self._client.write(
233 json.dumps({'cmd': command, 'uid': uid}).encode("utf8") + b'\n')
234 self._client.flush()
235 return self._client.readline()
236
237 def _rpc(self, method, *args):
238 """Sends an rpc to the app.
239
240 Args:
241 method: str, The name of the method to execute.
242 args: any, The args of the method.
243
244 Returns:
245 The result of the rpc.
246
247 Raises:
248 ProtocolError: Something went wrong with the protocol.
249 ApiError: The rpc went through, however executed with errors.
250 """
251 with self._lock:
252 apiid = next(self._counter)
253 data = {'id': apiid, 'method': method, 'params': args}
254 request = json.dumps(data)
255 self._client.write(request.encode("utf8") + b'\n')
256 self._client.flush()
257 response = self._client.readline()
258 if not response:
259 raise ProtocolError(ProtocolError.NO_RESPONSE_FROM_SERVER)
260 result = json.loads(str(response, encoding="utf8"))
261 if result['error']:
262 raise ApiError(result['error'])
263 if result['id'] != apiid:
264 raise ProtocolError(ProtocolError.MISMATCHED_API_ID)
265 return result['result']
266
267 def _is_app_running(self):
268 """Checks if the app is currently running on an android device.
269
270 May be overridden by subclasses with custom sanity checks.
271 """
272 running = False
273 try:
274 self.connect()
275 running = True
276 finally:
277 self.close()
278 # This 'return' squashes exceptions from connect()
279 return running
280
281 def __getattr__(self, name):
282 """Wrapper for python magic to turn method calls into RPC calls."""
283 def rpc_call(*args):
284 return self._rpc(name, *args)
285 return rpc_call
286
287 def _id_counter(self):
288 i = 0
289 while True:
290 yield i
291 i += 1
292
[end of mobly/controllers/android_device_lib/jsonrpc_client_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py
+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
@@ -39,7 +39,6 @@
from builtins import str
import json
-import logging
import socket
import threading
import time
@@ -193,13 +192,9 @@
ProtocolError: Raised when there is an error in the protocol.
"""
self._counter = self._id_counter()
- try:
- self._conn = socket.create_connection(('127.0.0.1', self.host_port),
- _SOCKET_TIMEOUT)
- self._conn.settimeout(_SOCKET_TIMEOUT)
- except (socket.timeout, socket.error, IOError):
- logging.exception("Failed to create socket connection!")
- raise
+ self._conn = socket.create_connection(('127.0.0.1', self.host_port),
+ _SOCKET_TIMEOUT)
+ self._conn.settimeout(_SOCKET_TIMEOUT)
self._client = self._conn.makefile(mode="brw")
resp = self._cmd(cmd, uid)
|
{"golden_diff": "diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n@@ -39,7 +39,6 @@\n from builtins import str\n \n import json\n-import logging\n import socket\n import threading\n import time\n@@ -193,13 +192,9 @@\n ProtocolError: Raised when there is an error in the protocol.\n \"\"\"\n self._counter = self._id_counter()\n- try:\n- self._conn = socket.create_connection(('127.0.0.1', self.host_port),\n- _SOCKET_TIMEOUT)\n- self._conn.settimeout(_SOCKET_TIMEOUT)\n- except (socket.timeout, socket.error, IOError):\n- logging.exception(\"Failed to create socket connection!\")\n- raise\n+ self._conn = socket.create_connection(('127.0.0.1', self.host_port),\n+ _SOCKET_TIMEOUT)\n+ self._conn.settimeout(_SOCKET_TIMEOUT)\n self._client = self._conn.makefile(mode=\"brw\")\n \n resp = self._cmd(cmd, uid)\n", "issue": "logging.exception(\"Failed to create socket connection!\") results in double stracktrace\nIn jsonrpc_client_base:\r\n\r\n try:\r\n self._conn = socket.create_connection(('127.0.0.1', self.host_port),\r\n _SOCKET_TIMEOUT)\r\n self._conn.settimeout(_SOCKET_TIMEOUT)\r\n except (socket.timeout, socket.error, IOError):\r\n logging.exception(\"Failed to create socket connection!\")\r\n raise\r\n\r\nWould result in the trace being printed twice.\n", "before_files": [{"content": "#/usr/bin/env python3.4\n#\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for clients that communicate with apps over a JSON RPC interface.\n\nThe JSON protocol expected by this module is:\n\nRequest:\n{\n \"id\": <monotonically increasing integer containing the ID of this request>\n \"method\": <string containing the name of the method to execute>\n \"params\": <JSON array containing the arguments to the method>\n}\n\nResponse:\n{\n \"id\": <int id of request that this response maps to>,\n \"result\": <Arbitrary JSON object containing the result of executing the\n method. If the method could not be executed or returned void,\n contains 'null'.>,\n \"error\": <String containing the error thrown by executing the method.\n If no error occurred, contains 'null'.>\n}\n\"\"\"\n\nfrom builtins import str\n\nimport json\nimport logging\nimport socket\nimport threading\nimport time\n\n# Maximum time to wait for the app to start on the device.\nAPP_START_WAIT_TIME = 15\n\n# UID of the 'unknown' jsonrpc session. 
Will cause creation of a new session.\nUNKNOWN_UID = -1\n\n# Maximum time to wait for the socket to open on the device.\n_SOCKET_TIMEOUT = 60\n\n\nclass Error(Exception):\n pass\n\n\nclass AppStartError(Error):\n \"\"\"Raised when the app is not able to be started.\"\"\"\n\n\nclass ApiError(Error):\n \"\"\"Raised when remote API reports an error.\"\"\"\n\n\nclass ProtocolError(Error):\n \"\"\"Raised when there is some error in exchanging data with server.\"\"\"\n NO_RESPONSE_FROM_HANDSHAKE = \"No response from handshake.\"\n NO_RESPONSE_FROM_SERVER = \"No response from server.\"\n MISMATCHED_API_ID = \"Mismatched API id.\"\n\n\nclass JsonRpcCommand(object):\n \"\"\"Commands that can be invoked on all jsonrpc clients.\n\n INIT: Initializes a new session.\n CONTINUE: Creates a connection.\n \"\"\"\n INIT = 'initiate'\n CONTINUE = 'continue'\n\n\nclass JsonRpcClientBase(object):\n \"\"\"Base class for jsonrpc clients that connect to remote servers.\n\n Connects to a remote device running a jsonrpc-compatible app. Before opening\n a connection a port forward must be setup to go over usb. This be done using\n adb.tcp_forward(). This calls the shell command adb forward <local> remote>.\n Once the port has been forwarded it can be used in this object as the port\n of communication.\n\n Attributes:\n host_port: (int) The host port of this RPC client.\n device_port: (int) The device port of this RPC client.\n app_name: (str) The user-visible name of the app being communicated\n with.\n uid: (int) The uid of this session.\n \"\"\"\n\n def __init__(self, host_port, device_port, app_name, adb_proxy):\n \"\"\"\n Args:\n host_port: (int) The host port of this RPC client.\n device_port: (int) The device port of this RPC client.\n app_name: (str) The user-visible name of the app being communicated\n with.\n adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.\n \"\"\"\n self.host_port = host_port\n self.device_port = device_port\n self.app_name = app_name\n self.uid = None\n self._adb = adb_proxy\n self._client = None # prevent close errors on connect failure\n self._conn = None\n self._counter = None\n self._lock = threading.Lock()\n\n def __del__(self):\n self.close()\n\n # Methods to be implemented by subclasses.\n\n def _do_start_app(self):\n \"\"\"Starts the server app on the android device.\n\n Must be implemented by subclasses.\n \"\"\"\n raise NotImplementedError()\n\n def stop_app(self):\n \"\"\"Kills any running instance of the app.\n\n Must be implemented by subclasses.\n \"\"\"\n raise NotImplementedError()\n\n def _is_app_installed(self):\n \"\"\"Checks if app is installed.\n\n Must be implemented by subclasses.\n\n Returns:\n True if installed, False otherwise.\n \"\"\"\n raise NotImplementedError()\n\n # Rest of the client methods.\n\n def check_app_installed(self):\n if not self._is_app_installed():\n raise AppStartError(\n '%s is not installed on %s' % (self.app_name, self._adb.serial))\n\n def start_app(self, wait_time=APP_START_WAIT_TIME):\n \"\"\"Starts the server app on the android device.\n\n Args:\n wait_time: float, The time to wait for the app to come up before\n raising an error.\n\n Raises:\n AppStartError: When the app was not able to be started.\n \"\"\"\n self.check_app_installed()\n self._do_start_app()\n for _ in range(wait_time):\n time.sleep(1)\n if self._is_app_running():\n return\n raise AppStartError(\n '%s failed to start on %s.' 
% (self.app_name, self._adb.serial))\n\n def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):\n \"\"\"Opens a connection to a JSON RPC server.\n\n Opens a connection to a remote client. The connection attempt will time\n out if it takes longer than _SOCKET_TIMEOUT seconds. Each subsequent\n operation over this socket will time out after _SOCKET_TIMEOUT seconds\n as well.\n\n Args:\n uid: int, The uid of the session to join, or UNKNOWN_UID to start a\n new session.\n cmd: JsonRpcCommand, The command to use for creating the connection.\n\n Raises:\n IOError: Raised when the socket times out from io error\n socket.timeout: Raised when the socket waits to long for connection.\n ProtocolError: Raised when there is an error in the protocol.\n \"\"\"\n self._counter = self._id_counter()\n try:\n self._conn = socket.create_connection(('127.0.0.1', self.host_port),\n _SOCKET_TIMEOUT)\n self._conn.settimeout(_SOCKET_TIMEOUT)\n except (socket.timeout, socket.error, IOError):\n logging.exception(\"Failed to create socket connection!\")\n raise\n self._client = self._conn.makefile(mode=\"brw\")\n\n resp = self._cmd(cmd, uid)\n if not resp:\n raise ProtocolError(ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n result = json.loads(str(resp, encoding=\"utf8\"))\n if result['status']:\n self.uid = result['uid']\n else:\n self.uid = UNKNOWN_UID\n\n def close(self):\n \"\"\"Close the connection to the remote client.\"\"\"\n if self._conn:\n self._conn.close()\n self._conn = None\n\n def _cmd(self, command, uid=None):\n \"\"\"Send a command to the server.\n\n Args:\n command: str, The name of the command to execute.\n uid: int, the uid of the session to send the command to.\n\n Returns:\n The line that was written back.\n \"\"\"\n if not uid:\n uid = self.uid\n self._client.write(\n json.dumps({'cmd': command, 'uid': uid}).encode(\"utf8\") + b'\\n')\n self._client.flush()\n return self._client.readline()\n\n def _rpc(self, method, *args):\n \"\"\"Sends an rpc to the app.\n\n Args:\n method: str, The name of the method to execute.\n args: any, The args of the method.\n\n Returns:\n The result of the rpc.\n\n Raises:\n ProtocolError: Something went wrong with the protocol.\n ApiError: The rpc went through, however executed with errors.\n \"\"\"\n with self._lock:\n apiid = next(self._counter)\n data = {'id': apiid, 'method': method, 'params': args}\n request = json.dumps(data)\n self._client.write(request.encode(\"utf8\") + b'\\n')\n self._client.flush()\n response = self._client.readline()\n if not response:\n raise ProtocolError(ProtocolError.NO_RESPONSE_FROM_SERVER)\n result = json.loads(str(response, encoding=\"utf8\"))\n if result['error']:\n raise ApiError(result['error'])\n if result['id'] != apiid:\n raise ProtocolError(ProtocolError.MISMATCHED_API_ID)\n return result['result']\n\n def _is_app_running(self):\n \"\"\"Checks if the app is currently running on an android device.\n\n May be overridden by subclasses with custom sanity checks.\n \"\"\"\n running = False\n try:\n self.connect()\n running = True\n finally:\n self.close()\n # This 'return' squashes exceptions from connect()\n return running\n\n def __getattr__(self, name):\n \"\"\"Wrapper for python magic to turn method calls into RPC calls.\"\"\"\n def rpc_call(*args):\n return self._rpc(name, *args)\n return rpc_call\n\n def _id_counter(self):\n i = 0\n while True:\n yield i\n i += 1\n", "path": "mobly/controllers/android_device_lib/jsonrpc_client_base.py"}]}
| 3,510 | 277 |
gh_patches_debug_36163
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-495
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Have @Python add @everyone pings to checkpoint on a monthly basis
We've noticed that adding an @everyone message to the checkpoint channel every once in a while triggers a flood of users accepting our rules to gain the Developers role. Currently, those @everyone messages are added manually every once in a while, but it should be very straightforward to automate the process.
I would leave the current welcome message untouched, since it's a message from a human, but do it like this:
1. Remove previous @everyone message from @Python in that channel
2. Post message that says something like:
- @everyone Please ping `@Admins` if you encounter any problems during the verification process.
I'm not sure what frequency is best for this. I wouldn't mind it once a month or even once a week. If people get bothered by it, they either accept and join for real or leave the server in some kind of self-purge (they've not accepted our rules anyway).
</issue>
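A minimal sketch of the kind of scheduled ping the issue asks for, using `discord.ext.tasks`; the channel id, interval, and message text below are placeholders rather than values taken from the project:

```python
from discord.ext import commands, tasks

VERIFICATION_CHANNEL_ID = 123456789  # placeholder id
PING = "@everyone Please ping `@Admins` if you encounter any problems during the verification process."

class PeriodicPing(commands.Cog):
    def __init__(self, bot: commands.Bot):
        self.bot = bot
        self.ping_verification.start()

    def cog_unload(self) -> None:
        self.ping_verification.cancel()

    @tasks.loop(hours=24 * 7)  # roughly weekly; the right frequency is left open in the issue
    async def ping_verification(self) -> None:
        channel = self.bot.get_channel(VERIFICATION_CHANNEL_ID)
        # Drop the previous bot ping so the channel only ever holds one.
        async for message in channel.history(limit=50):
            if message.author == self.bot.user and message.content == PING:
                await message.delete()
        await channel.send(PING)

    @ping_verification.before_loop
    async def before_ping(self) -> None:
        await self.bot.wait_until_ready()
```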
<code>
[start of bot/cogs/verification.py]
1 import logging
2
3 from discord import Message, NotFound, Object
4 from discord.ext.commands import Bot, Cog, Context, command
5
6 from bot.cogs.moderation import ModLog
7 from bot.constants import Channels, Event, Roles
8 from bot.decorators import InChannelCheckFailure, in_channel, without_role
9
10 log = logging.getLogger(__name__)
11
12 WELCOME_MESSAGE = f"""
13 Hello! Welcome to the server, and thanks for verifying yourself!
14
15 For your records, these are the documents you accepted:
16
17 `1)` Our rules, here: <https://pythondiscord.com/pages/rules>
18 `2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \
19 your information removed here as well.
20
21 Feel free to review them at any point!
22
23 Additionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \
24 from time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \
25 **Announcements** role. We'll mention this role every time we make an announcement.
26
27 If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.
28 """
29
30
31 class Verification(Cog):
32 """User verification and role self-management."""
33
34 def __init__(self, bot: Bot):
35 self.bot = bot
36
37 @property
38 def mod_log(self) -> ModLog:
39 """Get currently loaded ModLog cog instance."""
40 return self.bot.get_cog("ModLog")
41
42 @Cog.listener()
43 async def on_message(self, message: Message) -> None:
44 """Check new message event for messages to the checkpoint channel & process."""
45 if message.author.bot:
46 return # They're a bot, ignore
47
48 ctx = await self.bot.get_context(message) # type: Context
49
50 if ctx.command is not None and ctx.command.name == "accept":
51 return # They used the accept command
52
53 if ctx.channel.id == Channels.verification: # We're in the verification channel
54 for role in ctx.author.roles:
55 if role.id == Roles.verified:
56 log.warning(f"{ctx.author} posted '{ctx.message.content}' "
57 "in the verification channel, but is already verified.")
58 return # They're already verified
59
60 log.debug(f"{ctx.author} posted '{ctx.message.content}' in the verification "
61 "channel. We are providing instructions how to verify.")
62 await ctx.send(
63 f"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, "
64 f"and gain access to the rest of the server.",
65 delete_after=20
66 )
67
68 log.trace(f"Deleting the message posted by {ctx.author}")
69
70 try:
71 await ctx.message.delete()
72 except NotFound:
73 log.trace("No message found, it must have been deleted by another bot.")
74
75 @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)
76 @without_role(Roles.verified)
77 @in_channel(Channels.verification)
78 async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
79 """Accept our rules and gain access to the rest of the server."""
80 log.debug(f"{ctx.author} called !accept. Assigning the 'Developer' role.")
81 await ctx.author.add_roles(Object(Roles.verified), reason="Accepted the rules")
82 try:
83 await ctx.author.send(WELCOME_MESSAGE)
84 except Exception:
85 # Catch the exception, in case they have DMs off or something
86 log.exception(f"Unable to send welcome message to user {ctx.author}.")
87
88 log.trace(f"Deleting the message posted by {ctx.author}.")
89
90 try:
91 self.mod_log.ignore(Event.message_delete, ctx.message.id)
92 await ctx.message.delete()
93 except NotFound:
94 log.trace("No message found, it must have been deleted by another bot.")
95
96 @command(name='subscribe')
97 @in_channel(Channels.bot)
98 async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
99 """Subscribe to announcement notifications by assigning yourself the role."""
100 has_role = False
101
102 for role in ctx.author.roles:
103 if role.id == Roles.announcements:
104 has_role = True
105 break
106
107 if has_role:
108 await ctx.send(f"{ctx.author.mention} You're already subscribed!")
109 return
110
111 log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.")
112 await ctx.author.add_roles(Object(Roles.announcements), reason="Subscribed to announcements")
113
114 log.trace(f"Deleting the message posted by {ctx.author}.")
115
116 await ctx.send(
117 f"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.",
118 )
119
120 @command(name='unsubscribe')
121 @in_channel(Channels.bot)
122 async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
123 """Unsubscribe from announcement notifications by removing the role from yourself."""
124 has_role = False
125
126 for role in ctx.author.roles:
127 if role.id == Roles.announcements:
128 has_role = True
129 break
130
131 if not has_role:
132 await ctx.send(f"{ctx.author.mention} You're already unsubscribed!")
133 return
134
135 log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.")
136 await ctx.author.remove_roles(Object(Roles.announcements), reason="Unsubscribed from announcements")
137
138 log.trace(f"Deleting the message posted by {ctx.author}.")
139
140 await ctx.send(
141 f"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications."
142 )
143
144 # This cannot be static (must have a __func__ attribute).
145 async def cog_command_error(self, ctx: Context, error: Exception) -> None:
146 """Check for & ignore any InChannelCheckFailure."""
147 if isinstance(error, InChannelCheckFailure):
148 error.handled = True
149
150 @staticmethod
151 def bot_check(ctx: Context) -> bool:
152 """Block any command within the verification channel that is not !accept."""
153 if ctx.channel.id == Channels.verification:
154 return ctx.command.name == "accept"
155 else:
156 return True
157
158
159 def setup(bot: Bot) -> None:
160 """Verification cog load."""
161 bot.add_cog(Verification(bot))
162 log.info("Cog loaded: Verification")
163
[end of bot/cogs/verification.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py
--- a/bot/cogs/verification.py
+++ b/bot/cogs/verification.py
@@ -1,10 +1,12 @@
import logging
+from datetime import datetime
from discord import Message, NotFound, Object
+from discord.ext import tasks
from discord.ext.commands import Bot, Cog, Context, command
from bot.cogs.moderation import ModLog
-from bot.constants import Channels, Event, Roles
+from bot.constants import Bot as BotConfig, Channels, Event, Roles
from bot.decorators import InChannelCheckFailure, in_channel, without_role
log = logging.getLogger(__name__)
@@ -27,12 +29,18 @@
If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.
"""
+PERIODIC_PING = (
+ f"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`."
+ f" Ping <@&{Roles.admin}> if you encounter any problems during the verification process."
+)
+
class Verification(Cog):
"""User verification and role self-management."""
def __init__(self, bot: Bot):
self.bot = bot
+ self.periodic_ping.start()
@property
def mod_log(self) -> ModLog:
@@ -155,6 +163,34 @@
else:
return True
+ @tasks.loop(hours=12)
+ async def periodic_ping(self) -> None:
+ """Every week, mention @everyone to remind them to verify."""
+ messages = self.bot.get_channel(Channels.verification).history(limit=10)
+ need_to_post = True # True if a new message needs to be sent.
+
+ async for message in messages:
+ if message.author == self.bot.user and message.content == PERIODIC_PING:
+ delta = datetime.utcnow() - message.created_at # Time since last message.
+ if delta.days >= 7: # Message is older than a week.
+ await message.delete()
+ else:
+ need_to_post = False
+
+ break
+
+ if need_to_post:
+ await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)
+
+ @periodic_ping.before_loop
+ async def before_ping(self) -> None:
+ """Only start the loop when the bot is ready."""
+ await self.bot.wait_until_ready()
+
+ def cog_unload(self) -> None:
+ """Cancel the periodic ping task when the cog is unloaded."""
+ self.periodic_ping.cancel()
+
def setup(bot: Bot) -> None:
"""Verification cog load."""
|
{"golden_diff": "diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py\n--- a/bot/cogs/verification.py\n+++ b/bot/cogs/verification.py\n@@ -1,10 +1,12 @@\n import logging\n+from datetime import datetime\n \n from discord import Message, NotFound, Object\n+from discord.ext import tasks\n from discord.ext.commands import Bot, Cog, Context, command\n \n from bot.cogs.moderation import ModLog\n-from bot.constants import Channels, Event, Roles\n+from bot.constants import Bot as BotConfig, Channels, Event, Roles\n from bot.decorators import InChannelCheckFailure, in_channel, without_role\n \n log = logging.getLogger(__name__)\n@@ -27,12 +29,18 @@\n If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.\n \"\"\"\n \n+PERIODIC_PING = (\n+ f\"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`.\"\n+ f\" Ping <@&{Roles.admin}> if you encounter any problems during the verification process.\"\n+)\n+\n \n class Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n \n def __init__(self, bot: Bot):\n self.bot = bot\n+ self.periodic_ping.start()\n \n @property\n def mod_log(self) -> ModLog:\n@@ -155,6 +163,34 @@\n else:\n return True\n \n+ @tasks.loop(hours=12)\n+ async def periodic_ping(self) -> None:\n+ \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n+ messages = self.bot.get_channel(Channels.verification).history(limit=10)\n+ need_to_post = True # True if a new message needs to be sent.\n+\n+ async for message in messages:\n+ if message.author == self.bot.user and message.content == PERIODIC_PING:\n+ delta = datetime.utcnow() - message.created_at # Time since last message.\n+ if delta.days >= 7: # Message is older than a week.\n+ await message.delete()\n+ else:\n+ need_to_post = False\n+\n+ break\n+\n+ if need_to_post:\n+ await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)\n+\n+ @periodic_ping.before_loop\n+ async def before_ping(self) -> None:\n+ \"\"\"Only start the loop when the bot is ready.\"\"\"\n+ await self.bot.wait_until_ready()\n+\n+ def cog_unload(self) -> None:\n+ \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n+ self.periodic_ping.cancel()\n+\n \n def setup(bot: Bot) -> None:\n \"\"\"Verification cog load.\"\"\"\n", "issue": "Have @Python add @everyone pings to checkpoint on a monthly basis\nWe've noticed that adding an @everyone message to the checkpoint channel every once in a while triggers a flood of users accepting our rules to gain the Developers role. Currently, those @everyone messages are added manually every once in a while, but it should be very straightforward to automate the process.\r\n\r\nI would leave the current welcome message untouched, since it's a message from a human, but do it like this:\r\n\r\n1. Remove previous @everyone message from @Python in that channel\r\n2. Post message that says something like:\r\n - @everyone Please ping `@Admins` if you encounter any problems during the verification process.\r\n\r\nI'm not sure what frequency is best for this. I wouldn't mind it once a month or even once a week. 
If people get bothered by it, they either accept and join for real or leave the server in some kind of self-purge (they've not accepted our rules anyway).\n", "before_files": [{"content": "import logging\n\nfrom discord import Message, NotFound, Object\nfrom discord.ext.commands import Bot, Cog, Context, command\n\nfrom bot.cogs.moderation import ModLog\nfrom bot.constants import Channels, Event, Roles\nfrom bot.decorators import InChannelCheckFailure, in_channel, without_role\n\nlog = logging.getLogger(__name__)\n\nWELCOME_MESSAGE = f\"\"\"\nHello! Welcome to the server, and thanks for verifying yourself!\n\nFor your records, these are the documents you accepted:\n\n`1)` Our rules, here: <https://pythondiscord.com/pages/rules>\n`2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \\\nyour information removed here as well.\n\nFeel free to review them at any point!\n\nAdditionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \\\nfrom time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \\\n**Announcements** role. We'll mention this role every time we make an announcement.\n\nIf you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.\n\"\"\"\n\n\nclass Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Get currently loaded ModLog cog instance.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n if message.author.bot:\n return # They're a bot, ignore\n\n ctx = await self.bot.get_context(message) # type: Context\n\n if ctx.command is not None and ctx.command.name == \"accept\":\n return # They used the accept command\n\n if ctx.channel.id == Channels.verification: # We're in the verification channel\n for role in ctx.author.roles:\n if role.id == Roles.verified:\n log.warning(f\"{ctx.author} posted '{ctx.message.content}' \"\n \"in the verification channel, but is already verified.\")\n return # They're already verified\n\n log.debug(f\"{ctx.author} posted '{ctx.message.content}' in the verification \"\n \"channel. We are providing instructions how to verify.\")\n await ctx.send(\n f\"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, \"\n f\"and gain access to the rest of the server.\",\n delete_after=20\n )\n\n log.trace(f\"Deleting the message posted by {ctx.author}\")\n\n try:\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)\n @without_role(Roles.verified)\n @in_channel(Channels.verification)\n async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Accept our rules and gain access to the rest of the server.\"\"\"\n log.debug(f\"{ctx.author} called !accept. 
Assigning the 'Developer' role.\")\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n except Exception:\n # Catch the exception, in case they have DMs off or something\n log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n try:\n self.mod_log.ignore(Event.message_delete, ctx.message.id)\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='subscribe')\n @in_channel(Channels.bot)\n async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Subscribe to announcement notifications by assigning yourself the role.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if has_role:\n await ctx.send(f\"{ctx.author.mention} You're already subscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !subscribe. Assigning the 'Announcements' role.\")\n await ctx.author.add_roles(Object(Roles.announcements), reason=\"Subscribed to announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.\",\n )\n\n @command(name='unsubscribe')\n @in_channel(Channels.bot)\n async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Unsubscribe from announcement notifications by removing the role from yourself.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if not has_role:\n await ctx.send(f\"{ctx.author.mention} You're already unsubscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.\")\n await ctx.author.remove_roles(Object(Roles.announcements), reason=\"Unsubscribed from announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications.\"\n )\n\n # This cannot be static (must have a __func__ attribute).\n async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n \"\"\"Check for & ignore any InChannelCheckFailure.\"\"\"\n if isinstance(error, InChannelCheckFailure):\n error.handled = True\n\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n if ctx.channel.id == Channels.verification:\n return ctx.command.name == \"accept\"\n else:\n return True\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Verification cog load.\"\"\"\n bot.add_cog(Verification(bot))\n log.info(\"Cog loaded: Verification\")\n", "path": "bot/cogs/verification.py"}]}
| 2,536 | 608 |
gh_patches_debug_20305
|
rasdani/github-patches
|
git_diff
|
frappe__hrms-1538
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IFSC Code showing wrong value in Bank Remittance Report
### Information about bug
IFSC Code showing wrong value in Bank Remittance Report. It is showing the same IFSC Code for all the employees in the list.
### Module
Payroll
### Version
ERPNext: v14.52.1 (HEAD)
Frappe Framework: v14.57.0 (HEAD)
Frappe HR: v14.18.1 (HEAD)
### Installation method
FrappeCloud
### Relevant log output / Stack trace / Full Error Message.
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
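For context, the lookup bug behind this symptom is easy to reproduce in isolation: `get_emp_bank_ifsc_code` in the listing below indexes the map with the loop variable left over from building it (`code.name`) instead of the employee on the slip being processed, so every row ends up with the last employee's IFSC code. A stripped-down sketch with hypothetical data:

```python
employees = [
    {"name": "HR-EMP-001", "ifsc_code": "AAAA0000001"},
    {"name": "HR-EMP-002", "ifsc_code": "BBBB0000002"},
]
slips = [{"employee": "HR-EMP-001"}, {"employee": "HR-EMP-002"}]

ifsc_map = {}
for emp in employees:
    ifsc_map[emp["name"]] = emp

for slip in slips:
    # BUG: `emp` is still the last employee from the loop above,
    # so both slips receive "BBBB0000002".
    slip["ifsc_code"] = ifsc_map[emp["name"]]["ifsc_code"]

for slip in slips:
    # FIX: key the lookup off the current slip's employee.
    slip["ifsc_code"] = ifsc_map[slip["employee"]]["ifsc_code"]
```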
<code>
[start of hrms/payroll/report/bank_remittance/bank_remittance.py]
1 # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
2 # For license information, please see license.txt
3
4
5 import frappe
6 from frappe import _, get_all
7
8
9 def execute(filters=None):
10 columns = [
11 {
12 "label": _("Payroll Number"),
13 "fieldtype": "Link",
14 "fieldname": "payroll_no",
15 "options": "Payroll Entry",
16 "width": 150,
17 },
18 {
19 "label": _("Debit A/C Number"),
20 "fieldtype": "Int",
21 "fieldname": "debit_account",
22 "hidden": 1,
23 "width": 200,
24 },
25 {"label": _("Payment Date"), "fieldtype": "Data", "fieldname": "payment_date", "width": 100},
26 {
27 "label": _("Employee Name"),
28 "fieldtype": "Link",
29 "fieldname": "employee_name",
30 "options": "Employee",
31 "width": 200,
32 },
33 {"label": _("Bank Name"), "fieldtype": "Data", "fieldname": "bank_name", "width": 50},
34 {
35 "label": _("Employee A/C Number"),
36 "fieldtype": "Int",
37 "fieldname": "employee_account_no",
38 "width": 50,
39 },
40 ]
41
42 if frappe.db.has_column("Employee", "ifsc_code"):
43 columns.append(
44 {"label": _("IFSC Code"), "fieldtype": "Data", "fieldname": "bank_code", "width": 100}
45 )
46
47 columns += [
48 {"label": _("Currency"), "fieldtype": "Data", "fieldname": "currency", "width": 50},
49 {
50 "label": _("Net Salary Amount"),
51 "fieldtype": "Currency",
52 "options": "currency",
53 "fieldname": "amount",
54 "width": 100,
55 },
56 ]
57
58 data = []
59
60 accounts = get_bank_accounts()
61 payroll_entries = get_payroll_entries(accounts, filters)
62 salary_slips = get_salary_slips(payroll_entries)
63
64 if frappe.db.has_column("Employee", "ifsc_code"):
65 get_emp_bank_ifsc_code(salary_slips)
66
67 for salary in salary_slips:
68 if (
69 salary.bank_name
70 and salary.bank_account_no
71 and salary.debit_acc_no
72 and salary.status in ["Submitted", "Paid"]
73 ):
74 row = {
75 "payroll_no": salary.payroll_entry,
76 "debit_account": salary.debit_acc_no,
77 "payment_date": frappe.utils.formatdate(salary.modified.strftime("%Y-%m-%d")),
78 "bank_name": salary.bank_name,
79 "employee_account_no": salary.bank_account_no,
80 "bank_code": salary.ifsc_code,
81 "employee_name": salary.employee + ": " + salary.employee_name,
82 "currency": frappe.get_cached_value("Company", filters.company, "default_currency"),
83 "amount": salary.net_pay,
84 }
85 data.append(row)
86
87 return columns, data
88
89
90 def get_bank_accounts():
91 accounts = [d.name for d in get_all("Account", filters={"account_type": "Bank"})]
92 return accounts
93
94
95 def get_payroll_entries(accounts, filters):
96 payroll_filter = [
97 ("payment_account", "IN", accounts),
98 ("number_of_employees", ">", 0),
99 ("Company", "=", filters.company),
100 ]
101 if filters.to_date:
102 payroll_filter.append(("posting_date", "<", filters.to_date))
103
104 if filters.from_date:
105 payroll_filter.append(("posting_date", ">", filters.from_date))
106
107 entries = get_all("Payroll Entry", payroll_filter, ["name", "payment_account"])
108
109 payment_accounts = [d.payment_account for d in entries]
110 entries = set_company_account(payment_accounts, entries)
111 return entries
112
113
114 def get_salary_slips(payroll_entries):
115 payroll = [d.name for d in payroll_entries]
116 salary_slips = get_all(
117 "Salary Slip",
118 filters=[("payroll_entry", "IN", payroll)],
119 fields=[
120 "modified",
121 "net_pay",
122 "bank_name",
123 "bank_account_no",
124 "payroll_entry",
125 "employee",
126 "employee_name",
127 "status",
128 ],
129 )
130
131 payroll_entry_map = {}
132 for entry in payroll_entries:
133 payroll_entry_map[entry.name] = entry
134
135 # appending company debit accounts
136 for slip in salary_slips:
137 if slip.payroll_entry:
138 slip["debit_acc_no"] = payroll_entry_map[slip.payroll_entry]["company_account"]
139 else:
140 slip["debit_acc_no"] = None
141
142 return salary_slips
143
144
145 def get_emp_bank_ifsc_code(salary_slips):
146 emp_names = [d.employee for d in salary_slips]
147 ifsc_codes = get_all("Employee", [("name", "IN", emp_names)], ["ifsc_code", "name"])
148
149 ifsc_codes_map = {}
150 for code in ifsc_codes:
151 ifsc_codes_map[code.name] = code
152
153 for slip in salary_slips:
154 slip["ifsc_code"] = ifsc_codes_map[code.name]["ifsc_code"]
155
156 return salary_slips
157
158
159 def set_company_account(payment_accounts, payroll_entries):
160 company_accounts = get_all(
161 "Bank Account", [("account", "in", payment_accounts)], ["account", "bank_account_no"]
162 )
163 company_accounts_map = {}
164 for acc in company_accounts:
165 company_accounts_map[acc.account] = acc
166
167 for entry in payroll_entries:
168 company_account = ""
169 if entry.payment_account in company_accounts_map:
170 company_account = company_accounts_map[entry.payment_account]["bank_account_no"]
171 entry["company_account"] = company_account
172
173 return payroll_entries
174
[end of hrms/payroll/report/bank_remittance/bank_remittance.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hrms/payroll/report/bank_remittance/bank_remittance.py b/hrms/payroll/report/bank_remittance/bank_remittance.py
--- a/hrms/payroll/report/bank_remittance/bank_remittance.py
+++ b/hrms/payroll/report/bank_remittance/bank_remittance.py
@@ -22,7 +22,12 @@
"hidden": 1,
"width": 200,
},
- {"label": _("Payment Date"), "fieldtype": "Data", "fieldname": "payment_date", "width": 100},
+ {
+ "label": _("Payment Date"),
+ "fieldtype": "Data",
+ "fieldname": "payment_date",
+ "width": 100,
+ },
{
"label": _("Employee Name"),
"fieldtype": "Link",
@@ -146,12 +151,10 @@
emp_names = [d.employee for d in salary_slips]
ifsc_codes = get_all("Employee", [("name", "IN", emp_names)], ["ifsc_code", "name"])
- ifsc_codes_map = {}
- for code in ifsc_codes:
- ifsc_codes_map[code.name] = code
+ ifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}
for slip in salary_slips:
- slip["ifsc_code"] = ifsc_codes_map[code.name]["ifsc_code"]
+ slip["ifsc_code"] = ifsc_codes_map[slip.employee]
return salary_slips
|
{"golden_diff": "diff --git a/hrms/payroll/report/bank_remittance/bank_remittance.py b/hrms/payroll/report/bank_remittance/bank_remittance.py\n--- a/hrms/payroll/report/bank_remittance/bank_remittance.py\n+++ b/hrms/payroll/report/bank_remittance/bank_remittance.py\n@@ -22,7 +22,12 @@\n \t\t\t\"hidden\": 1,\n \t\t\t\"width\": 200,\n \t\t},\n-\t\t{\"label\": _(\"Payment Date\"), \"fieldtype\": \"Data\", \"fieldname\": \"payment_date\", \"width\": 100},\n+\t\t{\n+\t\t\t\"label\": _(\"Payment Date\"),\n+\t\t\t\"fieldtype\": \"Data\",\n+\t\t\t\"fieldname\": \"payment_date\",\n+\t\t\t\"width\": 100,\n+\t\t},\n \t\t{\n \t\t\t\"label\": _(\"Employee Name\"),\n \t\t\t\"fieldtype\": \"Link\",\n@@ -146,12 +151,10 @@\n \temp_names = [d.employee for d in salary_slips]\n \tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n \n-\tifsc_codes_map = {}\n-\tfor code in ifsc_codes:\n-\t\tifsc_codes_map[code.name] = code\n+\tifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}\n \n \tfor slip in salary_slips:\n-\t\tslip[\"ifsc_code\"] = ifsc_codes_map[code.name][\"ifsc_code\"]\n+\t\tslip[\"ifsc_code\"] = ifsc_codes_map[slip.employee]\n \n \treturn salary_slips\n", "issue": "IFSC Code showing wrong value in Bank Remittance Report\n### Information about bug\n\nIFSC Code showing wrong value in Bank Remittance Report. It is showing the same IFSC Code for all the employee in the list.\n\n### Module\n\nPayroll\n\n### Version\n\nERPNext: v14.52.1 (HEAD)\r\nFrappe Framework: v14.57.0 (HEAD)\r\nFrappe HR: v14.18.1 (HEAD)\n\n### Installation method\n\nFrappeCloud\n\n### Relevant log output / Stack trace / Full Error Message.\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\n\nimport frappe\nfrom frappe import _, get_all\n\n\ndef execute(filters=None):\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"Payroll Number\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"payroll_no\",\n\t\t\t\"options\": \"Payroll Entry\",\n\t\t\t\"width\": 150,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Debit A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"debit_account\",\n\t\t\t\"hidden\": 1,\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Payment Date\"), \"fieldtype\": \"Data\", \"fieldname\": \"payment_date\", \"width\": 100},\n\t\t{\n\t\t\t\"label\": _(\"Employee Name\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"employee_name\",\n\t\t\t\"options\": \"Employee\",\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Bank Name\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_name\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Employee A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"employee_account_no\",\n\t\t\t\"width\": 50,\n\t\t},\n\t]\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tcolumns.append(\n\t\t\t{\"label\": _(\"IFSC Code\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_code\", \"width\": 100}\n\t\t)\n\n\tcolumns += [\n\t\t{\"label\": _(\"Currency\"), \"fieldtype\": \"Data\", \"fieldname\": \"currency\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Net Salary Amount\"),\n\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\"options\": \"currency\",\n\t\t\t\"fieldname\": \"amount\",\n\t\t\t\"width\": 100,\n\t\t},\n\t]\n\n\tdata = []\n\n\taccounts = get_bank_accounts()\n\tpayroll_entries = get_payroll_entries(accounts, filters)\n\tsalary_slips = get_salary_slips(payroll_entries)\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tget_emp_bank_ifsc_code(salary_slips)\n\n\tfor salary in salary_slips:\n\t\tif (\n\t\t\tsalary.bank_name\n\t\t\tand salary.bank_account_no\n\t\t\tand salary.debit_acc_no\n\t\t\tand salary.status in [\"Submitted\", \"Paid\"]\n\t\t):\n\t\t\trow = {\n\t\t\t\t\"payroll_no\": salary.payroll_entry,\n\t\t\t\t\"debit_account\": salary.debit_acc_no,\n\t\t\t\t\"payment_date\": frappe.utils.formatdate(salary.modified.strftime(\"%Y-%m-%d\")),\n\t\t\t\t\"bank_name\": salary.bank_name,\n\t\t\t\t\"employee_account_no\": salary.bank_account_no,\n\t\t\t\t\"bank_code\": salary.ifsc_code,\n\t\t\t\t\"employee_name\": salary.employee + \": \" + salary.employee_name,\n\t\t\t\t\"currency\": frappe.get_cached_value(\"Company\", filters.company, \"default_currency\"),\n\t\t\t\t\"amount\": salary.net_pay,\n\t\t\t}\n\t\t\tdata.append(row)\n\n\treturn columns, data\n\n\ndef get_bank_accounts():\n\taccounts = [d.name for d in get_all(\"Account\", filters={\"account_type\": \"Bank\"})]\n\treturn accounts\n\n\ndef get_payroll_entries(accounts, filters):\n\tpayroll_filter = [\n\t\t(\"payment_account\", \"IN\", accounts),\n\t\t(\"number_of_employees\", \">\", 0),\n\t\t(\"Company\", \"=\", filters.company),\n\t]\n\tif filters.to_date:\n\t\tpayroll_filter.append((\"posting_date\", \"<\", filters.to_date))\n\n\tif filters.from_date:\n\t\tpayroll_filter.append((\"posting_date\", \">\", filters.from_date))\n\n\tentries = get_all(\"Payroll Entry\", payroll_filter, [\"name\", \"payment_account\"])\n\n\tpayment_accounts = [d.payment_account for d in entries]\n\tentries = set_company_account(payment_accounts, entries)\n\treturn entries\n\n\ndef get_salary_slips(payroll_entries):\n\tpayroll = [d.name for d in 
payroll_entries]\n\tsalary_slips = get_all(\n\t\t\"Salary Slip\",\n\t\tfilters=[(\"payroll_entry\", \"IN\", payroll)],\n\t\tfields=[\n\t\t\t\"modified\",\n\t\t\t\"net_pay\",\n\t\t\t\"bank_name\",\n\t\t\t\"bank_account_no\",\n\t\t\t\"payroll_entry\",\n\t\t\t\"employee\",\n\t\t\t\"employee_name\",\n\t\t\t\"status\",\n\t\t],\n\t)\n\n\tpayroll_entry_map = {}\n\tfor entry in payroll_entries:\n\t\tpayroll_entry_map[entry.name] = entry\n\n\t# appending company debit accounts\n\tfor slip in salary_slips:\n\t\tif slip.payroll_entry:\n\t\t\tslip[\"debit_acc_no\"] = payroll_entry_map[slip.payroll_entry][\"company_account\"]\n\t\telse:\n\t\t\tslip[\"debit_acc_no\"] = None\n\n\treturn salary_slips\n\n\ndef get_emp_bank_ifsc_code(salary_slips):\n\temp_names = [d.employee for d in salary_slips]\n\tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n\n\tifsc_codes_map = {}\n\tfor code in ifsc_codes:\n\t\tifsc_codes_map[code.name] = code\n\n\tfor slip in salary_slips:\n\t\tslip[\"ifsc_code\"] = ifsc_codes_map[code.name][\"ifsc_code\"]\n\n\treturn salary_slips\n\n\ndef set_company_account(payment_accounts, payroll_entries):\n\tcompany_accounts = get_all(\n\t\t\"Bank Account\", [(\"account\", \"in\", payment_accounts)], [\"account\", \"bank_account_no\"]\n\t)\n\tcompany_accounts_map = {}\n\tfor acc in company_accounts:\n\t\tcompany_accounts_map[acc.account] = acc\n\n\tfor entry in payroll_entries:\n\t\tcompany_account = \"\"\n\t\tif entry.payment_account in company_accounts_map:\n\t\t\tcompany_account = company_accounts_map[entry.payment_account][\"bank_account_no\"]\n\t\tentry[\"company_account\"] = company_account\n\n\treturn payroll_entries\n", "path": "hrms/payroll/report/bank_remittance/bank_remittance.py"}]}
| 2,478 | 365 |
gh_patches_debug_55161
|
rasdani/github-patches
|
git_diff
|
spack__spack-5031
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adios2 missing zeromq dependency
@ax3l : I did a normal build without specifying any variants
Call Stack (most recent call first):
..../cmake-3.9/Modules/FindPackageHandleStandardArgs.cmake:377 (_FPHSA_FAILURE_MESSAGE)
cmake/FindZeroMQ.cmake:44 (find_package_handle_standard_args)
source/dataman/CMakeLists.txt:51 (find_package)
</issue>
<code>
[start of var/spack/repos/builtin/packages/adios2/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Adios2(CMakePackage):
29 """Next generation of ADIOS developed in the Exascale Computing Program"""
30
31 homepage = "https://www.olcf.ornl.gov/center-projects/adios/"
32 url = "https://github.com/ornladios/ADIOS2/archive/v2.0.0.tar.gz"
33
34 version('develop', branch='master',
35 git='https://github.com/ornladios/ADIOS2.git')
36
37 version('2.0.0', '019115e5c6ac28bd0f4201f590f5d994')
38
39 variant('shared', default=True,
40 description='Also build shared libraries')
41 variant('mpi', default=True,
42 description='Enable MPI')
43 # transforms (not yet implemented)
44 # variant('bzip2', default=True,
45 # description='Enable BZip2 compression')
46 # variant('zfp', default=True,
47 # description='Enable ZFP compression')
48 # transport engines
49 variant('dataman', default=True,
50 description='Enable the DataMan engine for WAN transports')
51 # currently required by DataMan, optional in the future
52 # variant('zeromq', default=False,
53 # description='Enable ZeroMQ for the DataMan engine')
54 variant('hdf5', default=False,
55 description='Enable the HDF5 engine')
56 variant('adios1', default=False,
57 description='Enable the ADIOS 1.x engine')
58 # language bindings
59 variant('python', default=True,
60 description='Enable the Python >= 2.7 bindings')
61
62 # requires mature C++11 implementations
63 conflicts('%gcc@:4.7')
64 conflicts('%intel@:15')
65 conflicts('%pgi@:14')
66
67 # DataMan needs dlopen
68 conflicts('+dataman', when='~shared')
69
70 depends_on('[email protected]:', type='build')
71
72 # contained in thirdparty/
73 # depends_on('googletest')
74 # depends_on('pugixml')
75 # depends_on('kwsys')
76 # depends_on('nlohmannjson')
77 # depends_on('[email protected]:', when='+python')
78
79 depends_on('mpi', when='+mpi')
80
81 depends_on('hdf5', when='+hdf5')
82 depends_on('hdf5+mpi', when='+hdf5+mpi')
83 depends_on('adios', when='+adios1')
84 depends_on('adios+mpi', when='+adios1+mpi')
85
86 depends_on('bzip2', when='+bzip2')
87 depends_on('zfp', when='+zfp')
88
89 extends('python', when='+python')
90 depends_on('[email protected]:', type=('build', 'run'), when='+python')
91 depends_on('[email protected]:', type=('build', 'run'), when='+python')
92 depends_on('[email protected]:', type=('build', 'run'), when='+mpi +python')
93
94 def cmake_args(self):
95 spec = self.spec
96
97 args = [
98 '-DADIOS2_BUILD_SHARED_LIBS:BOOL={0}'.format((
99 'ON' if '+shared' in spec else 'OFF')),
100 '-DADIOS2_BUILD_TESTING=OFF',
101 '-DADIOS2_USE_MPI={0}'.format((
102 'ON' if '+mpi' in spec else 'OFF')),
103 '-DADIOS2_USE_BZip2={0}'.format((
104 'ON' if '+bzip2' in spec else 'OFF')),
105 '-DADIOS2_USE_ZFP={0}'.format((
106 'ON' if '+zfp' in spec else 'OFF')),
107 '-DADIOS2_USE_DataMan={0}'.format((
108 'ON' if '+dataman' in spec else 'OFF')),
109 '-DADIOS2_USE_ZeroMQ={0}'.format((
110 'ON' if '+dataman' in spec else 'OFF')),
111 '-DADIOS2_USE_HDF5={0}'.format((
112 'ON' if '+hdf5' in spec else 'OFF')),
113 '-DADIOS2_USE_ADIOS1={0}'.format((
114 'ON' if '+adios1' in spec else 'OFF')),
115 '-DADIOS2_USE_Python={0}'.format((
116 'ON' if '+python' in spec else 'OFF'))
117 ]
118 return args
119
[end of var/spack/repos/builtin/packages/adios2/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/adios2/package.py b/var/spack/repos/builtin/packages/adios2/package.py
--- a/var/spack/repos/builtin/packages/adios2/package.py
+++ b/var/spack/repos/builtin/packages/adios2/package.py
@@ -77,6 +77,7 @@
# depends_on('[email protected]:', when='+python')
depends_on('mpi', when='+mpi')
+ depends_on('zeromq', when='+dataman')
depends_on('hdf5', when='+hdf5')
depends_on('hdf5+mpi', when='+hdf5+mpi')
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/adios2/package.py b/var/spack/repos/builtin/packages/adios2/package.py\n--- a/var/spack/repos/builtin/packages/adios2/package.py\n+++ b/var/spack/repos/builtin/packages/adios2/package.py\n@@ -77,6 +77,7 @@\n # depends_on('[email protected]:', when='+python')\n \n depends_on('mpi', when='+mpi')\n+ depends_on('zeromq', when='+dataman')\n \n depends_on('hdf5', when='+hdf5')\n depends_on('hdf5+mpi', when='+hdf5+mpi')\n", "issue": "Adios2 missing zeromq dependency\n@ax3l : I did a normal build without specifying any variants\r\n\r\n Call Stack (most recent call first):\r\n ..../cmake-3.9/Modules/FindPackageHandleStandardArgs.cmake:377 (_FPHSA_FAILURE_MESSAGE)\r\n cmake/FindZeroMQ.cmake:44 (find_package_handle_standard_args)\r\n source/dataman/CMakeLists.txt:51 (find_package)\r\n\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Adios2(CMakePackage):\n \"\"\"Next generation of ADIOS developed in the Exascale Computing Program\"\"\"\n\n homepage = \"https://www.olcf.ornl.gov/center-projects/adios/\"\n url = \"https://github.com/ornladios/ADIOS2/archive/v2.0.0.tar.gz\"\n\n version('develop', branch='master',\n git='https://github.com/ornladios/ADIOS2.git')\n\n version('2.0.0', '019115e5c6ac28bd0f4201f590f5d994')\n\n variant('shared', default=True,\n description='Also build shared libraries')\n variant('mpi', default=True,\n description='Enable MPI')\n # transforms (not yet implemented)\n # variant('bzip2', default=True,\n # description='Enable BZip2 compression')\n # variant('zfp', default=True,\n # description='Enable ZFP compression')\n # transport engines\n variant('dataman', default=True,\n description='Enable the DataMan engine for WAN transports')\n # currently required by DataMan, optional in the future\n # variant('zeromq', default=False,\n # description='Enable ZeroMQ for the DataMan engine')\n variant('hdf5', default=False,\n description='Enable the HDF5 engine')\n variant('adios1', default=False,\n description='Enable the ADIOS 1.x engine')\n # language bindings\n variant('python', default=True,\n description='Enable the Python >= 2.7 bindings')\n\n # requires mature C++11 implementations\n conflicts('%gcc@:4.7')\n conflicts('%intel@:15')\n conflicts('%pgi@:14')\n\n # 
DataMan needs dlopen\n conflicts('+dataman', when='~shared')\n\n depends_on('[email protected]:', type='build')\n\n # contained in thirdparty/\n # depends_on('googletest')\n # depends_on('pugixml')\n # depends_on('kwsys')\n # depends_on('nlohmannjson')\n # depends_on('[email protected]:', when='+python')\n\n depends_on('mpi', when='+mpi')\n\n depends_on('hdf5', when='+hdf5')\n depends_on('hdf5+mpi', when='+hdf5+mpi')\n depends_on('adios', when='+adios1')\n depends_on('adios+mpi', when='+adios1+mpi')\n\n depends_on('bzip2', when='+bzip2')\n depends_on('zfp', when='+zfp')\n\n extends('python', when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+python')\n depends_on('[email protected]:', type=('build', 'run'), when='+mpi +python')\n\n def cmake_args(self):\n spec = self.spec\n\n args = [\n '-DADIOS2_BUILD_SHARED_LIBS:BOOL={0}'.format((\n 'ON' if '+shared' in spec else 'OFF')),\n '-DADIOS2_BUILD_TESTING=OFF',\n '-DADIOS2_USE_MPI={0}'.format((\n 'ON' if '+mpi' in spec else 'OFF')),\n '-DADIOS2_USE_BZip2={0}'.format((\n 'ON' if '+bzip2' in spec else 'OFF')),\n '-DADIOS2_USE_ZFP={0}'.format((\n 'ON' if '+zfp' in spec else 'OFF')),\n '-DADIOS2_USE_DataMan={0}'.format((\n 'ON' if '+dataman' in spec else 'OFF')),\n '-DADIOS2_USE_ZeroMQ={0}'.format((\n 'ON' if '+dataman' in spec else 'OFF')),\n '-DADIOS2_USE_HDF5={0}'.format((\n 'ON' if '+hdf5' in spec else 'OFF')),\n '-DADIOS2_USE_ADIOS1={0}'.format((\n 'ON' if '+adios1' in spec else 'OFF')),\n '-DADIOS2_USE_Python={0}'.format((\n 'ON' if '+python' in spec else 'OFF'))\n ]\n return args\n", "path": "var/spack/repos/builtin/packages/adios2/package.py"}]}
| 2,179 | 154 |