problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-18.9k | golden_diff stringlengths 145-5.13k | verification_info stringlengths 465-23.6k | num_tokens_prompt int64 556-4.1k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_42018
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-949
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add BLEU as a metric
</issue>
<code>
[start of parlai/core/metrics.py]
1 # Copyright (c) 2017-present, Facebook, Inc.
2 # All rights reserved.
3 # This source code is licensed under the BSD-style license found in the
4 # LICENSE file in the root directory of this source tree. An additional grant
5 # of patent rights can be found in the PATENTS file in the same directory.
6 """Provides standard metric evaluations for dialog.
7 Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics
8 between processes.
9 """
10
11 from parlai.core.thread_utils import SharedTable
12 from parlai.core.utils import round_sigfigs, no_lock
13 from collections import Counter
14
15 import re
16 import math
17
18 re_art = re.compile(r'\b(a|an|the)\b')
19 re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
20 def normalize_answer(s):
21 """Lower text and remove punctuation, articles and extra whitespace."""
22 def remove_articles(text):
23 return re_art.sub(' ', text)
24
25 def white_space_fix(text):
26 return ' '.join(text.split())
27
28 def remove_punc(text):
29 return re_punc.sub(' ', text) # convert punctuation to spaces
30
31 def lower(text):
32 return text.lower()
33
34 return white_space_fix(remove_articles(remove_punc(lower(s))))
35
36
37 def _exact_match(guess, answers):
38 """Check if guess is a (normalized) exact match with any answer."""
39 if guess is None or answers is None:
40 return False
41 guess = normalize_answer(guess)
42 for a in answers:
43 if guess == normalize_answer(a):
44 return True
45 return False
46
47
48 def _f1_score(guess, answers):
49 """Return the max F1 score between the guess and any answer."""
50 def _score(g_tokens, a_tokens):
51 common = Counter(g_tokens) & Counter(a_tokens)
52 num_same = sum(common.values())
53 if num_same == 0:
54 return 0
55 precision = 1.0 * num_same / len(g_tokens)
56 recall = 1.0 * num_same / len(a_tokens)
57 f1 = (2 * precision * recall) / (precision + recall)
58 return f1
59
60 if guess is None or answers is None:
61 return 0
62 g_tokens = normalize_answer(guess).split()
63 scores = [_score(g_tokens, normalize_answer(a).split()) for a in answers]
64 return max(scores)
65
66
67 def aggregate_metrics(reporters):
68 #reporters is a list of teachers or worlds
69 m = {}
70 m['tasks'] = {}
71 sums = {'accuracy': 0, 'f1': 0, 'loss': 0, 'ppl': 0}
72 num_tasks = 0
73 total = 0
74 for i in range(len(reporters)):
75 tid = reporters[i].getID()
76 mt = reporters[i].report()
77 while tid in m['tasks']:
78 # prevent name cloberring if using multiple tasks with same ID
79 tid += '_'
80 m['tasks'][tid] = mt
81 total += mt['exs']
82 found_any = False
83 for k in sums.keys():
84 if k in mt:
85 sums[k] += mt[k]
86 found_any = True
87 if found_any:
88 num_tasks += 1
89 m['exs'] = total
90 m['accuracy'] = 0
91 if num_tasks > 0:
92 for k in sums.keys():
93 m[k] = round_sigfigs(sums[k] / num_tasks, 4)
94 return m
95
96
97 def compute_time_metrics(world, max_time):
98 # Determine time_left and num_epochs
99 exs_per_epoch = world.num_examples() if world.num_examples() else 0
100 num_epochs = world.opt.get('num_epochs', 0)
101 max_exs = exs_per_epoch * num_epochs
102 total_exs = world.get_total_exs()
103
104 m = {}
105 if (max_exs > 0 and total_exs > 0) or max_time > 0:
106 m = {}
107 time_left = None
108 time = world.get_time()
109 total_epochs = world.get_total_epochs()
110
111 if (num_epochs > 0 and total_exs > 0 and max_exs > 0):
112 exs_per_sec = time / total_exs
113 time_left = (max_exs - total_exs) * exs_per_sec
114 if max_time > 0:
115 other_time_left = max_time - time
116 if time_left is not None:
117 time_left = min(time_left, other_time_left)
118 else:
119 time_left = other_time_left
120 if time_left is not None:
121 m['time_left'] = math.floor(time_left)
122 if num_epochs > 0:
123 if (total_exs > 0 and exs_per_epoch > 0):
124 display_epochs = int(total_exs / exs_per_epoch)
125 else:
126 display_epochs = total_epochs
127 m['num_epochs'] = display_epochs
128 return m
129
130
131 class Metrics(object):
132 """Class that maintains evaluation metrics over dialog."""
133
134 def __init__(self, opt):
135 self.metrics = {}
136 self.metrics['cnt'] = 0
137 self.metrics_list = ['mean_rank', 'loss', 'correct', 'f1', 'ppl']
138 for k in self.metrics_list:
139 self.metrics[k] = 0.0
140 self.metrics[k + '_cnt'] = 0
141 self.eval_pr = [1, 5, 10, 100]
142 for k in self.eval_pr:
143 self.metrics['hits@' + str(k)] = 0
144 self.metrics['hits@_cnt'] = 0
145 self.flags = {'has_text_cands': False, 'print_prediction_metrics': False}
146 if opt.get('numthreads', 1) > 1:
147 self.metrics = SharedTable(self.metrics)
148 self.flags = SharedTable(self.flags)
149
150 def __str__(self):
151 return str(self.metrics)
152
153 def __repr__(self):
154 representation = super().__repr__()
155 return representation.replace('>', ': {}>'.format(repr(self.metrics)))
156
157 def _lock(self):
158 if hasattr(self.metrics, 'get_lock'):
159 # use the shared_table's lock
160 return self.metrics.get_lock()
161 else:
162 # otherwise do nothing
163 return no_lock()
164
165 def update_ranking_metrics(self, observation, labels):
166 text_cands = observation.get('text_candidates', None)
167 if text_cands is None:
168 return
169 else:
170 text = observation.get('text', None)
171
172 # Now loop through text candidates, assuming they are sorted.
173 # If any of them is a label then score a point.
174 # maintain hits@1, 5, 10, 50, 100, etc.
175 label_set = set(normalize_answer(l) for l in labels)
176 cnts = {k: 0 for k in self.eval_pr}
177 cnt = 0
178 for c in text_cands:
179 cnt += 1
180 if normalize_answer(c) in label_set:
181 for k in self.eval_pr:
182 if cnt <= k:
183 cnts[k] += 1
184 # hits metric is 1 if cnts[k] > 0.
185 # (other metrics such as p@k and r@k take
186 # the value of cnt into account.)
187 with self._lock():
188 self.flags['has_text_cands'] = True
189 for k in self.eval_pr:
190 if cnts[k] > 0:
191 self.metrics['hits@' + str(k)] += 1
192 self.metrics['hits@_cnt'] += 1
193
194 def update(self, observation, labels):
195 with self._lock():
196 self.metrics['cnt'] += 1
197
198 # Exact match metric.
199 correct = 0
200 prediction = observation.get('text', None)
201 if prediction is not None:
202 if _exact_match(prediction, labels):
203 correct = 1
204 with self._lock():
205 self.flags['print_prediction_metrics'] = True
206 self.metrics['correct'] += correct
207 self.metrics['correct_cnt'] += 1
208
209 # F1 metric.
210 f1 = _f1_score(prediction, labels)
211 with self._lock():
212 self.metrics['f1'] += f1
213 self.metrics['f1_cnt'] += 1
214
215 # Ranking metrics.
216 self.update_ranking_metrics(observation, labels)
217
218 # User-reported metrics
219 if 'metrics' in observation:
220 for k, v in observation['metrics'].items():
221 if k not in ['correct', 'f1', 'hits@k']:
222 if k in self.metrics_list:
223 with self._lock():
224 self.metrics[k] += v
225 self.metrics[k + '_cnt'] += 1
226 else:
227 if type(self.metrics) is SharedTable:
228 # can't share custom metrics during hogwild
229 pass
230 else:
231 # no need to lock because not SharedTable
232 if k not in self.metrics:
233 self.metrics[k] = v
234 self.metrics_list.append(k)
235 self.metrics[k + '_cnt'] = 1.0
236 else:
237 self.metrics[k] += v
238
239 # Return a dict containing the metrics for this specific example.
240 # Metrics across all data is stored internally in the class, and
241 # can be accessed with the report method.
242 loss = {}
243 loss['correct'] = correct
244 return loss
245
246 def report(self):
247 # Report the metrics over all data seen so far.
248 m = {}
249 total = self.metrics['cnt']
250 m['exs'] = total
251 if total > 0:
252 if self.flags['print_prediction_metrics']:
253 m['accuracy'] = round_sigfigs(self.metrics['correct'] / max(1, self.metrics['correct_cnt']), 4)
254 m['f1'] = round_sigfigs(self.metrics['f1'] / max(1, self.metrics['f1_cnt']), 4)
255 if self.flags['has_text_cands']:
256 for k in self.eval_pr:
257 m['hits@' + str(k)] = round_sigfigs(
258 self.metrics['hits@' + str(k)] / max(1, self.metrics['hits@_cnt']), 3)
259 for k in self.metrics_list:
260 if self.metrics[k + '_cnt'] > 0 and k != 'correct' and k != 'f1':
261 m[k] = round_sigfigs(self.metrics[k] / max(1, self.metrics[k + '_cnt']), 4)
262 return m
263
264 def clear(self):
265 with self._lock():
266 self.metrics['cnt'] = 0
267 for k in self.metrics_list:
268 v = self.metrics[k]
269 v_typ = type(v)
270 if 'Tensor' in str(v_typ):
271 self.metrics[k].zero_()
272 else:
273 self.metrics[k] = 0.0
274 self.metrics[k + '_cnt'] = 0
275 for k in self.eval_pr:
276 self.metrics['hits@' + str(k)] = 0
277 self.metrics['hits@_cnt'] = 0
278
[end of parlai/core/metrics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parlai/core/metrics.py b/parlai/core/metrics.py
--- a/parlai/core/metrics.py
+++ b/parlai/core/metrics.py
@@ -15,8 +15,17 @@
import re
import math
+try:
+ from nltk.translate import bleu_score as nltkbleu
+except ImportError:
+ # User doesn't have nltk installed, so we can't use it for bleu
+ # We'll just turn off things, but we might want to warn the user
+ nltkbleu = None
+
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
+
+
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
@@ -64,11 +73,29 @@
return max(scores)
+def _bleu(guess, answers):
+ if nltkbleu is None:
+ # bleu library not installed, just return a default value
+ return None
+ # Warning: BLEU calculation *should* include proper tokenization and
+ # punctuation etc. We're using the normalize_answer for everything though,
+ # so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
+ # going to be slower than fairseq's (which is written in C), but fairseq's
+ # requires that everything be in arrays of ints (i.e. as tensors). NLTK's
+ # works with strings, which is better suited for this module.
+ return nltkbleu.sentence_bleu(
+ [normalize_answer(a).split(" ") for a in answers],
+ normalize_answer(guess).split(" ")
+ )
+
+
def aggregate_metrics(reporters):
#reporters is a list of teachers or worlds
m = {}
m['tasks'] = {}
sums = {'accuracy': 0, 'f1': 0, 'loss': 0, 'ppl': 0}
+ if nltkbleu is not None:
+ sums['bleu'] = 0
num_tasks = 0
total = 0
for i in range(len(reporters)):
@@ -135,6 +162,9 @@
self.metrics = {}
self.metrics['cnt'] = 0
self.metrics_list = ['mean_rank', 'loss', 'correct', 'f1', 'ppl']
+ if nltkbleu is not None:
+ # only compute bleu if we can
+ self.metrics_list.append('bleu')
for k in self.metrics_list:
self.metrics[k] = 0.0
self.metrics[k + '_cnt'] = 0
@@ -206,11 +236,15 @@
self.metrics['correct'] += correct
self.metrics['correct_cnt'] += 1
- # F1 metric.
+ # F1 and BLEU metrics.
f1 = _f1_score(prediction, labels)
+ bleu = _bleu(prediction, labels)
with self._lock():
self.metrics['f1'] += f1
self.metrics['f1_cnt'] += 1
+ if bleu is not None:
+ self.metrics['bleu'] += bleu
+ self.metrics['bleu_cnt'] += 1
# Ranking metrics.
self.update_ranking_metrics(observation, labels)
@@ -218,7 +252,7 @@
# User-reported metrics
if 'metrics' in observation:
for k, v in observation['metrics'].items():
- if k not in ['correct', 'f1', 'hits@k']:
+ if k not in ['correct', 'f1', 'hits@k', 'bleu']:
if k in self.metrics_list:
with self._lock():
self.metrics[k] += v
|
{"golden_diff": "diff --git a/parlai/core/metrics.py b/parlai/core/metrics.py\n--- a/parlai/core/metrics.py\n+++ b/parlai/core/metrics.py\n@@ -15,8 +15,17 @@\n import re\n import math\n \n+try:\n+ from nltk.translate import bleu_score as nltkbleu\n+except ImportError:\n+ # User doesn't have nltk installed, so we can't use it for bleu\n+ # We'll just turn off things, but we might want to warn the user\n+ nltkbleu = None\n+\n re_art = re.compile(r'\\b(a|an|the)\\b')\n re_punc = re.compile(r'[!\"#$%&()*+,-./:;<=>?@\\[\\]\\\\^`{|}~_\\']')\n+\n+\n def normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n@@ -64,11 +73,29 @@\n return max(scores)\n \n \n+def _bleu(guess, answers):\n+ if nltkbleu is None:\n+ # bleu library not installed, just return a default value\n+ return None\n+ # Warning: BLEU calculation *should* include proper tokenization and\n+ # punctuation etc. We're using the normalize_answer for everything though,\n+ # so we're over-estimating our BLEU scores. Also note that NLTK's bleu is\n+ # going to be slower than fairseq's (which is written in C), but fairseq's\n+ # requires that everything be in arrays of ints (i.e. as tensors). NLTK's\n+ # works with strings, which is better suited for this module.\n+ return nltkbleu.sentence_bleu(\n+ [normalize_answer(a).split(\" \") for a in answers],\n+ normalize_answer(guess).split(\" \")\n+ )\n+\n+\n def aggregate_metrics(reporters):\n #reporters is a list of teachers or worlds\n m = {}\n m['tasks'] = {}\n sums = {'accuracy': 0, 'f1': 0, 'loss': 0, 'ppl': 0}\n+ if nltkbleu is not None:\n+ sums['bleu'] = 0\n num_tasks = 0\n total = 0\n for i in range(len(reporters)):\n@@ -135,6 +162,9 @@\n self.metrics = {}\n self.metrics['cnt'] = 0\n self.metrics_list = ['mean_rank', 'loss', 'correct', 'f1', 'ppl']\n+ if nltkbleu is not None:\n+ # only compute bleu if we can\n+ self.metrics_list.append('bleu')\n for k in self.metrics_list:\n self.metrics[k] = 0.0\n self.metrics[k + '_cnt'] = 0\n@@ -206,11 +236,15 @@\n self.metrics['correct'] += correct\n self.metrics['correct_cnt'] += 1\n \n- # F1 metric.\n+ # F1 and BLEU metrics.\n f1 = _f1_score(prediction, labels)\n+ bleu = _bleu(prediction, labels)\n with self._lock():\n self.metrics['f1'] += f1\n self.metrics['f1_cnt'] += 1\n+ if bleu is not None:\n+ self.metrics['bleu'] += bleu\n+ self.metrics['bleu_cnt'] += 1\n \n # Ranking metrics.\n self.update_ranking_metrics(observation, labels)\n@@ -218,7 +252,7 @@\n # User-reported metrics\n if 'metrics' in observation:\n for k, v in observation['metrics'].items():\n- if k not in ['correct', 'f1', 'hits@k']:\n+ if k not in ['correct', 'f1', 'hits@k', 'bleu']:\n if k in self.metrics_list:\n with self._lock():\n self.metrics[k] += v\n", "issue": "Add BLEU as a metric\n\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\"\"\"Provides standard metric evaluations for dialog.\nUses locking and shared memory when ``numthreads`` is set to >1 to share metrics\nbetween processes.\n\"\"\"\n\nfrom parlai.core.thread_utils import SharedTable\nfrom parlai.core.utils import round_sigfigs, no_lock\nfrom collections import Counter\n\nimport re\nimport math\n\nre_art = re.compile(r'\\b(a|an|the)\\b')\nre_punc = re.compile(r'[!\"#$%&()*+,-./:;<=>?@\\[\\]\\\\^`{|}~_\\']')\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n return re_art.sub(' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n return re_punc.sub(' ', text) # convert punctuation to spaces\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef _exact_match(guess, answers):\n \"\"\"Check if guess is a (normalized) exact match with any answer.\"\"\"\n if guess is None or answers is None:\n return False\n guess = normalize_answer(guess)\n for a in answers:\n if guess == normalize_answer(a):\n return True\n return False\n\n\ndef _f1_score(guess, answers):\n \"\"\"Return the max F1 score between the guess and any answer.\"\"\"\n def _score(g_tokens, a_tokens):\n common = Counter(g_tokens) & Counter(a_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(g_tokens)\n recall = 1.0 * num_same / len(a_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n if guess is None or answers is None:\n return 0\n g_tokens = normalize_answer(guess).split()\n scores = [_score(g_tokens, normalize_answer(a).split()) for a in answers]\n return max(scores)\n\n\ndef aggregate_metrics(reporters):\n #reporters is a list of teachers or worlds\n m = {}\n m['tasks'] = {}\n sums = {'accuracy': 0, 'f1': 0, 'loss': 0, 'ppl': 0}\n num_tasks = 0\n total = 0\n for i in range(len(reporters)):\n tid = reporters[i].getID()\n mt = reporters[i].report()\n while tid in m['tasks']:\n # prevent name cloberring if using multiple tasks with same ID\n tid += '_'\n m['tasks'][tid] = mt\n total += mt['exs']\n found_any = False\n for k in sums.keys():\n if k in mt:\n sums[k] += mt[k]\n found_any = True\n if found_any:\n num_tasks += 1\n m['exs'] = total\n m['accuracy'] = 0\n if num_tasks > 0:\n for k in sums.keys():\n m[k] = round_sigfigs(sums[k] / num_tasks, 4)\n return m\n\n\ndef compute_time_metrics(world, max_time):\n # Determine time_left and num_epochs\n exs_per_epoch = world.num_examples() if world.num_examples() else 0\n num_epochs = world.opt.get('num_epochs', 0)\n max_exs = exs_per_epoch * num_epochs\n total_exs = world.get_total_exs()\n\n m = {}\n if (max_exs > 0 and total_exs > 0) or max_time > 0:\n m = {}\n time_left = None\n time = world.get_time()\n total_epochs = world.get_total_epochs()\n\n if (num_epochs > 0 and total_exs > 0 and max_exs > 0):\n exs_per_sec = time / total_exs\n time_left = (max_exs - total_exs) * exs_per_sec\n if max_time > 0:\n other_time_left = max_time - time\n if time_left is not None:\n time_left = min(time_left, other_time_left)\n else:\n time_left = other_time_left\n if time_left is not None:\n m['time_left'] = math.floor(time_left)\n if num_epochs > 0:\n if (total_exs > 0 and exs_per_epoch > 0):\n display_epochs = int(total_exs / exs_per_epoch)\n else:\n display_epochs = total_epochs\n 
m['num_epochs'] = display_epochs\n return m\n\n\nclass Metrics(object):\n \"\"\"Class that maintains evaluation metrics over dialog.\"\"\"\n\n def __init__(self, opt):\n self.metrics = {}\n self.metrics['cnt'] = 0\n self.metrics_list = ['mean_rank', 'loss', 'correct', 'f1', 'ppl']\n for k in self.metrics_list:\n self.metrics[k] = 0.0\n self.metrics[k + '_cnt'] = 0\n self.eval_pr = [1, 5, 10, 100]\n for k in self.eval_pr:\n self.metrics['hits@' + str(k)] = 0\n self.metrics['hits@_cnt'] = 0\n self.flags = {'has_text_cands': False, 'print_prediction_metrics': False}\n if opt.get('numthreads', 1) > 1:\n self.metrics = SharedTable(self.metrics)\n self.flags = SharedTable(self.flags)\n\n def __str__(self):\n return str(self.metrics)\n\n def __repr__(self):\n representation = super().__repr__()\n return representation.replace('>', ': {}>'.format(repr(self.metrics)))\n\n def _lock(self):\n if hasattr(self.metrics, 'get_lock'):\n # use the shared_table's lock\n return self.metrics.get_lock()\n else:\n # otherwise do nothing\n return no_lock()\n\n def update_ranking_metrics(self, observation, labels):\n text_cands = observation.get('text_candidates', None)\n if text_cands is None:\n return\n else:\n text = observation.get('text', None)\n\n # Now loop through text candidates, assuming they are sorted.\n # If any of them is a label then score a point.\n # maintain hits@1, 5, 10, 50, 100, etc.\n label_set = set(normalize_answer(l) for l in labels)\n cnts = {k: 0 for k in self.eval_pr}\n cnt = 0\n for c in text_cands:\n cnt += 1\n if normalize_answer(c) in label_set:\n for k in self.eval_pr:\n if cnt <= k:\n cnts[k] += 1\n # hits metric is 1 if cnts[k] > 0.\n # (other metrics such as p@k and r@k take\n # the value of cnt into account.)\n with self._lock():\n self.flags['has_text_cands'] = True\n for k in self.eval_pr:\n if cnts[k] > 0:\n self.metrics['hits@' + str(k)] += 1\n self.metrics['hits@_cnt'] += 1\n\n def update(self, observation, labels):\n with self._lock():\n self.metrics['cnt'] += 1\n\n # Exact match metric.\n correct = 0\n prediction = observation.get('text', None)\n if prediction is not None:\n if _exact_match(prediction, labels):\n correct = 1\n with self._lock():\n self.flags['print_prediction_metrics'] = True\n self.metrics['correct'] += correct\n self.metrics['correct_cnt'] += 1\n\n # F1 metric.\n f1 = _f1_score(prediction, labels)\n with self._lock():\n self.metrics['f1'] += f1\n self.metrics['f1_cnt'] += 1\n\n # Ranking metrics.\n self.update_ranking_metrics(observation, labels)\n\n # User-reported metrics\n if 'metrics' in observation:\n for k, v in observation['metrics'].items():\n if k not in ['correct', 'f1', 'hits@k']:\n if k in self.metrics_list:\n with self._lock():\n self.metrics[k] += v\n self.metrics[k + '_cnt'] += 1\n else:\n if type(self.metrics) is SharedTable:\n # can't share custom metrics during hogwild\n pass\n else:\n # no need to lock because not SharedTable\n if k not in self.metrics:\n self.metrics[k] = v\n self.metrics_list.append(k)\n self.metrics[k + '_cnt'] = 1.0\n else:\n self.metrics[k] += v\n\n # Return a dict containing the metrics for this specific example.\n # Metrics across all data is stored internally in the class, and\n # can be accessed with the report method.\n loss = {}\n loss['correct'] = correct\n return loss\n\n def report(self):\n # Report the metrics over all data seen so far.\n m = {}\n total = self.metrics['cnt']\n m['exs'] = total\n if total > 0:\n if self.flags['print_prediction_metrics']:\n m['accuracy'] = 
round_sigfigs(self.metrics['correct'] / max(1, self.metrics['correct_cnt']), 4)\n m['f1'] = round_sigfigs(self.metrics['f1'] / max(1, self.metrics['f1_cnt']), 4)\n if self.flags['has_text_cands']:\n for k in self.eval_pr:\n m['hits@' + str(k)] = round_sigfigs(\n self.metrics['hits@' + str(k)] / max(1, self.metrics['hits@_cnt']), 3)\n for k in self.metrics_list:\n if self.metrics[k + '_cnt'] > 0 and k != 'correct' and k != 'f1':\n m[k] = round_sigfigs(self.metrics[k] / max(1, self.metrics[k + '_cnt']), 4)\n return m\n\n def clear(self):\n with self._lock():\n self.metrics['cnt'] = 0\n for k in self.metrics_list:\n v = self.metrics[k]\n v_typ = type(v)\n if 'Tensor' in str(v_typ):\n self.metrics[k].zero_()\n else:\n self.metrics[k] = 0.0\n self.metrics[k + '_cnt'] = 0\n for k in self.eval_pr:\n self.metrics['hits@' + str(k)] = 0\n self.metrics['hits@_cnt'] = 0\n", "path": "parlai/core/metrics.py"}]}
| 3,707 | 895 |
gh_patches_debug_33802
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-2501
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cesm2_alpha10d doesn't attempt to download missing input files
I recently ported both alpha10c and alpha10d on my macbook. While alpha10c successfully downloads all missing input files, alpha10d doesn't appear to attempt to download missing input files. I've encountered this behavior both with A and C compsets.
Here is a snippet of ./case.submit standart output for alpha10c:
```
Loading input file list: 'Buildconf/cpl.input_data_list'
Model cpl missing file ocn2atm_fmapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'
Trying to download file: 'cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc' to path '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'
SUCCESS
```
And here is the corresponding ./case.submit standart output for alpha10d:
```
Loading input file list: 'Buildconf/cpl.input_data_list'
Model cpl missing file ocn2atm_fmapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'
Model cpl missing file ocn2atm_smapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'
Model cpl missing file ice2atm_fmapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'
Model cpl missing file ice2atm_smapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'
```
While alpha10c runs sucessfully on my macbook (high sierra), alpha10d eventually fails due to missing input files.
</issue>
<code>
[start of scripts/lib/CIME/case/check_input_data.py]
1 """
2 API for checking input for testcase
3 """
4 from CIME.XML.standard_module_setup import *
5 from CIME.utils import SharedArea, find_files, safe_copy, expect
6 from CIME.XML.inputdata import Inputdata
7 import CIME.Servers
8
9 import glob
10
11 logger = logging.getLogger(__name__)
12
13 def _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False):
14 """
15 Return True if successfully downloaded
16 """
17 if not server.fileexists(rel_path):
18 return False
19
20 full_path = os.path.join(input_data_root, rel_path)
21 logging.info("Trying to download file: '{}' to path '{}'".format(rel_path, full_path))
22 # Make sure local path exists, create if it does not
23 if isdirectory or full_path.endswith(os.sep):
24 if not os.path.exists(full_path):
25 logger.info("Creating directory {}".format(full_path))
26 os.makedirs(full_path)
27 isdirectory = True
28 elif not os.path.exists(os.path.dirname(full_path)):
29 os.makedirs(os.path.dirname(full_path))
30
31 # Use umask to make sure files are group read/writable. As long as parent directories
32 # have +s, then everything should work.
33 with SharedArea():
34 if isdirectory:
35 return server.getdirectory(rel_path, full_path)
36 else:
37 return server.getfile(rel_path, full_path)
38
39 ###############################################################################
40 def check_all_input_data(self, protocal=None, address=None, input_data_root=None, data_list_dir="Buildconf", download=True):
41 ###############################################################################
42 success = False
43 if protocal is not None and address is not None:
44 success = self.check_input_data(protocal=protocal, address=address, download=download,
45 input_data_root=input_data_root, data_list_dir=data_list_dir)
46 else:
47 success = self.check_input_data(protocal=protocal, address=address, download=False,
48 input_data_root=input_data_root, data_list_dir=data_list_dir)
49 if download and not success:
50 success = _downloadfromserver(self, input_data_root, data_list_dir)
51
52 self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)
53 return success
54
55 def _downloadfromserver(case, input_data_root, data_list_dir):
56 # needs to be downloaded
57 success = False
58 protocal = 'svn'
59 inputdata = Inputdata()
60 while not success and protocal is not None:
61 protocal, address = inputdata.get_next_server()
62 logger.info("Checking server {} with protocal {}".format(address, protocal))
63 success = case.check_input_data(protocal=protocal, address=address, download=True,
64 input_data_root=input_data_root, data_list_dir=data_list_dir)
65 return success
66
67 def stage_refcase(self, input_data_root=None, data_list_dir=None):
68 get_refcase = self.get_value("GET_REFCASE")
69 run_type = self.get_value("RUN_TYPE")
70 continue_run = self.get_value("CONTINUE_RUN")
71
72 # We do not fully populate the inputdata directory on every
73 # machine and do not expect every user to download the 3TB+ of
74 # data in our inputdata repository. This code checks for the
75 # existence of inputdata in the local inputdata directory and
76 # attempts to download data from the server if it's needed and
77 # missing.
78 if get_refcase and run_type != "startup" and not continue_run:
79 din_loc_root = self.get_value("DIN_LOC_ROOT")
80 run_refdate = self.get_value("RUN_REFDATE")
81 run_refcase = self.get_value("RUN_REFCASE")
82 run_refdir = self.get_value("RUN_REFDIR")
83 rundir = self.get_value("RUNDIR")
84
85 refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)
86 if not os.path.isdir(refdir):
87 logger.warning("Refcase not found in {}, will attempt to download from inputdata".format(refdir))
88 with open(os.path.join("Buildconf","refcase.input_data_list"),"w") as fd:
89 fd.write("refdir = {}{}".format(refdir, os.sep))
90 if input_data_root is None:
91 input_data_root = din_loc_root
92 if data_list_dir is None:
93 data_list_dir = "Buildconf"
94 success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir)
95 expect(success, "Could not download refcase from any server")
96
97 logger.info(" - Prestaging REFCASE ({}) to {}".format(refdir, rundir))
98
99 # prestage the reference case's files.
100
101 if (not os.path.exists(rundir)):
102 logger.debug("Creating run directory: {}".format(rundir))
103 os.makedirs(rundir)
104
105 # copy the refcases' rpointer files to the run directory
106 for rpointerfile in glob.iglob(os.path.join("{}","*rpointer*").format(refdir)):
107 logger.info("Copy rpointer {}".format(rpointerfile))
108 safe_copy(rpointerfile, rundir)
109
110 # link everything else
111
112 for rcfile in glob.iglob(os.path.join(refdir,"*")):
113 rcbaseline = os.path.basename(rcfile)
114 if not os.path.exists("{}/{}".format(rundir, rcbaseline)):
115 logger.info("Staging file {}".format(rcfile))
116 os.symlink(rcfile, "{}/{}".format(rundir, rcbaseline))
117 # Backward compatibility, some old refcases have cam2 in the name
118 # link to local cam file.
119 for cam2file in glob.iglob(os.path.join("{}","*.cam2.*").format(rundir)):
120 camfile = cam2file.replace("cam2", "cam")
121 os.symlink(cam2file, camfile)
122
123 return True
124
125 def check_input_data(case, protocal="svn", address=None, input_data_root=None, data_list_dir="Buildconf", download=False):
126 """
127 Return True if no files missing
128 """
129 case.load_env(reset=True)
130 # Fill in defaults as needed
131 input_data_root = case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root
132
133 expect(os.path.isdir(input_data_root), "Invalid input_data_root directory: '{}'".format(input_data_root))
134 expect(os.path.isdir(data_list_dir), "Invalid data_list_dir directory: '{}'".format(data_list_dir))
135
136 data_list_files = find_files(data_list_dir, "*.input_data_list")
137 expect(data_list_files, "No .input_data_list files found in dir '{}'".format(data_list_dir))
138
139 no_files_missing = True
140
141 if download:
142 if protocal not in vars(CIME.Servers):
143 logger.warning("Client protocal {} not enabled".format(protocal))
144 return False
145
146 if protocal == "svn":
147 server = CIME.Servers.SVN(address)
148 elif protocal == "gftp":
149 server = CIME.Servers.GridFTP(address)
150 elif protocal == "ftp":
151 server = CIME.Servers.FTP(address)
152 elif protocal == "wget":
153 server = CIME.Servers.WGET(address)
154 else:
155 expect(False, "Unsupported inputdata protocal: {}".format(protocal))
156
157
158
159 for data_list_file in data_list_files:
160 logging.info("Loading input file list: '{}'".format(data_list_file))
161 with open(data_list_file, "r") as fd:
162 lines = fd.readlines()
163
164 for line in lines:
165 line = line.strip()
166 if (line and not line.startswith("#")):
167 tokens = line.split('=')
168 description, full_path = tokens[0].strip(), tokens[1].strip()
169 if(full_path):
170 # expand xml variables
171 full_path = case.get_resolved_value(full_path)
172 rel_path = full_path.replace(input_data_root, "")
173 model = os.path.basename(data_list_file).split('.')[0]
174
175 if ("/" in rel_path and rel_path == full_path):
176 # User pointing to a file outside of input_data_root, we cannot determine
177 # rel_path, and so cannot download the file. If it already exists, we can
178 # proceed
179 if not os.path.exists(full_path):
180 logging.warning(" Model {} missing file {} = '{}'".format(model, description, full_path))
181 if download:
182 logging.warning(" Cannot download file since it lives outside of the input_data_root '{}'".format(input_data_root))
183 no_files_missing = False
184 else:
185 logging.debug(" Found input file: '{}'".format(full_path))
186
187 else:
188 # There are some special values of rel_path that
189 # we need to ignore - some of the component models
190 # set things like 'NULL' or 'same_as_TS' -
191 # basically if rel_path does not contain '/' (a
192 # directory tree) you can assume it's a special
193 # value and ignore it (perhaps with a warning)
194 if ("/" in rel_path and not os.path.exists(full_path)):
195 logging.warning(" Model {} missing file {} = '{}'".format(model, description, full_path))
196
197 if (download):
198 success = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),
199 isdirectory=rel_path.endswith(os.sep))
200 if not success:
201 no_files_missing = False
202 else:
203 logging.debug(" Already had input file: '{}'".format(full_path))
204
205 else:
206 model = os.path.basename(data_list_file).split('.')[0]
207 logging.warning("Model {} no file specified for {}".format(model, description))
208
209 return no_files_missing
210
[end of scripts/lib/CIME/case/check_input_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/lib/CIME/case/check_input_data.py b/scripts/lib/CIME/case/check_input_data.py
--- a/scripts/lib/CIME/case/check_input_data.py
+++ b/scripts/lib/CIME/case/check_input_data.py
@@ -177,7 +177,7 @@
# rel_path, and so cannot download the file. If it already exists, we can
# proceed
if not os.path.exists(full_path):
- logging.warning(" Model {} missing file {} = '{}'".format(model, description, full_path))
+ logging.warning("Model {} missing file {} = '{}'".format(model, description, full_path))
if download:
logging.warning(" Cannot download file since it lives outside of the input_data_root '{}'".format(input_data_root))
no_files_missing = False
@@ -193,12 +193,11 @@
# value and ignore it (perhaps with a warning)
if ("/" in rel_path and not os.path.exists(full_path)):
logging.warning(" Model {} missing file {} = '{}'".format(model, description, full_path))
+ no_files_missing = False
if (download):
- success = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),
+ no_files_missing = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),
isdirectory=rel_path.endswith(os.sep))
- if not success:
- no_files_missing = False
else:
logging.debug(" Already had input file: '{}'".format(full_path))
|
{"golden_diff": "diff --git a/scripts/lib/CIME/case/check_input_data.py b/scripts/lib/CIME/case/check_input_data.py\n--- a/scripts/lib/CIME/case/check_input_data.py\n+++ b/scripts/lib/CIME/case/check_input_data.py\n@@ -177,7 +177,7 @@\n # rel_path, and so cannot download the file. If it already exists, we can\n # proceed\n if not os.path.exists(full_path):\n- logging.warning(\" Model {} missing file {} = '{}'\".format(model, description, full_path))\n+ logging.warning(\"Model {} missing file {} = '{}'\".format(model, description, full_path))\n if download:\n logging.warning(\" Cannot download file since it lives outside of the input_data_root '{}'\".format(input_data_root))\n no_files_missing = False\n@@ -193,12 +193,11 @@\n # value and ignore it (perhaps with a warning)\n if (\"/\" in rel_path and not os.path.exists(full_path)):\n logging.warning(\" Model {} missing file {} = '{}'\".format(model, description, full_path))\n+ no_files_missing = False\n \n if (download):\n- success = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),\n+ no_files_missing = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),\n isdirectory=rel_path.endswith(os.sep))\n- if not success:\n- no_files_missing = False\n else:\n logging.debug(\" Already had input file: '{}'\".format(full_path))\n", "issue": "cesm2_alpha10d doesn't attempt to download missing input files\nI recently ported both alpha10c and alpha10d on my macbook. While alpha10c successfully downloads all missing input files, alpha10d doesn't appear to attempt to download missing input files. I've encountered this behavior both with A and C compsets.\r\n\r\nHere is a snippet of ./case.submit standart output for alpha10c:\r\n```\r\nLoading input file list: 'Buildconf/cpl.input_data_list'\r\n Model cpl missing file ocn2atm_fmapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'\r\nTrying to download file: 'cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc' to path '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'\r\nSUCCESS\r\n```\r\n\r\nAnd here is the corresponding ./case.submit standart output for alpha10d:\r\n```\r\nLoading input file list: 'Buildconf/cpl.input_data_list'\r\n Model cpl missing file ocn2atm_fmapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'\r\n Model cpl missing file ocn2atm_smapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'\r\n Model cpl missing file ice2atm_fmapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'\r\n Model cpl missing file ice2atm_smapname = '/Users/altuntas/projects/cesm-inputdata/cpl/gridmaps/gx1v6/map_gx1v6_TO_fv0.9x1.25_aave.130322.nc'\r\n```\r\n\r\nWhile alpha10c runs sucessfully on my macbook (high sierra), alpha10d eventually fails due to missing input files.\r\n\n", "before_files": [{"content": "\"\"\"\nAPI for checking input for testcase\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import SharedArea, find_files, safe_copy, expect\nfrom CIME.XML.inputdata import Inputdata\nimport CIME.Servers\n\nimport glob\n\nlogger = logging.getLogger(__name__)\n\ndef _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False):\n \"\"\"\n Return True if successfully downloaded\n \"\"\"\n if not server.fileexists(rel_path):\n return False\n\n full_path = 
os.path.join(input_data_root, rel_path)\n logging.info(\"Trying to download file: '{}' to path '{}'\".format(rel_path, full_path))\n # Make sure local path exists, create if it does not\n if isdirectory or full_path.endswith(os.sep):\n if not os.path.exists(full_path):\n logger.info(\"Creating directory {}\".format(full_path))\n os.makedirs(full_path)\n isdirectory = True\n elif not os.path.exists(os.path.dirname(full_path)):\n os.makedirs(os.path.dirname(full_path))\n\n # Use umask to make sure files are group read/writable. As long as parent directories\n # have +s, then everything should work.\n with SharedArea():\n if isdirectory:\n return server.getdirectory(rel_path, full_path)\n else:\n return server.getfile(rel_path, full_path)\n\n###############################################################################\ndef check_all_input_data(self, protocal=None, address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=True):\n###############################################################################\n success = False\n if protocal is not None and address is not None:\n success = self.check_input_data(protocal=protocal, address=address, download=download,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n else:\n success = self.check_input_data(protocal=protocal, address=address, download=False,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n if download and not success:\n success = _downloadfromserver(self, input_data_root, data_list_dir)\n\n self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n\ndef _downloadfromserver(case, input_data_root, data_list_dir):\n # needs to be downloaded\n success = False\n protocal = 'svn'\n inputdata = Inputdata()\n while not success and protocal is not None:\n protocal, address = inputdata.get_next_server()\n logger.info(\"Checking server {} with protocal {}\".format(address, protocal))\n success = case.check_input_data(protocal=protocal, address=address, download=True,\n input_data_root=input_data_root, data_list_dir=data_list_dir)\n return success\n\ndef stage_refcase(self, input_data_root=None, data_list_dir=None):\n get_refcase = self.get_value(\"GET_REFCASE\")\n run_type = self.get_value(\"RUN_TYPE\")\n continue_run = self.get_value(\"CONTINUE_RUN\")\n\n # We do not fully populate the inputdata directory on every\n # machine and do not expect every user to download the 3TB+ of\n # data in our inputdata repository. 
This code checks for the\n # existence of inputdata in the local inputdata directory and\n # attempts to download data from the server if it's needed and\n # missing.\n if get_refcase and run_type != \"startup\" and not continue_run:\n din_loc_root = self.get_value(\"DIN_LOC_ROOT\")\n run_refdate = self.get_value(\"RUN_REFDATE\")\n run_refcase = self.get_value(\"RUN_REFCASE\")\n run_refdir = self.get_value(\"RUN_REFDIR\")\n rundir = self.get_value(\"RUNDIR\")\n\n refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate)\n if not os.path.isdir(refdir):\n logger.warning(\"Refcase not found in {}, will attempt to download from inputdata\".format(refdir))\n with open(os.path.join(\"Buildconf\",\"refcase.input_data_list\"),\"w\") as fd:\n fd.write(\"refdir = {}{}\".format(refdir, os.sep))\n if input_data_root is None:\n input_data_root = din_loc_root\n if data_list_dir is None:\n data_list_dir = \"Buildconf\"\n success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir)\n expect(success, \"Could not download refcase from any server\")\n\n logger.info(\" - Prestaging REFCASE ({}) to {}\".format(refdir, rundir))\n\n # prestage the reference case's files.\n\n if (not os.path.exists(rundir)):\n logger.debug(\"Creating run directory: {}\".format(rundir))\n os.makedirs(rundir)\n\n # copy the refcases' rpointer files to the run directory\n for rpointerfile in glob.iglob(os.path.join(\"{}\",\"*rpointer*\").format(refdir)):\n logger.info(\"Copy rpointer {}\".format(rpointerfile))\n safe_copy(rpointerfile, rundir)\n\n # link everything else\n\n for rcfile in glob.iglob(os.path.join(refdir,\"*\")):\n rcbaseline = os.path.basename(rcfile)\n if not os.path.exists(\"{}/{}\".format(rundir, rcbaseline)):\n logger.info(\"Staging file {}\".format(rcfile))\n os.symlink(rcfile, \"{}/{}\".format(rundir, rcbaseline))\n # Backward compatibility, some old refcases have cam2 in the name\n # link to local cam file.\n for cam2file in glob.iglob(os.path.join(\"{}\",\"*.cam2.*\").format(rundir)):\n camfile = cam2file.replace(\"cam2\", \"cam\")\n os.symlink(cam2file, camfile)\n\n return True\n\ndef check_input_data(case, protocal=\"svn\", address=None, input_data_root=None, data_list_dir=\"Buildconf\", download=False):\n \"\"\"\n Return True if no files missing\n \"\"\"\n case.load_env(reset=True)\n # Fill in defaults as needed\n input_data_root = case.get_value(\"DIN_LOC_ROOT\") if input_data_root is None else input_data_root\n\n expect(os.path.isdir(input_data_root), \"Invalid input_data_root directory: '{}'\".format(input_data_root))\n expect(os.path.isdir(data_list_dir), \"Invalid data_list_dir directory: '{}'\".format(data_list_dir))\n\n data_list_files = find_files(data_list_dir, \"*.input_data_list\")\n expect(data_list_files, \"No .input_data_list files found in dir '{}'\".format(data_list_dir))\n\n no_files_missing = True\n\n if download:\n if protocal not in vars(CIME.Servers):\n logger.warning(\"Client protocal {} not enabled\".format(protocal))\n return False\n\n if protocal == \"svn\":\n server = CIME.Servers.SVN(address)\n elif protocal == \"gftp\":\n server = CIME.Servers.GridFTP(address)\n elif protocal == \"ftp\":\n server = CIME.Servers.FTP(address)\n elif protocal == \"wget\":\n server = CIME.Servers.WGET(address)\n else:\n expect(False, \"Unsupported inputdata protocal: {}\".format(protocal))\n\n\n\n for data_list_file in data_list_files:\n logging.info(\"Loading input file list: '{}'\".format(data_list_file))\n with open(data_list_file, \"r\") as 
fd:\n lines = fd.readlines()\n\n for line in lines:\n line = line.strip()\n if (line and not line.startswith(\"#\")):\n tokens = line.split('=')\n description, full_path = tokens[0].strip(), tokens[1].strip()\n if(full_path):\n # expand xml variables\n full_path = case.get_resolved_value(full_path)\n rel_path = full_path.replace(input_data_root, \"\")\n model = os.path.basename(data_list_file).split('.')[0]\n\n if (\"/\" in rel_path and rel_path == full_path):\n # User pointing to a file outside of input_data_root, we cannot determine\n # rel_path, and so cannot download the file. If it already exists, we can\n # proceed\n if not os.path.exists(full_path):\n logging.warning(\" Model {} missing file {} = '{}'\".format(model, description, full_path))\n if download:\n logging.warning(\" Cannot download file since it lives outside of the input_data_root '{}'\".format(input_data_root))\n no_files_missing = False\n else:\n logging.debug(\" Found input file: '{}'\".format(full_path))\n\n else:\n # There are some special values of rel_path that\n # we need to ignore - some of the component models\n # set things like 'NULL' or 'same_as_TS' -\n # basically if rel_path does not contain '/' (a\n # directory tree) you can assume it's a special\n # value and ignore it (perhaps with a warning)\n if (\"/\" in rel_path and not os.path.exists(full_path)):\n logging.warning(\" Model {} missing file {} = '{}'\".format(model, description, full_path))\n\n if (download):\n success = _download_if_in_repo(server, input_data_root, rel_path.strip(os.sep),\n isdirectory=rel_path.endswith(os.sep))\n if not success:\n no_files_missing = False\n else:\n logging.debug(\" Already had input file: '{}'\".format(full_path))\n\n else:\n model = os.path.basename(data_list_file).split('.')[0]\n logging.warning(\"Model {} no file specified for {}\".format(model, description))\n\n return no_files_missing\n", "path": "scripts/lib/CIME/case/check_input_data.py"}]}
| 3,746 | 341 |
gh_patches_debug_4274
|
rasdani/github-patches
|
git_diff
|
OpenEnergyPlatform__oeplatform-980
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Permissions: Renaming a permission group is not possible
Existing groups cannot be renamed.
</issue>
<code>
[start of login/views.py]
1 from django import forms
2 from django.contrib.auth import update_session_auth_hash
3 from django.contrib.auth.mixins import LoginRequiredMixin
4 from django.contrib.auth.models import Group
5 from django.contrib.auth.views import PasswordChangeView, PasswordResetView
6 from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
7 from django.http import Http404
8 from django.shortcuts import get_object_or_404, redirect, render
9 from django.views.generic import FormView, View
10 from django.views.generic.edit import UpdateView
11
12 import login.models as models
13
14 from .forms import ChangeEmailForm, CreateUserForm, DetachForm, EditUserForm, GroupForm
15 from .models import ADMIN_PERM, GroupMembership, UserGroup
16 from .models import myuser as OepUser
17
18 from oeplatform.settings import URL
19
20 class ProfileView(View):
21 def get(self, request, user_id):
22 """
23 Load the user identified by user_id and is OAuth-token. If latter does not exist yet, create one.
24 :param request: A HTTP-request object sent by the Django framework.
25 :param user_id: An user id
26 :return: Profile renderer
27 """
28 from rest_framework.authtoken.models import Token
29
30 for user in OepUser.objects.all():
31 Token.objects.get_or_create(user=user)
32 user = get_object_or_404(OepUser, pk=user_id)
33 token = None
34 if request.user.is_authenticated:
35 token = Token.objects.get(user=request.user)
36 return render(
37 request, "login/profile.html", {"profile_user": user, "token": token}
38 )
39
40
41 class GroupManagement(View, LoginRequiredMixin):
42 def get(self, request):
43 """
44 Load and list the available groups by groupadmin.
45 :param request: A HTTP-request object sent by the Django framework.
46 :param user_id: An user id
47 :return: Profile renderer
48 """
49
50 membership = request.user.memberships
51 return render(
52 request, "login/list_memberships.html", {"membership": membership}
53 )
54
55
56 class GroupCreate(View, LoginRequiredMixin):
57 def get(self, request, group_id=None):
58 """
59 Load the chosen action(create or edit) for a group.
60 :param request: A HTTP-request object sent by the Django framework.
61 :param user_id: An user id
62 :param user_id: An group id
63 :return: Profile renderer
64 """
65
66 if group_id:
67 group = UserGroup.objects.get(id=group_id)
68 form = GroupForm(instance=group)
69 membership = get_object_or_404(
70 GroupMembership, group=group, user=request.user
71 )
72 if membership.level < ADMIN_PERM:
73 raise PermissionDenied
74 else:
75 form = GroupForm()
76 return render(request, "login/group_create.html", {"form": form})
77
78 def post(self, request, group_id=None):
79 """
80 Performs selected action(save or delete) for a group. If a groupname already exists, then a error
81 will be output.
82 The selected users become members of this group. The groupadmin is already set.
83 :param request: A HTTP-request object sent by the Django framework.
84 :param user_id: An user id
85 :param user_id: An group id
86 :return: Profile renderer
87 """
88 group = UserGroup.objects.get(id=group_id) if group_id else None
89 form = GroupForm(request.POST, instance=group)
90 if form.is_valid():
91 if group_id:
92 membership = get_object_or_404(
93 GroupMembership, group=group, user=request.user
94 )
95 if membership.level < ADMIN_PERM:
96 raise PermissionDenied
97 else:
98 group = form.save()
99 membership = GroupMembership.objects.create(
100 user=request.user, group=group, level=ADMIN_PERM
101 )
102 membership.save()
103 return redirect("/user/groups/{id}".format(id=group.id), {"group": group})
104 else:
105 return render(request, "login/group_create.html", {"form": form})
106
107
108 class GroupView(View, LoginRequiredMixin):
109 def get(self, request, group_id):
110 """
111 Load the chosen action(create or edit) for a group.
112 :param request: A HTTP-request object sent by the Django framework.
113 :param user_id: An user id
114 :param user_id: An group id
115 :return: Profile renderer
116 """
117 group = get_object_or_404(UserGroup, pk=group_id)
118 return render(
119 request,
120 "login/group.html",
121 {"group": group},
122 )
123
124
125 class GroupEdit(View, LoginRequiredMixin):
126 def get(self, request, group_id):
127 """
128 Load the chosen action(create or edit) for a group.
129 :param request: A HTTP-request object sent by the Django framework.
130 :param user_id: An user id
131 :param user_id: An group id
132 :return: Profile renderer
133 """
134 group = get_object_or_404(UserGroup, pk=group_id)
135 is_admin = False
136 membership = GroupMembership.objects.filter(
137 group=group, user=request.user
138 ).first()
139 if membership:
140 is_admin = membership.level >= ADMIN_PERM
141 return render(
142 request,
143 "login/change_form.html",
144 {"group": group, "choices": GroupMembership.choices, "is_admin": is_admin},
145 )
146
147 def post(self, request, group_id):
148 """
149 Performs selected action(save or delete) for a group. If a groupname already exists, then a error
150 will be output.
151 The selected users become members of this group. The groupadmin is already set.
152 :param request: A HTTP-request object sent by the Django framework.
153 :param user_id: An user id
154 :param user_id: An group id
155 :return: Profile renderer
156 """
157 mode = request.POST["mode"]
158 group = get_object_or_404(UserGroup, id=group_id)
159 membership = get_object_or_404(GroupMembership, group=group, user=request.user)
160
161 errors = {}
162 if mode == "add_user":
163 if membership.level < models.WRITE_PERM:
164 raise PermissionDenied
165 try:
166 user = OepUser.objects.get(name=request.POST["name"])
167 membership, _ = GroupMembership.objects.get_or_create(
168 group=group, user=user
169 )
170 membership.save()
171 except OepUser.DoesNotExist:
172 errors["name"] = "User does not exist"
173 elif mode == "remove_user":
174 if membership.level < models.DELETE_PERM:
175 raise PermissionDenied
176 user = OepUser.objects.get(id=request.POST["user_id"])
177 membership = GroupMembership.objects.get(group=group, user=user)
178 if membership.level >= ADMIN_PERM:
179 admins = GroupMembership.objects.filter(group=group).exclude(user=user)
180 if not admins:
181 errors["name"] = "A group needs at least one admin"
182 else:
183 membership.delete()
184 else:
185 membership.delete()
186 elif mode == "alter_user":
187 if membership.level < models.ADMIN_PERM:
188 raise PermissionDenied
189 user = OepUser.objects.get(id=request.POST["user_id"])
190 if user == request.user:
191 errors["name"] = "You can not change your own permissions"
192 else:
193 membership = GroupMembership.objects.get(group=group, user=user)
194 membership.level = request.POST["level"]
195 membership.save()
196 elif mode == "delete_group":
197 if membership.level < models.ADMIN_PERM:
198 raise PermissionDenied
199 group.delete()
200 return redirect("/user/groups")
201 else:
202 raise PermissionDenied
203 return render(
204 request,
205 "login/change_form.html",
206 {
207 "group": group,
208 "choices": GroupMembership.choices,
209 "errors": errors,
210 "is_admin": True,
211 },
212 )
213
214 def __add_user(self, request, group):
215 user = OepUser.objects.filter(id=request.POST["user_id"]).first()
216 g = user.groups.add(group)
217 g.save()
218 return self.get(request)
219
220
221 class ProfileUpdateView(UpdateView, LoginRequiredMixin):
222 """
223 Autogenerate a update form for users.
224 """
225
226 model = OepUser
227 fields = ["name", "affiliation", "email"]
228 template_name_suffix = "_update_form"
229
230
231 class EditUserView(View):
232 def get(self, request, user_id):
233 if not request.user.id == int(user_id):
234 raise PermissionDenied
235 form = EditUserForm(instance=request.user)
236 return render(request, "login/oepuser_edit_form.html", {"form": form})
237
238 def post(self, request, user_id):
239 if not request.user.id == int(user_id):
240 raise PermissionDenied
241 form = EditUserForm(request.POST, instance=request.user)
242 if form.is_valid():
243 form.save()
244 return redirect("/user/profile/{id}".format(id=request.user.id))
245 else:
246 return render(request, "login/oepuser_edit_form.html", {"form": form})
247
248
249 class CreateUserView(View):
250 def get(self, request):
251 form = CreateUserForm()
252 return render(request, "login/oepuser_create_form.html", {"form": form})
253
254 def post(self, request):
255 form = CreateUserForm(request.POST)
256 if form.is_valid():
257 user = form.save()
258 return redirect("activate")
259 else:
260 return render(request, "login/oepuser_create_form.html", {"form": form})
261
262
263 class DetachView(LoginRequiredMixin, View):
264 def get(self, request):
265 if request.user.is_native:
266 raise PermissionDenied
267 form = DetachForm(request.user)
268 return render(request, "login/detach.html", {"form": form})
269
270 def post(self, request):
271 if request.user.is_native:
272 raise PermissionDenied
273 form = DetachForm(request.user, request.POST)
274 if form.is_valid():
275 form.save()
276 return redirect("/")
277 else:
278 print(form.errors)
279 return render(request, "login/detach.html", {"form": form})
280
281
282 class OEPPasswordChangeView(PasswordChangeView):
283 template_name = "login/generic_form.html"
284 success_url = "/"
285
286
287 class ActivationNoteView(FormView):
288 template_name = "login/activate.html"
289 form_class = ChangeEmailForm
290 success_url = "user/activate"
291
292 def form_valid(self, form):
293 if self.request.user.is_anonymous or self.request.user.is_mail_verified:
294 raise PermissionDenied
295 form.save(self.request.user)
296 return super(ActivationNoteView, self).form_valid(form)
297
298
299 def activate(request, token):
300 token_obj = models.ActivationToken.objects.filter(value=token).first()
301 if not token_obj:
302 form = ChangeEmailForm()
303 form._errors = {
304 forms.forms.NON_FIELD_ERRORS: form.error_class(
305 ["Your token was invalid or expired"]
306 )
307 }
308 return render(request, "login/activate.html", {"form": form})
309 else:
310 token_obj.user.is_mail_verified = True
311 token_obj.user.save()
312 token_obj.delete()
313 return redirect("/user/profile/{id}".format(id=token_obj.user.id))
314
[end of login/views.py]
</code>
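For orientation, `GroupEdit.post` above gates every mode on the caller's membership level (`models.WRITE_PERM` to add a member, `models.DELETE_PERM` to remove one, `models.ADMIN_PERM` to alter members or delete the group). The following is a minimal, dependency-free sketch of that gate; it is not code from this repository, and the numeric level values and the `check_permission` helper are illustrative assumptions.

```python
# Minimal sketch of the permission gate; the numeric values are assumptions for
# illustration, not the constants defined in login/models.py.
WRITE_PERM, DELETE_PERM, ADMIN_PERM = 4, 8, 12


class PermissionDenied(Exception):
    """Stand-in for django.core.exceptions.PermissionDenied."""


REQUIRED_LEVEL = {
    "add_user": WRITE_PERM,
    "remove_user": DELETE_PERM,
    "alter_user": ADMIN_PERM,
    "delete_group": ADMIN_PERM,
}


def check_permission(mode: str, membership_level: int) -> None:
    """Raise PermissionDenied unless the membership level covers the requested mode."""
    required = REQUIRED_LEVEL.get(mode)
    if required is None or membership_level < required:
        raise PermissionDenied(f"level {membership_level} cannot perform {mode!r}")


if __name__ == "__main__":
    check_permission("add_user", WRITE_PERM)  # allowed
    try:
        check_permission("delete_group", WRITE_PERM)  # rejected
    except PermissionDenied as exc:
        print(exc)
```

In the real views the same comparison is done inline against `GroupMembership.level`, with Django's `PermissionDenied` turning into a 403 response.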
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/login/views.py b/login/views.py
--- a/login/views.py
+++ b/login/views.py
@@ -89,6 +89,7 @@
form = GroupForm(request.POST, instance=group)
if form.is_valid():
if group_id:
+ group = form.save()
membership = get_object_or_404(
GroupMembership, group=group, user=request.user
)
|
{"golden_diff": "diff --git a/login/views.py b/login/views.py\n--- a/login/views.py\n+++ b/login/views.py\n@@ -89,6 +89,7 @@\n form = GroupForm(request.POST, instance=group)\n if form.is_valid():\n if group_id:\n+ group = form.save()\n membership = get_object_or_404(\n GroupMembership, group=group, user=request.user\n )\n", "issue": "Permissions: Renaming a permission group is not possible\nExisting groups cannot be renamed.\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.views import PasswordChangeView, PasswordResetView\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.generic import FormView, View\nfrom django.views.generic.edit import UpdateView\n\nimport login.models as models\n\nfrom .forms import ChangeEmailForm, CreateUserForm, DetachForm, EditUserForm, GroupForm\nfrom .models import ADMIN_PERM, GroupMembership, UserGroup\nfrom .models import myuser as OepUser\n\nfrom oeplatform.settings import URL\n\nclass ProfileView(View):\n def get(self, request, user_id):\n \"\"\"\n Load the user identified by user_id and is OAuth-token. If latter does not exist yet, create one.\n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :return: Profile renderer\n \"\"\"\n from rest_framework.authtoken.models import Token\n\n for user in OepUser.objects.all():\n Token.objects.get_or_create(user=user)\n user = get_object_or_404(OepUser, pk=user_id)\n token = None\n if request.user.is_authenticated:\n token = Token.objects.get(user=request.user)\n return render(\n request, \"login/profile.html\", {\"profile_user\": user, \"token\": token}\n )\n\n\nclass GroupManagement(View, LoginRequiredMixin):\n def get(self, request):\n \"\"\"\n Load and list the available groups by groupadmin. \n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :return: Profile renderer \n \"\"\"\n\n membership = request.user.memberships\n return render(\n request, \"login/list_memberships.html\", {\"membership\": membership}\n )\n\n\nclass GroupCreate(View, LoginRequiredMixin):\n def get(self, request, group_id=None):\n \"\"\"\n Load the chosen action(create or edit) for a group.\n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :param user_id: An group id\n :return: Profile renderer\n \"\"\"\n\n if group_id:\n group = UserGroup.objects.get(id=group_id)\n form = GroupForm(instance=group)\n membership = get_object_or_404(\n GroupMembership, group=group, user=request.user\n )\n if membership.level < ADMIN_PERM:\n raise PermissionDenied\n else:\n form = GroupForm()\n return render(request, \"login/group_create.html\", {\"form\": form})\n\n def post(self, request, group_id=None):\n \"\"\"\n Performs selected action(save or delete) for a group. If a groupname already exists, then a error\n will be output.\n The selected users become members of this group. 
The groupadmin is already set.\n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :param user_id: An group id\n :return: Profile renderer\n \"\"\"\n group = UserGroup.objects.get(id=group_id) if group_id else None\n form = GroupForm(request.POST, instance=group)\n if form.is_valid():\n if group_id:\n membership = get_object_or_404(\n GroupMembership, group=group, user=request.user\n )\n if membership.level < ADMIN_PERM:\n raise PermissionDenied\n else:\n group = form.save()\n membership = GroupMembership.objects.create(\n user=request.user, group=group, level=ADMIN_PERM\n )\n membership.save()\n return redirect(\"/user/groups/{id}\".format(id=group.id), {\"group\": group})\n else:\n return render(request, \"login/group_create.html\", {\"form\": form})\n\n\nclass GroupView(View, LoginRequiredMixin):\n def get(self, request, group_id):\n \"\"\"\n Load the chosen action(create or edit) for a group.\n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :param user_id: An group id\n :return: Profile renderer\n \"\"\"\n group = get_object_or_404(UserGroup, pk=group_id)\n return render(\n request,\n \"login/group.html\",\n {\"group\": group},\n )\n\n\nclass GroupEdit(View, LoginRequiredMixin):\n def get(self, request, group_id):\n \"\"\"\n Load the chosen action(create or edit) for a group. \n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :param user_id: An group id\n :return: Profile renderer \n \"\"\"\n group = get_object_or_404(UserGroup, pk=group_id)\n is_admin = False\n membership = GroupMembership.objects.filter(\n group=group, user=request.user\n ).first()\n if membership:\n is_admin = membership.level >= ADMIN_PERM\n return render(\n request,\n \"login/change_form.html\",\n {\"group\": group, \"choices\": GroupMembership.choices, \"is_admin\": is_admin},\n )\n\n def post(self, request, group_id):\n \"\"\"\n Performs selected action(save or delete) for a group. If a groupname already exists, then a error \n will be output. \n The selected users become members of this group. 
The groupadmin is already set.\n :param request: A HTTP-request object sent by the Django framework.\n :param user_id: An user id\n :param user_id: An group id\n :return: Profile renderer \n \"\"\"\n mode = request.POST[\"mode\"]\n group = get_object_or_404(UserGroup, id=group_id)\n membership = get_object_or_404(GroupMembership, group=group, user=request.user)\n\n errors = {}\n if mode == \"add_user\":\n if membership.level < models.WRITE_PERM:\n raise PermissionDenied\n try:\n user = OepUser.objects.get(name=request.POST[\"name\"])\n membership, _ = GroupMembership.objects.get_or_create(\n group=group, user=user\n )\n membership.save()\n except OepUser.DoesNotExist:\n errors[\"name\"] = \"User does not exist\"\n elif mode == \"remove_user\":\n if membership.level < models.DELETE_PERM:\n raise PermissionDenied\n user = OepUser.objects.get(id=request.POST[\"user_id\"])\n membership = GroupMembership.objects.get(group=group, user=user)\n if membership.level >= ADMIN_PERM:\n admins = GroupMembership.objects.filter(group=group).exclude(user=user)\n if not admins:\n errors[\"name\"] = \"A group needs at least one admin\"\n else:\n membership.delete()\n else:\n membership.delete()\n elif mode == \"alter_user\":\n if membership.level < models.ADMIN_PERM:\n raise PermissionDenied\n user = OepUser.objects.get(id=request.POST[\"user_id\"])\n if user == request.user:\n errors[\"name\"] = \"You can not change your own permissions\"\n else:\n membership = GroupMembership.objects.get(group=group, user=user)\n membership.level = request.POST[\"level\"]\n membership.save()\n elif mode == \"delete_group\":\n if membership.level < models.ADMIN_PERM:\n raise PermissionDenied\n group.delete()\n return redirect(\"/user/groups\")\n else:\n raise PermissionDenied\n return render(\n request,\n \"login/change_form.html\",\n {\n \"group\": group,\n \"choices\": GroupMembership.choices,\n \"errors\": errors,\n \"is_admin\": True,\n },\n )\n\n def __add_user(self, request, group):\n user = OepUser.objects.filter(id=request.POST[\"user_id\"]).first()\n g = user.groups.add(group)\n g.save()\n return self.get(request)\n\n\nclass ProfileUpdateView(UpdateView, LoginRequiredMixin):\n \"\"\"\n Autogenerate a update form for users.\n \"\"\"\n\n model = OepUser\n fields = [\"name\", \"affiliation\", \"email\"]\n template_name_suffix = \"_update_form\"\n\n\nclass EditUserView(View):\n def get(self, request, user_id):\n if not request.user.id == int(user_id):\n raise PermissionDenied\n form = EditUserForm(instance=request.user)\n return render(request, \"login/oepuser_edit_form.html\", {\"form\": form})\n\n def post(self, request, user_id):\n if not request.user.id == int(user_id):\n raise PermissionDenied\n form = EditUserForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n return redirect(\"/user/profile/{id}\".format(id=request.user.id))\n else:\n return render(request, \"login/oepuser_edit_form.html\", {\"form\": form})\n\n\nclass CreateUserView(View):\n def get(self, request):\n form = CreateUserForm()\n return render(request, \"login/oepuser_create_form.html\", {\"form\": form})\n\n def post(self, request):\n form = CreateUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n return redirect(\"activate\")\n else:\n return render(request, \"login/oepuser_create_form.html\", {\"form\": form})\n\n\nclass DetachView(LoginRequiredMixin, View):\n def get(self, request):\n if request.user.is_native:\n raise PermissionDenied\n form = DetachForm(request.user)\n return render(request, 
\"login/detach.html\", {\"form\": form})\n\n def post(self, request):\n if request.user.is_native:\n raise PermissionDenied\n form = DetachForm(request.user, request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"/\")\n else:\n print(form.errors)\n return render(request, \"login/detach.html\", {\"form\": form})\n\n\nclass OEPPasswordChangeView(PasswordChangeView):\n template_name = \"login/generic_form.html\"\n success_url = \"/\"\n\n\nclass ActivationNoteView(FormView):\n template_name = \"login/activate.html\"\n form_class = ChangeEmailForm\n success_url = \"user/activate\"\n\n def form_valid(self, form):\n if self.request.user.is_anonymous or self.request.user.is_mail_verified:\n raise PermissionDenied\n form.save(self.request.user)\n return super(ActivationNoteView, self).form_valid(form)\n\n\ndef activate(request, token):\n token_obj = models.ActivationToken.objects.filter(value=token).first()\n if not token_obj:\n form = ChangeEmailForm()\n form._errors = {\n forms.forms.NON_FIELD_ERRORS: form.error_class(\n [\"Your token was invalid or expired\"]\n )\n }\n return render(request, \"login/activate.html\", {\"form\": form})\n else:\n token_obj.user.is_mail_verified = True\n token_obj.user.save()\n token_obj.delete()\n return redirect(\"/user/profile/{id}\".format(id=token_obj.user.id))\n", "path": "login/views.py"}]}
| 3,785 | 91 |
gh_patches_debug_1474
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-9429
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[rllib] MARWIL tuned cartpole example (and my own experiments) produce nan rewards only.
<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->
### What is the problem? + Reproduction
I have a custom example that produces offline data and picks it up with MARWIL for training. I observed that I get `nan` reward values for my example every time, so I went a step back and used your cartpole example:
https://github.com/ray-project/ray/blob/cd5a207d69cdaf05b47d956c18e89d928585eec7/rllib/tuned_examples/marwil/cartpole-marwil.yaml
I'm following the exact steps there, i.e. first run
```
./train.py --run=PPO --env=CartPole-v0 \
--stop='{"timesteps_total": 50000}' \
--config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
```
followed by
```
rllib train -f cartpole-marwil.yaml
```
I did this both on my currently preferred stable version `0.8.5`, as well as on the `0.9.0.dev0` wheel. The result is this:
```
== Status ==
Memory usage on this node: 19.4/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 0/12 CPUs, 0/0 GPUs, 0.0/9.96 GiB heap, 0.0/3.42 GiB objects
Result logdir: /Users/maxpumperla/ray_results/cartpole-marwil
Number of trials: 2 (2 TERMINATED)
+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+
| Trial name | status | loc | beta | iter | total time (s) | ts | reward |
|--------------------------------+------------+-------+--------+--------+------------------+--------+----------|
| MARWIL_CartPole-v0_7af06_00000 | TERMINATED | | 0 | 2206 | 58.5661 | 500007 | nan |
| MARWIL_CartPole-v0_7af06_00001 | TERMINATED | | 1 | 2248 | 58.6117 | 500286 | nan |
+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+
```
Also, I've noticed that your MARWIL unit test is a pure smoke test and doesn't check reward values, but I didn't run that locally. Maybe it produces nan values as well.
In any case I'd appreciate any input here, as we'd love to use MARWIL for our "real" use case, in which we see the same behaviour.
</issue>
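One point of context for this report: when MARWIL consumes only offline experiences it does not step the live environment during training, so an empty reward column is expected rather than a sign of divergence. The sketch below shows one common way to get an on-environment reward signal back by enabling periodic evaluation; it is an illustration, not the change that closed this issue, and the configuration keys (`input`, `evaluation_interval`, `evaluation_num_workers`, `evaluation_config`) are assumptions to verify against the installed RLlib version.

```python
# Illustrative only: reward reporting for offline MARWIL via periodic on-environment
# evaluation. The config keys are assumed to match the installed RLlib version.
import ray
from ray import tune

if __name__ == "__main__":
    ray.init()
    config = {
        "env": "CartPole-v0",
        "beta": 1.0,                     # MARWIL exponent, as in the tuned example
        "input": "/tmp/out",             # offline experiences written by the PPO run
        "evaluation_interval": 1,        # assumed key: evaluate every training iteration
        "evaluation_num_workers": 1,     # assumed key
        "evaluation_config": {"input": "sampler"},  # evaluate against the live env
    }
    tune.run("MARWIL", config=config, stop={"training_iteration": 5})
```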
<code>
[start of rllib/examples/custom_loss.py]
1 """Example of using custom_loss() with an imitation learning loss.
2
3 The default input file is too small to learn a good policy, but you can
4 generate new experiences for IL training as follows:
5
6 To generate experiences:
7 $ ./train.py --run=PG --config='{"output": "/tmp/cartpole"}' --env=CartPole-v0
8
9 To train on experiences with joint PG + IL loss:
10 $ python custom_loss.py --input-files=/tmp/cartpole
11 """
12
13 import argparse
14 from pathlib import Path
15 import os
16
17 import ray
18 from ray import tune
19 from ray.rllib.examples.models.custom_loss_model import CustomLossModel, \
20 TorchCustomLossModel
21 from ray.rllib.models import ModelCatalog
22 from ray.rllib.utils.framework import try_import_tf
23
24 tf1, tf, tfv = try_import_tf()
25
26 parser = argparse.ArgumentParser()
27 parser.add_argument("--torch", action="store_true")
28 parser.add_argument("--stop-iters", type=int, default=200)
29 parser.add_argument(
30 "--input-files",
31 type=str,
32 default=os.path.join(
33 os.path.dirname(os.path.abspath(__file__)),
34 "../tests/data/cartpole_small"))
35
36 if __name__ == "__main__":
37 ray.init()
38 args = parser.parse_args()
39
40 # Bazel makes it hard to find files specified in `args` (and `data`).
41 # Look for them here.
42 if not os.path.exists(args.input_files):
43 # This script runs in the ray/rllib/examples dir.
44 rllib_dir = Path(__file__).parent.parent
45 input_dir = rllib_dir.absolute().joinpath(args.input_files)
46 args.input_files = str(input_dir)
47
48 ModelCatalog.register_custom_model(
49 "custom_loss", TorchCustomLossModel if args.torch else CustomLossModel)
50
51 config = {
52 "env": "CartPole-v0",
53 "num_workers": 0,
54 "model": {
55 "custom_model": "custom_loss",
56 "custom_model_config": {
57 "input_files": args.input_files,
58 },
59 },
60 "framework": "torch" if args.torch else "tf",
61 }
62
63 stop = {
64 "training_iteration": args.stop_iters,
65 }
66
67 tune.run("PG", config=config, stop=stop)
68
[end of rllib/examples/custom_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rllib/examples/custom_loss.py b/rllib/examples/custom_loss.py
--- a/rllib/examples/custom_loss.py
+++ b/rllib/examples/custom_loss.py
@@ -31,7 +31,7 @@
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
- "../tests/data/cartpole_small"))
+ "../tests/data/cartpole/small"))
if __name__ == "__main__":
ray.init()
|
{"golden_diff": "diff --git a/rllib/examples/custom_loss.py b/rllib/examples/custom_loss.py\n--- a/rllib/examples/custom_loss.py\n+++ b/rllib/examples/custom_loss.py\n@@ -31,7 +31,7 @@\n type=str,\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n- \"../tests/data/cartpole_small\"))\n+ \"../tests/data/cartpole/small\"))\n \n if __name__ == \"__main__\":\n ray.init()\n", "issue": "[rllib] MARWIL tuned cartpole example (and my own experiments) produce nan rewards only.\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem? + Reproduction\r\n\r\nI have a custom example that produces offline data and picks it up with MARWIL for training. I observed that I get `nan` reward values for my example every time, so I went a step back and used your cartpole example:\r\n\r\nhttps://github.com/ray-project/ray/blob/cd5a207d69cdaf05b47d956c18e89d928585eec7/rllib/tuned_examples/marwil/cartpole-marwil.yaml\r\n\r\nI'm following the exact steps there, i.e. first run \r\n\r\n```\r\n./train.py --run=PPO --env=CartPole-v0 \\\r\n --stop='{\"timesteps_total\": 50000}' \\\r\n --config='{\"output\": \"/tmp/out\", \"batch_mode\": \"complete_episodes\"}'\r\n```\r\n\r\nfollowed by \r\n\r\n```\r\nrllib train -f cartpole-marwil.yaml\r\n```\r\n\r\nI did this both on my currently preferred stable version `0.8.5`, as well as on the `0.9.0.dev0` wheel. The result is this:\r\n\r\n```\r\n== Status ==\r\nMemory usage on this node: 19.4/32.0 GiB\r\nUsing FIFO scheduling algorithm.\r\nResources requested: 0/12 CPUs, 0/0 GPUs, 0.0/9.96 GiB heap, 0.0/3.42 GiB objects\r\nResult logdir: /Users/maxpumperla/ray_results/cartpole-marwil\r\nNumber of trials: 2 (2 TERMINATED)\r\n+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+\r\n| Trial name | status | loc | beta | iter | total time (s) | ts | reward |\r\n|--------------------------------+------------+-------+--------+--------+------------------+--------+----------|\r\n| MARWIL_CartPole-v0_7af06_00000 | TERMINATED | | 0 | 2206 | 58.5661 | 500007 | nan |\r\n| MARWIL_CartPole-v0_7af06_00001 | TERMINATED | | 1 | 2248 | 58.6117 | 500286 | nan |\r\n+--------------------------------+------------+-------+--------+--------+------------------+--------+----------+\r\n```\r\n\r\nAlso, I've noticed that your MARWIL unit test is a pure smoke test and doesn't check reward values, but I didn't run that locally. 
Maybe it produces nan values as well.\r\n\r\nIn any case I'd appreciate any input here, as we'd love to use MARWIL for our \"real\" use case, in which we see the same behaviour.\n", "before_files": [{"content": "\"\"\"Example of using custom_loss() with an imitation learning loss.\n\nThe default input file is too small to learn a good policy, but you can\ngenerate new experiences for IL training as follows:\n\nTo generate experiences:\n$ ./train.py --run=PG --config='{\"output\": \"/tmp/cartpole\"}' --env=CartPole-v0\n\nTo train on experiences with joint PG + IL loss:\n$ python custom_loss.py --input-files=/tmp/cartpole\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.models.custom_loss_model import CustomLossModel, \\\n TorchCustomLossModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.framework import try_import_tf\n\ntf1, tf, tfv = try_import_tf()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\n \"--input-files\",\n type=str,\n default=os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"../tests/data/cartpole_small\"))\n\nif __name__ == \"__main__\":\n ray.init()\n args = parser.parse_args()\n\n # Bazel makes it hard to find files specified in `args` (and `data`).\n # Look for them here.\n if not os.path.exists(args.input_files):\n # This script runs in the ray/rllib/examples dir.\n rllib_dir = Path(__file__).parent.parent\n input_dir = rllib_dir.absolute().joinpath(args.input_files)\n args.input_files = str(input_dir)\n\n ModelCatalog.register_custom_model(\n \"custom_loss\", TorchCustomLossModel if args.torch else CustomLossModel)\n\n config = {\n \"env\": \"CartPole-v0\",\n \"num_workers\": 0,\n \"model\": {\n \"custom_model\": \"custom_loss\",\n \"custom_model_config\": {\n \"input_files\": args.input_files,\n },\n },\n \"framework\": \"torch\" if args.torch else \"tf\",\n }\n\n stop = {\n \"training_iteration\": args.stop_iters,\n }\n\n tune.run(\"PG\", config=config, stop=stop)\n", "path": "rllib/examples/custom_loss.py"}]}
| 1,822 | 100 |
gh_patches_debug_35952
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1773
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `otelTraceSampled` field to LogEntry for OLTP Logging Instrumentation module
Before opening a feature request against this repo, consider whether the feature should/could be implemented in the [other OpenTelemetry client libraries](https://github.com/open-telemetry/). If so, please [open an issue on opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new) first.
**Is your feature request related to a problem?**
Getting the span id and trace id into the log record is a must. Cloud provider libraries, e.g. Google Cloud Logging, also provide a `logging.googleapis.com/trace_sampled` field under structured logging, which can be populated using this library.
**Describe the solution you'd like**
Add a `record.otelTraceSampled` field similar to `record.otelSpanID` and `record.otelTraceID` in the log entry using the `trace_flags` property in `SpanContext`.
**Describe alternatives you've considered**
Manually injecting the value of `trace_flags` property into the log record by using the current `SpanContext`.
</issue>
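To make the request concrete, the sampled bit is already exposed on the active span's context next to the IDs the instrumentation reads today. A minimal sketch using only the public `opentelemetry-api` surface follows (nothing here is taken from this package's internals or its eventual patch):

```python
# Sketch against the public opentelemetry-api only; not the project's patch.
from opentelemetry import trace


def current_trace_fields():
    ctx = trace.get_current_span().get_span_context()
    return {
        "otelSpanID": format(ctx.span_id, "016x"),
        "otelTraceID": format(ctx.trace_id, "032x"),
        "otelTraceSampled": ctx.trace_flags.sampled,  # the field the issue proposes
    }


if __name__ == "__main__":
    # Outside any active span this prints the zeroed invalid context with sampled=False.
    print(current_trace_fields())
```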
<code>
[start of instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 DEFAULT_LOGGING_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s"
16
17
18 _MODULE_DOC = """
19 The OpenTelemetry ``logging`` integration automatically injects tracing context into log statements.
20
21 The integration registers a custom log record factory with the standard library logging module that automatically injects
22 tracing context into log record objects. Optionally, the integration can also call ``logging.basicConfig()`` to set a logging
23 format with placeholders for span ID, trace ID and service name.
24
25 The following keys are injected into log record objects by the factory:
26
27 - ``otelSpanID``
28 - ``otelTraceID``
29 - ``otelServiceName``
30
31 The integration uses the following logging format by default:
32
33 .. code-block::
34
35 {default_logging_format}
36
37 Enable trace context injection
38 ------------------------------
39
40 The integration is opt-in and must be enabled explicitly by setting the environment variable ``OTEL_PYTHON_LOG_CORRELATION`` to ``true``.
41
42 The integration always registers the custom factory that injects the tracing context into the log record objects. Setting
43 ``OTEL_PYTHON_LOG_CORRELATION`` to ``true`` calls ``logging.basicConfig()`` to set a logging format that actually makes
44 use of the injected variables.
45
46
47 Environment variables
48 ---------------------
49
50 .. envvar:: OTEL_PYTHON_LOG_CORRELATION
51
52 This env var must be set to ``true`` in order to enable trace context injection into logs by calling ``logging.basicConfig()`` and
53 setting a logging format that makes use of the injected tracing variables.
54
55 Alternatively, ``set_logging_format`` argument can be set to ``True`` when initializing the ``LoggingInstrumentor`` class to achieve the
56 same effect.
57
58 .. code-block::
59
60 LoggingInstrumentor(set_logging_format=True)
61
62 The default value is ``false``.
63
64 .. envvar:: OTEL_PYTHON_LOG_FORMAT
65
66 This env var can be used to instruct the instrumentation to use a custom logging format.
67
68 Alternatively, a custom logging format can be passed to the ``LoggingInstrumentor`` as the ``logging_format`` argument. For example:
69
70 .. code-block::
71
72 LoggingInstrumentor(logging_format='%(msg)s [span_id=%(span_id)s]')
73
74
75 The default value is:
76
77 .. code-block::
78
79 {default_logging_format}
80
81 .. envvar:: OTEL_PYTHON_LOG_LEVEL
82
83 This env var can be used to set a custom logging level.
84
85 Alternatively, log level can be passed to the ``LoggingInstrumentor`` during initialization. For example:
86
87 .. code-block::
88
89 LoggingInstrumentor(log_level=logging.DEBUG)
90
91
92 The default value is ``info``.
93
94 Options are:
95
96 - ``info``
97 - ``error``
98 - ``debug``
99 - ``warning``
100
101 Manually calling logging.basicConfig
102 ------------------------------------
103
104 ``logging.basicConfig()`` can be called to set a global logging level and format. Only the first ever call has any effect on the global logger.
105 Any subsequent calls have no effect and do not override a previously configured global logger. This integration calls ``logging.basicConfig()`` for you
106 when ``OTEL_PYTHON_LOG_CORRELATION`` is set to ``true``. It uses the format and level specified by ``OTEL_PYTHON_LOG_FORMAT`` and ``OTEL_PYTHON_LOG_LEVEL``
107 environment variables respectively.
108
109 If your code or some other library/framework you are using calls logging.basicConfig before this integration is enabled, then this integration's logging
110 format will not be used and log statements will not contain tracing context. For this reason, you'll need to make sure this integration is enabled as early
111 as possible in the service lifecycle or your framework is configured to use a logging format with placeholders for tracing context. This can be achieved by
112 adding the following placeholders to your logging format:
113
114 .. code-block::
115
116 %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s
117
118
119
120 API
121 -----
122
123 .. code-block:: python
124
125 from opentelemetry.instrumentation.logging import LoggingInstrumentor
126
127 LoggingInstrumentor().instrument(set_logging_format=True)
128
129
130 Note
131 -----
132
133 If you do not set ``OTEL_PYTHON_LOG_CORRELATION`` to ``true`` but instead set the logging format manually or through your framework, you must ensure that this
134 integration is enabled before you set the logging format. This is important because unless the integration is enabled, the tracing context variables
135 are not injected into the log record objects. This means any attempted log statements made after setting the logging format and before enabling this integration
136 will result in KeyError exceptions. Such exceptions are automatically swallowed by the logging module and do not result in crashes but you may still lose out
137 on important log messages.
138 """.format(
139 default_logging_format=DEFAULT_LOGGING_FORMAT
140 )
141
[end of instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py]
[start of instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint: disable=empty-docstring,no-value-for-parameter,no-member,no-name-in-module
16
17 import logging # pylint: disable=import-self
18 from os import environ
19 from typing import Collection
20
21 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
22 from opentelemetry.instrumentation.logging.constants import (
23 _MODULE_DOC,
24 DEFAULT_LOGGING_FORMAT,
25 )
26 from opentelemetry.instrumentation.logging.environment_variables import (
27 OTEL_PYTHON_LOG_CORRELATION,
28 OTEL_PYTHON_LOG_FORMAT,
29 OTEL_PYTHON_LOG_LEVEL,
30 )
31 from opentelemetry.instrumentation.logging.package import _instruments
32 from opentelemetry.trace import (
33 INVALID_SPAN,
34 INVALID_SPAN_CONTEXT,
35 get_current_span,
36 get_tracer_provider,
37 )
38
39 __doc__ = _MODULE_DOC
40
41 LEVELS = {
42 "debug": logging.DEBUG,
43 "info": logging.INFO,
44 "warning": logging.WARNING,
45 "error": logging.ERROR,
46 }
47
48
49 class LoggingInstrumentor(BaseInstrumentor): # pylint: disable=empty-docstring
50 __doc__ = f"""An instrumentor for stdlib logging module.
51
52 This instrumentor injects tracing context into logging records and optionally sets the global logging format to the following:
53
54 .. code-block::
55
56 {DEFAULT_LOGGING_FORMAT}
57
58 def log_hook(span: Span, record: LogRecord):
59 if span and span.is_recording():
60 record.custom_user_attribute_from_log_hook = "some-value"
61
62 Args:
63 tracer_provider: Tracer provider instance that can be used to fetch a tracer.
64 set_logging_format: When set to True, it calls logging.basicConfig() and sets a logging format.
65 logging_format: Accepts a string and sets it as the logging format when set_logging_format
66 is set to True.
67 log_level: Accepts one of the following values and sets the logging level to it.
68 logging.INFO
69 logging.DEBUG
70 logging.WARN
71 logging.ERROR
72 logging.FATAL
73 log_hook: execute custom logic when record is created
74
75 See `BaseInstrumentor`
76 """
77
78 _old_factory = None
79 _log_hook = None
80
81 def instrumentation_dependencies(self) -> Collection[str]:
82 return _instruments
83
84 def _instrument(self, **kwargs):
85 provider = kwargs.get("tracer_provider", None) or get_tracer_provider()
86 old_factory = logging.getLogRecordFactory()
87 LoggingInstrumentor._old_factory = old_factory
88 LoggingInstrumentor._log_hook = kwargs.get("log_hook", None)
89
90 service_name = None
91
92 def record_factory(*args, **kwargs):
93 record = old_factory(*args, **kwargs)
94
95 record.otelSpanID = "0"
96 record.otelTraceID = "0"
97
98 nonlocal service_name
99 if service_name is None:
100 resource = getattr(provider, "resource", None)
101 if resource:
102 service_name = (
103 resource.attributes.get("service.name") or ""
104 )
105 else:
106 service_name = ""
107
108 record.otelServiceName = service_name
109
110 span = get_current_span()
111 if span != INVALID_SPAN:
112 ctx = span.get_span_context()
113 if ctx != INVALID_SPAN_CONTEXT:
114 record.otelSpanID = format(ctx.span_id, "016x")
115 record.otelTraceID = format(ctx.trace_id, "032x")
116 if callable(LoggingInstrumentor._log_hook):
117 try:
118 LoggingInstrumentor._log_hook( # pylint: disable=E1102
119 span, record
120 )
121 except Exception: # pylint: disable=W0703
122 pass
123
124 return record
125
126 logging.setLogRecordFactory(record_factory)
127
128 set_logging_format = kwargs.get(
129 "set_logging_format",
130 environ.get(OTEL_PYTHON_LOG_CORRELATION, "false").lower()
131 == "true",
132 )
133
134 if set_logging_format:
135 log_format = kwargs.get(
136 "logging_format", environ.get(OTEL_PYTHON_LOG_FORMAT, None)
137 )
138 log_format = log_format or DEFAULT_LOGGING_FORMAT
139
140 log_level = kwargs.get(
141 "log_level", LEVELS.get(environ.get(OTEL_PYTHON_LOG_LEVEL))
142 )
143 log_level = log_level or logging.INFO
144
145 logging.basicConfig(format=log_format, level=log_level)
146
147 def _uninstrument(self, **kwargs):
148 if LoggingInstrumentor._old_factory:
149 logging.setLogRecordFactory(LoggingInstrumentor._old_factory)
150 LoggingInstrumentor._old_factory = None
151
[end of instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py]
</code>
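For completeness, this is how a consumer could surface the proposed field once it is injected. It is a hedged usage sketch rather than part of any patch; with the unpatched package the extra placeholder would simply trigger the KeyError behaviour described in the module docs above.

```python
# Hedged usage sketch: assumes otelTraceSampled is injected as proposed in the issue.
import logging

from opentelemetry.instrumentation.logging import LoggingInstrumentor

LoggingInstrumentor().instrument(
    set_logging_format=True,
    logging_format=(
        "%(asctime)s %(levelname)s [%(name)s] "
        "[trace_id=%(otelTraceID)s span_id=%(otelSpanID)s "
        "sampled=%(otelTraceSampled)s] - %(message)s"
    ),
)

logging.getLogger(__name__).info("hello with trace context")
```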
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py
@@ -94,6 +94,7 @@
record.otelSpanID = "0"
record.otelTraceID = "0"
+ record.otelTraceSampled = False
nonlocal service_name
if service_name is None:
@@ -113,6 +114,7 @@
if ctx != INVALID_SPAN_CONTEXT:
record.otelSpanID = format(ctx.span_id, "016x")
record.otelTraceID = format(ctx.trace_id, "032x")
+ record.otelTraceSampled = ctx.trace_flags.sampled
if callable(LoggingInstrumentor._log_hook):
try:
LoggingInstrumentor._log_hook( # pylint: disable=E1102
diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py
--- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py
+++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-DEFAULT_LOGGING_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s"
+DEFAULT_LOGGING_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s trace_sampled=%(otelTraceSampled)s] - %(message)s"
_MODULE_DOC = """
@@ -27,6 +27,7 @@
- ``otelSpanID``
- ``otelTraceID``
- ``otelServiceName``
+- ``otelTraceSampled``
The integration uses the following logging format by default:
@@ -113,7 +114,7 @@
.. code-block::
- %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s
+ %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s %(otelTraceSampled)s
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py\n@@ -94,6 +94,7 @@\n \n record.otelSpanID = \"0\"\n record.otelTraceID = \"0\"\n+ record.otelTraceSampled = False\n \n nonlocal service_name\n if service_name is None:\n@@ -113,6 +114,7 @@\n if ctx != INVALID_SPAN_CONTEXT:\n record.otelSpanID = format(ctx.span_id, \"016x\")\n record.otelTraceID = format(ctx.trace_id, \"032x\")\n+ record.otelTraceSampled = ctx.trace_flags.sampled\n if callable(LoggingInstrumentor._log_hook):\n try:\n LoggingInstrumentor._log_hook( # pylint: disable=E1102\ndiff --git a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py\n--- a/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py\n+++ b/instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-DEFAULT_LOGGING_FORMAT = \"%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s\"\n+DEFAULT_LOGGING_FORMAT = \"%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s trace_sampled=%(otelTraceSampled)s] - %(message)s\"\n \n \n _MODULE_DOC = \"\"\"\n@@ -27,6 +27,7 @@\n - ``otelSpanID``\n - ``otelTraceID``\n - ``otelServiceName``\n+- ``otelTraceSampled``\n \n The integration uses the following logging format by default:\n \n@@ -113,7 +114,7 @@\n \n .. code-block::\n \n- %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s\n+ %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s %(otelTraceSampled)s\n", "issue": "Add `otelTraceSampled` field to LogEntry for OLTP Logging Instrumentation module\nBefore opening a feature request against this repo, consider whether the feature should/could be implemented in the [other OpenTelemetry client libraries](https://github.com/open-telemetry/). If so, please [open an issue on opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new) first.\r\n\r\n**Is your feature request related to a problem?**\r\nGetting span id and trace id in the log record is a must. Cloud provider libraries, e.g. Google Cloud Logging also provides a `logging.googleapis.com/trace_sampled` field under structured logging, which can be populated using this library. \r\n\r\n\r\n**Describe the solution you'd like**\r\nAdd a `record.otelTraceSampled` field similar to `record.otelSpanID` and `record.otelTraceID` in the log entry using the `trace_flags` property in `SpanContext`. 
\r\n\r\n**Describe alternatives you've considered**\r\nManually injecting the value of `trace_flags` property into the log record by using the current `SpanContext`.\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nDEFAULT_LOGGING_FORMAT = \"%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] [trace_id=%(otelTraceID)s span_id=%(otelSpanID)s resource.service.name=%(otelServiceName)s] - %(message)s\"\n\n\n_MODULE_DOC = \"\"\"\nThe OpenTelemetry ``logging`` integration automatically injects tracing context into log statements.\n\nThe integration registers a custom log record factory with the the standard library logging module that automatically inject\ntracing context into log record objects. Optionally, the integration can also call ``logging.basicConfig()`` to set a logging\nformat with placeholders for span ID, trace ID and service name.\n\nThe following keys are injected into log record objects by the factory:\n\n- ``otelSpanID``\n- ``otelTraceID``\n- ``otelServiceName``\n\nThe integration uses the following logging format by default:\n\n.. code-block::\n\n {default_logging_format}\n\nEnable trace context injection\n------------------------------\n\nThe integration is opt-in and must be enabled explicitly by setting the environment variable ``OTEL_PYTHON_LOG_CORRELATION`` to ``true``.\n\nThe integration always registers the custom factory that injects the tracing context into the log record objects. Setting\n``OTEL_PYTHON_LOG_CORRELATION`` to ``true`` calls ``logging.basicConfig()`` to set a logging format that actually makes\nuse of the injected variables.\n\n\nEnvironment variables\n---------------------\n\n.. envvar:: OTEL_PYTHON_LOG_CORRELATION\n\nThis env var must be set to ``true`` in order to enable trace context injection into logs by calling ``logging.basicConfig()`` and\nsetting a logging format that makes use of the injected tracing variables.\n\nAlternatively, ``set_logging_format`` argument can be set to ``True`` when initializing the ``LoggingInstrumentor`` class to achieve the\nsame effect.\n\n.. code-block::\n\n LoggingInstrumentor(set_logging_format=True)\n\nThe default value is ``false``.\n\n.. envvar:: OTEL_PYTHON_LOG_FORMAT\n\nThis env var can be used to instruct the instrumentation to use a custom logging format.\n\nAlternatively, a custom logging format can be passed to the ``LoggingInstrumentor`` as the ``logging_format`` argument. For example:\n\n.. code-block::\n\n LoggingInstrumentor(logging_format='%(msg)s [span_id=%(span_id)s]')\n\n\nThe default value is:\n\n.. code-block::\n\n {default_logging_format}\n\n.. envvar:: OTEL_PYTHON_LOG_LEVEL\n\nThis env var can be used to set a custom logging level.\n\nAlternatively, log level can be passed to the ``LoggingInstrumentor`` during initialization. For example:\n\n.. 
code-block::\n\n LoggingInstrumentor(log_level=logging.DEBUG)\n\n\nThe default value is ``info``.\n\nOptions are:\n\n- ``info``\n- ``error``\n- ``debug``\n- ``warning``\n\nManually calling logging.basicConfig\n------------------------------------\n\n``logging.basicConfig()`` can be called to set a global logging level and format. Only the first ever call has any effect on the global logger.\nAny subsequent calls have no effect and do not override a previously configured global logger. This integration calls ``logging.basicConfig()`` for you\nwhen ``OTEL_PYTHON_LOG_CORRELATION`` is set to ``true``. It uses the format and level specified by ``OTEL_PYTHON_LOG_FORMAT`` and ``OTEL_PYTHON_LOG_LEVEL``\nenvironment variables respectively.\n\nIf you code or some other library/framework you are using calls logging.basicConfig before this integration is enabled, then this integration's logging\nformat will not be used and log statements will not contain tracing context. For this reason, you'll need to make sure this integration is enabled as early\nas possible in the service lifecycle or your framework is configured to use a logging format with placeholders for tracing context. This can be achieved by\nadding the following placeholders to your logging format:\n\n.. code-block::\n\n %(otelSpanID)s %(otelTraceID)s %(otelServiceName)s\n\n\n\nAPI\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.logging import LoggingInstrumentor\n\n LoggingInstrumentor().instrument(set_logging_format=True)\n\n\nNote\n-----\n\nIf you do not set ``OTEL_PYTHON_LOG_CORRELATION`` to ``true`` but instead set the logging format manually or through your framework, you must ensure that this\nintegration is enabled before you set the logging format. This is important because unless the integration is enabled, the tracing context variables\nare not injected into the log record objects. This means any attempted log statements made after setting the logging format and before enabling this integration\nwill result in KeyError exceptions. 
Such exceptions are automatically swallowed by the logging module and do not result in crashes but you may still lose out\non important log messages.\n\"\"\".format(\n default_logging_format=DEFAULT_LOGGING_FORMAT\n)\n", "path": "instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/constants.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=empty-docstring,no-value-for-parameter,no-member,no-name-in-module\n\nimport logging # pylint: disable=import-self\nfrom os import environ\nfrom typing import Collection\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.logging.constants import (\n _MODULE_DOC,\n DEFAULT_LOGGING_FORMAT,\n)\nfrom opentelemetry.instrumentation.logging.environment_variables import (\n OTEL_PYTHON_LOG_CORRELATION,\n OTEL_PYTHON_LOG_FORMAT,\n OTEL_PYTHON_LOG_LEVEL,\n)\nfrom opentelemetry.instrumentation.logging.package import _instruments\nfrom opentelemetry.trace import (\n INVALID_SPAN,\n INVALID_SPAN_CONTEXT,\n get_current_span,\n get_tracer_provider,\n)\n\n__doc__ = _MODULE_DOC\n\nLEVELS = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n}\n\n\nclass LoggingInstrumentor(BaseInstrumentor): # pylint: disable=empty-docstring\n __doc__ = f\"\"\"An instrumentor for stdlib logging module.\n\n This instrumentor injects tracing context into logging records and optionally sets the global logging format to the following:\n\n .. 
code-block::\n\n {DEFAULT_LOGGING_FORMAT}\n\n def log_hook(span: Span, record: LogRecord):\n if span and span.is_recording():\n record.custom_user_attribute_from_log_hook = \"some-value\"\n\n Args:\n tracer_provider: Tracer provider instance that can be used to fetch a tracer.\n set_logging_format: When set to True, it calls logging.basicConfig() and sets a logging format.\n logging_format: Accepts a string and sets it as the logging format when set_logging_format\n is set to True.\n log_level: Accepts one of the following values and sets the logging level to it.\n logging.INFO\n logging.DEBUG\n logging.WARN\n logging.ERROR\n logging.FATAL\n log_hook: execute custom logic when record is created\n\n See `BaseInstrumentor`\n \"\"\"\n\n _old_factory = None\n _log_hook = None\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n provider = kwargs.get(\"tracer_provider\", None) or get_tracer_provider()\n old_factory = logging.getLogRecordFactory()\n LoggingInstrumentor._old_factory = old_factory\n LoggingInstrumentor._log_hook = kwargs.get(\"log_hook\", None)\n\n service_name = None\n\n def record_factory(*args, **kwargs):\n record = old_factory(*args, **kwargs)\n\n record.otelSpanID = \"0\"\n record.otelTraceID = \"0\"\n\n nonlocal service_name\n if service_name is None:\n resource = getattr(provider, \"resource\", None)\n if resource:\n service_name = (\n resource.attributes.get(\"service.name\") or \"\"\n )\n else:\n service_name = \"\"\n\n record.otelServiceName = service_name\n\n span = get_current_span()\n if span != INVALID_SPAN:\n ctx = span.get_span_context()\n if ctx != INVALID_SPAN_CONTEXT:\n record.otelSpanID = format(ctx.span_id, \"016x\")\n record.otelTraceID = format(ctx.trace_id, \"032x\")\n if callable(LoggingInstrumentor._log_hook):\n try:\n LoggingInstrumentor._log_hook( # pylint: disable=E1102\n span, record\n )\n except Exception: # pylint: disable=W0703\n pass\n\n return record\n\n logging.setLogRecordFactory(record_factory)\n\n set_logging_format = kwargs.get(\n \"set_logging_format\",\n environ.get(OTEL_PYTHON_LOG_CORRELATION, \"false\").lower()\n == \"true\",\n )\n\n if set_logging_format:\n log_format = kwargs.get(\n \"logging_format\", environ.get(OTEL_PYTHON_LOG_FORMAT, None)\n )\n log_format = log_format or DEFAULT_LOGGING_FORMAT\n\n log_level = kwargs.get(\n \"log_level\", LEVELS.get(environ.get(OTEL_PYTHON_LOG_LEVEL))\n )\n log_level = log_level or logging.INFO\n\n logging.basicConfig(format=log_format, level=log_level)\n\n def _uninstrument(self, **kwargs):\n if LoggingInstrumentor._old_factory:\n logging.setLogRecordFactory(LoggingInstrumentor._old_factory)\n LoggingInstrumentor._old_factory = None\n", "path": "instrumentation/opentelemetry-instrumentation-logging/src/opentelemetry/instrumentation/logging/__init__.py"}]}
| 3,757 | 640 |
gh_patches_debug_57313
|
rasdani/github-patches
|
git_diff
|
vllm-project__vllm-3129
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v0.3.3] Release Tracker
**ETA**: Feb 29th - Mar 1st
## Major changes
* StarCoder2 support
* Performance optimization and LoRA support for Gemma
* Performance optimization for MoE kernel
* 2/3/8-bit GPTQ support
* [Experimental] AWS Inferentia2 support
## PRs to be merged before the release
- [x] #2330 #2223
- [ ] ~~#2761~~
- [x] #2819
- [x] #3087 #3099
- [x] #3089
</issue>
<code>
[start of vllm/__init__.py]
1 """vLLM: a high-throughput and memory-efficient inference engine for LLMs"""
2
3 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
4 from vllm.engine.async_llm_engine import AsyncLLMEngine
5 from vllm.engine.llm_engine import LLMEngine
6 from vllm.engine.ray_utils import initialize_cluster
7 from vllm.entrypoints.llm import LLM
8 from vllm.outputs import CompletionOutput, RequestOutput
9 from vllm.sampling_params import SamplingParams
10
11 __version__ = "0.3.2"
12
13 __all__ = [
14 "LLM",
15 "SamplingParams",
16 "RequestOutput",
17 "CompletionOutput",
18 "LLMEngine",
19 "EngineArgs",
20 "AsyncLLMEngine",
21 "AsyncEngineArgs",
22 "initialize_cluster",
23 ]
24
[end of vllm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vllm/__init__.py b/vllm/__init__.py
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
-__version__ = "0.3.2"
+__version__ = "0.3.3"
__all__ = [
"LLM",
|
{"golden_diff": "diff --git a/vllm/__init__.py b/vllm/__init__.py\n--- a/vllm/__init__.py\n+++ b/vllm/__init__.py\n@@ -8,7 +8,7 @@\n from vllm.outputs import CompletionOutput, RequestOutput\n from vllm.sampling_params import SamplingParams\n \n-__version__ = \"0.3.2\"\n+__version__ = \"0.3.3\"\n \n __all__ = [\n \"LLM\",\n", "issue": "[v0.3.3] Release Tracker\n**ETA**: Feb 29th - Mar 1st\r\n\r\n## Major changes\r\n\r\n* StarCoder2 support\r\n* Performance optimization and LoRA support for Gemma\r\n* Performance optimization for MoE kernel\r\n* 2/3/8-bit GPTQ support\r\n* [Experimental] AWS Inferentia2 support\r\n\r\n## PRs to be merged before the release\r\n\r\n- [x] #2330 #2223\r\n- [ ] ~~#2761~~\r\n- [x] #2819 \r\n- [x] #3087 #3099\r\n- [x] #3089 \n", "before_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.3.2\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}]}
| 904 | 108 |
gh_patches_debug_24140
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-1340
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stop pulling data from RedShift
Starting in version 72 (Jan 2020), Firefox Telemetry uses BigQuery instead of RedShift.
We currently pull data from both data sources for frequency capping and performance reports.
In about a year from now the usage of pre-72 versions will be limited and we will be able to remove the RedShift queries from the codebase.
- [x] Stop pulling for Freq Capped Jobs
- [x] Stop pulling Daily Data
</issue>
<code>
[start of snippets/base/etl.py]
1 import collections
2 import json
3
4 from urllib.parse import urlencode
5
6 from django.conf import settings
7 from django.db.transaction import atomic
8 from redash_dynamic_query import RedashDynamicQuery
9
10 from snippets.base.models import CHANNELS, DailyImpressions, JobDailyPerformance, Job
11
12
13 REDASH_QUERY_IDS = {
14 'redshift-job': 68135,
15 'bq-job': 68136,
16 'redshift-impressions': 68345,
17 'bq-impressions': 68341,
18 }
19
20 redash = RedashDynamicQuery(
21 endpoint=settings.REDASH_ENDPOINT,
22 apikey=settings.REDASH_API_KEY,
23 max_wait=settings.REDASH_MAX_WAIT)
24
25
26 def redash_source_url(query_id_or_name, **params):
27 query_id = REDASH_QUERY_IDS.get(query_id_or_name, query_id_or_name)
28 url = f'{settings.REDASH_ENDPOINT}/queries/{query_id}/source'
29 if params:
30 url += '?' + urlencode({f'p_{key}_{query_id}': value
31 for key, value in params.items()})
32 return url
33
34
35 def redash_rows(query_name, date):
36 query_id = REDASH_QUERY_IDS[query_name]
37 bind_data = {'date': str(date)}
38 result = redash.query(query_id, bind_data)
39 return result['query_result']['data']['rows']
40
41
42 def prosses_rows(rows, key='message_id'):
43 job_ids = [str(x) for x in Job.objects.all().values_list('id', flat=True)]
44 new_rows = []
45 for row in sorted(rows, key=lambda x: x[key]):
46 # Remove rows with invalid Job IDs
47 if row['message_id'] not in job_ids:
48 continue
49
50 # Redash uses {} instead of null
51 if row['event_context'] == '{}':
52 row['event_context'] = ''
53
54 # Sometimes data in Telemetry populate `event_context`, some
55 # other times it uses `additional_properties['value']` to
56 # place the event context. Extract information from both
57 # places to identify the event.
58 properties = json.loads(row.get('additional_properties', '{}'))
59 event = row['event_context'] or properties.get('value', '') or row['event']
60
61 if event in ['CLICK_BUTTON', 'CLICK']:
62 event = 'click'
63 elif event == 'IMPRESSION':
64 event = 'impression'
65 elif event == 'BLOCK':
66 event = 'block'
67 elif event == 'DISMISS':
68 event = 'dismiss'
69 elif event == 'scene1-button-learn-more':
70 event = 'go_to_scene2'
71 elif event in ['subscribe-success',
72 'subscribe-error',
73 'conversion-subscribe-activation']:
74 event = event.replace('-', '_')
75 else:
76 # Ignore invalid event
77 continue
78
79 row['event'] = event
80
81 # Normalize channel name, based on what kind of snippets they get.
82 channel = row['channel']
83 if not channel:
84 channel = 'release'
85 row['channel'] = next(
86 (item for item in CHANNELS if
87 channel.startswith(item)), 'release'
88 )
89
90 # Normalize country
91 country_code = row['country_code']
92 if country_code in ['ERROR', None]:
93 row['country_code'] = 'XX'
94
95 # Not needed anymore
96 row.pop('event_context', None)
97 row.pop('additional_properties', None)
98
99 new_rows.append(row)
100
101 # Aggregate counts of same events for the global count.
102 processed = collections.defaultdict(dict)
103 for row in new_rows:
104 event = row['event']
105 processed[row[key]][event] = processed[row[key]].get(event, 0) + row['counts']
106
107 detail = [{
108 'event': row['event'],
109 'channel': row['channel'],
110 'country': row['country_code'],
111 'counts': row['counts'],
112 }]
113
114 if not processed[row[key]].get('details'):
115 processed[row[key]]['details'] = detail
116 else:
117 for drow in processed[row[key]]['details']:
118 if ((drow['event'] == row['event'] and
119 drow['channel'] == row['channel'] and
120 drow['country'] == row['country_code'])):
121 drow['counts'] += row['counts']
122 break
123 else:
124 processed[row[key]]['details'] += detail
125
126 # Last pass for multi-scene snippets: Click events here refer to
127 # clicks of secondary links listed on the template that go to
128 # terms of services or additional information and are displayed
129 # in the small text below the input element. These do not count
130 # clicking on `Learn more` (i.e. going from scene 1 to scene 2)
131 # or the main Call To Action. The later is measured in
132 # `conversion_subscribe_activation` and this is the value which
133 # is important to us and thus we rename this to `clicks`.
134 for k, v in processed.items():
135 if 'conversion_subscribe_activation' in v:
136 processed[k]['other_click'] = processed[k].get('click', 0)
137 processed[k]['click'] = processed[k].pop('conversion_subscribe_activation')
138 for row in processed[k]['details']:
139 if row['event'] == 'click':
140 row['event'] = 'other_click'
141 elif row['event'] == 'conversion_subscribe_activation':
142 row['event'] = 'click'
143
144 return processed
145
146
147 def update_job_metrics(date):
148 rows = []
149 for query in ['redshift-job', 'bq-job']:
150 rows += redash_rows(query, date)
151
152 processed = prosses_rows(rows, key='message_id')
153 with atomic():
154 JobDailyPerformance.objects.filter(date=date).delete()
155 for job, data in processed.items():
156 JobDailyPerformance.objects.create(
157 date=date,
158 job=Job.objects.get(id=job),
159 **data
160 )
161 return len(processed) > 0
162
163
164 def update_impressions(date):
165 rows = []
166
167 for query in ['redshift-impressions', 'bq-impressions']:
168 rows += redash_rows(query, date)
169
170 details = []
171 for row in rows:
172 # Normalize channel name, based on what kind of snippets they get.
173 channel = row['channel']
174 if not channel:
175 channel = 'release'
176 channel = next(
177 (item for item in CHANNELS if
178 channel.startswith(item)), 'release'
179 )
180
181 # Aggregate counts of the same duration and the same channel.
182 for item in details:
183 if (item['channel'] == channel and item['duration'] == row['duration']):
184 item['counts'] += row['counts']
185 break
186 else:
187 details.append({
188 'channel': channel,
189 'duration': row['duration'],
190 'counts': row['counts'],
191 })
192
193 with atomic():
194 DailyImpressions.objects.filter(date=date).delete()
195 DailyImpressions.objects.create(
196 date=date,
197 details=details
198 )
199
200 return len(details)
201
[end of snippets/base/etl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/snippets/base/etl.py b/snippets/base/etl.py
--- a/snippets/base/etl.py
+++ b/snippets/base/etl.py
@@ -11,10 +11,12 @@
REDASH_QUERY_IDS = {
- 'redshift-job': 68135,
'bq-job': 68136,
- 'redshift-impressions': 68345,
'bq-impressions': 68341,
+
+ # Not currently used but kept here for reference.
+ 'redshift-job': 68135,
+ 'redshift-impressions': 68345,
}
redash = RedashDynamicQuery(
@@ -145,10 +147,7 @@
def update_job_metrics(date):
- rows = []
- for query in ['redshift-job', 'bq-job']:
- rows += redash_rows(query, date)
-
+ rows = redash_rows('bq-job', date)
processed = prosses_rows(rows, key='message_id')
with atomic():
JobDailyPerformance.objects.filter(date=date).delete()
@@ -162,11 +161,7 @@
def update_impressions(date):
- rows = []
-
- for query in ['redshift-impressions', 'bq-impressions']:
- rows += redash_rows(query, date)
-
+ rows = redash_rows('bq-impressions', date)
details = []
for row in rows:
# Normalize channel name, based on what kind of snippets they get.
|
{"golden_diff": "diff --git a/snippets/base/etl.py b/snippets/base/etl.py\n--- a/snippets/base/etl.py\n+++ b/snippets/base/etl.py\n@@ -11,10 +11,12 @@\n \n \n REDASH_QUERY_IDS = {\n- 'redshift-job': 68135,\n 'bq-job': 68136,\n- 'redshift-impressions': 68345,\n 'bq-impressions': 68341,\n+\n+ # Not currently used but kept here for reference.\n+ 'redshift-job': 68135,\n+ 'redshift-impressions': 68345,\n }\n \n redash = RedashDynamicQuery(\n@@ -145,10 +147,7 @@\n \n \n def update_job_metrics(date):\n- rows = []\n- for query in ['redshift-job', 'bq-job']:\n- rows += redash_rows(query, date)\n-\n+ rows = redash_rows('bq-job', date)\n processed = prosses_rows(rows, key='message_id')\n with atomic():\n JobDailyPerformance.objects.filter(date=date).delete()\n@@ -162,11 +161,7 @@\n \n \n def update_impressions(date):\n- rows = []\n-\n- for query in ['redshift-impressions', 'bq-impressions']:\n- rows += redash_rows(query, date)\n-\n+ rows = redash_rows('bq-impressions', date)\n details = []\n for row in rows:\n # Normalize channel name, based on what kind of snippets they get.\n", "issue": "Stop pulling data from RedShift \nStarting in version 72 (Jan 2020), Firefox Telemetry uses BigQuery instead of RedShift. \r\n\r\nWe currently pull data from both data sources for frequency capping and performance reports. \r\n\r\nIn about a year from now the usage of pre-72 versions will be limited and we will be able to remove the RedShift queries from the codebase.\r\n\r\n- [x] Stop pulling for Freq Capped Jobs\r\n- [x] Stop pulling Daily Data\n", "before_files": [{"content": "import collections\nimport json\n\nfrom urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.db.transaction import atomic\nfrom redash_dynamic_query import RedashDynamicQuery\n\nfrom snippets.base.models import CHANNELS, DailyImpressions, JobDailyPerformance, Job\n\n\nREDASH_QUERY_IDS = {\n 'redshift-job': 68135,\n 'bq-job': 68136,\n 'redshift-impressions': 68345,\n 'bq-impressions': 68341,\n}\n\nredash = RedashDynamicQuery(\n endpoint=settings.REDASH_ENDPOINT,\n apikey=settings.REDASH_API_KEY,\n max_wait=settings.REDASH_MAX_WAIT)\n\n\ndef redash_source_url(query_id_or_name, **params):\n query_id = REDASH_QUERY_IDS.get(query_id_or_name, query_id_or_name)\n url = f'{settings.REDASH_ENDPOINT}/queries/{query_id}/source'\n if params:\n url += '?' + urlencode({f'p_{key}_{query_id}': value\n for key, value in params.items()})\n return url\n\n\ndef redash_rows(query_name, date):\n query_id = REDASH_QUERY_IDS[query_name]\n bind_data = {'date': str(date)}\n result = redash.query(query_id, bind_data)\n return result['query_result']['data']['rows']\n\n\ndef prosses_rows(rows, key='message_id'):\n job_ids = [str(x) for x in Job.objects.all().values_list('id', flat=True)]\n new_rows = []\n for row in sorted(rows, key=lambda x: x[key]):\n # Remove rows with invalid Job IDs\n if row['message_id'] not in job_ids:\n continue\n\n # Redash uses {} instead of null\n if row['event_context'] == '{}':\n row['event_context'] = ''\n\n # Sometimes data in Telemetry populate `event_context`, some\n # other times it uses `additional_properties['value']` to\n # place the event context. 
Extract information from both\n # places to identify the event.\n properties = json.loads(row.get('additional_properties', '{}'))\n event = row['event_context'] or properties.get('value', '') or row['event']\n\n if event in ['CLICK_BUTTON', 'CLICK']:\n event = 'click'\n elif event == 'IMPRESSION':\n event = 'impression'\n elif event == 'BLOCK':\n event = 'block'\n elif event == 'DISMISS':\n event = 'dismiss'\n elif event == 'scene1-button-learn-more':\n event = 'go_to_scene2'\n elif event in ['subscribe-success',\n 'subscribe-error',\n 'conversion-subscribe-activation']:\n event = event.replace('-', '_')\n else:\n # Ignore invalid event\n continue\n\n row['event'] = event\n\n # Normalize channel name, based on what kind of snippets they get.\n channel = row['channel']\n if not channel:\n channel = 'release'\n row['channel'] = next(\n (item for item in CHANNELS if\n channel.startswith(item)), 'release'\n )\n\n # Normalize country\n country_code = row['country_code']\n if country_code in ['ERROR', None]:\n row['country_code'] = 'XX'\n\n # Not needed anymore\n row.pop('event_context', None)\n row.pop('additional_properties', None)\n\n new_rows.append(row)\n\n # Aggregate counts of same events for the global count.\n processed = collections.defaultdict(dict)\n for row in new_rows:\n event = row['event']\n processed[row[key]][event] = processed[row[key]].get(event, 0) + row['counts']\n\n detail = [{\n 'event': row['event'],\n 'channel': row['channel'],\n 'country': row['country_code'],\n 'counts': row['counts'],\n }]\n\n if not processed[row[key]].get('details'):\n processed[row[key]]['details'] = detail\n else:\n for drow in processed[row[key]]['details']:\n if ((drow['event'] == row['event'] and\n drow['channel'] == row['channel'] and\n drow['country'] == row['country_code'])):\n drow['counts'] += row['counts']\n break\n else:\n processed[row[key]]['details'] += detail\n\n # Last pass for multi-scene snippets: Click events here refer to\n # clicks of secondary links listed on the template that go to\n # terms of services or additional information and are displayed\n # in the small text below the input element. These do not count\n # clicking on `Learn more` (i.e. going from scene 1 to scene 2)\n # or the main Call To Action. 
The later is measured in\n # `conversion_subscribe_activation` and this is the value which\n # is important to us and thus we rename this to `clicks`.\n for k, v in processed.items():\n if 'conversion_subscribe_activation' in v:\n processed[k]['other_click'] = processed[k].get('click', 0)\n processed[k]['click'] = processed[k].pop('conversion_subscribe_activation')\n for row in processed[k]['details']:\n if row['event'] == 'click':\n row['event'] = 'other_click'\n elif row['event'] == 'conversion_subscribe_activation':\n row['event'] = 'click'\n\n return processed\n\n\ndef update_job_metrics(date):\n rows = []\n for query in ['redshift-job', 'bq-job']:\n rows += redash_rows(query, date)\n\n processed = prosses_rows(rows, key='message_id')\n with atomic():\n JobDailyPerformance.objects.filter(date=date).delete()\n for job, data in processed.items():\n JobDailyPerformance.objects.create(\n date=date,\n job=Job.objects.get(id=job),\n **data\n )\n return len(processed) > 0\n\n\ndef update_impressions(date):\n rows = []\n\n for query in ['redshift-impressions', 'bq-impressions']:\n rows += redash_rows(query, date)\n\n details = []\n for row in rows:\n # Normalize channel name, based on what kind of snippets they get.\n channel = row['channel']\n if not channel:\n channel = 'release'\n channel = next(\n (item for item in CHANNELS if\n channel.startswith(item)), 'release'\n )\n\n # Aggregate counts of the same duration and the same channel.\n for item in details:\n if (item['channel'] == channel and item['duration'] == row['duration']):\n item['counts'] += row['counts']\n break\n else:\n details.append({\n 'channel': channel,\n 'duration': row['duration'],\n 'counts': row['counts'],\n })\n\n with atomic():\n DailyImpressions.objects.filter(date=date).delete()\n DailyImpressions.objects.create(\n date=date,\n details=details\n )\n\n return len(details)\n", "path": "snippets/base/etl.py"}]}
| 2,678 | 363 |
gh_patches_debug_33139
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-2535
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Baggage span processor - key predicate
This issue is to track adding a method of selecting what baggage key entries should be copied.
Feedback in the JS contrib PR was to allow a user-provided predicate function. This puts the responsibility on the user to ensure sensitive baggage keys are not copied while also not prescribing how that is determined.
- https://github.com/open-telemetry/opentelemetry-js-contrib/issues/2166
We had a similar feedback in the .NET contrib project but thought it was more complicated than just using a set of prefixes so created an issue to continue the discussion. The plain processor that copies all baggage entries (like using `*` in your example) is likely to be accepted first.
- https://github.com/open-telemetry/opentelemetry-dotnet-contrib/issues/1695
</issue>
<code>
[start of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional
16
17 from opentelemetry.baggage import get_all as get_all_baggage
18 from opentelemetry.context import Context
19 from opentelemetry.sdk.trace.export import SpanProcessor
20 from opentelemetry.trace import Span
21
22
23 class BaggageSpanProcessor(SpanProcessor):
24 """
25 The BaggageSpanProcessor reads entries stored in Baggage
26 from the parent context and adds the baggage entries' keys and
27 values to the span as attributes on span start.
28
29 Add this span processor to a tracer provider.
30
31 Keys and values added to Baggage will appear on subsequent child
32 spans for a trace within this service *and* be propagated to external
33 services in accordance with any configured propagation formats
34 configured. If the external services also have a Baggage span
35 processor, the keys and values will appear in those child spans as
36 well.
37
38 ⚠ Warning ⚠️
39
40 Do not put sensitive information in Baggage.
41
42 To repeat: a consequence of adding data to Baggage is that the keys and
43 values will appear in all outgoing HTTP headers from the application.
44
45 """
46
47 def __init__(self) -> None:
48 pass
49
50 def on_start(
51 self, span: "Span", parent_context: Optional[Context] = None
52 ) -> None:
53 baggage = get_all_baggage(parent_context)
54 for key, value in baggage.items():
55 span.set_attribute(key, value)
56
[end of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py]
[start of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint: disable=import-error
16
17 from .processor import BaggageSpanProcessor
18 from .version import __version__
19
20 __all__ = ["BaggageSpanProcessor", "__version__"]
21
[end of processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py
--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py
+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py
@@ -14,7 +14,7 @@
# pylint: disable=import-error
-from .processor import BaggageSpanProcessor
+from .processor import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor
from .version import __version__
-__all__ = ["BaggageSpanProcessor", "__version__"]
+__all__ = ["ALLOW_ALL_BAGGAGE_KEYS", "BaggageSpanProcessor", "__version__"]
diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py
--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py
+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py
@@ -12,13 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Optional
+from typing import Callable, Optional
from opentelemetry.baggage import get_all as get_all_baggage
from opentelemetry.context import Context
from opentelemetry.sdk.trace.export import SpanProcessor
from opentelemetry.trace import Span
+# A BaggageKeyPredicate is a function that takes a baggage key and returns a boolean
+BaggageKeyPredicateT = Callable[[str], bool]
+
+# A BaggageKeyPredicate that always returns True, allowing all baggage keys to be added to spans
+ALLOW_ALL_BAGGAGE_KEYS: BaggageKeyPredicateT = lambda _: True
+
class BaggageSpanProcessor(SpanProcessor):
"""
@@ -44,12 +50,13 @@
"""
- def __init__(self) -> None:
- pass
+ def __init__(self, baggage_key_predicate: BaggageKeyPredicateT) -> None:
+ self._baggage_key_predicate = baggage_key_predicate
def on_start(
self, span: "Span", parent_context: Optional[Context] = None
) -> None:
baggage = get_all_baggage(parent_context)
for key, value in baggage.items():
- span.set_attribute(key, value)
+ if self._baggage_key_predicate(key):
+ span.set_attribute(key, value)
|
{"golden_diff": "diff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py\n--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py\n+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py\n@@ -14,7 +14,7 @@\n \n # pylint: disable=import-error\n \n-from .processor import BaggageSpanProcessor\n+from .processor import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor\n from .version import __version__\n \n-__all__ = [\"BaggageSpanProcessor\", \"__version__\"]\n+__all__ = [\"ALLOW_ALL_BAGGAGE_KEYS\", \"BaggageSpanProcessor\", \"__version__\"]\ndiff --git a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py\n--- a/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py\n+++ b/processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py\n@@ -12,13 +12,19 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from typing import Optional\n+from typing import Callable, Optional\n \n from opentelemetry.baggage import get_all as get_all_baggage\n from opentelemetry.context import Context\n from opentelemetry.sdk.trace.export import SpanProcessor\n from opentelemetry.trace import Span\n \n+# A BaggageKeyPredicate is a function that takes a baggage key and returns a boolean\n+BaggageKeyPredicateT = Callable[[str], bool]\n+\n+# A BaggageKeyPredicate that always returns True, allowing all baggage keys to be added to spans\n+ALLOW_ALL_BAGGAGE_KEYS: BaggageKeyPredicateT = lambda _: True\n+\n \n class BaggageSpanProcessor(SpanProcessor):\n \"\"\"\n@@ -44,12 +50,13 @@\n \n \"\"\"\n \n- def __init__(self) -> None:\n- pass\n+ def __init__(self, baggage_key_predicate: BaggageKeyPredicateT) -> None:\n+ self._baggage_key_predicate = baggage_key_predicate\n \n def on_start(\n self, span: \"Span\", parent_context: Optional[Context] = None\n ) -> None:\n baggage = get_all_baggage(parent_context)\n for key, value in baggage.items():\n- span.set_attribute(key, value)\n+ if self._baggage_key_predicate(key):\n+ span.set_attribute(key, value)\n", "issue": "Baggage span processor - key predicate\nThis issue is to track adding a method of selecting what baggage key entries should be copied.\r\n\r\nFeedback in the JS contrib PR was to allow a user-provided predicate function. This puts the responsibility on the user to ensure sensitive baggage keys are not copied while also not prescribing how that is determined.\r\n- https://github.com/open-telemetry/opentelemetry-js-contrib/issues/2166\r\n\r\n\r\nWe had a similar feedback in the .NET contrib project but thought it was more complicated than just using a set of prefixes so created an issue to continue the discussion. 
The plain processor that copies all baggage entries (like using `*` in your example) is likely to be accepted first.\r\n- https://github.com/open-telemetry/opentelemetry-dotnet-contrib/issues/1695\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nfrom opentelemetry.baggage import get_all as get_all_baggage\nfrom opentelemetry.context import Context\nfrom opentelemetry.sdk.trace.export import SpanProcessor\nfrom opentelemetry.trace import Span\n\n\nclass BaggageSpanProcessor(SpanProcessor):\n \"\"\"\n The BaggageSpanProcessor reads entries stored in Baggage\n from the parent context and adds the baggage entries' keys and\n values to the span as attributes on span start.\n\n Add this span processor to a tracer provider.\n\n Keys and values added to Baggage will appear on subsequent child\n spans for a trace within this service *and* be propagated to external\n services in accordance with any configured propagation formats\n configured. If the external services also have a Baggage span\n processor, the keys and values will appear in those child spans as\n well.\n\n \u26a0 Warning \u26a0\ufe0f\n\n Do not put sensitive information in Baggage.\n\n To repeat: a consequence of adding data to Baggage is that the keys and\n values will appear in all outgoing HTTP headers from the application.\n\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def on_start(\n self, span: \"Span\", parent_context: Optional[Context] = None\n ) -> None:\n baggage = get_all_baggage(parent_context)\n for key, value in baggage.items():\n span.set_attribute(key, value)\n", "path": "processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/processor.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=import-error\n\nfrom .processor import BaggageSpanProcessor\nfrom .version import __version__\n\n__all__ = [\"BaggageSpanProcessor\", \"__version__\"]\n", "path": "processor/opentelemetry-processor-baggage/src/opentelemetry/processor/baggage/__init__.py"}]}
| 1,529 | 617 |
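The patch above turns the processor's hard-coded "copy everything" behaviour into a user-supplied predicate. As a hedged usage sketch, here is how the patched API could be wired into a standard OpenTelemetry SDK setup; the tracer-provider wiring and the `com.example.` prefix are illustrative assumptions, not taken from the repository.

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.processor.baggage import ALLOW_ALL_BAGGAGE_KEYS, BaggageSpanProcessor

provider = TracerProvider()

# Option 1: copy every baggage entry onto spans (the previous behaviour, now opt-in).
provider.add_span_processor(BaggageSpanProcessor(ALLOW_ALL_BAGGAGE_KEYS))

# Option 2 (alternative): copy only keys the application knows are safe to expose.
# provider.add_span_processor(
#     BaggageSpanProcessor(lambda key: key.startswith("com.example."))
# )

trace.set_tracer_provider(provider)
```

Putting the decision in a predicate keeps the processor free of policy: the application decides which baggage keys are safe to promote to span attributes, which is the direction the linked JS and .NET discussions point toward.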
gh_patches_debug_12860
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-297
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DISCUSSION: Should dataset ID be set on datastore key?
This question came up in the review in #282 while trying to define the "correct" behavior of `datastore.Key.__eq__`.
The only remaining use of `Key._dataset_id` is in [`to_protobuf`](https://github.com/GoogleCloudPlatform/gcloud-python/blob/b6d3e74a48e8554804ea3d33f53385bbbdb5c4b7/gcloud/datastore/key.py#L53) but #121 seems to indicate that the dataset ID is not needed on a `Key`.
ISTM we should just remove `_dataset_id` from the `Key` class, even though it is returned in the protobuf after an entity is stored/retrieved. @pcostell WDYT?
</issue>
<code>
[start of gcloud/datastore/key.py]
1 """Create / interact with gcloud datastore keys."""
2
3 import copy
4 from itertools import izip
5
6 from gcloud.datastore import datastore_v1_pb2 as datastore_pb
7
8
9 class Key(object):
10 """An immutable representation of a datastore Key.
11
12 .. automethod:: __init__
13 """
14
15 def __init__(self, path=None, namespace=None, dataset_id=None):
16 """Constructor / initializer for a key.
17
18 :type namespace: :class:`str`
19 :param namespace: A namespace identifier for the key.
20
21 :type path: sequence of dicts
22 :param path: Each dict must have keys 'kind' (a string) and optionally
23 'name' (a string) or 'id' (an integer).
24
25 :type dataset_id: string
26 :param dataset: The dataset ID assigned by back-end for the key.
27 Leave as None for newly-created keys.
28 """
29 self._path = path or [{'kind': ''}]
30 self._namespace = namespace
31 self._dataset_id = dataset_id
32
33 def _clone(self):
34 """Duplicates the Key.
35
36 We make a shallow copy of the :class:`gcloud.datastore.dataset.Dataset`
37 because it holds a reference an authenticated connection,
38 which we don't want to lose.
39
40 :rtype: :class:`gcloud.datastore.key.Key`
41 :returns: a new `Key` instance
42 """
43 return copy.deepcopy(self)
44
45 def to_protobuf(self):
46 """Return a protobuf corresponding to the key.
47
48 :rtype: :class:`gcloud.datastore.datastore_v1_pb2.Key`
49 :returns: The Protobuf representing the key.
50 """
51 key = datastore_pb.Key()
52
53 if self._dataset_id is not None:
54 key.partition_id.dataset_id = self._dataset_id
55
56 if self._namespace:
57 key.partition_id.namespace = self._namespace
58
59 for item in self.path():
60 element = key.path_element.add()
61 if 'kind' in item:
62 element.kind = item['kind']
63 if 'id' in item:
64 element.id = item['id']
65 if 'name' in item:
66 element.name = item['name']
67
68 return key
69
70 @classmethod
71 def from_path(cls, *args, **kwargs):
72 """Factory method for creating a key based on a path.
73
74 :type args: :class:`tuple`
75 :param args: sequence of even length, where the first of each pair is a
76 string representing the 'kind' of the path element, and
77 the second of the pair is either a string (for the path
78 element's name) or an integer (for its id).
79
80 :type kwargs: :class:`dict`
81 :param kwargs: Other named parameters which can be passed to
82 :func:`Key.__init__`.
83
84 :rtype: :class:`gcloud.datastore.key.Key`
85 :returns: a new :class:`Key` instance
86 """
87 if len(args) % 2:
88 raise ValueError('Must pass an even number of args.')
89
90 path = []
91 items = iter(args)
92
93 for kind, id_or_name in izip(items, items):
94 entry = {'kind': kind}
95 if isinstance(id_or_name, basestring):
96 entry['name'] = id_or_name
97 else:
98 entry['id'] = id_or_name
99 path.append(entry)
100
101 kwargs['path'] = path
102 return cls(**kwargs)
103
104 def is_partial(self):
105 """Boolean test: is the key fully mapped onto a backend entity?
106
107 :rtype: :class:`bool`
108 :returns: True if the last element of the key's path does not have
109 an 'id' or a 'name'.
110 """
111 return self.id_or_name() is None
112
113 def namespace(self, namespace=None):
114 """Namespace setter / getter.
115
116 :type namespace: :class:`str`
117 :param namespace: A namespace identifier for the key.
118
119 :rtype: :class:`Key` (for setter); or :class:`str` (for getter)
120 :returns: a new key, cloned from self., with the given namespace
121 (setter); or self's namespace (getter).
122 """
123 if namespace:
124 clone = self._clone()
125 clone._namespace = namespace
126 return clone
127 else:
128 return self._namespace
129
130 def path(self, path=None):
131 """Path setter / getter.
132
133 :type path: sequence of dicts
134 :param path: Each dict must have keys 'kind' (a string) and optionally
135 'name' (a string) or 'id' (an integer).
136
137 :rtype: :class:`Key` (for setter); or :class:`str` (for getter)
138 :returns: a new key, cloned from self., with the given path (setter);
139 or self's path (getter).
140 """
141 if path:
142 clone = self._clone()
143 clone._path = path
144 return clone
145 else:
146 return self._path
147
148 def kind(self, kind=None):
149 """Kind setter / getter. Based on the last element of path.
150
151 :type kind: :class:`str`
152 :param kind: The new kind for the key.
153
154 :rtype: :class:`Key` (for setter); or :class:`str` (for getter)
155 :returns: a new key, cloned from self., with the given kind (setter);
156 or self's kind (getter).
157 """
158 if kind:
159 clone = self._clone()
160 clone._path[-1]['kind'] = kind
161 return clone
162 elif self.path():
163 return self._path[-1]['kind']
164
165 def id(self, id_to_set=None):
166 """ID setter / getter. Based on the last element of path.
167
168 :type id_to_set: :class:`int`
169 :param id_to_set: The new ID for the key.
170
171 :rtype: :class:`Key` (for setter); or :class:`int` (for getter)
172 :returns: a new key, cloned from self., with the given id (setter);
173 or self's id (getter).
174 """
175 if id_to_set:
176 clone = self._clone()
177 clone._path[-1]['id'] = id_to_set
178 return clone
179 elif self.path():
180 return self._path[-1].get('id')
181
182 def name(self, name=None):
183 """Name setter / getter. Based on the last element of path.
184
185 :type kind: :class:`str`
186 :param kind: The new name for the key.
187
188 :rtype: :class:`Key` (for setter); or :class:`str` (for getter)
189 :returns: a new key, cloned from self., with the given name (setter);
190 or self's name (getter).
191 """
192 if name:
193 clone = self._clone()
194 clone._path[-1]['name'] = name
195 return clone
196 elif self.path():
197 return self._path[-1].get('name')
198
199 def id_or_name(self):
200 """Getter. Based on the last element of path.
201
202 :rtype: :class:`int` (if 'id' is set); or :class:`str` (the 'name')
203 :returns: True if the last element of the key's path has either an 'id'
204 or a 'name'.
205 """
206 return self.id() or self.name()
207
208 def parent(self):
209 """Getter: return a new key for the next highest element in path.
210
211 :rtype: :class:`gcloud.datastore.key.Key`
212 :returns: a new `Key` instance, whose path consists of all but the last
213 element of self's path. If self has only one path element,
214 return None.
215 """
216 if len(self._path) <= 1:
217 return None
218 return self.path(self.path()[:-1])
219
220 def __repr__(self):
221 return '<Key%s>' % self.path()
222
[end of gcloud/datastore/key.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gcloud/datastore/key.py b/gcloud/datastore/key.py
--- a/gcloud/datastore/key.py
+++ b/gcloud/datastore/key.py
@@ -24,7 +24,15 @@
:type dataset_id: string
:param dataset: The dataset ID assigned by back-end for the key.
- Leave as None for newly-created keys.
+
+ .. note::
+
+ The key's ``_dataset_id`` field must be None for keys created
+ by application code. The
+ :func:`gcloud.datastore.helpers.key_from_protobuf` factory
+ will be set the field to an appropriate value for keys returned
+ from the datastore backend. The application **must** treat any
+ value set by the back-end as opaque.
"""
self._path = path or [{'kind': ''}]
self._namespace = namespace
|
{"golden_diff": "diff --git a/gcloud/datastore/key.py b/gcloud/datastore/key.py\n--- a/gcloud/datastore/key.py\n+++ b/gcloud/datastore/key.py\n@@ -24,7 +24,15 @@\n \n :type dataset_id: string\n :param dataset: The dataset ID assigned by back-end for the key.\n- Leave as None for newly-created keys.\n+\n+ .. note::\n+\n+ The key's ``_dataset_id`` field must be None for keys created\n+ by application code. The\n+ :func:`gcloud.datastore.helpers.key_from_protobuf` factory\n+ will be set the field to an appropriate value for keys returned\n+ from the datastore backend. The application **must** treat any\n+ value set by the back-end as opaque.\n \"\"\"\n self._path = path or [{'kind': ''}]\n self._namespace = namespace\n", "issue": "DISCUSSION: Should dataset ID be set on datastore key?\nThis question came up in the review in #282 while trying to define the \"correct\" behavior of `datastore.Key.__eq__`.\n\nThe only remaining use of `Key._dataset_id` is in [`to_protobuf`](https://github.com/GoogleCloudPlatform/gcloud-python/blob/b6d3e74a48e8554804ea3d33f53385bbbdb5c4b7/gcloud/datastore/key.py#L53) but #121 seems to indicate that the dataset ID is not needed on a `Key`.\n\nISTM we should just remove `_dataset_id` from the `Key` class, even though it is returned in the protobuf after an entity is stored/retrieved. @pcostell WDYT?\n\n", "before_files": [{"content": "\"\"\"Create / interact with gcloud datastore keys.\"\"\"\n\nimport copy\nfrom itertools import izip\n\nfrom gcloud.datastore import datastore_v1_pb2 as datastore_pb\n\n\nclass Key(object):\n \"\"\"An immutable representation of a datastore Key.\n\n .. automethod:: __init__\n \"\"\"\n\n def __init__(self, path=None, namespace=None, dataset_id=None):\n \"\"\"Constructor / initializer for a key.\n\n :type namespace: :class:`str`\n :param namespace: A namespace identifier for the key.\n\n :type path: sequence of dicts\n :param path: Each dict must have keys 'kind' (a string) and optionally\n 'name' (a string) or 'id' (an integer).\n\n :type dataset_id: string\n :param dataset: The dataset ID assigned by back-end for the key.\n Leave as None for newly-created keys.\n \"\"\"\n self._path = path or [{'kind': ''}]\n self._namespace = namespace\n self._dataset_id = dataset_id\n\n def _clone(self):\n \"\"\"Duplicates the Key.\n\n We make a shallow copy of the :class:`gcloud.datastore.dataset.Dataset`\n because it holds a reference an authenticated connection,\n which we don't want to lose.\n\n :rtype: :class:`gcloud.datastore.key.Key`\n :returns: a new `Key` instance\n \"\"\"\n return copy.deepcopy(self)\n\n def to_protobuf(self):\n \"\"\"Return a protobuf corresponding to the key.\n\n :rtype: :class:`gcloud.datastore.datastore_v1_pb2.Key`\n :returns: The Protobuf representing the key.\n \"\"\"\n key = datastore_pb.Key()\n\n if self._dataset_id is not None:\n key.partition_id.dataset_id = self._dataset_id\n\n if self._namespace:\n key.partition_id.namespace = self._namespace\n\n for item in self.path():\n element = key.path_element.add()\n if 'kind' in item:\n element.kind = item['kind']\n if 'id' in item:\n element.id = item['id']\n if 'name' in item:\n element.name = item['name']\n\n return key\n\n @classmethod\n def from_path(cls, *args, **kwargs):\n \"\"\"Factory method for creating a key based on a path.\n\n :type args: :class:`tuple`\n :param args: sequence of even length, where the first of each pair is a\n string representing the 'kind' of the path element, and\n the second of the pair is either a string (for the path\n element's name) 
or an integer (for its id).\n\n :type kwargs: :class:`dict`\n :param kwargs: Other named parameters which can be passed to\n :func:`Key.__init__`.\n\n :rtype: :class:`gcloud.datastore.key.Key`\n :returns: a new :class:`Key` instance\n \"\"\"\n if len(args) % 2:\n raise ValueError('Must pass an even number of args.')\n\n path = []\n items = iter(args)\n\n for kind, id_or_name in izip(items, items):\n entry = {'kind': kind}\n if isinstance(id_or_name, basestring):\n entry['name'] = id_or_name\n else:\n entry['id'] = id_or_name\n path.append(entry)\n\n kwargs['path'] = path\n return cls(**kwargs)\n\n def is_partial(self):\n \"\"\"Boolean test: is the key fully mapped onto a backend entity?\n\n :rtype: :class:`bool`\n :returns: True if the last element of the key's path does not have\n an 'id' or a 'name'.\n \"\"\"\n return self.id_or_name() is None\n\n def namespace(self, namespace=None):\n \"\"\"Namespace setter / getter.\n\n :type namespace: :class:`str`\n :param namespace: A namespace identifier for the key.\n\n :rtype: :class:`Key` (for setter); or :class:`str` (for getter)\n :returns: a new key, cloned from self., with the given namespace\n (setter); or self's namespace (getter).\n \"\"\"\n if namespace:\n clone = self._clone()\n clone._namespace = namespace\n return clone\n else:\n return self._namespace\n\n def path(self, path=None):\n \"\"\"Path setter / getter.\n\n :type path: sequence of dicts\n :param path: Each dict must have keys 'kind' (a string) and optionally\n 'name' (a string) or 'id' (an integer).\n\n :rtype: :class:`Key` (for setter); or :class:`str` (for getter)\n :returns: a new key, cloned from self., with the given path (setter);\n or self's path (getter).\n \"\"\"\n if path:\n clone = self._clone()\n clone._path = path\n return clone\n else:\n return self._path\n\n def kind(self, kind=None):\n \"\"\"Kind setter / getter. Based on the last element of path.\n\n :type kind: :class:`str`\n :param kind: The new kind for the key.\n\n :rtype: :class:`Key` (for setter); or :class:`str` (for getter)\n :returns: a new key, cloned from self., with the given kind (setter);\n or self's kind (getter).\n \"\"\"\n if kind:\n clone = self._clone()\n clone._path[-1]['kind'] = kind\n return clone\n elif self.path():\n return self._path[-1]['kind']\n\n def id(self, id_to_set=None):\n \"\"\"ID setter / getter. Based on the last element of path.\n\n :type id_to_set: :class:`int`\n :param id_to_set: The new ID for the key.\n\n :rtype: :class:`Key` (for setter); or :class:`int` (for getter)\n :returns: a new key, cloned from self., with the given id (setter);\n or self's id (getter).\n \"\"\"\n if id_to_set:\n clone = self._clone()\n clone._path[-1]['id'] = id_to_set\n return clone\n elif self.path():\n return self._path[-1].get('id')\n\n def name(self, name=None):\n \"\"\"Name setter / getter. Based on the last element of path.\n\n :type kind: :class:`str`\n :param kind: The new name for the key.\n\n :rtype: :class:`Key` (for setter); or :class:`str` (for getter)\n :returns: a new key, cloned from self., with the given name (setter);\n or self's name (getter).\n \"\"\"\n if name:\n clone = self._clone()\n clone._path[-1]['name'] = name\n return clone\n elif self.path():\n return self._path[-1].get('name')\n\n def id_or_name(self):\n \"\"\"Getter. 
Based on the last element of path.\n\n :rtype: :class:`int` (if 'id' is set); or :class:`str` (the 'name')\n :returns: True if the last element of the key's path has either an 'id'\n or a 'name'.\n \"\"\"\n return self.id() or self.name()\n\n def parent(self):\n \"\"\"Getter: return a new key for the next highest element in path.\n\n :rtype: :class:`gcloud.datastore.key.Key`\n :returns: a new `Key` instance, whose path consists of all but the last\n element of self's path. If self has only one path element,\n return None.\n \"\"\"\n if len(self._path) <= 1:\n return None\n return self.path(self.path()[:-1])\n\n def __repr__(self):\n return '<Key%s>' % self.path()\n", "path": "gcloud/datastore/key.py"}]}
| 3,020 | 198 |
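As a small illustration of the behaviour the amended docstring describes, a minimal sketch using the `from_path` factory from the file above (this is the Python 2-era `gcloud` package); the kind and name values are made up for the example.

```python
from gcloud.datastore.key import Key

# Application code builds keys without a dataset ID; `_dataset_id` only
# receives a value when a key comes back from the datastore backend.
key = Key.from_path('Person', 'alice', 'Message', 1234)

assert key._dataset_id is None   # opaque, backend-assigned field
assert not key.is_partial()      # last path element has an id
assert key.kind() == 'Message'
assert key.parent().kind() == 'Person'
```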
gh_patches_debug_19161
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-5810
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dm - use national sites
Is it possible to use the national sites for dm stores instead of the German one? The format is `dm.[country code]` for all countries except for Bulgaria, Bosnia and Italy (which use `dm-drogeriemarkt.[country code]`) and Slovakia (`mojadm.sk`).
</issue>
<code>
[start of locations/spiders/dm.py]
1 import scrapy
2
3 from locations.categories import Categories, apply_category
4 from locations.dict_parser import DictParser
5 from locations.hours import DAYS, OpeningHours
6
7
8 class DmSpider(scrapy.Spider):
9 name = "dm"
10 item_attributes = {"brand": "dm", "brand_wikidata": "Q266572"}
11 allowed_domains = ["store-data-service.services.dmtech.com"]
12 start_urls = ["https://store-data-service.services.dmtech.com/stores/bbox/89.999,-179.999,-89.999,179.999"]
13
14 @staticmethod
15 def parse_hours(store_hours: [dict]) -> OpeningHours:
16 opening_hours = OpeningHours()
17
18 for store_day in store_hours:
19 for times in store_day["timeRanges"]:
20 open_time = times["opening"]
21 close_time = times["closing"]
22
23 opening_hours.add_range(DAYS[store_day["weekDay"] - 1], open_time, close_time)
24
25 return opening_hours
26
27 def parse(self, response, **kwargs):
28 for location in response.json()["stores"]:
29 location["address"]["street_address"] = location["address"].pop("street")
30 location["address"]["country"] = location["countryCode"]
31 location["name"] = location["address"].get("name")
32 item = DictParser.parse(location)
33 item["website"] = f'https://www.dm.de/store{location["storeUrlPath"]}'
34 item["extras"]["check_date"] = location["updateTimeStamp"]
35 item["opening_hours"] = self.parse_hours(location["openingHours"])
36
37 apply_category(Categories.SHOP_CHEMIST, item)
38
39 yield item
40
[end of locations/spiders/dm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/dm.py b/locations/spiders/dm.py
--- a/locations/spiders/dm.py
+++ b/locations/spiders/dm.py
@@ -30,7 +30,14 @@
location["address"]["country"] = location["countryCode"]
location["name"] = location["address"].get("name")
item = DictParser.parse(location)
- item["website"] = f'https://www.dm.de/store{location["storeUrlPath"]}'
+ if location["countryCode"] in ["BG", "BA", "IT"]:
+ item[
+ "website"
+ ] = f'https://www.dm-drogeriemarkt.{location["countryCode"].lower()}/store{location["storeUrlPath"]}'
+ elif location["countryCode"] == "SK":
+ item["website"] = f'https://www.mojadm.sk/store{location["storeUrlPath"]}'
+ else:
+ item["website"] = f'https://www.dm.{location["countryCode"].lower()}/store{location["storeUrlPath"]}'
item["extras"]["check_date"] = location["updateTimeStamp"]
item["opening_hours"] = self.parse_hours(location["openingHours"])
|
{"golden_diff": "diff --git a/locations/spiders/dm.py b/locations/spiders/dm.py\n--- a/locations/spiders/dm.py\n+++ b/locations/spiders/dm.py\n@@ -30,7 +30,14 @@\n location[\"address\"][\"country\"] = location[\"countryCode\"]\n location[\"name\"] = location[\"address\"].get(\"name\")\n item = DictParser.parse(location)\n- item[\"website\"] = f'https://www.dm.de/store{location[\"storeUrlPath\"]}'\n+ if location[\"countryCode\"] in [\"BG\", \"BA\", \"IT\"]:\n+ item[\n+ \"website\"\n+ ] = f'https://www.dm-drogeriemarkt.{location[\"countryCode\"].lower()}/store{location[\"storeUrlPath\"]}'\n+ elif location[\"countryCode\"] == \"SK\":\n+ item[\"website\"] = f'https://www.mojadm.sk/store{location[\"storeUrlPath\"]}'\n+ else:\n+ item[\"website\"] = f'https://www.dm.{location[\"countryCode\"].lower()}/store{location[\"storeUrlPath\"]}'\n item[\"extras\"][\"check_date\"] = location[\"updateTimeStamp\"]\n item[\"opening_hours\"] = self.parse_hours(location[\"openingHours\"])\n", "issue": "dm - use national sites\nIs it possible to use the national sites for dm stores instead of the German one? The format is `dm.[country code]` for all countries except for Bulgaria, Bosnia and Italy (which use `dm-drogeriemarkt.[country code]`) and Slovakia (`mojadm.sk`).\n", "before_files": [{"content": "import scrapy\n\nfrom locations.categories import Categories, apply_category\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\n\n\nclass DmSpider(scrapy.Spider):\n name = \"dm\"\n item_attributes = {\"brand\": \"dm\", \"brand_wikidata\": \"Q266572\"}\n allowed_domains = [\"store-data-service.services.dmtech.com\"]\n start_urls = [\"https://store-data-service.services.dmtech.com/stores/bbox/89.999,-179.999,-89.999,179.999\"]\n\n @staticmethod\n def parse_hours(store_hours: [dict]) -> OpeningHours:\n opening_hours = OpeningHours()\n\n for store_day in store_hours:\n for times in store_day[\"timeRanges\"]:\n open_time = times[\"opening\"]\n close_time = times[\"closing\"]\n\n opening_hours.add_range(DAYS[store_day[\"weekDay\"] - 1], open_time, close_time)\n\n return opening_hours\n\n def parse(self, response, **kwargs):\n for location in response.json()[\"stores\"]:\n location[\"address\"][\"street_address\"] = location[\"address\"].pop(\"street\")\n location[\"address\"][\"country\"] = location[\"countryCode\"]\n location[\"name\"] = location[\"address\"].get(\"name\")\n item = DictParser.parse(location)\n item[\"website\"] = f'https://www.dm.de/store{location[\"storeUrlPath\"]}'\n item[\"extras\"][\"check_date\"] = location[\"updateTimeStamp\"]\n item[\"opening_hours\"] = self.parse_hours(location[\"openingHours\"])\n\n apply_category(Categories.SHOP_CHEMIST, item)\n\n yield item\n", "path": "locations/spiders/dm.py"}]}
| 1,044 | 269 |
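The essential change in the patch above is the per-country website template. A standalone sketch of that mapping is shown below; the store paths in the assertions are hypothetical examples, not real dm store URLs.

```python
def store_website(country_code: str, store_url_path: str) -> str:
    """Mirror the country-specific dm domains applied in the spider."""
    tld = country_code.lower()
    if country_code in ("BG", "BA", "IT"):
        return f"https://www.dm-drogeriemarkt.{tld}/store{store_url_path}"
    if country_code == "SK":
        return f"https://www.mojadm.sk/store{store_url_path}"
    return f"https://www.dm.{tld}/store{store_url_path}"


assert store_website("AT", "/at/filiale-123") == "https://www.dm.at/store/at/filiale-123"
assert store_website("SK", "/sk/predajna-9") == "https://www.mojadm.sk/store/sk/predajna-9"
```

Keeping the mapping in one helper also makes it straightforward to extend if further country-specific domains turn up.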
gh_patches_debug_7973
|
rasdani/github-patches
|
git_diff
|
celery__celery-5870
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Continuous memory leak
There is a memory leak in the parent process of Celery's worker.
It is not a child process executing a task.
It happens suddenly every few days.
Unless you stop Celery, it consumes server memory in tens of hours.
This problem happens at least in Celery 4.1, and it also occurs in Celery 4.2.
Celery is running on Ubuntu 16 and brokers use RabbitMQ.

</issue>
<code>
[start of celery/events/receiver.py]
1 """Event receiver implementation."""
2 from __future__ import absolute_import, unicode_literals
3
4 import time
5 from operator import itemgetter
6
7 from kombu import Queue
8 from kombu.connection import maybe_channel
9 from kombu.mixins import ConsumerMixin
10
11 from celery import uuid
12 from celery.app import app_or_default
13 from celery.utils.time import adjust_timestamp
14
15 from .event import get_exchange
16
17 __all__ = ('EventReceiver',)
18
19 CLIENT_CLOCK_SKEW = -1
20
21 _TZGETTER = itemgetter('utcoffset', 'timestamp')
22
23
24 class EventReceiver(ConsumerMixin):
25 """Capture events.
26
27 Arguments:
28 connection (kombu.Connection): Connection to the broker.
29 handlers (Mapping[Callable]): Event handlers.
30 This is a map of event type names and their handlers.
31 The special handler `"*"` captures all events that don't have a
32 handler.
33 """
34
35 app = None
36
37 def __init__(self, channel, handlers=None, routing_key='#',
38 node_id=None, app=None, queue_prefix=None,
39 accept=None, queue_ttl=None, queue_expires=None):
40 self.app = app_or_default(app or self.app)
41 self.channel = maybe_channel(channel)
42 self.handlers = {} if handlers is None else handlers
43 self.routing_key = routing_key
44 self.node_id = node_id or uuid()
45 self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix
46 self.exchange = get_exchange(
47 self.connection or self.app.connection_for_write(),
48 name=self.app.conf.event_exchange)
49 if queue_ttl is None:
50 queue_ttl = self.app.conf.event_queue_ttl
51 if queue_expires is None:
52 queue_expires = self.app.conf.event_queue_expires
53 self.queue = Queue(
54 '.'.join([self.queue_prefix, self.node_id]),
55 exchange=self.exchange,
56 routing_key=self.routing_key,
57 auto_delete=True, durable=False,
58 message_ttl=queue_ttl,
59 expires=queue_expires,
60 )
61 self.clock = self.app.clock
62 self.adjust_clock = self.clock.adjust
63 self.forward_clock = self.clock.forward
64 if accept is None:
65 accept = {self.app.conf.event_serializer, 'json'}
66 self.accept = accept
67
68 def process(self, type, event):
69 """Process event by dispatching to configured handler."""
70 handler = self.handlers.get(type) or self.handlers.get('*')
71 handler and handler(event)
72
73 def get_consumers(self, Consumer, channel):
74 return [Consumer(queues=[self.queue],
75 callbacks=[self._receive], no_ack=True,
76 accept=self.accept)]
77
78 def on_consume_ready(self, connection, channel, consumers,
79 wakeup=True, **kwargs):
80 if wakeup:
81 self.wakeup_workers(channel=channel)
82
83 def itercapture(self, limit=None, timeout=None, wakeup=True):
84 return self.consume(limit=limit, timeout=timeout, wakeup=wakeup)
85
86 def capture(self, limit=None, timeout=None, wakeup=True):
87 """Open up a consumer capturing events.
88
89 This has to run in the main process, and it will never stop
90 unless :attr:`EventDispatcher.should_stop` is set to True, or
91 forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
92 """
93 return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))
94
95 def wakeup_workers(self, channel=None):
96 self.app.control.broadcast('heartbeat',
97 connection=self.connection,
98 channel=channel)
99
100 def event_from_message(self, body, localize=True,
101 now=time.time, tzfields=_TZGETTER,
102 adjust_timestamp=adjust_timestamp,
103 CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):
104 type = body['type']
105 if type == 'task-sent':
106 # clients never sync so cannot use their clock value
107 _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW
108 self.adjust_clock(_c)
109 else:
110 try:
111 clock = body['clock']
112 except KeyError:
113 body['clock'] = self.forward_clock()
114 else:
115 self.adjust_clock(clock)
116
117 if localize:
118 try:
119 offset, timestamp = tzfields(body)
120 except KeyError:
121 pass
122 else:
123 body['timestamp'] = adjust_timestamp(timestamp, offset)
124 body['local_received'] = now()
125 return type, body
126
127 def _receive(self, body, message, list=list, isinstance=isinstance):
128 if isinstance(body, list): # celery 4.0: List of events
129 process, from_message = self.process, self.event_from_message
130 [process(*from_message(event)) for event in body]
131 else:
132 self.process(*self.event_from_message(body))
133
134 @property
135 def connection(self):
136 return self.channel.connection.client if self.channel else None
137
[end of celery/events/receiver.py]
</code>
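For context on how the class above is normally driven: user code usually reaches it through `app.events.Receiver` and then blocks in `capture()`, roughly as in the monitoring sketch below (the broker URL and handlers are placeholders, not taken from the issue):

```python
from celery import Celery

app = Celery(broker="amqp://guest@localhost//")  # placeholder broker URL


def announce_failed_tasks(event):
    # Each event is a plain dict; 'type' says what happened.
    print("failed:", event.get("uuid"))


with app.connection() as connection:
    recv = app.events.Receiver(
        connection,
        handlers={"task-failed": announce_failed_tasks, "*": lambda event: None},
    )
    # This is the call that runs EventReceiver.capture() from the file above.
    recv.capture(limit=None, timeout=None, wakeup=True)
```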
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/celery/events/receiver.py b/celery/events/receiver.py
--- a/celery/events/receiver.py
+++ b/celery/events/receiver.py
@@ -90,7 +90,8 @@
unless :attr:`EventDispatcher.should_stop` is set to True, or
forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.
"""
- return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))
+ for _ in self.consume(limit=limit, timeout=timeout, wakeup=wakeup):
+ pass
def wakeup_workers(self, channel=None):
self.app.control.broadcast('heartbeat',
|
{"golden_diff": "diff --git a/celery/events/receiver.py b/celery/events/receiver.py\n--- a/celery/events/receiver.py\n+++ b/celery/events/receiver.py\n@@ -90,7 +90,8 @@\n unless :attr:`EventDispatcher.should_stop` is set to True, or\n forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.\n \"\"\"\n- return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))\n+ for _ in self.consume(limit=limit, timeout=timeout, wakeup=wakeup):\n+ pass\n \n def wakeup_workers(self, channel=None):\n self.app.control.broadcast('heartbeat',\n", "issue": "Continuous memory leak\nThere is a memory leak in the parent process of Celery's worker.\nIt is not a child process executing a task.\nIt happens suddenly every few days.\nUnless you stop Celery, it consumes server memory in tens of hours.\n\nThis problem happens at least in Celery 4.1, and it also occurs in Celery 4.2.\nCelery is running on Ubuntu 16 and brokers use RabbitMQ.\n\n\n\n\n", "before_files": [{"content": "\"\"\"Event receiver implementation.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport time\nfrom operator import itemgetter\n\nfrom kombu import Queue\nfrom kombu.connection import maybe_channel\nfrom kombu.mixins import ConsumerMixin\n\nfrom celery import uuid\nfrom celery.app import app_or_default\nfrom celery.utils.time import adjust_timestamp\n\nfrom .event import get_exchange\n\n__all__ = ('EventReceiver',)\n\nCLIENT_CLOCK_SKEW = -1\n\n_TZGETTER = itemgetter('utcoffset', 'timestamp')\n\n\nclass EventReceiver(ConsumerMixin):\n \"\"\"Capture events.\n\n Arguments:\n connection (kombu.Connection): Connection to the broker.\n handlers (Mapping[Callable]): Event handlers.\n This is a map of event type names and their handlers.\n The special handler `\"*\"` captures all events that don't have a\n handler.\n \"\"\"\n\n app = None\n\n def __init__(self, channel, handlers=None, routing_key='#',\n node_id=None, app=None, queue_prefix=None,\n accept=None, queue_ttl=None, queue_expires=None):\n self.app = app_or_default(app or self.app)\n self.channel = maybe_channel(channel)\n self.handlers = {} if handlers is None else handlers\n self.routing_key = routing_key\n self.node_id = node_id or uuid()\n self.queue_prefix = queue_prefix or self.app.conf.event_queue_prefix\n self.exchange = get_exchange(\n self.connection or self.app.connection_for_write(),\n name=self.app.conf.event_exchange)\n if queue_ttl is None:\n queue_ttl = self.app.conf.event_queue_ttl\n if queue_expires is None:\n queue_expires = self.app.conf.event_queue_expires\n self.queue = Queue(\n '.'.join([self.queue_prefix, self.node_id]),\n exchange=self.exchange,\n routing_key=self.routing_key,\n auto_delete=True, durable=False,\n message_ttl=queue_ttl,\n expires=queue_expires,\n )\n self.clock = self.app.clock\n self.adjust_clock = self.clock.adjust\n self.forward_clock = self.clock.forward\n if accept is None:\n accept = {self.app.conf.event_serializer, 'json'}\n self.accept = accept\n\n def process(self, type, event):\n \"\"\"Process event by dispatching to configured handler.\"\"\"\n handler = self.handlers.get(type) or self.handlers.get('*')\n handler and handler(event)\n\n def get_consumers(self, Consumer, channel):\n return [Consumer(queues=[self.queue],\n callbacks=[self._receive], no_ack=True,\n accept=self.accept)]\n\n def on_consume_ready(self, connection, channel, consumers,\n wakeup=True, **kwargs):\n if wakeup:\n self.wakeup_workers(channel=channel)\n\n def itercapture(self, limit=None, timeout=None, wakeup=True):\n return 
self.consume(limit=limit, timeout=timeout, wakeup=wakeup)\n\n def capture(self, limit=None, timeout=None, wakeup=True):\n \"\"\"Open up a consumer capturing events.\n\n This has to run in the main process, and it will never stop\n unless :attr:`EventDispatcher.should_stop` is set to True, or\n forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.\n \"\"\"\n return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup))\n\n def wakeup_workers(self, channel=None):\n self.app.control.broadcast('heartbeat',\n connection=self.connection,\n channel=channel)\n\n def event_from_message(self, body, localize=True,\n now=time.time, tzfields=_TZGETTER,\n adjust_timestamp=adjust_timestamp,\n CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW):\n type = body['type']\n if type == 'task-sent':\n # clients never sync so cannot use their clock value\n _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW\n self.adjust_clock(_c)\n else:\n try:\n clock = body['clock']\n except KeyError:\n body['clock'] = self.forward_clock()\n else:\n self.adjust_clock(clock)\n\n if localize:\n try:\n offset, timestamp = tzfields(body)\n except KeyError:\n pass\n else:\n body['timestamp'] = adjust_timestamp(timestamp, offset)\n body['local_received'] = now()\n return type, body\n\n def _receive(self, body, message, list=list, isinstance=isinstance):\n if isinstance(body, list): # celery 4.0: List of events\n process, from_message = self.process, self.event_from_message\n [process(*from_message(event)) for event in body]\n else:\n self.process(*self.event_from_message(body))\n\n @property\n def connection(self):\n return self.channel.connection.client if self.channel else None\n", "path": "celery/events/receiver.py"}]}
| 2,023 | 145 |
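The accepted diff for this row is small but the reasoning matters: `capture()` wrapped `self.consume(...)` in `list(...)`, so every event received over the lifetime of the monitor stayed referenced and the parent process grew without bound; the fix drains the generator and discards each item. The same pattern in isolation (hypothetical generator, nothing Celery-specific):

```python
def endless_events():
    """Stand-in for EventReceiver.consume(): yields events forever."""
    n = 0
    while True:
        yield {"type": "task-succeeded", "seq": n}
        n += 1


def capture_leaky(events, limit):
    # Anti-pattern from before the patch: list() keeps every event alive.
    return list(event for _, event in zip(range(limit), events))


def capture_fixed(events, limit):
    # Pattern from the patch: iterate and drop, memory use stays flat.
    for _, _event in zip(range(limit), events):
        pass


if __name__ == "__main__":
    capture_fixed(endless_events(), limit=1_000_000)
```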
gh_patches_debug_24430
|
rasdani/github-patches
|
git_diff
|
opentensor__bittensor-1974
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove nest_asyncio from bittensor to allow uvloop support
### Is your feature request related to a problem? Please describe.
Uvloop, which provides superior speed, does not allow loop nesting.
It is also the case that uvloop is pulled in by popular packages, which [forces some subnets to develop hacks to combat this](https://github.com/synapsec-ai/llm-defender-subnet/blob/6c37925c4f34a298607c97dfceebcc01fb74d562/scripts/run_neuron.sh#L140-L146).
And perhaps more importantly, https://github.com/erdewit/nest_asyncio seems to have been abandoned.
### Describe the solution you'd like
Remove nest_asyncio, and let bittensor users decide which asyncio loop they want to run. Perhaps even suggest (not mandate) running uvloop, since it consistently shows better results in benchmarks than CPython asyncio stdlib loop.
It seems there was some attempt at this in the past, https://github.com/opentensor/bittensor/pull/1501, which for some reason did not stick (?)
### Describe alternatives you've considered
_No response_
### Additional context
_No response_
</issue>
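For reference, "letting users decide which loop to run" normally means the application entry point selects the policy itself; with uvloop that is a single call. The sketch below assumes `uvloop` is installed and has no connection to the bittensor codebase:

```python
import asyncio

import uvloop  # third-party (`pip install uvloop`); not a bittensor dependency


async def main() -> None:
    await asyncio.sleep(0)  # application logic would go here


if __name__ == "__main__":
    uvloop.install()     # install the uvloop event-loop policy process-wide
    asyncio.run(main())  # this loop cannot be nested, hence the conflict with nest_asyncio
```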
<code>
[start of bittensor/__init__.py]
1 # The MIT License (MIT)
2 # Copyright © 2021 Yuma Rao
3 # Copyright © 2022-2023 Opentensor Foundation
4 # Copyright © 2023 Opentensor Technologies Inc
5
6 # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
7 # documentation files (the “Software”), to deal in the Software without restriction, including without limitation
8 # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
9 # and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
10
11 # The above copyright notice and this permission notice shall be included in all copies or substantial portions of
12 # the Software.
13
14 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
15 # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
17 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18 # DEALINGS IN THE SOFTWARE.
19
20 from rich.console import Console
21 from rich.traceback import install
22
23 # Install and apply nest asyncio to allow the async functions
24 # to run in a .ipynb
25 import nest_asyncio
26
27 nest_asyncio.apply()
28
29 # Bittensor code and protocol version.
30 __version__ = "7.0.0"
31
32 version_split = __version__.split(".")
33 __version_as_int__: int = (
34 (100 * int(version_split[0]))
35 + (10 * int(version_split[1]))
36 + (1 * int(version_split[2]))
37 )
38 __new_signature_version__ = 360
39
40 # Rich console.
41 __console__ = Console()
42 __use_console__ = True
43
44 # Remove overdue locals in debug training.
45 install(show_locals=False)
46
47
48 def turn_console_off():
49 global __use_console__
50 global __console__
51 from io import StringIO
52
53 __use_console__ = False
54 __console__ = Console(file=StringIO(), stderr=False)
55
56
57 def turn_console_on():
58 global __use_console__
59 global __console__
60 __use_console__ = True
61 __console__ = Console()
62
63
64 turn_console_off()
65
66
67 # Logging helpers.
68 def trace(on: bool = True):
69 logging.set_trace(on)
70
71
72 def debug(on: bool = True):
73 logging.set_debug(on)
74
75
76 # Substrate chain block time (seconds).
77 __blocktime__ = 12
78
79 # Pip address for versioning
80 __pipaddress__ = "https://pypi.org/pypi/bittensor/json"
81
82 # Raw GitHub url for delegates registry file
83 __delegates_details_url__: str = "https://raw.githubusercontent.com/opentensor/bittensor-delegates/main/public/delegates.json"
84
85 # Substrate ss58_format
86 __ss58_format__ = 42
87
88 # Wallet ss58 address length
89 __ss58_address_length__ = 48
90
91 __networks__ = ["local", "finney", "test", "archive"]
92
93 __finney_entrypoint__ = "wss://entrypoint-finney.opentensor.ai:443"
94
95 __finney_test_entrypoint__ = "wss://test.finney.opentensor.ai:443/"
96
97 __archive_entrypoint__ = "wss://archive.chain.opentensor.ai:443/"
98
99 # Needs to use wss://
100 __bellagene_entrypoint__ = "wss://parachain.opentensor.ai:443"
101
102 __local_entrypoint__ = "ws://127.0.0.1:9944"
103
104 __tao_symbol__: str = chr(0x03C4)
105
106 __rao_symbol__: str = chr(0x03C1)
107
108 # Block Explorers map network to explorer url
109 # Must all be polkadotjs explorer urls
110 __network_explorer_map__ = {
111 "opentensor": {
112 "local": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer",
113 "endpoint": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer",
114 "finney": "https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer",
115 },
116 "taostats": {
117 "local": "https://x.taostats.io",
118 "endpoint": "https://x.taostats.io",
119 "finney": "https://x.taostats.io",
120 },
121 }
122
123 # --- Type Registry ---
124 __type_registry__ = {
125 "types": {
126 "Balance": "u64", # Need to override default u128
127 },
128 "runtime_api": {
129 "NeuronInfoRuntimeApi": {
130 "methods": {
131 "get_neuron_lite": {
132 "params": [
133 {
134 "name": "netuid",
135 "type": "u16",
136 },
137 {
138 "name": "uid",
139 "type": "u16",
140 },
141 ],
142 "type": "Vec<u8>",
143 },
144 "get_neurons_lite": {
145 "params": [
146 {
147 "name": "netuid",
148 "type": "u16",
149 },
150 ],
151 "type": "Vec<u8>",
152 },
153 }
154 },
155 "StakeInfoRuntimeApi": {
156 "methods": {
157 "get_stake_info_for_coldkey": {
158 "params": [
159 {
160 "name": "coldkey_account_vec",
161 "type": "Vec<u8>",
162 },
163 ],
164 "type": "Vec<u8>",
165 },
166 "get_stake_info_for_coldkeys": {
167 "params": [
168 {
169 "name": "coldkey_account_vecs",
170 "type": "Vec<Vec<u8>>",
171 },
172 ],
173 "type": "Vec<u8>",
174 },
175 },
176 },
177 "ValidatorIPRuntimeApi": {
178 "methods": {
179 "get_associated_validator_ip_info_for_subnet": {
180 "params": [
181 {
182 "name": "netuid",
183 "type": "u16",
184 },
185 ],
186 "type": "Vec<u8>",
187 },
188 },
189 },
190 "SubnetInfoRuntimeApi": {
191 "methods": {
192 "get_subnet_hyperparams": {
193 "params": [
194 {
195 "name": "netuid",
196 "type": "u16",
197 },
198 ],
199 "type": "Vec<u8>",
200 }
201 }
202 },
203 "SubnetRegistrationRuntimeApi": {
204 "methods": {"get_network_registration_cost": {"params": [], "type": "u64"}}
205 },
206 },
207 }
208
209 from .errors import (
210 BlacklistedException,
211 ChainConnectionError,
212 ChainError,
213 ChainQueryError,
214 ChainTransactionError,
215 IdentityError,
216 InternalServerError,
217 InvalidRequestNameError,
218 KeyFileError,
219 MetadataError,
220 NominationError,
221 NotDelegateError,
222 NotRegisteredError,
223 NotVerifiedException,
224 PostProcessException,
225 PriorityException,
226 RegistrationError,
227 RunException,
228 StakeError,
229 SynapseDendriteNoneException,
230 SynapseParsingError,
231 TransferError,
232 UnknownSynapseError,
233 UnstakeError,
234 )
235
236 from substrateinterface import Keypair # noqa: F401
237 from .config import InvalidConfigFile, DefaultConfig, config, T
238 from .keyfile import (
239 serialized_keypair_to_keyfile_data,
240 deserialize_keypair_from_keyfile_data,
241 validate_password,
242 ask_password_to_encrypt,
243 keyfile_data_is_encrypted_nacl,
244 keyfile_data_is_encrypted_ansible,
245 keyfile_data_is_encrypted_legacy,
246 keyfile_data_is_encrypted,
247 keyfile_data_encryption_method,
248 legacy_encrypt_keyfile_data,
249 encrypt_keyfile_data,
250 get_coldkey_password_from_environment,
251 decrypt_keyfile_data,
252 keyfile,
253 Mockkeyfile,
254 )
255 from .wallet import display_mnemonic_msg, wallet
256
257 from .utils import (
258 ss58_to_vec_u8,
259 unbiased_topk,
260 version_checking,
261 strtobool,
262 strtobool_with_default,
263 get_explorer_root_url_by_network_from_map,
264 get_explorer_root_url_by_network_from_map,
265 get_explorer_url_for_network,
266 ss58_address_to_bytes,
267 U16_NORMALIZED_FLOAT,
268 U64_NORMALIZED_FLOAT,
269 u8_key_to_ss58,
270 hash,
271 wallet_utils,
272 )
273
274 from .utils.balance import Balance as Balance
275 from .chain_data import (
276 AxonInfo,
277 NeuronInfo,
278 NeuronInfoLite,
279 PrometheusInfo,
280 DelegateInfo,
281 DelegateInfoLite,
282 StakeInfo,
283 SubnetInfo,
284 SubnetHyperparameters,
285 IPInfo,
286 ProposalCallData,
287 ProposalVoteData,
288 )
289
290 # Allows avoiding name spacing conflicts and continue access to the `subtensor` module with `subtensor_module` name
291 from . import subtensor as subtensor_module
292
293 # Double import allows using class `Subtensor` by referencing `bittensor.Subtensor` and `bittensor.subtensor`.
294 # This will be available for a while until we remove reference `bittensor.subtensor`
295 from .subtensor import Subtensor
296 from .subtensor import Subtensor as subtensor
297
298 from .cli import cli as cli, COMMANDS as ALL_COMMANDS
299 from .btlogging import logging
300 from .metagraph import metagraph as metagraph
301 from .threadpool import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor
302
303 from .synapse import TerminalInfo, Synapse
304 from .stream import StreamingSynapse
305 from .tensor import tensor, Tensor
306 from .axon import axon as axon
307 from .dendrite import dendrite as dendrite
308
309 from .mock.keyfile_mock import MockKeyfile as MockKeyfile
310 from .mock.subtensor_mock import MockSubtensor as MockSubtensor
311 from .mock.wallet_mock import MockWallet as MockWallet
312
313 from .subnets import SubnetsAPI as SubnetsAPI
314
315 configs = [
316 axon.config(),
317 subtensor.config(),
318 PriorityThreadPoolExecutor.config(),
319 wallet.config(),
320 logging.get_config(),
321 ]
322 defaults = config.merge_all(configs)
323
[end of bittensor/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bittensor/__init__.py b/bittensor/__init__.py
--- a/bittensor/__init__.py
+++ b/bittensor/__init__.py
@@ -16,15 +16,28 @@
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+import os
+import warnings
from rich.console import Console
from rich.traceback import install
-# Install and apply nest asyncio to allow the async functions
-# to run in a .ipynb
-import nest_asyncio
-nest_asyncio.apply()
+if (NEST_ASYNCIO_ENV := os.getenv("NEST_ASYNCIO")) in ("1", None):
+ if NEST_ASYNCIO_ENV is None:
+ warnings.warn(
+ "NEST_ASYNCIO implicitly set to '1'. In the future, the default value will be '0'."
+ "If you use `nest_asyncio` make sure to add it explicitly to your project dependencies,"
+ "as it will be removed from `bittensor` package dependencies in the future."
+ "To silence this warning, explicitly set the environment variable, e.g. `export NEST_ASYNCIO=0`.",
+ DeprecationWarning,
+ )
+ # Install and apply nest asyncio to allow the async functions
+ # to run in a .ipynb
+ import nest_asyncio
+
+ nest_asyncio.apply()
+
# Bittensor code and protocol version.
__version__ = "7.0.0"
|
{"golden_diff": "diff --git a/bittensor/__init__.py b/bittensor/__init__.py\n--- a/bittensor/__init__.py\n+++ b/bittensor/__init__.py\n@@ -16,15 +16,28 @@\n # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n # DEALINGS IN THE SOFTWARE.\n+import os\n+import warnings\n \n from rich.console import Console\n from rich.traceback import install\n \n-# Install and apply nest asyncio to allow the async functions\n-# to run in a .ipynb\n-import nest_asyncio\n \n-nest_asyncio.apply()\n+if (NEST_ASYNCIO_ENV := os.getenv(\"NEST_ASYNCIO\")) in (\"1\", None):\n+ if NEST_ASYNCIO_ENV is None:\n+ warnings.warn(\n+ \"NEST_ASYNCIO implicitly set to '1'. In the future, the default value will be '0'.\"\n+ \"If you use `nest_asyncio` make sure to add it explicitly to your project dependencies,\"\n+ \"as it will be removed from `bittensor` package dependencies in the future.\"\n+ \"To silence this warning, explicitly set the environment variable, e.g. `export NEST_ASYNCIO=0`.\",\n+ DeprecationWarning,\n+ )\n+ # Install and apply nest asyncio to allow the async functions\n+ # to run in a .ipynb\n+ import nest_asyncio\n+\n+ nest_asyncio.apply()\n+\n \n # Bittensor code and protocol version.\n __version__ = \"7.0.0\"\n", "issue": "Remove nest_asyncio from bittensor to allow uvloop support\n### Is your feature request related to a problem? Please describe.\r\n\r\nUvloop, which provides supperior speed, does not allow loop nesting.\r\n\r\nIt is also the case that uvloop is pulled in by popular packages, which [forces some subnets develop hacks to combat this](https://github.com/synapsec-ai/llm-defender-subnet/blob/6c37925c4f34a298607c97dfceebcc01fb74d562/scripts/run_neuron.sh#L140-L146).\r\n\r\nAnd perhaps more importantly, https://github.com/erdewit/nest_asyncio seems to have been abandodend \r\n\r\n### Describe the solution you'd like\r\n\r\nRemove nest_asyncio, and let bittensor users decide which asyncio loop they want to run. Perhaps even suggest (not mandate) running uvloop, since it consistently shows better results in benchmarks than CPython asyncio stdlib loop.\r\n\r\nSeems like there was some attempt of this in the past https://github.com/opentensor/bittensor/pull/1501 for some reason (?) \r\n\r\n### Describe alternatives you've considered\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# The MIT License (MIT)\n# Copyright \u00a9 2021 Yuma Rao\n# Copyright \u00a9 2022-2023 Opentensor Foundation\n# Copyright \u00a9 2023 Opentensor Technologies Inc\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \u201cSoftware\u201d), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n# the Software.\n\n# THE SOFTWARE IS PROVIDED \u201cAS IS\u201d, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom rich.console import Console\nfrom rich.traceback import install\n\n# Install and apply nest asyncio to allow the async functions\n# to run in a .ipynb\nimport nest_asyncio\n\nnest_asyncio.apply()\n\n# Bittensor code and protocol version.\n__version__ = \"7.0.0\"\n\nversion_split = __version__.split(\".\")\n__version_as_int__: int = (\n (100 * int(version_split[0]))\n + (10 * int(version_split[1]))\n + (1 * int(version_split[2]))\n)\n__new_signature_version__ = 360\n\n# Rich console.\n__console__ = Console()\n__use_console__ = True\n\n# Remove overdue locals in debug training.\ninstall(show_locals=False)\n\n\ndef turn_console_off():\n global __use_console__\n global __console__\n from io import StringIO\n\n __use_console__ = False\n __console__ = Console(file=StringIO(), stderr=False)\n\n\ndef turn_console_on():\n global __use_console__\n global __console__\n __use_console__ = True\n __console__ = Console()\n\n\nturn_console_off()\n\n\n# Logging helpers.\ndef trace(on: bool = True):\n logging.set_trace(on)\n\n\ndef debug(on: bool = True):\n logging.set_debug(on)\n\n\n# Substrate chain block time (seconds).\n__blocktime__ = 12\n\n# Pip address for versioning\n__pipaddress__ = \"https://pypi.org/pypi/bittensor/json\"\n\n# Raw GitHub url for delegates registry file\n__delegates_details_url__: str = \"https://raw.githubusercontent.com/opentensor/bittensor-delegates/main/public/delegates.json\"\n\n# Substrate ss58_format\n__ss58_format__ = 42\n\n# Wallet ss58 address length\n__ss58_address_length__ = 48\n\n__networks__ = [\"local\", \"finney\", \"test\", \"archive\"]\n\n__finney_entrypoint__ = \"wss://entrypoint-finney.opentensor.ai:443\"\n\n__finney_test_entrypoint__ = \"wss://test.finney.opentensor.ai:443/\"\n\n__archive_entrypoint__ = \"wss://archive.chain.opentensor.ai:443/\"\n\n# Needs to use wss://\n__bellagene_entrypoint__ = \"wss://parachain.opentensor.ai:443\"\n\n__local_entrypoint__ = \"ws://127.0.0.1:9944\"\n\n__tao_symbol__: str = chr(0x03C4)\n\n__rao_symbol__: str = chr(0x03C1)\n\n# Block Explorers map network to explorer url\n# Must all be polkadotjs explorer urls\n__network_explorer_map__ = {\n \"opentensor\": {\n \"local\": \"https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer\",\n \"endpoint\": \"https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer\",\n \"finney\": \"https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fentrypoint-finney.opentensor.ai%3A443#/explorer\",\n },\n \"taostats\": {\n \"local\": \"https://x.taostats.io\",\n \"endpoint\": \"https://x.taostats.io\",\n \"finney\": \"https://x.taostats.io\",\n },\n}\n\n# --- Type Registry ---\n__type_registry__ = {\n \"types\": {\n \"Balance\": \"u64\", # Need to override default u128\n },\n \"runtime_api\": {\n \"NeuronInfoRuntimeApi\": {\n \"methods\": {\n \"get_neuron_lite\": {\n \"params\": [\n {\n \"name\": \"netuid\",\n \"type\": \"u16\",\n },\n {\n \"name\": \"uid\",\n \"type\": \"u16\",\n },\n ],\n \"type\": \"Vec<u8>\",\n },\n \"get_neurons_lite\": {\n \"params\": [\n {\n \"name\": \"netuid\",\n \"type\": \"u16\",\n },\n ],\n \"type\": \"Vec<u8>\",\n },\n }\n },\n \"StakeInfoRuntimeApi\": {\n \"methods\": {\n \"get_stake_info_for_coldkey\": {\n \"params\": [\n {\n 
\"name\": \"coldkey_account_vec\",\n \"type\": \"Vec<u8>\",\n },\n ],\n \"type\": \"Vec<u8>\",\n },\n \"get_stake_info_for_coldkeys\": {\n \"params\": [\n {\n \"name\": \"coldkey_account_vecs\",\n \"type\": \"Vec<Vec<u8>>\",\n },\n ],\n \"type\": \"Vec<u8>\",\n },\n },\n },\n \"ValidatorIPRuntimeApi\": {\n \"methods\": {\n \"get_associated_validator_ip_info_for_subnet\": {\n \"params\": [\n {\n \"name\": \"netuid\",\n \"type\": \"u16\",\n },\n ],\n \"type\": \"Vec<u8>\",\n },\n },\n },\n \"SubnetInfoRuntimeApi\": {\n \"methods\": {\n \"get_subnet_hyperparams\": {\n \"params\": [\n {\n \"name\": \"netuid\",\n \"type\": \"u16\",\n },\n ],\n \"type\": \"Vec<u8>\",\n }\n }\n },\n \"SubnetRegistrationRuntimeApi\": {\n \"methods\": {\"get_network_registration_cost\": {\"params\": [], \"type\": \"u64\"}}\n },\n },\n}\n\nfrom .errors import (\n BlacklistedException,\n ChainConnectionError,\n ChainError,\n ChainQueryError,\n ChainTransactionError,\n IdentityError,\n InternalServerError,\n InvalidRequestNameError,\n KeyFileError,\n MetadataError,\n NominationError,\n NotDelegateError,\n NotRegisteredError,\n NotVerifiedException,\n PostProcessException,\n PriorityException,\n RegistrationError,\n RunException,\n StakeError,\n SynapseDendriteNoneException,\n SynapseParsingError,\n TransferError,\n UnknownSynapseError,\n UnstakeError,\n)\n\nfrom substrateinterface import Keypair # noqa: F401\nfrom .config import InvalidConfigFile, DefaultConfig, config, T\nfrom .keyfile import (\n serialized_keypair_to_keyfile_data,\n deserialize_keypair_from_keyfile_data,\n validate_password,\n ask_password_to_encrypt,\n keyfile_data_is_encrypted_nacl,\n keyfile_data_is_encrypted_ansible,\n keyfile_data_is_encrypted_legacy,\n keyfile_data_is_encrypted,\n keyfile_data_encryption_method,\n legacy_encrypt_keyfile_data,\n encrypt_keyfile_data,\n get_coldkey_password_from_environment,\n decrypt_keyfile_data,\n keyfile,\n Mockkeyfile,\n)\nfrom .wallet import display_mnemonic_msg, wallet\n\nfrom .utils import (\n ss58_to_vec_u8,\n unbiased_topk,\n version_checking,\n strtobool,\n strtobool_with_default,\n get_explorer_root_url_by_network_from_map,\n get_explorer_root_url_by_network_from_map,\n get_explorer_url_for_network,\n ss58_address_to_bytes,\n U16_NORMALIZED_FLOAT,\n U64_NORMALIZED_FLOAT,\n u8_key_to_ss58,\n hash,\n wallet_utils,\n)\n\nfrom .utils.balance import Balance as Balance\nfrom .chain_data import (\n AxonInfo,\n NeuronInfo,\n NeuronInfoLite,\n PrometheusInfo,\n DelegateInfo,\n DelegateInfoLite,\n StakeInfo,\n SubnetInfo,\n SubnetHyperparameters,\n IPInfo,\n ProposalCallData,\n ProposalVoteData,\n)\n\n# Allows avoiding name spacing conflicts and continue access to the `subtensor` module with `subtensor_module` name\nfrom . 
import subtensor as subtensor_module\n\n# Double import allows using class `Subtensor` by referencing `bittensor.Subtensor` and `bittensor.subtensor`.\n# This will be available for a while until we remove reference `bittensor.subtensor`\nfrom .subtensor import Subtensor\nfrom .subtensor import Subtensor as subtensor\n\nfrom .cli import cli as cli, COMMANDS as ALL_COMMANDS\nfrom .btlogging import logging\nfrom .metagraph import metagraph as metagraph\nfrom .threadpool import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor\n\nfrom .synapse import TerminalInfo, Synapse\nfrom .stream import StreamingSynapse\nfrom .tensor import tensor, Tensor\nfrom .axon import axon as axon\nfrom .dendrite import dendrite as dendrite\n\nfrom .mock.keyfile_mock import MockKeyfile as MockKeyfile\nfrom .mock.subtensor_mock import MockSubtensor as MockSubtensor\nfrom .mock.wallet_mock import MockWallet as MockWallet\n\nfrom .subnets import SubnetsAPI as SubnetsAPI\n\nconfigs = [\n axon.config(),\n subtensor.config(),\n PriorityThreadPoolExecutor.config(),\n wallet.config(),\n logging.get_config(),\n]\ndefaults = config.merge_all(configs)\n", "path": "bittensor/__init__.py"}]}
| 3,989 | 370 |
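Worth noting: the merged change above does not delete `nest_asyncio` outright; it gates the import behind a `NEST_ASYNCIO` environment variable and warns when the variable is unset. The gating idea in isolation, with generic names rather than the bittensor code:

```python
import os
import warnings


def optional_feature_enabled(env_var: str = "ENABLE_FEATURE") -> bool:
    """Opt-out switch: enabled when the variable is '1' or missing, warn if missing."""
    value = os.getenv(env_var)
    if value not in ("1", None):
        return False
    if value is None:
        warnings.warn(
            f"{env_var} implicitly set to '1'; the default will become '0' in a later release.",
            DeprecationWarning,
            stacklevel=2,
        )
    # The real patch performs `import nest_asyncio; nest_asyncio.apply()` at this point.
    return True


if __name__ == "__main__":
    print(optional_feature_enabled())
```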
gh_patches_debug_23730
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1276
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prevent event registrations from being cancelled when the registration is paid (or when you were present)
### Is your feature request related to a problem? Please describe.
Currently it is technically possible to pay for an event and afterwards cancel your registration. That should not be possible (at least not by users themselves). This was never a real problem in practice, but now with Thalia Pay the problem appears.
### Describe the solution you'd like
When a payment exists for an event registration, don't allow cancelling. Also, when paying with Thalia Pay, we might want to make it explicit that after paying you can't cancel anymore (without contacting the board, perhaps).
### Motivation
### Describe alternatives you've considered
- Only really creating the payment at the date of the event → too complex (code)
- As proposed here, but do allow cancellation when the payment is a TPay payment that is not processed yet → too complex (for users)
- Keeping things as they are (allow cancelling and accept that people may pay without joining; it could be considered a feature) → felt undesirable, even without TPay
### Additional context
</issue>
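The requested rule is small: a registration that already has a payment attached must not be cancellable by the member. As a standalone predicate (field names follow the services code quoted below; the class and function themselves are illustrative, not from the repository):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class RegistrationState:
    date_cancelled: Optional[str]  # None while the registration is active
    payment: Optional[str]         # None until the member has paid


def can_cancel(reg: Optional[RegistrationState], cancellation_allowed: bool) -> bool:
    """Mirror of the requested rule: paid registrations stay locked for the member."""
    return (
        reg is not None
        and reg.date_cancelled is None
        and cancellation_allowed
        and reg.payment is None
    )


if __name__ == "__main__":
    paid = RegistrationState(date_cancelled=None, payment="tpay-123")
    unpaid = RegistrationState(date_cancelled=None, payment=None)
    assert not can_cancel(paid, cancellation_allowed=True)
    assert can_cancel(unpaid, cancellation_allowed=True)
```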
<code>
[start of website/events/services.py]
1 from collections import OrderedDict
2
3 from django.utils import timezone
4 from django.utils.datetime_safe import date
5 from django.utils.translation import gettext_lazy as _, get_language
6
7 from events import emails
8 from events.exceptions import RegistrationError
9 from events.models import EventRegistration, RegistrationInformationField, Event
10 from payments.api.fields import PaymentTypeField
11 from payments.services import create_payment, delete_payment
12 from utils.snippets import datetime_to_lectureyear
13
14
15 def is_user_registered(member, event):
16 """
17 Returns if the user is registered for the specified event
18
19 :param member: the user
20 :param event: the event
21 :return: None if registration is not required or no member else True/False
22 """
23 if not event.registration_required or not member.is_authenticated:
24 return None
25
26 return event.registrations.filter(member=member, date_cancelled=None).count() > 0
27
28
29 def event_permissions(member, event, name=None):
30 """
31 Returns a dictionary with the available event permissions of the user
32
33 :param member: the user
34 :param event: the event
35 :param name: the name of a non member registration
36 :return: the permission dictionary
37 """
38 perms = {
39 "create_registration": False,
40 "cancel_registration": False,
41 "update_registration": False,
42 }
43 if not member:
44 return perms
45 if not (member.is_authenticated or name):
46 return perms
47
48 registration = None
49 try:
50 registration = EventRegistration.objects.get(
51 event=event, member=member, name=name
52 )
53 except EventRegistration.DoesNotExist:
54 pass
55
56 perms["create_registration"] = (
57 (registration is None or registration.date_cancelled is not None)
58 and event.registration_allowed
59 and (name or member.can_attend_events)
60 )
61 perms["cancel_registration"] = (
62 registration is not None
63 and registration.date_cancelled is None
64 and (event.cancellation_allowed or name)
65 )
66 perms["update_registration"] = (
67 registration is not None
68 and registration.date_cancelled is None
69 and event.has_fields()
70 and event.registration_allowed
71 and (name or member.can_attend_events)
72 )
73 return perms
74
75
76 def is_organiser(member, event):
77 if member and member.is_authenticated:
78 if member.is_superuser or member.has_perm("events.override_organiser"):
79 return True
80
81 if event:
82 return member.get_member_groups().filter(pk=event.organiser.pk).count() != 0
83
84 return False
85
86
87 def create_registration(member, event):
88 """
89 Creates a new user registration for an event
90
91 :param member: the user
92 :param event: the event
93 :return: returns the registration if successful
94 """
95 if event_permissions(member, event)["create_registration"]:
96 registration = None
97 try:
98 registration = EventRegistration.objects.get(event=event, member=member)
99 except EventRegistration.DoesNotExist:
100 pass
101
102 if registration is None:
103 return EventRegistration.objects.create(event=event, member=member)
104 elif registration.date_cancelled is not None:
105 if registration.is_late_cancellation():
106 raise RegistrationError(
107 _(
108 "You cannot re-register anymore "
109 "since you've cancelled after the "
110 "deadline."
111 )
112 )
113 else:
114 registration.date = timezone.now()
115 registration.date_cancelled = None
116 registration.save()
117
118 return registration
119 elif event_permissions(member, event)["cancel_registration"]:
120 raise RegistrationError(_("You were already registered."))
121 else:
122 raise RegistrationError(_("You may not register."))
123
124
125 def cancel_registration(member, event):
126 """
127 Cancel a user registration for an event
128
129 :param member: the user
130 :param event: the event
131 """
132 registration = None
133 try:
134 registration = EventRegistration.objects.get(event=event, member=member)
135 except EventRegistration.DoesNotExist:
136 pass
137
138 if event_permissions(member, event)["cancel_registration"] and registration:
139 if registration.payment is not None:
140 delete_payment(registration)
141 if registration.queue_position == 0:
142 emails.notify_first_waiting(event)
143
144 if event.send_cancel_email and event.after_cancel_deadline:
145 emails.notify_organiser(event, registration)
146
147 # Note that this doesn"t remove the values for the
148 # information fields that the user entered upon registering.
149 # But this is regarded as a feature, not a bug. Especially
150 # since the values will still appear in the backend.
151 registration.date_cancelled = timezone.now()
152 registration.save()
153 else:
154 raise RegistrationError(_("You are not registered for this event."))
155
156
157 def update_registration(
158 member=None, event=None, name=None, registration=None, field_values=None
159 ):
160 """
161 Updates a user registration of an event
162
163 :param request: http request
164 :param member: the user
165 :param event: the event
166 :param name: the name of a registration not associated with a user
167 :param registration: the registration
168 :param field_values: values for the information fields
169 """
170 if not registration:
171 try:
172 registration = EventRegistration.objects.get(
173 event=event, member=member, name=name
174 )
175 except EventRegistration.DoesNotExist as error:
176 raise RegistrationError(
177 _("You are not registered for this event.")
178 ) from error
179 else:
180 member = registration.member
181 event = registration.event
182 name = registration.name
183
184 if (
185 not event_permissions(member, event, name)["update_registration"]
186 or not field_values
187 ):
188 return
189
190 for field_id, field_value in field_values:
191 field = RegistrationInformationField.objects.get(
192 id=field_id.replace("info_field_", "")
193 )
194
195 if (
196 field.type == RegistrationInformationField.INTEGER_FIELD
197 and field_value is None
198 ):
199 field_value = 0
200 elif (
201 field.type == RegistrationInformationField.BOOLEAN_FIELD
202 and field_value is None
203 ):
204 field_value = False
205 elif (
206 field.type == RegistrationInformationField.TEXT_FIELD
207 and field_value is None
208 ):
209 field_value = ""
210
211 field.set_value_for(registration, field_value)
212
213
214 def registration_fields(request, member=None, event=None, registration=None, name=None):
215 """
216 Returns information about the registration fields of a registration
217
218 :param member: the user (optional if registration provided)
219 :param name: the name of a non member registration
220 (optional if registration provided)
221 :param event: the event (optional if registration provided)
222 :param registration: the registration (optional if member & event provided)
223 :return: the fields
224 """
225
226 if registration is None:
227 try:
228 registration = EventRegistration.objects.get(
229 event=event, member=member, name=name
230 )
231 except EventRegistration.DoesNotExist as error:
232 raise RegistrationError(
233 _("You are not registered for this event.")
234 ) from error
235 except EventRegistration.MultipleObjectsReturned as error:
236 raise RegistrationError(
237 _("Unable to find the right registration.")
238 ) from error
239 else:
240 member = registration.member
241 event = registration.event
242 name = registration.name
243
244 perms = event_permissions(member, event, name)[
245 "update_registration"
246 ] or is_organiser(request.member, event)
247 if perms and registration:
248 information_fields = registration.information_fields
249 fields = OrderedDict()
250
251 for information_field in information_fields:
252 field = information_field["field"]
253
254 fields["info_field_{}".format(field.id)] = {
255 "type": field.type,
256 "label": getattr(field, "{}_{}".format("name", get_language())),
257 "description": getattr(
258 field, "{}_{}".format("description", get_language())
259 ),
260 "value": information_field["value"],
261 "required": field.required,
262 }
263
264 return fields
265 else:
266 raise RegistrationError(_("You are not allowed to update this registration."))
267
268
269 def update_registration_by_organiser(registration, member, data):
270 if not is_organiser(member, registration.event):
271 raise RegistrationError(_("You are not allowed to update this registration."))
272
273 if "payment" in data:
274 if data["payment"]["type"] == PaymentTypeField.NO_PAYMENT:
275 if registration.payment is not None:
276 delete_payment(registration)
277 else:
278 registration.payment = create_payment(
279 payable=registration,
280 processed_by=member,
281 pay_type=data["payment"]["type"],
282 )
283
284 if "present" in data:
285 registration.present = data["present"]
286
287 registration.save()
288
289
290 def generate_category_statistics():
291 """
292 Generate statistics about events, number of events per category
293 :return: Dict with key, value resp. being category, event count.
294 """
295 year = datetime_to_lectureyear(timezone.now())
296
297 data = {}
298 for i in range(5):
299 year_start = date(year=year - i, month=9, day=1)
300 year_end = date(year=year - i + 1, month=9, day=1)
301 data[str(year - i)] = {
302 str(display): Event.objects.filter(
303 category=key, start__gte=year_start, end__lte=year_end
304 ).count()
305 for key, display in Event.EVENT_CATEGORIES
306 }
307
308 return data
309
[end of website/events/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/events/services.py b/website/events/services.py
--- a/website/events/services.py
+++ b/website/events/services.py
@@ -62,6 +62,7 @@
registration is not None
and registration.date_cancelled is None
and (event.cancellation_allowed or name)
+ and registration.payment is None
)
perms["update_registration"] = (
registration is not None
@@ -136,8 +137,6 @@
pass
if event_permissions(member, event)["cancel_registration"] and registration:
- if registration.payment is not None:
- delete_payment(registration)
if registration.queue_position == 0:
emails.notify_first_waiting(event)
@@ -151,7 +150,7 @@
registration.date_cancelled = timezone.now()
registration.save()
else:
- raise RegistrationError(_("You are not registered for this event."))
+ raise RegistrationError(_("You are not allowed to deregister for this event."))
def update_registration(
|
{"golden_diff": "diff --git a/website/events/services.py b/website/events/services.py\n--- a/website/events/services.py\n+++ b/website/events/services.py\n@@ -62,6 +62,7 @@\n registration is not None\n and registration.date_cancelled is None\n and (event.cancellation_allowed or name)\n+ and registration.payment is None\n )\n perms[\"update_registration\"] = (\n registration is not None\n@@ -136,8 +137,6 @@\n pass\n \n if event_permissions(member, event)[\"cancel_registration\"] and registration:\n- if registration.payment is not None:\n- delete_payment(registration)\n if registration.queue_position == 0:\n emails.notify_first_waiting(event)\n \n@@ -151,7 +150,7 @@\n registration.date_cancelled = timezone.now()\n registration.save()\n else:\n- raise RegistrationError(_(\"You are not registered for this event.\"))\n+ raise RegistrationError(_(\"You are not allowed to deregister for this event.\"))\n \n \n def update_registration(\n", "issue": "Prevent event registrations cancelling when registration is paid (or where you were present)\n### Is your feature request related to a problem? Please describe.\r\nCurrently it is technically possible to pay for an event and afterwards cancel your registration. That should not be possible (at least not by users themselves). This never was a real problem in practice but now with Thalia Pay the problem appears.\r\n\r\n### Describe the solution you'd like\r\nWhen a payment exists for an event registration, don't allow cancelling. Also when paying with Thalia Pay we might want to make explicit that after paying you can't cancel anymore (without maybe contacting the board). \r\n\r\n### Motivation\r\n\r\n### Describe alternatives you've considered\r\n- Only really creating the payment at the date of the event \u2192 too complex (code)\r\n- As proposed here, but do allow cancellation when the payment is a TPay payment that is not processed yet \u2192 too complex (for users)\r\n- Keeping things as it is (allow cancelling and accept people may pay without joining - it could be considered a feature) \u2192 felt undesirable, also without TPay\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.utils import timezone\nfrom django.utils.datetime_safe import date\nfrom django.utils.translation import gettext_lazy as _, get_language\n\nfrom events import emails\nfrom events.exceptions import RegistrationError\nfrom events.models import EventRegistration, RegistrationInformationField, Event\nfrom payments.api.fields import PaymentTypeField\nfrom payments.services import create_payment, delete_payment\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef is_user_registered(member, event):\n \"\"\"\n Returns if the user is registered for the specified event\n\n :param member: the user\n :param event: the event\n :return: None if registration is not required or no member else True/False\n \"\"\"\n if not event.registration_required or not member.is_authenticated:\n return None\n\n return event.registrations.filter(member=member, date_cancelled=None).count() > 0\n\n\ndef event_permissions(member, event, name=None):\n \"\"\"\n Returns a dictionary with the available event permissions of the user\n\n :param member: the user\n :param event: the event\n :param name: the name of a non member registration\n :return: the permission dictionary\n \"\"\"\n perms = {\n \"create_registration\": False,\n \"cancel_registration\": False,\n \"update_registration\": False,\n }\n if not member:\n return perms\n if not 
(member.is_authenticated or name):\n return perms\n\n registration = None\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist:\n pass\n\n perms[\"create_registration\"] = (\n (registration is None or registration.date_cancelled is not None)\n and event.registration_allowed\n and (name or member.can_attend_events)\n )\n perms[\"cancel_registration\"] = (\n registration is not None\n and registration.date_cancelled is None\n and (event.cancellation_allowed or name)\n )\n perms[\"update_registration\"] = (\n registration is not None\n and registration.date_cancelled is None\n and event.has_fields()\n and event.registration_allowed\n and (name or member.can_attend_events)\n )\n return perms\n\n\ndef is_organiser(member, event):\n if member and member.is_authenticated:\n if member.is_superuser or member.has_perm(\"events.override_organiser\"):\n return True\n\n if event:\n return member.get_member_groups().filter(pk=event.organiser.pk).count() != 0\n\n return False\n\n\ndef create_registration(member, event):\n \"\"\"\n Creates a new user registration for an event\n\n :param member: the user\n :param event: the event\n :return: returns the registration if successful\n \"\"\"\n if event_permissions(member, event)[\"create_registration\"]:\n registration = None\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n pass\n\n if registration is None:\n return EventRegistration.objects.create(event=event, member=member)\n elif registration.date_cancelled is not None:\n if registration.is_late_cancellation():\n raise RegistrationError(\n _(\n \"You cannot re-register anymore \"\n \"since you've cancelled after the \"\n \"deadline.\"\n )\n )\n else:\n registration.date = timezone.now()\n registration.date_cancelled = None\n registration.save()\n\n return registration\n elif event_permissions(member, event)[\"cancel_registration\"]:\n raise RegistrationError(_(\"You were already registered.\"))\n else:\n raise RegistrationError(_(\"You may not register.\"))\n\n\ndef cancel_registration(member, event):\n \"\"\"\n Cancel a user registration for an event\n\n :param member: the user\n :param event: the event\n \"\"\"\n registration = None\n try:\n registration = EventRegistration.objects.get(event=event, member=member)\n except EventRegistration.DoesNotExist:\n pass\n\n if event_permissions(member, event)[\"cancel_registration\"] and registration:\n if registration.payment is not None:\n delete_payment(registration)\n if registration.queue_position == 0:\n emails.notify_first_waiting(event)\n\n if event.send_cancel_email and event.after_cancel_deadline:\n emails.notify_organiser(event, registration)\n\n # Note that this doesn\"t remove the values for the\n # information fields that the user entered upon registering.\n # But this is regarded as a feature, not a bug. 
Especially\n # since the values will still appear in the backend.\n registration.date_cancelled = timezone.now()\n registration.save()\n else:\n raise RegistrationError(_(\"You are not registered for this event.\"))\n\n\ndef update_registration(\n member=None, event=None, name=None, registration=None, field_values=None\n):\n \"\"\"\n Updates a user registration of an event\n\n :param request: http request\n :param member: the user\n :param event: the event\n :param name: the name of a registration not associated with a user\n :param registration: the registration\n :param field_values: values for the information fields\n \"\"\"\n if not registration:\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist as error:\n raise RegistrationError(\n _(\"You are not registered for this event.\")\n ) from error\n else:\n member = registration.member\n event = registration.event\n name = registration.name\n\n if (\n not event_permissions(member, event, name)[\"update_registration\"]\n or not field_values\n ):\n return\n\n for field_id, field_value in field_values:\n field = RegistrationInformationField.objects.get(\n id=field_id.replace(\"info_field_\", \"\")\n )\n\n if (\n field.type == RegistrationInformationField.INTEGER_FIELD\n and field_value is None\n ):\n field_value = 0\n elif (\n field.type == RegistrationInformationField.BOOLEAN_FIELD\n and field_value is None\n ):\n field_value = False\n elif (\n field.type == RegistrationInformationField.TEXT_FIELD\n and field_value is None\n ):\n field_value = \"\"\n\n field.set_value_for(registration, field_value)\n\n\ndef registration_fields(request, member=None, event=None, registration=None, name=None):\n \"\"\"\n Returns information about the registration fields of a registration\n\n :param member: the user (optional if registration provided)\n :param name: the name of a non member registration\n (optional if registration provided)\n :param event: the event (optional if registration provided)\n :param registration: the registration (optional if member & event provided)\n :return: the fields\n \"\"\"\n\n if registration is None:\n try:\n registration = EventRegistration.objects.get(\n event=event, member=member, name=name\n )\n except EventRegistration.DoesNotExist as error:\n raise RegistrationError(\n _(\"You are not registered for this event.\")\n ) from error\n except EventRegistration.MultipleObjectsReturned as error:\n raise RegistrationError(\n _(\"Unable to find the right registration.\")\n ) from error\n else:\n member = registration.member\n event = registration.event\n name = registration.name\n\n perms = event_permissions(member, event, name)[\n \"update_registration\"\n ] or is_organiser(request.member, event)\n if perms and registration:\n information_fields = registration.information_fields\n fields = OrderedDict()\n\n for information_field in information_fields:\n field = information_field[\"field\"]\n\n fields[\"info_field_{}\".format(field.id)] = {\n \"type\": field.type,\n \"label\": getattr(field, \"{}_{}\".format(\"name\", get_language())),\n \"description\": getattr(\n field, \"{}_{}\".format(\"description\", get_language())\n ),\n \"value\": information_field[\"value\"],\n \"required\": field.required,\n }\n\n return fields\n else:\n raise RegistrationError(_(\"You are not allowed to update this registration.\"))\n\n\ndef update_registration_by_organiser(registration, member, data):\n if not is_organiser(member, registration.event):\n raise 
RegistrationError(_(\"You are not allowed to update this registration.\"))\n\n if \"payment\" in data:\n if data[\"payment\"][\"type\"] == PaymentTypeField.NO_PAYMENT:\n if registration.payment is not None:\n delete_payment(registration)\n else:\n registration.payment = create_payment(\n payable=registration,\n processed_by=member,\n pay_type=data[\"payment\"][\"type\"],\n )\n\n if \"present\" in data:\n registration.present = data[\"present\"]\n\n registration.save()\n\n\ndef generate_category_statistics():\n \"\"\"\n Generate statistics about events, number of events per category\n :return: Dict with key, value resp. being category, event count.\n \"\"\"\n year = datetime_to_lectureyear(timezone.now())\n\n data = {}\n for i in range(5):\n year_start = date(year=year - i, month=9, day=1)\n year_end = date(year=year - i + 1, month=9, day=1)\n data[str(year - i)] = {\n str(display): Event.objects.filter(\n category=key, start__gte=year_start, end__lte=year_end\n ).count()\n for key, display in Event.EVENT_CATEGORIES\n }\n\n return data\n", "path": "website/events/services.py"}]}
| 3,581 | 225 |
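Note that the diff above only closes the self-service path: `cancel_registration` no longer deletes payments and now refuses with "not allowed to deregister", while the organiser flow in `update_registration_by_organiser` (quoted in the prompt) can still clear a payment via `NO_PAYMENT`. A rough sketch of the resulting split, using hypothetical data structures rather than the Django models:

```python
class RegistrationError(Exception):
    pass


def member_cancel(registration: dict, cancellation_allowed: bool = True) -> None:
    # After the patch the permission check already fails when a payment exists.
    if registration.get("payment") is not None or not cancellation_allowed:
        raise RegistrationError("You are not allowed to deregister for this event.")
    registration["date_cancelled"] = "now"


def organiser_clear_payment(registration: dict) -> None:
    # Organiser-only path kept from services.py: NO_PAYMENT removes the payment.
    registration["payment"] = None


if __name__ == "__main__":
    reg = {"payment": "tpay-123", "date_cancelled": None}
    try:
        member_cancel(reg)
    except RegistrationError as exc:
        print("blocked:", exc)
    organiser_clear_payment(reg)
    member_cancel(reg)  # now allowed
    print("cancelled at:", reg["date_cancelled"])
```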
gh_patches_debug_12091
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-281
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug with building docs - Readme is not correct
I was just trying to follow the [Readme.md](https://github.com/projectmesa/mesa/blob/master/docs/README.md) to build the docs, and I got the following error:
```
mesa/docs [git fix-docs] $ make html
sphinx-build -b html -d _build/doctrees . _build/html
Running Sphinx v1.3.6
Recursion error:
maximum recursion depth exceeded while calling a Python object
This can happen with very large or deeply nested source files. You can carefully increase the default Python recursion limit of 1000 in conf.py with e.g.:
import sys; sys.setrecursionlimit(1500)
make: *** [html] Error 1
```
Not sure why I am running into this. I feel like I have come across this before, but I can't remember how I fixed it.
</issue>
<code>
[start of docs/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Mesa documentation build configuration file, created by
5 # sphinx-quickstart on Sun Jan 4 23:34:09 2015.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 import sys
17 import os
18
19
20 # Adding mock imports to see if this builds
21 from unittest.mock import MagicMock
22
23 class Mock(MagicMock):
24 @classmethod
25 def __getattr__(cls, name):
26 return Mock()
27
28 MOCK_MODULES = ['numpy', 'pandas']
29 sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
30
31 # End of mock
32
33 # If extensions (or modules to document with autodoc) are in another directory,
34 # add these directories to sys.path here. If the directory is relative to the
35 # documentation root, use os.path.abspath to make it absolute, like shown here.
36 sys.path.insert(0, os.path.abspath('.'))
37 sys.path.insert(0, "../examples")
38 sys.path.insert(0, "../mesa")
39
40
41 # -- General configuration ------------------------------------------------
42
43 # If your documentation needs a minimal Sphinx version, state it here.
44 #needs_sphinx = '1.0'
45
46 # Add any Sphinx extension module names here, as strings. They can be
47 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
48 # ones.
49 extensions = [
50 'sphinx.ext.autodoc',
51 'sphinx.ext.doctest',
52 'sphinx.ext.intersphinx',
53 'sphinx.ext.todo',
54 'sphinx.ext.coverage',
55 'sphinx.ext.mathjax',
56 'sphinx.ext.ifconfig',
57 'sphinx.ext.viewcode',
58 ]
59
60 # Add any paths that contain templates here, relative to this directory.
61 templates_path = ['_templates']
62
63 # The suffix of source filenames.
64 source_suffix = '.rst'
65
66 # The encoding of source files.
67 #source_encoding = 'utf-8-sig'
68
69 # The master toctree document.
70 master_doc = 'index'
71
72 # General information about the project.
73 project = 'Mesa'
74 copyright = '2016, Project Mesa Team'
75
76 # The version info for the project you're documenting, acts as replacement for
77 # |version| and |release|, also used in various other places throughout the
78 # built documents.
79 #
80 # The short X.Y version.
81 version = '0.5'
82 # The full version, including alpha/beta/rc tags.
83 release = '.1'
84
85 # The language for content autogenerated by Sphinx. Refer to documentation
86 # for a list of supported languages.
87 #language = None
88
89 # There are two options for replacing |today|: either, you set today to some
90 # non-false value, then it is used:
91 #today = ''
92 # Else, today_fmt is used as the format for a strftime call.
93 #today_fmt = '%B %d, %Y'
94
95 # List of patterns, relative to source directory, that match files and
96 # directories to ignore when looking for source files.
97 exclude_patterns = ['_build']
98
99 # The reST default role (used for this markup: `text`) to use for all
100 # documents.
101 #default_role = None
102
103 # If true, '()' will be appended to :func: etc. cross-reference text.
104 #add_function_parentheses = True
105
106 # If true, the current module name will be prepended to all description
107 # unit titles (such as .. function::).
108 #add_module_names = True
109
110 # If true, sectionauthor and moduleauthor directives will be shown in the
111 # output. They are ignored by default.
112 #show_authors = False
113
114 # The name of the Pygments (syntax highlighting) style to use.
115 pygments_style = 'sphinx'
116
117 # A list of ignored prefixes for module index sorting.
118 #modindex_common_prefix = []
119
120 # If true, keep warnings as "system message" paragraphs in the built documents.
121 #keep_warnings = False
122
123
124 # -- Options for HTML output ----------------------------------------------
125
126 # The theme to use for HTML and HTML Help pages. See the documentation for
127 # a list of builtin themes.
128 html_theme = 'default'
129
130 # Theme options are theme-specific and customize the look and feel of a theme
131 # further. For a list of options available for each theme, see the
132 # documentation.
133 #html_theme_options = {}
134
135 # Add any paths that contain custom themes here, relative to this directory.
136 #html_theme_path = []
137
138 # The name for this set of Sphinx documents. If None, it defaults to
139 # "<project> v<release> documentation".
140 #html_title = None
141
142 # A shorter title for the navigation bar. Default is the same as html_title.
143 #html_short_title = None
144
145 # The name of an image file (relative to this directory) to place at the top
146 # of the sidebar.
147 html_logo = "images/mesa_logo.png"
148
149 # The name of an image file (within the static path) to use as favicon of the
150 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
151 # pixels large.
152 html_favicon = "images/mesa_logo.ico"
153
154 # Add any paths that contain custom static files (such as style sheets) here,
155 # relative to this directory. They are copied after the builtin static files,
156 # so a file named "default.css" will overwrite the builtin "default.css".
157 html_static_path = ['_static']
158
159 # Add any extra paths that contain custom files (such as robots.txt or
160 # .htaccess) here, relative to this directory. These files are copied
161 # directly to the root of the documentation.
162 #html_extra_path = []
163
164 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
165 # using the given strftime format.
166 #html_last_updated_fmt = '%b %d, %Y'
167
168 # If true, SmartyPants will be used to convert quotes and dashes to
169 # typographically correct entities.
170 #html_use_smartypants = True
171
172 # Custom sidebar templates, maps document names to template names.
173 #html_sidebars = {}
174
175 # Additional templates that should be rendered to pages, maps page names to
176 # template names.
177 #html_additional_pages = {}
178
179 # If false, no module index is generated.
180 #html_domain_indices = True
181
182 # If false, no index is generated.
183 #html_use_index = True
184
185 # If true, the index is split into individual pages for each letter.
186 #html_split_index = False
187
188 # If true, links to the reST sources are added to the pages.
189 #html_show_sourcelink = True
190
191 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
192 html_show_sphinx = False
193
194 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
195 #html_show_copyright = True
196
197 # If true, an OpenSearch description file will be output, and all pages will
198 # contain a <link> tag referring to it. The value of this option must be the
199 # base URL from which the finished HTML is served.
200 #html_use_opensearch = ''
201
202 # This is the file name suffix for HTML files (e.g. ".xhtml").
203 #html_file_suffix = None
204
205 # Output file base name for HTML help builder.
206 htmlhelp_basename = 'Mesadoc'
207
208
209 # -- Options for LaTeX output ---------------------------------------------
210
211 latex_elements = {
212 # The paper size ('letterpaper' or 'a4paper').
213 #'papersize': 'letterpaper',
214
215 # The font size ('10pt', '11pt' or '12pt').
216 #'pointsize': '10pt',
217
218 # Additional stuff for the LaTeX preamble.
219 #'preamble': '',
220 }
221
222 # Grouping the document tree into LaTeX files. List of tuples
223 # (source start file, target name, title,
224 # author, documentclass [howto, manual, or own class]).
225 latex_documents = [
226 ('index', 'Mesa.tex', 'Mesa Documentation',
227 'Project Mesa Team', 'manual'),
228 ]
229
230 # The name of an image file (relative to this directory) to place at the top of
231 # the title page.
232 #latex_logo = None
233
234 # For "manual" documents, if this is true, then toplevel headings are parts,
235 # not chapters.
236 #latex_use_parts = False
237
238 # If true, show page references after internal links.
239 #latex_show_pagerefs = False
240
241 # If true, show URL addresses after external links.
242 #latex_show_urls = False
243
244 # Documents to append as an appendix to all manuals.
245 #latex_appendices = []
246
247 # If false, no module index is generated.
248 #latex_domain_indices = True
249
250
251 # -- Options for manual page output ---------------------------------------
252
253 # One entry per manual page. List of tuples
254 # (source start file, name, description, authors, manual section).
255 man_pages = [
256 ('index', 'mesa', 'Mesa Documentation',
257 ['Project Mesa Team'], 1)
258 ]
259
260 # If true, show URL addresses after external links.
261 #man_show_urls = False
262
263
264 # -- Options for Texinfo output -------------------------------------------
265
266 # Grouping the document tree into Texinfo files. List of tuples
267 # (source start file, target name, title, author,
268 # dir menu entry, description, category)
269 texinfo_documents = [
270 ('index', 'Mesa', 'Mesa Documentation',
271 'Project Mesa Team', 'Mesa', 'One line description of project.',
272 'Miscellaneous'),
273 ]
274
275 # Documents to append as an appendix to all manuals.
276 #texinfo_appendices = []
277
278 # If false, no module index is generated.
279 #texinfo_domain_indices = True
280
281 # How to display URL addresses: 'footnote', 'no', or 'inline'.
282 #texinfo_show_urls = 'footnote'
283
284 # If true, do not generate a @detailmenu in the "Top" node's menu.
285 #texinfo_no_detailmenu = False
286
287
288 # Example configuration for intersphinx: refer to the Python standard library.
289 intersphinx_mapping = {'http://docs.python.org/': None}
290
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -17,19 +17,6 @@
import os
-# Adding mock imports to see if this builds
-from unittest.mock import MagicMock
-
-class Mock(MagicMock):
- @classmethod
- def __getattr__(cls, name):
- return Mock()
-
-MOCK_MODULES = ['numpy', 'pandas']
-sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
-
-# End of mock
-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -17,19 +17,6 @@\n import os\n \n \n-# Adding mock imports to see if this builds\n-from unittest.mock import MagicMock\n-\n-class Mock(MagicMock):\n- @classmethod\n- def __getattr__(cls, name):\n- return Mock()\n-\n-MOCK_MODULES = ['numpy', 'pandas']\n-sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n-\n-# End of mock\n-\n # If extensions (or modules to document with autodoc) are in another directory,\n # add these directories to sys.path here. If the directory is relative to the\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n", "issue": "Bug with building docs - Readme is not correct\nI was just trying to follow the [Readme.md](https://github.com/projectmesa/mesa/blob/master/docs/README.md) to build the docs, and I get the following error: \n\n```\nmesa/docs [git fix-docs] $ make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.3.6\n\nRecursion error:\nmaximum recursion depth exceeded while calling a Python object\n\nThis can happen with very large or deeply nested source files. You can carefully increase the default Python recursion limit of 1000 in conf.py with e.g.:\n import sys; sys.setrecursionlimit(1500)\nmake: *** [html] Error 1\n```\n\nNot sure why I am running into this. I feel like I have come across this before, but I can't remember how I fixed. \n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Mesa documentation build configuration file, created by\n# sphinx-quickstart on Sun Jan 4 23:34:09 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n\n# Adding mock imports to see if this builds\nfrom unittest.mock import MagicMock\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return Mock()\n\nMOCK_MODULES = ['numpy', 'pandas']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n# End of mock\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, \"../examples\")\nsys.path.insert(0, \"../mesa\")\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Mesa'\ncopyright = '2016, Project Mesa Team'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.5'\n# The full version, including alpha/beta/rc tags.\nrelease = '.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"images/mesa_logo.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = \"images/mesa_logo.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\nhtml_show_sphinx = False\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Mesadoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Mesa.tex', 'Mesa Documentation',\n 'Project Mesa Team', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'mesa', 'Mesa Documentation',\n ['Project Mesa Team'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Mesa', 'Mesa Documentation',\n 'Project Mesa Team', 'Mesa', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'http://docs.python.org/': None}\n", "path": "docs/conf.py"}]}
| 3,729 | 174 |
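
The recursion in the Mesa record traces back to the block its patch deletes: a `MagicMock` subclass that answers every attribute lookup with another mock, the most plausible source of the `maximum recursion depth exceeded` failure quoted in the issue. The sketch below shows a commonly used built-in alternative, Sphinx's `autodoc_mock_imports` option; this option is a suggestion on my part rather than something the patch itself adds, and only the module names are taken from the deleted `MOCK_MODULES` list.

```python
# Hypothetical docs/conf.py fragment, not the Mesa project's actual file.
# Rather than hand-rolling a MagicMock subclass, let sphinx.ext.autodoc
# stub the heavy scientific imports while the docs build.

extensions = [
    "sphinx.ext.autodoc",   # required for the option below
]

# Modules replaced with inert stubs at build time; the names mirror the
# MOCK_MODULES list removed by the patch.
autodoc_mock_imports = ["numpy", "pandas"]
```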
gh_patches_debug_20144
|
rasdani/github-patches
|
git_diff
|
openfun__richie-1715
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On the search page, the "more options" feature is broken on the "contributors" filter
## Bug Report
**Problematic behavior/code**
The "more options" feature on the "contributors" filter on the search page is broken.
**Expected Behavior**
When we click on "more options" on the "contributors" filter on the search page, we expect to see a list of more contributors and be able to type a search request to refine the search and find a specific contributor by his/her first/last name.
**Steps to Reproduce**
1. Go to the search page: https://www.fun-mooc.fr/en/courses/
2. Click "more options" on the "contributors" filter
**Environment**
- Richie version: 2.5.0
- Platform: docker
</issue>
<code>
[start of src/richie/apps/search/defaults.py]
1 """
2 Import custom settings and set up defaults for values the Search app needs
3 """
4 from django.conf import settings
5 from django.utils.functional import lazy
6 from django.utils.translation import gettext_lazy as _
7
8 # Elasticsearch
9 ES_CHUNK_SIZE = 500
10 ES_PAGE_SIZE = 10
11
12 # Use a lazy to enable easier testing by not defining the value at bootstrap time
13 ES_INDICES_PREFIX = lazy(
14 lambda: getattr(settings, "RICHIE_ES_INDICES_PREFIX", "richie")
15 )()
16
17 # Define which analyzer should be used for each language
18 QUERY_ANALYZERS = getattr(
19 settings, "RICHIE_QUERY_ANALYZERS", {"en": "english", "fr": "french"}
20 )
21
22 # Define the scoring boost (in ElasticSearch) related value names receive when using
23 # full-text search.
24 # For example, when a user searches for "Science" in full-text, it should match any
25 # course whose category contains "Science" or a related word, albeit with a lower
26 # score than courses that include it in their title or description.
27 # This lower score factor is the boost value we get or set here.
28 RELATED_CONTENT_BOOST = 0.05
29
30 FACET_SORTING_DEFAULT = "count"
31
32 FACET_COUNTS_DEFAULT_LIMIT = getattr(settings, "RICHIE_FACET_COUNTS_DEFAULT_LIMIT", 10)
33 FACET_COUNTS_MAX_LIMIT = getattr(settings, "RICHIE_FACET_COUNTS_MAX_LIMIT", 50)
34
35 ES_STATE_WEIGHTS = getattr(settings, "RICHIE_ES_STATE_WEIGHTS", None) or [
36 80, # ONGOING_OPEN
37 70, # FUTURE_OPEN
38 60, # ARCHIVED_OPEN
39 30, # FUTURE_NOT_YET_OPEN
40 6, # FUTURE_CLOSED
41 5, # ONGOING_CLOSED
42 1, # ARCHIVED_CLOSED
43 ]
44
45 FILTERS_CONFIGURATION = [
46 (
47 "richie.apps.search.filter_definitions.StaticChoicesFilterDefinition",
48 {
49 "fragment_map": {"new": [{"term": {"is_new": True}}]},
50 "human_name": _("New courses"),
51 "min_doc_count": 0,
52 "name": "new",
53 "position": 0,
54 "sorting": "conf",
55 "values": {"new": _("First session")},
56 },
57 ),
58 (
59 "richie.apps.search.filter_definitions.NestingWrapper",
60 {
61 "name": "course_runs",
62 "filters": [
63 (
64 "richie.apps.search.filter_definitions.AvailabilityFilterDefinition",
65 {
66 "human_name": _("Availability"),
67 "is_drilldown": True,
68 "min_doc_count": 0,
69 "name": "availability",
70 "position": 1,
71 "sorting": "conf",
72 },
73 ),
74 (
75 "richie.apps.search.filter_definitions.LanguagesFilterDefinition",
76 {
77 "human_name": _("Languages"),
78 # There are too many available languages to show them all, all the time.
79 # Eg. 200 languages, 190+ of which will have 0 matching courses.
80 "min_doc_count": 1,
81 "name": "languages",
82 "position": 5,
83 },
84 ),
85 ],
86 },
87 ),
88 (
89 "richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition",
90 {
91 "human_name": _("Subjects"),
92 "is_autocompletable": True,
93 "is_searchable": True,
94 "min_doc_count": 0,
95 "name": "subjects",
96 "position": 2,
97 "reverse_id": "subjects",
98 "term": "categories",
99 },
100 ),
101 (
102 "richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition",
103 {
104 "human_name": _("Levels"),
105 "is_autocompletable": True,
106 "is_searchable": True,
107 "min_doc_count": 0,
108 "name": "levels",
109 "position": 3,
110 "reverse_id": "levels",
111 "term": "categories",
112 },
113 ),
114 (
115 "richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition",
116 {
117 "human_name": _("Organizations"),
118 "is_autocompletable": True,
119 "is_searchable": True,
120 "min_doc_count": 0,
121 "name": "organizations",
122 "position": 4,
123 "reverse_id": "organizations",
124 },
125 ),
126 (
127 "richie.apps.search.filter_definitions.IndexableFilterDefinition",
128 {
129 "human_name": _("Persons"),
130 "is_autocompletable": True,
131 "is_searchable": True,
132 "min_doc_count": 0,
133 "name": "persons",
134 "position": 5,
135 "reverse_id": "persons",
136 },
137 ),
138 ]
139
[end of src/richie/apps/search/defaults.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/richie/apps/search/defaults.py b/src/richie/apps/search/defaults.py
--- a/src/richie/apps/search/defaults.py
+++ b/src/richie/apps/search/defaults.py
@@ -118,6 +118,8 @@
"is_autocompletable": True,
"is_searchable": True,
"min_doc_count": 0,
+ # Note: this is a special name that connects the filter to Organization objects
+ # in Richie as well was the corresponding indexer and API endpoint.
"name": "organizations",
"position": 4,
"reverse_id": "organizations",
@@ -130,6 +132,8 @@
"is_autocompletable": True,
"is_searchable": True,
"min_doc_count": 0,
+ # Note: this is a special name that connects the filter to Person objects
+ # in Richie as well was the corresponding indexer and API endpoint.
"name": "persons",
"position": 5,
"reverse_id": "persons",
|
{"golden_diff": "diff --git a/src/richie/apps/search/defaults.py b/src/richie/apps/search/defaults.py\n--- a/src/richie/apps/search/defaults.py\n+++ b/src/richie/apps/search/defaults.py\n@@ -118,6 +118,8 @@\n \"is_autocompletable\": True,\n \"is_searchable\": True,\n \"min_doc_count\": 0,\n+ # Note: this is a special name that connects the filter to Organization objects\n+ # in Richie as well was the corresponding indexer and API endpoint.\n \"name\": \"organizations\",\n \"position\": 4,\n \"reverse_id\": \"organizations\",\n@@ -130,6 +132,8 @@\n \"is_autocompletable\": True,\n \"is_searchable\": True,\n \"min_doc_count\": 0,\n+ # Note: this is a special name that connects the filter to Person objects\n+ # in Richie as well was the corresponding indexer and API endpoint.\n \"name\": \"persons\",\n \"position\": 5,\n \"reverse_id\": \"persons\",\n", "issue": "On the search page, the \"more options\" feature is broken on the \"contributors\" filter\n## Bug Report\r\n\r\n**Problematic behavior/code**\r\nThe \"more options\" feature on the \"contributors\" filter on the search page is broken.\r\n\r\n**Expected Behavior**\r\nWhen we click on \"more options\" on the \"contributors\" filter on the search page, we expect to see a list of more contributors and be able to type a search request to refine the search and find a specific contributor by his/her first/lastname.\r\n\r\n**Steps to Reproduce**\r\n1. Go to the search page: https://www.fun-mooc.fr/en/courses/\r\n2. Click \"more options\" on the \"contributors\" filter\r\n\r\n**Environment**\r\n- Richie version: 2.5.0\r\n- Platform: docker\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nImport custom settings and set up defaults for values the Search app needs\n\"\"\"\nfrom django.conf import settings\nfrom django.utils.functional import lazy\nfrom django.utils.translation import gettext_lazy as _\n\n# Elasticsearch\nES_CHUNK_SIZE = 500\nES_PAGE_SIZE = 10\n\n# Use a lazy to enable easier testing by not defining the value at bootstrap time\nES_INDICES_PREFIX = lazy(\n lambda: getattr(settings, \"RICHIE_ES_INDICES_PREFIX\", \"richie\")\n)()\n\n# Define which analyzer should be used for each language\nQUERY_ANALYZERS = getattr(\n settings, \"RICHIE_QUERY_ANALYZERS\", {\"en\": \"english\", \"fr\": \"french\"}\n)\n\n# Define the scoring boost (in ElasticSearch) related value names receive when using\n# full-text search.\n# For example, when a user searches for \"Science\" in full-text, it should match any\n# course whose category contains \"Science\" or a related word, albeit with a lower\n# score than courses that include it in their title or description.\n# This lower score factor is the boost value we get or set here.\nRELATED_CONTENT_BOOST = 0.05\n\nFACET_SORTING_DEFAULT = \"count\"\n\nFACET_COUNTS_DEFAULT_LIMIT = getattr(settings, \"RICHIE_FACET_COUNTS_DEFAULT_LIMIT\", 10)\nFACET_COUNTS_MAX_LIMIT = getattr(settings, \"RICHIE_FACET_COUNTS_MAX_LIMIT\", 50)\n\nES_STATE_WEIGHTS = getattr(settings, \"RICHIE_ES_STATE_WEIGHTS\", None) or [\n 80, # ONGOING_OPEN\n 70, # FUTURE_OPEN\n 60, # ARCHIVED_OPEN\n 30, # FUTURE_NOT_YET_OPEN\n 6, # FUTURE_CLOSED\n 5, # ONGOING_CLOSED\n 1, # ARCHIVED_CLOSED\n]\n\nFILTERS_CONFIGURATION = [\n (\n \"richie.apps.search.filter_definitions.StaticChoicesFilterDefinition\",\n {\n \"fragment_map\": {\"new\": [{\"term\": {\"is_new\": True}}]},\n \"human_name\": _(\"New courses\"),\n \"min_doc_count\": 0,\n \"name\": \"new\",\n \"position\": 0,\n \"sorting\": \"conf\",\n \"values\": {\"new\": _(\"First session\")},\n 
},\n ),\n (\n \"richie.apps.search.filter_definitions.NestingWrapper\",\n {\n \"name\": \"course_runs\",\n \"filters\": [\n (\n \"richie.apps.search.filter_definitions.AvailabilityFilterDefinition\",\n {\n \"human_name\": _(\"Availability\"),\n \"is_drilldown\": True,\n \"min_doc_count\": 0,\n \"name\": \"availability\",\n \"position\": 1,\n \"sorting\": \"conf\",\n },\n ),\n (\n \"richie.apps.search.filter_definitions.LanguagesFilterDefinition\",\n {\n \"human_name\": _(\"Languages\"),\n # There are too many available languages to show them all, all the time.\n # Eg. 200 languages, 190+ of which will have 0 matching courses.\n \"min_doc_count\": 1,\n \"name\": \"languages\",\n \"position\": 5,\n },\n ),\n ],\n },\n ),\n (\n \"richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition\",\n {\n \"human_name\": _(\"Subjects\"),\n \"is_autocompletable\": True,\n \"is_searchable\": True,\n \"min_doc_count\": 0,\n \"name\": \"subjects\",\n \"position\": 2,\n \"reverse_id\": \"subjects\",\n \"term\": \"categories\",\n },\n ),\n (\n \"richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition\",\n {\n \"human_name\": _(\"Levels\"),\n \"is_autocompletable\": True,\n \"is_searchable\": True,\n \"min_doc_count\": 0,\n \"name\": \"levels\",\n \"position\": 3,\n \"reverse_id\": \"levels\",\n \"term\": \"categories\",\n },\n ),\n (\n \"richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition\",\n {\n \"human_name\": _(\"Organizations\"),\n \"is_autocompletable\": True,\n \"is_searchable\": True,\n \"min_doc_count\": 0,\n \"name\": \"organizations\",\n \"position\": 4,\n \"reverse_id\": \"organizations\",\n },\n ),\n (\n \"richie.apps.search.filter_definitions.IndexableFilterDefinition\",\n {\n \"human_name\": _(\"Persons\"),\n \"is_autocompletable\": True,\n \"is_searchable\": True,\n \"min_doc_count\": 0,\n \"name\": \"persons\",\n \"position\": 5,\n \"reverse_id\": \"persons\",\n },\n ),\n]\n", "path": "src/richie/apps/search/defaults.py"}]}
| 2,061 | 240 |
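
The comments added by the Richie patch flag `organizations` and `persons` as special values: the `name` kwarg is what ties a filter definition to its indexer and API endpoint. The snippet below is a toy, name-keyed registry built from entries shaped like `FILTERS_CONFIGURATION`; the lookup logic is invented for illustration and is not Richie's actual wiring.

```python
# Hypothetical sketch: resolve filters by the "name" kwarg, the value the
# added comments describe as load-bearing.
FILTERS = [
    ("richie.apps.search.filter_definitions.IndexableHierarchicalFilterDefinition",
     {"human_name": "Organizations", "name": "organizations", "position": 4}),
    ("richie.apps.search.filter_definitions.IndexableFilterDefinition",
     {"human_name": "Persons", "name": "persons", "position": 5}),
]

registry = {kwargs["name"]: (path, kwargs) for path, kwargs in FILTERS}

# Downstream code can now find a filter purely by name, which is exactly
# the coupling the new comments warn maintainers about.
path, kwargs = registry["persons"]
print(kwargs["human_name"])  # Persons
```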
gh_patches_debug_7419
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-6344
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scale/Range incompatibility in examples/models/server/population.py
in master:
Scale/Range incompatibility in examples/models/server/population.py
</issue>
<code>
[start of examples/models/server/population.py]
1 from __future__ import print_function
2
3 from math import pi
4
5 from bokeh.client import push_session
6 from bokeh.document import Document
7 from bokeh.models.glyphs import Line, HBar
8 from bokeh.models import (Plot, ColumnDataSource, DataRange1d, FactorRange,
9 LinearAxis, CategoricalAxis, Grid, Legend, CategoricalScale)
10 from bokeh.sampledata.population import load_population
11 from bokeh.models.widgets import Select
12 from bokeh.models.layouts import WidgetBox, Column
13
14 document = Document()
15 session = push_session(document)
16
17 df = load_population()
18 revision = 2012
19
20 year, location = 2010, "World"
21
22 years = [str(x) for x in sorted(df.Year.unique())]
23 locations = sorted(df.Location.unique())
24 groups = [str(x) for x in df.AgeGrp.unique()]
25 groups.remove('80+') # remove oddball group
26
27 source_pyramid_m = ColumnDataSource(data=dict(value=[], group=[]))
28 source_pyramid_f = ColumnDataSource(data=dict(value=[], group=[]))
29
30 def pyramid():
31 xdr = DataRange1d()
32 ydr = FactorRange(factors=groups)
33 y_scale = CategoricalScale()
34
35 plot = Plot(x_range=xdr, y_range=ydr, y_scale=y_scale, plot_width=600, plot_height=500, toolbar_location=None)
36
37 xaxis = LinearAxis()
38 plot.add_layout(xaxis, 'below')
39 plot.add_layout(CategoricalAxis(), 'left')
40
41 plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
42
43 m = HBar(left="value", right=0, y="group", height=1, fill_color="#3B8686")
44 mglyph = plot.add_glyph(source_pyramid_m, m)
45
46 f = HBar(left=0, right="value", y="group", height=1, fill_color="#CFF09E")
47 fglyph = plot.add_glyph(source_pyramid_f, f)
48
49 plot.add_layout(Legend(items=[("Male" , [mglyph]), ("Female" , [fglyph])]))
50
51 return plot
52
53 source_known = ColumnDataSource(data=dict(x=[], y=[]))
54 source_predicted = ColumnDataSource(data=dict(x=[], y=[]))
55
56 def population():
57 xdr = FactorRange(factors=years)
58 ydr = DataRange1d()
59 y_scale = CategoricalScale()
60
61 plot = Plot(x_range=xdr, y_range=ydr, y_scale=y_scale, plot_width=600, plot_height=150, toolbar_location=None)
62
63 plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')
64
65 known = Line(x="x", y="y", line_color="violet", line_width=2)
66 known_glyph = plot.add_glyph(source_known, known)
67
68 predicted = Line(x="x", y="y", line_color="violet", line_width=2, line_dash="dashed")
69 predicted_glyph = plot.add_glyph(source_predicted, predicted)
70
71 legend = Legend(location="bottom_right",
72 items=[("known", [known_glyph]), ("predicted", [predicted_glyph])])
73 plot.add_layout(legend)
74
75 return plot
76
77 def update_pyramid():
78 pyramid = df[(df.Location == location) & (df.Year == year)]
79
80 male = pyramid[pyramid.Sex == "Male"]
81 female = pyramid[pyramid.Sex == "Female"]
82
83 total = df.Value.sum()
84 male_percent = -male.Value / total
85 female_percent = female.Value / total
86
87 source_pyramid_m.data = dict(
88 group=[str(x) for x in male.AgeGrp.unique()],
89 value=male_percent,
90 )
91 source_pyramid_f.data = dict(
92 group=[str(x) for x in female.AgeGrp.unique()],
93 value=female_percent,
94 )
95
96 def update_population():
97 population = df[df.Location == location].groupby(df.Year).Value.sum()
98 aligned_revision = revision // 10 * 10
99
100 known = population[population.index <= aligned_revision]
101 predicted = population[population.index >= aligned_revision]
102
103 source_known.data = dict(x=known.index.map(str), y=known.values)
104 source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)
105
106 def update_data():
107 update_population()
108 update_pyramid()
109
110 def on_year_change(attr, old, new):
111 global year
112 year = int(new)
113 update_data()
114
115 def on_location_change(attr, old, new):
116 global location
117 location = new
118 update_data()
119
120 def create_layout():
121 year_select = Select(title="Year:", value="2010", options=years)
122 location_select = Select(title="Location:", value="World", options=locations)
123
124 year_select.on_change('value', on_year_change)
125 location_select.on_change('value', on_location_change)
126
127 controls = WidgetBox(children=[year_select, location_select], height=150, width=600)
128 layout = Column(children=[controls, pyramid(), population()])
129
130 return layout
131
132 layout = create_layout()
133
134 update_data()
135
136 document.add_root(layout)
137 session.show(layout)
138
139 if __name__ == "__main__":
140 document.validate()
141 print("\npress ctrl-C to exit")
142 session.loop_until_closed()
143
[end of examples/models/server/population.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/models/server/population.py b/examples/models/server/population.py
--- a/examples/models/server/population.py
+++ b/examples/models/server/population.py
@@ -56,9 +56,9 @@
def population():
xdr = FactorRange(factors=years)
ydr = DataRange1d()
- y_scale = CategoricalScale()
+ x_scale = CategoricalScale()
- plot = Plot(x_range=xdr, y_range=ydr, y_scale=y_scale, plot_width=600, plot_height=150, toolbar_location=None)
+ plot = Plot(x_range=xdr, y_range=ydr, x_scale=x_scale, plot_width=600, plot_height=150, toolbar_location=None)
plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')
|
{"golden_diff": "diff --git a/examples/models/server/population.py b/examples/models/server/population.py\n--- a/examples/models/server/population.py\n+++ b/examples/models/server/population.py\n@@ -56,9 +56,9 @@\n def population():\n xdr = FactorRange(factors=years)\n ydr = DataRange1d()\n- y_scale = CategoricalScale()\n+ x_scale = CategoricalScale()\n \n- plot = Plot(x_range=xdr, y_range=ydr, y_scale=y_scale, plot_width=600, plot_height=150, toolbar_location=None)\n+ plot = Plot(x_range=xdr, y_range=ydr, x_scale=x_scale, plot_width=600, plot_height=150, toolbar_location=None)\n \n plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')\n", "issue": "Scale/Range incompatibility in examples/models/server/population.py\nin master:\r\n\r\nScale/Range incompatibility in examples/models/server/population.py\n", "before_files": [{"content": "from __future__ import print_function\n\nfrom math import pi\n\nfrom bokeh.client import push_session\nfrom bokeh.document import Document\nfrom bokeh.models.glyphs import Line, HBar\nfrom bokeh.models import (Plot, ColumnDataSource, DataRange1d, FactorRange,\n LinearAxis, CategoricalAxis, Grid, Legend, CategoricalScale)\nfrom bokeh.sampledata.population import load_population\nfrom bokeh.models.widgets import Select\nfrom bokeh.models.layouts import WidgetBox, Column\n\ndocument = Document()\nsession = push_session(document)\n\ndf = load_population()\nrevision = 2012\n\nyear, location = 2010, \"World\"\n\nyears = [str(x) for x in sorted(df.Year.unique())]\nlocations = sorted(df.Location.unique())\ngroups = [str(x) for x in df.AgeGrp.unique()]\ngroups.remove('80+') # remove oddball group\n\nsource_pyramid_m = ColumnDataSource(data=dict(value=[], group=[]))\nsource_pyramid_f = ColumnDataSource(data=dict(value=[], group=[]))\n\ndef pyramid():\n xdr = DataRange1d()\n ydr = FactorRange(factors=groups)\n y_scale = CategoricalScale()\n\n plot = Plot(x_range=xdr, y_range=ydr, y_scale=y_scale, plot_width=600, plot_height=500, toolbar_location=None)\n\n xaxis = LinearAxis()\n plot.add_layout(xaxis, 'below')\n plot.add_layout(CategoricalAxis(), 'left')\n\n plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\n\n m = HBar(left=\"value\", right=0, y=\"group\", height=1, fill_color=\"#3B8686\")\n mglyph = plot.add_glyph(source_pyramid_m, m)\n\n f = HBar(left=0, right=\"value\", y=\"group\", height=1, fill_color=\"#CFF09E\")\n fglyph = plot.add_glyph(source_pyramid_f, f)\n\n plot.add_layout(Legend(items=[(\"Male\" , [mglyph]), (\"Female\" , [fglyph])]))\n\n return plot\n\nsource_known = ColumnDataSource(data=dict(x=[], y=[]))\nsource_predicted = ColumnDataSource(data=dict(x=[], y=[]))\n\ndef population():\n xdr = FactorRange(factors=years)\n ydr = DataRange1d()\n y_scale = CategoricalScale()\n\n plot = Plot(x_range=xdr, y_range=ydr, y_scale=y_scale, plot_width=600, plot_height=150, toolbar_location=None)\n\n plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')\n\n known = Line(x=\"x\", y=\"y\", line_color=\"violet\", line_width=2)\n known_glyph = plot.add_glyph(source_known, known)\n\n predicted = Line(x=\"x\", y=\"y\", line_color=\"violet\", line_width=2, line_dash=\"dashed\")\n predicted_glyph = plot.add_glyph(source_predicted, predicted)\n\n legend = Legend(location=\"bottom_right\",\n items=[(\"known\", [known_glyph]), (\"predicted\", [predicted_glyph])])\n plot.add_layout(legend)\n\n return plot\n\ndef update_pyramid():\n pyramid = df[(df.Location == location) & (df.Year == year)]\n\n male = pyramid[pyramid.Sex == 
\"Male\"]\n female = pyramid[pyramid.Sex == \"Female\"]\n\n total = df.Value.sum()\n male_percent = -male.Value / total\n female_percent = female.Value / total\n\n source_pyramid_m.data = dict(\n group=[str(x) for x in male.AgeGrp.unique()],\n value=male_percent,\n )\n source_pyramid_f.data = dict(\n group=[str(x) for x in female.AgeGrp.unique()],\n value=female_percent,\n )\n\ndef update_population():\n population = df[df.Location == location].groupby(df.Year).Value.sum()\n aligned_revision = revision // 10 * 10\n\n known = population[population.index <= aligned_revision]\n predicted = population[population.index >= aligned_revision]\n\n source_known.data = dict(x=known.index.map(str), y=known.values)\n source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)\n\ndef update_data():\n update_population()\n update_pyramid()\n\ndef on_year_change(attr, old, new):\n global year\n year = int(new)\n update_data()\n\ndef on_location_change(attr, old, new):\n global location\n location = new\n update_data()\n\ndef create_layout():\n year_select = Select(title=\"Year:\", value=\"2010\", options=years)\n location_select = Select(title=\"Location:\", value=\"World\", options=locations)\n\n year_select.on_change('value', on_year_change)\n location_select.on_change('value', on_location_change)\n\n controls = WidgetBox(children=[year_select, location_select], height=150, width=600)\n layout = Column(children=[controls, pyramid(), population()])\n\n return layout\n\nlayout = create_layout()\n\nupdate_data()\n\ndocument.add_root(layout)\nsession.show(layout)\n\nif __name__ == \"__main__\":\n document.validate()\n print(\"\\npress ctrl-C to exit\")\n session.loop_until_closed()\n", "path": "examples/models/server/population.py"}]}
| 2,049 | 187 |
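
The Bokeh fix reduces to a pairing rule: the `CategoricalScale` must sit on the same axis as the `FactorRange`, which in the `population()` plot is x, while the numeric axis keeps its continuous range. A minimal sketch of that pairing follows; `LinearScale` is my addition for the numeric side, and the `years` list is a stand-in for the sampledata values.

```python
from bokeh.models import (CategoricalScale, DataRange1d, FactorRange,
                          LinearScale, Plot)

years = ["2010", "2011", "2012"]  # placeholder for the population years

# Categorical pieces travel together: a FactorRange on x needs a
# CategoricalScale on x.  The y axis stays numeric, so it keeps a
# DataRange1d paired with a LinearScale.
plot = Plot(
    x_range=FactorRange(factors=years), x_scale=CategoricalScale(),
    y_range=DataRange1d(), y_scale=LinearScale(),
    plot_width=600, plot_height=150, toolbar_location=None,
)
```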
gh_patches_debug_3350
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-422
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search suggestions are lumped together if Yahoo is enabled
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
1.0.0-940-32fb2bdf, master branch, not forked
**How did you install SearXNG?**
searxng-docker, fresh install from yesterday.
**What happened?**
Search keyword suggestions are lumped together in one.
**How To Reproduce**
Enable the Yahoo engine.
You can also reproduce this issue with the Yahoo bang (!yh).
**Expected behavior**
Normally, you would have separate keyword suggestions instead of what's happening right now.
**Screenshots & Logs**

**Additional context**
I have Google, Qwant, Duckduckgo, Startpage, Brave and Yahoo engines enabled by default for all users.
</issue>
<code>
[start of searx/engines/yahoo.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Yahoo Search (Web)
4
5 Languages are supported by mapping the language to a domain. If domain is not
6 found in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.
7
8 """
9
10 from urllib.parse import (
11 unquote,
12 urlencode,
13 )
14 from lxml import html
15
16 from searx.utils import (
17 eval_xpath_getindex,
18 eval_xpath_list,
19 extract_text,
20 match_language,
21 )
22
23 # about
24 about = {
25 "website": 'https://search.yahoo.com/',
26 "wikidata_id": None,
27 "official_api_documentation": 'https://developer.yahoo.com/api/',
28 "use_official_api": False,
29 "require_api_key": False,
30 "results": 'HTML',
31 }
32
33 # engine dependent config
34 categories = ['general']
35 paging = True
36 time_range_support = True
37 supported_languages_url = 'https://search.yahoo.com/preferences/languages'
38 """Supported languages are read from Yahoo preference page."""
39
40 time_range_dict = {
41 'day': ('1d', 'd'),
42 'week': ('1w', 'w'),
43 'month': ('1m', 'm'),
44 }
45
46 language_aliases = {
47 'zh-HK': 'zh_chs',
48 'zh-CN': 'zh_chs', # dead since 2015 / routed to hk.search.yahoo.com
49 'zh-TW': 'zh_cht',
50 }
51
52 lang2domain = {
53 'zh_chs' : 'hk.search.yahoo.com',
54 'zh_cht' : 'tw.search.yahoo.com',
55 'en' : 'search.yahoo.com',
56
57 'bg' : 'search.yahoo.com',
58 'cs' : 'search.yahoo.com',
59 'da' : 'search.yahoo.com',
60 'el' : 'search.yahoo.com',
61 'et' : 'search.yahoo.com',
62 'he' : 'search.yahoo.com',
63 'hr' : 'search.yahoo.com',
64 'ja' : 'search.yahoo.com',
65 'ko' : 'search.yahoo.com',
66 'sk' : 'search.yahoo.com',
67 'sl' : 'search.yahoo.com',
68
69 }
70 """Map language to domain"""
71
72 def _get_language(params):
73
74 lang = language_aliases.get(params['language'])
75 if lang is None:
76 lang = match_language(
77 params['language'], supported_languages, language_aliases
78 )
79 lang = lang.split('-')[0]
80 logger.debug("params['language']: %s --> %s" , params['language'], lang)
81 return lang
82
83 def request(query, params):
84 """build request"""
85 offset = (params['pageno'] - 1) * 7 + 1
86 lang = _get_language(params)
87 age, btf = time_range_dict.get(
88 params['time_range'], ('', ''))
89
90 args = urlencode({
91 'p' : query,
92 'ei' : 'UTF-8',
93 'fl' : 1,
94 'vl' : 'lang_' + lang,
95 'btf' : btf,
96 'fr2' : 'time',
97 'age' : age,
98 'b' : offset,
99 'xargs' :0
100 })
101
102 domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)
103 params['url'] = 'https://%s/search?%s' % (domain, args)
104 return params
105
106 def parse_url(url_string):
107 """remove yahoo-specific tracking-url"""
108
109 endings = ['/RS', '/RK']
110 endpositions = []
111 start = url_string.find('http', url_string.find('/RU=') + 1)
112
113 for ending in endings:
114 endpos = url_string.rfind(ending)
115 if endpos > -1:
116 endpositions.append(endpos)
117
118 if start == 0 or len(endpositions) == 0:
119 return url_string
120
121 end = min(endpositions)
122 return unquote(url_string[start:end])
123
124 def response(resp):
125 """parse response"""
126
127 results = []
128 dom = html.fromstring(resp.text)
129
130 # parse results
131 for result in eval_xpath_list(dom, '//div[contains(@class,"algo-sr")]'):
132 url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)
133 if url is None:
134 continue
135 url = parse_url(url)
136
137 title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
138 if title is None:
139 continue
140 offset = len(extract_text(title.xpath('span')))
141 title = extract_text(title)[offset:]
142
143 content = eval_xpath_getindex(
144 result, './/div[contains(@class, "compText")]', 0, default=''
145 )
146 if content:
147 content = extract_text(content)
148
149 # append result
150 results.append({
151 'url': url,
152 'title': title,
153 'content': content
154 })
155
156 for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]'):
157 # append suggestion
158 results.append({'suggestion': extract_text(suggestion)})
159
160 return results
161
162
163 # get supported languages from their site
164 def _fetch_supported_languages(resp):
165 supported_languages = []
166 dom = html.fromstring(resp.text)
167 offset = len('lang_')
168
169 for val in eval_xpath_list(dom, '//div[contains(@class, "lang-item")]/input/@value'):
170 supported_languages.append( val[offset:] )
171
172 return supported_languages
173
[end of searx/engines/yahoo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -153,7 +153,7 @@
'content': content
})
- for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]'):
+ for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]//table//a'):
# append suggestion
results.append({'suggestion': extract_text(suggestion)})
|
{"golden_diff": "diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py\n--- a/searx/engines/yahoo.py\n+++ b/searx/engines/yahoo.py\n@@ -153,7 +153,7 @@\n 'content': content\n })\n \n- for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]'):\n+ for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]//table//a'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n", "issue": "Search suggestions are lumped together if Yahoo is enabled\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n1.0.0-940-32fb2bdf, master branch, not forked\r\n\r\n**How did you install SearXNG?**\r\nsearxng-docker, fresh install from yesterday.\r\n\r\n**What happened?**\r\nSearch keyword suggestions are lumped together in one.\r\n\r\n**How To Reproduce**\r\nEnable the Yahoo engine.\r\nYou can also reproduce this issue with the Yahoo bang (!yh). \r\n\r\n**Expected behavior**\r\nNormally, you would have separate keyword suggestions instead of what's happening right now. \r\n\r\n**Screenshots & Logs**\r\n\r\n\r\n**Additional context**\r\nI have Google, Qwant, Duckduckgo, Startpage, Brave and Yahoo engines enabled by default for all users.\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Yahoo Search (Web)\n\nLanguages are supported by mapping the language to a domain. If domain is not\nfound in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.\n\n\"\"\"\n\nfrom urllib.parse import (\n unquote,\n urlencode,\n)\nfrom lxml import html\n\nfrom searx.utils import (\n eval_xpath_getindex,\n eval_xpath_list,\n extract_text,\n match_language,\n)\n\n# about\nabout = {\n \"website\": 'https://search.yahoo.com/',\n \"wikidata_id\": None,\n \"official_api_documentation\": 'https://developer.yahoo.com/api/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general']\npaging = True\ntime_range_support = True\nsupported_languages_url = 'https://search.yahoo.com/preferences/languages'\n\"\"\"Supported languages are read from Yahoo preference page.\"\"\"\n\ntime_range_dict = {\n 'day': ('1d', 'd'),\n 'week': ('1w', 'w'),\n 'month': ('1m', 'm'),\n}\n\nlanguage_aliases = {\n 'zh-HK': 'zh_chs',\n 'zh-CN': 'zh_chs', # dead since 2015 / routed to hk.search.yahoo.com\n 'zh-TW': 'zh_cht',\n}\n\nlang2domain = {\n 'zh_chs' : 'hk.search.yahoo.com',\n 'zh_cht' : 'tw.search.yahoo.com',\n 'en' : 'search.yahoo.com',\n\n 'bg' : 'search.yahoo.com',\n 'cs' : 'search.yahoo.com',\n 'da' : 'search.yahoo.com',\n 'el' : 'search.yahoo.com',\n 'et' : 'search.yahoo.com',\n 'he' : 'search.yahoo.com',\n 'hr' : 'search.yahoo.com',\n 'ja' : 'search.yahoo.com',\n 'ko' : 'search.yahoo.com',\n 'sk' : 'search.yahoo.com',\n 'sl' : 'search.yahoo.com',\n\n}\n\"\"\"Map language to domain\"\"\"\n\ndef _get_language(params):\n\n lang = language_aliases.get(params['language'])\n if lang is None:\n lang = match_language(\n params['language'], supported_languages, language_aliases\n )\n lang = lang.split('-')[0]\n logger.debug(\"params['language']: %s --> %s\" , params['language'], lang)\n return lang\n\ndef request(query, params):\n \"\"\"build request\"\"\"\n offset = (params['pageno'] - 1) * 7 + 1\n lang = _get_language(params)\n age, btf = time_range_dict.get(\n params['time_range'], ('', ''))\n\n args = urlencode({\n 'p' : query,\n 'ei' : 'UTF-8',\n 'fl' : 1,\n 'vl' : 'lang_' + lang,\n 
'btf' : btf,\n 'fr2' : 'time',\n 'age' : age,\n 'b' : offset,\n 'xargs' :0\n })\n\n domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)\n params['url'] = 'https://%s/search?%s' % (domain, args)\n return params\n\ndef parse_url(url_string):\n \"\"\"remove yahoo-specific tracking-url\"\"\"\n\n endings = ['/RS', '/RK']\n endpositions = []\n start = url_string.find('http', url_string.find('/RU=') + 1)\n\n for ending in endings:\n endpos = url_string.rfind(ending)\n if endpos > -1:\n endpositions.append(endpos)\n\n if start == 0 or len(endpositions) == 0:\n return url_string\n\n end = min(endpositions)\n return unquote(url_string[start:end])\n\ndef response(resp):\n \"\"\"parse response\"\"\"\n\n results = []\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in eval_xpath_list(dom, '//div[contains(@class,\"algo-sr\")]'):\n url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)\n if url is None:\n continue\n url = parse_url(url)\n\n title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\n if title is None:\n continue\n offset = len(extract_text(title.xpath('span')))\n title = extract_text(title)[offset:]\n\n content = eval_xpath_getindex(\n result, './/div[contains(@class, \"compText\")]', 0, default=''\n )\n if content:\n content = extract_text(content)\n\n # append result\n results.append({\n 'url': url,\n 'title': title,\n 'content': content\n })\n\n for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = []\n dom = html.fromstring(resp.text)\n offset = len('lang_')\n\n for val in eval_xpath_list(dom, '//div[contains(@class, \"lang-item\")]/input/@value'):\n supported_languages.append( val[offset:] )\n\n return supported_languages\n", "path": "searx/engines/yahoo.py"}]}
| 2,422 | 134 |
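The yahoo.py record above fixes lumped-together suggestions by narrowing the suggestion XPath from the whole `AlsoTry` block to the individual anchors inside its table. A minimal standalone sketch of that idea, assuming only lxml and a simplified HTML fragment (the real Yahoo markup is more complex, and searx's `extract_text` helper is replaced here by lxml's `text_content`):

```python
# Hedged sketch of the narrowed suggestion XPath; markup is a stand-in.
from lxml import html

fragment = """
<div class="AlsoTry">
  <table><tr>
    <td><a>searx fork</a></td>
    <td><a>searxng docker</a></td>
  </tr></table>
</div>
"""
dom = html.fromstring(fragment)

# old query: one element per AlsoTry block, so suggestions come out lumped
lumped = [e.text_content() for e in dom.xpath('//div[contains(@class, "AlsoTry")]')]
# new query: one element per anchor, so each suggestion is separate
separate = [e.text_content() for e in dom.xpath('//div[contains(@class, "AlsoTry")]//table//a')]

print(len(lumped))   # 1, a single concatenated string for the whole block
print(separate)      # ['searx fork', 'searxng docker']
```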
| gh_patches_debug_762 | rasdani/github-patches | git_diff | kubeflow__pipelines-2610 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kfp 0.1.35 tar.gz in pypi.org is missing diagnose_me directory
**What happened:**
The 0.1.35 release of kfp available on pypi.org (i.e. what is installed via `pip3 install kfp`) seems to be missing the `kfp/cli/diagnose_me` directory containing the diagnose_me modules required by the cli. The release hosted on github contains these files.
This is the tar.gz file hosted on pypi: https://files.pythonhosted.org/packages/e8/02/51dbeae211ddf1c931b2d1613db90856b7d94a53c1d9f704593dfa6253ae/kfp-0.1.35.tar.gz
If you try to install and run kfp 0.1.35 via pip it causes an error:
```
Traceback (most recent call last):
File "/Users/shenderson/venvs/kubeflow/bin/kfp", line 5, in <module>
from kfp.__main__ import main
File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module>
from .cli.cli import main
File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/cli.py", line 21, in <module>
from .diagnose_me_cli import diagnose_me
File "/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/diagnose_me_cli.py", line 6, in <module>
from .diagnose_me import dev_env
ModuleNotFoundError: No module named 'kfp.cli.diagnose_me'
```
**What did you expect to happen:**
All kfp modules including the diagnose_me package to be installed.
**What steps did you take:**
* Run `pip3 install --upgrade --force --no-cache-dir kfp`
* Run `kfp`
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17 from setuptools import setup
18
19 NAME = 'kfp'
20 #VERSION = .... Change the version in kfp/__init__.py
21
22 REQUIRES = [
23 'urllib3>=1.15,<1.25', #Fixing the version conflict with the "requests" package
24 'six >= 1.10',
25 'certifi',
26 'python-dateutil',
27 'PyYAML',
28 'google-cloud-storage>=1.13.0',
29 'kubernetes>=8.0.0, <=9.0.0',
30 'PyJWT>=1.6.4',
31 'cryptography>=2.4.2',
32 'google-auth>=1.6.1',
33 'requests_toolbelt>=0.8.0',
34 'cloudpickle==1.1.1',
35 'kfp-server-api >= 0.1.18, <= 0.1.25', #Update the upper version whenever a new version of the kfp-server-api package is released. Update the lower version when there is a breaking change in kfp-server-api.
36 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1
37 'jsonschema >= 3.0.1',
38 'tabulate == 0.8.3',
39 'click == 7.0',
40 'Deprecated',
41 ]
42
43 def find_version(*file_path_parts):
44 here = os.path.abspath(os.path.dirname(__file__))
45 with open(os.path.join(here, *file_path_parts), 'r') as fp:
46 version_file_text = fp.read()
47
48 version_match = re.search(
49 r"^__version__ = ['\"]([^'\"]*)['\"]",
50 version_file_text,
51 re.M,
52 )
53 if version_match:
54 return version_match.group(1)
55
56 raise RuntimeError("Unable to find version string.")
57
58 setup(
59 name=NAME,
60 version=find_version("kfp", "__init__.py"),
61 description='KubeFlow Pipelines SDK',
62 author='google',
63 install_requires=REQUIRES,
64 packages=[
65 'kfp',
66 'kfp.cli',
67 'kfp.compiler',
68 'kfp.components',
69 'kfp.components.structures',
70 'kfp.components.structures.kubernetes',
71 'kfp.containers',
72 'kfp.dsl',
73 'kfp.notebook',
74 ],
75 classifiers=[
76 'Intended Audience :: Developers',
77 'Intended Audience :: Education',
78 'Intended Audience :: Science/Research',
79 'License :: OSI Approved :: Apache Software License',
80 'Programming Language :: Python :: 3',
81 'Programming Language :: Python :: 3.5',
82 'Programming Language :: Python :: 3.6',
83 'Programming Language :: Python :: 3.7',
84 'Topic :: Scientific/Engineering',
85 'Topic :: Scientific/Engineering :: Artificial Intelligence',
86 'Topic :: Software Development',
87 'Topic :: Software Development :: Libraries',
88 'Topic :: Software Development :: Libraries :: Python Modules',
89 ],
90 python_requires='>=3.5.3',
91 include_package_data=True,
92 entry_points={
93 'console_scripts': [
94 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'
95 ]
96 })
97
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -64,6 +64,7 @@
packages=[
'kfp',
'kfp.cli',
+ 'kfp.cli.diagnose_me',
'kfp.compiler',
'kfp.components',
'kfp.components.structures',
|
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -64,6 +64,7 @@\n packages=[\n 'kfp',\n 'kfp.cli',\n+ 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n", "issue": "kfp 0.1.35 tar.gz in pypi.org is missing diagnose_me directory\n**What happened:**\r\nThe 0.1.35 release of kfp available on pypi.org (i.e. what is installed via `pip3 install kfp`) seems to be missing the `kfp/cli/diagnose_me` directory containing the diagnose_me modules required by the cli. The release hosted on github contains these files.\r\n\r\nThis is the tar.gz file hosted on pypi: https://files.pythonhosted.org/packages/e8/02/51dbeae211ddf1c931b2d1613db90856b7d94a53c1d9f704593dfa6253ae/kfp-0.1.35.tar.gz\r\n\r\nIf you try to install and run kfp 0.1.35 via pip it causes an error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/shenderson/venvs/kubeflow/bin/kfp\", line 5, in <module>\r\n from kfp.__main__ import main\r\n File \"/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/__main__.py\", line 15, in <module>\r\n from .cli.cli import main\r\n File \"/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/cli.py\", line 21, in <module>\r\n from .diagnose_me_cli import diagnose_me\r\n File \"/Users/shenderson/venvs/kubeflow/lib/python3.7/site-packages/kfp/cli/diagnose_me_cli.py\", line 6, in <module>\r\n from .diagnose_me import dev_env\r\nModuleNotFoundError: No module named 'kfp.cli.diagnose_me'\r\n```\r\n\r\n**What did you expect to happen:**\r\nAll kfp modules including the diagnose_me package to be installed.\r\n\r\n**What steps did you take:**\r\n* Run `pip3 install --upgrade --force --no-cache-dir kfp`\r\n* Run `kfp`\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle==1.1.1',\n 'kfp-server-api >= 0.1.18, <= 0.1.25', #Update the upper version whenever a new version of the kfp-server-api package is released. 
Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0',\n 'Deprecated',\n]\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n name=NAME,\n version=find_version(\"kfp\", \"__init__.py\"),\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main', 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]}
| 2,036 | 85 |
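The kfp record above traces the missing `kfp.cli.diagnose_me` module back to the hand-maintained `packages` list in `setup.py`; the golden diff simply appends the new subpackage. A hypothetical alternative sketch (not what the project shipped, and the `exclude` patterns are assumptions) shows how automatic discovery avoids this class of packaging bug:

```python
# Sketch only: let setuptools discover subpackages such as kfp.cli.diagnose_me.
from setuptools import find_packages, setup

setup(
    name="kfp",
    # version, dependencies, entry_points, and classifiers omitted for brevity
    packages=find_packages(exclude=["tests", "tests.*"]),
    include_package_data=True,
)
```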
| gh_patches_debug_25943 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-2437 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Updated input manifests creates old version manifests part of `legacy-manifests/` folder
### Describe the bug
With the change that moved the old input manifests into the [legacy-manifests folder](url) in the build repo, the [auto-generate manifest workflow](https://github.com/opensearch-project/opensearch-build/blob/main/.github/workflows/versions.yml) re-creates even the manifests that already sit in the legacy-manifests folder, assuming they do not exist.
Sample PR.
https://github.com/opensearch-project/opensearch-build/pull/2389/files
### To reproduce
The workflow PR
https://github.com/opensearch-project/opensearch-build/pull/2389/files
### Expected behavior
The `./manifest.sh update` logic should be modified:
1) Either it should only create manifests whose version is greater than the versions already recorded in the [legacy manifest folder](https://github.com/opensearch-project/opensearch-build/tree/main/legacy-manifests), or
2) add logic that compares both the manifests and legacy-manifests folders.
### Screenshots
If applicable, add screenshots to help explain your problem.
### Host / Environment
_No response_
### Additional context
_No response_
### Relevant log output
_No response_
</issue>
<code>
[start of src/manifests_workflow/input_manifests.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import glob
8 import logging
9 import os
10 import re
11 from abc import abstractmethod
12 from typing import Dict, List, Type, Union
13
14 from manifests.input_manifest import InputComponents, InputManifest
15 from manifests.manifests import Manifests
16 from manifests_workflow.component_opensearch import ComponentOpenSearch
17 from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin
18 from manifests_workflow.component_opensearch_min import ComponentOpenSearchMin
19 from system.temporary_directory import TemporaryDirectory
20
21
22 class InputManifests(Manifests):
23 def __init__(self, name: str) -> None:
24 self.name = name
25 self.prefix = name.lower().replace(" ", "-")
26 super().__init__(InputManifest, InputManifests.files(self.prefix))
27
28 @classmethod
29 def manifests_path(self) -> str:
30 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "manifests"))
31
32 @classmethod
33 def jenkins_path(self) -> str:
34 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "jenkins"))
35
36 @classmethod
37 def cron_jenkinsfile(self) -> str:
38 return os.path.join(self.jenkins_path(), "check-for-build.jenkinsfile")
39
40 @classmethod
41 def files(self, name: str) -> List:
42 results = []
43 for filename in glob.glob(os.path.join(self.manifests_path(), f"**/{name}-*.yml")):
44 # avoids the -maven manifest
45 match = re.search(rf"^{name}-([0-9.]*).yml$", os.path.basename(filename))
46 if match:
47 results.append(filename)
48 return results
49
50 @abstractmethod
51 def update(
52 self,
53 min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],
54 component_klass: Type[ComponentOpenSearch],
55 keep: bool = False,
56 ) -> None:
57 known_versions = self.versions
58 logging.info(f"Known versions: {known_versions}")
59 main_versions: Dict = {}
60 with TemporaryDirectory(keep=keep, chdir=True) as work_dir:
61 logging.info(f"Checking out components into {work_dir.name}")
62
63 # check out and build #main, 1.x, etc.
64 branches = min_klass.branches()
65
66 logging.info(f"Checking {self.name} {branches} branches")
67 for branch in branches:
68 c = min_klass.checkout(
69 path=os.path.join(work_dir.name, self.name.replace(" ", ""), branch),
70 branch=branch,
71 )
72
73 version = c.version
74 logging.info(f"{self.name}#{branch} is version {version}")
75 if version not in main_versions.keys():
76 main_versions[version] = [c]
77
78 if component_klass is not None:
79 # components can increment their own version first without incrementing min
80 manifest = self.latest
81 logging.info(f"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})")
82 for component in manifest.components.values():
83 if component.name == self.name:
84 continue
85
86 logging.info(f"Checking out {component.name}#main")
87 component = component_klass.checkout(
88 name=component.name,
89 path=os.path.join(work_dir.name, component.name),
90 opensearch_version=manifest.build.version,
91 branch="main",
92 )
93
94 component_version = component.version
95 if component_version:
96 release_version = ".".join(component_version.split(".")[:3])
97 if release_version not in main_versions.keys():
98 main_versions[release_version] = []
99 main_versions[release_version].append(component)
100 logging.info(f"{component.name}#main is version {release_version} (from {component_version})")
101
102 # summarize
103 logging.info("Found versions on main:")
104 for main_version in main_versions.keys():
105 for component in main_versions[main_version]:
106 logging.info(f" {component.name}={main_version}")
107
108 # generate new manifests
109 for release_version in sorted(main_versions.keys() - known_versions):
110 self.write_manifest(release_version, main_versions[release_version])
111 self.add_to_cron(release_version)
112
113 def create_manifest(self, version: str, components: List = []) -> InputManifest:
114 templates_base_path = os.path.join(self.manifests_path(), "templates")
115 template_version_folder = version.split(".")[0] + ".x"
116 template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, "manifest.yml")
117 if not os.path.exists(template_full_path):
118 template_full_path = os.path.join(templates_base_path, self.prefix, "default", "manifest.yml")
119
120 manifest = InputManifest.from_file(open(template_full_path))
121
122 manifest.build.version = version
123 manifests_components = []
124
125 for component in components:
126 logging.info(f" Adding {component.name}")
127 manifests_components.append(component.to_dict())
128
129 manifest.components = InputComponents(manifests_components) # type: ignore
130 return manifest
131
132 def write_manifest(self, version: str, components: List = []) -> None:
133 logging.info(f"Creating new version: {version}")
134 manifest = self.create_manifest(version, components)
135 manifest_dir = os.path.join(self.manifests_path(), version)
136 os.makedirs(manifest_dir, exist_ok=True)
137 manifest_path = os.path.join(manifest_dir, f"{self.prefix}-{version}.yml")
138 manifest.to_file(manifest_path)
139 logging.info(f"Wrote {manifest_path}")
140
141 def add_to_cron(self, version: str) -> None:
142 logging.info(f"Adding new version to cron: {version}")
143 jenkinsfile = self.cron_jenkinsfile()
144 with open(jenkinsfile, "r") as f:
145 data = f.read()
146
147 cron_entry = f"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\n"
148
149 if cron_entry in data:
150 raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}")
151
152 data = data.replace("parameterizedCron '''\n", f"parameterizedCron '''\n{' ' * 12}{cron_entry}")
153
154 with open(jenkinsfile, "w") as f:
155 f.write(data)
156
157 logging.info(f"Wrote {jenkinsfile}")
158
[end of src/manifests_workflow/input_manifests.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py
--- a/src/manifests_workflow/input_manifests.py
+++ b/src/manifests_workflow/input_manifests.py
@@ -29,6 +29,10 @@
def manifests_path(self) -> str:
return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "manifests"))
+ @classmethod
+ def legacy_manifests_path(self) -> str:
+ return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "legacy-manifests"))
+
@classmethod
def jenkins_path(self) -> str:
return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "jenkins"))
@@ -40,11 +44,12 @@
@classmethod
def files(self, name: str) -> List:
results = []
- for filename in glob.glob(os.path.join(self.manifests_path(), f"**/{name}-*.yml")):
- # avoids the -maven manifest
- match = re.search(rf"^{name}-([0-9.]*).yml$", os.path.basename(filename))
- if match:
- results.append(filename)
+ for path in [self.manifests_path(), self.legacy_manifests_path()]:
+ for filename in glob.glob(os.path.join(path, f"**/{name}-*.yml")):
+ # avoids the -maven manifest
+ match = re.search(rf"^{name}-([0-9.]*).yml$", os.path.basename(filename))
+ if match:
+ results.append(filename)
return results
@abstractmethod
|
{"golden_diff": "diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py\n--- a/src/manifests_workflow/input_manifests.py\n+++ b/src/manifests_workflow/input_manifests.py\n@@ -29,6 +29,10 @@\n def manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"manifests\"))\n \n+ @classmethod\n+ def legacy_manifests_path(self) -> str:\n+ return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"legacy-manifests\"))\n+\n @classmethod\n def jenkins_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"jenkins\"))\n@@ -40,11 +44,12 @@\n @classmethod\n def files(self, name: str) -> List:\n results = []\n- for filename in glob.glob(os.path.join(self.manifests_path(), f\"**/{name}-*.yml\")):\n- # avoids the -maven manifest\n- match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n- if match:\n- results.append(filename)\n+ for path in [self.manifests_path(), self.legacy_manifests_path()]:\n+ for filename in glob.glob(os.path.join(path, f\"**/{name}-*.yml\")):\n+ # avoids the -maven manifest\n+ match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n+ if match:\n+ results.append(filename)\n return results\n \n @abstractmethod\n", "issue": "[Bug]: Updated input manifests creates old version manifests part of `legacy-manifests/` folder\n### Describe the bug\n\nWith the change to move the old input manifests to [legacy-manifests folder](url) in build repo, the [auto generate manifest workflow ](https://github.com/opensearch-project/opensearch-build/blob/main/.github/workflows/versions.yml) creates even the manifests part of the legacy-manifests folder assuming they does not exist.\r\nSample PR.\r\nhttps://github.com/opensearch-project/opensearch-build/pull/2389/files\n\n### To reproduce\n\nThe workflow PR\r\nhttps://github.com/opensearch-project/opensearch-build/pull/2389/files\n\n### Expected behavior\n\nThe `./manifest.sh update` logic should be modified:\r\n1) Either it should create manifests greater than the version number from the manifests inside the [legacy manifest folder](https://github.com/opensearch-project/opensearch-build/tree/main/legacy-manifests)\r\n2) Logic to compare both manifests and legacy-manifests folder.\n\n### Screenshots\n\nIf applicable, add screenshots to help explain your problem.\n\n### Host / Environment\n\n_No response_\n\n### Additional context\n\n_No response_\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport glob\nimport logging\nimport os\nimport re\nfrom abc import abstractmethod\nfrom typing import Dict, List, Type, Union\n\nfrom manifests.input_manifest import InputComponents, InputManifest\nfrom manifests.manifests import Manifests\nfrom manifests_workflow.component_opensearch import ComponentOpenSearch\nfrom manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\nfrom manifests_workflow.component_opensearch_min import ComponentOpenSearchMin\nfrom system.temporary_directory import TemporaryDirectory\n\n\nclass InputManifests(Manifests):\n def __init__(self, name: str) -> None:\n self.name = name\n self.prefix = name.lower().replace(\" \", \"-\")\n super().__init__(InputManifest, 
InputManifests.files(self.prefix))\n\n @classmethod\n def manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"manifests\"))\n\n @classmethod\n def jenkins_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"jenkins\"))\n\n @classmethod\n def cron_jenkinsfile(self) -> str:\n return os.path.join(self.jenkins_path(), \"check-for-build.jenkinsfile\")\n\n @classmethod\n def files(self, name: str) -> List:\n results = []\n for filename in glob.glob(os.path.join(self.manifests_path(), f\"**/{name}-*.yml\")):\n # avoids the -maven manifest\n match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n if match:\n results.append(filename)\n return results\n\n @abstractmethod\n def update(\n self,\n min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],\n component_klass: Type[ComponentOpenSearch],\n keep: bool = False,\n ) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n with TemporaryDirectory(keep=keep, chdir=True) as work_dir:\n logging.info(f\"Checking out components into {work_dir.name}\")\n\n # check out and build #main, 1.x, etc.\n branches = min_klass.branches()\n\n logging.info(f\"Checking {self.name} {branches} branches\")\n for branch in branches:\n c = min_klass.checkout(\n path=os.path.join(work_dir.name, self.name.replace(\" \", \"\"), branch),\n branch=branch,\n )\n\n version = c.version\n logging.info(f\"{self.name}#{branch} is version {version}\")\n if version not in main_versions.keys():\n main_versions[version] = [c]\n\n if component_klass is not None:\n # components can increment their own version first without incrementing min\n manifest = self.latest\n logging.info(f\"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})\")\n for component in manifest.components.values():\n if component.name == self.name:\n continue\n\n logging.info(f\"Checking out {component.name}#main\")\n component = component_klass.checkout(\n name=component.name,\n path=os.path.join(work_dir.name, component.name),\n opensearch_version=manifest.build.version,\n branch=\"main\",\n )\n\n component_version = component.version\n if component_version:\n release_version = \".\".join(component_version.split(\".\")[:3])\n if release_version not in main_versions.keys():\n main_versions[release_version] = []\n main_versions[release_version].append(component)\n logging.info(f\"{component.name}#main is version {release_version} (from {component_version})\")\n\n # summarize\n logging.info(\"Found versions on main:\")\n for main_version in main_versions.keys():\n for component in main_versions[main_version]:\n logging.info(f\" {component.name}={main_version}\")\n\n # generate new manifests\n for release_version in sorted(main_versions.keys() - known_versions):\n self.write_manifest(release_version, main_versions[release_version])\n self.add_to_cron(release_version)\n\n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n templates_base_path = os.path.join(self.manifests_path(), \"templates\")\n template_version_folder = version.split(\".\")[0] + \".x\"\n template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, \"manifest.yml\")\n if not os.path.exists(template_full_path):\n template_full_path = os.path.join(templates_base_path, self.prefix, \"default\", \"manifest.yml\")\n\n 
manifest = InputManifest.from_file(open(template_full_path))\n\n manifest.build.version = version\n manifests_components = []\n\n for component in components:\n logging.info(f\" Adding {component.name}\")\n manifests_components.append(component.to_dict())\n\n manifest.components = InputComponents(manifests_components) # type: ignore\n return manifest\n\n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n manifest = self.create_manifest(version, components)\n manifest_dir = os.path.join(self.manifests_path(), version)\n os.makedirs(manifest_dir, exist_ok=True)\n manifest_path = os.path.join(manifest_dir, f\"{self.prefix}-{version}.yml\")\n manifest.to_file(manifest_path)\n logging.info(f\"Wrote {manifest_path}\")\n\n def add_to_cron(self, version: str) -> None:\n logging.info(f\"Adding new version to cron: {version}\")\n jenkinsfile = self.cron_jenkinsfile()\n with open(jenkinsfile, \"r\") as f:\n data = f.read()\n\n cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\\n\"\n\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n\n data = data.replace(\"parameterizedCron '''\\n\", f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\")\n\n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n\n logging.info(f\"Wrote {jenkinsfile}\")\n", "path": "src/manifests_workflow/input_manifests.py"}]}
| 2,598 | 380 |
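The input_manifests fix above makes `InputManifests.files()` scan both `manifests/` and `legacy-manifests/`, so retired versions still count as known and `update()` stops regenerating them. A condensed sketch of that globbing logic, with the directory names assumed to be relative paths purely for illustration:

```python
# Condensed sketch of the two-folder manifest scan; paths are illustrative.
import glob
import os
import re


def known_manifest_files(prefix: str, roots) -> list:
    results = []
    for root in roots:
        for filename in glob.glob(os.path.join(root, f"**/{prefix}-*.yml")):
            # mirrors the original regex, which skips e.g. the -maven manifest
            if re.search(rf"^{prefix}-([0-9.]*)\.yml$", os.path.basename(filename)):
                results.append(filename)
    return results


known = known_manifest_files("opensearch", ["manifests", "legacy-manifests"])
```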
| gh_patches_debug_41258 | rasdani/github-patches | git_diff | streamlink__streamlink-5774 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.artetv: error: Unable to validate response text: ValidationError(dict):
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.5.0
### Description
I fixed this issue by adding '**API_HLS_NG**' on line 51 of
`/usr/lib/python3.11/site-packages/streamlink/plugins/artetv.py`
like this:

(screenshot of the edited file omitted)
### Debug log
```text
error: Unable to validate response text: ValidationError(dict):
Unable to validate value of key 'data'
Context(dict):
Unable to validate value of key 'attributes'
Context(dict):
Unable to validate value of key 'streams'
Context(AnySchema):
ValidationError(AnySchema):
ValidationError(AnySchema):
ValidationError(dict):
Unable to validate value of key 'protocol'
Context(AnySchema):
ValidationError(equality):
'API_HLS_NG' does not equal 'HLS'
ValidationError(equality):
'API_HLS_NG' does not equal 'HLS_NG'
```
</issue>
<code>
[start of src/streamlink/plugins/artetv.py]
1 """
2 $description European public service channel promoting culture, including magazine shows, concerts and documentaries.
3 $url arte.tv
4 $type live, vod
5 $metadata title
6 """
7
8 import logging
9 import re
10 from operator import itemgetter
11
12 from streamlink.plugin import Plugin, pluginmatcher
13 from streamlink.plugin.api import validate
14 from streamlink.stream.hls import HLSStream
15
16
17 log = logging.getLogger(__name__)
18
19
20 @pluginmatcher(re.compile(r"""
21 https?://(?:\w+\.)?arte\.tv/(?:guide/)?
22 (?P<language>[a-z]{2})/
23 (?:
24 (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+
25 |
26 (?:direct|live)
27 )
28 """, re.VERBOSE))
29 class ArteTV(Plugin):
30 API_URL = "https://api.arte.tv/api/player/v2/config/{0}/{1}"
31 API_TOKEN = "MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ"
32
33 def _get_streams(self):
34 language = self.match.group("language")
35 video_id = self.match.group("video_id")
36
37 json_url = self.API_URL.format(language, video_id or "LIVE")
38 headers = {
39 "Authorization": f"Bearer {self.API_TOKEN}",
40 }
41 streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(
42 validate.parse_json(),
43 {"data": {"attributes": {
44 "streams": validate.any(
45 [],
46 [
47 validate.all(
48 {
49 "url": validate.url(),
50 "slot": int,
51 "protocol": validate.any("HLS", "HLS_NG"),
52 },
53 validate.union_get("slot", "protocol", "url"),
54 ),
55 ],
56 ),
57 "metadata": {
58 "title": str,
59 "subtitle": validate.any(None, str),
60 },
61 }}},
62 validate.get(("data", "attributes")),
63 validate.union_get("streams", "metadata"),
64 ))
65
66 if not streams:
67 return
68
69 self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]
70
71 for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):
72 return HLSStream.parse_variant_playlist(self.session, url)
73
74
75 __plugin__ = ArteTV
76
[end of src/streamlink/plugins/artetv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py
--- a/src/streamlink/plugins/artetv.py
+++ b/src/streamlink/plugins/artetv.py
@@ -2,6 +2,7 @@
$description European public service channel promoting culture, including magazine shows, concerts and documentaries.
$url arte.tv
$type live, vod
+$metadata id
$metadata title
"""
@@ -17,38 +18,41 @@
log = logging.getLogger(__name__)
-@pluginmatcher(re.compile(r"""
- https?://(?:\w+\.)?arte\.tv/(?:guide/)?
- (?P<language>[a-z]{2})/
- (?:
- (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+
- |
- (?:direct|live)
- )
-""", re.VERBOSE))
+@pluginmatcher(
+ name="live",
+ pattern=re.compile(
+ r"https?://(?:\w+\.)?arte\.tv/(?P<language>[a-z]{2})/(?:direct|live)/?",
+ ),
+)
+@pluginmatcher(
+ name="vod",
+ pattern=re.compile(
+ r"https?://(?:\w+\.)?arte\.tv/(?:guide/)?(?P<language>[a-z]{2})/(?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+",
+ ),
+)
class ArteTV(Plugin):
- API_URL = "https://api.arte.tv/api/player/v2/config/{0}/{1}"
- API_TOKEN = "MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ"
+ API_URL = "https://api.arte.tv/api/player/v2/config/{language}/{id}"
def _get_streams(self):
- language = self.match.group("language")
- video_id = self.match.group("video_id")
+ self.id = self.match["video_id"] if self.matches["vod"] else "LIVE"
- json_url = self.API_URL.format(language, video_id or "LIVE")
- headers = {
- "Authorization": f"Bearer {self.API_TOKEN}",
- }
- streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(
+ json_url = self.API_URL.format(
+ language=self.match["language"],
+ id=self.id,
+ )
+ streams, metadata = self.session.http.get(json_url, schema=validate.Schema(
validate.parse_json(),
- {"data": {"attributes": {
+ {"data": {"attributes": dict}},
+ validate.get(("data", "attributes")),
+ {
"streams": validate.any(
[],
[
validate.all(
{
- "url": validate.url(),
"slot": int,
- "protocol": validate.any("HLS", "HLS_NG"),
+ "protocol": str,
+ "url": validate.url(),
},
validate.union_get("slot", "protocol", "url"),
),
@@ -58,17 +62,15 @@
"title": str,
"subtitle": validate.any(None, str),
},
- }}},
- validate.get(("data", "attributes")),
+ },
validate.union_get("streams", "metadata"),
))
- if not streams:
- return
-
self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]
- for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):
+ for _slot, protocol, url in sorted(streams, key=itemgetter(0)):
+ if "HLS" not in protocol:
+ continue
return HLSStream.parse_variant_playlist(self.session, url)
|
{"golden_diff": "diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py\n--- a/src/streamlink/plugins/artetv.py\n+++ b/src/streamlink/plugins/artetv.py\n@@ -2,6 +2,7 @@\n $description European public service channel promoting culture, including magazine shows, concerts and documentaries.\n $url arte.tv\n $type live, vod\n+$metadata id\n $metadata title\n \"\"\"\n \n@@ -17,38 +18,41 @@\n log = logging.getLogger(__name__)\n \n \n-@pluginmatcher(re.compile(r\"\"\"\n- https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?\n- (?P<language>[a-z]{2})/\n- (?:\n- (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\n- |\n- (?:direct|live)\n- )\n-\"\"\", re.VERBOSE))\n+@pluginmatcher(\n+ name=\"live\",\n+ pattern=re.compile(\n+ r\"https?://(?:\\w+\\.)?arte\\.tv/(?P<language>[a-z]{2})/(?:direct|live)/?\",\n+ ),\n+)\n+@pluginmatcher(\n+ name=\"vod\",\n+ pattern=re.compile(\n+ r\"https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?(?P<language>[a-z]{2})/(?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\",\n+ ),\n+)\n class ArteTV(Plugin):\n- API_URL = \"https://api.arte.tv/api/player/v2/config/{0}/{1}\"\n- API_TOKEN = \"MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ\"\n+ API_URL = \"https://api.arte.tv/api/player/v2/config/{language}/{id}\"\n \n def _get_streams(self):\n- language = self.match.group(\"language\")\n- video_id = self.match.group(\"video_id\")\n+ self.id = self.match[\"video_id\"] if self.matches[\"vod\"] else \"LIVE\"\n \n- json_url = self.API_URL.format(language, video_id or \"LIVE\")\n- headers = {\n- \"Authorization\": f\"Bearer {self.API_TOKEN}\",\n- }\n- streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(\n+ json_url = self.API_URL.format(\n+ language=self.match[\"language\"],\n+ id=self.id,\n+ )\n+ streams, metadata = self.session.http.get(json_url, schema=validate.Schema(\n validate.parse_json(),\n- {\"data\": {\"attributes\": {\n+ {\"data\": {\"attributes\": dict}},\n+ validate.get((\"data\", \"attributes\")),\n+ {\n \"streams\": validate.any(\n [],\n [\n validate.all(\n {\n- \"url\": validate.url(),\n \"slot\": int,\n- \"protocol\": validate.any(\"HLS\", \"HLS_NG\"),\n+ \"protocol\": str,\n+ \"url\": validate.url(),\n },\n validate.union_get(\"slot\", \"protocol\", \"url\"),\n ),\n@@ -58,17 +62,15 @@\n \"title\": str,\n \"subtitle\": validate.any(None, str),\n },\n- }}},\n- validate.get((\"data\", \"attributes\")),\n+ },\n validate.union_get(\"streams\", \"metadata\"),\n ))\n \n- if not streams:\n- return\n-\n self.title = f\"{metadata['title']} - {metadata['subtitle']}\" if metadata[\"subtitle\"] else metadata[\"title\"]\n \n- for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):\n+ for _slot, protocol, url in sorted(streams, key=itemgetter(0)):\n+ if \"HLS\" not in protocol:\n+ continue\n return HLSStream.parse_variant_playlist(self.session, url)\n", "issue": "plugins.artetv: error: Unable to validate response text: ValidationError(dict):\n### Checklist\r\n\r\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the 
master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\nstreamlink 6.5.0\r\n\r\n### Description\r\n\r\nI fix this issue \r\n```\r\nby adding '**API_HLS_NG**' in line 51 of file \r\n`/usr/lib/python3.11/site-packages/streamlink/plugins/artetv.py`\r\nlike this :\r\n```\r\n\r\nlink/streamlink/assets/19744191/b78f47ba-67b2-439b-b336-85bef7e4615a)\r\n\r\n### Debug log\r\n\r\n```text\r\nerror: Unable to validate response text: ValidationError(dict):\r\n Unable to validate value of key 'data'\r\n Context(dict):\r\n Unable to validate value of key 'attributes'\r\n Context(dict):\r\n Unable to validate value of key 'streams'\r\n Context(AnySchema):\r\n ValidationError(AnySchema):\r\n ValidationError(AnySchema):\r\n ValidationError(dict):\r\n Unable to validate value of key 'protocol'\r\n Context(AnySchema):\r\n ValidationError(equality):\r\n 'API_HLS_NG' does not equal 'HLS'\r\n ValidationError(equality):\r\n 'API_HLS_NG' does not equal 'HLS_NG'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description European public service channel promoting culture, including magazine shows, concerts and documentaries.\n$url arte.tv\n$type live, vod\n$metadata title\n\"\"\"\n\nimport logging\nimport re\nfrom operator import itemgetter\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?\n (?P<language>[a-z]{2})/\n (?:\n (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\n |\n (?:direct|live)\n )\n\"\"\", re.VERBOSE))\nclass ArteTV(Plugin):\n API_URL = \"https://api.arte.tv/api/player/v2/config/{0}/{1}\"\n API_TOKEN = \"MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ\"\n\n def _get_streams(self):\n language = self.match.group(\"language\")\n video_id = self.match.group(\"video_id\")\n\n json_url = self.API_URL.format(language, video_id or \"LIVE\")\n headers = {\n \"Authorization\": f\"Bearer {self.API_TOKEN}\",\n }\n streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(\n validate.parse_json(),\n {\"data\": {\"attributes\": {\n \"streams\": validate.any(\n [],\n [\n validate.all(\n {\n \"url\": validate.url(),\n \"slot\": int,\n \"protocol\": validate.any(\"HLS\", \"HLS_NG\"),\n },\n validate.union_get(\"slot\", \"protocol\", \"url\"),\n ),\n ],\n ),\n \"metadata\": {\n \"title\": str,\n \"subtitle\": validate.any(None, str),\n },\n }}},\n validate.get((\"data\", \"attributes\")),\n validate.union_get(\"streams\", \"metadata\"),\n ))\n\n if not streams:\n return\n\n self.title = f\"{metadata['title']} - {metadata['subtitle']}\" if metadata[\"subtitle\"] else metadata[\"title\"]\n\n for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):\n return HLSStream.parse_variant_playlist(self.session, url)\n\n\n__plugin__ = ArteTV\n", "path": "src/streamlink/plugins/artetv.py"}]}
| 1,731 | 909 |
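The artetv fix above stops hard-coding the protocol whitelist in the validation schema, accepts any string, and filters for HLS-like values afterwards, which is why the new `API_HLS_NG` value no longer fails validation. A minimal sketch of that pattern, assuming streamlink's `validate` helpers exactly as used in the plugin source shown earlier:

```python
# Sketch of the relaxed stream schema plus post-validation filtering.
from streamlink.plugin.api import validate

stream_schema = validate.all(
    {
        "slot": int,
        "protocol": str,               # was: validate.any("HLS", "HLS_NG")
        "url": validate.url(),
    },
    validate.union_get("slot", "protocol", "url"),
)


def hls_urls(streams):
    # keeps "HLS", "HLS_NG", "API_HLS_NG", ... and preserves slot ordering
    return [url for _slot, protocol, url in sorted(streams) if "HLS" in protocol]
```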
| gh_patches_debug_44280 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-57 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for renaming fields
We should be able to specify a custom name for a field when needed, like this:
```python
@strawberry.type
class Query:
example_field: str = strawberry.field(name="example")
```
</issue>
<code>
[start of strawberry/field.py]
1 import typing
2
3 import dataclasses
4 from graphql import GraphQLField
5
6 from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT
7 from .exceptions import MissingArgumentsAnnotationsError, MissingReturnAnnotationError
8 from .type_converter import REGISTRY, get_graphql_type_for_annotation
9 from .utils.dict_to_type import dict_to_type
10 from .utils.inspect import get_func_args
11 from .utils.lazy_property import lazy_property
12 from .utils.str_converters import to_camel_case, to_snake_case
13 from .utils.typing import (
14 get_list_annotation,
15 get_optional_annotation,
16 is_list,
17 is_optional,
18 )
19
20
21 class LazyFieldWrapper:
22 """A lazy wrapper for a strawberry field.
23 This allows to use cyclic dependencies in a strawberry fields:
24
25 >>> @strawberry.type
26 >>> class TypeA:
27 >>> @strawberry.field
28 >>> def type_b(self, info) -> "TypeB":
29 >>> from .type_b import TypeB
30 >>> return TypeB()
31 """
32
33 def __init__(self, obj, is_subscription, **kwargs):
34 self._wrapped_obj = obj
35 self.is_subscription = is_subscription
36 self.kwargs = kwargs
37
38 if callable(self._wrapped_obj):
39 self._check_has_annotations()
40
41 def _check_has_annotations(self):
42 # using annotations without passing from typing.get_type_hints
43 # as we don't the actually types for the annotations
44 annotations = self._wrapped_obj.__annotations__
45 name = self._wrapped_obj.__name__
46
47 if "return" not in annotations:
48 raise MissingReturnAnnotationError(name)
49
50 function_arguments = set(get_func_args(self._wrapped_obj)) - {"self", "info"}
51
52 arguments_annotations = {
53 key: value
54 for key, value in annotations.items()
55 if key not in ["info", "return"]
56 }
57
58 annotated_function_arguments = set(arguments_annotations.keys())
59 arguments_missing_annotations = (
60 function_arguments - annotated_function_arguments
61 )
62
63 if len(arguments_missing_annotations) > 0:
64 raise MissingArgumentsAnnotationsError(name, arguments_missing_annotations)
65
66 def __getattr__(self, attr):
67 if attr in self.__dict__:
68 return getattr(self, attr)
69
70 return getattr(self._wrapped_obj, attr)
71
72 def __call__(self, *args, **kwargs):
73 return self._wrapped_obj(self, *args, **kwargs)
74
75 @lazy_property
76 def field(self):
77 return _get_field(
78 self._wrapped_obj, is_subscription=self.is_subscription, **self.kwargs
79 )
80
81
82 class strawberry_field:
83 """A small wrapper for a field in strawberry.
84
85 You shouldn't be using this directly as this is used internally
86 when using `strawberry.field`.
87
88 This allows to use the following two syntaxes when using the type
89 decorator:
90
91 >>> class X:
92 >>> field_abc: str = strawberry.field(description="ABC")
93
94 >>> class X:
95 >>> @strawberry.field(description="ABC")
96 >>> def field_a(self, info) -> str:
97 >>> return "abc"
98
99 When calling this class as strawberry_field it creates a field
100 that stores metadata (such as field description). In addition
101 to that it also acts as decorator when called as a function,
102 allowing us to us both syntaxes.
103 """
104
105 def __init__(self, *, is_subscription=False, **kwargs):
106 self.field = dataclasses.field()
107 self.is_subscription = is_subscription
108 self.description = kwargs.get("description", None)
109 self.kwargs = kwargs
110
111 def __call__(self, wrap):
112 setattr(wrap, IS_STRAWBERRY_FIELD, True)
113
114 self.kwargs["description"] = self.description or wrap.__doc__
115
116 return LazyFieldWrapper(wrap, self.is_subscription, **self.kwargs)
117
118
119 def convert_args(args, annotations):
120 """Converts a nested dictionary to a dictionary of strawberry input types."""
121
122 converted_args = {}
123
124 for key, value in args.items():
125 key = to_snake_case(key)
126 annotation = annotations[key]
127
128 # we don't need to check about unions here since they are not
129 # yet supported for arguments.
130 # see https://github.com/graphql/graphql-spec/issues/488
131
132 is_list_of_args = False
133
134 if is_optional(annotation):
135 annotation = get_optional_annotation(annotation)
136
137 if is_list(annotation):
138 annotation = get_list_annotation(annotation)
139 is_list_of_args = True
140
141 if getattr(annotation, IS_STRAWBERRY_INPUT, False):
142 if is_list_of_args:
143 converted_args[key] = [dict_to_type(x, annotation) for x in value]
144 else:
145 converted_args[key] = dict_to_type(value, annotation)
146 else:
147 converted_args[key] = value
148
149 return converted_args
150
151
152 def _get_field(wrap, *, is_subscription=False, **kwargs):
153 annotations = typing.get_type_hints(wrap, None, REGISTRY)
154
155 name = wrap.__name__
156
157 field_type = get_graphql_type_for_annotation(annotations["return"], name)
158
159 arguments_annotations = {
160 key: value
161 for key, value in annotations.items()
162 if key not in ["info", "return"]
163 }
164
165 arguments = {
166 to_camel_case(name): get_graphql_type_for_annotation(annotation, name)
167 for name, annotation in arguments_annotations.items()
168 }
169
170 def resolver(source, info, **args):
171 args = convert_args(args, arguments_annotations)
172
173 return wrap(source, info, **args)
174
175 if is_subscription:
176
177 def _resolve(event, info):
178 return event
179
180 kwargs.update({"subscribe": resolver, "resolve": _resolve})
181 else:
182 kwargs.update({"resolve": resolver})
183
184 kwargs["description"] = kwargs.get("description", wrap.__doc__)
185
186 return GraphQLField(field_type, args=arguments, **kwargs)
187
188
189 def field(wrap=None, *, is_subscription=False, description=None):
190 """Annotates a method or property as a GraphQL field.
191
192 This is normally used inside a type declaration:
193
194 >>> @strawberry.type:
195 >>> class X:
196 >>> field_abc: str = strawberry.field(description="ABC")
197
198 >>> @strawberry.field(description="ABC")
199 >>> def field_with_resolver(self, info) -> str:
200 >>> return "abc"
201
202 it can be used both as decorator and as a normal function.
203 """
204
205 field = strawberry_field(description=description, is_subscription=is_subscription)
206
207 # when calling this with parens we are going to return a strawberry_field
208 # instance, so it can be used as both decorator and function.
209
210 if wrap is None:
211 return field
212
213 # otherwise we run the decorator directly,
214 # when called as @strawberry.field, without parens.
215
216 return field(wrap)
217
[end of strawberry/field.py]
[start of strawberry/type.py]
1 import typing
2 from functools import partial
3
4 from dataclasses import dataclass
5 from graphql import (
6 GraphQLField,
7 GraphQLInputField,
8 GraphQLInputObjectType,
9 GraphQLInterfaceType,
10 GraphQLObjectType,
11 )
12 from graphql.utilities.schema_printer import print_type
13
14 from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE
15 from .type_converter import REGISTRY, get_graphql_type_for_annotation
16 from .utils.str_converters import to_camel_case
17
18
19 def _get_resolver(cls, field_name):
20 def _resolver(obj, info):
21 # TODO: can we make this nicer?
22 # does it work in all the cases?
23
24 field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)
25
26 if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):
27 return field_resolver(obj, info)
28
29 return field_resolver
30
31 return _resolver
32
33
34 def _convert_annotations_fields(cls, *, is_input=False):
35 FieldClass = GraphQLInputField if is_input else GraphQLField
36 annotations = typing.get_type_hints(cls, None, REGISTRY)
37
38 fields = {}
39
40 for key, annotation in annotations.items():
41 field_name = to_camel_case(key)
42 class_field = getattr(cls, key, None)
43
44 description = getattr(class_field, "description", None)
45
46 fields[field_name] = FieldClass(
47 get_graphql_type_for_annotation(annotation, key),
48 description=description,
49 **({} if is_input else {"resolve": _get_resolver(cls, key)})
50 )
51
52 return fields
53
54
55 def _process_type(cls, *, is_input=False, is_interface=False, description=None):
56 name = cls.__name__
57 REGISTRY[name] = cls
58
59 def repr_(self):
60 return print_type(self.field)
61
62 setattr(cls, "__repr__", repr_)
63
64 def _get_fields():
65 fields = _convert_annotations_fields(cls, is_input=is_input)
66
67 fields.update(
68 {
69 to_camel_case(key): value.field
70 for key, value in cls.__dict__.items()
71 if getattr(value, IS_STRAWBERRY_FIELD, False)
72 }
73 )
74
75 return fields
76
77 if is_input:
78 setattr(cls, IS_STRAWBERRY_INPUT, True)
79 elif is_interface:
80 setattr(cls, IS_STRAWBERRY_INTERFACE, True)
81
82 extra_kwargs = {"description": description or cls.__doc__}
83
84 if is_input:
85 TypeClass = GraphQLInputObjectType
86 elif is_interface:
87 TypeClass = GraphQLInterfaceType
88 else:
89 TypeClass = GraphQLObjectType
90
91 extra_kwargs["interfaces"] = [
92 klass.field
93 for klass in cls.__bases__
94 if hasattr(klass, IS_STRAWBERRY_INTERFACE)
95 ]
96
97 cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)
98
99 return dataclass(cls, repr=False)
100
101
102 def type(cls=None, *, is_input=False, is_interface=False, description=None):
103 """Annotates a class as a GraphQL type.
104
105 Example usage:
106
107 >>> @strawberry.type:
108 >>> class X:
109 >>> field_abc: str = "ABC"
110 """
111
112 def wrap(cls):
113 return _process_type(
114 cls, is_input=is_input, is_interface=is_interface, description=description
115 )
116
117 if cls is None:
118 return wrap
119
120 return wrap(cls)
121
122
123 input = partial(type, is_input=True)
124 interface = partial(type, is_interface=True)
125
[end of strawberry/type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/field.py b/strawberry/field.py
--- a/strawberry/field.py
+++ b/strawberry/field.py
@@ -30,9 +30,10 @@
>>> return TypeB()
"""
- def __init__(self, obj, is_subscription, **kwargs):
+ def __init__(self, obj, is_subscription, name=None, **kwargs):
self._wrapped_obj = obj
self.is_subscription = is_subscription
+ self.name = name
self.kwargs = kwargs
if callable(self._wrapped_obj):
@@ -106,6 +107,7 @@
self.field = dataclasses.field()
self.is_subscription = is_subscription
self.description = kwargs.get("description", None)
+ self.name = kwargs.pop("name", None)
self.kwargs = kwargs
def __call__(self, wrap):
@@ -113,7 +115,7 @@
self.kwargs["description"] = self.description or wrap.__doc__
- return LazyFieldWrapper(wrap, self.is_subscription, **self.kwargs)
+ return LazyFieldWrapper(wrap, self.is_subscription, self.name, **self.kwargs)
def convert_args(args, annotations):
@@ -186,7 +188,7 @@
return GraphQLField(field_type, args=arguments, **kwargs)
-def field(wrap=None, *, is_subscription=False, description=None):
+def field(wrap=None, *, is_subscription=False, name=None, description=None):
"""Annotates a method or property as a GraphQL field.
This is normally used inside a type declaration:
@@ -202,7 +204,9 @@
it can be used both as decorator and as a normal function.
"""
- field = strawberry_field(description=description, is_subscription=is_subscription)
+ field = strawberry_field(
+ name=name, description=description, is_subscription=is_subscription
+ )
# when calling this with parens we are going to return a strawberry_field
# instance, so it can be used as both decorator and function.
diff --git a/strawberry/type.py b/strawberry/type.py
--- a/strawberry/type.py
+++ b/strawberry/type.py
@@ -12,6 +12,7 @@
from graphql.utilities.schema_printer import print_type
from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE
+from .field import strawberry_field
from .type_converter import REGISTRY, get_graphql_type_for_annotation
from .utils.str_converters import to_camel_case
@@ -26,6 +27,10 @@
if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):
return field_resolver(obj, info)
+ elif field_resolver.__class__ is strawberry_field:
+ # TODO: support default values
+ return None
+
return field_resolver
return _resolver
@@ -38,10 +43,12 @@
fields = {}
for key, annotation in annotations.items():
- field_name = to_camel_case(key)
class_field = getattr(cls, key, None)
description = getattr(class_field, "description", None)
+ name = getattr(class_field, "name", None)
+
+ field_name = name or to_camel_case(key)
fields[field_name] = FieldClass(
get_graphql_type_for_annotation(annotation, key),
@@ -64,13 +71,16 @@
def _get_fields():
fields = _convert_annotations_fields(cls, is_input=is_input)
- fields.update(
- {
- to_camel_case(key): value.field
- for key, value in cls.__dict__.items()
- if getattr(value, IS_STRAWBERRY_FIELD, False)
- }
- )
+ strawberry_fields = {
+ key: value
+ for key, value in cls.__dict__.items()
+ if getattr(value, IS_STRAWBERRY_FIELD, False)
+ }
+
+ for key, value in strawberry_fields.items():
+ name = getattr(value, "name", None) or to_camel_case(key)
+
+ fields[name] = value.field
return fields
|
{"golden_diff": "diff --git a/strawberry/field.py b/strawberry/field.py\n--- a/strawberry/field.py\n+++ b/strawberry/field.py\n@@ -30,9 +30,10 @@\n >>> return TypeB()\n \"\"\"\n \n- def __init__(self, obj, is_subscription, **kwargs):\n+ def __init__(self, obj, is_subscription, name=None, **kwargs):\n self._wrapped_obj = obj\n self.is_subscription = is_subscription\n+ self.name = name\n self.kwargs = kwargs\n \n if callable(self._wrapped_obj):\n@@ -106,6 +107,7 @@\n self.field = dataclasses.field()\n self.is_subscription = is_subscription\n self.description = kwargs.get(\"description\", None)\n+ self.name = kwargs.pop(\"name\", None)\n self.kwargs = kwargs\n \n def __call__(self, wrap):\n@@ -113,7 +115,7 @@\n \n self.kwargs[\"description\"] = self.description or wrap.__doc__\n \n- return LazyFieldWrapper(wrap, self.is_subscription, **self.kwargs)\n+ return LazyFieldWrapper(wrap, self.is_subscription, self.name, **self.kwargs)\n \n \n def convert_args(args, annotations):\n@@ -186,7 +188,7 @@\n return GraphQLField(field_type, args=arguments, **kwargs)\n \n \n-def field(wrap=None, *, is_subscription=False, description=None):\n+def field(wrap=None, *, is_subscription=False, name=None, description=None):\n \"\"\"Annotates a method or property as a GraphQL field.\n \n This is normally used inside a type declaration:\n@@ -202,7 +204,9 @@\n it can be used both as decorator and as a normal function.\n \"\"\"\n \n- field = strawberry_field(description=description, is_subscription=is_subscription)\n+ field = strawberry_field(\n+ name=name, description=description, is_subscription=is_subscription\n+ )\n \n # when calling this with parens we are going to return a strawberry_field\n # instance, so it can be used as both decorator and function.\ndiff --git a/strawberry/type.py b/strawberry/type.py\n--- a/strawberry/type.py\n+++ b/strawberry/type.py\n@@ -12,6 +12,7 @@\n from graphql.utilities.schema_printer import print_type\n \n from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\n+from .field import strawberry_field\n from .type_converter import REGISTRY, get_graphql_type_for_annotation\n from .utils.str_converters import to_camel_case\n \n@@ -26,6 +27,10 @@\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(obj, info)\n \n+ elif field_resolver.__class__ is strawberry_field:\n+ # TODO: support default values\n+ return None\n+\n return field_resolver\n \n return _resolver\n@@ -38,10 +43,12 @@\n fields = {}\n \n for key, annotation in annotations.items():\n- field_name = to_camel_case(key)\n class_field = getattr(cls, key, None)\n \n description = getattr(class_field, \"description\", None)\n+ name = getattr(class_field, \"name\", None)\n+\n+ field_name = name or to_camel_case(key)\n \n fields[field_name] = FieldClass(\n get_graphql_type_for_annotation(annotation, key),\n@@ -64,13 +71,16 @@\n def _get_fields():\n fields = _convert_annotations_fields(cls, is_input=is_input)\n \n- fields.update(\n- {\n- to_camel_case(key): value.field\n- for key, value in cls.__dict__.items()\n- if getattr(value, IS_STRAWBERRY_FIELD, False)\n- }\n- )\n+ strawberry_fields = {\n+ key: value\n+ for key, value in cls.__dict__.items()\n+ if getattr(value, IS_STRAWBERRY_FIELD, False)\n+ }\n+\n+ for key, value in strawberry_fields.items():\n+ name = getattr(value, \"name\", None) or to_camel_case(key)\n+\n+ fields[name] = value.field\n \n return fields\n", "issue": "Add support for renaming fields\nWe should be able to specify a custom name for a field when needed, 
like this:\r\n\r\n```python\r\[email protected]\r\nclass Query:\r\n example_field: str = strawberry.field(name=\"example\")\r\n```\n", "before_files": [{"content": "import typing\n\nimport dataclasses\nfrom graphql import GraphQLField\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT\nfrom .exceptions import MissingArgumentsAnnotationsError, MissingReturnAnnotationError\nfrom .type_converter import REGISTRY, get_graphql_type_for_annotation\nfrom .utils.dict_to_type import dict_to_type\nfrom .utils.inspect import get_func_args\nfrom .utils.lazy_property import lazy_property\nfrom .utils.str_converters import to_camel_case, to_snake_case\nfrom .utils.typing import (\n get_list_annotation,\n get_optional_annotation,\n is_list,\n is_optional,\n)\n\n\nclass LazyFieldWrapper:\n \"\"\"A lazy wrapper for a strawberry field.\n This allows to use cyclic dependencies in a strawberry fields:\n\n >>> @strawberry.type\n >>> class TypeA:\n >>> @strawberry.field\n >>> def type_b(self, info) -> \"TypeB\":\n >>> from .type_b import TypeB\n >>> return TypeB()\n \"\"\"\n\n def __init__(self, obj, is_subscription, **kwargs):\n self._wrapped_obj = obj\n self.is_subscription = is_subscription\n self.kwargs = kwargs\n\n if callable(self._wrapped_obj):\n self._check_has_annotations()\n\n def _check_has_annotations(self):\n # using annotations without passing from typing.get_type_hints\n # as we don't the actually types for the annotations\n annotations = self._wrapped_obj.__annotations__\n name = self._wrapped_obj.__name__\n\n if \"return\" not in annotations:\n raise MissingReturnAnnotationError(name)\n\n function_arguments = set(get_func_args(self._wrapped_obj)) - {\"self\", \"info\"}\n\n arguments_annotations = {\n key: value\n for key, value in annotations.items()\n if key not in [\"info\", \"return\"]\n }\n\n annotated_function_arguments = set(arguments_annotations.keys())\n arguments_missing_annotations = (\n function_arguments - annotated_function_arguments\n )\n\n if len(arguments_missing_annotations) > 0:\n raise MissingArgumentsAnnotationsError(name, arguments_missing_annotations)\n\n def __getattr__(self, attr):\n if attr in self.__dict__:\n return getattr(self, attr)\n\n return getattr(self._wrapped_obj, attr)\n\n def __call__(self, *args, **kwargs):\n return self._wrapped_obj(self, *args, **kwargs)\n\n @lazy_property\n def field(self):\n return _get_field(\n self._wrapped_obj, is_subscription=self.is_subscription, **self.kwargs\n )\n\n\nclass strawberry_field:\n \"\"\"A small wrapper for a field in strawberry.\n\n You shouldn't be using this directly as this is used internally\n when using `strawberry.field`.\n\n This allows to use the following two syntaxes when using the type\n decorator:\n\n >>> class X:\n >>> field_abc: str = strawberry.field(description=\"ABC\")\n\n >>> class X:\n >>> @strawberry.field(description=\"ABC\")\n >>> def field_a(self, info) -> str:\n >>> return \"abc\"\n\n When calling this class as strawberry_field it creates a field\n that stores metadata (such as field description). 
In addition\n to that it also acts as decorator when called as a function,\n allowing us to us both syntaxes.\n \"\"\"\n\n def __init__(self, *, is_subscription=False, **kwargs):\n self.field = dataclasses.field()\n self.is_subscription = is_subscription\n self.description = kwargs.get(\"description\", None)\n self.kwargs = kwargs\n\n def __call__(self, wrap):\n setattr(wrap, IS_STRAWBERRY_FIELD, True)\n\n self.kwargs[\"description\"] = self.description or wrap.__doc__\n\n return LazyFieldWrapper(wrap, self.is_subscription, **self.kwargs)\n\n\ndef convert_args(args, annotations):\n \"\"\"Converts a nested dictionary to a dictionary of strawberry input types.\"\"\"\n\n converted_args = {}\n\n for key, value in args.items():\n key = to_snake_case(key)\n annotation = annotations[key]\n\n # we don't need to check about unions here since they are not\n # yet supported for arguments.\n # see https://github.com/graphql/graphql-spec/issues/488\n\n is_list_of_args = False\n\n if is_optional(annotation):\n annotation = get_optional_annotation(annotation)\n\n if is_list(annotation):\n annotation = get_list_annotation(annotation)\n is_list_of_args = True\n\n if getattr(annotation, IS_STRAWBERRY_INPUT, False):\n if is_list_of_args:\n converted_args[key] = [dict_to_type(x, annotation) for x in value]\n else:\n converted_args[key] = dict_to_type(value, annotation)\n else:\n converted_args[key] = value\n\n return converted_args\n\n\ndef _get_field(wrap, *, is_subscription=False, **kwargs):\n annotations = typing.get_type_hints(wrap, None, REGISTRY)\n\n name = wrap.__name__\n\n field_type = get_graphql_type_for_annotation(annotations[\"return\"], name)\n\n arguments_annotations = {\n key: value\n for key, value in annotations.items()\n if key not in [\"info\", \"return\"]\n }\n\n arguments = {\n to_camel_case(name): get_graphql_type_for_annotation(annotation, name)\n for name, annotation in arguments_annotations.items()\n }\n\n def resolver(source, info, **args):\n args = convert_args(args, arguments_annotations)\n\n return wrap(source, info, **args)\n\n if is_subscription:\n\n def _resolve(event, info):\n return event\n\n kwargs.update({\"subscribe\": resolver, \"resolve\": _resolve})\n else:\n kwargs.update({\"resolve\": resolver})\n\n kwargs[\"description\"] = kwargs.get(\"description\", wrap.__doc__)\n\n return GraphQLField(field_type, args=arguments, **kwargs)\n\n\ndef field(wrap=None, *, is_subscription=False, description=None):\n \"\"\"Annotates a method or property as a GraphQL field.\n\n This is normally used inside a type declaration:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = strawberry.field(description=\"ABC\")\n\n >>> @strawberry.field(description=\"ABC\")\n >>> def field_with_resolver(self, info) -> str:\n >>> return \"abc\"\n\n it can be used both as decorator and as a normal function.\n \"\"\"\n\n field = strawberry_field(description=description, is_subscription=is_subscription)\n\n # when calling this with parens we are going to return a strawberry_field\n # instance, so it can be used as both decorator and function.\n\n if wrap is None:\n return field\n\n # otherwise we run the decorator directly,\n # when called as @strawberry.field, without parens.\n\n return field(wrap)\n", "path": "strawberry/field.py"}, {"content": "import typing\nfrom functools import partial\n\nfrom dataclasses import dataclass\nfrom graphql import (\n GraphQLField,\n GraphQLInputField,\n GraphQLInputObjectType,\n GraphQLInterfaceType,\n GraphQLObjectType,\n)\nfrom 
graphql.utilities.schema_printer import print_type\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\nfrom .type_converter import REGISTRY, get_graphql_type_for_annotation\nfrom .utils.str_converters import to_camel_case\n\n\ndef _get_resolver(cls, field_name):\n def _resolver(obj, info):\n # TODO: can we make this nicer?\n # does it work in all the cases?\n\n field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(obj, info)\n\n return field_resolver\n\n return _resolver\n\n\ndef _convert_annotations_fields(cls, *, is_input=False):\n FieldClass = GraphQLInputField if is_input else GraphQLField\n annotations = typing.get_type_hints(cls, None, REGISTRY)\n\n fields = {}\n\n for key, annotation in annotations.items():\n field_name = to_camel_case(key)\n class_field = getattr(cls, key, None)\n\n description = getattr(class_field, \"description\", None)\n\n fields[field_name] = FieldClass(\n get_graphql_type_for_annotation(annotation, key),\n description=description,\n **({} if is_input else {\"resolve\": _get_resolver(cls, key)})\n )\n\n return fields\n\n\ndef _process_type(cls, *, is_input=False, is_interface=False, description=None):\n name = cls.__name__\n REGISTRY[name] = cls\n\n def repr_(self):\n return print_type(self.field)\n\n setattr(cls, \"__repr__\", repr_)\n\n def _get_fields():\n fields = _convert_annotations_fields(cls, is_input=is_input)\n\n fields.update(\n {\n to_camel_case(key): value.field\n for key, value in cls.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n elif is_interface:\n setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n if is_input:\n TypeClass = GraphQLInputObjectType\n elif is_interface:\n TypeClass = GraphQLInterfaceType\n else:\n TypeClass = GraphQLObjectType\n\n extra_kwargs[\"interfaces\"] = [\n klass.field\n for klass in cls.__bases__\n if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n ]\n\n cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)\n\n return dataclass(cls, repr=False)\n\n\ndef type(cls=None, *, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(\n cls, is_input=is_input, is_interface=is_interface, description=description\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/type.py"}]}
| 3,661 | 945 |
gh_patches_debug_20703
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1817
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search: search doesn't appear to use the organization
Example: the MapAction org has two public datasets, but searching "mapaction" or MapAction returns 0 results.
Other org searches will return results, but this is probably because the name of the org is mentioned in other metadata.
To do:
1. confirm that search queries from the homepage or main search bar are not using organizations
2. if that is the source of the problem, add org to the search queries
</issue>
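A sketch of to-do item 2 — expanding the free-text query with the machine name of any organization mentioned in it before the query reaches Solr — assuming the plugins toolkit and the standard `organization_list` action (the same pattern applies to `group_list` for country groups); the helper name and fallback context are illustrative:

```python
import ckan.plugins.toolkit as tk


def expand_query_with_orgs(q, context=None):
    """Append the machine name of any organization whose display name occurs in q."""
    context = context or {'user': '127.0.0.1'}
    orgs = tk.get_action('organization_list')(context, {'all_fields': True})
    for org in orgs:
        if org['display_name'].lower() in q.lower():
            q += ' ' + org['name']
    return q
```

`before_search` would then run `search_params['q']` through this helper before the existing facet and indicator handling.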
<code>
[start of ckanext-hdx_search/ckanext/hdx_search/plugin.py]
1 import logging
2 import ckan.plugins as plugins
3 import ckan.plugins.toolkit as tk
4 import ckan.lib.plugins as lib_plugins
5
6
7 class HDXSearchPlugin(plugins.SingletonPlugin):
8 plugins.implements(plugins.IConfigurer, inherit=False)
9 plugins.implements(plugins.IRoutes, inherit=True)
10 plugins.implements(plugins.ITemplateHelpers, inherit=False)
11 plugins.implements(plugins.IPackageController, inherit=True)
12
13 def update_config(self, config):
14 tk.add_template_directory(config, 'templates')
15
16 def get_helpers(self):
17 return {}
18
19 def before_map(self, map):
20 map.connect('search', '/search',
21 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')
22 map.connect('simple_search',
23 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')
24 return map
25
26 def after_map(self, map):
27 map.connect('search', '/search',
28 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')
29 map.connect('simple_search',
30 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')
31 return map
32
33 def before_search(self, search_params):
34 if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:
35 search_params['facet.field'].append('vocab_Topics')
36
37 # If indicator flag is set, search only that type
38 if 'ext_indicator' in search_params['extras']:
39 if int(search_params['extras']['ext_indicator']) == 1:
40 search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'
41 elif int(search_params['extras']['ext_indicator']) == 0:
42 search_params['fq'] = search_params[
43 'fq'] + ' -extras_indicator:1'
44 return search_params
45
46 def after_search(self, search_results, search_params):
47 return search_results
48
49 def before_view(self, pkg_dict):
50 return pkg_dict
51
[end of ckanext-hdx_search/ckanext/hdx_search/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py
--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py
+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py
@@ -1,8 +1,13 @@
-import logging
+import logging, re
import ckan.plugins as plugins
import ckan.plugins.toolkit as tk
import ckan.lib.plugins as lib_plugins
+def convert_country(q):
+ for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):
+ if re.findall(c['display_name'].lower(),q.lower()):
+ q += ' '+c['name']
+ return q
class HDXSearchPlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IConfigurer, inherit=False)
@@ -31,6 +36,7 @@
return map
def before_search(self, search_params):
+ search_params['q'] = convert_country(search_params['q'])
if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:
search_params['facet.field'].append('vocab_Topics')
|
{"golden_diff": "diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n@@ -1,8 +1,13 @@\n-import logging\n+import logging, re\n import ckan.plugins as plugins\n import ckan.plugins.toolkit as tk\n import ckan.lib.plugins as lib_plugins\n \n+def convert_country(q):\n+ for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):\n+ if re.findall(c['display_name'].lower(),q.lower()):\n+ q += ' '+c['name']\n+ return q\n \n class HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n@@ -31,6 +36,7 @@\n return map\n \n def before_search(self, search_params):\n+ search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n", "issue": "Search: search doesn't appear to use the organization\nExample: the MapAction org has two public datasets, but searching \"mapaction\" or MapAction returns 0 results. \n\nOther org searches will return results, but this is probably because the name of the org is mentioned in other metadata. \n\nTo do: \n1. confirm that search queries from the homepage or main search bar are not using organizations\n2. if that is the source of the problem, add org to the search queries\n\n", "before_files": [{"content": "import logging\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\n\nclass HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers, inherit=False)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def before_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def after_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def before_search(self, search_params):\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n\n # If indicator flag is set, search only that type\n if 'ext_indicator' in search_params['extras']:\n if int(search_params['extras']['ext_indicator']) == 1:\n search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'\n elif int(search_params['extras']['ext_indicator']) == 0:\n search_params['fq'] = search_params[\n 'fq'] + ' -extras_indicator:1'\n return search_params\n\n def after_search(self, search_results, search_params):\n return search_results\n\n def before_view(self, pkg_dict):\n return pkg_dict\n", "path": "ckanext-hdx_search/ckanext/hdx_search/plugin.py"}]}
| 1,204 | 287 |
gh_patches_debug_11786
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-2877
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BigQuery] Allow more recent versions of google-api-core?
### Describe the feature
Currently dbt-bigquery has [an upper limit of 1.16 on `google-api-core`](https://github.com/fishtown-analytics/dbt/blob/v0.18.1b3/plugins/bigquery/setup.py#L53). That release is from Jan of this year.
Would it be possible to loosen that?
While it's clearly not dbt's responsibility for us to be able to install arbitrary packages, here's an example where we can't install `google-cloud-bigquery-datatransfer` because of this restriction:
```
[SolverProblemError]
Because no versions of google-cloud-bigquery-datatransfer match >2.0.0,<3.0.0
and google-cloud-bigquery-datatransfer (2.0.0) depends on google-api-core (>=1.22.2,<2.0.0dev), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) requires google-api-core (>=1.22.2,<2.0.0dev).
And because dbt-bigquery (0.18.0) depends on google-api-core (>=1.16.0,<1.17.0), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt-bigquery (0.18.0).
And because dbt (0.18.0) depends on dbt-bigquery (0.18.0)
and no versions of dbt match >0.18.0,<0.19.0, google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt (>=0.18.0,<0.19.0).
So, because {repo} depends on both dbt (^0.18.0) and google-cloud-bigquery-datatransfer (^2.0.0), version solving failed.
```
Thanks as ever for the awesome product!
</issue>
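For reference, the pin lives in the plugin's `install_requires`; one possible relaxation — the exact upper bounds below are illustrative, since the Google client libraries have a history of breaking changes in minor releases — is roughly this fragment of `plugins/bigquery/setup.py`:

```python
# Hypothetical relaxed pins for the BigQuery plugin; the upper bounds shown
# are one plausible choice, not an authoritative recommendation.
install_requires = [
    'dbt-core==0.19.0b1',
    'protobuf>=3.13.0,<4',
    'google-cloud-core>=1.3.0,<1.5',
    'google-cloud-bigquery>=1.25.0,<2.4',
    'google-api-core>=1.16.0,<1.24',
    'googleapis-common-protos>=1.6.0,<1.53',
    'six>=1.14.0',
]
```

With `google-api-core` allowed up to a later 1.x release, a resolver can satisfy both dbt-bigquery and `google-cloud-bigquery-datatransfer>=2.0.0`, which requires `google-api-core>=1.22.2,<2.0.0dev`.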
<code>
[start of plugins/bigquery/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 package_name = "dbt-bigquery"
23 package_version = "0.19.0b1"
24 description = """The bigquery adapter plugin for dbt (data build tool)"""
25
26 this_directory = os.path.abspath(os.path.dirname(__file__))
27 with open(os.path.join(this_directory, 'README.md')) as f:
28 long_description = f.read()
29
30 setup(
31 name=package_name,
32 version=package_version,
33 description=description,
34 long_description=long_description,
35 long_description_content_type='text/markdown',
36 author="Fishtown Analytics",
37 author_email="[email protected]",
38 url="https://github.com/fishtown-analytics/dbt",
39 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
40 package_data={
41 'dbt': [
42 'include/bigquery/dbt_project.yml',
43 'include/bigquery/sample_profiles.yml',
44 'include/bigquery/macros/*.sql',
45 'include/bigquery/macros/**/*.sql',
46 ]
47 },
48 install_requires=[
49 'dbt-core=={}'.format(package_version),
50 'protobuf>=3.6.0,<3.12',
51 'google-cloud-core>=1.3.0,<1.4',
52 'google-cloud-bigquery>=1.25.0,<1.26.0',
53 'google-api-core>=1.16.0,<1.17.0',
54 'googleapis-common-protos>=1.6.0,<1.7.0',
55 'six>=1.14.0',
56 ],
57 zip_safe=False,
58 classifiers=[
59 'Development Status :: 5 - Production/Stable',
60
61 'License :: OSI Approved :: Apache Software License',
62
63 'Operating System :: Microsoft :: Windows',
64 'Operating System :: MacOS :: MacOS X',
65 'Operating System :: POSIX :: Linux',
66
67 'Programming Language :: Python :: 3.6',
68 'Programming Language :: Python :: 3.7',
69 'Programming Language :: Python :: 3.8',
70 ],
71 python_requires=">=3.6.2",
72 )
73
[end of plugins/bigquery/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/bigquery/setup.py b/plugins/bigquery/setup.py
--- a/plugins/bigquery/setup.py
+++ b/plugins/bigquery/setup.py
@@ -47,11 +47,13 @@
},
install_requires=[
'dbt-core=={}'.format(package_version),
- 'protobuf>=3.6.0,<3.12',
- 'google-cloud-core>=1.3.0,<1.4',
- 'google-cloud-bigquery>=1.25.0,<1.26.0',
- 'google-api-core>=1.16.0,<1.17.0',
- 'googleapis-common-protos>=1.6.0,<1.7.0',
+ 'protobuf>=3.13.0,<4',
+ # These are more tightly pinned, as they have a track record of
+ # breaking changes in minor releases.
+ 'google-cloud-core>=1.3.0,<1.5',
+ 'google-cloud-bigquery>=1.25.0,<2.4',
+ 'google-api-core>=1.16.0,<1.24',
+ 'googleapis-common-protos>=1.6.0,<1.53',
'six>=1.14.0',
],
zip_safe=False,
|
{"golden_diff": "diff --git a/plugins/bigquery/setup.py b/plugins/bigquery/setup.py\n--- a/plugins/bigquery/setup.py\n+++ b/plugins/bigquery/setup.py\n@@ -47,11 +47,13 @@\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n- 'protobuf>=3.6.0,<3.12',\n- 'google-cloud-core>=1.3.0,<1.4',\n- 'google-cloud-bigquery>=1.25.0,<1.26.0',\n- 'google-api-core>=1.16.0,<1.17.0',\n- 'googleapis-common-protos>=1.6.0,<1.7.0',\n+ 'protobuf>=3.13.0,<4',\n+ # These are more tightly pinned, as they have a track record of\n+ # breaking changes in minor releases.\n+ 'google-cloud-core>=1.3.0,<1.5',\n+ 'google-cloud-bigquery>=1.25.0,<2.4',\n+ 'google-api-core>=1.16.0,<1.24',\n+ 'googleapis-common-protos>=1.6.0,<1.53',\n 'six>=1.14.0',\n ],\n zip_safe=False,\n", "issue": "[BigQuery] Allow more recent versions of google-api-core?\n### Describe the feature\r\n\r\nCurrently dbt-bigquery has [an upper limit of 1.16 on `google-api-core`](https://github.com/fishtown-analytics/dbt/blob/v0.18.1b3/plugins/bigquery/setup.py#L53). That release is from Jan of this year.\r\n\r\nWould it be possible to loosen that?\r\n\r\nWhile it's clearly not dbt's responsibility for us to be able to install arbitrary packages, here's an example where we can't instally `google-cloud-bigquery-datatransfer` because of this restriction:\r\n\r\n```\r\n[SolverProblemError]\r\nBecause no versions of google-cloud-bigquery-datatransfer match >2.0.0,<3.0.0\r\n and google-cloud-bigquery-datatransfer (2.0.0) depends on google-api-core (>=1.22.2,<2.0.0dev), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) requires google-api-core (>=1.22.2,<2.0.0dev).\r\nAnd because dbt-bigquery (0.18.0) depends on google-api-core (>=1.16.0,<1.17.0), google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt-bigquery (0.18.0).\r\nAnd because dbt (0.18.0) depends on dbt-bigquery (0.18.0)\r\n and no versions of dbt match >0.18.0,<0.19.0, google-cloud-bigquery-datatransfer (>=2.0.0,<3.0.0) is incompatible with dbt (>=0.18.0,<0.19.0).\r\nSo, because {repo} depends on both dbt (^0.18.0) and google-cloud-bigquery-datatransfer (^2.0.0), version solving failed.\r\n```\r\n\r\nThanks as ever for the awesome product!\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\npackage_name = \"dbt-bigquery\"\npackage_version = \"0.19.0b1\"\ndescription = \"\"\"The bigquery adapter plugin for dbt (data build tool)\"\"\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/bigquery/dbt_project.yml',\n 'include/bigquery/sample_profiles.yml',\n 
'include/bigquery/macros/*.sql',\n 'include/bigquery/macros/**/*.sql',\n ]\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n 'protobuf>=3.6.0,<3.12',\n 'google-cloud-core>=1.3.0,<1.4',\n 'google-cloud-bigquery>=1.25.0,<1.26.0',\n 'google-api-core>=1.16.0,<1.17.0',\n 'googleapis-common-protos>=1.6.0,<1.7.0',\n 'six>=1.14.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "plugins/bigquery/setup.py"}]}
| 1,718 | 295 |
gh_patches_debug_20310
|
rasdani/github-patches
|
git_diff
|
scverse__scanpy-2928
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linkage 'Z' contains negative distances.
### Please make sure these conditions are met
- [X] I have checked that this issue has not already been reported.
- [X] I have confirmed this bug exists on the latest version of scanpy.
- [ ] (optional) I have confirmed this bug exists on the master branch of scanpy.
### What happened?
I'm encountering an error when running the sc.pl.rank_genes_groups_heatmap function in the scanpy package. The error message is "Linkage 'Z' contains negative distances." What could be causing this error and how can I fix it?
### Minimal code sample
```python
sc.pl.rank_genes_groups_heatmap(adata, n_genes=10, groupby='clusters',show_gene_labels=True,save='cluster.markers.heatmap.svg')
```
### Error output
```pytb
sc.pl.rank_genes_groups_heatmap(adata, n_genes=10, groupby=cluster,show_gene_labels=True,save=(id+'_processed.top10.cluster.markers.heatmap.svg'))
File "/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_tools/__init__.py", line 673, in rank_genes_groups_heatmap
return _rank_genes_groups_plot(
File "/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_tools/__init__.py", line 592, in _rank_genes_groups_plot
return heatmap(
File "/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_anndata.py", line 1087, in heatmap
dendro_data = _reorder_categories_after_dendrogram(
File "/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_anndata.py", line 2134, in _reorder_categories_after_dendrogram
key = _get_dendrogram_key(adata, dendrogram, groupby)
File "/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_anndata.py", line 2236, in _get_dendrogram_key
dendrogram(adata, groupby, key_added=dendrogram_key)
File "/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/tools/_dendrogram.py", line 143, in dendrogram
dendro_info = sch.dendrogram(z_var, labels=list(categories), no_plot=True)
File "/opt/conda/envs/st/lib/python3.8/site-packages/scipy/cluster/hierarchy.py", line 3301, in dendrogram
is_valid_linkage(Z, throw=True, name='Z')
File "/opt/conda/envs/st/lib/python3.8/site-packages/scipy/cluster/hierarchy.py", line 2280, in is_valid_linkage
raise ValueError('Linkage %scontains negative distances.' %
ValueError: Linkage 'Z' contains negative distances.
```
### Versions
<details>
```
-----
anndata 0.8.0
scanpy 1.9.3
-----
PIL 9.4.0
asciitree NA
beta_ufunc NA
binom_ufunc NA
cairocffi 1.6.1
cffi 1.15.1
cloudpickle 2.2.1
colorama 0.4.6
cycler 0.10.0
cython_runtime NA
cytoolz 0.12.0
dask 2022.11.1
dateutil 2.8.2
defusedxml 0.7.1
entrypoints 0.4
fasteners 0.17.3
fsspec 2023.6.0
google NA
h5py 3.7.0
igraph 0.9.11
jinja2 3.0.3
joblib 1.2.0
kiwisolver 1.4.4
leidenalg 0.8.10
llvmlite 0.39.1
louvain 0.7.1
lz4 4.3.2
markupsafe 2.1.3
matplotlib 3.5.2
mpl_toolkits NA
msgpack 1.0.5
natsort 8.2.0
nbinom_ufunc NA
numba 0.56.4
numcodecs 0.11.0
numexpr 2.8.4
numpy 1.21.6
packaging 23.1
pandas 1.5.3
pkg_resources NA
psutil 5.9.5
pyarrow 8.0.0
pycparser 2.21
pyparsing 3.1.0
pytz 2023.3
scipy 1.7.3
session_info 1.0.0
setuptools 68.0.0
setuptools_scm NA
six 1.16.0
sklearn 1.0.1
snappy NA
sphinxcontrib NA
tblib 1.7.0
texttable 1.6.7
threadpoolctl 3.2.0
tlz 0.12.0
toolz 0.12.0
typing_extensions NA
wcwidth 0.2.6
yaml 6.0
zarr 2.15.0
zipp NA
-----
Python 3.8.15 | packaged by conda-forge | (default, Nov 22 2022, 08:46:39) [GCC 10.4.0]
Linux-3.10.0-1127.el7.x86_64-x86_64-with-glibc2.10
-----
```
</details>
</issue>
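The distances fed to `scipy.cluster.hierarchy.linkage` here are `1 - corr`, so a Pearson correlation that drifts a hair above 1.0 through floating-point error produces a slightly negative distance, which `dendrogram` then rejects. A minimal sketch of the guard — clipping the correlation matrix before building the condensed distance matrix, with toy data standing in for the per-group means — is the same clip the accepted patch adds to `sc.tl.dendrogram`:

```python
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance

# Toy per-group mean table: rows are groups/clusters, columns are features.
rng = np.random.default_rng(0)
mean_df = pd.DataFrame(rng.random((5, 20)),
                       index=[f"cluster_{i}" for i in range(5)])

corr_matrix = mean_df.T.corr(method="pearson").clip(-1, 1)  # guard against |r| > 1
corr_condensed = distance.squareform(1 - corr_matrix)
z_var = sch.linkage(corr_condensed, method="complete")
dendro_info = sch.dendrogram(z_var, labels=list(mean_df.index), no_plot=True)
```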
<code>
[start of scanpy/tools/_dendrogram.py]
1 """
2 Computes a dendrogram based on a given categorical observation.
3 """
4
5 from __future__ import annotations
6
7 from typing import TYPE_CHECKING, Any
8
9 import pandas as pd
10 from pandas.api.types import CategoricalDtype
11
12 from .. import logging as logg
13 from .._compat import old_positionals
14 from .._utils import _doc_params
15 from ..neighbors._doc import doc_n_pcs, doc_use_rep
16 from ._utils import _choose_representation
17
18 if TYPE_CHECKING:
19 from collections.abc import Sequence
20
21 from anndata import AnnData
22
23
24 @old_positionals(
25 "n_pcs",
26 "use_rep",
27 "var_names",
28 "use_raw",
29 "cor_method",
30 "linkage_method",
31 "optimal_ordering",
32 "key_added",
33 "inplace",
34 )
35 @_doc_params(n_pcs=doc_n_pcs, use_rep=doc_use_rep)
36 def dendrogram(
37 adata: AnnData,
38 groupby: str | Sequence[str],
39 *,
40 n_pcs: int | None = None,
41 use_rep: str | None = None,
42 var_names: Sequence[str] | None = None,
43 use_raw: bool | None = None,
44 cor_method: str = "pearson",
45 linkage_method: str = "complete",
46 optimal_ordering: bool = False,
47 key_added: str | None = None,
48 inplace: bool = True,
49 ) -> dict[str, Any] | None:
50 """\
51 Computes a hierarchical clustering for the given `groupby` categories.
52
53 By default, the PCA representation is used unless `.X`
54 has less than 50 variables.
55
56 Alternatively, a list of `var_names` (e.g. genes) can be given.
57
58 Average values of either `var_names` or components are used
59 to compute a correlation matrix.
60
61 The hierarchical clustering can be visualized using
62 :func:`scanpy.pl.dendrogram` or multiple other visualizations that can
63 include a dendrogram: :func:`~scanpy.pl.matrixplot`,
64 :func:`~scanpy.pl.heatmap`, :func:`~scanpy.pl.dotplot`,
65 and :func:`~scanpy.pl.stacked_violin`.
66
67 .. note::
68 The computation of the hierarchical clustering is based on predefined
69 groups and not per cell. The correlation matrix is computed using by
70 default pearson but other methods are available.
71
72 Parameters
73 ----------
74 adata
75 Annotated data matrix
76 {n_pcs}
77 {use_rep}
78 var_names
79 List of var_names to use for computing the hierarchical clustering.
80 If `var_names` is given, then `use_rep` and `n_pcs` is ignored.
81 use_raw
82 Only when `var_names` is not None.
83 Use `raw` attribute of `adata` if present.
84 cor_method
85 correlation method to use.
86 Options are 'pearson', 'kendall', and 'spearman'
87 linkage_method
88 linkage method to use. See :func:`scipy.cluster.hierarchy.linkage`
89 for more information.
90 optimal_ordering
91 Same as the optimal_ordering argument of :func:`scipy.cluster.hierarchy.linkage`
92 which reorders the linkage matrix so that the distance between successive
93 leaves is minimal.
94 key_added
95 By default, the dendrogram information is added to
96 `.uns[f'dendrogram_{{groupby}}']`.
97 Notice that the `groupby` information is added to the dendrogram.
98 inplace
99 If `True`, adds dendrogram information to `adata.uns[key_added]`,
100 else this function returns the information.
101
102 Returns
103 -------
104 Returns `None` if `inplace=True`, else returns a `dict` with dendrogram information. Sets the following field if `inplace=True`:
105
106 `adata.uns[f'dendrogram_{{group_by}}' | key_added]` : :class:`dict`
107 Dendrogram information.
108
109 Examples
110 --------
111 >>> import scanpy as sc
112 >>> adata = sc.datasets.pbmc68k_reduced()
113 >>> sc.tl.dendrogram(adata, groupby='bulk_labels')
114 >>> sc.pl.dendrogram(adata, groupby='bulk_labels') # doctest: +SKIP
115 <Axes: >
116 >>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
117 >>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
118 """
119 if isinstance(groupby, str):
120 # if not a list, turn into a list
121 groupby = [groupby]
122 for group in groupby:
123 if group not in adata.obs_keys():
124 raise ValueError(
125 "groupby has to be a valid observation. "
126 f"Given value: {group}, valid observations: {adata.obs_keys()}"
127 )
128 if not isinstance(adata.obs[group].dtype, CategoricalDtype):
129 raise ValueError(
130 "groupby has to be a categorical observation. "
131 f"Given value: {group}, Column type: {adata.obs[group].dtype}"
132 )
133
134 if var_names is None:
135 rep_df = pd.DataFrame(
136 _choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs)
137 )
138 categorical = adata.obs[groupby[0]]
139 if len(groupby) > 1:
140 for group in groupby[1:]:
141 # create new category by merging the given groupby categories
142 categorical = (
143 categorical.astype(str) + "_" + adata.obs[group].astype(str)
144 ).astype("category")
145 categorical.name = "_".join(groupby)
146
147 rep_df.set_index(categorical, inplace=True)
148 categories = rep_df.index.categories
149 else:
150 gene_names = adata.raw.var_names if use_raw else adata.var_names
151 from ..plotting._anndata import _prepare_dataframe
152
153 categories, rep_df = _prepare_dataframe(
154 adata, gene_names, groupby, use_raw=use_raw
155 )
156
157 # aggregate values within categories using 'mean'
158 mean_df = (
159 rep_df.groupby(level=0, observed=True)
160 .mean()
161 .loc[categories] # Fixed ordering for pandas < 2
162 )
163
164 import scipy.cluster.hierarchy as sch
165 from scipy.spatial import distance
166
167 corr_matrix = mean_df.T.corr(method=cor_method)
168 corr_condensed = distance.squareform(1 - corr_matrix)
169 z_var = sch.linkage(
170 corr_condensed, method=linkage_method, optimal_ordering=optimal_ordering
171 )
172 dendro_info = sch.dendrogram(z_var, labels=list(categories), no_plot=True)
173
174 dat = dict(
175 linkage=z_var,
176 groupby=groupby,
177 use_rep=use_rep,
178 cor_method=cor_method,
179 linkage_method=linkage_method,
180 categories_ordered=dendro_info["ivl"],
181 categories_idx_ordered=dendro_info["leaves"],
182 dendrogram_info=dendro_info,
183 correlation_matrix=corr_matrix.values,
184 )
185
186 if inplace:
187 if key_added is None:
188 key_added = f'dendrogram_{"_".join(groupby)}'
189 logg.info(f"Storing dendrogram info using `.uns[{key_added!r}]`")
190 adata.uns[key_added] = dat
191 else:
192 return dat
193
[end of scanpy/tools/_dendrogram.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scanpy/tools/_dendrogram.py b/scanpy/tools/_dendrogram.py
--- a/scanpy/tools/_dendrogram.py
+++ b/scanpy/tools/_dendrogram.py
@@ -145,7 +145,7 @@
categorical.name = "_".join(groupby)
rep_df.set_index(categorical, inplace=True)
- categories = rep_df.index.categories
+ categories: pd.Index = rep_df.index.categories
else:
gene_names = adata.raw.var_names if use_raw else adata.var_names
from ..plotting._anndata import _prepare_dataframe
@@ -164,7 +164,7 @@
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
- corr_matrix = mean_df.T.corr(method=cor_method)
+ corr_matrix = mean_df.T.corr(method=cor_method).clip(-1, 1)
corr_condensed = distance.squareform(1 - corr_matrix)
z_var = sch.linkage(
corr_condensed, method=linkage_method, optimal_ordering=optimal_ordering
|
{"golden_diff": "diff --git a/scanpy/tools/_dendrogram.py b/scanpy/tools/_dendrogram.py\n--- a/scanpy/tools/_dendrogram.py\n+++ b/scanpy/tools/_dendrogram.py\n@@ -145,7 +145,7 @@\n categorical.name = \"_\".join(groupby)\n \n rep_df.set_index(categorical, inplace=True)\n- categories = rep_df.index.categories\n+ categories: pd.Index = rep_df.index.categories\n else:\n gene_names = adata.raw.var_names if use_raw else adata.var_names\n from ..plotting._anndata import _prepare_dataframe\n@@ -164,7 +164,7 @@\n import scipy.cluster.hierarchy as sch\n from scipy.spatial import distance\n \n- corr_matrix = mean_df.T.corr(method=cor_method)\n+ corr_matrix = mean_df.T.corr(method=cor_method).clip(-1, 1)\n corr_condensed = distance.squareform(1 - corr_matrix)\n z_var = sch.linkage(\n corr_condensed, method=linkage_method, optimal_ordering=optimal_ordering\n", "issue": " Linkage 'Z' contains negative distances.\n### Please make sure these conditions are met\n\n- [X] I have checked that this issue has not already been reported.\n- [X] I have confirmed this bug exists on the latest version of scanpy.\n- [ ] (optional) I have confirmed this bug exists on the master branch of scanpy.\n\n### What happened?\n\nI'm encountering an error when running the sc.pl.rank_genes_groups_heatmap function in the scanpy package. The error message is \"Linkage 'Z' contains negative distances.\" What could be causing this error and how can I fix it?\n\n### Minimal code sample\n\n```python\nsc.pl.rank_genes_groups_heatmap(adata, n_genes=10, groupby='clusters',show_gene_labels=True,save='cluster.markers.heatmap.svg')\n```\n\n\n### Error output\n\n```pytb\nsc.pl.rank_genes_groups_heatmap(adata, n_genes=10, groupby=cluster,show_gene_labels=True,save=(id+'_processed.top10.cluster.markers.heatmap.svg'))\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_tools/__init__.py\", line 673, in rank_genes_groups_heatmap\r\n return _rank_genes_groups_plot(\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_tools/__init__.py\", line 592, in _rank_genes_groups_plot\r\n return heatmap(\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_anndata.py\", line 1087, in heatmap\r\n dendro_data = _reorder_categories_after_dendrogram(\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_anndata.py\", line 2134, in _reorder_categories_after_dendrogram\r\n key = _get_dendrogram_key(adata, dendrogram, groupby)\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/plotting/_anndata.py\", line 2236, in _get_dendrogram_key\r\n dendrogram(adata, groupby, key_added=dendrogram_key)\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scanpy/tools/_dendrogram.py\", line 143, in dendrogram\r\n dendro_info = sch.dendrogram(z_var, labels=list(categories), no_plot=True)\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scipy/cluster/hierarchy.py\", line 3301, in dendrogram\r\n is_valid_linkage(Z, throw=True, name='Z')\r\n File \"/opt/conda/envs/st/lib/python3.8/site-packages/scipy/cluster/hierarchy.py\", line 2280, in is_valid_linkage\r\n raise ValueError('Linkage %scontains negative distances.' 
%\r\nValueError: Linkage 'Z' contains negative distances.\n```\n\n\n### Versions\n\n<details>\r\n\r\n```\r\n-----\r\nanndata 0.8.0\r\nscanpy 1.9.3\r\n-----\r\nPIL 9.4.0\r\nasciitree NA\r\nbeta_ufunc NA\r\nbinom_ufunc NA\r\ncairocffi 1.6.1\r\ncffi 1.15.1\r\ncloudpickle 2.2.1\r\ncolorama 0.4.6\r\ncycler 0.10.0\r\ncython_runtime NA\r\ncytoolz 0.12.0\r\ndask 2022.11.1\r\ndateutil 2.8.2\r\ndefusedxml 0.7.1\r\nentrypoints 0.4\r\nfasteners 0.17.3\r\nfsspec 2023.6.0\r\ngoogle NA\r\nh5py 3.7.0\r\nigraph 0.9.11\r\njinja2 3.0.3\r\njoblib 1.2.0\r\nkiwisolver 1.4.4\r\nleidenalg 0.8.10\r\nllvmlite 0.39.1\r\nlouvain 0.7.1\r\nlz4 4.3.2\r\nmarkupsafe 2.1.3\r\nmatplotlib 3.5.2\r\nmpl_toolkits NA\r\nmsgpack 1.0.5\r\nnatsort 8.2.0\r\nnbinom_ufunc NA\r\nnumba 0.56.4\r\nnumcodecs 0.11.0\r\nnumexpr 2.8.4\r\nnumpy 1.21.6\r\npackaging 23.1\r\npandas 1.5.3\r\npkg_resources NA\r\npsutil 5.9.5\r\npyarrow 8.0.0\r\npycparser 2.21\r\npyparsing 3.1.0\r\npytz 2023.3\r\nscipy 1.7.3\r\nsession_info 1.0.0\r\nsetuptools 68.0.0\r\nsetuptools_scm NA\r\nsix 1.16.0\r\nsklearn 1.0.1\r\nsnappy NA\r\nsphinxcontrib NA\r\ntblib 1.7.0\r\ntexttable 1.6.7\r\nthreadpoolctl 3.2.0\r\ntlz 0.12.0\r\ntoolz 0.12.0\r\ntyping_extensions NA\r\nwcwidth 0.2.6\r\nyaml 6.0\r\nzarr 2.15.0\r\nzipp NA\r\n-----\r\nPython 3.8.15 | packaged by conda-forge | (default, Nov 22 2022, 08:46:39) [GCC 10.4.0]\r\nLinux-3.10.0-1127.el7.x86_64-x86_64-with-glibc2.10\r\n-----\r\n\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "\"\"\"\nComputes a dendrogram based on a given categorical observation.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype\n\nfrom .. import logging as logg\nfrom .._compat import old_positionals\nfrom .._utils import _doc_params\nfrom ..neighbors._doc import doc_n_pcs, doc_use_rep\nfrom ._utils import _choose_representation\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from anndata import AnnData\n\n\n@old_positionals(\n \"n_pcs\",\n \"use_rep\",\n \"var_names\",\n \"use_raw\",\n \"cor_method\",\n \"linkage_method\",\n \"optimal_ordering\",\n \"key_added\",\n \"inplace\",\n)\n@_doc_params(n_pcs=doc_n_pcs, use_rep=doc_use_rep)\ndef dendrogram(\n adata: AnnData,\n groupby: str | Sequence[str],\n *,\n n_pcs: int | None = None,\n use_rep: str | None = None,\n var_names: Sequence[str] | None = None,\n use_raw: bool | None = None,\n cor_method: str = \"pearson\",\n linkage_method: str = \"complete\",\n optimal_ordering: bool = False,\n key_added: str | None = None,\n inplace: bool = True,\n) -> dict[str, Any] | None:\n \"\"\"\\\n Computes a hierarchical clustering for the given `groupby` categories.\n\n By default, the PCA representation is used unless `.X`\n has less than 50 variables.\n\n Alternatively, a list of `var_names` (e.g. genes) can be given.\n\n Average values of either `var_names` or components are used\n to compute a correlation matrix.\n\n The hierarchical clustering can be visualized using\n :func:`scanpy.pl.dendrogram` or multiple other visualizations that can\n include a dendrogram: :func:`~scanpy.pl.matrixplot`,\n :func:`~scanpy.pl.heatmap`, :func:`~scanpy.pl.dotplot`,\n and :func:`~scanpy.pl.stacked_violin`.\n\n .. note::\n The computation of the hierarchical clustering is based on predefined\n groups and not per cell. 
The correlation matrix is computed using by\n default pearson but other methods are available.\n\n Parameters\n ----------\n adata\n Annotated data matrix\n {n_pcs}\n {use_rep}\n var_names\n List of var_names to use for computing the hierarchical clustering.\n If `var_names` is given, then `use_rep` and `n_pcs` is ignored.\n use_raw\n Only when `var_names` is not None.\n Use `raw` attribute of `adata` if present.\n cor_method\n correlation method to use.\n Options are 'pearson', 'kendall', and 'spearman'\n linkage_method\n linkage method to use. See :func:`scipy.cluster.hierarchy.linkage`\n for more information.\n optimal_ordering\n Same as the optimal_ordering argument of :func:`scipy.cluster.hierarchy.linkage`\n which reorders the linkage matrix so that the distance between successive\n leaves is minimal.\n key_added\n By default, the dendrogram information is added to\n `.uns[f'dendrogram_{{groupby}}']`.\n Notice that the `groupby` information is added to the dendrogram.\n inplace\n If `True`, adds dendrogram information to `adata.uns[key_added]`,\n else this function returns the information.\n\n Returns\n -------\n Returns `None` if `inplace=True`, else returns a `dict` with dendrogram information. Sets the following field if `inplace=True`:\n\n `adata.uns[f'dendrogram_{{group_by}}' | key_added]` : :class:`dict`\n Dendrogram information.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.dendrogram(adata, groupby='bulk_labels')\n >>> sc.pl.dendrogram(adata, groupby='bulk_labels') # doctest: +SKIP\n <Axes: >\n >>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']\n >>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)\n \"\"\"\n if isinstance(groupby, str):\n # if not a list, turn into a list\n groupby = [groupby]\n for group in groupby:\n if group not in adata.obs_keys():\n raise ValueError(\n \"groupby has to be a valid observation. \"\n f\"Given value: {group}, valid observations: {adata.obs_keys()}\"\n )\n if not isinstance(adata.obs[group].dtype, CategoricalDtype):\n raise ValueError(\n \"groupby has to be a categorical observation. 
\"\n f\"Given value: {group}, Column type: {adata.obs[group].dtype}\"\n )\n\n if var_names is None:\n rep_df = pd.DataFrame(\n _choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs)\n )\n categorical = adata.obs[groupby[0]]\n if len(groupby) > 1:\n for group in groupby[1:]:\n # create new category by merging the given groupby categories\n categorical = (\n categorical.astype(str) + \"_\" + adata.obs[group].astype(str)\n ).astype(\"category\")\n categorical.name = \"_\".join(groupby)\n\n rep_df.set_index(categorical, inplace=True)\n categories = rep_df.index.categories\n else:\n gene_names = adata.raw.var_names if use_raw else adata.var_names\n from ..plotting._anndata import _prepare_dataframe\n\n categories, rep_df = _prepare_dataframe(\n adata, gene_names, groupby, use_raw=use_raw\n )\n\n # aggregate values within categories using 'mean'\n mean_df = (\n rep_df.groupby(level=0, observed=True)\n .mean()\n .loc[categories] # Fixed ordering for pandas < 2\n )\n\n import scipy.cluster.hierarchy as sch\n from scipy.spatial import distance\n\n corr_matrix = mean_df.T.corr(method=cor_method)\n corr_condensed = distance.squareform(1 - corr_matrix)\n z_var = sch.linkage(\n corr_condensed, method=linkage_method, optimal_ordering=optimal_ordering\n )\n dendro_info = sch.dendrogram(z_var, labels=list(categories), no_plot=True)\n\n dat = dict(\n linkage=z_var,\n groupby=groupby,\n use_rep=use_rep,\n cor_method=cor_method,\n linkage_method=linkage_method,\n categories_ordered=dendro_info[\"ivl\"],\n categories_idx_ordered=dendro_info[\"leaves\"],\n dendrogram_info=dendro_info,\n correlation_matrix=corr_matrix.values,\n )\n\n if inplace:\n if key_added is None:\n key_added = f'dendrogram_{\"_\".join(groupby)}'\n logg.info(f\"Storing dendrogram info using `.uns[{key_added!r}]`\")\n adata.uns[key_added] = dat\n else:\n return dat\n", "path": "scanpy/tools/_dendrogram.py"}]}
| 4,027 | 248 |
gh_patches_debug_10622
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6143
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Require minimum length for "explanation" field in BCD signals
**Summary**
_What should be changed?_
A minimum length of 10 characters should be required for the "explanation" field in BCD signals
**Rationale**
_What problems would this solve?_
Less spam submissions
**Audience**
_Who would use this changed feature?_
BCD maintainers
**Proposal**
_What would users see and do? What would happen as a result?_
Users would be required to enter a meaningful explanation and hopefully refrain from submitting "fehfs", "test", and other garbage.
**Additional context**
_Is there anything else we should know?_
Was discussed in https://github.com/mdn/sprints/issues/2289
</issue>
<code>
[start of kuma/api/v1/serializers.py]
1 from rest_framework import exceptions
2 from rest_framework import serializers
3
4 from kuma.wiki.models import BCSignal, Document
5
6
7 class BCSignalSerializer(serializers.Serializer):
8 feature = serializers.CharField(max_length=255)
9 browsers = serializers.CharField(max_length=255)
10 slug = serializers.CharField(max_length=255)
11 locale = serializers.CharField(max_length=7)
12 explanation = serializers.CharField(allow_blank=True, max_length=1000)
13 supporting_material = serializers.CharField(
14 allow_blank=True, required=False, max_length=1000
15 )
16
17 def create(self, validated_data):
18 slug = validated_data.pop("slug")
19 locale = validated_data.pop("locale")
20 document = Document.objects.filter(slug=slug, locale=locale).first()
21
22 if document:
23 return BCSignal.objects.create(document=document, **validated_data)
24 raise exceptions.ValidationError("Document not found")
25
[end of kuma/api/v1/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/api/v1/serializers.py b/kuma/api/v1/serializers.py
--- a/kuma/api/v1/serializers.py
+++ b/kuma/api/v1/serializers.py
@@ -9,7 +9,11 @@
browsers = serializers.CharField(max_length=255)
slug = serializers.CharField(max_length=255)
locale = serializers.CharField(max_length=7)
- explanation = serializers.CharField(allow_blank=True, max_length=1000)
+ explanation = serializers.CharField(
+ # Make sure these match the constants in bcd-signal.jsx
+ max_length=1000,
+ min_length=10,
+ )
supporting_material = serializers.CharField(
allow_blank=True, required=False, max_length=1000
)
|
{"golden_diff": "diff --git a/kuma/api/v1/serializers.py b/kuma/api/v1/serializers.py\n--- a/kuma/api/v1/serializers.py\n+++ b/kuma/api/v1/serializers.py\n@@ -9,7 +9,11 @@\n browsers = serializers.CharField(max_length=255)\n slug = serializers.CharField(max_length=255)\n locale = serializers.CharField(max_length=7)\n- explanation = serializers.CharField(allow_blank=True, max_length=1000)\n+ explanation = serializers.CharField(\n+ # Make sure these match the constants in bcd-signal.jsx\n+ max_length=1000,\n+ min_length=10,\n+ )\n supporting_material = serializers.CharField(\n allow_blank=True, required=False, max_length=1000\n )\n", "issue": "Require minium length for \"explanation\" field in BCD signals\n**Summary**\r\n_What should be changed?_\r\nA minimum length of 10 characters should be required for the \"explanation\" field in BCD signals\r\n\r\n**Rationale**\r\n_What problems would this solve?_\r\nLess spam submissions\r\n\r\n**Audience**\r\n_Who would use this changed feature?_\r\nBCD maintainers\r\n\r\n**Proposal**\r\n_What would users see and do? What would happen as a result?_\r\nUsers would be required to enter a meaningful explanation and hopefully refrain from submitting \"fehfs\", \"test\", and other garbage.\r\n\r\n**Additional context**\r\n_Is there anything else we should know?_\r\nWas discussed in https://github.com/mdn/sprints/issues/2289\n", "before_files": [{"content": "from rest_framework import exceptions\nfrom rest_framework import serializers\n\nfrom kuma.wiki.models import BCSignal, Document\n\n\nclass BCSignalSerializer(serializers.Serializer):\n feature = serializers.CharField(max_length=255)\n browsers = serializers.CharField(max_length=255)\n slug = serializers.CharField(max_length=255)\n locale = serializers.CharField(max_length=7)\n explanation = serializers.CharField(allow_blank=True, max_length=1000)\n supporting_material = serializers.CharField(\n allow_blank=True, required=False, max_length=1000\n )\n\n def create(self, validated_data):\n slug = validated_data.pop(\"slug\")\n locale = validated_data.pop(\"locale\")\n document = Document.objects.filter(slug=slug, locale=locale).first()\n\n if document:\n return BCSignal.objects.create(document=document, **validated_data)\n raise exceptions.ValidationError(\"Document not found\")\n", "path": "kuma/api/v1/serializers.py"}]}
| 938 | 181 |
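The golden diff in the kuma record above replaces `allow_blank=True` with a `min_length=10` constraint on the `explanation` field. A hedged, standalone approximation of that behaviour using Django REST Framework's `CharField` (it assumes Django and djangorestframework are installed and configures throwaway in-memory settings only so the serializer can run outside a project; it is not the kuma code itself):

```python
# Hedged approximation of the patched validator, not the actual kuma code.
import django
from django.conf import settings

if not settings.configured:
    settings.configure()   # minimal in-memory settings, no database needed
    django.setup()

from rest_framework import serializers


class ExplanationSerializer(serializers.Serializer):
    # Mirrors the patched field: at most 1000 and at least 10 characters.
    explanation = serializers.CharField(max_length=1000, min_length=10)


spam = ExplanationSerializer(data={"explanation": "fehfs"})
print(spam.is_valid())                 # False -- shorter than 10 characters
print(spam.errors.get("explanation"))

real = ExplanationSerializer(data={"explanation": "Safari 17 added support."})
print(real.is_valid())                 # True
```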
gh_patches_debug_9125
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-10039
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] mimalloc/1.7.5
### Package Details
* Package Name/Version: **mimalloc/1.7.5**
* Changelog: **https://github.com/microsoft/mimalloc/releases/tag/v1.7.5**
The above-mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.
</issue>
<code>
[start of recipes/mimalloc/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import shutil
5 import textwrap
6
7 required_conan_version = ">=1.43.0"
8
9
10 class MimallocConan(ConanFile):
11 name = "mimalloc"
12 license = "MIT"
13 url = "https://github.com/conan-io/conan-center-index"
14 homepage = "https://github.com/microsoft/mimalloc"
15 description = "mimalloc is a compact general purpose allocator with excellent performance."
16 topics = ("mimalloc", "allocator", "performance", "microsoft")
17
18 settings = "os", "arch", "compiler", "build_type"
19 options = {
20 "shared": [True, False],
21 "fPIC": [True, False],
22 "secure": [True, False],
23 "override": [True, False],
24 "inject": [True, False],
25 "single_object": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 "secure": False,
31 "override": False,
32 "inject": False,
33 "single_object": False,
34 }
35
36 generators = "cmake"
37 _cmake = None
38
39 @property
40 def _source_subfolder(self):
41 return "source_subfolder"
42
43 @property
44 def _build_subfolder(self):
45 return "build_subfolder"
46
47 @property
48 def _compilers_minimum_version(self):
49 return {
50 "gcc": "7",
51 "Visual Studio": "15",
52 "clang": "5",
53 "apple-clang": "10",
54 }
55
56 def export_sources(self):
57 self.copy("CMakeLists.txt")
58 for patch in self.conan_data.get("patches", {}).get(self.version, []):
59 self.copy(patch["patch_file"])
60
61 def config_options(self):
62 if self.settings.os == "Windows":
63 del self.options.fPIC
64
65 # single_object and inject are options
66 # only when overriding on Unix-like platforms:
67 if self.settings.compiler == "Visual Studio":
68 del self.options.single_object
69 del self.options.inject
70
71 def configure(self):
72 if self.options.shared:
73 del self.options.fPIC
74
75 # single_object is valid only for static
76 # override:
77 if self.options.get_safe("single_object"):
78 del self.options.single_object
79
80 # inject is valid only for Unix-like dynamic override:
81 if not self.options.shared and self.options.get_safe("inject"):
82 del self.options.inject
83
84 # single_object and inject are valid only when
85 # overriding on Unix-like platforms:
86 if not self.options.override:
87 if self.options.get_safe("single_object"):
88 del self.options.single_object
89 if self.options.get_safe("inject"):
90 del self.options.inject
91
92 def validate(self):
93 # Shared overriding requires dynamic runtime for MSVC:
94 if self.options.override and \
95 self.options.shared and \
96 self.settings.compiler == "Visual Studio" and \
97 "MT" in str(self.settings.compiler.runtime):
98 raise ConanInvalidConfiguration(
99 "Dynamic runtime (MD/MDd) is required when using mimalloc as a shared library for override")
100
101 if self.options.override and \
102 self.options.get_safe("single_object") and \
103 self.options.get_safe("inject"):
104 raise ConanInvalidConfiguration("Single object is incompatible with library injection");
105
106 if self.settings.compiler.get_safe("cppstd"):
107 tools.check_min_cppstd(self, "17")
108
109 minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
110
111 if not minimum_version:
112 self.output.warn("mimalloc requires C++17. Your compiler is unknown. Assuming it supports C++17.")
113 elif tools.Version(self.settings.compiler.version) < minimum_version:
114 raise ConanInvalidConfiguration("mimalloc requires a compiler that supports at least C++17")
115
116 def source(self):
117 tools.get(**self.conan_data["sources"][self.version],
118 destination=self._source_subfolder, strip_root=True)
119
120 def _configure_cmake(self):
121 if self._cmake:
122 return self._cmake
123 self._cmake = CMake(self)
124 if self._cmake.is_multi_configuration:
125 self._cmake.definitions["CMAKE_BUILD_TYPE"] = self.settings.build_type
126 self._cmake.definitions["MI_BUILD_TESTS"] = "OFF"
127 self._cmake.definitions["MI_BUILD_SHARED"] = self.options.shared
128 self._cmake.definitions["MI_BUILD_STATIC"] = not self.options.shared
129 self._cmake.definitions["MI_BUILD_OBJECT"] = self.options.get_safe("single_object", False)
130 self._cmake.definitions["MI_OVERRIDE"] = "ON" if self.options.override else "OFF"
131 self._cmake.definitions["MI_SECURE"] = "ON" if self.options.secure else "OFF"
132 if tools.Version(self.version) >= "1.7.0":
133 self._cmake.definitions["MI_INSTALL_TOPLEVEL"] = "ON"
134 self._cmake.configure(build_folder=self._build_subfolder)
135 return self._cmake
136
137 def build(self):
138 for patch in self.conan_data.get("patches", {}).get(self.version, []):
139 tools.patch(**patch)
140 if self.settings.compiler == "Visual Studio" and self.settings.arch == "x86":
141 tools.replace_path_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
142 "mimalloc-redirect.lib", "mimalloc-redirect32.lib")
143 with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
144 cmake = self._configure_cmake()
145 cmake.build()
146
147 def package(self):
148 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
149 with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
150 cmake = self._configure_cmake()
151 cmake.install()
152
153 tools.rmdir(os.path.join(self.package_folder, "cmake"))
154 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
155
156 if self.options.get_safe("single_object"):
157 tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"),
158 "*.a")
159 shutil.move(os.path.join(self.package_folder, self._obj_name + ".o"),
160 os.path.join(self.package_folder, "lib"))
161 shutil.copy(os.path.join(self.package_folder, "lib", self._obj_name + ".o"),
162 os.path.join(self.package_folder, "lib", self._obj_name))
163
164 if self.settings.os == "Windows" and self.options.shared:
165 if self.settings.arch == "x86_64":
166 self.copy("mimalloc-redirect.dll", src=os.path.join(self._source_subfolder, "bin"),
167 dst="bin")
168 elif self.settings.arch == "x86":
169 self.copy("mimalloc-redirect32.dll", src=os.path.join(self._source_subfolder, "bin"),
170 dst="bin")
171
172 tools.rmdir(os.path.join(self.package_folder, "share"))
173
174 cmake_target = "mimalloc" if self.options.shared else "mimalloc-static"
175 self._create_cmake_module_alias_targets(
176 os.path.join(self.package_folder, self._module_file_rel_path),
177 {cmake_target: "mimalloc::mimalloc"}
178 )
179
180 @staticmethod
181 def _create_cmake_module_alias_targets(module_file, targets):
182 content = ""
183 for alias, aliased in targets.items():
184 content += textwrap.dedent("""\
185 if(TARGET {aliased} AND NOT TARGET {alias})
186 add_library({alias} INTERFACE IMPORTED)
187 set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
188 endif()
189 """.format(alias=alias, aliased=aliased))
190 tools.save(module_file, content)
191
192 @property
193 def _module_subfolder(self):
194 return os.path.join("lib", "cmake")
195
196 @property
197 def _module_file_rel_path(self):
198 return os.path.join(self._module_subfolder,
199 "conan-official-{}-targets.cmake".format(self.name))
200
201 @property
202 def _obj_name(self):
203 name = "mimalloc"
204 if self.options.secure:
205 name += "-secure"
206 if self.settings.build_type not in ("Release", "RelWithDebInfo", "MinSizeRel"):
207 name += "-{}".format(str(self.settings.build_type).lower())
208 return name
209
210 @property
211 def _lib_name(self):
212 name = "mimalloc" if self.settings.os == "Windows" else "libmimalloc"
213
214 if self.settings.os == "Windows" and not self.options.shared:
215 name += "-static"
216 if self.options.secure:
217 name += "-secure"
218 if self.settings.build_type not in ("Release", "RelWithDebInfo", "MinSizeRel"):
219 name += "-{}".format(str(self.settings.build_type).lower())
220 return name
221
222 def package_info(self):
223 self.cpp_info.set_property("cmake_file_name", "mimalloc")
224 self.cpp_info.set_property("cmake_target_name", "mimalloc" if self.options.shared else "mimalloc-static")
225
226 self.cpp_info.names["cmake_find_package"] = "mimalloc"
227 self.cpp_info.names["cmake_find_package_multi"] = "mimalloc"
228 self.cpp_info.builddirs.append(self._module_subfolder)
229 self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
230 self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
231
232 if self.options.get_safe("inject"):
233 self.cpp_info.includedirs = []
234 self.cpp_info.libdirs = []
235 self.cpp_info.resdirs = []
236 return
237
238 if self.options.get_safe("single_object"):
239 obj_ext = "o"
240 obj_file = "{}.{}".format(self._obj_name, obj_ext)
241 obj_path = os.path.join(self.package_folder, "lib", obj_file)
242 self.cpp_info.exelinkflags = [obj_path]
243 self.cpp_info.sharedlinkflags = [obj_path]
244 self.cpp_info.libdirs = []
245 self.cpp_info.bindirs = []
246 else:
247 self.cpp_info.libs = tools.collect_libs(self)
248
249 if self.settings.os == "Linux":
250 self.cpp_info.system_libs.append("pthread")
251 if not self.options.shared:
252 if self.settings.os == "Windows":
253 self.cpp_info.system_libs.extend(["psapi", "shell32", "user32", "bcrypt"])
254 elif self.settings.os == "Linux":
255 self.cpp_info.system_libs.append("rt")
256
[end of recipes/mimalloc/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/mimalloc/all/conanfile.py b/recipes/mimalloc/all/conanfile.py
--- a/recipes/mimalloc/all/conanfile.py
+++ b/recipes/mimalloc/all/conanfile.py
@@ -101,7 +101,7 @@
if self.options.override and \
self.options.get_safe("single_object") and \
self.options.get_safe("inject"):
- raise ConanInvalidConfiguration("Single object is incompatible with library injection");
+ raise ConanInvalidConfiguration("Single object is incompatible with library injection")
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, "17")
|
{"golden_diff": "diff --git a/recipes/mimalloc/all/conanfile.py b/recipes/mimalloc/all/conanfile.py\n--- a/recipes/mimalloc/all/conanfile.py\n+++ b/recipes/mimalloc/all/conanfile.py\n@@ -101,7 +101,7 @@\n if self.options.override and \\\n self.options.get_safe(\"single_object\") and \\\n self.options.get_safe(\"inject\"):\n- raise ConanInvalidConfiguration(\"Single object is incompatible with library injection\");\n+ raise ConanInvalidConfiguration(\"Single object is incompatible with library injection\")\n \n if self.settings.compiler.get_safe(\"cppstd\"):\n tools.check_min_cppstd(self, \"17\")\n", "issue": "[request] mimalloc/1.7.5\n### Package Details\r\n * Package Name/Version: **mimalloc/1.7.5**\r\n * Changelog: **https://github.com/microsoft/mimalloc/releases/tag/v1.7.5**\r\n\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport shutil\nimport textwrap\n\nrequired_conan_version = \">=1.43.0\"\n\n\nclass MimallocConan(ConanFile):\n name = \"mimalloc\"\n license = \"MIT\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/microsoft/mimalloc\"\n description = \"mimalloc is a compact general purpose allocator with excellent performance.\"\n topics = (\"mimalloc\", \"allocator\", \"performance\", \"microsoft\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"secure\": [True, False],\n \"override\": [True, False],\n \"inject\": [True, False],\n \"single_object\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"secure\": False,\n \"override\": False,\n \"inject\": False,\n \"single_object\": False,\n }\n\n generators = \"cmake\"\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"gcc\": \"7\",\n \"Visual Studio\": \"15\",\n \"clang\": \"5\",\n \"apple-clang\": \"10\",\n }\n\n def export_sources(self):\n self.copy(\"CMakeLists.txt\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n self.copy(patch[\"patch_file\"])\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n # single_object and inject are options\n # only when overriding on Unix-like platforms:\n if self.settings.compiler == \"Visual Studio\":\n del self.options.single_object\n del self.options.inject\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n # single_object is valid only for static\n # override:\n if self.options.get_safe(\"single_object\"):\n del self.options.single_object\n\n # inject is valid only for Unix-like dynamic override:\n if not self.options.shared and self.options.get_safe(\"inject\"):\n del self.options.inject\n\n # single_object and inject are valid only when\n # overriding on Unix-like platforms:\n if not self.options.override:\n if self.options.get_safe(\"single_object\"):\n del self.options.single_object\n if self.options.get_safe(\"inject\"):\n del self.options.inject\n\n def validate(self):\n # Shared overriding requires dynamic runtime for MSVC:\n if self.options.override and \\\n self.options.shared and \\\n self.settings.compiler == 
\"Visual Studio\" and \\\n \"MT\" in str(self.settings.compiler.runtime):\n raise ConanInvalidConfiguration(\n \"Dynamic runtime (MD/MDd) is required when using mimalloc as a shared library for override\")\n\n if self.options.override and \\\n self.options.get_safe(\"single_object\") and \\\n self.options.get_safe(\"inject\"):\n raise ConanInvalidConfiguration(\"Single object is incompatible with library injection\");\n\n if self.settings.compiler.get_safe(\"cppstd\"):\n tools.check_min_cppstd(self, \"17\")\n\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n\n if not minimum_version:\n self.output.warn(\"mimalloc requires C++17. Your compiler is unknown. Assuming it supports C++17.\")\n elif tools.Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\"mimalloc requires a compiler that supports at least C++17\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n if self._cmake.is_multi_configuration:\n self._cmake.definitions[\"CMAKE_BUILD_TYPE\"] = self.settings.build_type\n self._cmake.definitions[\"MI_BUILD_TESTS\"] = \"OFF\"\n self._cmake.definitions[\"MI_BUILD_SHARED\"] = self.options.shared\n self._cmake.definitions[\"MI_BUILD_STATIC\"] = not self.options.shared\n self._cmake.definitions[\"MI_BUILD_OBJECT\"] = self.options.get_safe(\"single_object\", False)\n self._cmake.definitions[\"MI_OVERRIDE\"] = \"ON\" if self.options.override else \"OFF\"\n self._cmake.definitions[\"MI_SECURE\"] = \"ON\" if self.options.secure else \"OFF\"\n if tools.Version(self.version) >= \"1.7.0\":\n self._cmake.definitions[\"MI_INSTALL_TOPLEVEL\"] = \"ON\"\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n if self.settings.compiler == \"Visual Studio\" and self.settings.arch == \"x86\":\n tools.replace_path_in_file(os.path.join(self._source_subfolder, \"CMakeLists.txt\"),\n \"mimalloc-redirect.lib\", \"mimalloc-redirect32.lib\")\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n cmake = self._configure_cmake()\n cmake.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n if self.options.get_safe(\"single_object\"):\n tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"),\n \"*.a\")\n shutil.move(os.path.join(self.package_folder, self._obj_name + \".o\"),\n os.path.join(self.package_folder, \"lib\"))\n shutil.copy(os.path.join(self.package_folder, \"lib\", self._obj_name + \".o\"),\n os.path.join(self.package_folder, \"lib\", self._obj_name))\n\n if self.settings.os == \"Windows\" and self.options.shared:\n if self.settings.arch == \"x86_64\":\n self.copy(\"mimalloc-redirect.dll\", src=os.path.join(self._source_subfolder, \"bin\"),\n dst=\"bin\")\n elif self.settings.arch == \"x86\":\n self.copy(\"mimalloc-redirect32.dll\", src=os.path.join(self._source_subfolder, \"bin\"),\n dst=\"bin\")\n\n 
tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n cmake_target = \"mimalloc\" if self.options.shared else \"mimalloc-static\"\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {cmake_target: \"mimalloc::mimalloc\"}\n )\n\n @staticmethod\n def _create_cmake_module_alias_targets(module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\".format(alias=alias, aliased=aliased))\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(self._module_subfolder,\n \"conan-official-{}-targets.cmake\".format(self.name))\n\n @property\n def _obj_name(self):\n name = \"mimalloc\"\n if self.options.secure:\n name += \"-secure\"\n if self.settings.build_type not in (\"Release\", \"RelWithDebInfo\", \"MinSizeRel\"):\n name += \"-{}\".format(str(self.settings.build_type).lower())\n return name\n\n @property\n def _lib_name(self):\n name = \"mimalloc\" if self.settings.os == \"Windows\" else \"libmimalloc\"\n\n if self.settings.os == \"Windows\" and not self.options.shared:\n name += \"-static\"\n if self.options.secure:\n name += \"-secure\"\n if self.settings.build_type not in (\"Release\", \"RelWithDebInfo\", \"MinSizeRel\"):\n name += \"-{}\".format(str(self.settings.build_type).lower())\n return name\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"mimalloc\")\n self.cpp_info.set_property(\"cmake_target_name\", \"mimalloc\" if self.options.shared else \"mimalloc-static\")\n\n self.cpp_info.names[\"cmake_find_package\"] = \"mimalloc\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"mimalloc\"\n self.cpp_info.builddirs.append(self._module_subfolder)\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n\n if self.options.get_safe(\"inject\"):\n self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n self.cpp_info.resdirs = []\n return\n\n if self.options.get_safe(\"single_object\"):\n obj_ext = \"o\"\n obj_file = \"{}.{}\".format(self._obj_name, obj_ext)\n obj_path = os.path.join(self.package_folder, \"lib\", obj_file)\n self.cpp_info.exelinkflags = [obj_path]\n self.cpp_info.sharedlinkflags = [obj_path]\n self.cpp_info.libdirs = []\n self.cpp_info.bindirs = []\n else:\n self.cpp_info.libs = tools.collect_libs(self)\n\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n if not self.options.shared:\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs.extend([\"psapi\", \"shell32\", \"user32\", \"bcrypt\"])\n elif self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"rt\")\n", "path": "recipes/mimalloc/all/conanfile.py"}]}
| 3,641 | 146 |
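The recipe in the mimalloc record above already gates `MI_INSTALL_TOPLEVEL` on `tools.Version(self.version) >= "1.7.0"`, so a new 1.7.5 entry would pick that definition up automatically once the version is added to the recipe's data. A rough sketch of how that gate behaves, with Conan's `tools.Version` approximated by a plain tuple comparison (illustrative only, not Conan's implementation):

```python
# Illustration only: Conan's tools.Version is approximated with an int tuple
# to show how the recipe's existing gate would treat a new 1.7.5 entry.
def parse(version: str) -> tuple:
    return tuple(int(part) for part in version.split("."))


def cmake_definitions(version: str, shared: bool, secure: bool) -> dict:
    defs = {
        "MI_BUILD_SHARED": shared,
        "MI_BUILD_STATIC": not shared,
        "MI_SECURE": "ON" if secure else "OFF",
    }
    # Mirrors `if tools.Version(self.version) >= "1.7.0"` in the recipe above.
    if parse(version) >= parse("1.7.0"):
        defs["MI_INSTALL_TOPLEVEL"] = "ON"
    return defs


print(cmake_definitions("1.7.5", shared=False, secure=False))
print(cmake_definitions("1.6.7", shared=False, secure=False))
```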
gh_patches_debug_12198
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-2549
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nistats: dependence between function calls – strange bug may affect reproducibility
Hi,
I've encountered very strange behavior in two functions – `map_threshold` from the `nistats.thresholding` module and `get_clusters_table` from the `nistats.reporting` module. It seems like the threshold value returned by `map_threshold` depends on the last output value(s) of the `get_clusters_table` function or some internal function variables. The impact seems severe (the higher the `cluster_threshold` value passed to `get_clusters_table`, the higher the threshold returned by a subsequent call of `map_threshold`).
Here is a simple demonstration: https://github.com/kbonna/decidenet/blob/master/activation_analysis/nistats_map_threshold_bug.ipynb
I've tried to find the source of the problem by studying the code of both functions, but everything seems to be ok at first glance.
Am I missing something simple?
</issue>
<code>
[start of nilearn/reporting/_get_clusters_table.py]
1 """
2 This module implements plotting functions useful to report analysis results.
3
4 Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
5 """
6
7 import warnings
8 from string import ascii_lowercase
9
10 import numpy as np
11 import pandas as pd
12 import nibabel as nib
13 from scipy import ndimage
14
15 from nilearn.image import get_data
16 from nilearn.image.resampling import coord_transform
17
18
19 def _local_max(data, affine, min_distance):
20 """Find all local maxima of the array, separated by at least min_distance.
21 Adapted from https://stackoverflow.com/a/22631583/2589328
22
23 Parameters
24 ----------
25 data : array_like
26 3D array of with masked values for cluster.
27
28 affine: np.ndarray
29 Square matrix specifying the position of the image array data
30 in a reference space.
31
32 min_distance : `int`
33 Minimum distance between local maxima in ``data``, in terms of mm.
34
35 Returns
36 -------
37 ijk : `numpy.ndarray`
38 (n_foci, 3) array of local maxima indices for cluster.
39
40 vals : `numpy.ndarray`
41 (n_foci,) array of values from data at ijk.
42 """
43 ijk, vals = _identify_subpeaks(data)
44 xyz, ijk, vals = _sort_subpeaks(ijk, vals, affine)
45 ijk, vals = _pare_subpeaks(xyz, ijk, vals, min_distance)
46 return ijk, vals
47
48
49 def _identify_subpeaks(data):
50 # Initial identification of subpeaks with minimal minimum distance
51 data_max = ndimage.filters.maximum_filter(data, 3)
52 maxima = (data == data_max)
53 data_min = ndimage.filters.minimum_filter(data, 3)
54 diff = ((data_max - data_min) > 0)
55 maxima[diff == 0] = 0
56
57 labeled, n_subpeaks = ndimage.label(maxima)
58 labels_index = range(1, n_subpeaks + 1)
59 ijk = np.array(ndimage.center_of_mass(data, labeled, labels_index))
60 ijk = np.round(ijk).astype(int)
61 vals = np.apply_along_axis(arr=ijk, axis=1, func1d=_get_val,
62 input_arr=data)
63 return ijk, vals
64
65
66 def _sort_subpeaks(ijk, vals, affine):
67 # Sort subpeaks in cluster in descending order of stat value
68 order = (-vals).argsort()
69 vals = vals[order]
70 ijk = ijk[order, :]
71 xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm
72 return xyz, ijk, vals
73
74
75 def _pare_subpeaks(xyz, ijk, vals, min_distance):
76 # Reduce list of subpeaks based on distance
77 keep_idx = np.ones(xyz.shape[0]).astype(bool)
78 for i in range(xyz.shape[0]):
79 for j in range(i + 1, xyz.shape[0]):
80 if keep_idx[i] == 1:
81 dist = np.linalg.norm(xyz[i, :] - xyz[j, :])
82 keep_idx[j] = dist > min_distance
83 ijk = ijk[keep_idx, :]
84 vals = vals[keep_idx]
85 return ijk, vals
86
87
88 def _get_val(row, input_arr):
89 """Small function for extracting values from array based on index.
90 """
91 i, j, k = row
92 return input_arr[i, j, k]
93
94
95 def get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,
96 min_distance=8.):
97 """Creates pandas dataframe with img cluster statistics.
98
99 Parameters
100 ----------
101 stat_img : Niimg-like object,
102 Statistical image (presumably in z- or p-scale).
103
104 stat_threshold: `float`
105 Cluster forming threshold in same scale as `stat_img` (either a
106 p-value or z-scale value).
107
108 cluster_threshold : `int` or `None`, optional
109 Cluster size threshold, in voxels.
110
111 min_distance: `float`, optional
112 Minimum distance between subpeaks in mm. Default is 8 mm.
113
114 Returns
115 -------
116 df : `pandas.DataFrame`
117 Table with peaks and subpeaks from thresholded `stat_img`. For binary
118 clusters (clusters with >1 voxel containing only one value), the table
119 reports the center of mass of the cluster,
120 rather than any peaks/subpeaks.
121 """
122 cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
123 stat_map = get_data(stat_img)
124 conn_mat = np.zeros((3, 3, 3), int) # 6-connectivity, aka NN1 or "faces"
125 conn_mat[1, 1, :] = 1
126 conn_mat[1, :, 1] = 1
127 conn_mat[:, 1, 1] = 1
128 voxel_size = np.prod(stat_img.header.get_zooms())
129
130 # Binarize using CDT
131 binarized = stat_map > stat_threshold
132 binarized = binarized.astype(int)
133
134 # If the stat threshold is too high simply return an empty dataframe
135 if np.sum(binarized) == 0:
136 warnings.warn('Attention: No clusters with stat higher than %f' %
137 stat_threshold)
138 return pd.DataFrame(columns=cols)
139
140 # Extract connected components above cluster size threshold
141 label_map = ndimage.measurements.label(binarized, conn_mat)[0]
142 clust_ids = sorted(list(np.unique(label_map)[1:]))
143 for c_val in clust_ids:
144 if cluster_threshold is not None and np.sum(
145 label_map == c_val) < cluster_threshold:
146 stat_map[label_map == c_val] = 0
147 binarized[label_map == c_val] = 0
148
149 # If the cluster threshold is too high simply return an empty dataframe
150 # this checks for stats higher than threshold after small clusters
151 # were removed from stat_map
152 if np.sum(stat_map > stat_threshold) == 0:
153 warnings.warn('Attention: No clusters with more than %d voxels' %
154 cluster_threshold)
155 return pd.DataFrame(columns=cols)
156
157 # Now re-label and create table
158 label_map = ndimage.measurements.label(binarized, conn_mat)[0]
159 clust_ids = sorted(list(np.unique(label_map)[1:]))
160 peak_vals = np.array(
161 [np.max(stat_map * (label_map == c)) for c in clust_ids])
162 clust_ids = [clust_ids[c] for c in
163 (-peak_vals).argsort()] # Sort by descending max value
164
165 rows = []
166 for c_id, c_val in enumerate(clust_ids):
167 cluster_mask = label_map == c_val
168 masked_data = stat_map * cluster_mask
169
170 cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)
171
172 # Get peaks, subpeaks and associated statistics
173 subpeak_ijk, subpeak_vals = _local_max(masked_data, stat_img.affine,
174 min_distance=min_distance)
175 subpeak_xyz = np.asarray(coord_transform(subpeak_ijk[:, 0],
176 subpeak_ijk[:, 1],
177 subpeak_ijk[:, 2],
178 stat_img.affine)).tolist()
179 subpeak_xyz = np.array(subpeak_xyz).T
180
181 # Only report peak and, at most, top 3 subpeaks.
182 n_subpeaks = np.min((len(subpeak_vals), 4))
183 for subpeak in range(n_subpeaks):
184 if subpeak == 0:
185 row = [c_id + 1, subpeak_xyz[subpeak, 0],
186 subpeak_xyz[subpeak, 1], subpeak_xyz[subpeak, 2],
187 subpeak_vals[subpeak], cluster_size_mm]
188 else:
189 # Subpeak naming convention is cluster num+letter: 1a, 1b, etc
190 sp_id = '{0}{1}'.format(c_id + 1, ascii_lowercase[subpeak - 1])
191 row = [sp_id, subpeak_xyz[subpeak, 0], subpeak_xyz[subpeak, 1],
192 subpeak_xyz[subpeak, 2], subpeak_vals[subpeak], '']
193 rows += [row]
194 df = pd.DataFrame(columns=cols, data=rows)
195 return df
196
[end of nilearn/reporting/_get_clusters_table.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nilearn/reporting/_get_clusters_table.py b/nilearn/reporting/_get_clusters_table.py
--- a/nilearn/reporting/_get_clusters_table.py
+++ b/nilearn/reporting/_get_clusters_table.py
@@ -120,7 +120,14 @@
rather than any peaks/subpeaks.
"""
cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
- stat_map = get_data(stat_img)
+
+ # If cluster threshold is used, there is chance that stat_map will be
+ # modified, therefore copy is needed
+ if cluster_threshold is None:
+ stat_map = get_data(stat_img)
+ else:
+ stat_map = get_data(stat_img).copy()
+
conn_mat = np.zeros((3, 3, 3), int) # 6-connectivity, aka NN1 or "faces"
conn_mat[1, 1, :] = 1
conn_mat[1, :, 1] = 1
|
{"golden_diff": "diff --git a/nilearn/reporting/_get_clusters_table.py b/nilearn/reporting/_get_clusters_table.py\n--- a/nilearn/reporting/_get_clusters_table.py\n+++ b/nilearn/reporting/_get_clusters_table.py\n@@ -120,7 +120,14 @@\n rather than any peaks/subpeaks.\n \"\"\"\n cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']\n- stat_map = get_data(stat_img)\n+\n+ # If cluster threshold is used, there is chance that stat_map will be\n+ # modified, therefore copy is needed\n+ if cluster_threshold is None:\n+ stat_map = get_data(stat_img)\n+ else:\n+ stat_map = get_data(stat_img).copy()\n+\n conn_mat = np.zeros((3, 3, 3), int) # 6-connectivity, aka NN1 or \"faces\"\n conn_mat[1, 1, :] = 1\n conn_mat[1, :, 1] = 1\n", "issue": "nistats: dependence between function calls \u2013 strange bug may affect reproducibility\nHi,\r\n\r\nI've encountered very strange behavior of two functions \u2013 `map_threshold` from `nistats.thresholding` module and `get_clusters_table` from `nistats.reporting` module. It seems like the threshold value returned by `map_threshold` depends on the last output value(s) of `get_clusters_table` function or some internal function variables. Impact seems to be severe (the higher the `cluster_threshold` value passed to `get_cluster_table`, the higher the threshold returned by subsequent call of `map_threshold`). \r\n\r\nHere is a simple demonstration: https://github.com/kbonna/decidenet/blob/master/activation_analysis/nistats_map_threshold_bug.ipynb\r\n\r\nI've tried to find a source of the problem studying code of both functions, but I everything seems to be ok at first glance. \r\n\r\nAm I missing something simple?\n", "before_files": [{"content": "\"\"\"\nThis module implements plotting functions useful to report analysis results.\n\nAuthor: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\"\"\"\n\nimport warnings\nfrom string import ascii_lowercase\n\nimport numpy as np\nimport pandas as pd\nimport nibabel as nib\nfrom scipy import ndimage\n\nfrom nilearn.image import get_data\nfrom nilearn.image.resampling import coord_transform\n\n\ndef _local_max(data, affine, min_distance):\n \"\"\"Find all local maxima of the array, separated by at least min_distance.\n Adapted from https://stackoverflow.com/a/22631583/2589328\n\n Parameters\n ----------\n data : array_like\n 3D array of with masked values for cluster.\n\n affine: np.ndarray\n Square matrix specifying the position of the image array data\n in a reference space.\n\n min_distance : `int`\n Minimum distance between local maxima in ``data``, in terms of mm.\n\n Returns\n -------\n ijk : `numpy.ndarray`\n (n_foci, 3) array of local maxima indices for cluster.\n\n vals : `numpy.ndarray`\n (n_foci,) array of values from data at ijk.\n \"\"\"\n ijk, vals = _identify_subpeaks(data)\n xyz, ijk, vals = _sort_subpeaks(ijk, vals, affine)\n ijk, vals = _pare_subpeaks(xyz, ijk, vals, min_distance)\n return ijk, vals\n\n\ndef _identify_subpeaks(data):\n # Initial identification of subpeaks with minimal minimum distance\n data_max = ndimage.filters.maximum_filter(data, 3)\n maxima = (data == data_max)\n data_min = ndimage.filters.minimum_filter(data, 3)\n diff = ((data_max - data_min) > 0)\n maxima[diff == 0] = 0\n\n labeled, n_subpeaks = ndimage.label(maxima)\n labels_index = range(1, n_subpeaks + 1)\n ijk = np.array(ndimage.center_of_mass(data, labeled, labels_index))\n ijk = np.round(ijk).astype(int)\n vals = np.apply_along_axis(arr=ijk, axis=1, func1d=_get_val,\n input_arr=data)\n return ijk, vals\n\n\ndef 
_sort_subpeaks(ijk, vals, affine):\n # Sort subpeaks in cluster in descending order of stat value\n order = (-vals).argsort()\n vals = vals[order]\n ijk = ijk[order, :]\n xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm\n return xyz, ijk, vals\n\n\ndef _pare_subpeaks(xyz, ijk, vals, min_distance):\n # Reduce list of subpeaks based on distance\n keep_idx = np.ones(xyz.shape[0]).astype(bool)\n for i in range(xyz.shape[0]):\n for j in range(i + 1, xyz.shape[0]):\n if keep_idx[i] == 1:\n dist = np.linalg.norm(xyz[i, :] - xyz[j, :])\n keep_idx[j] = dist > min_distance\n ijk = ijk[keep_idx, :]\n vals = vals[keep_idx]\n return ijk, vals\n\n\ndef _get_val(row, input_arr):\n \"\"\"Small function for extracting values from array based on index.\n \"\"\"\n i, j, k = row\n return input_arr[i, j, k]\n\n\ndef get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,\n min_distance=8.):\n \"\"\"Creates pandas dataframe with img cluster statistics.\n\n Parameters\n ----------\n stat_img : Niimg-like object,\n Statistical image (presumably in z- or p-scale).\n\n stat_threshold: `float`\n Cluster forming threshold in same scale as `stat_img` (either a\n p-value or z-scale value).\n\n cluster_threshold : `int` or `None`, optional\n Cluster size threshold, in voxels.\n\n min_distance: `float`, optional\n Minimum distance between subpeaks in mm. Default is 8 mm.\n\n Returns\n -------\n df : `pandas.DataFrame`\n Table with peaks and subpeaks from thresholded `stat_img`. For binary\n clusters (clusters with >1 voxel containing only one value), the table\n reports the center of mass of the cluster,\n rather than any peaks/subpeaks.\n \"\"\"\n cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']\n stat_map = get_data(stat_img)\n conn_mat = np.zeros((3, 3, 3), int) # 6-connectivity, aka NN1 or \"faces\"\n conn_mat[1, 1, :] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[:, 1, 1] = 1\n voxel_size = np.prod(stat_img.header.get_zooms())\n\n # Binarize using CDT\n binarized = stat_map > stat_threshold\n binarized = binarized.astype(int)\n\n # If the stat threshold is too high simply return an empty dataframe\n if np.sum(binarized) == 0:\n warnings.warn('Attention: No clusters with stat higher than %f' %\n stat_threshold)\n return pd.DataFrame(columns=cols)\n\n # Extract connected components above cluster size threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if cluster_threshold is not None and np.sum(\n label_map == c_val) < cluster_threshold:\n stat_map[label_map == c_val] = 0\n binarized[label_map == c_val] = 0\n\n # If the cluster threshold is too high simply return an empty dataframe\n # this checks for stats higher than threshold after small clusters\n # were removed from stat_map\n if np.sum(stat_map > stat_threshold) == 0:\n warnings.warn('Attention: No clusters with more than %d voxels' %\n cluster_threshold)\n return pd.DataFrame(columns=cols)\n\n # Now re-label and create table\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n peak_vals = np.array(\n [np.max(stat_map * (label_map == c)) for c in clust_ids])\n clust_ids = [clust_ids[c] for c in\n (-peak_vals).argsort()] # Sort by descending max value\n\n rows = []\n for c_id, c_val in enumerate(clust_ids):\n cluster_mask = label_map == c_val\n masked_data = stat_map * cluster_mask\n\n cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)\n\n # 
Get peaks, subpeaks and associated statistics\n subpeak_ijk, subpeak_vals = _local_max(masked_data, stat_img.affine,\n min_distance=min_distance)\n subpeak_xyz = np.asarray(coord_transform(subpeak_ijk[:, 0],\n subpeak_ijk[:, 1],\n subpeak_ijk[:, 2],\n stat_img.affine)).tolist()\n subpeak_xyz = np.array(subpeak_xyz).T\n\n # Only report peak and, at most, top 3 subpeaks.\n n_subpeaks = np.min((len(subpeak_vals), 4))\n for subpeak in range(n_subpeaks):\n if subpeak == 0:\n row = [c_id + 1, subpeak_xyz[subpeak, 0],\n subpeak_xyz[subpeak, 1], subpeak_xyz[subpeak, 2],\n subpeak_vals[subpeak], cluster_size_mm]\n else:\n # Subpeak naming convention is cluster num+letter: 1a, 1b, etc\n sp_id = '{0}{1}'.format(c_id + 1, ascii_lowercase[subpeak - 1])\n row = [sp_id, subpeak_xyz[subpeak, 0], subpeak_xyz[subpeak, 1],\n subpeak_xyz[subpeak, 2], subpeak_vals[subpeak], '']\n rows += [row]\n df = pd.DataFrame(columns=cols, data=rows)\n return df\n", "path": "nilearn/reporting/_get_clusters_table.py"}]}
| 3,082 | 239 |
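The nilearn fix above copies the statistical map before zeroing small clusters because the array returned by `get_data(stat_img)` is apparently shared with the image, so in-place edits leak into later calls such as `map_threshold` (which is exactly the interdependence the issue describes). A plain-NumPy illustration of that alias-versus-copy difference (toy numbers, not nilearn data):

```python
# Toy numbers standing in for a statistical map; not nilearn data.
import numpy as np

underlying = np.array([0.5, 2.1, 3.4, 1.2, 4.8])

alias = underlying             # what the un-patched code effectively edits
alias[alias < 2.0] = 0         # "drop small clusters"
print(underlying)              # [0.  2.1 3.4 0.  4.8] -- shared data changed

underlying = np.array([0.5, 2.1, 3.4, 1.2, 4.8])
safe = underlying.copy()       # what the patch switches to when thresholding
safe[safe < 2.0] = 0
print(underlying)              # [0.5 2.1 3.4 1.2 4.8] -- original untouched
```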
gh_patches_debug_7894
|
rasdani/github-patches
|
git_diff
|
vega__altair-390
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pin vega version in requirements
To make sure things still work when ipyvega is updated (as it already has been)
</issue>
<code>
[start of setup.py]
1 LONG_DESCRIPTION = """
2 Altair: A declarative statistical visualization library for Python.
3
4 http://altair-viz.github.io/
5
6 This package provides a Python API for building statistical visualizations
7 in a declarative manner. This API contains no actual visualization rendering
8 code, but instead emits JSON data structures following the `Vega-Lite`_
9 specification. For convenience, Altair can optionally use `ipyvega`_ to
10 seamlessly display client-side renderings in the Jupyter notebook.
11
12 .. image:: https://raw.githubusercontent.com/altair-viz/altair/master/images/cars.png
13
14 Please note that if you wish to use altair in the Jupyter Notebook, the
15 `ipyvega`_ notebook extension must be enabled as follows::
16
17 $ pip install altair
18 $ pip install --upgrade notebook
19 $ jupyter nbextension install --sys-prefix --py vega
20
21 See the `Altair Documentation`_ for tutorials, detailed installation
22 instructions, and examples.
23 See the `Altair Github Repository`_ for issues, bug reports, and contributions.
24
25 .. _Altair Github Repository: http://github.com/altair-viz/altair/
26 .. _Altair Documentation: http://altair-viz.github.io/
27 .. _Vega-Lite: https://github.com/vega/vega-lite
28 .. _ipyvega: https://github.com/vega/ipyvega
29 """
30
31 DESCRIPTION = "Altair: A declarative statistical visualization library for Python."
32 NAME = "altair"
33 PACKAGES = ['altair',
34 'altair.v1',
35 'altair.v1.tests',
36 'altair.v1.schema',
37 'altair.v1.schema._interface',
38 'altair.v1.schema._interface.tests',
39 'altair.v1.examples',
40 'altair.v1.examples.tests',
41 'altair.datasets',
42 'altair.datasets.tests',
43 'altair.expr',
44 'altair.expr.tests',
45 'altair.tests',
46 'altair.utils',
47 'altair.utils.tests',
48 ]
49 PACKAGE_DATA = {'altair': ['notebooks/*.ipynb',
50 'notebooks/*.html',
51 'notebooks/auto_examples/*.ipynb',
52 'v1/schema/*.json',
53 'v1/examples/*.json',
54 'v1/examples/json/*.json',
55 'datasets/*.json',
56 'expr/*.json']}
57 AUTHOR = "Brian E. Granger / Jake VanderPlas"
58 AUTHOR_EMAIL = "[email protected] / [email protected]"
59 URL = 'http://altair-viz.github.io'
60 DOWNLOAD_URL = 'http://github.com/altair-viz/altair/'
61 LICENSE = 'BSD 3-clause'
62 INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']
63
64
65 import io
66 import os
67 import re
68
69 try:
70 from setuptools import setup
71 except ImportError:
72 from distutils.core import setup
73
74
75 def read(path, encoding='utf-8'):
76 path = os.path.join(os.path.dirname(__file__), path)
77 with io.open(path, encoding=encoding) as fp:
78 return fp.read()
79
80
81 def version(path):
82 """Obtain the packge version from a python file e.g. pkg/__init__.py
83
84 See <https://packaging.python.org/en/latest/single_source_version.html>.
85 """
86 version_file = read(path)
87 version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
88 version_file, re.M)
89 if version_match:
90 return version_match.group(1)
91 raise RuntimeError("Unable to find version string.")
92
93
94 VERSION = version('altair/__init__.py')
95
96
97 setup(name=NAME,
98 version=VERSION,
99 description=DESCRIPTION,
100 long_description=LONG_DESCRIPTION,
101 author=AUTHOR,
102 author_email=AUTHOR_EMAIL,
103 url=URL,
104 download_url=DOWNLOAD_URL,
105 license=LICENSE,
106 packages=PACKAGES,
107 package_data=PACKAGE_DATA,
108 install_requires=INSTALL_REQUIRES,
109 classifiers=[
110 'Development Status :: 4 - Beta',
111 'Environment :: Console',
112 'Intended Audience :: Science/Research',
113 'License :: OSI Approved :: BSD License',
114 'Natural Language :: English',
115 'Programming Language :: Python :: 2.7',
116 'Programming Language :: Python :: 3.4',
117 'Programming Language :: Python :: 3.5'],
118 )
119
[end of setup.py]
[start of altair/__init__.py]
1 __version__ = '1.3.0.dev0'
2
3 from .v1 import *
4
[end of altair/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/altair/__init__.py b/altair/__init__.py
--- a/altair/__init__.py
+++ b/altair/__init__.py
@@ -1,3 +1,3 @@
-__version__ = '1.3.0.dev0'
+__version__ = '1.2.1.dev0'
from .v1 import *
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
URL = 'http://altair-viz.github.io'
DOWNLOAD_URL = 'http://github.com/altair-viz/altair/'
LICENSE = 'BSD 3-clause'
-INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']
+INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega==0.4.4']
import io
|
{"golden_diff": "diff --git a/altair/__init__.py b/altair/__init__.py\n--- a/altair/__init__.py\n+++ b/altair/__init__.py\n@@ -1,3 +1,3 @@\n-__version__ = '1.3.0.dev0'\n+__version__ = '1.2.1.dev0'\n \n from .v1 import *\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n URL = 'http://altair-viz.github.io'\n DOWNLOAD_URL = 'http://github.com/altair-viz/altair/'\n LICENSE = 'BSD 3-clause'\n-INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']\n+INSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega==0.4.4']\n \n \n import io\n", "issue": "Pin vega version in requirements\nTo make sure things still work when ipyvega is updated (as it already has been)\n", "before_files": [{"content": "LONG_DESCRIPTION = \"\"\"\nAltair: A declarative statistical visualization library for Python.\n\nhttp://altair-viz.github.io/\n\nThis package provides a Python API for building statistical visualizations\nin a declarative manner. This API contains no actual visualization rendering\ncode, but instead emits JSON data structures following the `Vega-Lite`_\nspecification. For convenience, Altair can optionally use `ipyvega`_ to\nseamlessly display client-side renderings in the Jupyter notebook.\n\n.. image:: https://raw.githubusercontent.com/altair-viz/altair/master/images/cars.png\n\nPlease note that if you wish to use altair in the Jupyter Notebook, the\n`ipyvega`_ notebook extension must be enabled as follows::\n\n $ pip install altair\n $ pip install --upgrade notebook\n $ jupyter nbextension install --sys-prefix --py vega\n\nSee the `Altair Documentation`_ for tutorials, detailed installation\ninstructions, and examples.\nSee the `Altair Github Repository`_ for issues, bug reports, and contributions.\n\n.. _Altair Github Repository: http://github.com/altair-viz/altair/\n.. _Altair Documentation: http://altair-viz.github.io/\n.. _Vega-Lite: https://github.com/vega/vega-lite\n.. _ipyvega: https://github.com/vega/ipyvega\n\"\"\"\n\nDESCRIPTION = \"Altair: A declarative statistical visualization library for Python.\"\nNAME = \"altair\"\nPACKAGES = ['altair',\n 'altair.v1',\n 'altair.v1.tests',\n 'altair.v1.schema',\n 'altair.v1.schema._interface',\n 'altair.v1.schema._interface.tests',\n 'altair.v1.examples',\n 'altair.v1.examples.tests',\n 'altair.datasets',\n 'altair.datasets.tests',\n 'altair.expr',\n 'altair.expr.tests',\n 'altair.tests',\n 'altair.utils',\n 'altair.utils.tests',\n ]\nPACKAGE_DATA = {'altair': ['notebooks/*.ipynb',\n 'notebooks/*.html',\n 'notebooks/auto_examples/*.ipynb',\n 'v1/schema/*.json',\n 'v1/examples/*.json',\n 'v1/examples/json/*.json',\n 'datasets/*.json',\n 'expr/*.json']}\nAUTHOR = \"Brian E. Granger / Jake VanderPlas\"\nAUTHOR_EMAIL = \"[email protected] / [email protected]\"\nURL = 'http://altair-viz.github.io'\nDOWNLOAD_URL = 'http://github.com/altair-viz/altair/'\nLICENSE = 'BSD 3-clause'\nINSTALL_REQUIRES = ['traitlets>=4.3.1','ipython','pandas','vega>=0.4.4']\n\n\nimport io\nimport os\nimport re\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\ndef read(path, encoding='utf-8'):\n path = os.path.join(os.path.dirname(__file__), path)\n with io.open(path, encoding=encoding) as fp:\n return fp.read()\n\n\ndef version(path):\n \"\"\"Obtain the packge version from a python file e.g. 
pkg/__init__.py\n\n See <https://packaging.python.org/en/latest/single_source_version.html>.\n \"\"\"\n version_file = read(path)\n version_match = re.search(r\"\"\"^__version__ = ['\"]([^'\"]*)['\"]\"\"\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nVERSION = version('altair/__init__.py')\n\n\nsetup(name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n packages=PACKAGES,\n package_data=PACKAGE_DATA,\n install_requires=INSTALL_REQUIRES,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'],\n )\n", "path": "setup.py"}, {"content": "__version__ = '1.3.0.dev0'\n\nfrom .v1 import *\n", "path": "altair/__init__.py"}]}
| 1,819 | 226 |
gh_patches_debug_26558
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-4522
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix JUPYTERHUB_SINGLEUSER_APP after Notebook 7 release
### Bug description
With `notebook 6.5.4` it was possible to specify JUPYTERHUB_SINGLEUSER_APP='notebook' to run `Jupyter Notebook` instead of `JupyterLab`.
#### Expected behaviour
Jupyter Notebook is run in singleuser
#### Actual behaviour
`jupyterhub-singleuser` fails
### How to reproduce
Working image: `jupyter/base-notebook:notebook-6.5.4`
Failing image: `jupyter/base-notebook:notebook-7.0.0`
1. Run image: `docker run -it --rm jupyter/base-notebook:notebook-7.0.0 bash`
2. Run: `JUPYTERHUB_SINGLEUSER_APP='notebook' JUPYTERHUB_SERVICE_URL="127.0.0.1" jupyterhub-singleuser`
JupyterHub is not running inside the image, but I don't think that's the problem.
Output with Jupyter Notebook 7:
```
Traceback (most recent call last):
File "/opt/conda/bin/jupyterhub-singleuser", line 6, in <module>
from jupyterhub.singleuser import main
File "/opt/conda/lib/python3.11/site-packages/jupyterhub/singleuser/__init__.py", line 67, in <module>
from .app import SingleUserNotebookApp, main
File "/opt/conda/lib/python3.11/site-packages/jupyterhub/singleuser/app.py", line 31, in <module>
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/traitlets/utils/importstring.py", line 30, in import_item
module = __import__(package, fromlist=[obj])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ModuleNotFoundError: No module named 'notebook.notebookapp'
```
Output with Jupyter Notebook 6:
```
[I 2023-07-25 20:59:48.574 SingleUserNotebookApp mixins:547] Starting jupyterhub single-user server version 4.0.1
[I 2023-07-25 20:59:48.574 SingleUserNotebookApp mixins:561] Extending notebook.notebookapp.NotebookApp from notebook 6.5.4
[W 2023-07-25 20:59:48.578 SingleUserNotebookApp configurable:200] Config option `open_browser` not recognized by `SingleUserNotebookApp`. Did you mean `browser`?
JUPYTERHUB_API_TOKEN env is required to run jupyterhub-singleuser. Did you launch it manually?
```
### Your personal set up
- OS:
<!-- [e.g. ubuntu 20.04, macOS 11.0] -->
- Version(s):
<!-- e.g. jupyterhub --version, python --version --->
<details><summary>Full environment</summary>
<!-- For reproduction, it's useful to have the full environment. For example, the output of `pip freeze` or `conda list` --->
```
# paste output of `pip freeze` or `conda list` here
```
</details>
<details><summary>Configuration</summary>
<!--
For JupyterHub, especially include information such as what Spawner and Authenticator are being used.
Be careful not to share any sensitive information.
You can paste jupyterhub_config.py below.
To exclude lots of comments and empty lines from auto-generated jupyterhub_config.py, you can do:
grep -v '\(^#\|^[[:space:]]*$\)' jupyterhub_config.py
-->
```python
# jupyterhub_config.py
```
</details>
<details><summary>Logs</summary>
<!--
Errors are often logged by jupyterhub. How you get logs depends on your deployment.
With kubernetes it might be:
kubectl get pod # hub pod name starts with hub...
kubectl logs hub-...
# or for a single-user server
kubectl logs jupyter-username
Or the-littlest-jupyterhub:
journalctl -u jupyterhub
# or for a single-user server
journalctl -u jupyter-username
-->
```
# paste relevant logs here, if any
```
</details>
</issue>
<code>
[start of jupyterhub/singleuser/app.py]
1 """Make a single-user app based on the environment:
2
3 - $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.
4 default: jupyter_server.serverapp.ServerApp
5
6 .. versionchanged:: 2.0
7
8 Default app changed to launch `jupyter labhub`.
9 Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.
10 """
11 import os
12
13 from traitlets import import_item
14
15 from .mixins import make_singleuser_app
16
17 JUPYTERHUB_SINGLEUSER_APP = os.environ.get("JUPYTERHUB_SINGLEUSER_APP", "")
18
19 # allow shortcut references
20 _app_shortcuts = {
21 "notebook": "notebook.notebookapp.NotebookApp",
22 "jupyter-server": "jupyter_server.serverapp.ServerApp",
23 "extension": "jupyter_server.serverapp.ServerApp",
24 }
25
26 JUPYTERHUB_SINGLEUSER_APP = _app_shortcuts.get(
27 JUPYTERHUB_SINGLEUSER_APP.replace("_", "-"), JUPYTERHUB_SINGLEUSER_APP
28 )
29
30 if JUPYTERHUB_SINGLEUSER_APP:
31 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
32 else:
33 App = None
34 _import_error = None
35 for JUPYTERHUB_SINGLEUSER_APP in (
36 "jupyter_server.serverapp.ServerApp",
37 "notebook.notebookapp.NotebookApp",
38 ):
39 try:
40 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
41 except ImportError as e:
42 if _import_error is None:
43 _import_error = e
44 continue
45 else:
46 break
47 if App is None:
48 raise _import_error
49
50
51 SingleUserNotebookApp = make_singleuser_app(App)
52
53
54 def main():
55 """Launch a jupyterhub single-user server"""
56 if not os.environ.get("JUPYTERHUB_SINGLEUSER_APP"):
57 # app not specified, launch jupyter-labhub by default,
58 # if jupyterlab is recent enough (3.1).
59 # This is a minimally extended ServerApp that does:
60 # 1. ensure lab extension is enabled, and
61 # 2. set default URL to `/lab`
62 import re
63
64 _version_pat = re.compile(r"(\d+)\.(\d+)")
65 try:
66 import jupyterlab
67 from jupyterlab.labhubapp import SingleUserLabApp
68
69 m = _version_pat.match(jupyterlab.__version__)
70 except Exception:
71 m = None
72
73 if m is not None:
74 version_tuple = tuple(int(v) for v in m.groups())
75 if version_tuple >= (3, 1):
76 return SingleUserLabApp.launch_instance()
77
78 return SingleUserNotebookApp.launch_instance()
79
[end of jupyterhub/singleuser/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py
--- a/jupyterhub/singleuser/app.py
+++ b/jupyterhub/singleuser/app.py
@@ -6,7 +6,7 @@
.. versionchanged:: 2.0
Default app changed to launch `jupyter labhub`.
- Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.
+ Use JUPYTERHUB_SINGLEUSER_APP='notebook' for the legacy 'classic' notebook server (requires notebook<7).
"""
import os
@@ -27,7 +27,25 @@
JUPYTERHUB_SINGLEUSER_APP.replace("_", "-"), JUPYTERHUB_SINGLEUSER_APP
)
+
if JUPYTERHUB_SINGLEUSER_APP:
+ if JUPYTERHUB_SINGLEUSER_APP in {"notebook", _app_shortcuts["notebook"]}:
+ # better error for notebook v7, which uses jupyter-server
+ # when the legacy notebook server is requested
+ try:
+ from notebook import __version__
+ except ImportError:
+ # will raise later
+ pass
+ else:
+ # check if this failed because of notebook v7
+ _notebook_major_version = int(__version__.split(".", 1)[0])
+ if _notebook_major_version >= 7:
+ raise ImportError(
+ f"JUPYTERHUB_SINGLEUSER_APP={JUPYTERHUB_SINGLEUSER_APP} is not valid with notebook>=7 (have notebook=={__version__}).\n"
+ f"Leave $JUPYTERHUB_SINGLEUSER_APP unspecified (or use the default JUPYTERHUB_SINGLEUSER_APP=jupyter-server), "
+ 'and set `c.Spawner.default_url = "/tree"` to make notebook v7 the default UI.'
+ )
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
else:
App = None
|
{"golden_diff": "diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py\n--- a/jupyterhub/singleuser/app.py\n+++ b/jupyterhub/singleuser/app.py\n@@ -6,7 +6,7 @@\n .. versionchanged:: 2.0\n \n Default app changed to launch `jupyter labhub`.\n- Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n+ Use JUPYTERHUB_SINGLEUSER_APP='notebook' for the legacy 'classic' notebook server (requires notebook<7).\n \"\"\"\n import os\n \n@@ -27,7 +27,25 @@\n JUPYTERHUB_SINGLEUSER_APP.replace(\"_\", \"-\"), JUPYTERHUB_SINGLEUSER_APP\n )\n \n+\n if JUPYTERHUB_SINGLEUSER_APP:\n+ if JUPYTERHUB_SINGLEUSER_APP in {\"notebook\", _app_shortcuts[\"notebook\"]}:\n+ # better error for notebook v7, which uses jupyter-server\n+ # when the legacy notebook server is requested\n+ try:\n+ from notebook import __version__\n+ except ImportError:\n+ # will raise later\n+ pass\n+ else:\n+ # check if this failed because of notebook v7\n+ _notebook_major_version = int(__version__.split(\".\", 1)[0])\n+ if _notebook_major_version >= 7:\n+ raise ImportError(\n+ f\"JUPYTERHUB_SINGLEUSER_APP={JUPYTERHUB_SINGLEUSER_APP} is not valid with notebook>=7 (have notebook=={__version__}).\\n\"\n+ f\"Leave $JUPYTERHUB_SINGLEUSER_APP unspecified (or use the default JUPYTERHUB_SINGLEUSER_APP=jupyter-server), \"\n+ 'and set `c.Spawner.default_url = \"/tree\"` to make notebook v7 the default UI.'\n+ )\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n else:\n App = None\n", "issue": "Fix JUPYTERHUB_SINGLEUSER_APP after Notebook 7 release\n### Bug description\r\n\r\nWith `notebook 6.5.4` it was possible to specify JUPYTERHUB_SINGLEUSER_APP='notebook' to run `Jupyter Notebook` instead of `JupyterLab`.\r\n\r\n#### Expected behaviour\r\n\r\nJupyter Notebook is run in singleuser\r\n\r\n#### Actual behaviour\r\n\r\n`jupyterhub-singleuser` fails\r\n\r\n### How to reproduce\r\n\r\nWorking image: `jupyter/base-notebook:notebook-6.5.4`\r\nFailing image: `jupyter/base-notebook:notebook-7.0.0`\r\n\r\n1. Run image: `docker run -it --rm jupyter/base-notebook:notebook-7.0.0 bash`\r\n2. 
Run: `JUPYTERHUB_SINGLEUSER_APP='notebook' JUPYTERHUB_SERVICE_URL=\"127.0.0.1\" jupyterhub-singleuser`\r\n\r\nJupyterHub is not running inside the image, but I don't think that's the problem.\r\n\r\nOutput with Jupyter Notebook 7:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/conda/bin/jupyterhub-singleuser\", line 6, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/opt/conda/lib/python3.11/site-packages/jupyterhub/singleuser/__init__.py\", line 67, in <module>\r\n from .app import SingleUserNotebookApp, main\r\n File \"/opt/conda/lib/python3.11/site-packages/jupyterhub/singleuser/app.py\", line 31, in <module>\r\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/traitlets/utils/importstring.py\", line 30, in import_item\r\n module = __import__(package, fromlist=[obj])\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nModuleNotFoundError: No module named 'notebook.notebookapp'\r\n```\r\n\r\nOutput with Jupyter Notebook 6:\r\n```\r\n[I 2023-07-25 20:59:48.574 SingleUserNotebookApp mixins:547] Starting jupyterhub single-user server version 4.0.1\r\n[I 2023-07-25 20:59:48.574 SingleUserNotebookApp mixins:561] Extending notebook.notebookapp.NotebookApp from notebook 6.5.4\r\n[W 2023-07-25 20:59:48.578 SingleUserNotebookApp configurable:200] Config option `open_browser` not recognized by `SingleUserNotebookApp`. Did you mean `browser`?\r\nJUPYTERHUB_API_TOKEN env is required to run jupyterhub-singleuser. Did you launch it manually?\r\n```\r\n\r\n### Your personal set up\r\n\r\n\r\n\r\n - OS:\r\n <!-- [e.g. ubuntu 20.04, macOS 11.0] -->\r\n - Version(s):\r\n <!-- e.g. jupyterhub --version, python --version --->\r\n\r\n<details><summary>Full environment</summary>\r\n<!-- For reproduction, it's useful to have the full environment. For example, the output of `pip freeze` or `conda list` --->\r\n\r\n```\r\n# paste output of `pip freeze` or `conda list` here\r\n```\r\n</details>\r\n\r\n<details><summary>Configuration</summary>\r\n<!--\r\nFor JupyterHub, especially include information such as what Spawner and Authenticator are being used.\r\nBe careful not to share any sensitive information.\r\nYou can paste jupyterhub_config.py below.\r\nTo exclude lots of comments and empty lines from auto-generated jupyterhub_config.py, you can do:\r\n grep -v '\\(^#\\|^[[:space:]]*$\\)' jupyterhub_config.py\r\n-->\r\n\r\n```python\r\n# jupyterhub_config.py\r\n```\r\n</details>\r\n\r\n<details><summary>Logs</summary>\r\n<!--\r\nErrors are often logged by jupytehub. How you get logs depends on your deployment.\r\nWith kubernetes it might be:\r\n\r\n kubectl get pod # hub pod name starts with hub...\r\n kubectl logs hub-...\r\n # or for a single-user server\r\n kubectl logs jupyter-username\r\n\r\nOr the-littlest-jupyterhub:\r\n\r\n journalctl -u jupyterhub\r\n # or for a single-user server\r\n journalctl -u jupyter-username\r\n-->\r\n\r\n```\r\n# paste relevant logs here, if any\r\n```\r\n</details>\r\n\n", "before_files": [{"content": "\"\"\"Make a single-user app based on the environment:\n\n- $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.\n default: jupyter_server.serverapp.ServerApp\n\n.. 
versionchanged:: 2.0\n\n Default app changed to launch `jupyter labhub`.\n Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n\"\"\"\nimport os\n\nfrom traitlets import import_item\n\nfrom .mixins import make_singleuser_app\n\nJUPYTERHUB_SINGLEUSER_APP = os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\", \"\")\n\n# allow shortcut references\n_app_shortcuts = {\n \"notebook\": \"notebook.notebookapp.NotebookApp\",\n \"jupyter-server\": \"jupyter_server.serverapp.ServerApp\",\n \"extension\": \"jupyter_server.serverapp.ServerApp\",\n}\n\nJUPYTERHUB_SINGLEUSER_APP = _app_shortcuts.get(\n JUPYTERHUB_SINGLEUSER_APP.replace(\"_\", \"-\"), JUPYTERHUB_SINGLEUSER_APP\n)\n\nif JUPYTERHUB_SINGLEUSER_APP:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\nelse:\n App = None\n _import_error = None\n for JUPYTERHUB_SINGLEUSER_APP in (\n \"jupyter_server.serverapp.ServerApp\",\n \"notebook.notebookapp.NotebookApp\",\n ):\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n if _import_error is None:\n _import_error = e\n continue\n else:\n break\n if App is None:\n raise _import_error\n\n\nSingleUserNotebookApp = make_singleuser_app(App)\n\n\ndef main():\n \"\"\"Launch a jupyterhub single-user server\"\"\"\n if not os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\"):\n # app not specified, launch jupyter-labhub by default,\n # if jupyterlab is recent enough (3.1).\n # This is a minimally extended ServerApp that does:\n # 1. ensure lab extension is enabled, and\n # 2. set default URL to `/lab`\n import re\n\n _version_pat = re.compile(r\"(\\d+)\\.(\\d+)\")\n try:\n import jupyterlab\n from jupyterlab.labhubapp import SingleUserLabApp\n\n m = _version_pat.match(jupyterlab.__version__)\n except Exception:\n m = None\n\n if m is not None:\n version_tuple = tuple(int(v) for v in m.groups())\n if version_tuple >= (3, 1):\n return SingleUserLabApp.launch_instance()\n\n return SingleUserNotebookApp.launch_instance()\n", "path": "jupyterhub/singleuser/app.py"}]}
| 2,318 | 448 |
gh_patches_debug_14864
|
rasdani/github-patches
|
git_diff
|
benoitc__gunicorn-1931
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Must explicitly define `setuptools` as a dependency
When running gunicorn in a hardened Python docker image (with most of the dependencies removed) `setuptools` might be missing.
For instance:
```
Traceback (most recent call last):
File "/app/manage-docker.binary.runfiles/__main__/server.py", line 1, in <module>
from gunicorn.app.base import BaseApplication
File "/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/app/base.py", line 12, in <module>
from gunicorn import util
File "/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/util.py", line 12, in <module>
import pkg_resources
ImportError: No module named pkg_resources
```
Can be fixed by defining `setuptools` as a direct dependency within the project's `requirements.txt` file; however, it could be fixed at the gunicorn codebase level by using `install_requires = ['setuptools']` in setup.py.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import os
7 import sys
8
9 from setuptools import setup, find_packages
10 from setuptools.command.test import test as TestCommand
11
12 from gunicorn import __version__
13
14
15 CLASSIFIERS = [
16 'Development Status :: 4 - Beta',
17 'Environment :: Other Environment',
18 'Intended Audience :: Developers',
19 'License :: OSI Approved :: MIT License',
20 'Operating System :: MacOS :: MacOS X',
21 'Operating System :: POSIX',
22 'Programming Language :: Python',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.4',
25 'Programming Language :: Python :: 3.5',
26 'Programming Language :: Python :: 3.6',
27 'Programming Language :: Python :: 3.7',
28 'Programming Language :: Python :: 3 :: Only',
29 'Topic :: Internet',
30 'Topic :: Utilities',
31 'Topic :: Software Development :: Libraries :: Python Modules',
32 'Topic :: Internet :: WWW/HTTP',
33 'Topic :: Internet :: WWW/HTTP :: WSGI',
34 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
35 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']
36
37 # read long description
38 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
39 long_description = f.read()
40
41 # read dev requirements
42 fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')
43 with open(fname) as f:
44 tests_require = [l.strip() for l in f.readlines()]
45
46 class PyTestCommand(TestCommand):
47 user_options = [
48 ("cov", None, "measure coverage")
49 ]
50
51 def initialize_options(self):
52 TestCommand.initialize_options(self)
53 self.cov = None
54
55 def finalize_options(self):
56 TestCommand.finalize_options(self)
57 self.test_args = ['tests']
58 if self.cov:
59 self.test_args += ['--cov', 'gunicorn']
60 self.test_suite = True
61
62 def run_tests(self):
63 import pytest
64 errno = pytest.main(self.test_args)
65 sys.exit(errno)
66
67
68 extra_require = {
69 'gevent': ['gevent>=0.13'],
70 'eventlet': ['eventlet>=0.9.7'],
71 'tornado': ['tornado>=0.2'],
72 'gthread': [],
73 }
74
75 setup(
76 name='gunicorn',
77 version=__version__,
78
79 description='WSGI HTTP Server for UNIX',
80 long_description=long_description,
81 author='Benoit Chesneau',
82 author_email='[email protected]',
83 license='MIT',
84 url='http://gunicorn.org',
85
86 python_requires='>=3.4',
87 classifiers=CLASSIFIERS,
88 zip_safe=False,
89 packages=find_packages(exclude=['examples', 'tests']),
90 include_package_data=True,
91
92 tests_require=tests_require,
93 cmdclass={'test': PyTestCommand},
94
95 entry_points="""
96 [console_scripts]
97 gunicorn=gunicorn.app.wsgiapp:run
98 gunicorn_paster=gunicorn.app.pasterapp:run
99
100 [paste.server_runner]
101 main=gunicorn.app.pasterapp:paste_server
102 """,
103 extras_require=extra_require,
104 )
105
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,6 +65,14 @@
sys.exit(errno)
+install_requires = [
+ # We depend on functioning pkg_resources.working_set.add_entry() and
+ # pkg_resources.load_entry_point(). These both work as of 3.0 which
+ # is the first version to support Python 3.4 which we require as a
+ # floor.
+ 'setuptools>=3.0',
+]
+
extra_require = {
'gevent': ['gevent>=0.13'],
'eventlet': ['eventlet>=0.9.7'],
@@ -84,6 +92,7 @@
url='http://gunicorn.org',
python_requires='>=3.4',
+ install_requires=install_requires,
classifiers=CLASSIFIERS,
zip_safe=False,
packages=find_packages(exclude=['examples', 'tests']),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,6 +65,14 @@\n sys.exit(errno)\n \n \n+install_requires = [\n+ # We depend on functioning pkg_resources.working_set.add_entry() and\n+ # pkg_resources.load_entry_point(). These both work as of 3.0 which\n+ # is the first version to support Python 3.4 which we require as a\n+ # floor.\n+ 'setuptools>=3.0',\n+]\n+\n extra_require = {\n 'gevent': ['gevent>=0.13'],\n 'eventlet': ['eventlet>=0.9.7'],\n@@ -84,6 +92,7 @@\n url='http://gunicorn.org',\n \n python_requires='>=3.4',\n+ install_requires=install_requires,\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n", "issue": "Must explicitly define `setuptools` as a dependency\nWhen running gunicorn in a hardened Python docker image (with most of the dependencies removed) `setuptools` might be missing.\r\n\r\nFor instance:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/app/manage-docker.binary.runfiles/__main__/server.py\", line 1, in <module>\r\n from gunicorn.app.base import BaseApplication\r\n File \"/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/app/base.py\", line 12, in <module>\r\n from gunicorn import util\r\n File \"/app/manage-docker.binary.runfiles/pypi__gunicorn_19_7_1/gunicorn/util.py\", line 12, in <module>\r\n import pkg_resources\r\nImportError: No module named pkg_resources\r\n```\r\n\r\nCan be fixed by defining `setuptools` as a direct dependency within the project' `requirements.txt` file, however, it could be fix at the gunicorn codebase level by using `install_requires = ['setuptools']` in setup.py. \n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nfrom gunicorn import __version__\n\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']\n\n# read long description\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:\n long_description = f.read()\n\n# read dev requirements\nfname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')\nwith open(fname) as f:\n tests_require = [l.strip() for l in f.readlines()]\n\nclass PyTestCommand(TestCommand):\n user_options = [\n (\"cov\", None, \"measure coverage\")\n ]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n if self.cov:\n self.test_args += ['--cov', 'gunicorn']\n self.test_suite = True\n\n def run_tests(self):\n import 
pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nextra_require = {\n 'gevent': ['gevent>=0.13'],\n 'eventlet': ['eventlet>=0.9.7'],\n 'tornado': ['tornado>=0.2'],\n 'gthread': [],\n}\n\nsetup(\n name='gunicorn',\n version=__version__,\n\n description='WSGI HTTP Server for UNIX',\n long_description=long_description,\n author='Benoit Chesneau',\n author_email='[email protected]',\n license='MIT',\n url='http://gunicorn.org',\n\n python_requires='>=3.4',\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n include_package_data=True,\n\n tests_require=tests_require,\n cmdclass={'test': PyTestCommand},\n\n entry_points=\"\"\"\n [console_scripts]\n gunicorn=gunicorn.app.wsgiapp:run\n gunicorn_paster=gunicorn.app.pasterapp:run\n\n [paste.server_runner]\n main=gunicorn.app.pasterapp:paste_server\n \"\"\",\n extras_require=extra_require,\n)\n", "path": "setup.py"}]}
| 1,690 | 216 |
gh_patches_debug_4585
|
rasdani/github-patches
|
git_diff
|
spack__spack-10984
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation Issue: bowtie build error
### Steps to reproduce the issue
```console
[centos] ~: spack install bowtie
==> Installing bowtie
==> Searching for binary cache of bowtie
==> Warning: No Spack mirrors are currently configured
==> No binary for bowtie found: installing from source
==> Fetching https://github.com/BenLangmead/bowtie/archive/v1.2.2_p1.tar.gz
######################################################################## 100.0%
==> Staging archive: /spack/var/spack/stage/bowtie-1.2.2_p1-se66bd5p6mfiop65vwqpr4jh6uwvpxsr/v1.2.2_p1.tar.gz
==> Created stage in /spack/var/spack/stage/bowtie-1.2.2_p1-se66bd5p6mfiop65vwqpr4jh6uwvpxsr
==> No patches needed for bowtie
==> Building bowtie [MakefilePackage]
==> Executing phase: 'edit'
==> Executing phase: 'build'
==> Error: ProcessError: Command exited with status 2:
'make' '-j2' 'NO_TBB=1'
4 errors found in build log:
18 In file included from sequence_io.h:12:0,
19 from multikey_qsort.h:8,
20 from diff_sample.h:13,
21 from blockwise_sa.h:19,
22 from ebwt.h:27,
23 from ebwt_build.cpp:11:
>> 24 pat.h:6:18: fatal error: zlib.h: No such file or directory
25 #include <zlib.h>
26 ^
27 compilation terminated.
28 In file included from sequence_io.h:12:0,
29 from multikey_qsort.h:8,
30 from diff_sample.h:13,
31 from blockwise_sa.h:19,
32 from ebwt.h:27,
33 from ebwt_build.cpp:11:
>> 34 pat.h:6:18: fatal error: zlib.h: No such file or directory
35 #include <zlib.h>
36 ^
37 compilation terminated.
>> 38 make: *** [bowtie-build-l] Error 1
39 make: *** Waiting for unfinished jobs....
>> 40 make: *** [bowtie-build-s] Error 1
See build log for details:
/spack/var/spack/stage/bowtie-1.2.2_p1-se66bd5p6mfiop65vwqpr4jh6uwvpxsr/bowtie-1.2.2_p1/spack-build.out
```
### Platform and user environment
Please report your OS here:
```commandline
$ uname -a
Linux 4b5226354c71 4.9.125-linuxkit #1 SMP Fri Sep 7 08:20:28 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
```
Bowtie installation fails with missing zlib dependency.
</issue>
<code>
[start of var/spack/repos/builtin/packages/bowtie/package.py]
1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class Bowtie(MakefilePackage):
10 """Bowtie is an ultrafast, memory-efficient short read aligner
11 for short DNA sequences (reads) from next-gen sequencers."""
12
13 homepage = "https://sourceforge.net/projects/bowtie-bio/"
14 url = "https://github.com/BenLangmead/bowtie/archive/v1.2.0.tar.gz"
15
16 # The bowtie project git tagged and GitHub released a v1.2.2,
17 # discovered/fixed a bug, git tagged a v1.2.2_p1 and moved the
18 # 1.2.2 release to use it rather than making a new `1.2.2_p1`
19 # release.
20 #
21 # We point both of the Spack versions at the same tarball so they
22 # build the binaries that are on the release page as v1.2.2
23 version('1.2.2_p1', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1')
24 version('1.2.2', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1', url="https://github.com/BenLangmead/bowtie/archive/v1.2.2_p1.tar.gz")
25 version('1.2.1.1', sha256='1b38408b88f61d18d7ff28b2470a8cfeefccb3fc59fd46e4cc62e23874e52c20')
26 version('1.2.1', sha256='b2a7c8c879cb08f00a82665bee43e1d4861de44a87912c54d168e44c90869728')
27 version('1.2.0', sha256='dc4e7951b8eca56ce7714c47fd4e84f72badd5312ee9546c912af1963570f894')
28 # Keeping the old 1.2 version around for reproducibility, it's not
29 # clearly identical to 1.2.0.
30 version('1.2', md5='6d97f0ea1a65af11d17cc270cfac4af9', url='https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.0/bowtie-1.2-source.zip')
31
32 # Feel free to tighten this. I know that v1.2.2 (aka v1.2.2_p1)
33 # builds with %[email protected] and fails to build with %[email protected]. I'm
34 # not sure whether or not it works with other versions in the
35 # interval.
36 conflicts('%gcc@8:', when='@1.2.2:')
37
38 variant('tbb', default=False, description='Use Intel thread building block')
39
40 depends_on('tbb', when='+tbb')
41
42 # See: https://github.com/BenLangmead/bowtie/issues/87, a
43 # different fix is in the FreeBSD ports/package tree
44 # https://svnweb.freebsd.org/ports?view=revision&revision=483954
45 patch('issue-87.patch', when='%[email protected]:')
46
47 def edit(self, spec, prefix):
48 makefile = FileFilter('Makefile')
49 makefile.filter('CC = .*', 'CC = ' + env['CC'])
50 makefile.filter('CXX = .*', 'CPP = ' + env['CXX'])
51
52 def build(self, spec, prefix):
53 if '+tbb' in spec:
54 make()
55 else:
56 make('NO_TBB=1')
57
58 def install(self, spec, prefix):
59 make('prefix={0}'.format(self.prefix), 'install')
60
[end of var/spack/repos/builtin/packages/bowtie/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/bowtie/package.py b/var/spack/repos/builtin/packages/bowtie/package.py
--- a/var/spack/repos/builtin/packages/bowtie/package.py
+++ b/var/spack/repos/builtin/packages/bowtie/package.py
@@ -38,6 +38,7 @@
variant('tbb', default=False, description='Use Intel thread building block')
depends_on('tbb', when='+tbb')
+ depends_on('zlib')
# See: https://github.com/BenLangmead/bowtie/issues/87, a
# different fix is in the FreeBSD ports/package tree
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/bowtie/package.py b/var/spack/repos/builtin/packages/bowtie/package.py\n--- a/var/spack/repos/builtin/packages/bowtie/package.py\n+++ b/var/spack/repos/builtin/packages/bowtie/package.py\n@@ -38,6 +38,7 @@\n variant('tbb', default=False, description='Use Intel thread building block')\n \n depends_on('tbb', when='+tbb')\n+ depends_on('zlib')\n \n # See: https://github.com/BenLangmead/bowtie/issues/87, a\n # different fix is in the FreeBSD ports/package tree\n", "issue": "Installation Issue: bowtie build error\n### Steps to reproduce the issue\r\n\r\n```console\r\n[centos] ~: spack install bowtie\r\n==> Installing bowtie\r\n==> Searching for binary cache of bowtie\r\n==> Warning: No Spack mirrors are currently configured\r\n==> No binary for bowtie found: installing from source\r\n==> Fetching https://github.com/BenLangmead/bowtie/archive/v1.2.2_p1.tar.gz\r\n######################################################################## 100.0%\r\n==> Staging archive: /spack/var/spack/stage/bowtie-1.2.2_p1-se66bd5p6mfiop65vwqpr4jh6uwvpxsr/v1.2.2_p1.tar.gz\r\n==> Created stage in /spack/var/spack/stage/bowtie-1.2.2_p1-se66bd5p6mfiop65vwqpr4jh6uwvpxsr\r\n==> No patches needed for bowtie\r\n==> Building bowtie [MakefilePackage]\r\n==> Executing phase: 'edit'\r\n==> Executing phase: 'build'\r\n==> Error: ProcessError: Command exited with status 2:\r\n 'make' '-j2' 'NO_TBB=1'\r\n\r\n4 errors found in build log:\r\n 18 In file included from sequence_io.h:12:0,\r\n 19 from multikey_qsort.h:8,\r\n 20 from diff_sample.h:13,\r\n 21 from blockwise_sa.h:19,\r\n 22 from ebwt.h:27,\r\n 23 from ebwt_build.cpp:11:\r\n >> 24 pat.h:6:18: fatal error: zlib.h: No such file or directory\r\n 25 #include <zlib.h>\r\n 26 ^\r\n\r\n 27 compilation terminated.\r\n 28 In file included from sequence_io.h:12:0,\r\n 29 from multikey_qsort.h:8,\r\n 30 from diff_sample.h:13,\r\n 31 from blockwise_sa.h:19,\r\n 32 from ebwt.h:27,\r\n 33 from ebwt_build.cpp:11:\r\n >> 34 pat.h:6:18: fatal error: zlib.h: No such file or directory\r\n 35 #include <zlib.h>\r\n 36 ^\r\n 37 compilation terminated.\r\n >> 38 make: *** [bowtie-build-l] Error 1\r\n 39 make: *** Waiting for unfinished jobs....\r\n >> 40 make: *** [bowtie-build-s] Error 1\r\n\r\nSee build log for details:\r\n /spack/var/spack/stage/bowtie-1.2.2_p1-se66bd5p6mfiop65vwqpr4jh6uwvpxsr/bowtie-1.2.2_p1/spack-build.out\r\n```\r\n\r\n### Platform and user environment\r\n\r\nPlease report your OS here:\r\n```commandline\r\n$ uname -a\r\nLinux 4b5226354c71 4.9.125-linuxkit #1 SMP Fri Sep 7 08:20:28 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux\r\n``` \r\nBowtie installation fails with missing zlib dependency. \r\n\r\n\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Bowtie(MakefilePackage):\n \"\"\"Bowtie is an ultrafast, memory-efficient short read aligner\n for short DNA sequences (reads) from next-gen sequencers.\"\"\"\n\n homepage = \"https://sourceforge.net/projects/bowtie-bio/\"\n url = \"https://github.com/BenLangmead/bowtie/archive/v1.2.0.tar.gz\"\n\n # The bowtie project git tagged and GitHub released a v1.2.2,\n # discovered/fixed a bug, git tagged a v1.2.2_p1 and moved the\n # 1.2.2 release to use it rather than making a new `1.2.2_p1`\n # release.\n #\n # We point both of the Spack versions at the same tarball so they\n # build the binaries that are on the release page as v1.2.2\n version('1.2.2_p1', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1')\n version('1.2.2', sha256='e1b02b2e77a0d44a3dd411209fa1f44f0c4ee304ef5cc83f098275085740d5a1', url=\"https://github.com/BenLangmead/bowtie/archive/v1.2.2_p1.tar.gz\")\n version('1.2.1.1', sha256='1b38408b88f61d18d7ff28b2470a8cfeefccb3fc59fd46e4cc62e23874e52c20')\n version('1.2.1', sha256='b2a7c8c879cb08f00a82665bee43e1d4861de44a87912c54d168e44c90869728')\n version('1.2.0', sha256='dc4e7951b8eca56ce7714c47fd4e84f72badd5312ee9546c912af1963570f894')\n # Keeping the old 1.2 version around for reproducibility, it's not\n # clearly identical to 1.2.0.\n version('1.2', md5='6d97f0ea1a65af11d17cc270cfac4af9', url='https://downloads.sourceforge.net/project/bowtie-bio/bowtie/1.2.0/bowtie-1.2-source.zip')\n\n # Feel free to tighten this. I know that v1.2.2 (aka v1.2.2_p1)\n # builds with %[email protected] and fails to build with %[email protected]. I'm\n # not sure whether or not it works with other versions in the\n # interval.\n conflicts('%gcc@8:', when='@1.2.2:')\n\n variant('tbb', default=False, description='Use Intel thread building block')\n\n depends_on('tbb', when='+tbb')\n\n # See: https://github.com/BenLangmead/bowtie/issues/87, a\n # different fix is in the FreeBSD ports/package tree\n # https://svnweb.freebsd.org/ports?view=revision&revision=483954\n patch('issue-87.patch', when='%[email protected]:')\n\n def edit(self, spec, prefix):\n makefile = FileFilter('Makefile')\n makefile.filter('CC = .*', 'CC = ' + env['CC'])\n makefile.filter('CXX = .*', 'CPP = ' + env['CXX'])\n\n def build(self, spec, prefix):\n if '+tbb' in spec:\n make()\n else:\n make('NO_TBB=1')\n\n def install(self, spec, prefix):\n make('prefix={0}'.format(self.prefix), 'install')\n", "path": "var/spack/repos/builtin/packages/bowtie/package.py"}]}
| 2,502 | 146 |
gh_patches_debug_4178
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-12049
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'On my own' device - Merging a user is not working
## Observed behavior
Observed while integration testing the [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1) release.
When I try to merge a user created through 'On my own' I am getting an "Invalid URL" error in the console. Note that creating a new account through the same flow is working correctly. This issue is caused by the changes made in https://github.com/learningequality/kolibri/pull/12028 and is not extant in [v0.16.1-beta0](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta0).
https://github.com/learningequality/kolibri/assets/79847249/30daa3ca-918c-4c15-901b-c74c08b96466
## Expected behavior
Fully functional 'Merge accounts' user flow.
## Steps to reproduce the issue
1. Install [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1).
2. Setup a full device as a server and another device by going through the 'On my own' setup flow.
3. Attempt to merge the user from the 'On my own' device to the server facility.
## Logs
[logs.zip](https://github.com/learningequality/kolibri/files/14850735/logs.zip)
## Usage Details
[v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1)
Windows 11, Ubuntu 22 - Chrome
</issue>
<code>
[start of kolibri/plugins/user_profile/viewsets.py]
1 import requests
2 from django.contrib.auth import login
3 from django.core.exceptions import ValidationError as DjangoValidationError
4 from rest_framework.exceptions import ValidationError
5 from rest_framework.response import Response
6 from rest_framework.views import APIView
7
8 from .utils import TokenGenerator
9 from kolibri.core.auth.models import FacilityUser
10 from kolibri.core.utils.urls import reverse_remote
11 from kolibri.utils.urls import validator
12
13
14 class OnMyOwnSetupViewset(APIView):
15 """
16 Viewset to determine if the facility has been setup as an "On my own setup" facility.
17 """
18
19 def get(self, request, format=None):
20 if request.user.is_anonymous:
21 self.permission_denied(request)
22 user_facility = self.request.user.facility
23 return Response(
24 {
25 "on_my_own_setup": user_facility.on_my_own_setup,
26 }
27 )
28
29
30 class RemoteFacilityUserViewset(APIView):
31 def get(self, request):
32 baseurl = request.query_params.get("baseurl", "")
33 try:
34 validator(baseurl)
35 except DjangoValidationError as e:
36 raise ValidationError(detail=str(e))
37 username = request.query_params.get("username", None)
38 facility = request.query_params.get("facility", None)
39 if username is None or facility is None:
40 raise ValidationError(detail="Both username and facility are required")
41 url = reverse_remote(baseurl, "kolibri:core:publicsearchuser-list")
42 try:
43 response = requests.get(
44 url, params={"facility": facility, "search": username}
45 )
46 if response.status_code == 200:
47 return Response(response.json())
48 else:
49 return Response({})
50 except Exception as e:
51 raise ValidationError(detail=str(e))
52
53
54 class RemoteFacilityUserAuthenticatedViewset(APIView):
55 def post(self, request, *args, **kwargs):
56 baseurl = request.query_params.get("baseurl", "")
57 try:
58 validator(baseurl)
59 except DjangoValidationError as e:
60 raise ValidationError(detail=str(e))
61 username = request.data.get("username", None)
62 facility = request.data.get("facility", None)
63 password = request.data.get("password", None)
64 if username is None or facility is None:
65 raise ValidationError(detail="Both username and facility are required")
66 url = reverse_remote(baseurl, "kolibri:core:publicuser-list")
67 params = {"facility": facility, "search": username}
68
69 # adding facility so auth works when learners can login without password:
70 username = "username={}&facility={}".format(username, facility)
71
72 auth = requests.auth.HTTPBasicAuth(username, password)
73 try:
74 response = requests.get(url, params=params, verify=False, auth=auth)
75 if response.status_code == 200:
76 return Response(response.json())
77 else:
78 return Response({"error": response.json()["detail"]})
79 except Exception as e:
80 raise ValidationError(detail=str(e))
81
82
83 class LoginMergedUserViewset(APIView):
84 """
85 Viewset to login into kolibri using the merged user,
86 after the old user has been deleted
87 """
88
89 def post(self, request):
90 pk = request.data.get("pk", None)
91 token = request.data.get("token", None)
92 new_user = FacilityUser.objects.get(pk=pk)
93 if not TokenGenerator().check_token(new_user, token):
94 return Response({"error": "Unauthorized"}, status=401)
95 login(request, new_user)
96 return Response({"success": True})
97
[end of kolibri/plugins/user_profile/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/plugins/user_profile/viewsets.py b/kolibri/plugins/user_profile/viewsets.py
--- a/kolibri/plugins/user_profile/viewsets.py
+++ b/kolibri/plugins/user_profile/viewsets.py
@@ -53,7 +53,7 @@
class RemoteFacilityUserAuthenticatedViewset(APIView):
def post(self, request, *args, **kwargs):
- baseurl = request.query_params.get("baseurl", "")
+ baseurl = request.data.get("baseurl", "")
try:
validator(baseurl)
except DjangoValidationError as e:
|
{"golden_diff": "diff --git a/kolibri/plugins/user_profile/viewsets.py b/kolibri/plugins/user_profile/viewsets.py\n--- a/kolibri/plugins/user_profile/viewsets.py\n+++ b/kolibri/plugins/user_profile/viewsets.py\n@@ -53,7 +53,7 @@\n \n class RemoteFacilityUserAuthenticatedViewset(APIView):\n def post(self, request, *args, **kwargs):\n- baseurl = request.query_params.get(\"baseurl\", \"\")\n+ baseurl = request.data.get(\"baseurl\", \"\")\n try:\n validator(baseurl)\n except DjangoValidationError as e:\n", "issue": "'On my own' device - Merging a user is not working\n## Observed behavior\r\nObserved while integration testing the [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1) release.\r\nWhen I try to merge a user created through 'On my own' I am getting an \"Invalid URL\" error in the console. Note that creating a new account through the same flow is working correctly. This issue is caused by the changes made in https://github.com/learningequality/kolibri/pull/12028 and is not extant in [v0.16.1-beta0](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta0).\r\n\r\nhttps://github.com/learningequality/kolibri/assets/79847249/30daa3ca-918c-4c15-901b-c74c08b96466\r\n\r\n## Expected behavior\r\n\r\nFully functional 'Merge accounts' user flow. \r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Install [v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1).\r\n2. Setup a full device as a server and another device by going through the 'On my own' setup flow.\r\n3. Attempt to merge the user from the 'On my own' device' to the server facility.\r\n\r\n## Logs\r\n\r\n[logs.zip](https://github.com/learningequality/kolibri/files/14850735/logs.zip)\r\n\r\n## Usage Details\r\n[v0.16.1-beta1 ](https://github.com/learningequality/kolibri/releases/tag/v0.16.1-beta1)\r\nWindows 11, Ubuntu 22 - Chrome\n", "before_files": [{"content": "import requests\nfrom django.contrib.auth import login\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .utils import TokenGenerator\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.utils.urls import reverse_remote\nfrom kolibri.utils.urls import validator\n\n\nclass OnMyOwnSetupViewset(APIView):\n \"\"\"\n Viewset to determine if the facility has been setup as an \"On my own setup\" facility.\n \"\"\"\n\n def get(self, request, format=None):\n if request.user.is_anonymous:\n self.permission_denied(request)\n user_facility = self.request.user.facility\n return Response(\n {\n \"on_my_own_setup\": user_facility.on_my_own_setup,\n }\n )\n\n\nclass RemoteFacilityUserViewset(APIView):\n def get(self, request):\n baseurl = request.query_params.get(\"baseurl\", \"\")\n try:\n validator(baseurl)\n except DjangoValidationError as e:\n raise ValidationError(detail=str(e))\n username = request.query_params.get(\"username\", None)\n facility = request.query_params.get(\"facility\", None)\n if username is None or facility is None:\n raise ValidationError(detail=\"Both username and facility are required\")\n url = reverse_remote(baseurl, \"kolibri:core:publicsearchuser-list\")\n try:\n response = requests.get(\n url, params={\"facility\": facility, \"search\": username}\n )\n if response.status_code == 200:\n return Response(response.json())\n else:\n return Response({})\n except Exception as e:\n raise 
ValidationError(detail=str(e))\n\n\nclass RemoteFacilityUserAuthenticatedViewset(APIView):\n def post(self, request, *args, **kwargs):\n baseurl = request.query_params.get(\"baseurl\", \"\")\n try:\n validator(baseurl)\n except DjangoValidationError as e:\n raise ValidationError(detail=str(e))\n username = request.data.get(\"username\", None)\n facility = request.data.get(\"facility\", None)\n password = request.data.get(\"password\", None)\n if username is None or facility is None:\n raise ValidationError(detail=\"Both username and facility are required\")\n url = reverse_remote(baseurl, \"kolibri:core:publicuser-list\")\n params = {\"facility\": facility, \"search\": username}\n\n # adding facility so auth works when learners can login without password:\n username = \"username={}&facility={}\".format(username, facility)\n\n auth = requests.auth.HTTPBasicAuth(username, password)\n try:\n response = requests.get(url, params=params, verify=False, auth=auth)\n if response.status_code == 200:\n return Response(response.json())\n else:\n return Response({\"error\": response.json()[\"detail\"]})\n except Exception as e:\n raise ValidationError(detail=str(e))\n\n\nclass LoginMergedUserViewset(APIView):\n \"\"\"\n Viewset to login into kolibri using the merged user,\n after the old user has been deleted\n \"\"\"\n\n def post(self, request):\n pk = request.data.get(\"pk\", None)\n token = request.data.get(\"token\", None)\n new_user = FacilityUser.objects.get(pk=pk)\n if not TokenGenerator().check_token(new_user, token):\n return Response({\"error\": \"Unauthorized\"}, status=401)\n login(request, new_user)\n return Response({\"success\": True})\n", "path": "kolibri/plugins/user_profile/viewsets.py"}]}
| 1,869 | 127 |
gh_patches_debug_41859
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-823
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tag-search command to search tags via their contents instead of names.
Currently, it can be difficult to find the specific command for a tag even if you know it exists. A command to allow you to search through tag bodies would help with finding the right tags. For example, doing `!tag-search backtick` would return `!code-block` (and any other tags that include the word backtick).
</issue>
<code>
[start of bot/cogs/tags.py]
1 import logging
2 import re
3 import time
4 from typing import Dict, List, Optional
5
6 from discord import Colour, Embed
7 from discord.ext.commands import Cog, Context, group
8
9 from bot.bot import Bot
10 from bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles
11 from bot.converters import TagContentConverter, TagNameConverter
12 from bot.decorators import with_role
13 from bot.pagination import LinePaginator
14
15 log = logging.getLogger(__name__)
16
17 TEST_CHANNELS = (
18 Channels.bot_commands,
19 Channels.helpers
20 )
21
22 REGEX_NON_ALPHABET = re.compile(r"[^a-z]", re.MULTILINE & re.IGNORECASE)
23
24
25 class Tags(Cog):
26 """Save new tags and fetch existing tags."""
27
28 def __init__(self, bot: Bot):
29 self.bot = bot
30 self.tag_cooldowns = {}
31
32 self._cache = {}
33 self._last_fetch: float = 0.0
34
35 async def _get_tags(self, is_forced: bool = False) -> None:
36 """Get all tags."""
37 # refresh only when there's a more than 5m gap from last call.
38 time_now: float = time.time()
39 if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:
40 tags = await self.bot.api_client.get('bot/tags')
41 self._cache = {tag['title'].lower(): tag for tag in tags}
42 self._last_fetch = time_now
43
44 @staticmethod
45 def _fuzzy_search(search: str, target: str) -> int:
46 """A simple scoring algorithm based on how many letters are found / total, with order in mind."""
47 current, index = 0, 0
48 _search = REGEX_NON_ALPHABET.sub('', search.lower())
49 _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))
50 _target = next(_targets)
51 try:
52 while True:
53 while index < len(_target) and _search[current] == _target[index]:
54 current += 1
55 index += 1
56 index, _target = 0, next(_targets)
57 except (StopIteration, IndexError):
58 pass
59 return current / len(_search) * 100
60
61 def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:
62 """Return a list of suggested tags."""
63 scores: Dict[str, int] = {
64 tag_title: Tags._fuzzy_search(tag_name, tag['title'])
65 for tag_title, tag in self._cache.items()
66 }
67
68 thresholds = thresholds or [100, 90, 80, 70, 60]
69
70 for threshold in thresholds:
71 suggestions = [
72 self._cache[tag_title]
73 for tag_title, matching_score in scores.items()
74 if matching_score >= threshold
75 ]
76 if suggestions:
77 return suggestions
78
79 return []
80
81 async def _get_tag(self, tag_name: str) -> list:
82 """Get a specific tag."""
83 await self._get_tags()
84 found = [self._cache.get(tag_name.lower(), None)]
85 if not found[0]:
86 return self._get_suggestions(tag_name)
87 return found
88
89 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
90 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
91 """Show all known tags, a single tag, or run a subcommand."""
92 await ctx.invoke(self.get_command, tag_name=tag_name)
93
94 @tags_group.command(name='get', aliases=('show', 'g'))
95 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
96 """Get a specified tag, or a list of all tags if no tag is specified."""
97 def _command_on_cooldown(tag_name: str) -> bool:
98 """
99 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
100
101 The cooldown duration is set in constants.py.
102 """
103 now = time.time()
104
105 cooldown_conditions = (
106 tag_name
107 and tag_name in self.tag_cooldowns
108 and (now - self.tag_cooldowns[tag_name]["time"]) < Cooldowns.tags
109 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
110 )
111
112 if cooldown_conditions:
113 return True
114 return False
115
116 if _command_on_cooldown(tag_name):
117 time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
118 log.info(
119 f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
120 f"Cooldown ends in {time_left:.1f} seconds."
121 )
122 return
123
124 await self._get_tags()
125
126 if tag_name is not None:
127 founds = await self._get_tag(tag_name)
128
129 if len(founds) == 1:
130 tag = founds[0]
131 if ctx.channel.id not in TEST_CHANNELS:
132 self.tag_cooldowns[tag_name] = {
133 "time": time.time(),
134 "channel": ctx.channel.id
135 }
136 await ctx.send(embed=Embed.from_dict(tag['embed']))
137 elif founds and len(tag_name) >= 3:
138 await ctx.send(embed=Embed(
139 title='Did you mean ...',
140 description='\n'.join(tag['title'] for tag in founds[:10])
141 ))
142
143 else:
144 tags = self._cache.values()
145 if not tags:
146 await ctx.send(embed=Embed(
147 description="**There are no tags in the database!**",
148 colour=Colour.red()
149 ))
150 else:
151 embed: Embed = Embed(title="**Current tags**")
152 await LinePaginator.paginate(
153 sorted(f"**»** {tag['title']}" for tag in tags),
154 ctx,
155 embed,
156 footer_text="To show a tag, type !tags <tagname>.",
157 empty=False,
158 max_lines=15
159 )
160
161 @tags_group.command(name='set', aliases=('add', 's'))
162 @with_role(*MODERATION_ROLES)
163 async def set_command(
164 self,
165 ctx: Context,
166 tag_name: TagNameConverter,
167 *,
168 tag_content: TagContentConverter,
169 ) -> None:
170 """Create a new tag."""
171 body = {
172 'title': tag_name.lower().strip(),
173 'embed': {
174 'title': tag_name,
175 'description': tag_content
176 }
177 }
178
179 await self.bot.api_client.post('bot/tags', json=body)
180 self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')
181
182 log.debug(f"{ctx.author} successfully added the following tag to our database: \n"
183 f"tag_name: {tag_name}\n"
184 f"tag_content: '{tag_content}'\n")
185
186 await ctx.send(embed=Embed(
187 title="Tag successfully added",
188 description=f"**{tag_name}** added to tag database.",
189 colour=Colour.blurple()
190 ))
191
192 @tags_group.command(name='edit', aliases=('e', ))
193 @with_role(*MODERATION_ROLES)
194 async def edit_command(
195 self,
196 ctx: Context,
197 tag_name: TagNameConverter,
198 *,
199 tag_content: TagContentConverter,
200 ) -> None:
201 """Edit an existing tag."""
202 body = {
203 'embed': {
204 'title': tag_name,
205 'description': tag_content
206 }
207 }
208
209 await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
210 self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')
211
212 log.debug(f"{ctx.author} successfully edited the following tag in our database: \n"
213 f"tag_name: {tag_name}\n"
214 f"tag_content: '{tag_content}'\n")
215
216 await ctx.send(embed=Embed(
217 title="Tag successfully edited",
218 description=f"**{tag_name}** edited in the database.",
219 colour=Colour.blurple()
220 ))
221
222 @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
223 @with_role(Roles.admins, Roles.owners)
224 async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
225 """Remove a tag from the database."""
226 await self.bot.api_client.delete(f'bot/tags/{tag_name}')
227 self._cache.pop(tag_name.lower(), None)
228
229 log.debug(f"{ctx.author} successfully deleted the tag called '{tag_name}'")
230 await ctx.send(embed=Embed(
231 title=tag_name,
232 description=f"Tag successfully removed: {tag_name}.",
233 colour=Colour.blurple()
234 ))
235
236
237 def setup(bot: Bot) -> None:
238 """Load the Tags cog."""
239 bot.add_cog(Tags(bot))
240
[end of bot/cogs/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py
--- a/bot/cogs/tags.py
+++ b/bot/cogs/tags.py
@@ -1,7 +1,7 @@
import logging
import re
import time
-from typing import Dict, List, Optional
+from typing import Callable, Dict, Iterable, List, Optional
from discord import Colour, Embed
from discord.ext.commands import Cog, Context, group
@@ -86,11 +86,80 @@
return self._get_suggestions(tag_name)
return found
+ async def _get_tags_via_content(self, check: Callable[[Iterable], bool], keywords: str) -> list:
+ """
+ Search for tags via contents.
+
+ `predicate` will be the built-in any, all, or a custom callable. Must return a bool.
+ """
+ await self._get_tags()
+
+ keywords_processed: List[str] = []
+ for keyword in keywords.split(','):
+ keyword_sanitized = keyword.strip().casefold()
+ if not keyword_sanitized:
+ # this happens when there are leading / trailing / consecutive comma.
+ continue
+ keywords_processed.append(keyword_sanitized)
+
+ if not keywords_processed:
+ # after sanitizing, we can end up with an empty list, for example when keywords is ','
+ # in that case, we simply want to search for such keywords directly instead.
+ keywords_processed = [keywords]
+
+ matching_tags = []
+ for tag in self._cache.values():
+ if check(query in tag['embed']['description'].casefold() for query in keywords_processed):
+ matching_tags.append(tag)
+
+ return matching_tags
+
+ async def _send_matching_tags(self, ctx: Context, keywords: str, matching_tags: list) -> None:
+ """Send the result of matching tags to user."""
+ if not matching_tags:
+ pass
+ elif len(matching_tags) == 1:
+ await ctx.send(embed=Embed().from_dict(matching_tags[0]['embed']))
+ else:
+ is_plural = keywords.strip().count(' ') > 0 or keywords.strip().count(',') > 0
+ embed = Embed(
+ title=f"Here are the tags containing the given keyword{'s' * is_plural}:",
+ description='\n'.join(tag['title'] for tag in matching_tags[:10])
+ )
+ await LinePaginator.paginate(
+ sorted(f"**»** {tag['title']}" for tag in matching_tags),
+ ctx,
+ embed,
+ footer_text="To show a tag, type !tags <tagname>.",
+ empty=False,
+ max_lines=15
+ )
+
@group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
"""Show all known tags, a single tag, or run a subcommand."""
await ctx.invoke(self.get_command, tag_name=tag_name)
+ @tags_group.group(name='search', invoke_without_command=True)
+ async def search_tag_content(self, ctx: Context, *, keywords: str) -> None:
+ """
+ Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.
+
+ Only search for tags that has ALL the keywords.
+ """
+ matching_tags = await self._get_tags_via_content(all, keywords)
+ await self._send_matching_tags(ctx, keywords, matching_tags)
+
+ @search_tag_content.command(name='any')
+ async def search_tag_content_any_keyword(self, ctx: Context, *, keywords: Optional[str] = None) -> None:
+ """
+ Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.
+
+ Search for tags that has ANY of the keywords.
+ """
+ matching_tags = await self._get_tags_via_content(any, keywords or 'any')
+ await self._send_matching_tags(ctx, keywords, matching_tags)
+
@tags_group.command(name='get', aliases=('show', 'g'))
async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
"""Get a specified tag, or a list of all tags if no tag is specified."""
|
{"golden_diff": "diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py\n--- a/bot/cogs/tags.py\n+++ b/bot/cogs/tags.py\n@@ -1,7 +1,7 @@\n import logging\n import re\n import time\n-from typing import Dict, List, Optional\n+from typing import Callable, Dict, Iterable, List, Optional\n \n from discord import Colour, Embed\n from discord.ext.commands import Cog, Context, group\n@@ -86,11 +86,80 @@\n return self._get_suggestions(tag_name)\n return found\n \n+ async def _get_tags_via_content(self, check: Callable[[Iterable], bool], keywords: str) -> list:\n+ \"\"\"\n+ Search for tags via contents.\n+\n+ `predicate` will be the built-in any, all, or a custom callable. Must return a bool.\n+ \"\"\"\n+ await self._get_tags()\n+\n+ keywords_processed: List[str] = []\n+ for keyword in keywords.split(','):\n+ keyword_sanitized = keyword.strip().casefold()\n+ if not keyword_sanitized:\n+ # this happens when there are leading / trailing / consecutive comma.\n+ continue\n+ keywords_processed.append(keyword_sanitized)\n+\n+ if not keywords_processed:\n+ # after sanitizing, we can end up with an empty list, for example when keywords is ','\n+ # in that case, we simply want to search for such keywords directly instead.\n+ keywords_processed = [keywords]\n+\n+ matching_tags = []\n+ for tag in self._cache.values():\n+ if check(query in tag['embed']['description'].casefold() for query in keywords_processed):\n+ matching_tags.append(tag)\n+\n+ return matching_tags\n+\n+ async def _send_matching_tags(self, ctx: Context, keywords: str, matching_tags: list) -> None:\n+ \"\"\"Send the result of matching tags to user.\"\"\"\n+ if not matching_tags:\n+ pass\n+ elif len(matching_tags) == 1:\n+ await ctx.send(embed=Embed().from_dict(matching_tags[0]['embed']))\n+ else:\n+ is_plural = keywords.strip().count(' ') > 0 or keywords.strip().count(',') > 0\n+ embed = Embed(\n+ title=f\"Here are the tags containing the given keyword{'s' * is_plural}:\",\n+ description='\\n'.join(tag['title'] for tag in matching_tags[:10])\n+ )\n+ await LinePaginator.paginate(\n+ sorted(f\"**\u00bb** {tag['title']}\" for tag in matching_tags),\n+ ctx,\n+ embed,\n+ footer_text=\"To show a tag, type !tags <tagname>.\",\n+ empty=False,\n+ max_lines=15\n+ )\n+\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n \n+ @tags_group.group(name='search', invoke_without_command=True)\n+ async def search_tag_content(self, ctx: Context, *, keywords: str) -> None:\n+ \"\"\"\n+ Search inside tags' contents for tags. Allow searching for multiple keywords separated by comma.\n+\n+ Only search for tags that has ALL the keywords.\n+ \"\"\"\n+ matching_tags = await self._get_tags_via_content(all, keywords)\n+ await self._send_matching_tags(ctx, keywords, matching_tags)\n+\n+ @search_tag_content.command(name='any')\n+ async def search_tag_content_any_keyword(self, ctx: Context, *, keywords: Optional[str] = None) -> None:\n+ \"\"\"\n+ Search inside tags' contents for tags. 
Allow searching for multiple keywords separated by comma.\n+\n+ Search for tags that has ANY of the keywords.\n+ \"\"\"\n+ matching_tags = await self._get_tags_via_content(any, keywords or 'any')\n+ await self._send_matching_tags(ctx, keywords, matching_tags)\n+\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n", "issue": "tag-search command to search tags via their contents instead of names.\nCurrently, it can be difficult to find the specific command for a tag even if you know it exists. A command to allow you to search through tag bodies would help with finding the right tags. For example doing `!tag-search backtick` would return `!code-block` (and any other tags that include the word backtick).\n", "before_files": [{"content": "import logging\nimport re\nimport time\nfrom typing import Dict, List, Optional\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Cog, Context, group\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.bot_commands,\n Channels.helpers\n)\n\nREGEX_NON_ALPHABET = re.compile(r\"[^a-z]\", re.MULTILINE & re.IGNORECASE)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n self._cache = {}\n self._last_fetch: float = 0.0\n\n async def _get_tags(self, is_forced: bool = False) -> None:\n \"\"\"Get all tags.\"\"\"\n # refresh only when there's a more than 5m gap from last call.\n time_now: float = time.time()\n if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:\n tags = await self.bot.api_client.get('bot/tags')\n self._cache = {tag['title'].lower(): tag for tag in tags}\n self._last_fetch = time_now\n\n @staticmethod\n def _fuzzy_search(search: str, target: str) -> int:\n \"\"\"A simple scoring algorithm based on how many letters are found / total, with order in mind.\"\"\"\n current, index = 0, 0\n _search = REGEX_NON_ALPHABET.sub('', search.lower())\n _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))\n _target = next(_targets)\n try:\n while True:\n while index < len(_target) and _search[current] == _target[index]:\n current += 1\n index += 1\n index, _target = 0, next(_targets)\n except (StopIteration, IndexError):\n pass\n return current / len(_search) * 100\n\n def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:\n \"\"\"Return a list of suggested tags.\"\"\"\n scores: Dict[str, int] = {\n tag_title: Tags._fuzzy_search(tag_name, tag['title'])\n for tag_title, tag in self._cache.items()\n }\n\n thresholds = thresholds or [100, 90, 80, 70, 60]\n\n for threshold in thresholds:\n suggestions = [\n self._cache[tag_title]\n for tag_title, matching_score in scores.items()\n if matching_score >= threshold\n ]\n if suggestions:\n return suggestions\n\n return []\n\n async def _get_tag(self, tag_name: str) -> list:\n \"\"\"Get a specific tag.\"\"\"\n await self._get_tags()\n found = [self._cache.get(tag_name.lower(), None)]\n if not found[0]:\n return self._get_suggestions(tag_name)\n return found\n\n @group(name='tags', aliases=('tag', 't'), 
invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.info(\n f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\"\n )\n return\n\n await self._get_tags()\n\n if tag_name is not None:\n founds = await self._get_tag(tag_name)\n\n if len(founds) == 1:\n tag = founds[0]\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n elif founds and len(tag_name) >= 3:\n await ctx.send(embed=Embed(\n title='Did you mean ...',\n description='\\n'.join(tag['title'] for tag in founds[:10])\n ))\n\n else:\n tags = self._cache.values()\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='edit', aliases=('e', ))\n @with_role(*MODERATION_ROLES)\n async def edit_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Edit an existing tag.\"\"\"\n body = {\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully 
edited the following tag in our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully edited\",\n description=f\"**{tag_name}** edited in the database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admins, Roles.owners)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n self._cache.pop(tag_name.lower(), None)\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Tags cog.\"\"\"\n bot.add_cog(Tags(bot))\n", "path": "bot/cogs/tags.py"}]}
| 3,190 | 957 |
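A quick aside on the record above: the essence of the accepted diff is passing the built-in `any` or `all` as a predicate over per-keyword substring checks against each tag's body. Below is a minimal, self-contained sketch of that pattern in plain Python; `TAGS` and `search_tags` are invented names for illustration only, not part of the actual bot code.

```python
from typing import Callable, Dict, Iterable, List

# Hypothetical in-memory "tag cache": title -> body text (sample data, not the real bot cache).
TAGS: Dict[str, str] = {
    "code-block": "Wrap code in triple backticks to format it nicely.",
    "pep8": "Follow PEP 8 for idiomatic Python style.",
    "venv": "Use python -m venv to create an isolated environment.",
}


def search_tags(check: Callable[[Iterable[bool]], bool], keywords: str) -> List[str]:
    """Return titles of tags whose body matches the keywords under `check` (any/all)."""
    # Split on commas, normalise case, and drop empty fragments left by stray commas.
    terms = [k.strip().casefold() for k in keywords.split(",") if k.strip()]
    if not terms:
        terms = [keywords.casefold()]
    return [
        title
        for title, body in TAGS.items()
        if check(term in body.casefold() for term in terms)
    ]


if __name__ == "__main__":
    print(search_tags(all, "backtick, code"))  # ['code-block']
    print(search_tags(any, "pep, backtick"))   # ['code-block', 'pep8']
```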
gh_patches_debug_1219
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-4641
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pulp_file version is set to 3.40.0.dev
**Version**
pulpcore 3.40.0
**Describe the bug**
Status API reports pulp_file version as 3.40.0.dev
</issue>
<code>
[start of pulp_file/app/__init__.py]
1 from pulpcore.plugin import PulpPluginAppConfig
2
3
4 class PulpFilePluginAppConfig(PulpPluginAppConfig):
5 """
6 Entry point for pulp_file plugin.
7 """
8
9 name = "pulp_file.app"
10 label = "file"
11 version = "3.40.0.dev"
12 python_package_name = "pulp_file" # TODO Add python_module_name
13 domain_compatible = True
14
[end of pulp_file/app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulp_file/app/__init__.py b/pulp_file/app/__init__.py
--- a/pulp_file/app/__init__.py
+++ b/pulp_file/app/__init__.py
@@ -8,6 +8,6 @@
name = "pulp_file.app"
label = "file"
- version = "3.40.0.dev"
+ version = "3.41.0.dev"
python_package_name = "pulp_file" # TODO Add python_module_name
domain_compatible = True
|
{"golden_diff": "diff --git a/pulp_file/app/__init__.py b/pulp_file/app/__init__.py\n--- a/pulp_file/app/__init__.py\n+++ b/pulp_file/app/__init__.py\n@@ -8,6 +8,6 @@\n \n name = \"pulp_file.app\"\n label = \"file\"\n- version = \"3.40.0.dev\"\n+ version = \"3.41.0.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n", "issue": "pulp_file version is set to 3.40.0.dev \n**Version**\r\npulpcore 3.40.0\r\n\r\n**Describe the bug**\r\nStatus API reports pulp_file version as 3.40.0.dev\n", "before_files": [{"content": "from pulpcore.plugin import PulpPluginAppConfig\n\n\nclass PulpFilePluginAppConfig(PulpPluginAppConfig):\n \"\"\"\n Entry point for pulp_file plugin.\n \"\"\"\n\n name = \"pulp_file.app\"\n label = \"file\"\n version = \"3.40.0.dev\"\n python_package_name = \"pulp_file\" # TODO Add python_module_name\n domain_compatible = True\n", "path": "pulp_file/app/__init__.py"}]}
| 700 | 121 |
gh_patches_debug_13169
|
rasdani/github-patches
|
git_diff
|
activeloopai__deeplake-1994
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Can't write objects to JSON
v3.0.14
```python
import pickle
t = ds.create_tensor(f"t/group/f", htype="json", chunk_compression="lz4")
t.append(pickle.dumps("test")) # pass any pickled object into append gets error
```
```
ValueError: Circular reference detected
```
Passing strings and such into this tensor works fine, but for some reason any pickled object, or any Python object that gets pickled, gives the above ValueError.
</issue>
<code>
[start of deeplake/util/json.py]
1 from typing import Any, Dict, List, Optional, Tuple, Union
2 import numpy as np
3 from numpy import ndarray
4 import json
5 import base64
6 from deeplake.core.sample import Sample # type: ignore
7
8 Schema = Any
9
10
11 scalars = ["int", "float", "bool", "str", "list", "dict", "ndarray", "Sample"]
12 types = ["Any", "Dict", "List", "Optional", "Union"]
13
14
15 def _norm_type(typ: str):
16 typ = typ.replace("typing.", "")
17 replacements = {
18 "numpy.ndarray": "ndarray",
19 "np.ndarray": "ndarray",
20 "deeplake.core.sample.Sample": "Sample",
21 "deeplake.Sample": "Sample",
22 }
23 return replacements.get(typ, typ)
24
25
26 def _parse_schema(schema: Union[str, Schema]) -> Tuple[str, List[str]]:
27 if getattr(schema, "__module__", None) == "typing":
28 schema = str(schema)
29 validate = False
30 else:
31 validate = True
32
33 if schema in scalars:
34 return schema, []
35
36 if "[" not in schema:
37 return _norm_type(schema), []
38
39 typ, param_string = schema.split("[", 1)
40 typ = _norm_type(typ)
41 assert param_string[-1] == "]"
42 params = []
43 buff = ""
44 level = 0
45 for c in param_string:
46 if c == "[":
47 level += 1
48 buff += c
49 elif c == "]":
50 if level == 0:
51 if buff:
52 params.append(buff)
53 if validate:
54 _validate_schema(typ, params)
55 return typ, params
56 else:
57 buff += c
58 level -= 1
59 elif c == ",":
60 if level == 0:
61 params.append(buff)
62 buff = ""
63 else:
64 buff += c
65 elif c == " ":
66 continue
67 else:
68 buff += c
69 raise InvalidJsonSchemaException()
70
71
72 class InvalidJsonSchemaException(Exception):
73 pass
74
75
76 class ArgumentMismatchException(InvalidJsonSchemaException):
77 def __init__(self, typ: str, actual: int, expected: int, exact: bool = False):
78 assert actual != expected
79 gt = actual > expected
80 super(ArgumentMismatchException, self).__init__(
81 f"Too {'many' if gt else 'few'} parameters for {typ};"
82 + f" actual {actual},expected {'exatcly' if exact else ('at most' if gt else 'at least')} {expected}."
83 )
84
85
86 def _validate_schema(typ: str, params: List[str]) -> Tuple[str, List[str]]:
87 if typ in scalars:
88 return typ, params
89
90 if typ not in types:
91 raise InvalidJsonSchemaException(f"Unsupported type: {typ}")
92
93 def _err(expected_num_params: int, exact: bool = False):
94 raise ArgumentMismatchException(typ, len(params), expected_num_params, exact)
95
96 if typ == "Any":
97 if params:
98 _err(0)
99 elif typ == "Optional":
100 if len(params) > 1:
101 _err(1)
102 elif typ == "Union":
103 if len(params) == 0:
104 _err(1)
105 elif typ == "List":
106 if len(params) > 1:
107 _err(1)
108 elif typ == "Dict":
109 if len(params) not in (0, 2):
110 _err(2, True)
111 return typ, params
112
113
114 def _validate_any(obj: Any, params: List[str]):
115 assert not params
116 return True
117
118
119 def _validate_union(obj: Any, params: List[str]):
120 for schema in params:
121 if _validate_object(obj, schema):
122 return True
123 return False
124
125
126 def _validate_optional(obj: Any, params: List[str]) -> bool:
127 assert len(params) <= 1
128 if obj is None:
129 return True
130 if params:
131 return _validate_object(obj, params[0])
132 return True
133
134
135 def _validate_list(obj: Any, params: List[str]) -> bool:
136 assert len(params) <= 1
137 if not isinstance(obj, (list, tuple)):
138 return False
139 if params:
140 for item in obj:
141 if not _validate_object(item, params[0]):
142 return False
143 return True
144
145
146 def _validate_dict(obj: Any, params: List[str]) -> bool:
147 assert len(params) in (0, 2)
148 if not isinstance(obj, dict):
149 return False
150 if params:
151 assert params[0] in (
152 "str",
153 "Any",
154 ), "Only string keys are allowed for json dicts."
155 for v in obj.values():
156 if not _validate_object(v, params[1]):
157 return False
158 return True
159
160
161 def _validate_nonetype(obj: Any, params: List[str]) -> bool:
162 assert not params
163 return obj is None
164
165
166 def _validate_object(obj: Any, schema: Union[str, Schema]) -> bool:
167 typ, params = _parse_schema(schema)
168 if typ in scalars:
169 return isinstance(obj, eval(typ))
170 return globals()[f"_validate_{typ.lower()}"](obj, params)
171
172
173 class JsonValidationError(Exception):
174 pass
175
176
177 def validate_json_object(obj: Any, schema: Union[str, Schema]) -> None:
178 if obj and not _validate_object(obj, schema):
179 raise JsonValidationError()
180
181
182 def validate_json_schema(schema: str):
183 _parse_schema(schema)
184
185
186 class HubJsonEncoder(json.JSONEncoder):
187 def default(self, obj):
188 if isinstance(obj, ndarray):
189 return {
190 "_hub_custom_type": "ndarray",
191 "data": base64.b64encode(obj.tobytes()).decode(),
192 "shape": obj.shape,
193 "dtype": obj.dtype.name,
194 }
195 elif isinstance(obj, Sample):
196 if obj.compression:
197 return {
198 "_hub_custom_type": "Sample",
199 "data": base64.b64encode(obj.buffer).decode(),
200 "compression": obj.compression,
201 }
202 else:
203 return self.default(obj.array)
204 return obj
205
206
207 class HubJsonDecoder(json.JSONDecoder):
208 def __init__(self, *args, **kwargs):
209 json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
210
211 def object_hook(self, obj):
212 hub_custom_type = obj.get("_hub_custom_type")
213 if hub_custom_type == "ndarray":
214 return np.frombuffer(
215 base64.b64decode(obj["data"]), dtype=obj["dtype"]
216 ).reshape(obj["shape"])
217 elif hub_custom_type == "Sample":
218 return Sample(
219 buffer=base64.b64decode(obj["data"]), compression=obj["compression"]
220 )
221 return obj
222
[end of deeplake/util/json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deeplake/util/json.py b/deeplake/util/json.py
--- a/deeplake/util/json.py
+++ b/deeplake/util/json.py
@@ -201,6 +201,12 @@
}
else:
return self.default(obj.array)
+ elif isinstance(obj, bytes):
+ return {
+ "_hub_custom_type": "bytes",
+ "data": base64.b64encode(obj).decode(),
+ }
+
return obj
@@ -218,4 +224,6 @@
return Sample(
buffer=base64.b64decode(obj["data"]), compression=obj["compression"]
)
+ elif hub_custom_type == "bytes":
+ return base64.b64decode(obj["data"])
return obj
|
{"golden_diff": "diff --git a/deeplake/util/json.py b/deeplake/util/json.py\n--- a/deeplake/util/json.py\n+++ b/deeplake/util/json.py\n@@ -201,6 +201,12 @@\n }\n else:\n return self.default(obj.array)\n+ elif isinstance(obj, bytes):\n+ return {\n+ \"_hub_custom_type\": \"bytes\",\n+ \"data\": base64.b64encode(obj).decode(),\n+ }\n+\n return obj\n \n \n@@ -218,4 +224,6 @@\n return Sample(\n buffer=base64.b64decode(obj[\"data\"]), compression=obj[\"compression\"]\n )\n+ elif hub_custom_type == \"bytes\":\n+ return base64.b64decode(obj[\"data\"])\n return obj\n", "issue": "[BUG] Can't write objects to JSON\nv3.0.14\r\n\r\n```python\r\nimport pickle\r\nt = ds.create_tensor(f\"t/group/f\", htype=\"json\", chunk_compression=\"lz4\")\r\nt.append(pickle.dumps(\"test\")) # pass any pickled object into append gets error\r\n```\r\n\r\n```\r\nValueError: Circular reference detected\r\n```\r\n\r\npassing strings and such into this tensor works fine, but for some reason any pickled object or python object that gets pickled gives the above ValueError.\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Tuple, Union\nimport numpy as np\nfrom numpy import ndarray\nimport json\nimport base64\nfrom deeplake.core.sample import Sample # type: ignore\n\nSchema = Any\n\n\nscalars = [\"int\", \"float\", \"bool\", \"str\", \"list\", \"dict\", \"ndarray\", \"Sample\"]\ntypes = [\"Any\", \"Dict\", \"List\", \"Optional\", \"Union\"]\n\n\ndef _norm_type(typ: str):\n typ = typ.replace(\"typing.\", \"\")\n replacements = {\n \"numpy.ndarray\": \"ndarray\",\n \"np.ndarray\": \"ndarray\",\n \"deeplake.core.sample.Sample\": \"Sample\",\n \"deeplake.Sample\": \"Sample\",\n }\n return replacements.get(typ, typ)\n\n\ndef _parse_schema(schema: Union[str, Schema]) -> Tuple[str, List[str]]:\n if getattr(schema, \"__module__\", None) == \"typing\":\n schema = str(schema)\n validate = False\n else:\n validate = True\n\n if schema in scalars:\n return schema, []\n\n if \"[\" not in schema:\n return _norm_type(schema), []\n\n typ, param_string = schema.split(\"[\", 1)\n typ = _norm_type(typ)\n assert param_string[-1] == \"]\"\n params = []\n buff = \"\"\n level = 0\n for c in param_string:\n if c == \"[\":\n level += 1\n buff += c\n elif c == \"]\":\n if level == 0:\n if buff:\n params.append(buff)\n if validate:\n _validate_schema(typ, params)\n return typ, params\n else:\n buff += c\n level -= 1\n elif c == \",\":\n if level == 0:\n params.append(buff)\n buff = \"\"\n else:\n buff += c\n elif c == \" \":\n continue\n else:\n buff += c\n raise InvalidJsonSchemaException()\n\n\nclass InvalidJsonSchemaException(Exception):\n pass\n\n\nclass ArgumentMismatchException(InvalidJsonSchemaException):\n def __init__(self, typ: str, actual: int, expected: int, exact: bool = False):\n assert actual != expected\n gt = actual > expected\n super(ArgumentMismatchException, self).__init__(\n f\"Too {'many' if gt else 'few'} parameters for {typ};\"\n + f\" actual {actual},expected {'exatcly' if exact else ('at most' if gt else 'at least')} {expected}.\"\n )\n\n\ndef _validate_schema(typ: str, params: List[str]) -> Tuple[str, List[str]]:\n if typ in scalars:\n return typ, params\n\n if typ not in types:\n raise InvalidJsonSchemaException(f\"Unsupported type: {typ}\")\n\n def _err(expected_num_params: int, exact: bool = False):\n raise ArgumentMismatchException(typ, len(params), expected_num_params, exact)\n\n if typ == \"Any\":\n if params:\n _err(0)\n elif typ == \"Optional\":\n if len(params) > 1:\n _err(1)\n elif typ 
== \"Union\":\n if len(params) == 0:\n _err(1)\n elif typ == \"List\":\n if len(params) > 1:\n _err(1)\n elif typ == \"Dict\":\n if len(params) not in (0, 2):\n _err(2, True)\n return typ, params\n\n\ndef _validate_any(obj: Any, params: List[str]):\n assert not params\n return True\n\n\ndef _validate_union(obj: Any, params: List[str]):\n for schema in params:\n if _validate_object(obj, schema):\n return True\n return False\n\n\ndef _validate_optional(obj: Any, params: List[str]) -> bool:\n assert len(params) <= 1\n if obj is None:\n return True\n if params:\n return _validate_object(obj, params[0])\n return True\n\n\ndef _validate_list(obj: Any, params: List[str]) -> bool:\n assert len(params) <= 1\n if not isinstance(obj, (list, tuple)):\n return False\n if params:\n for item in obj:\n if not _validate_object(item, params[0]):\n return False\n return True\n\n\ndef _validate_dict(obj: Any, params: List[str]) -> bool:\n assert len(params) in (0, 2)\n if not isinstance(obj, dict):\n return False\n if params:\n assert params[0] in (\n \"str\",\n \"Any\",\n ), \"Only string keys are allowed for json dicts.\"\n for v in obj.values():\n if not _validate_object(v, params[1]):\n return False\n return True\n\n\ndef _validate_nonetype(obj: Any, params: List[str]) -> bool:\n assert not params\n return obj is None\n\n\ndef _validate_object(obj: Any, schema: Union[str, Schema]) -> bool:\n typ, params = _parse_schema(schema)\n if typ in scalars:\n return isinstance(obj, eval(typ))\n return globals()[f\"_validate_{typ.lower()}\"](obj, params)\n\n\nclass JsonValidationError(Exception):\n pass\n\n\ndef validate_json_object(obj: Any, schema: Union[str, Schema]) -> None:\n if obj and not _validate_object(obj, schema):\n raise JsonValidationError()\n\n\ndef validate_json_schema(schema: str):\n _parse_schema(schema)\n\n\nclass HubJsonEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, ndarray):\n return {\n \"_hub_custom_type\": \"ndarray\",\n \"data\": base64.b64encode(obj.tobytes()).decode(),\n \"shape\": obj.shape,\n \"dtype\": obj.dtype.name,\n }\n elif isinstance(obj, Sample):\n if obj.compression:\n return {\n \"_hub_custom_type\": \"Sample\",\n \"data\": base64.b64encode(obj.buffer).decode(),\n \"compression\": obj.compression,\n }\n else:\n return self.default(obj.array)\n return obj\n\n\nclass HubJsonDecoder(json.JSONDecoder):\n def __init__(self, *args, **kwargs):\n json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)\n\n def object_hook(self, obj):\n hub_custom_type = obj.get(\"_hub_custom_type\")\n if hub_custom_type == \"ndarray\":\n return np.frombuffer(\n base64.b64decode(obj[\"data\"]), dtype=obj[\"dtype\"]\n ).reshape(obj[\"shape\"])\n elif hub_custom_type == \"Sample\":\n return Sample(\n buffer=base64.b64decode(obj[\"data\"]), compression=obj[\"compression\"]\n )\n return obj\n", "path": "deeplake/util/json.py"}]}
| 2,699 | 181 |
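For context on the record above: the fix follows the standard pattern of extending `json.JSONEncoder.default` so an unsupported type (here `bytes`) is serialised as a tagged base64 payload, with an `object_hook` reversing it on load. The sketch below shows that round-trip using only the standard library; `BytesEncoder` and `bytes_hook` are illustrative names, not Deep Lake's actual `HubJsonEncoder`/`HubJsonDecoder`.

```python
import base64
import json
import pickle


class BytesEncoder(json.JSONEncoder):
    def default(self, obj):
        # Tag raw bytes so the decoder knows to base64-decode them back.
        if isinstance(obj, bytes):
            return {"_custom_type": "bytes", "data": base64.b64encode(obj).decode()}
        return super().default(obj)


def bytes_hook(obj):
    # Reverse of BytesEncoder: rebuild bytes from the tagged payload.
    if obj.get("_custom_type") == "bytes":
        return base64.b64decode(obj["data"])
    return obj


if __name__ == "__main__":
    payload = pickle.dumps("test")  # arbitrary bytes, as in the bug report
    text = json.dumps({"blob": payload}, cls=BytesEncoder)
    restored = json.loads(text, object_hook=bytes_hook)
    assert restored["blob"] == payload
    print("round-trip ok, json length:", len(text))
```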
gh_patches_debug_3577
|
rasdani/github-patches
|
git_diff
|
python__mypy-2596
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`Tuple[()]` is occasionally converted to `Tuple[Any, ...]`
Most obvious when the `Tuple[()]` is passed through a Callable
```
from typing import *
Type = Callable[[Tuple[()]], int]
x = "foo" # type: Type
```
Results in:
```
Incompatible types in assignment (expression has type "str", variable has type Callable[[Tuple[Any, ...]], int])
```
As a side note,
```Type = Tuple[()]```
Also appears to give a weird error.
</issue>
<code>
[start of mypy/exprtotype.py]
1 """Translate an Expression to a Type value."""
2
3 from mypy.nodes import (
4 Expression, NameExpr, MemberExpr, IndexExpr, TupleExpr,
5 ListExpr, StrExpr, BytesExpr, UnicodeExpr, EllipsisExpr
6 )
7 from mypy.parsetype import parse_str_as_type, TypeParseError
8 from mypy.types import Type, UnboundType, TypeList, EllipsisType
9
10
11 class TypeTranslationError(Exception):
12 """Exception raised when an expression is not valid as a type."""
13
14
15 def expr_to_unanalyzed_type(expr: Expression) -> Type:
16 """Translate an expression to the corresponding type.
17
18 The result is not semantically analyzed. It can be UnboundType or TypeList.
19 Raise TypeTranslationError if the expression cannot represent a type.
20 """
21 if isinstance(expr, NameExpr):
22 name = expr.name
23 return UnboundType(name, line=expr.line, column=expr.column)
24 elif isinstance(expr, MemberExpr):
25 fullname = get_member_expr_fullname(expr)
26 if fullname:
27 return UnboundType(fullname, line=expr.line, column=expr.column)
28 else:
29 raise TypeTranslationError()
30 elif isinstance(expr, IndexExpr):
31 base = expr_to_unanalyzed_type(expr.base)
32 if isinstance(base, UnboundType):
33 if base.args:
34 raise TypeTranslationError()
35 if isinstance(expr.index, TupleExpr):
36 args = expr.index.items
37 else:
38 args = [expr.index]
39 base.args = [expr_to_unanalyzed_type(arg) for arg in args]
40 return base
41 else:
42 raise TypeTranslationError()
43 elif isinstance(expr, ListExpr):
44 return TypeList([expr_to_unanalyzed_type(t) for t in expr.items],
45 line=expr.line, column=expr.column)
46 elif isinstance(expr, (StrExpr, BytesExpr, UnicodeExpr)):
47 # Parse string literal type.
48 try:
49 result = parse_str_as_type(expr.value, expr.line)
50 except TypeParseError:
51 raise TypeTranslationError()
52 return result
53 elif isinstance(expr, EllipsisExpr):
54 return EllipsisType(expr.line)
55 else:
56 raise TypeTranslationError()
57
58
59 def get_member_expr_fullname(expr: MemberExpr) -> str:
60 """Return the qualified name representation of a member expression.
61
62 Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
63 argument cannot be represented in this form.
64 """
65 if isinstance(expr.expr, NameExpr):
66 initial = expr.expr.name
67 elif isinstance(expr.expr, MemberExpr):
68 initial = get_member_expr_fullname(expr.expr)
69 else:
70 return None
71 return '{}.{}'.format(initial, expr.name)
72
[end of mypy/exprtotype.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mypy/exprtotype.py b/mypy/exprtotype.py
--- a/mypy/exprtotype.py
+++ b/mypy/exprtotype.py
@@ -37,6 +37,8 @@
else:
args = [expr.index]
base.args = [expr_to_unanalyzed_type(arg) for arg in args]
+ if not base.args:
+ base.empty_tuple_index = True
return base
else:
raise TypeTranslationError()
|
{"golden_diff": "diff --git a/mypy/exprtotype.py b/mypy/exprtotype.py\n--- a/mypy/exprtotype.py\n+++ b/mypy/exprtotype.py\n@@ -37,6 +37,8 @@\n else:\n args = [expr.index]\n base.args = [expr_to_unanalyzed_type(arg) for arg in args]\n+ if not base.args:\n+ base.empty_tuple_index = True\n return base\n else:\n raise TypeTranslationError()\n", "issue": "`Tuple[()]` is occasionally converted to `Tuple[Any, ...]`\nMost obvious when the `Tuple[()]` is passed through a Callable\r\n```\r\nfrom typing import *\r\n\r\nType = Callable[[Tuple[()]], int]\r\nx = \"foo\" # type: Type\r\n```\r\nResults in:\r\n```\r\nIncompatible types in assignment (expression has type \"str\", variable has type Callable[[Tuple[Any, ...]], int])\r\n```\r\n\r\nAs a side note,\r\n```Type = Tuple[()]```\r\nAlso appears to give a weird error.\n", "before_files": [{"content": "\"\"\"Translate an Expression to a Type value.\"\"\"\n\nfrom mypy.nodes import (\n Expression, NameExpr, MemberExpr, IndexExpr, TupleExpr,\n ListExpr, StrExpr, BytesExpr, UnicodeExpr, EllipsisExpr\n)\nfrom mypy.parsetype import parse_str_as_type, TypeParseError\nfrom mypy.types import Type, UnboundType, TypeList, EllipsisType\n\n\nclass TypeTranslationError(Exception):\n \"\"\"Exception raised when an expression is not valid as a type.\"\"\"\n\n\ndef expr_to_unanalyzed_type(expr: Expression) -> Type:\n \"\"\"Translate an expression to the corresponding type.\n\n The result is not semantically analyzed. It can be UnboundType or TypeList.\n Raise TypeTranslationError if the expression cannot represent a type.\n \"\"\"\n if isinstance(expr, NameExpr):\n name = expr.name\n return UnboundType(name, line=expr.line, column=expr.column)\n elif isinstance(expr, MemberExpr):\n fullname = get_member_expr_fullname(expr)\n if fullname:\n return UnboundType(fullname, line=expr.line, column=expr.column)\n else:\n raise TypeTranslationError()\n elif isinstance(expr, IndexExpr):\n base = expr_to_unanalyzed_type(expr.base)\n if isinstance(base, UnboundType):\n if base.args:\n raise TypeTranslationError()\n if isinstance(expr.index, TupleExpr):\n args = expr.index.items\n else:\n args = [expr.index]\n base.args = [expr_to_unanalyzed_type(arg) for arg in args]\n return base\n else:\n raise TypeTranslationError()\n elif isinstance(expr, ListExpr):\n return TypeList([expr_to_unanalyzed_type(t) for t in expr.items],\n line=expr.line, column=expr.column)\n elif isinstance(expr, (StrExpr, BytesExpr, UnicodeExpr)):\n # Parse string literal type.\n try:\n result = parse_str_as_type(expr.value, expr.line)\n except TypeParseError:\n raise TypeTranslationError()\n return result\n elif isinstance(expr, EllipsisExpr):\n return EllipsisType(expr.line)\n else:\n raise TypeTranslationError()\n\n\ndef get_member_expr_fullname(expr: MemberExpr) -> str:\n \"\"\"Return the qualified name representation of a member expression.\n\n Return a string of form foo.bar, foo.bar.baz, or similar, or None if the\n argument cannot be represented in this form.\n \"\"\"\n if isinstance(expr.expr, NameExpr):\n initial = expr.expr.name\n elif isinstance(expr.expr, MemberExpr):\n initial = get_member_expr_fullname(expr.expr)\n else:\n return None\n return '{}.{}'.format(initial, expr.name)\n", "path": "mypy/exprtotype.py"}]}
| 1,363 | 109 |
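To make the expected behaviour in the record above concrete: `Tuple[()]` denotes the empty-tuple type and should survive inside a `Callable`, rather than decaying to `Tuple[Any, ...]` as in the reported error message. The snippet below is a standalone illustration (the `Handler` alias and `takes_empty` function are invented for the example); it runs fine at runtime, and a fixed mypy should report the second assignment against `Callable[[Tuple[()]], int]`.

```python
from typing import Callable, Tuple

# Tuple[()] is the type of the empty tuple; it must not widen to Tuple[Any, ...].
Handler = Callable[[Tuple[()]], int]


def takes_empty(t: Tuple[()]) -> int:
    # The only value inhabiting Tuple[()] is (), so len(t) is always 0.
    return len(t)


ok: Handler = takes_empty  # compatible: the parameter type is exactly Tuple[()]
bad: Handler = "foo"       # mypy error; the variable type should be shown with Tuple[()], not Tuple[Any, ...]

if __name__ == "__main__":
    print(ok(()))  # prints 0; the annotations above are checked statically by mypy, not at runtime
```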
gh_patches_debug_20406
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1413
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
URLs with ending commas get included when Sopel v6.5.x parses them.
```
Examples:
[02:46pm] <Ant> http://www.tv.com/shows/family-guy/da-boom-25450/,
02:46PM <Crushinator> [ Not found - TV.com ] - www.tv.com
02:47PM <URL> [ Not found - TV.com ] - www.tv.com
Periods had no problems:
[02:48pm] <Ant> http://www.tv.com/shows/family-guy/da-boom-25450/.
02:48PM <URL> [ Family Guy - Season 2, Episode 3: Da Boom - TV.com ] -
www.tv.com
02:48PM <Crushinator> [ Family Guy - Season 2, Episode 3: Da Boom - TV.com ] -
www.tv.com
```
</issue>
<code>
[start of sopel/modules/url.py]
1 # coding=utf-8
2 """URL title module"""
3 # Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham
4 # Copyright 2012-2013 Elsie Powell
5 # Copyright 2013 Lior Ramati ([email protected])
6 # Copyright © 2014 Elad Alfassa <[email protected]>
7 # Licensed under the Eiffel Forum License 2.
8 from __future__ import unicode_literals, absolute_import, print_function, division
9
10 import re
11 from sopel import web, tools, __version__
12 from sopel.module import commands, rule, example
13 from sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection
14
15 import requests
16
17 USER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)
18 default_headers = {'User-Agent': USER_AGENT}
19 find_urls = None
20 # These are used to clean up the title tag before actually parsing it. Not the
21 # world's best way to do this, but it'll do for now.
22 title_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)
23 quoted_title = re.compile('[\'"]<title>[\'"]', re.IGNORECASE)
24 # This is another regex that presumably does something important.
25 re_dcc = re.compile(r'(?i)dcc\ssend')
26 # This sets the maximum number of bytes that should be read in order to find
27 # the title. We don't want it too high, or a link to a big file/stream will
28 # just keep downloading until there's no more memory. 640k ought to be enough
29 # for anybody.
30 max_bytes = 655360
31
32
33 class UrlSection(StaticSection):
34 # TODO some validation rules maybe?
35 exclude = ListAttribute('exclude')
36 exclusion_char = ValidatedAttribute('exclusion_char', default='!')
37 shorten_url_length = ValidatedAttribute(
38 'shorten_url_length', int, default=0)
39
40
41 def configure(config):
42 config.define_section('url', UrlSection)
43 config.url.configure_setting(
44 'exclude',
45 'Enter regular expressions for each URL you would like to exclude.'
46 )
47 config.url.configure_setting(
48 'exclusion_char',
49 'Enter a character which can be prefixed to suppress URL titling'
50 )
51 config.url.configure_setting(
52 'shorten_url_length',
53 'Enter how many characters a URL should be before the bot puts a'
54 ' shorter version of the URL in the title as a TinyURL link'
55 ' (0 to disable)'
56 )
57
58
59 def setup(bot):
60 global find_urls
61
62 bot.config.define_section('url', UrlSection)
63
64 if bot.config.url.exclude:
65 regexes = [re.compile(s) for s in bot.config.url.exclude]
66 else:
67 regexes = []
68
69 # We're keeping these in their own list, rather than putting then in the
70 # callbacks list because 1, it's easier to deal with modules that are still
71 # using this list, and not the newer callbacks list and 2, having a lambda
72 # just to pass is kinda ugly.
73 if not bot.memory.contains('url_exclude'):
74 bot.memory['url_exclude'] = regexes
75 else:
76 exclude = bot.memory['url_exclude']
77 if regexes:
78 exclude.extend(regexes)
79 bot.memory['url_exclude'] = exclude
80
81 # Ensure that url_callbacks and last_seen_url are in memory
82 if not bot.memory.contains('url_callbacks'):
83 bot.memory['url_callbacks'] = tools.SopelMemory()
84 if not bot.memory.contains('last_seen_url'):
85 bot.memory['last_seen_url'] = tools.SopelMemory()
86
87 def find_func(text):
88 re_url = r'(?u)((?<!%s)(?:http|https|ftp)(?::\/\/\S+))'\
89 % (bot.config.url.exclusion_char)
90 r = re.compile(re_url, re.IGNORECASE)
91
92 urls = re.findall(r, text)
93 return urls
94
95 find_urls = find_func
96
97
98 @commands('title')
99 @example('.title http://google.com', '[ Google ] - google.com')
100 def title_command(bot, trigger):
101 """
102 Show the title or URL information for the given URL, or the last URL seen
103 in this channel.
104 """
105 if not trigger.group(2):
106 if trigger.sender not in bot.memory['last_seen_url']:
107 return
108 matched = check_callbacks(bot, trigger,
109 bot.memory['last_seen_url'][trigger.sender],
110 True)
111 if matched:
112 return
113 else:
114 urls = [bot.memory['last_seen_url'][trigger.sender]]
115 else:
116 urls = find_urls(trigger)
117
118 results = process_urls(bot, trigger, urls)
119 for title, domain, tinyurl in results[:4]:
120 message = '[ %s ] - %s' % (title, domain)
121 if tinyurl:
122 message += ' ( %s )' % tinyurl
123 bot.reply(message)
124
125
126 @rule(r'(?u).*(https?://\S+).*')
127 def title_auto(bot, trigger):
128 """
129 Automatically show titles for URLs. For shortened URLs/redirects, find
130 where the URL redirects to and show the title for that (or call a function
131 from another module to give more information).
132 """
133 if re.match(bot.config.core.prefix + 'title', trigger):
134 return
135
136 # Avoid fetching known malicious links
137 if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:
138 if bot.memory['safety_cache'][trigger]['positives'] > 1:
139 return
140
141 urls = find_urls(trigger)
142 if len(urls) == 0:
143 return
144
145 results = process_urls(bot, trigger, urls)
146 bot.memory['last_seen_url'][trigger.sender] = urls[-1]
147
148 for title, domain, tinyurl in results[:4]:
149 message = '[ %s ] - %s' % (title, domain)
150 if tinyurl:
151 message += ' ( %s )' % tinyurl
152 # Guard against responding to other instances of this bot.
153 if message != trigger:
154 bot.say(message)
155
156
157 def process_urls(bot, trigger, urls):
158 """
159 For each URL in the list, ensure that it isn't handled by another module.
160 If not, find where it redirects to, if anywhere. If that redirected URL
161 should be handled by another module, dispatch the callback for it.
162 Return a list of (title, hostname) tuples for each URL which is not handled by
163 another module.
164 """
165
166 results = []
167 shorten_url_length = bot.config.url.shorten_url_length
168 for url in urls:
169 if not url.startswith(bot.config.url.exclusion_char):
170 # Magic stuff to account for international domain names
171 try:
172 url = web.iri_to_uri(url)
173 except Exception: # TODO: Be specific
174 pass
175 # First, check that the URL we got doesn't match
176 matched = check_callbacks(bot, trigger, url, False)
177 if matched:
178 continue
179 # If the URL is over bot.config.url.shorten_url_length,
180 # shorten the URL
181 tinyurl = None
182 if (shorten_url_length > 0) and (len(url) > shorten_url_length):
183 # Check bot memory to see if the shortened URL is already in
184 # memory
185 if not bot.memory.contains('shortened_urls'):
186 # Initialize shortened_urls as a dict if it doesn't exist.
187 bot.memory['shortened_urls'] = tools.SopelMemory()
188 if bot.memory['shortened_urls'].contains(url):
189 tinyurl = bot.memory['shortened_urls'][url]
190 else:
191 tinyurl = get_tinyurl(url)
192 bot.memory['shortened_urls'][url] = tinyurl
193 # Finally, actually show the URL
194 title = find_title(url, verify=bot.config.core.verify_ssl)
195 if title:
196 results.append((title, get_hostname(url), tinyurl))
197 return results
198
199
200 def check_callbacks(bot, trigger, url, run=True):
201 """
202 Check the given URL against the callbacks list. If it matches, and ``run``
203 is given as ``True``, run the callback function, otherwise pass. Returns
204 ``True`` if the url matched anything in the callbacks list.
205 """
206 # Check if it matches the exclusion list first
207 matched = any(regex.search(url) for regex in bot.memory['url_exclude'])
208 # Then, check if there's anything in the callback list
209 for regex, function in tools.iteritems(bot.memory['url_callbacks']):
210 match = regex.search(url)
211 if match:
212 # Always run ones from @url; they don't run on their own.
213 if run or hasattr(function, 'url_regex'):
214 function(bot, trigger, match)
215 matched = True
216 return matched
217
218
219 def find_title(url, verify=True):
220 """Return the title for the given URL."""
221 try:
222 response = requests.get(url, stream=True, verify=verify,
223 headers=default_headers)
224 content = b''
225 for byte in response.iter_content(chunk_size=512):
226 content += byte
227 if b'</title>' in content or len(content) > max_bytes:
228 break
229 content = content.decode('utf-8', errors='ignore')
230 # Need to close the connection because we have not read all
231 # the data
232 response.close()
233 except requests.exceptions.ConnectionError:
234 return None
235
236 # Some cleanup that I don't really grok, but was in the original, so
237 # we'll keep it (with the compiled regexes made global) for now.
238 content = title_tag_data.sub(r'<\1title>', content)
239 content = quoted_title.sub('', content)
240
241 start = content.rfind('<title>')
242 end = content.rfind('</title>')
243 if start == -1 or end == -1:
244 return
245 title = web.decode(content[start + 7:end])
246 title = title.strip()[:200]
247
248 title = ' '.join(title.split()) # cleanly remove multiple spaces
249
250 # More cryptic regex substitutions. This one looks to be myano's invention.
251 title = re_dcc.sub('', title)
252
253 return title or None
254
255
256 def get_hostname(url):
257 idx = 7
258 if url.startswith('https://'):
259 idx = 8
260 elif url.startswith('ftp://'):
261 idx = 6
262 hostname = url[idx:]
263 slash = hostname.find('/')
264 if slash != -1:
265 hostname = hostname[:slash]
266 return hostname
267
268
269 def get_tinyurl(url):
270 """ Returns a shortened tinyURL link of the URL. """
271 tinyurl = "https://tinyurl.com/api-create.php?url=%s" % url
272 try:
273 res = requests.get(tinyurl)
274 res.raise_for_status()
275 except requests.exceptions.RequestException:
276 return None
277 # Replace text output with https instead of http to make the
278 # result an HTTPS link.
279 return res.text.replace("http://", "https://")
280
281
282 if __name__ == "__main__":
283 from sopel.test_tools import run_example_tests
284 run_example_tests(__file__)
285
[end of sopel/modules/url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/url.py b/sopel/modules/url.py
--- a/sopel/modules/url.py
+++ b/sopel/modules/url.py
@@ -84,12 +84,26 @@
if not bot.memory.contains('last_seen_url'):
bot.memory['last_seen_url'] = tools.SopelMemory()
- def find_func(text):
+ def find_func(text, clean=False):
+ def trim_url(url):
+ # clean trailing sentence- or clause-ending punctuation
+ while url[-1] in '.,?!\'":;':
+ url = url[:-1]
+
+ # clean unmatched parentheses/braces/brackets
+ for (opener, closer) in [('(', ')'), ('[', ']'), ('{', '}'), ('<', '>')]:
+ if url[-1] is closer and url.count(opener) < url.count(closer):
+ url = url[:-1]
+
+ return url
+
re_url = r'(?u)((?<!%s)(?:http|https|ftp)(?::\/\/\S+))'\
% (bot.config.url.exclusion_char)
r = re.compile(re_url, re.IGNORECASE)
urls = re.findall(r, text)
+ if clean:
+ urls = [trim_url(url) for url in urls]
return urls
find_urls = find_func
|
{"golden_diff": "diff --git a/sopel/modules/url.py b/sopel/modules/url.py\n--- a/sopel/modules/url.py\n+++ b/sopel/modules/url.py\n@@ -84,12 +84,26 @@\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n \n- def find_func(text):\n+ def find_func(text, clean=False):\n+ def trim_url(url):\n+ # clean trailing sentence- or clause-ending punctuation\n+ while url[-1] in '.,?!\\'\":;':\n+ url = url[:-1]\n+\n+ # clean unmatched parentheses/braces/brackets\n+ for (opener, closer) in [('(', ')'), ('[', ']'), ('{', '}'), ('<', '>')]:\n+ if url[-1] is closer and url.count(opener) < url.count(closer):\n+ url = url[:-1]\n+\n+ return url\n+\n re_url = r'(?u)((?<!%s)(?:http|https|ftp)(?::\\/\\/\\S+))'\\\n % (bot.config.url.exclusion_char)\n r = re.compile(re_url, re.IGNORECASE)\n \n urls = re.findall(r, text)\n+ if clean:\n+ urls = [trim_url(url) for url in urls]\n return urls\n \n find_urls = find_func\n", "issue": "URLs with ending commas get included when Sopel v6.5.x parse them.\n```\r\nExamples:\r\n\r\n[02:46pm] <Ant> http://www.tv.com/shows/family-guy/da-boom-25450/,\r\n02:46PM <Crushinator> [ Not found - TV.com ] - www.tv.com\r\n02:47PM <URL> [ Not found - TV.com ] - www.tv.com\r\n\r\nPeriods had no problems:\r\n[02:48pm] <Ant> http://www.tv.com/shows/family-guy/da-boom-25450/.\r\n02:48PM <URL> [ Family Guy - Season 2, Episode 3: Da Boom - TV.com ] -\r\n www.tv.com\r\n02:48PM <Crushinator> [ Family Guy - Season 2, Episode 3: Da Boom - TV.com ] -\r\n www.tv.com\r\n\r\n```\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"URL title module\"\"\"\n# Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham\n# Copyright 2012-2013 Elsie Powell\n# Copyright 2013 Lior Ramati ([email protected])\n# Copyright \u00a9 2014 Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web, tools, __version__\nfrom sopel.module import commands, rule, example\nfrom sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection\n\nimport requests\n\nUSER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)\ndefault_headers = {'User-Agent': USER_AGENT}\nfind_urls = None\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 
640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ListAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n shorten_url_length = ValidatedAttribute(\n 'shorten_url_length', int, default=0)\n\n\ndef configure(config):\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n config.url.configure_setting(\n 'shorten_url_length',\n 'Enter how many characters a URL should be before the bot puts a'\n ' shorter version of the URL in the title as a TinyURL link'\n ' (0 to disable)'\n )\n\n\ndef setup(bot):\n global find_urls\n\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if not bot.memory.contains('url_exclude'):\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure that url_callbacks and last_seen_url are in memory\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n def find_func(text):\n re_url = r'(?u)((?<!%s)(?:http|https|ftp)(?::\\/\\/\\S+))'\\\n % (bot.config.url.exclusion_char)\n r = re.compile(re_url, re.IGNORECASE)\n\n urls = re.findall(r, text)\n return urls\n\n find_urls = find_func\n\n\n@commands('title')\n@example('.title http://google.com', '[ Google ] - google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(bot, trigger,\n bot.memory['last_seen_url'][trigger.sender],\n True)\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = find_urls(trigger)\n\n results = process_urls(bot, trigger, urls)\n for title, domain, tinyurl in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n if tinyurl:\n message += ' ( %s )' % tinyurl\n bot.reply(message)\n\n\n@rule(r'(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. 
For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = find_urls(trigger)\n if len(urls) == 0:\n return\n\n results = process_urls(bot, trigger, urls)\n bot.memory['last_seen_url'][trigger.sender] = urls[-1]\n\n for title, domain, tinyurl in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n if tinyurl:\n message += ' ( %s )' % tinyurl\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled by\n another module.\n \"\"\"\n\n results = []\n shorten_url_length = bot.config.url.shorten_url_length\n for url in urls:\n if not url.startswith(bot.config.url.exclusion_char):\n # Magic stuff to account for international domain names\n try:\n url = web.iri_to_uri(url)\n except Exception: # TODO: Be specific\n pass\n # First, check that the URL we got doesn't match\n matched = check_callbacks(bot, trigger, url, False)\n if matched:\n continue\n # If the URL is over bot.config.url.shorten_url_length,\n # shorten the URL\n tinyurl = None\n if (shorten_url_length > 0) and (len(url) > shorten_url_length):\n # Check bot memory to see if the shortened URL is already in\n # memory\n if not bot.memory.contains('shortened_urls'):\n # Initialize shortened_urls as a dict if it doesn't exist.\n bot.memory['shortened_urls'] = tools.SopelMemory()\n if bot.memory['shortened_urls'].contains(url):\n tinyurl = bot.memory['shortened_urls'][url]\n else:\n tinyurl = get_tinyurl(url)\n bot.memory['shortened_urls'][url] = tinyurl\n # Finally, actually show the URL\n title = find_title(url, verify=bot.config.core.verify_ssl)\n if title:\n results.append((title, get_hostname(url), tinyurl))\n return results\n\n\ndef check_callbacks(bot, trigger, url, run=True):\n \"\"\"\n Check the given URL against the callbacks list. If it matches, and ``run``\n is given as ``True``, run the callback function, otherwise pass. 
Returns\n ``True`` if the url matched anything in the callbacks list.\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n # Then, check if there's anything in the callback list\n for regex, function in tools.iteritems(bot.memory['url_callbacks']):\n match = regex.search(url)\n if match:\n # Always run ones from @url; they don't run on their own.\n if run or hasattr(function, 'url_regex'):\n function(bot, trigger, match)\n matched = True\n return matched\n\n\ndef find_title(url, verify=True):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n response = requests.get(url, stream=True, verify=verify,\n headers=default_headers)\n content = b''\n for byte in response.iter_content(chunk_size=512):\n content += byte\n if b'</title>' in content or len(content) > max_bytes:\n break\n content = content.decode('utf-8', errors='ignore')\n # Need to close the connection because we have not read all\n # the data\n response.close()\n except requests.exceptions.ConnectionError:\n return None\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.rfind('<title>')\n end = content.rfind('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\n\ndef get_tinyurl(url):\n \"\"\" Returns a shortened tinyURL link of the URL. \"\"\"\n tinyurl = \"https://tinyurl.com/api-create.php?url=%s\" % url\n try:\n res = requests.get(tinyurl)\n res.raise_for_status()\n except requests.exceptions.RequestException:\n return None\n # Replace text output with https instead of http to make the\n # result an HTTPS link.\n return res.text.replace(\"http://\", \"https://\")\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}]}
| 3,940 | 308 |
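The golden diff in the sopel record above adds a `trim_url` helper that strips trailing sentence punctuation and unmatched closing brackets before a URL is titled. Below is a minimal standalone sketch of that cleanup logic; the function name and behavior follow the diff, but this is an illustration, not a drop-in for the sopel module.

```
def trim_url(url):
    # Drop trailing sentence- or clause-ending punctuation, e.g. ".../page/," -> ".../page/"
    while url and url[-1] in '.,?!\'":;':
        url = url[:-1]
    # Drop a trailing closer only when it is unmatched within the URL itself
    for opener, closer in [('(', ')'), ('[', ']'), ('{', '}'), ('<', '>')]:
        if url and url[-1] == closer and url.count(opener) < url.count(closer):
            url = url[:-1]
    return url

# The comma pasted from chat text is removed, but balanced parentheses survive
assert trim_url('http://www.tv.com/shows/family-guy/da-boom-25450/,') == \
    'http://www.tv.com/shows/family-guy/da-boom-25450/'
assert trim_url('https://en.wikipedia.org/wiki/Foo_(bar)') == 'https://en.wikipedia.org/wiki/Foo_(bar)'
```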
| gh_patches_debug_17355 | rasdani/github-patches | git_diff | canonical__microk8s-4023 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non-HA single node, leaving node removes all pods
#### Summary
Iam running the microk8s in ubuntu in no high availability i.e. there is a single node onto the same machine where it is installed. I updated the certificates and then i issue following command:
`sudo microk8s leave`
It give following messages:
```
Generating new cluster certificates.
Waiting for node to start.
```
and then i stopped microk8s and then started again, the node appeared however all of my pods / namespaces are gone, how to recover?
#### What Should Happen Instead?
All the pods should retain
#### Reproduction Steps
Already described above
#### Introspection Report
#### Can you suggest a fix?
<!-- (This section is optional). How do you propose that the issue be fixed? -->
[https://wetransfer.com/downloads/ee891f9d62bd9ffd7fdac2f9597e638f20230529135310/bf5d38484b8a54a7107a5447153c884820230529135327/7b32a3](url)
</issue>
<code>
[start of scripts/wrappers/remove_node.py]
1 #!/usr/bin/python3
2 import json
3 import os
4 import shutil
5 import subprocess
6 import sys
7
8 import click
9 import netifaces
10
11 from ipaddress import ip_address, IPv4Address
12
13 from common.cluster.utils import (
14 try_set_file_permissions,
15 is_node_running_dqlite,
16 is_token_auth_enabled,
17 )
18
19 snap_path = os.environ.get("SNAP")
20 snapdata_path = os.environ.get("SNAP_DATA")
21 callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
22
23 cluster_dir = "{}/var/kubernetes/backend".format(snapdata_path)
24
25
26 def remove_dqlite_node(node, force=False):
27 try:
28 # If node is an IP address, find the node name.
29 if type(ip_address(node)) is IPv4Address:
30 node_info = subprocess.check_output(
31 "{}/microk8s-kubectl.wrapper get no -o json".format(snap_path).split()
32 )
33 info = json.loads(node_info.decode())
34 found = False
35 for n in info["items"]:
36 if found:
37 break
38 for a in n["status"]["addresses"]:
39 if a["type"] == "InternalIP" and a["address"] == node:
40 node = n["metadata"]["name"]
41 found = True
42 break
43
44 # Make sure this node exists
45 node_info = subprocess.check_output(
46 "{}/microk8s-kubectl.wrapper get no {} -o json".format(snap_path, node).split()
47 )
48 info = json.loads(node_info.decode())
49 node_address = None
50 for a in info["status"]["addresses"]:
51 if a["type"] == "InternalIP":
52 node_address = a["address"]
53 break
54
55 if not node_address:
56 print("Node {} is not part of the cluster.".format(node))
57 exit(1)
58
59 node_ep = None
60 my_ep, other_ep = get_dqlite_endpoints()
61 for ep in other_ep:
62 if ep.startswith("{}:".format(node_address)):
63 node_ep = ep
64
65 if node_ep and force:
66 delete_dqlite_node([node_ep], my_ep)
67 elif node_ep and not force:
68 print(
69 "Removal failed. Node {} is registered with dqlite. "
70 "Please, run first 'microk8s leave' on the departing node. \n"
71 "If the node is not available anymore and will never attempt to join the cluster "
72 "in the future use the '--force' flag \n"
73 "to unregister the node while removing it.".format(node)
74 )
75 exit(1)
76
77 except subprocess.CalledProcessError:
78 print("Node {} does not exist in Kubernetes.".format(node))
79 if force:
80 print("Attempting to remove {} from dqlite.".format(node))
81 # Make sure we do not have the node in dqlite.
82 # We assume the IP is provided to denote the
83 my_ep, other_ep = get_dqlite_endpoints()
84 for ep in other_ep:
85 if ep.startswith("{}:".format(node)):
86 print("Removing node entry found in dqlite.")
87 delete_dqlite_node([ep], my_ep)
88 exit(1)
89
90 remove_node(node)
91
92
93 def remove_node(node):
94 try:
95 # Make sure this node exists
96 subprocess.check_call(
97 "{}/microk8s-kubectl.wrapper get no {}".format(snap_path, node).split(),
98 stdout=subprocess.DEVNULL,
99 stderr=subprocess.DEVNULL,
100 )
101 except subprocess.CalledProcessError:
102 print("Node {} does not exist.".format(node))
103 exit(1)
104
105 if is_token_auth_enabled():
106 remove_kubelet_token(node)
107 remove_callback_token(node)
108 subprocess.check_call(
109 "{}/microk8s-kubectl.wrapper delete no {}".format(snap_path, node).split(),
110 stdout=subprocess.DEVNULL,
111 stderr=subprocess.DEVNULL,
112 )
113
114
115 def remove_kubelet_token(node):
116 """
117 Remove a token for a node in the known tokens
118
119 :param node: the name of the node
120 """
121 file = "{}/credentials/known_tokens.csv".format(snapdata_path)
122 backup_file = "{}.backup".format(file)
123 token = "system:node:{}".format(node)
124 # That is a critical section. We need to protect it.
125 with open(backup_file, "w") as back_fp:
126 with open(file, "r") as fp:
127 for _, line in enumerate(fp):
128 if token in line:
129 continue
130 back_fp.write("{}".format(line))
131
132 try_set_file_permissions(backup_file)
133 shutil.copyfile(backup_file, file)
134
135
136 def get_dqlite_endpoints():
137 """
138 Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.
139
140 :return: two lists with the endpoints
141 """
142 out = subprocess.check_output(
143 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
144 "-k {dbdir}/cluster.key -f json k8s .cluster".format(
145 snappath=snap_path, dbdir=cluster_dir
146 ).split()
147 )
148 data = json.loads(out.decode())
149 ep_addresses = []
150 for ep in data:
151 ep_addresses.append(ep["Address"])
152 local_ips = []
153 for interface in netifaces.interfaces():
154 if netifaces.AF_INET not in netifaces.ifaddresses(interface):
155 continue
156 for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:
157 local_ips.append(link["addr"])
158 my_ep = []
159 other_ep = []
160 for ep in ep_addresses:
161 found = False
162 for ip in local_ips:
163 if "{}:".format(ip) in ep:
164 my_ep.append(ep)
165 found = True
166 if not found:
167 other_ep.append(ep)
168
169 return my_ep, other_ep
170
171
172 def delete_dqlite_node(delete_node, dqlite_ep):
173 if len(delete_node) > 0 and "127.0.0.1" not in delete_node[0]:
174 for ep in dqlite_ep:
175 try:
176 cmd = (
177 "{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt "
178 "-k {dbdir}/cluster.key -f json k8s".format(
179 snappath=snap_path, dbdir=cluster_dir
180 ).split()
181 )
182 cmd.append(".remove {}".format(delete_node[0]))
183 subprocess.check_output(cmd)
184 break
185 except Exception as err:
186 print("Contacting node {} failed. Error:".format(ep))
187 print(repr(err))
188 exit(2)
189
190
191 def remove_callback_token(node):
192 """
193 Remove a callback token
194
195 :param node: the node
196 """
197 tmp_file = "{}.tmp".format(callback_tokens_file)
198 if not os.path.isfile(callback_tokens_file):
199 open(callback_tokens_file, "a+")
200 os.chmod(callback_tokens_file, 0o600)
201 with open(tmp_file, "w") as backup_fp:
202 os.chmod(tmp_file, 0o600)
203 with open(callback_tokens_file, "r+") as callback_fp:
204 # Entries are of the format: 'node_hostname:agent_port token'
205 # We need to get the node_hostname part
206 for line in callback_fp:
207 parts = line.split(":")
208 if parts[0] == node:
209 continue
210 else:
211 backup_fp.write(line)
212
213 try_set_file_permissions(tmp_file)
214 shutil.move(tmp_file, callback_tokens_file)
215
216
217 @click.command()
218 @click.argument("node", required=True)
219 @click.option(
220 "--force",
221 is_flag=True,
222 required=False,
223 default=False,
224 help="Force the node removal operation. (default: false)",
225 )
226 def reset(node, force):
227 """
228 Remove a node from the cluster
229 """
230 if is_node_running_dqlite():
231 remove_dqlite_node(node, force)
232 else:
233 remove_node(node)
234 sys.exit(0)
235
236
237 if __name__ == "__main__":
238 reset(prog_name="microk8s remove-node")
239
[end of scripts/wrappers/remove_node.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/wrappers/remove_node.py b/scripts/wrappers/remove_node.py
--- a/scripts/wrappers/remove_node.py
+++ b/scripts/wrappers/remove_node.py
@@ -8,7 +8,7 @@
import click
import netifaces
-from ipaddress import ip_address, IPv4Address
+from ipaddress import ip_address
from common.cluster.utils import (
try_set_file_permissions,
@@ -26,7 +26,13 @@
def remove_dqlite_node(node, force=False):
try:
# If node is an IP address, find the node name.
- if type(ip_address(node)) is IPv4Address:
+ is_node_ip = True
+ try:
+ ip_address(node)
+ except ValueError:
+ is_node_ip = False
+
+ if is_node_ip:
node_info = subprocess.check_output(
"{}/microk8s-kubectl.wrapper get no -o json".format(snap_path).split()
)
|
{"golden_diff": "diff --git a/scripts/wrappers/remove_node.py b/scripts/wrappers/remove_node.py\n--- a/scripts/wrappers/remove_node.py\n+++ b/scripts/wrappers/remove_node.py\n@@ -8,7 +8,7 @@\n import click\n import netifaces\n \n-from ipaddress import ip_address, IPv4Address\n+from ipaddress import ip_address\n \n from common.cluster.utils import (\n try_set_file_permissions,\n@@ -26,7 +26,13 @@\n def remove_dqlite_node(node, force=False):\n try:\n # If node is an IP address, find the node name.\n- if type(ip_address(node)) is IPv4Address:\n+ is_node_ip = True\n+ try:\n+ ip_address(node)\n+ except ValueError:\n+ is_node_ip = False\n+\n+ if is_node_ip:\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no -o json\".format(snap_path).split()\n )\n", "issue": "Non-HA single node, leaving node removes all pods\n\r\n\r\n#### Summary\r\nIam running the microk8s in ubuntu in no high availability i.e. there is a single node onto the same machine where it is installed. I updated the certificates and then i issue following command:\r\n`sudo microk8s leave`\r\n\r\nIt give following messages:\r\n\r\n```\r\nGenerating new cluster certificates.\r\nWaiting for node to start. \r\n```\r\n\r\nand then i stopped microk8s and then started again, the node appeared however all of my pods / namespaces are gone, how to recover?\r\n\r\n#### What Should Happen Instead?\r\nAll the pods should retain\r\n\r\n#### Reproduction Steps\r\nAlready described above\r\n\r\n#### Introspection Report\r\n\r\n#### Can you suggest a fix?\r\n<!-- (This section is optional). How do you propose that the issue be fixed? -->\r\n[https://wetransfer.com/downloads/ee891f9d62bd9ffd7fdac2f9597e638f20230529135310/bf5d38484b8a54a7107a5447153c884820230529135327/7b32a3](url)\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\nimport json\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport click\nimport netifaces\n\nfrom ipaddress import ip_address, IPv4Address\n\nfrom common.cluster.utils import (\n try_set_file_permissions,\n is_node_running_dqlite,\n is_token_auth_enabled,\n)\n\nsnap_path = os.environ.get(\"SNAP\")\nsnapdata_path = os.environ.get(\"SNAP_DATA\")\ncallback_tokens_file = \"{}/credentials/callback-tokens.txt\".format(snapdata_path)\n\ncluster_dir = \"{}/var/kubernetes/backend\".format(snapdata_path)\n\n\ndef remove_dqlite_node(node, force=False):\n try:\n # If node is an IP address, find the node name.\n if type(ip_address(node)) is IPv4Address:\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no -o json\".format(snap_path).split()\n )\n info = json.loads(node_info.decode())\n found = False\n for n in info[\"items\"]:\n if found:\n break\n for a in n[\"status\"][\"addresses\"]:\n if a[\"type\"] == \"InternalIP\" and a[\"address\"] == node:\n node = n[\"metadata\"][\"name\"]\n found = True\n break\n\n # Make sure this node exists\n node_info = subprocess.check_output(\n \"{}/microk8s-kubectl.wrapper get no {} -o json\".format(snap_path, node).split()\n )\n info = json.loads(node_info.decode())\n node_address = None\n for a in info[\"status\"][\"addresses\"]:\n if a[\"type\"] == \"InternalIP\":\n node_address = a[\"address\"]\n break\n\n if not node_address:\n print(\"Node {} is not part of the cluster.\".format(node))\n exit(1)\n\n node_ep = None\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node_address)):\n node_ep = ep\n\n if node_ep and force:\n delete_dqlite_node([node_ep], my_ep)\n elif node_ep and not 
force:\n print(\n \"Removal failed. Node {} is registered with dqlite. \"\n \"Please, run first 'microk8s leave' on the departing node. \\n\"\n \"If the node is not available anymore and will never attempt to join the cluster \"\n \"in the future use the '--force' flag \\n\"\n \"to unregister the node while removing it.\".format(node)\n )\n exit(1)\n\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist in Kubernetes.\".format(node))\n if force:\n print(\"Attempting to remove {} from dqlite.\".format(node))\n # Make sure we do not have the node in dqlite.\n # We assume the IP is provided to denote the\n my_ep, other_ep = get_dqlite_endpoints()\n for ep in other_ep:\n if ep.startswith(\"{}:\".format(node)):\n print(\"Removing node entry found in dqlite.\")\n delete_dqlite_node([ep], my_ep)\n exit(1)\n\n remove_node(node)\n\n\ndef remove_node(node):\n try:\n # Make sure this node exists\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper get no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n except subprocess.CalledProcessError:\n print(\"Node {} does not exist.\".format(node))\n exit(1)\n\n if is_token_auth_enabled():\n remove_kubelet_token(node)\n remove_callback_token(node)\n subprocess.check_call(\n \"{}/microk8s-kubectl.wrapper delete no {}\".format(snap_path, node).split(),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n\n\ndef remove_kubelet_token(node):\n \"\"\"\n Remove a token for a node in the known tokens\n\n :param node: the name of the node\n \"\"\"\n file = \"{}/credentials/known_tokens.csv\".format(snapdata_path)\n backup_file = \"{}.backup\".format(file)\n token = \"system:node:{}\".format(node)\n # That is a critical section. We need to protect it.\n with open(backup_file, \"w\") as back_fp:\n with open(file, \"r\") as fp:\n for _, line in enumerate(fp):\n if token in line:\n continue\n back_fp.write(\"{}\".format(line))\n\n try_set_file_permissions(backup_file)\n shutil.copyfile(backup_file, file)\n\n\ndef get_dqlite_endpoints():\n \"\"\"\n Return the endpoints the current node has on dqlite and the endpoints of the rest of the nodes.\n\n :return: two lists with the endpoints\n \"\"\"\n out = subprocess.check_output(\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s .cluster\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n data = json.loads(out.decode())\n ep_addresses = []\n for ep in data:\n ep_addresses.append(ep[\"Address\"])\n local_ips = []\n for interface in netifaces.interfaces():\n if netifaces.AF_INET not in netifaces.ifaddresses(interface):\n continue\n for link in netifaces.ifaddresses(interface)[netifaces.AF_INET]:\n local_ips.append(link[\"addr\"])\n my_ep = []\n other_ep = []\n for ep in ep_addresses:\n found = False\n for ip in local_ips:\n if \"{}:\".format(ip) in ep:\n my_ep.append(ep)\n found = True\n if not found:\n other_ep.append(ep)\n\n return my_ep, other_ep\n\n\ndef delete_dqlite_node(delete_node, dqlite_ep):\n if len(delete_node) > 0 and \"127.0.0.1\" not in delete_node[0]:\n for ep in dqlite_ep:\n try:\n cmd = (\n \"{snappath}/bin/dqlite -s file://{dbdir}/cluster.yaml -c {dbdir}/cluster.crt \"\n \"-k {dbdir}/cluster.key -f json k8s\".format(\n snappath=snap_path, dbdir=cluster_dir\n ).split()\n )\n cmd.append(\".remove {}\".format(delete_node[0]))\n subprocess.check_output(cmd)\n break\n except Exception as err:\n print(\"Contacting node {} failed. 
Error:\".format(ep))\n print(repr(err))\n exit(2)\n\n\ndef remove_callback_token(node):\n \"\"\"\n Remove a callback token\n\n :param node: the node\n \"\"\"\n tmp_file = \"{}.tmp\".format(callback_tokens_file)\n if not os.path.isfile(callback_tokens_file):\n open(callback_tokens_file, \"a+\")\n os.chmod(callback_tokens_file, 0o600)\n with open(tmp_file, \"w\") as backup_fp:\n os.chmod(tmp_file, 0o600)\n with open(callback_tokens_file, \"r+\") as callback_fp:\n # Entries are of the format: 'node_hostname:agent_port token'\n # We need to get the node_hostname part\n for line in callback_fp:\n parts = line.split(\":\")\n if parts[0] == node:\n continue\n else:\n backup_fp.write(line)\n\n try_set_file_permissions(tmp_file)\n shutil.move(tmp_file, callback_tokens_file)\n\n\[email protected]()\[email protected](\"node\", required=True)\[email protected](\n \"--force\",\n is_flag=True,\n required=False,\n default=False,\n help=\"Force the node removal operation. (default: false)\",\n)\ndef reset(node, force):\n \"\"\"\n Remove a node from the cluster\n \"\"\"\n if is_node_running_dqlite():\n remove_dqlite_node(node, force)\n else:\n remove_node(node)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n reset(prog_name=\"microk8s remove-node\")\n", "path": "scripts/wrappers/remove_node.py"}]}
| 3,215 | 215 |
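The fix in the MicroK8s row above comes down to detecting whether the `node` argument is an IP address at all (IPv4 or IPv6) instead of checking specifically for an `IPv4Address` instance. A minimal sketch of that detection pattern in isolation is shown below; the `is_ip_address` wrapper is illustrative and not part of the MicroK8s code.

```
from ipaddress import ip_address

def is_ip_address(value: str) -> bool:
    # ip_address() raises ValueError for hostnames/node names,
    # and accepts both IPv4 and IPv6 literals.
    try:
        ip_address(value)
        return True
    except ValueError:
        return False

print(is_ip_address("10.0.0.7"))       # True
print(is_ip_address("::1"))            # True  (IPv6 is handled too)
print(is_ip_address("worker-node-1"))  # False -> treat it as a node name
```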
| gh_patches_debug_13582 | rasdani/github-patches | git_diff | vega__altair-334 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FutureWarning in Pandas 0.20.1
Since upgrading to Pandas 0.20.1 I get this warning when first using altair in a notebook.
```
site-packages\altair\utils\core.py:110: FutureWarning: pandas.lib is deprecated and will be removed in a future version.
You can access infer_dtype as pandas.api.types.infer_dtype
typ = pd.lib.infer_dtype(data)
```
</issue>
<code>
[start of altair/utils/core.py]
1 """
2 Utility routines
3 """
4 import re
5 import warnings
6
7 import pandas as pd
8 import numpy as np
9
10
11 TYPECODE_MAP = {'ordinal': 'O',
12 'nominal': 'N',
13 'quantitative': 'Q',
14 'temporal': 'T'}
15
16 INV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}
17
18 TYPE_ABBR = TYPECODE_MAP.values()
19
20
21 def parse_shorthand(shorthand):
22 """
23 Parse the shorthand expression for aggregation, field, and type.
24
25 These are of the form:
26
27 - "col_name"
28 - "col_name:O"
29 - "average(col_name)"
30 - "average(col_name):O"
31
32 Parameters
33 ----------
34 shorthand: str
35 Shorthand string
36
37 Returns
38 -------
39 D : dict
40 Dictionary containing the field, aggregate, and typecode
41 """
42 if not shorthand:
43 return {}
44
45 # Must import this here to avoid circular imports
46 from ..schema import AggregateOp
47 valid_aggregates = AggregateOp().values
48 valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)
49
50 # build regular expressions
51 units = dict(field='(?P<field>.*)',
52 type='(?P<type>{0})'.format('|'.join(valid_typecodes)),
53 aggregate='(?P<aggregate>{0})'.format('|'.join(valid_aggregates)))
54 patterns = [r'{field}',
55 r'{field}:{type}',
56 r'{aggregate}\({field}\)',
57 r'{aggregate}\({field}\):{type}']
58 regexps = (re.compile('\A' + p.format(**units) + '\Z', re.DOTALL)
59 for p in patterns[::-1])
60
61 # find matches depending on valid fields passed
62 match = next(exp.match(shorthand).groupdict() for exp in regexps
63 if exp.match(shorthand))
64
65 # Use short form of the type expression
66 typ = match.get('type', None)
67 if typ:
68 match['type'] = INV_TYPECODE_MAP.get(typ, typ)
69 return match
70
71
72 def construct_shorthand(field=None, aggregate=None, type=None):
73 """Construct a shorthand representation.
74
75 See also: parse_shorthand"""
76 if field is None:
77 return ''
78
79 sh = field
80
81 if aggregate is not None:
82 sh = '{0}({1})'.format(aggregate, sh)
83
84 if type is not None:
85 type = TYPECODE_MAP.get(type, type)
86 if type not in TYPE_ABBR:
87 raise ValueError('Unrecognized Type: {0}'.format(type))
88 sh = '{0}:{1}'.format(sh, type)
89
90 return sh
91
92
93 def infer_vegalite_type(data, field=None):
94 """
95 From an array-like input, infer the correct vega typecode
96 ('ordinal', 'nominal', 'quantitative', or 'temporal')
97
98 Parameters
99 ----------
100 data: Numpy array or Pandas Series
101 field: str column name
102 """
103 # See if we can read the type from the field
104 if field is not None:
105 parsed = parse_shorthand(field)
106 if parsed.get('type'):
107 return parsed['type']
108
109 # Otherwise, infer based on the dtype of the input
110 typ = pd.lib.infer_dtype(data)
111
112 # TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py
113
114 if typ in ['floating', 'mixed-integer-float', 'integer',
115 'mixed-integer', 'complex']:
116 return 'quantitative'
117 elif typ in ['string', 'bytes', 'categorical', 'boolean', 'mixed', 'unicode']:
118 return 'nominal'
119 elif typ in ['datetime', 'datetime64', 'timedelta',
120 'timedelta64', 'date', 'time', 'period']:
121 return 'temporal'
122 else:
123 warnings.warn("I don't know how to infer vegalite type from '{0}'. "
124 "Defaulting to nominal.".format(typ))
125 return 'nominal'
126
127
128 def sanitize_dataframe(df):
129 """Sanitize a DataFrame to prepare it for serialization.
130
131 * Make a copy
132 * Raise ValueError if it has a hierarchical index.
133 * Convert categoricals to strings.
134 * Convert np.bool_ dtypes to Python bool objects
135 * Convert np.int dtypes to Python int objects
136 * Convert floats to objects and replace NaNs by None.
137 * Convert DateTime dtypes into appropriate string representations
138 """
139 df = df.copy()
140
141 if isinstance(df.index, pd.core.index.MultiIndex):
142 raise ValueError('Hierarchical indices not supported')
143 if isinstance(df.columns, pd.core.index.MultiIndex):
144 raise ValueError('Hierarchical indices not supported')
145
146 def to_list_if_array(val):
147 if isinstance(val, np.ndarray):
148 return val.tolist()
149 else:
150 return val
151
152 for col_name, dtype in df.dtypes.iteritems():
153 if str(dtype) == 'category':
154 # XXXX: work around bug in to_json for categorical types
155 # https://github.com/pydata/pandas/issues/10778
156 df[col_name] = df[col_name].astype(str)
157 elif str(dtype) == 'bool':
158 # convert numpy bools to objects; np.bool is not JSON serializable
159 df[col_name] = df[col_name].astype(object)
160 elif np.issubdtype(dtype, np.integer):
161 # convert integers to objects; np.int is not JSON serializable
162 df[col_name] = df[col_name].astype(object)
163 elif np.issubdtype(dtype, np.floating):
164 # For floats, convert nan->None: np.float is not JSON serializable
165 col = df[col_name].astype(object)
166 df[col_name] = col.where(col.notnull(), None)
167 elif str(dtype).startswith('datetime'):
168 # Convert datetimes to strings
169 # astype(str) will choose the appropriate resolution
170 df[col_name] = df[col_name].astype(str).replace('NaT', '')
171 elif dtype == object:
172 # Convert numpy arrays saved as objects to lists
173 # Arrays are not JSON serializable
174 col = df[col_name].apply(to_list_if_array, convert_dtype=False)
175 df[col_name] = col.where(col.notnull(), None)
176 return df
177
[end of altair/utils/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/altair/utils/core.py b/altair/utils/core.py
--- a/altair/utils/core.py
+++ b/altair/utils/core.py
@@ -7,6 +7,10 @@
import pandas as pd
import numpy as np
+try:
+ from pandas.api.types import infer_dtype
+except ImportError: # Pandas before 0.20.0
+ from pandas.lib import infer_dtype
TYPECODE_MAP = {'ordinal': 'O',
'nominal': 'N',
@@ -107,7 +111,7 @@
return parsed['type']
# Otherwise, infer based on the dtype of the input
- typ = pd.lib.infer_dtype(data)
+ typ = infer_dtype(data)
# TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py
|
{"golden_diff": "diff --git a/altair/utils/core.py b/altair/utils/core.py\n--- a/altair/utils/core.py\n+++ b/altair/utils/core.py\n@@ -7,6 +7,10 @@\n import pandas as pd\n import numpy as np\n \n+try:\n+ from pandas.api.types import infer_dtype\n+except ImportError: # Pandas before 0.20.0\n+ from pandas.lib import infer_dtype\n \n TYPECODE_MAP = {'ordinal': 'O',\n 'nominal': 'N',\n@@ -107,7 +111,7 @@\n return parsed['type']\n \n # Otherwise, infer based on the dtype of the input\n- typ = pd.lib.infer_dtype(data)\n+ typ = infer_dtype(data)\n \n # TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py\n", "issue": "FutureWarning in Pandas 0.20.1\nSince upgrading to Pandas 0.20.1 I get this warning when first using altair in a notebook.\r\n\r\n```\r\nsite-packages\\altair\\utils\\core.py:110: FutureWarning: pandas.lib is deprecated and will be removed in a future version.\r\nYou can access infer_dtype as pandas.api.types.infer_dtype\r\n typ = pd.lib.infer_dtype(data)\r\n```\n", "before_files": [{"content": "\"\"\"\nUtility routines\n\"\"\"\nimport re\nimport warnings\n\nimport pandas as pd\nimport numpy as np\n\n\nTYPECODE_MAP = {'ordinal': 'O',\n 'nominal': 'N',\n 'quantitative': 'Q',\n 'temporal': 'T'}\n\nINV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}\n\nTYPE_ABBR = TYPECODE_MAP.values()\n\n\ndef parse_shorthand(shorthand):\n \"\"\"\n Parse the shorthand expression for aggregation, field, and type.\n\n These are of the form:\n\n - \"col_name\"\n - \"col_name:O\"\n - \"average(col_name)\"\n - \"average(col_name):O\"\n\n Parameters\n ----------\n shorthand: str\n Shorthand string\n\n Returns\n -------\n D : dict\n Dictionary containing the field, aggregate, and typecode\n \"\"\"\n if not shorthand:\n return {}\n\n # Must import this here to avoid circular imports\n from ..schema import AggregateOp\n valid_aggregates = AggregateOp().values\n valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)\n\n # build regular expressions\n units = dict(field='(?P<field>.*)',\n type='(?P<type>{0})'.format('|'.join(valid_typecodes)),\n aggregate='(?P<aggregate>{0})'.format('|'.join(valid_aggregates)))\n patterns = [r'{field}',\n r'{field}:{type}',\n r'{aggregate}\\({field}\\)',\n r'{aggregate}\\({field}\\):{type}']\n regexps = (re.compile('\\A' + p.format(**units) + '\\Z', re.DOTALL)\n for p in patterns[::-1])\n\n # find matches depending on valid fields passed\n match = next(exp.match(shorthand).groupdict() for exp in regexps\n if exp.match(shorthand))\n\n # Use short form of the type expression\n typ = match.get('type', None)\n if typ:\n match['type'] = INV_TYPECODE_MAP.get(typ, typ)\n return match\n\n\ndef construct_shorthand(field=None, aggregate=None, type=None):\n \"\"\"Construct a shorthand representation.\n\n See also: parse_shorthand\"\"\"\n if field is None:\n return ''\n\n sh = field\n\n if aggregate is not None:\n sh = '{0}({1})'.format(aggregate, sh)\n\n if type is not None:\n type = TYPECODE_MAP.get(type, type)\n if type not in TYPE_ABBR:\n raise ValueError('Unrecognized Type: {0}'.format(type))\n sh = '{0}:{1}'.format(sh, type)\n\n return sh\n\n\ndef infer_vegalite_type(data, field=None):\n \"\"\"\n From an array-like input, infer the correct vega typecode\n ('ordinal', 'nominal', 'quantitative', or 'temporal')\n\n Parameters\n ----------\n data: Numpy array or Pandas Series\n field: str column name\n \"\"\"\n # See if we can read the type from the field\n if field is not None:\n parsed = parse_shorthand(field)\n if parsed.get('type'):\n 
return parsed['type']\n\n # Otherwise, infer based on the dtype of the input\n typ = pd.lib.infer_dtype(data)\n\n # TODO: Once this returns 'O', please update test_select_x and test_select_y in test_api.py\n\n if typ in ['floating', 'mixed-integer-float', 'integer',\n 'mixed-integer', 'complex']:\n return 'quantitative'\n elif typ in ['string', 'bytes', 'categorical', 'boolean', 'mixed', 'unicode']:\n return 'nominal'\n elif typ in ['datetime', 'datetime64', 'timedelta',\n 'timedelta64', 'date', 'time', 'period']:\n return 'temporal'\n else:\n warnings.warn(\"I don't know how to infer vegalite type from '{0}'. \"\n \"Defaulting to nominal.\".format(typ))\n return 'nominal'\n\n\ndef sanitize_dataframe(df):\n \"\"\"Sanitize a DataFrame to prepare it for serialization.\n\n * Make a copy\n * Raise ValueError if it has a hierarchical index.\n * Convert categoricals to strings.\n * Convert np.bool_ dtypes to Python bool objects\n * Convert np.int dtypes to Python int objects\n * Convert floats to objects and replace NaNs by None.\n * Convert DateTime dtypes into appropriate string representations\n \"\"\"\n df = df.copy()\n\n if isinstance(df.index, pd.core.index.MultiIndex):\n raise ValueError('Hierarchical indices not supported')\n if isinstance(df.columns, pd.core.index.MultiIndex):\n raise ValueError('Hierarchical indices not supported')\n\n def to_list_if_array(val):\n if isinstance(val, np.ndarray):\n return val.tolist()\n else:\n return val\n\n for col_name, dtype in df.dtypes.iteritems():\n if str(dtype) == 'category':\n # XXXX: work around bug in to_json for categorical types\n # https://github.com/pydata/pandas/issues/10778\n df[col_name] = df[col_name].astype(str)\n elif str(dtype) == 'bool':\n # convert numpy bools to objects; np.bool is not JSON serializable\n df[col_name] = df[col_name].astype(object)\n elif np.issubdtype(dtype, np.integer):\n # convert integers to objects; np.int is not JSON serializable\n df[col_name] = df[col_name].astype(object)\n elif np.issubdtype(dtype, np.floating):\n # For floats, convert nan->None: np.float is not JSON serializable\n col = df[col_name].astype(object)\n df[col_name] = col.where(col.notnull(), None)\n elif str(dtype).startswith('datetime'):\n # Convert datetimes to strings\n # astype(str) will choose the appropriate resolution\n df[col_name] = df[col_name].astype(str).replace('NaT', '')\n elif dtype == object:\n # Convert numpy arrays saved as objects to lists\n # Arrays are not JSON serializable\n col = df[col_name].apply(to_list_if_array, convert_dtype=False)\n df[col_name] = col.where(col.notnull(), None)\n return df\n", "path": "altair/utils/core.py"}]}
| 2,433 | 191 |
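The altair fix above is a common version-compatibility pattern: import `infer_dtype` from its newer public location and fall back to the old one on older pandas. A minimal sketch of the same shim outside of altair follows; the names mirror the diff, and the printed result is only an example.

```
try:
    from pandas.api.types import infer_dtype  # pandas >= 0.20
except ImportError:
    from pandas.lib import infer_dtype        # older pandas

import pandas as pd

s = pd.Series([1.0, 2.5, None])
print(infer_dtype(s))  # e.g. 'floating', which maps to the 'quantitative' typecode
```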
| gh_patches_debug_34021 | rasdani/github-patches | git_diff | google__TensorNetwork-820 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Einsum support?
Should we extend our API to support einsum equations? It could potentially make connecting nodes much less verbose. However, I question whether anyone would want to use `tn.einsum` over say `np.einsum`. Perhaps we could support only doing the left side of the equation?
</issue>
<code>
[start of tensornetwork/__init__.py]
1 from tensornetwork.network_components import (AbstractNode, CopyNode, Edge,
2 Node, NodeCollection)
3 from tensornetwork.network_operations import (
4 check_connected, check_correct, contract_trace_edges, copy, get_all_edges,
5 get_all_nodes, get_neighbors, get_subgraph_dangling, reachable,
6 reduced_density, remove_node, replicate_nodes, split_node,
7 split_node_full_svd, split_node_qr, split_node_rq, switch_backend)
8
9 from tensornetwork.tensor import Tensor
10 from tensornetwork.linalg.initialization import (
11 eye,
12 ones,
13 randn,
14 random_uniform,
15 zeros
16 )
17
18 from tensornetwork.linalg.linalg import norm, qr, svd
19
20 #pylint: disable=redefined-builtin
21 from tensornetwork.linalg.operations import (
22 tensordot,
23 reshape,
24 transpose,
25 take_slice,
26 shape,
27 sqrt,
28 outer,
29 einsum,
30 conj,
31 hconj,
32 sin,
33 cos,
34 exp,
35 log,
36 diagonal,
37 diagflat,
38 trace,
39 sign,
40 abs,
41 kron,
42 pivot
43 )
44
45 from tensornetwork.backends.decorators import jit
46
47 from tensornetwork.network_components import (
48 contract, contract_between, contract_copy_node, contract_parallel,
49 flatten_all_edges, flatten_edges, flatten_edges_between,
50 get_all_nondangling, get_all_dangling, get_parallel_edges, get_shared_edges,
51 outer_product, outer_product_final_nodes, slice_edge, split_edge)
52 from tensornetwork.backends.abstract_backend import AbstractBackend
53 from tensornetwork.network_components import connect, disconnect
54 from tensornetwork.ncon_interface import ncon
55 from tensornetwork.version import __version__
56 from tensornetwork.visualization.graphviz import to_graphviz
57 from tensornetwork import contractors
58 from tensornetwork.utils import load_nodes, save_nodes
59 from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS
60 from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
61 from tensornetwork.matrixproductstates.dmrg import FiniteDMRG
62 from tensornetwork.matrixproductstates.mpo import FiniteTFI, FiniteXXZ
63 from tensornetwork.backend_contextmanager import DefaultBackend
64 from tensornetwork.backend_contextmanager import set_default_backend
65 from tensornetwork import block_sparse
66 from tensornetwork.block_sparse.blocksparsetensor import BlockSparseTensor
67 from tensornetwork.block_sparse.blocksparsetensor import ChargeArray
68 from tensornetwork.block_sparse.index import Index
69 from tensornetwork.block_sparse.charge import U1Charge, BaseCharge, Z2Charge
70 from tensornetwork.block_sparse.charge import ZNCharge
71
[end of tensornetwork/__init__.py]
[start of tensornetwork/utils.py]
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import h5py
16 from tensornetwork.component_factory import get_component
17 from tensornetwork.network_components import Edge, AbstractNode
18 from tensornetwork.network_operations import reachable, get_all_edges
19 from typing import List, Union, BinaryIO
20 import numpy as np
21 string_type = h5py.special_dtype(vlen=str)
22
23
24 def save_nodes(nodes: List[AbstractNode], path: Union[str, BinaryIO]) -> None:
25 """Save an iterable of nodes into hdf5 format.
26
27 Args:
28 nodes: An iterable of connected nodes. All nodes have to connect within
29 `nodes`.
30 path: path to file where network is saved.
31 """
32 if reachable(nodes) > set(nodes):
33 raise ValueError(
34 "Some nodes in `nodes` are connected to nodes not contained in `nodes`."
35 " Saving not possible.")
36 if len(set(nodes)) < len(list(nodes)):
37 raise ValueError(
38 'Some nodes in `nodes` appear more than once. This is not supported')
39 #we need to iterate twice and order matters
40 edges = list(get_all_edges(nodes))
41 nodes = list(nodes)
42
43 old_edge_names = {n: edge.name for n, edge in enumerate(edges)}
44 old_node_names = {n: node.name for n, node in enumerate(nodes)}
45
46 #generate unique names for nodes and edges
47 #for saving them
48 for n, node in enumerate(nodes):
49 node.set_name('node{}'.format(n))
50
51 for e, edge in enumerate(edges):
52 edge.set_name('edge{}'.format(e))
53
54 with h5py.File(path, 'w') as net_file:
55 nodes_group = net_file.create_group('nodes')
56 node_names_group = net_file.create_group('node_names')
57 node_names_group.create_dataset(
58 'names',
59 dtype=string_type,
60 data=np.array(list(old_node_names.values()), dtype=object))
61
62 edges_group = net_file.create_group('edges')
63 edge_names_group = net_file.create_group('edge_names')
64 edge_names_group.create_dataset(
65 'names',
66 dtype=string_type,
67 data=np.array(list(old_edge_names.values()), dtype=object))
68
69 for n, node in enumerate(nodes):
70 node_group = nodes_group.create_group(node.name)
71 node._save_node(node_group)
72 for edge in node.edges:
73 if edge.node1 == node and edge in edges:
74 edge_group = edges_group.create_group(edge.name)
75 edge._save_edge(edge_group)
76 edges.remove(edge)
77
78 #name edges and nodes back to their original names
79 for n, node in enumerate(nodes):
80 nodes[n].set_name(old_node_names[n])
81
82 for n, edge in enumerate(edges):
83 edges[n].set_name(old_edge_names[n])
84
85
86 def load_nodes(path: str) -> List[AbstractNode]:
87 """Load a set of nodes from disk.
88
89 Args:
90 path: path to file where network is saved.
91 Returns:
92 An iterable of `Node` objects
93 """
94 nodes_list = []
95 edges_list = []
96 with h5py.File(path, 'r') as net_file:
97 nodes = list(net_file["nodes"].keys())
98 node_names = {
99 'node{}'.format(n): v
100 for n, v in enumerate(net_file["node_names"]['names'][()])
101 }
102
103 edge_names = {
104 'edge{}'.format(n): v
105 for n, v in enumerate(net_file["edge_names"]['names'][()])
106 }
107 edges = list(net_file["edges"].keys())
108 for node_name in nodes:
109 node_data = net_file["nodes/" + node_name]
110 node_type = get_component(node_data['type'][()])
111 nodes_list.append(node_type._load_node(node_data=node_data))
112 nodes_dict = {node.name: node for node in nodes_list}
113 for edge in edges:
114 edge_data = net_file["edges/" + edge]
115 edges_list.append(Edge._load_edge(edge_data, nodes_dict))
116
117 for edge in edges_list:
118 edge.set_name(edge_names[edge.name])
119 for node in nodes_list:
120 node.set_name(node_names[node.name])
121
122 return nodes_list
123
[end of tensornetwork/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py
--- a/tensornetwork/__init__.py
+++ b/tensornetwork/__init__.py
@@ -55,7 +55,7 @@
from tensornetwork.version import __version__
from tensornetwork.visualization.graphviz import to_graphviz
from tensornetwork import contractors
-from tensornetwork.utils import load_nodes, save_nodes
+from tensornetwork.utils import load_nodes, save_nodes, from_topology
from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS
from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
from tensornetwork.matrixproductstates.dmrg import FiniteDMRG
diff --git a/tensornetwork/utils.py b/tensornetwork/utils.py
--- a/tensornetwork/utils.py
+++ b/tensornetwork/utils.py
@@ -14,7 +14,7 @@
import h5py
from tensornetwork.component_factory import get_component
-from tensornetwork.network_components import Edge, AbstractNode
+from tensornetwork.network_components import Edge, AbstractNode, Node
from tensornetwork.network_operations import reachable, get_all_edges
from typing import List, Union, BinaryIO
import numpy as np
@@ -120,3 +120,37 @@
node.set_name(node_names[node.name])
return nodes_list
+
+def from_topology(topology, tensors, backend=None):
+ """Create and connect new `tn.Node`s by the given einsum-like topology.
+
+ Example:
+ ```
+ a, b, c = tn.from_topology("xy,yz,zx", [a, b, c])
+ ```
+ Args:
+ topology: A string that defines the topology. Should be like
+ the left side of an einsum expression.
+ tensors: The tensors needed to create the nodes.
+
+ Returns:
+ A list of Nodes.
+ """
+ edge_dict = {}
+ nodes = []
+ split_list = topology.split(",")
+ if len(split_list) != len(tensors):
+ raise ValueError("topology and number of tensors is mismatched")
+ for local_axes, tensor in zip(split_list, tensors):
+ local_axes_list = list(local_axes)
+ if len(local_axes_list) != len(tensor.shape):
+ raise ValueError(f"{local_axes} does not match shape {tensor.shape}")
+ new_node = Node(tensor, axis_names=local_axes_list, backend=backend)
+ for c in local_axes:
+ if c in edge_dict:
+ edge_dict[c] = edge_dict[c] ^ new_node[c]
+ else:
+ edge_dict[c] = new_node[c]
+ nodes.append(new_node)
+ return nodes
+
|
{"golden_diff": "diff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py\n--- a/tensornetwork/__init__.py\n+++ b/tensornetwork/__init__.py\n@@ -55,7 +55,7 @@\n from tensornetwork.version import __version__\n from tensornetwork.visualization.graphviz import to_graphviz\n from tensornetwork import contractors\n-from tensornetwork.utils import load_nodes, save_nodes\n+from tensornetwork.utils import load_nodes, save_nodes, from_topology\n from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS\n from tensornetwork.matrixproductstates.finite_mps import FiniteMPS\n from tensornetwork.matrixproductstates.dmrg import FiniteDMRG\ndiff --git a/tensornetwork/utils.py b/tensornetwork/utils.py\n--- a/tensornetwork/utils.py\n+++ b/tensornetwork/utils.py\n@@ -14,7 +14,7 @@\n \n import h5py\n from tensornetwork.component_factory import get_component\n-from tensornetwork.network_components import Edge, AbstractNode\n+from tensornetwork.network_components import Edge, AbstractNode, Node\n from tensornetwork.network_operations import reachable, get_all_edges\n from typing import List, Union, BinaryIO\n import numpy as np\n@@ -120,3 +120,37 @@\n node.set_name(node_names[node.name])\n \n return nodes_list\n+\n+def from_topology(topology, tensors, backend=None):\n+ \"\"\"Create and connect new `tn.Node`s by the given einsum-like topology.\n+ \n+ Example:\n+ ```\n+ a, b, c = tn.from_topology(\"xy,yz,zx\", [a, b, c])\n+ ```\n+ Args:\n+ topology: A string that defines the topology. Should be like\n+ the left side of an einsum expression.\n+ tensors: The tensors needed to create the nodes.\n+\n+ Returns:\n+ A list of Nodes.\n+ \"\"\"\n+ edge_dict = {}\n+ nodes = []\n+ split_list = topology.split(\",\")\n+ if len(split_list) != len(tensors):\n+ raise ValueError(\"topology and number of tensors is mismatched\")\n+ for local_axes, tensor in zip(split_list, tensors):\n+ local_axes_list = list(local_axes)\n+ if len(local_axes_list) != len(tensor.shape):\n+ raise ValueError(f\"{local_axes} does not match shape {tensor.shape}\")\n+ new_node = Node(tensor, axis_names=local_axes_list, backend=backend)\n+ for c in local_axes:\n+ if c in edge_dict:\n+ edge_dict[c] = edge_dict[c] ^ new_node[c]\n+ else:\n+ edge_dict[c] = new_node[c]\n+ nodes.append(new_node)\n+ return nodes \n+\n", "issue": "Einsum support?\nShould we extend our API to support einsum equations? It could potentially make connecting nodes much less verbose. However, I question whether anyone would want to use `tn.einsum` over say `np.einsum`. 
Perhaps we could support only doing the left side of the equation?\n", "before_files": [{"content": "from tensornetwork.network_components import (AbstractNode, CopyNode, Edge,\n Node, NodeCollection)\nfrom tensornetwork.network_operations import (\n check_connected, check_correct, contract_trace_edges, copy, get_all_edges,\n get_all_nodes, get_neighbors, get_subgraph_dangling, reachable,\n reduced_density, remove_node, replicate_nodes, split_node,\n split_node_full_svd, split_node_qr, split_node_rq, switch_backend)\n\nfrom tensornetwork.tensor import Tensor\nfrom tensornetwork.linalg.initialization import (\n eye,\n ones,\n randn,\n random_uniform,\n zeros\n )\n\nfrom tensornetwork.linalg.linalg import norm, qr, svd\n\n#pylint: disable=redefined-builtin\nfrom tensornetwork.linalg.operations import (\n tensordot,\n reshape,\n transpose,\n take_slice,\n shape,\n sqrt,\n outer,\n einsum,\n conj,\n hconj,\n sin,\n cos,\n exp,\n log,\n diagonal,\n diagflat,\n trace,\n sign,\n abs,\n kron,\n pivot\n )\n\nfrom tensornetwork.backends.decorators import jit\n\nfrom tensornetwork.network_components import (\n contract, contract_between, contract_copy_node, contract_parallel,\n flatten_all_edges, flatten_edges, flatten_edges_between,\n get_all_nondangling, get_all_dangling, get_parallel_edges, get_shared_edges,\n outer_product, outer_product_final_nodes, slice_edge, split_edge)\nfrom tensornetwork.backends.abstract_backend import AbstractBackend\nfrom tensornetwork.network_components import connect, disconnect\nfrom tensornetwork.ncon_interface import ncon\nfrom tensornetwork.version import __version__\nfrom tensornetwork.visualization.graphviz import to_graphviz\nfrom tensornetwork import contractors\nfrom tensornetwork.utils import load_nodes, save_nodes\nfrom tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS\nfrom tensornetwork.matrixproductstates.finite_mps import FiniteMPS\nfrom tensornetwork.matrixproductstates.dmrg import FiniteDMRG\nfrom tensornetwork.matrixproductstates.mpo import FiniteTFI, FiniteXXZ\nfrom tensornetwork.backend_contextmanager import DefaultBackend\nfrom tensornetwork.backend_contextmanager import set_default_backend\nfrom tensornetwork import block_sparse\nfrom tensornetwork.block_sparse.blocksparsetensor import BlockSparseTensor\nfrom tensornetwork.block_sparse.blocksparsetensor import ChargeArray\nfrom tensornetwork.block_sparse.index import Index\nfrom tensornetwork.block_sparse.charge import U1Charge, BaseCharge, Z2Charge\nfrom tensornetwork.block_sparse.charge import ZNCharge\n", "path": "tensornetwork/__init__.py"}, {"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport h5py\nfrom tensornetwork.component_factory import get_component\nfrom tensornetwork.network_components import Edge, AbstractNode\nfrom tensornetwork.network_operations import reachable, get_all_edges\nfrom typing import List, Union, BinaryIO\nimport numpy as np\nstring_type = h5py.special_dtype(vlen=str)\n\n\ndef 
save_nodes(nodes: List[AbstractNode], path: Union[str, BinaryIO]) -> None:\n \"\"\"Save an iterable of nodes into hdf5 format.\n\n Args:\n nodes: An iterable of connected nodes. All nodes have to connect within\n `nodes`.\n path: path to file where network is saved.\n \"\"\"\n if reachable(nodes) > set(nodes):\n raise ValueError(\n \"Some nodes in `nodes` are connected to nodes not contained in `nodes`.\"\n \" Saving not possible.\")\n if len(set(nodes)) < len(list(nodes)):\n raise ValueError(\n 'Some nodes in `nodes` appear more than once. This is not supported')\n #we need to iterate twice and order matters\n edges = list(get_all_edges(nodes))\n nodes = list(nodes)\n\n old_edge_names = {n: edge.name for n, edge in enumerate(edges)}\n old_node_names = {n: node.name for n, node in enumerate(nodes)}\n\n #generate unique names for nodes and edges\n #for saving them\n for n, node in enumerate(nodes):\n node.set_name('node{}'.format(n))\n\n for e, edge in enumerate(edges):\n edge.set_name('edge{}'.format(e))\n\n with h5py.File(path, 'w') as net_file:\n nodes_group = net_file.create_group('nodes')\n node_names_group = net_file.create_group('node_names')\n node_names_group.create_dataset(\n 'names',\n dtype=string_type,\n data=np.array(list(old_node_names.values()), dtype=object))\n\n edges_group = net_file.create_group('edges')\n edge_names_group = net_file.create_group('edge_names')\n edge_names_group.create_dataset(\n 'names',\n dtype=string_type,\n data=np.array(list(old_edge_names.values()), dtype=object))\n\n for n, node in enumerate(nodes):\n node_group = nodes_group.create_group(node.name)\n node._save_node(node_group)\n for edge in node.edges:\n if edge.node1 == node and edge in edges:\n edge_group = edges_group.create_group(edge.name)\n edge._save_edge(edge_group)\n edges.remove(edge)\n\n #name edges and nodes back to their original names\n for n, node in enumerate(nodes):\n nodes[n].set_name(old_node_names[n])\n\n for n, edge in enumerate(edges):\n edges[n].set_name(old_edge_names[n])\n\n\ndef load_nodes(path: str) -> List[AbstractNode]:\n \"\"\"Load a set of nodes from disk.\n\n Args:\n path: path to file where network is saved.\n Returns:\n An iterable of `Node` objects\n \"\"\"\n nodes_list = []\n edges_list = []\n with h5py.File(path, 'r') as net_file:\n nodes = list(net_file[\"nodes\"].keys())\n node_names = {\n 'node{}'.format(n): v\n for n, v in enumerate(net_file[\"node_names\"]['names'][()])\n }\n\n edge_names = {\n 'edge{}'.format(n): v\n for n, v in enumerate(net_file[\"edge_names\"]['names'][()])\n }\n edges = list(net_file[\"edges\"].keys())\n for node_name in nodes:\n node_data = net_file[\"nodes/\" + node_name]\n node_type = get_component(node_data['type'][()])\n nodes_list.append(node_type._load_node(node_data=node_data))\n nodes_dict = {node.name: node for node in nodes_list}\n for edge in edges:\n edge_data = net_file[\"edges/\" + edge]\n edges_list.append(Edge._load_edge(edge_data, nodes_dict))\n\n for edge in edges_list:\n edge.set_name(edge_names[edge.name])\n for node in nodes_list:\n node.set_name(node_names[node.name])\n\n return nodes_list\n", "path": "tensornetwork/utils.py"}]}
| 2,585 | 612 |
gh_patches_debug_56668
|
rasdani/github-patches
|
git_diff
|
magenta__magenta-841
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
execfile() was removed from Python 3
https://github.com/tensorflow/magenta/blob/master/magenta/tools/pip/setup.py#L23
</issue>
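For reference, the usual Python 3 replacement for `execfile()` is to read the file and pass its contents to `exec()`; a minimal sketch is below. The `magenta/version.py` path is taken from the issue link, while the `load_version` helper itself is purely illustrative and not part of the repository.

```python
# Minimal sketch (not the repository's code): Python 3 removed execfile(),
# but the same effect is achieved by reading the file and exec()-ing it.
def load_version(path="magenta/version.py"):
    namespace = {}
    with open(path) as version_file:
        exec(version_file.read(), namespace)  # defines __version__ inside `namespace`
    return namespace["__version__"]
```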
<code>
[start of magenta/tools/pip/setup.py]
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """A setuptools based setup module for magenta."""
15
16 from setuptools import find_packages
17 from setuptools import setup
18
19 # Bit of a hack to parse the version string stored in version.py without
20 # executing __init__.py, which will end up requiring a bunch of dependencies to
21 # execute (e.g., tensorflow, pretty_midi, etc.).
22 # Makes the __version__ variable available.
23 execfile('magenta/version.py')
24
25
26 REQUIRED_PACKAGES = [
27 'IPython',
28 'Pillow >= 3.4.2',
29 'bokeh >= 0.12.0',
30 'futures',
31 'intervaltree >= 2.1.0',
32 'matplotlib >= 1.5.3',
33 'mido == 1.2.6',
34 'numpy >= 1.11.0',
35 'pandas >= 0.18.1',
36 'pretty_midi >= 0.2.6',
37 'python-rtmidi',
38 'scipy >= 0.18.1',
39 'tensorflow >= 1.1.0',
40 'wheel',
41 ]
42
43 CONSOLE_SCRIPTS = [
44 'magenta.interfaces.midi.magenta_midi',
45 'magenta.interfaces.midi.midi_clock',
46 'magenta.models.drums_rnn.drums_rnn_create_dataset',
47 'magenta.models.drums_rnn.drums_rnn_generate',
48 'magenta.models.drums_rnn.drums_rnn_train',
49 'magenta.models.image_stylization.image_stylization_create_dataset',
50 'magenta.models.image_stylization.image_stylization_evaluate',
51 'magenta.models.image_stylization.image_stylization_finetune',
52 'magenta.models.image_stylization.image_stylization_train',
53 'magenta.models.image_stylization.image_stylization_transform',
54 'magenta.models.improv_rnn.improv_rnn_create_dataset',
55 'magenta.models.improv_rnn.improv_rnn_generate',
56 'magenta.models.improv_rnn.improv_rnn_train',
57 'magenta.models.melody_rnn.melody_rnn_create_dataset',
58 'magenta.models.melody_rnn.melody_rnn_generate',
59 'magenta.models.melody_rnn.melody_rnn_train',
60 'magenta.models.nsynth.wavenet.nsynth_generate',
61 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',
62 'magenta.models.performance_rnn.performance_rnn_create_dataset',
63 'magenta.models.performance_rnn.performance_rnn_generate',
64 'magenta.models.performance_rnn.performance_rnn_train',
65 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',
66 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',
67 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',
68 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',
69 'magenta.models.polyphony_rnn.polyphony_rnn_generate',
70 'magenta.models.polyphony_rnn.polyphony_rnn_train',
71 'magenta.models.rl_tuner.rl_tuner_train',
72 'magenta.models.sketch_rnn.sketch_rnn_train',
73 'magenta.scripts.convert_dir_to_note_sequences',
74 ]
75
76 setup(
77 name='magenta',
78 version=__version__, # pylint: disable=undefined-variable
79 description='Use machine learning to create art and music',
80 long_description='',
81 url='https://magenta.tensorflow.org/',
82 author='Google Inc.',
83 author_email='[email protected]',
84 license='Apache 2',
85 # PyPI package information.
86 classifiers=[
87 'Development Status :: 4 - Beta',
88 'Intended Audience :: Developers',
89 'Intended Audience :: Education',
90 'Intended Audience :: Science/Research',
91 'License :: OSI Approved :: Apache Software License',
92 'Programming Language :: Python :: 2.7',
93 'Programming Language :: Python :: 3',
94 'Topic :: Scientific/Engineering :: Mathematics',
95 'Topic :: Software Development :: Libraries :: Python Modules',
96 'Topic :: Software Development :: Libraries',
97 ],
98 keywords='tensorflow machine learning magenta music art',
99
100 packages=find_packages(),
101 install_requires=REQUIRED_PACKAGES,
102 entry_points={
103 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in
104 ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],
105 },
106
107 include_package_data=True,
108 package_data={
109 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],
110 },
111 )
112
[end of magenta/tools/pip/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py
--- a/magenta/tools/pip/setup.py
+++ b/magenta/tools/pip/setup.py
@@ -20,7 +20,8 @@
# executing __init__.py, which will end up requiring a bunch of dependencies to
# execute (e.g., tensorflow, pretty_midi, etc.).
# Makes the __version__ variable available.
-execfile('magenta/version.py')
+with open('magenta/version.py') as in_file:
+ exec(in_file.read())
REQUIRED_PACKAGES = [
|
{"golden_diff": "diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py\n--- a/magenta/tools/pip/setup.py\n+++ b/magenta/tools/pip/setup.py\n@@ -20,7 +20,8 @@\n # executing __init__.py, which will end up requiring a bunch of dependencies to\n # execute (e.g., tensorflow, pretty_midi, etc.).\n # Makes the __version__ variable available.\n-execfile('magenta/version.py')\n+with open('magenta/version.py') as in_file:\n+ exec(in_file.read())\n \n \n REQUIRED_PACKAGES = [\n", "issue": "execfile() was removed from Python 3\nhttps://github.com/tensorflow/magenta/blob/master/magenta/tools/pip/setup.py#L23\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'bokeh >= 0.12.0',\n 'futures',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido == 1.2.6',\n 'numpy >= 1.11.0',\n 'pandas >= 0.18.1',\n 'pretty_midi >= 0.2.6',\n 'python-rtmidi',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.1.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.interfaces.midi.midi_clock',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.nsynth.wavenet.nsynth_generate',\n 'magenta.models.nsynth.wavenet.nsynth_save_embeddings',\n 'magenta.models.performance_rnn.performance_rnn_create_dataset',\n 'magenta.models.performance_rnn.performance_rnn_generate',\n 'magenta.models.performance_rnn.performance_rnn_train',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_create_dataset',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_generate',\n 'magenta.models.pianoroll_rnn_nade.pianoroll_rnn_nade_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 
'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.models.sketch_rnn.sketch_rnn_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n", "path": "magenta/tools/pip/setup.py"}]}
| 1,913 | 128 |
gh_patches_debug_30100
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-3524
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Context mix-up between contents, tutorials and articles
Server: Beta
Version: v18-RC3/ffa18f8
System: Mac OS X El Capitan
Browser: Safari Version 9.0.3 (11601.4.4)
Be warned, this is not easy to follow and the scope of the bug may be wider than the scenario described in this issue, but I have tried to pin the bug down precisely.
Scenario:
- Go to the tutorials drop-down menu.
- Click on a category or a tag, and note that you are in a tutorial context (see the breadcrumb).
- On that category and/or tag page, note that you can use the breadcrumb links and the menu buttons, and that you can click on a category in the "Tutorial categories" menu in the sidebar. Moreover, if you click on one of the categories in the sidebar, you stay in the tutorial context.
- Now go back to the tutorials drop-down menu, click on "All tags", then open one of the listed categories.
- Note that you are now in a "Content" context rather than a tutorial one. From there you can no longer:
  - Use the links in the breadcrumb.
  - Create a content or help the authors.
  - Use the feeds.
  - You can still use the sidebar categories, but you stay in the same context, so you keep the same bugs.
Note 1: All these bugs are most likely identical for articles.
Note 2: You can also reproduce the bug by clicking on categories and tags from a published content.
</issue>
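The symptoms described above — breadcrumb links, creation buttons and feeds all breaking once a category is reached via "All tags" — point at the generic content pages lacking the per-type URL context, feeds included. A rough sketch of the kind of Django URL wiring involved is given below; the route patterns are assumptions for illustration only, and `LastContentFeedATOM` is assumed to exist alongside the `LastContentFeedRSS` class defined in `zds/tutorialv2/feeds.py` further down.

```python
# Hypothetical sketch, not the project's actual fix: expose feed routes on the
# generic content URLs so pages reached via "All tags" keep working feed links.
# LastContentFeedATOM is an assumed Atom counterpart of LastContentFeedRSS.
from django.conf.urls import url

from zds.tutorialv2.feeds import LastContentFeedRSS, LastContentFeedATOM

feed_urlpatterns = [
    url(r'^flux/rss/$', LastContentFeedRSS(), name='feed-rss'),
    url(r'^flux/atom/$', LastContentFeedATOM(), name='feed-atom'),
]
```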
<code>
[start of zds/tutorialv2/urls/urls_contents.py]
1 # coding: utf-8
2
3 from django.conf.urls import url
4
5 from zds.tutorialv2.views.views_contents import DisplayContent, CreateContent, EditContent, \
6 DeleteContent, CreateContainer, DisplayContainer, EditContainer, CreateExtract, EditExtract, \
7 DeleteContainerOrExtract, ManageBetaContent, DisplayHistory, DisplayDiff, ActivateJSFiddleInContent, MoveChild, \
8 DownloadContent, UpdateContentWithArchive, CreateContentFromArchive, ContentsWithHelps, AddAuthorToContent, \
9 RemoveAuthorFromContent, WarnTypo, DisplayBetaContent, DisplayBetaContainer, ContentOfAuthor
10
11 from zds.tutorialv2.views.views_published import SendNoteFormView, UpdateNoteView, \
12 HideReaction, ShowReaction, SendNoteAlert, SolveNoteAlert, TagsListView, ListOnlineContents, FollowContent
13
14 urlpatterns = [
15 url(r'^tutoriels/(?P<pk>\d+)/$',
16 ContentOfAuthor.as_view(type='TUTORIAL', context_object_name='tutorials'),
17 name="find-tutorial"),
18 url(r'^articles/(?P<pk>\d+)/$',
19 ContentOfAuthor.as_view(type='ARTICLE', context_object_name='articles'),
20 name="find-article"),
21
22 url(r'^aides/$', ContentsWithHelps.as_view(), name='helps'),
23 url(r'^(?P<pk>\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/$',
24 DisplayContainer.as_view(public_is_prioritary=False),
25 name='view-container'),
26 url(r'^(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',
27 DisplayContainer.as_view(public_is_prioritary=False),
28 name='view-container'),
29
30 url(r'^(?P<pk>\d+)/(?P<slug>.+)/$', DisplayContent.as_view(public_is_prioritary=False),
31 name='view'),
32
33 url(r'^telecharger/(?P<pk>\d+)/(?P<slug>.+)/$', DownloadContent.as_view(),
34 name='download-zip'),
35
36 # beta:
37 url(r'^beta/(?P<pk>\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/$',
38 DisplayBetaContainer.as_view(public_is_prioritary=False),
39 name='beta-view-container'),
40 url(r'^beta/(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',
41 DisplayBetaContainer.as_view(public_is_prioritary=False),
42 name='beta-view-container'),
43
44 url(r'^beta/(?P<pk>\d+)/(?P<slug>.+)/$', DisplayBetaContent.as_view(), name='beta-view'),
45
46 # reactions:
47 url(r'^reactions/ajouter/$', SendNoteFormView.as_view(redirection_is_needed=False), name="add-reaction"),
48 url(r'^reactions/editer/$', UpdateNoteView.as_view(redirection_is_needed=False), name="update-reaction"),
49 url(r'^reactions/cacher/(?P<pk>\d+)/$', HideReaction.as_view(), name="hide-reaction"),
50 url(r'^reactions/afficher/(?P<pk>\d+)/$', ShowReaction.as_view(), name="show-reaction"),
51 url(r'^reactions/alerter/(?P<pk>\d+)/$', SendNoteAlert.as_view(), name="alert-reaction"),
52 url(r'^reactions/resoudre/$', SolveNoteAlert.as_view(), name="resolve-reaction"),
53
54 # follow:
55 url(r'^follow/(?P<pk>\d+)/$', FollowContent.as_view(), name="follow"),
56
57 # typo:
58 url(r'^reactions/typo/$', WarnTypo.as_view(), name="warn-typo"),
59
60 # create:
61 url(r'^nouveau-tutoriel/$',
62 CreateContent.as_view(created_content_type="TUTORIAL"), name='create-tutorial'),
63 url(r'^nouvel-article/$',
64 CreateContent.as_view(created_content_type="ARTICLE"), name='create-article'),
65 url(r'^nouveau-conteneur/(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',
66 CreateContainer.as_view(),
67 name='create-container'),
68 url(r'^nouveau-conteneur/(?P<pk>\d+)/(?P<slug>.+)/$',
69 CreateContainer.as_view(),
70 name='create-container'),
71
72
73 url(r'^nouvelle-section/(?P<pk>\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/$',
74 CreateExtract.as_view(),
75 name='create-extract'),
76 url(r'^nouvelle-section/(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',
77 CreateExtract.as_view(),
78 name='create-extract'),
79 url(r'^nouvelle-section/(?P<pk>\d+)/(?P<slug>.+)/$',
80 CreateExtract.as_view(),
81 name='create-extract'),
82
83 # edit:
84 url(r'^editer-conteneur/(?P<pk>\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/'
85 r'(?P<container_slug>.+)/$',
86 EditContainer.as_view(),
87 name='edit-container'),
88 url(r'^editer-conteneur/(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',
89 EditContainer.as_view(),
90 name='edit-container'),
91
92 url(r'^editer-section/(?P<pk>\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/'
93 r'(?P<container_slug>.+)/(?P<extract_slug>.+)/$',
94 EditExtract.as_view(),
95 name='edit-extract'),
96 url(r'^editer-section/(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/(?P<extract_slug>.+)/$',
97 EditExtract.as_view(),
98 name='edit-extract'),
99 url(r'^editer-section/(?P<pk>\d+)/(?P<slug>.+)/(?P<extract_slug>.+)/$',
100 EditExtract.as_view(),
101 name='edit-extract'),
102
103 url(r'^editer/(?P<pk>\d+)/(?P<slug>.+)/$', EditContent.as_view(), name='edit'),
104 url(r'^deplacer/$', MoveChild.as_view(), name='move-element'),
105
106 url(r'^historique/(?P<pk>\d+)/(?P<slug>.+)/$', DisplayHistory.as_view(), name="history"),
107 url(r'^comparaison/(?P<pk>\d+)/(?P<slug>.+)/$', DisplayDiff.as_view(), name="diff"),
108 url(r'^ajouter-auteur/(?P<pk>\d+)/$', AddAuthorToContent.as_view(), name="add-author"),
109 url(r'^enlever-auteur/(?P<pk>\d+)/$', RemoveAuthorFromContent.as_view(), name="remove-author"),
110 # beta:
111 url(r'^activer-beta/(?P<pk>\d+)/(?P<slug>.+)/$', ManageBetaContent.as_view(action='set'),
112 name="set-beta"),
113 url(r'^desactiver-beta/(?P<pk>\d+)/(?P<slug>.+)/$', ManageBetaContent.as_view(action='inactive'),
114 name="inactive-beta"),
115
116 # jsfiddle support:
117 url(r'activer-js/', ActivateJSFiddleInContent.as_view(), name="activate-jsfiddle"),
118
119 # delete:
120 url(r'^supprimer/(?P<pk>\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/'
121 r'(?P<object_slug>.+)/$',
122 DeleteContainerOrExtract.as_view(),
123 name='delete'),
124 url(r'^supprimer/(?P<pk>\d+)/(?P<slug>.+)/(?P<container_slug>.+)/(?P<object_slug>.+)/$',
125 DeleteContainerOrExtract.as_view(),
126 name='delete'),
127 url(r'^supprimer/(?P<pk>\d+)/(?P<slug>.+)/(?P<object_slug>.+)/$',
128 DeleteContainerOrExtract.as_view(),
129 name='delete'),
130
131 url(r'^supprimer/(?P<pk>\d+)/(?P<slug>.+)/$', DeleteContent.as_view(), name='delete'),
132
133 # markdown import
134 url(r'^importer/archive/nouveau/$', CreateContentFromArchive.as_view(), name="import-new"),
135 url(r'^importer/(?P<pk>\d+)/(?P<slug>.+)/$', UpdateContentWithArchive.as_view(), name="import"),
136
137 # tags
138 url(r'^tags/$', TagsListView.as_view(), name='tags'),
139
140 url(r'^$', ListOnlineContents.as_view(), name='list'),
141 ]
142
[end of zds/tutorialv2/urls/urls_contents.py]
[start of zds/tutorialv2/feeds.py]
1 # coding: utf-8
2
3 from django.contrib.syndication.views import Feed
4 from django.conf import settings
5
6 from django.utils.feedgenerator import Atom1Feed
7
8 from zds.tutorialv2.models.models_database import PublishedContent
9 from zds.settings import ZDS_APP
10
11
12 class LastContentFeedRSS(Feed):
13 """
14 RSS feed for any type of content.
15 """
16 title = u"Contenu sur {}".format(settings.ZDS_APP['site']['litteral_name'])
17 description = u"Les derniers contenus parus sur {}.".format(settings.ZDS_APP['site']['litteral_name'])
18 link = ""
19 content_type = None
20
21 def items(self):
22 """
23 :return: The last (typically 5) contents (sorted by publication date).
24 If `self.type` is not `None`, the contents will only be of this type.
25 """
26 contents = PublishedContent.objects\
27 .prefetch_related("content")\
28 .prefetch_related("content__authors")
29
30 if self.content_type is not None:
31 contents = contents.filter(content_type=self.content_type)
32
33 return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]
34
35 def item_title(self, item):
36 return item.content.title
37
38 def item_pubdate(self, item):
39 return item.publication_date
40
41 def item_description(self, item):
42 return item.content.description
43
44 def item_author_name(self, item):
45 authors_list = item.content.authors.all()
46 authors = []
47 for authors_obj in authors_list:
48 authors.append(authors_obj.username)
49 authors = ", ".join(authors)
50 return authors
51
52 def item_link(self, item):
53 return item.get_absolute_url_online()
54
55
56 class LastTutorialsFeedRSS(LastContentFeedRSS):
57 """
58 Redefinition of `LastContentFeedRSS` for tutorials only
59 """
60 content_type = "TUTORIAL"
61 link = "/tutoriels/"
62 title = u"Tutoriels sur {}".format(settings.ZDS_APP['site']['litteral_name'])
63 description = u"Les derniers tutoriels parus sur {}.".format(settings.ZDS_APP['site']['litteral_name'])
64
65
66 class LastTutorialsFeedATOM(LastTutorialsFeedRSS):
67 feed_type = Atom1Feed
68 subtitle = LastTutorialsFeedRSS.description
69
70
71 class LastArticlesFeedRSS(LastContentFeedRSS):
72 """
73 Redefinition of `LastContentFeedRSS` for articles only
74 """
75 content_type = "ARTICLE"
76 link = "/articles/"
77 title = u"Articles sur {}".format(settings.ZDS_APP['site']['litteral_name'])
78 description = u"Les derniers articles parus sur {}.".format(settings.ZDS_APP['site']['litteral_name'])
79
80
81 class LastArticlesFeedATOM(LastArticlesFeedRSS):
82 feed_type = Atom1Feed
83 subtitle = LastArticlesFeedRSS.description
84
[end of zds/tutorialv2/feeds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zds/tutorialv2/feeds.py b/zds/tutorialv2/feeds.py
--- a/zds/tutorialv2/feeds.py
+++ b/zds/tutorialv2/feeds.py
@@ -13,7 +13,7 @@
"""
RSS feed for any type of content.
"""
- title = u"Contenu sur {}".format(settings.ZDS_APP['site']['litteral_name'])
+ title = u"Contenus sur {}".format(settings.ZDS_APP['site']['litteral_name'])
description = u"Les derniers contenus parus sur {}.".format(settings.ZDS_APP['site']['litteral_name'])
link = ""
content_type = None
@@ -53,6 +53,11 @@
return item.get_absolute_url_online()
+class LastContentFeedATOM(LastContentFeedRSS):
+ feed_type = Atom1Feed
+ subtitle = LastContentFeedRSS.description
+
+
class LastTutorialsFeedRSS(LastContentFeedRSS):
"""
Redefinition of `LastContentFeedRSS` for tutorials only
diff --git a/zds/tutorialv2/urls/urls_contents.py b/zds/tutorialv2/urls/urls_contents.py
--- a/zds/tutorialv2/urls/urls_contents.py
+++ b/zds/tutorialv2/urls/urls_contents.py
@@ -11,7 +11,13 @@
from zds.tutorialv2.views.views_published import SendNoteFormView, UpdateNoteView, \
HideReaction, ShowReaction, SendNoteAlert, SolveNoteAlert, TagsListView, ListOnlineContents, FollowContent
+from zds.tutorialv2.feeds import LastContentFeedRSS, LastContentFeedATOM
+
urlpatterns = [
+ # Flux
+ url(r'^flux/rss/$', LastContentFeedRSS(), name='feed-rss'),
+ url(r'^flux/atom/$', LastContentFeedATOM(), name='feed-atom'),
+
url(r'^tutoriels/(?P<pk>\d+)/$',
ContentOfAuthor.as_view(type='TUTORIAL', context_object_name='tutorials'),
name="find-tutorial"),
|
{"golden_diff": "diff --git a/zds/tutorialv2/feeds.py b/zds/tutorialv2/feeds.py\n--- a/zds/tutorialv2/feeds.py\n+++ b/zds/tutorialv2/feeds.py\n@@ -13,7 +13,7 @@\n \"\"\"\n RSS feed for any type of content.\n \"\"\"\n- title = u\"Contenu sur {}\".format(settings.ZDS_APP['site']['litteral_name'])\n+ title = u\"Contenus sur {}\".format(settings.ZDS_APP['site']['litteral_name'])\n description = u\"Les derniers contenus parus sur {}.\".format(settings.ZDS_APP['site']['litteral_name'])\n link = \"\"\n content_type = None\n@@ -53,6 +53,11 @@\n return item.get_absolute_url_online()\n \n \n+class LastContentFeedATOM(LastContentFeedRSS):\n+ feed_type = Atom1Feed\n+ subtitle = LastContentFeedRSS.description\n+\n+\n class LastTutorialsFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for tutorials only\ndiff --git a/zds/tutorialv2/urls/urls_contents.py b/zds/tutorialv2/urls/urls_contents.py\n--- a/zds/tutorialv2/urls/urls_contents.py\n+++ b/zds/tutorialv2/urls/urls_contents.py\n@@ -11,7 +11,13 @@\n from zds.tutorialv2.views.views_published import SendNoteFormView, UpdateNoteView, \\\n HideReaction, ShowReaction, SendNoteAlert, SolveNoteAlert, TagsListView, ListOnlineContents, FollowContent\n \n+from zds.tutorialv2.feeds import LastContentFeedRSS, LastContentFeedATOM\n+\n urlpatterns = [\n+ # Flux\n+ url(r'^flux/rss/$', LastContentFeedRSS(), name='feed-rss'),\n+ url(r'^flux/atom/$', LastContentFeedATOM(), name='feed-atom'),\n+\n url(r'^tutoriels/(?P<pk>\\d+)/$',\n ContentOfAuthor.as_view(type='TUTORIAL', context_object_name='tutorials'),\n name=\"find-tutorial\"),\n", "issue": "M\u00e9lange de contexte entre les contenus, les tutos et les articles\nServeur : B\u00eata\nVersion : v18-RC3/ffa18f8\nSyst\u00e8me : Mac OS X El Capitain\nNavigateur : Safari Version 9.0.3 (11601.4.4)\n\nAttention, ce n'est pas simple \u00e0 comprendre et la port\u00e9e du bug est peut-\u00eatre plus important que le sc\u00e9nario d\u00e9crit dans cette issue mais j'ai tent\u00e9 de bien cerner le bug.\n\nSc\u00e9nario :\n- Rendez-vous dans le menu d\u00e9roulant des tutoriels.\n- Cliquez sur une cat\u00e9gorie ou un tag, constatez que vous \u00eates dans un contexte de tutoriel (cf. le fil d'ariane).\n- Sur cette page de cat\u00e9gorie et/ou de tag, constatez que vous pouvez utiliser les liens du fil d'ariane, les boutons du menu et que vous pouvez cliqu\u00e9 sur une cat\u00e9gorie dans le menu \"Cat\u00e9gories des tutoriels\" dans la sidebar. D'ailleurs, si vous cliquez sur l'une des cat\u00e9gories dans la sidebar, vous restez dans le contexte des tutoriels.\n- Maintenant retournez dans le menu d\u00e9roulant des tutoriels et cliquez sur \"Tous les tags\", puis rendez-vous dans l'une des cat\u00e9gories list\u00e9e.\n- Constatez que vous \u00eates dans un contexte \"Contenu\" et non plus tutoriel. Puis vous ne pouvez \n - Plus utiliser les liens dans le fil d'ariane.\n - Plus cr\u00e9er un contenu ni aider les auteurs.\n - Plus utiliser les flux.\n - Vous pouvez utilis\u00e9 les cat\u00e9gories de la sidebar mais vous restez dans le contexte des menus. 
Vous gardez donc les m\u00eames bugs.\n\nNote 1 : Tous ces bugs sont surement identiques avec les articles.\nNote 2 : Vous pouvez aussi retrouver le bug en cliquant sur les cat\u00e9gories et les tags depuis un contenu publi\u00e9.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom django.conf.urls import url\n\nfrom zds.tutorialv2.views.views_contents import DisplayContent, CreateContent, EditContent, \\\n DeleteContent, CreateContainer, DisplayContainer, EditContainer, CreateExtract, EditExtract, \\\n DeleteContainerOrExtract, ManageBetaContent, DisplayHistory, DisplayDiff, ActivateJSFiddleInContent, MoveChild, \\\n DownloadContent, UpdateContentWithArchive, CreateContentFromArchive, ContentsWithHelps, AddAuthorToContent, \\\n RemoveAuthorFromContent, WarnTypo, DisplayBetaContent, DisplayBetaContainer, ContentOfAuthor\n\nfrom zds.tutorialv2.views.views_published import SendNoteFormView, UpdateNoteView, \\\n HideReaction, ShowReaction, SendNoteAlert, SolveNoteAlert, TagsListView, ListOnlineContents, FollowContent\n\nurlpatterns = [\n url(r'^tutoriels/(?P<pk>\\d+)/$',\n ContentOfAuthor.as_view(type='TUTORIAL', context_object_name='tutorials'),\n name=\"find-tutorial\"),\n url(r'^articles/(?P<pk>\\d+)/$',\n ContentOfAuthor.as_view(type='ARTICLE', context_object_name='articles'),\n name=\"find-article\"),\n\n url(r'^aides/$', ContentsWithHelps.as_view(), name='helps'),\n url(r'^(?P<pk>\\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/$',\n DisplayContainer.as_view(public_is_prioritary=False),\n name='view-container'),\n url(r'^(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',\n DisplayContainer.as_view(public_is_prioritary=False),\n name='view-container'),\n\n url(r'^(?P<pk>\\d+)/(?P<slug>.+)/$', DisplayContent.as_view(public_is_prioritary=False),\n name='view'),\n\n url(r'^telecharger/(?P<pk>\\d+)/(?P<slug>.+)/$', DownloadContent.as_view(),\n name='download-zip'),\n\n # beta:\n url(r'^beta/(?P<pk>\\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/$',\n DisplayBetaContainer.as_view(public_is_prioritary=False),\n name='beta-view-container'),\n url(r'^beta/(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',\n DisplayBetaContainer.as_view(public_is_prioritary=False),\n name='beta-view-container'),\n\n url(r'^beta/(?P<pk>\\d+)/(?P<slug>.+)/$', DisplayBetaContent.as_view(), name='beta-view'),\n\n # reactions:\n url(r'^reactions/ajouter/$', SendNoteFormView.as_view(redirection_is_needed=False), name=\"add-reaction\"),\n url(r'^reactions/editer/$', UpdateNoteView.as_view(redirection_is_needed=False), name=\"update-reaction\"),\n url(r'^reactions/cacher/(?P<pk>\\d+)/$', HideReaction.as_view(), name=\"hide-reaction\"),\n url(r'^reactions/afficher/(?P<pk>\\d+)/$', ShowReaction.as_view(), name=\"show-reaction\"),\n url(r'^reactions/alerter/(?P<pk>\\d+)/$', SendNoteAlert.as_view(), name=\"alert-reaction\"),\n url(r'^reactions/resoudre/$', SolveNoteAlert.as_view(), name=\"resolve-reaction\"),\n\n # follow:\n url(r'^follow/(?P<pk>\\d+)/$', FollowContent.as_view(), name=\"follow\"),\n\n # typo:\n url(r'^reactions/typo/$', WarnTypo.as_view(), name=\"warn-typo\"),\n\n # create:\n url(r'^nouveau-tutoriel/$',\n CreateContent.as_view(created_content_type=\"TUTORIAL\"), name='create-tutorial'),\n url(r'^nouvel-article/$',\n CreateContent.as_view(created_content_type=\"ARTICLE\"), name='create-article'),\n url(r'^nouveau-conteneur/(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',\n CreateContainer.as_view(),\n name='create-container'),\n 
url(r'^nouveau-conteneur/(?P<pk>\\d+)/(?P<slug>.+)/$',\n CreateContainer.as_view(),\n name='create-container'),\n\n\n url(r'^nouvelle-section/(?P<pk>\\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/$',\n CreateExtract.as_view(),\n name='create-extract'),\n url(r'^nouvelle-section/(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',\n CreateExtract.as_view(),\n name='create-extract'),\n url(r'^nouvelle-section/(?P<pk>\\d+)/(?P<slug>.+)/$',\n CreateExtract.as_view(),\n name='create-extract'),\n\n # edit:\n url(r'^editer-conteneur/(?P<pk>\\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/'\n r'(?P<container_slug>.+)/$',\n EditContainer.as_view(),\n name='edit-container'),\n url(r'^editer-conteneur/(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/$',\n EditContainer.as_view(),\n name='edit-container'),\n\n url(r'^editer-section/(?P<pk>\\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/'\n r'(?P<container_slug>.+)/(?P<extract_slug>.+)/$',\n EditExtract.as_view(),\n name='edit-extract'),\n url(r'^editer-section/(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/(?P<extract_slug>.+)/$',\n EditExtract.as_view(),\n name='edit-extract'),\n url(r'^editer-section/(?P<pk>\\d+)/(?P<slug>.+)/(?P<extract_slug>.+)/$',\n EditExtract.as_view(),\n name='edit-extract'),\n\n url(r'^editer/(?P<pk>\\d+)/(?P<slug>.+)/$', EditContent.as_view(), name='edit'),\n url(r'^deplacer/$', MoveChild.as_view(), name='move-element'),\n\n url(r'^historique/(?P<pk>\\d+)/(?P<slug>.+)/$', DisplayHistory.as_view(), name=\"history\"),\n url(r'^comparaison/(?P<pk>\\d+)/(?P<slug>.+)/$', DisplayDiff.as_view(), name=\"diff\"),\n url(r'^ajouter-auteur/(?P<pk>\\d+)/$', AddAuthorToContent.as_view(), name=\"add-author\"),\n url(r'^enlever-auteur/(?P<pk>\\d+)/$', RemoveAuthorFromContent.as_view(), name=\"remove-author\"),\n # beta:\n url(r'^activer-beta/(?P<pk>\\d+)/(?P<slug>.+)/$', ManageBetaContent.as_view(action='set'),\n name=\"set-beta\"),\n url(r'^desactiver-beta/(?P<pk>\\d+)/(?P<slug>.+)/$', ManageBetaContent.as_view(action='inactive'),\n name=\"inactive-beta\"),\n\n # jsfiddle support:\n url(r'activer-js/', ActivateJSFiddleInContent.as_view(), name=\"activate-jsfiddle\"),\n\n # delete:\n url(r'^supprimer/(?P<pk>\\d+)/(?P<slug>.+)/(?P<parent_container_slug>.+)/(?P<container_slug>.+)/'\n r'(?P<object_slug>.+)/$',\n DeleteContainerOrExtract.as_view(),\n name='delete'),\n url(r'^supprimer/(?P<pk>\\d+)/(?P<slug>.+)/(?P<container_slug>.+)/(?P<object_slug>.+)/$',\n DeleteContainerOrExtract.as_view(),\n name='delete'),\n url(r'^supprimer/(?P<pk>\\d+)/(?P<slug>.+)/(?P<object_slug>.+)/$',\n DeleteContainerOrExtract.as_view(),\n name='delete'),\n\n url(r'^supprimer/(?P<pk>\\d+)/(?P<slug>.+)/$', DeleteContent.as_view(), name='delete'),\n\n # markdown import\n url(r'^importer/archive/nouveau/$', CreateContentFromArchive.as_view(), name=\"import-new\"),\n url(r'^importer/(?P<pk>\\d+)/(?P<slug>.+)/$', UpdateContentWithArchive.as_view(), name=\"import\"),\n\n # tags\n url(r'^tags/$', TagsListView.as_view(), name='tags'),\n\n url(r'^$', ListOnlineContents.as_view(), name='list'),\n]\n", "path": "zds/tutorialv2/urls/urls_contents.py"}, {"content": "# coding: utf-8\n\nfrom django.contrib.syndication.views import Feed\nfrom django.conf import settings\n\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom zds.tutorialv2.models.models_database import PublishedContent\nfrom zds.settings import ZDS_APP\n\n\nclass LastContentFeedRSS(Feed):\n \"\"\"\n RSS feed for any type of content.\n \"\"\"\n title = u\"Contenu sur 
{}\".format(settings.ZDS_APP['site']['litteral_name'])\n description = u\"Les derniers contenus parus sur {}.\".format(settings.ZDS_APP['site']['litteral_name'])\n link = \"\"\n content_type = None\n\n def items(self):\n \"\"\"\n :return: The last (typically 5) contents (sorted by publication date).\n If `self.type` is not `None`, the contents will only be of this type.\n \"\"\"\n contents = PublishedContent.objects\\\n .prefetch_related(\"content\")\\\n .prefetch_related(\"content__authors\")\n\n if self.content_type is not None:\n contents = contents.filter(content_type=self.content_type)\n\n return contents.order_by('-publication_date')[:ZDS_APP['content']['feed_length']]\n\n def item_title(self, item):\n return item.content.title\n\n def item_pubdate(self, item):\n return item.publication_date\n\n def item_description(self, item):\n return item.content.description\n\n def item_author_name(self, item):\n authors_list = item.content.authors.all()\n authors = []\n for authors_obj in authors_list:\n authors.append(authors_obj.username)\n authors = \", \".join(authors)\n return authors\n\n def item_link(self, item):\n return item.get_absolute_url_online()\n\n\nclass LastTutorialsFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for tutorials only\n \"\"\"\n content_type = \"TUTORIAL\"\n link = \"/tutoriels/\"\n title = u\"Tutoriels sur {}\".format(settings.ZDS_APP['site']['litteral_name'])\n description = u\"Les derniers tutoriels parus sur {}.\".format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastTutorialsFeedATOM(LastTutorialsFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastTutorialsFeedRSS.description\n\n\nclass LastArticlesFeedRSS(LastContentFeedRSS):\n \"\"\"\n Redefinition of `LastContentFeedRSS` for articles only\n \"\"\"\n content_type = \"ARTICLE\"\n link = \"/articles/\"\n title = u\"Articles sur {}\".format(settings.ZDS_APP['site']['litteral_name'])\n description = u\"Les derniers articles parus sur {}.\".format(settings.ZDS_APP['site']['litteral_name'])\n\n\nclass LastArticlesFeedATOM(LastArticlesFeedRSS):\n feed_type = Atom1Feed\n subtitle = LastArticlesFeedRSS.description\n", "path": "zds/tutorialv2/feeds.py"}]}
| 4,051 | 465 |
gh_patches_debug_25197
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-4829
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Twilio SMS sending "U" instead of numerical MFA code
**Describe the bug**
When using the SMS authenticator stage with Twilio configured, users are being sent MFA text messages containing nothing other than the letter "U". I have confirmed in the Twilio console that the body of the message received from Authentik is indeed just the letter "U".
**To Reproduce**
Steps to reproduce the behavior:
1. Log in with a user that has an SMS device set up, or set up a new SMS device
2. See issue with received text message containing only the letter "U"
**Expected behavior**
Users should receive a text message with a numerical code for MFA.
**Version and Deployment (please complete the following information):**
- authentik version: 2023.2.2
- Deployment: docker-compose
</issue>
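A plausible reading of the single-letter body is that the message handed to the Twilio client was a Django lazy-translation proxy rather than a plain string, so it was serialized incorrectly on the way out. The snippet below illustrates that general pitfall and the usual `str()` cast that avoids it; it is a sketch assuming a configured Django project, not authentik's actual code.

```python
# Illustration of the general pitfall, not authentik's code: gettext_lazy()
# returns a lazy proxy object rather than a plain str, and some client
# libraries serialize such proxies incorrectly. Casting with str() forces
# evaluation before the value is handed to an external API.
# (Assumes a configured Django project.)
from django.utils.translation import gettext_lazy as _


def build_sms_body(token: str) -> str:
    message = _("Use this code to authenticate: %(token)s" % {"token": token})
    return str(message)  # a plain string is safe to pass to an SMS client
```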
<code>
[start of authentik/stages/authenticator_sms/models.py]
1 """SMS Authenticator models"""
2 from hashlib import sha256
3 from typing import Optional
4
5 from django.contrib.auth import get_user_model
6 from django.db import models
7 from django.utils.translation import gettext_lazy as _
8 from django.views import View
9 from django_otp.models import SideChannelDevice
10 from requests.exceptions import RequestException
11 from rest_framework.exceptions import ValidationError
12 from rest_framework.serializers import BaseSerializer
13 from structlog.stdlib import get_logger
14 from twilio.base.exceptions import TwilioRestException
15 from twilio.rest import Client
16
17 from authentik.core.types import UserSettingSerializer
18 from authentik.events.models import Event, EventAction, NotificationWebhookMapping
19 from authentik.events.utils import sanitize_item
20 from authentik.flows.models import ConfigurableStage, Stage
21 from authentik.lib.models import SerializerModel
22 from authentik.lib.utils.errors import exception_to_string
23 from authentik.lib.utils.http import get_http_session
24
25 LOGGER = get_logger()
26
27
28 class SMSProviders(models.TextChoices):
29 """Supported SMS Providers"""
30
31 TWILIO = "twilio"
32 GENERIC = "generic"
33
34
35 class SMSAuthTypes(models.TextChoices):
36 """Supported SMS Auth Types"""
37
38 BASIC = "basic"
39 BEARER = "bearer"
40
41
42 class AuthenticatorSMSStage(ConfigurableStage, Stage):
43 """Use SMS-based TOTP instead of authenticator-based."""
44
45 provider = models.TextField(choices=SMSProviders.choices)
46
47 from_number = models.TextField()
48
49 account_sid = models.TextField()
50 auth = models.TextField()
51 auth_password = models.TextField(default="", blank=True)
52 auth_type = models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)
53
54 verify_only = models.BooleanField(
55 default=False,
56 help_text=_(
57 "When enabled, the Phone number is only used during enrollment to verify the "
58 "users authenticity. Only a hash of the phone number is saved to ensure it is "
59 "not re-used in the future."
60 ),
61 )
62
63 mapping = models.ForeignKey(
64 NotificationWebhookMapping,
65 null=True,
66 default=None,
67 on_delete=models.SET_NULL,
68 help_text=_("Optionally modify the payload being sent to custom providers."),
69 )
70
71 def send(self, token: str, device: "SMSDevice"):
72 """Send message via selected provider"""
73 if self.provider == SMSProviders.TWILIO:
74 return self.send_twilio(token, device)
75 if self.provider == SMSProviders.GENERIC:
76 return self.send_generic(token, device)
77 raise ValueError(f"invalid provider {self.provider}")
78
79 def get_message(self, token: str) -> str:
80 """Get SMS message"""
81 return _("Use this code to authenticate in authentik: %(token)s" % {"token": token})
82
83 def send_twilio(self, token: str, device: "SMSDevice"):
84 """send sms via twilio provider"""
85 client = Client(self.account_sid, self.auth)
86
87 try:
88 message = client.messages.create(
89 to=device.phone_number, from_=self.from_number, body=self.get_message(token)
90 )
91 LOGGER.debug("Sent SMS", to=device, message=message.sid)
92 except TwilioRestException as exc:
93 LOGGER.warning("Error sending token by Twilio SMS", exc=exc, msg=exc.msg)
94 raise ValidationError(exc.msg)
95
96 def send_generic(self, token: str, device: "SMSDevice"):
97 """Send SMS via outside API"""
98 payload = {
99 "From": self.from_number,
100 "To": device.phone_number,
101 "Body": token,
102 "Message": self.get_message(token),
103 }
104
105 if self.mapping:
106 payload = sanitize_item(
107 self.mapping.evaluate(
108 user=device.user,
109 request=None,
110 device=device,
111 token=token,
112 stage=self,
113 )
114 )
115
116 if self.auth_type == SMSAuthTypes.BEARER:
117 response = get_http_session().post(
118 f"{self.account_sid}",
119 json=payload,
120 headers={"Authorization": f"Bearer {self.auth}"},
121 )
122 elif self.auth_type == SMSAuthTypes.BASIC:
123 response = get_http_session().post(
124 f"{self.account_sid}",
125 json=payload,
126 auth=(self.auth, self.auth_password),
127 )
128 else:
129 raise ValueError(f"Invalid Auth type '{self.auth_type}'")
130
131 LOGGER.debug("Sent SMS", to=device.phone_number)
132 try:
133 response.raise_for_status()
134 except RequestException as exc:
135 LOGGER.warning(
136 "Error sending token by generic SMS",
137 exc=exc,
138 status=response.status_code,
139 body=response.text[:100],
140 )
141 Event.new(
142 EventAction.CONFIGURATION_ERROR,
143 message="Error sending SMS",
144 exc=exception_to_string(exc),
145 status_code=response.status_code,
146 body=response.text,
147 ).set_user(device.user).save()
148 if response.status_code >= 400:
149 raise ValidationError(response.text)
150 raise
151
152 @property
153 def serializer(self) -> type[BaseSerializer]:
154 from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer
155
156 return AuthenticatorSMSStageSerializer
157
158 @property
159 def type(self) -> type[View]:
160 from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView
161
162 return AuthenticatorSMSStageView
163
164 @property
165 def component(self) -> str:
166 return "ak-stage-authenticator-sms-form"
167
168 def ui_user_settings(self) -> Optional[UserSettingSerializer]:
169 return UserSettingSerializer(
170 data={
171 "title": str(self._meta.verbose_name),
172 "component": "ak-user-settings-authenticator-sms",
173 }
174 )
175
176 def __str__(self) -> str:
177 return f"SMS Authenticator Setup Stage {self.name}"
178
179 class Meta:
180 verbose_name = _("SMS Authenticator Setup Stage")
181 verbose_name_plural = _("SMS Authenticator Setup Stages")
182
183
184 def hash_phone_number(phone_number: str) -> str:
185 """Hash phone number with prefix"""
186 return "hash:" + sha256(phone_number.encode()).hexdigest()
187
188
189 class SMSDevice(SerializerModel, SideChannelDevice):
190 """SMS Device"""
191
192 user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
193
194 # Connect to the stage to when validating access we know the API Credentials
195 stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)
196
197 phone_number = models.TextField()
198
199 last_t = models.DateTimeField(auto_now=True)
200
201 def set_hashed_number(self):
202 """Set phone_number to hashed number"""
203 self.phone_number = hash_phone_number(self.phone_number)
204
205 @property
206 def is_hashed(self) -> bool:
207 """Check if the phone number is hashed"""
208 return self.phone_number.startswith("hash:")
209
210 @property
211 def serializer(self) -> type[BaseSerializer]:
212 from authentik.stages.authenticator_sms.api import SMSDeviceSerializer
213
214 return SMSDeviceSerializer
215
216 def verify_token(self, token):
217 valid = super().verify_token(token)
218 if valid:
219 self.save()
220 return valid
221
222 def __str__(self):
223 return str(self.name) or str(self.user)
224
225 class Meta:
226 verbose_name = _("SMS Device")
227 verbose_name_plural = _("SMS Devices")
228 unique_together = (("stage", "phone_number"),)
229
[end of authentik/stages/authenticator_sms/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/authentik/stages/authenticator_sms/models.py b/authentik/stages/authenticator_sms/models.py
--- a/authentik/stages/authenticator_sms/models.py
+++ b/authentik/stages/authenticator_sms/models.py
@@ -86,7 +86,7 @@
try:
message = client.messages.create(
- to=device.phone_number, from_=self.from_number, body=self.get_message(token)
+ to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))
)
LOGGER.debug("Sent SMS", to=device, message=message.sid)
except TwilioRestException as exc:
@@ -115,13 +115,13 @@
if self.auth_type == SMSAuthTypes.BEARER:
response = get_http_session().post(
- f"{self.account_sid}",
+ self.account_sid,
json=payload,
headers={"Authorization": f"Bearer {self.auth}"},
)
elif self.auth_type == SMSAuthTypes.BASIC:
response = get_http_session().post(
- f"{self.account_sid}",
+ self.account_sid,
json=payload,
auth=(self.auth, self.auth_password),
)
|
{"golden_diff": "diff --git a/authentik/stages/authenticator_sms/models.py b/authentik/stages/authenticator_sms/models.py\n--- a/authentik/stages/authenticator_sms/models.py\n+++ b/authentik/stages/authenticator_sms/models.py\n@@ -86,7 +86,7 @@\n \n try:\n message = client.messages.create(\n- to=device.phone_number, from_=self.from_number, body=self.get_message(token)\n+ to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))\n )\n LOGGER.debug(\"Sent SMS\", to=device, message=message.sid)\n except TwilioRestException as exc:\n@@ -115,13 +115,13 @@\n \n if self.auth_type == SMSAuthTypes.BEARER:\n response = get_http_session().post(\n- f\"{self.account_sid}\",\n+ self.account_sid,\n json=payload,\n headers={\"Authorization\": f\"Bearer {self.auth}\"},\n )\n elif self.auth_type == SMSAuthTypes.BASIC:\n response = get_http_session().post(\n- f\"{self.account_sid}\",\n+ self.account_sid,\n json=payload,\n auth=(self.auth, self.auth_password),\n )\n", "issue": "Twilio SMS sending \"U\" instead of numerical MFA code\n**Describe the bug**\r\nWhen using the SMS authenticator stage with Twilio configured, users are being sent MFA text messages containing nothing other than the letter \"U\". I have confirmed in the Twilio console that the body of the message received from Authentik is indeed just the letter \"U\".\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Log in with a user that has an SMS device setup, or set up a new SMS device\r\n2. See issue with received text message containing only the letter \"U\"\r\n\r\n**Expected behavior**\r\nUsers should receive a text message with a numerical code for MFA.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 2023.2.2\r\n - Deployment: docker-compose\r\n\n", "before_files": [{"content": "\"\"\"SMS Authenticator models\"\"\"\nfrom hashlib import sha256\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django_otp.models import SideChannelDevice\nfrom requests.exceptions import RequestException\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\nfrom twilio.base.exceptions import TwilioRestException\nfrom twilio.rest import Client\n\nfrom authentik.core.types import UserSettingSerializer\nfrom authentik.events.models import Event, EventAction, NotificationWebhookMapping\nfrom authentik.events.utils import sanitize_item\nfrom authentik.flows.models import ConfigurableStage, Stage\nfrom authentik.lib.models import SerializerModel\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.lib.utils.http import get_http_session\n\nLOGGER = get_logger()\n\n\nclass SMSProviders(models.TextChoices):\n \"\"\"Supported SMS Providers\"\"\"\n\n TWILIO = \"twilio\"\n GENERIC = \"generic\"\n\n\nclass SMSAuthTypes(models.TextChoices):\n \"\"\"Supported SMS Auth Types\"\"\"\n\n BASIC = \"basic\"\n BEARER = \"bearer\"\n\n\nclass AuthenticatorSMSStage(ConfigurableStage, Stage):\n \"\"\"Use SMS-based TOTP instead of authenticator-based.\"\"\"\n\n provider = models.TextField(choices=SMSProviders.choices)\n\n from_number = models.TextField()\n\n account_sid = models.TextField()\n auth = models.TextField()\n auth_password = models.TextField(default=\"\", blank=True)\n auth_type = 
models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)\n\n verify_only = models.BooleanField(\n default=False,\n help_text=_(\n \"When enabled, the Phone number is only used during enrollment to verify the \"\n \"users authenticity. Only a hash of the phone number is saved to ensure it is \"\n \"not re-used in the future.\"\n ),\n )\n\n mapping = models.ForeignKey(\n NotificationWebhookMapping,\n null=True,\n default=None,\n on_delete=models.SET_NULL,\n help_text=_(\"Optionally modify the payload being sent to custom providers.\"),\n )\n\n def send(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send message via selected provider\"\"\"\n if self.provider == SMSProviders.TWILIO:\n return self.send_twilio(token, device)\n if self.provider == SMSProviders.GENERIC:\n return self.send_generic(token, device)\n raise ValueError(f\"invalid provider {self.provider}\")\n\n def get_message(self, token: str) -> str:\n \"\"\"Get SMS message\"\"\"\n return _(\"Use this code to authenticate in authentik: %(token)s\" % {\"token\": token})\n\n def send_twilio(self, token: str, device: \"SMSDevice\"):\n \"\"\"send sms via twilio provider\"\"\"\n client = Client(self.account_sid, self.auth)\n\n try:\n message = client.messages.create(\n to=device.phone_number, from_=self.from_number, body=self.get_message(token)\n )\n LOGGER.debug(\"Sent SMS\", to=device, message=message.sid)\n except TwilioRestException as exc:\n LOGGER.warning(\"Error sending token by Twilio SMS\", exc=exc, msg=exc.msg)\n raise ValidationError(exc.msg)\n\n def send_generic(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send SMS via outside API\"\"\"\n payload = {\n \"From\": self.from_number,\n \"To\": device.phone_number,\n \"Body\": token,\n \"Message\": self.get_message(token),\n }\n\n if self.mapping:\n payload = sanitize_item(\n self.mapping.evaluate(\n user=device.user,\n request=None,\n device=device,\n token=token,\n stage=self,\n )\n )\n\n if self.auth_type == SMSAuthTypes.BEARER:\n response = get_http_session().post(\n f\"{self.account_sid}\",\n json=payload,\n headers={\"Authorization\": f\"Bearer {self.auth}\"},\n )\n elif self.auth_type == SMSAuthTypes.BASIC:\n response = get_http_session().post(\n f\"{self.account_sid}\",\n json=payload,\n auth=(self.auth, self.auth_password),\n )\n else:\n raise ValueError(f\"Invalid Auth type '{self.auth_type}'\")\n\n LOGGER.debug(\"Sent SMS\", to=device.phone_number)\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Error sending token by generic SMS\",\n exc=exc,\n status=response.status_code,\n body=response.text[:100],\n )\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=\"Error sending SMS\",\n exc=exception_to_string(exc),\n status_code=response.status_code,\n body=response.text,\n ).set_user(device.user).save()\n if response.status_code >= 400:\n raise ValidationError(response.text)\n raise\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer\n\n return AuthenticatorSMSStageSerializer\n\n @property\n def type(self) -> type[View]:\n from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView\n\n return AuthenticatorSMSStageView\n\n @property\n def component(self) -> str:\n return \"ak-stage-authenticator-sms-form\"\n\n def ui_user_settings(self) -> Optional[UserSettingSerializer]:\n return UserSettingSerializer(\n data={\n \"title\": str(self._meta.verbose_name),\n \"component\": 
\"ak-user-settings-authenticator-sms\",\n }\n )\n\n def __str__(self) -> str:\n return f\"SMS Authenticator Setup Stage {self.name}\"\n\n class Meta:\n verbose_name = _(\"SMS Authenticator Setup Stage\")\n verbose_name_plural = _(\"SMS Authenticator Setup Stages\")\n\n\ndef hash_phone_number(phone_number: str) -> str:\n \"\"\"Hash phone number with prefix\"\"\"\n return \"hash:\" + sha256(phone_number.encode()).hexdigest()\n\n\nclass SMSDevice(SerializerModel, SideChannelDevice):\n \"\"\"SMS Device\"\"\"\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n\n # Connect to the stage to when validating access we know the API Credentials\n stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)\n\n phone_number = models.TextField()\n\n last_t = models.DateTimeField(auto_now=True)\n\n def set_hashed_number(self):\n \"\"\"Set phone_number to hashed number\"\"\"\n self.phone_number = hash_phone_number(self.phone_number)\n\n @property\n def is_hashed(self) -> bool:\n \"\"\"Check if the phone number is hashed\"\"\"\n return self.phone_number.startswith(\"hash:\")\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import SMSDeviceSerializer\n\n return SMSDeviceSerializer\n\n def verify_token(self, token):\n valid = super().verify_token(token)\n if valid:\n self.save()\n return valid\n\n def __str__(self):\n return str(self.name) or str(self.user)\n\n class Meta:\n verbose_name = _(\"SMS Device\")\n verbose_name_plural = _(\"SMS Devices\")\n unique_together = ((\"stage\", \"phone_number\"),)\n", "path": "authentik/stages/authenticator_sms/models.py"}]}
| 2,880 | 264 |
gh_patches_debug_30244
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-8738
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running pytest locally fails due to no TESTING or API_KEY
### Repository commit
1faf10b5c2dff8cef3f5d59f60a126bd19bb1c44
### Python version (python --version)
Python 3.11.3
### Dependencies version (pip freeze)
```
absl-py==1.4.0
astunparse==1.6.3
beautifulsoup4==4.12.2
cachetools==5.3.0
certifi==2023.5.7
cffi==1.15.1
cfgv==3.3.1
charset-normalizer==3.1.0
colorama==0.4.6
contourpy==1.0.7
cryptography==40.0.2
cycler==0.11.0
dill==0.3.6
distlib==0.3.6
fake-useragent==1.1.3
filelock==3.12.0
flatbuffers==23.5.9
fonttools==4.39.4
gast==0.4.0
google-auth==2.18.0
google-auth-oauthlib==1.0.0
google-pasta==0.2.0
grpcio==1.54.2
h5py==3.8.0
identify==2.5.24
idna==3.4
iniconfig==2.0.0
jax==0.4.10
joblib==1.2.0
keras==2.12.0
kiwisolver==1.4.4
libclang==16.0.0
lxml==4.9.2
Markdown==3.4.3
markdown-it-py==2.2.0
MarkupSafe==2.1.2
matplotlib==3.7.1
mdurl==0.1.2
ml-dtypes==0.1.0
mpmath==1.3.0
networkx==3.1
nodeenv==1.8.0
ntlm-auth==1.5.0
numpy==1.23.5
oauthlib==3.2.2
opencv-python==4.7.0.72
opt-einsum==3.3.0
packaging==23.1
pandas==2.0.1
patsy==0.5.3
pbr==5.11.1
Pillow==9.5.0
pip==22.3.1
platformdirs==3.5.1
pluggy==1.0.0
ply==3.11
pre-commit==3.3.1
projectq==0.8.0
protobuf==4.23.0
psutil==5.9.5
pyasn1==0.5.0
pyasn1-modules==0.3.0
pycparser==2.21
Pygments==2.15.1
pyparsing==3.0.9
pytest==7.3.1
python-dateutil==2.8.2
pytz==2023.3
PyYAML==6.0
qiskit==0.43.0
qiskit-aer==0.12.0
qiskit-ibmq-provider==0.20.2
qiskit-terra==0.24.0
requests==2.30.0
requests-ntlm==1.1.0
requests-oauthlib==1.3.1
rich==13.3.5
rsa==4.9
ruff==0.0.267
rustworkx==0.12.1
scikit-fuzzy==0.4.2
scikit-learn==1.2.2
scipy==1.10.1
setuptools==65.5.0
six==1.16.0
soupsieve==2.4.1
statsmodels==0.14.0
stevedore==5.0.0
sympy==1.12
tensorboard==2.12.3
tensorboard-data-server==0.7.0
tensorflow==2.12.0
tensorflow-estimator==2.12.0
tensorflow-intel==2.12.0
tensorflow-io-gcs-filesystem==0.31.0
termcolor==2.3.0
texttable==1.6.7
threadpoolctl==3.1.0
tweepy==4.14.0
typing_extensions==4.5.0
tzdata==2023.3
urllib3==1.26.15
virtualenv==20.23.0
websocket-client==1.5.1
websockets==11.0.3
Werkzeug==2.3.4
wheel==0.40.0
wrapt==1.14.1
xgboost==1.7.5
yulewalker==0.1.1
```
### Expected behavior
Every test running successfully
### Actual behavior
```
ERROR web_programming/currency_converter.py - KeyError: "API key must be provided in the 'AMDOREN_API_KEY' environment variable."
```
</issue>
<code>
[start of web_programming/currency_converter.py]
1 """
2 This is used to convert the currency using the Amdoren Currency API
3 https://www.amdoren.com
4 """
5
6 import os
7
8 import requests
9
10 URL_BASE = "https://www.amdoren.com/api/currency.php"
11 TESTING = os.getenv("CI", "")
12 API_KEY = os.getenv("AMDOREN_API_KEY", "")
13
14 if not API_KEY and not TESTING:
15 raise KeyError(
16 "API key must be provided in the 'AMDOREN_API_KEY' environment variable."
17 )
18
19 # Currency and their description
20 list_of_currencies = """
21 AED United Arab Emirates Dirham
22 AFN Afghan Afghani
23 ALL Albanian Lek
24 AMD Armenian Dram
25 ANG Netherlands Antillean Guilder
26 AOA Angolan Kwanza
27 ARS Argentine Peso
28 AUD Australian Dollar
29 AWG Aruban Florin
30 AZN Azerbaijani Manat
31 BAM Bosnia & Herzegovina Convertible Mark
32 BBD Barbadian Dollar
33 BDT Bangladeshi Taka
34 BGN Bulgarian Lev
35 BHD Bahraini Dinar
36 BIF Burundian Franc
37 BMD Bermudian Dollar
38 BND Brunei Dollar
39 BOB Bolivian Boliviano
40 BRL Brazilian Real
41 BSD Bahamian Dollar
42 BTN Bhutanese Ngultrum
43 BWP Botswana Pula
44 BYN Belarus Ruble
45 BZD Belize Dollar
46 CAD Canadian Dollar
47 CDF Congolese Franc
48 CHF Swiss Franc
49 CLP Chilean Peso
50 CNY Chinese Yuan
51 COP Colombian Peso
52 CRC Costa Rican Colon
53 CUC Cuban Convertible Peso
54 CVE Cape Verdean Escudo
55 CZK Czech Republic Koruna
56 DJF Djiboutian Franc
57 DKK Danish Krone
58 DOP Dominican Peso
59 DZD Algerian Dinar
60 EGP Egyptian Pound
61 ERN Eritrean Nakfa
62 ETB Ethiopian Birr
63 EUR Euro
64 FJD Fiji Dollar
65 GBP British Pound Sterling
66 GEL Georgian Lari
67 GHS Ghanaian Cedi
68 GIP Gibraltar Pound
69 GMD Gambian Dalasi
70 GNF Guinea Franc
71 GTQ Guatemalan Quetzal
72 GYD Guyanaese Dollar
73 HKD Hong Kong Dollar
74 HNL Honduran Lempira
75 HRK Croatian Kuna
76 HTG Haiti Gourde
77 HUF Hungarian Forint
78 IDR Indonesian Rupiah
79 ILS Israeli Shekel
80 INR Indian Rupee
81 IQD Iraqi Dinar
82 IRR Iranian Rial
83 ISK Icelandic Krona
84 JMD Jamaican Dollar
85 JOD Jordanian Dinar
86 JPY Japanese Yen
87 KES Kenyan Shilling
88 KGS Kyrgystani Som
89 KHR Cambodian Riel
90 KMF Comorian Franc
91 KPW North Korean Won
92 KRW South Korean Won
93 KWD Kuwaiti Dinar
94 KYD Cayman Islands Dollar
95 KZT Kazakhstan Tenge
96 LAK Laotian Kip
97 LBP Lebanese Pound
98 LKR Sri Lankan Rupee
99 LRD Liberian Dollar
100 LSL Lesotho Loti
101 LYD Libyan Dinar
102 MAD Moroccan Dirham
103 MDL Moldovan Leu
104 MGA Malagasy Ariary
105 MKD Macedonian Denar
106 MMK Myanma Kyat
107 MNT Mongolian Tugrik
108 MOP Macau Pataca
109 MRO Mauritanian Ouguiya
110 MUR Mauritian Rupee
111 MVR Maldivian Rufiyaa
112 MWK Malawi Kwacha
113 MXN Mexican Peso
114 MYR Malaysian Ringgit
115 MZN Mozambican Metical
116 NAD Namibian Dollar
117 NGN Nigerian Naira
118 NIO Nicaragua Cordoba
119 NOK Norwegian Krone
120 NPR Nepalese Rupee
121 NZD New Zealand Dollar
122 OMR Omani Rial
123 PAB Panamanian Balboa
124 PEN Peruvian Nuevo Sol
125 PGK Papua New Guinean Kina
126 PHP Philippine Peso
127 PKR Pakistani Rupee
128 PLN Polish Zloty
129 PYG Paraguayan Guarani
130 QAR Qatari Riyal
131 RON Romanian Leu
132 RSD Serbian Dinar
133 RUB Russian Ruble
134 RWF Rwanda Franc
135 SAR Saudi Riyal
136 SBD Solomon Islands Dollar
137 SCR Seychellois Rupee
138 SDG Sudanese Pound
139 SEK Swedish Krona
140 SGD Singapore Dollar
141 SHP Saint Helena Pound
142 SLL Sierra Leonean Leone
143 SOS Somali Shilling
144 SRD Surinamese Dollar
145 SSP South Sudanese Pound
146 STD Sao Tome and Principe Dobra
147 SYP Syrian Pound
148 SZL Swazi Lilangeni
149 THB Thai Baht
150 TJS Tajikistan Somoni
151 TMT Turkmenistani Manat
152 TND Tunisian Dinar
153 TOP Tonga Paanga
154 TRY Turkish Lira
155 TTD Trinidad and Tobago Dollar
156 TWD New Taiwan Dollar
157 TZS Tanzanian Shilling
158 UAH Ukrainian Hryvnia
159 UGX Ugandan Shilling
160 USD United States Dollar
161 UYU Uruguayan Peso
162 UZS Uzbekistan Som
163 VEF Venezuelan Bolivar
164 VND Vietnamese Dong
165 VUV Vanuatu Vatu
166 WST Samoan Tala
167 XAF Central African CFA franc
168 XCD East Caribbean Dollar
169 XOF West African CFA franc
170 XPF CFP Franc
171 YER Yemeni Rial
172 ZAR South African Rand
173 ZMW Zambian Kwacha
174 """
175
176
177 def convert_currency(
178 from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY
179 ) -> str:
180 """https://www.amdoren.com/currency-api/"""
181 params = locals()
182 params["from"] = params.pop("from_")
183 res = requests.get(URL_BASE, params=params).json()
184 return str(res["amount"]) if res["error"] == 0 else res["error_message"]
185
186
187 if __name__ == "__main__":
188 print(
189 convert_currency(
190 input("Enter from currency: ").strip(),
191 input("Enter to currency: ").strip(),
192 float(input("Enter the amount: ").strip()),
193 )
194 )
195
[end of web_programming/currency_converter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py
--- a/web_programming/currency_converter.py
+++ b/web_programming/currency_converter.py
@@ -8,13 +8,7 @@
import requests
URL_BASE = "https://www.amdoren.com/api/currency.php"
-TESTING = os.getenv("CI", "")
-API_KEY = os.getenv("AMDOREN_API_KEY", "")
-if not API_KEY and not TESTING:
- raise KeyError(
- "API key must be provided in the 'AMDOREN_API_KEY' environment variable."
- )
# Currency and their description
list_of_currencies = """
@@ -175,20 +169,31 @@
def convert_currency(
- from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY
+ from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = ""
) -> str:
"""https://www.amdoren.com/currency-api/"""
+ # Instead of manually generating parameters
params = locals()
+ # from is a reserved keyword
params["from"] = params.pop("from_")
res = requests.get(URL_BASE, params=params).json()
return str(res["amount"]) if res["error"] == 0 else res["error_message"]
if __name__ == "__main__":
+ TESTING = os.getenv("CI", "")
+ API_KEY = os.getenv("AMDOREN_API_KEY", "")
+
+ if not API_KEY and not TESTING:
+ raise KeyError(
+ "API key must be provided in the 'AMDOREN_API_KEY' environment variable."
+ )
+
print(
convert_currency(
input("Enter from currency: ").strip(),
input("Enter to currency: ").strip(),
float(input("Enter the amount: ").strip()),
+ API_KEY,
)
)
|
{"golden_diff": "diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py\n--- a/web_programming/currency_converter.py\n+++ b/web_programming/currency_converter.py\n@@ -8,13 +8,7 @@\n import requests\n \n URL_BASE = \"https://www.amdoren.com/api/currency.php\"\n-TESTING = os.getenv(\"CI\", \"\")\n-API_KEY = os.getenv(\"AMDOREN_API_KEY\", \"\")\n \n-if not API_KEY and not TESTING:\n- raise KeyError(\n- \"API key must be provided in the 'AMDOREN_API_KEY' environment variable.\"\n- )\n \n # Currency and their description\n list_of_currencies = \"\"\"\n@@ -175,20 +169,31 @@\n \n \n def convert_currency(\n- from_: str = \"USD\", to: str = \"INR\", amount: float = 1.0, api_key: str = API_KEY\n+ from_: str = \"USD\", to: str = \"INR\", amount: float = 1.0, api_key: str = \"\"\n ) -> str:\n \"\"\"https://www.amdoren.com/currency-api/\"\"\"\n+ # Instead of manually generating parameters\n params = locals()\n+ # from is a reserved keyword\n params[\"from\"] = params.pop(\"from_\")\n res = requests.get(URL_BASE, params=params).json()\n return str(res[\"amount\"]) if res[\"error\"] == 0 else res[\"error_message\"]\n \n \n if __name__ == \"__main__\":\n+ TESTING = os.getenv(\"CI\", \"\")\n+ API_KEY = os.getenv(\"AMDOREN_API_KEY\", \"\")\n+\n+ if not API_KEY and not TESTING:\n+ raise KeyError(\n+ \"API key must be provided in the 'AMDOREN_API_KEY' environment variable.\"\n+ )\n+\n print(\n convert_currency(\n input(\"Enter from currency: \").strip(),\n input(\"Enter to currency: \").strip(),\n float(input(\"Enter the amount: \").strip()),\n+ API_KEY,\n )\n )\n", "issue": "Running pytest locally fails due to no TESTING or API_KEY\n### Repository commit\n\n1faf10b5c2dff8cef3f5d59f60a126bd19bb1c44\n\n### Python version (python --version)\n\nPython 3.11.3\n\n### Dependencies version (pip 
freeze)\n\n```\r\nabsl-py==1.4.0\r\nastunparse==1.6.3\r\nbeautifulsoup4==4.12.2\r\ncachetools==5.3.0\r\ncertifi==2023.5.7\r\ncffi==1.15.1\r\ncfgv==3.3.1\r\ncharset-normalizer==3.1.0\r\ncolorama==0.4.6\r\ncontourpy==1.0.7\r\ncryptography==40.0.2\r\ncycler==0.11.0\r\ndill==0.3.6\r\ndistlib==0.3.6\r\nfake-useragent==1.1.3\r\nfilelock==3.12.0\r\nflatbuffers==23.5.9\r\nfonttools==4.39.4\r\ngast==0.4.0\r\ngoogle-auth==2.18.0\r\ngoogle-auth-oauthlib==1.0.0\r\ngoogle-pasta==0.2.0\r\ngrpcio==1.54.2\r\nh5py==3.8.0\r\nidentify==2.5.24\r\nidna==3.4\r\niniconfig==2.0.0\r\njax==0.4.10\r\njoblib==1.2.0\r\nkeras==2.12.0\r\nkiwisolver==1.4.4\r\nlibclang==16.0.0\r\nlxml==4.9.2\r\nMarkdown==3.4.3\r\nmarkdown-it-py==2.2.0\r\nMarkupSafe==2.1.2\r\nmatplotlib==3.7.1\r\nmdurl==0.1.2\r\nml-dtypes==0.1.0\r\nmpmath==1.3.0\r\nnetworkx==3.1\r\nnodeenv==1.8.0\r\nntlm-auth==1.5.0\r\nnumpy==1.23.5\r\noauthlib==3.2.2\r\nopencv-python==4.7.0.72\r\nopt-einsum==3.3.0\r\npackaging==23.1\r\npandas==2.0.1\r\npatsy==0.5.3\r\npbr==5.11.1\r\nPillow==9.5.0\r\npip==22.3.1\r\nplatformdirs==3.5.1\r\npluggy==1.0.0\r\nply==3.11\r\npre-commit==3.3.1\r\nprojectq==0.8.0\r\nprotobuf==4.23.0\r\npsutil==5.9.5\r\npyasn1==0.5.0\r\npyasn1-modules==0.3.0\r\npycparser==2.21\r\nPygments==2.15.1\r\npyparsing==3.0.9\r\npytest==7.3.1\r\npython-dateutil==2.8.2\r\npytz==2023.3\r\nPyYAML==6.0\r\nqiskit==0.43.0\r\nqiskit-aer==0.12.0\r\nqiskit-ibmq-provider==0.20.2\r\nqiskit-terra==0.24.0\r\nrequests==2.30.0\r\nrequests-ntlm==1.1.0\r\nrequests-oauthlib==1.3.1\r\nrich==13.3.5\r\nrsa==4.9\r\nruff==0.0.267\r\nrustworkx==0.12.1\r\nscikit-fuzzy==0.4.2\r\nscikit-learn==1.2.2\r\nscipy==1.10.1\r\nsetuptools==65.5.0\r\nsix==1.16.0\r\nsoupsieve==2.4.1\r\nstatsmodels==0.14.0\r\nstevedore==5.0.0\r\nsympy==1.12\r\ntensorboard==2.12.3\r\ntensorboard-data-server==0.7.0\r\ntensorflow==2.12.0\r\ntensorflow-estimator==2.12.0\r\ntensorflow-intel==2.12.0\r\ntensorflow-io-gcs-filesystem==0.31.0\r\ntermcolor==2.3.0\r\ntexttable==1.6.7\r\nthreadpoolctl==3.1.0\r\ntweepy==4.14.0\r\ntyping_extensions==4.5.0\r\ntzdata==2023.3\r\nurllib3==1.26.15\r\nvirtualenv==20.23.0\r\nwebsocket-client==1.5.1\r\nwebsockets==11.0.3\r\nWerkzeug==2.3.4\r\nwheel==0.40.0\r\nwrapt==1.14.1\r\nxgboost==1.7.5\r\nyulewalker==0.1.1\r\n```\n\n### Expected behavior\n\nEvery test running successfully\n\n### Actual behavior\n\n```\r\nERROR web_programming/currency_converter.py - KeyError: \"API key must be provided in the 'AMDOREN_API_KEY' environment variable.\"\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nThis is used to convert the currency using the Amdoren Currency API\nhttps://www.amdoren.com\n\"\"\"\n\nimport os\n\nimport requests\n\nURL_BASE = \"https://www.amdoren.com/api/currency.php\"\nTESTING = os.getenv(\"CI\", \"\")\nAPI_KEY = os.getenv(\"AMDOREN_API_KEY\", \"\")\n\nif not API_KEY and not TESTING:\n raise KeyError(\n \"API key must be provided in the 'AMDOREN_API_KEY' environment variable.\"\n )\n\n# Currency and their description\nlist_of_currencies = \"\"\"\nAED\tUnited Arab Emirates Dirham\nAFN\tAfghan Afghani\nALL\tAlbanian Lek\nAMD\tArmenian Dram\nANG\tNetherlands Antillean Guilder\nAOA\tAngolan Kwanza\nARS\tArgentine Peso\nAUD\tAustralian Dollar\nAWG\tAruban Florin\nAZN\tAzerbaijani Manat\nBAM\tBosnia & Herzegovina Convertible Mark\nBBD\tBarbadian Dollar\nBDT\tBangladeshi Taka\nBGN\tBulgarian Lev\nBHD\tBahraini Dinar\nBIF\tBurundian Franc\nBMD\tBermudian Dollar\nBND\tBrunei Dollar\nBOB\tBolivian Boliviano\nBRL\tBrazilian Real\nBSD\tBahamian Dollar\nBTN\tBhutanese 
Ngultrum\nBWP\tBotswana Pula\nBYN\tBelarus Ruble\nBZD\tBelize Dollar\nCAD\tCanadian Dollar\nCDF\tCongolese Franc\nCHF\tSwiss Franc\nCLP\tChilean Peso\nCNY\tChinese Yuan\nCOP\tColombian Peso\nCRC\tCosta Rican Colon\nCUC\tCuban Convertible Peso\nCVE\tCape Verdean Escudo\nCZK\tCzech Republic Koruna\nDJF\tDjiboutian Franc\nDKK\tDanish Krone\nDOP\tDominican Peso\nDZD\tAlgerian Dinar\nEGP\tEgyptian Pound\nERN\tEritrean Nakfa\nETB\tEthiopian Birr\nEUR\tEuro\nFJD\tFiji Dollar\nGBP\tBritish Pound Sterling\nGEL\tGeorgian Lari\nGHS\tGhanaian Cedi\nGIP\tGibraltar Pound\nGMD\tGambian Dalasi\nGNF\tGuinea Franc\nGTQ\tGuatemalan Quetzal\nGYD\tGuyanaese Dollar\nHKD\tHong Kong Dollar\nHNL\tHonduran Lempira\nHRK\tCroatian Kuna\nHTG\tHaiti Gourde\nHUF\tHungarian Forint\nIDR\tIndonesian Rupiah\nILS\tIsraeli Shekel\nINR\tIndian Rupee\nIQD\tIraqi Dinar\nIRR\tIranian Rial\nISK\tIcelandic Krona\nJMD\tJamaican Dollar\nJOD\tJordanian Dinar\nJPY\tJapanese Yen\nKES\tKenyan Shilling\nKGS\tKyrgystani Som\nKHR\tCambodian Riel\nKMF\tComorian Franc\nKPW\tNorth Korean Won\nKRW\tSouth Korean Won\nKWD\tKuwaiti Dinar\nKYD\tCayman Islands Dollar\nKZT\tKazakhstan Tenge\nLAK\tLaotian Kip\nLBP\tLebanese Pound\nLKR\tSri Lankan Rupee\nLRD\tLiberian Dollar\nLSL\tLesotho Loti\nLYD\tLibyan Dinar\nMAD\tMoroccan Dirham\nMDL\tMoldovan Leu\nMGA\tMalagasy Ariary\nMKD\tMacedonian Denar\nMMK\tMyanma Kyat\nMNT\tMongolian Tugrik\nMOP\tMacau Pataca\nMRO\tMauritanian Ouguiya\nMUR\tMauritian Rupee\nMVR\tMaldivian Rufiyaa\nMWK\tMalawi Kwacha\nMXN\tMexican Peso\nMYR\tMalaysian Ringgit\nMZN\tMozambican Metical\nNAD\tNamibian Dollar\nNGN\tNigerian Naira\nNIO\tNicaragua Cordoba\nNOK\tNorwegian Krone\nNPR\tNepalese Rupee\nNZD\tNew Zealand Dollar\nOMR\tOmani Rial\nPAB\tPanamanian Balboa\nPEN\tPeruvian Nuevo Sol\nPGK\tPapua New Guinean Kina\nPHP\tPhilippine Peso\nPKR\tPakistani Rupee\nPLN\tPolish Zloty\nPYG\tParaguayan Guarani\nQAR\tQatari Riyal\nRON\tRomanian Leu\nRSD\tSerbian Dinar\nRUB\tRussian Ruble\nRWF\tRwanda Franc\nSAR\tSaudi Riyal\nSBD\tSolomon Islands Dollar\nSCR\tSeychellois Rupee\nSDG\tSudanese Pound\nSEK\tSwedish Krona\nSGD\tSingapore Dollar\nSHP\tSaint Helena Pound\nSLL\tSierra Leonean Leone\nSOS\tSomali Shilling\nSRD\tSurinamese Dollar\nSSP\tSouth Sudanese Pound\nSTD\tSao Tome and Principe Dobra\nSYP\tSyrian Pound\nSZL\tSwazi Lilangeni\nTHB\tThai Baht\nTJS\tTajikistan Somoni\nTMT\tTurkmenistani Manat\nTND\tTunisian Dinar\nTOP\tTonga Paanga\nTRY\tTurkish Lira\nTTD\tTrinidad and Tobago Dollar\nTWD\tNew Taiwan Dollar\nTZS\tTanzanian Shilling\nUAH\tUkrainian Hryvnia\nUGX\tUgandan Shilling\nUSD\tUnited States Dollar\nUYU\tUruguayan Peso\nUZS\tUzbekistan Som\nVEF\tVenezuelan Bolivar\nVND\tVietnamese Dong\nVUV\tVanuatu Vatu\nWST\tSamoan Tala\nXAF\tCentral African CFA franc\nXCD\tEast Caribbean Dollar\nXOF\tWest African CFA franc\nXPF\tCFP Franc\nYER\tYemeni Rial\nZAR\tSouth African Rand\nZMW\tZambian Kwacha\n\"\"\"\n\n\ndef convert_currency(\n from_: str = \"USD\", to: str = \"INR\", amount: float = 1.0, api_key: str = API_KEY\n) -> str:\n \"\"\"https://www.amdoren.com/currency-api/\"\"\"\n params = locals()\n params[\"from\"] = params.pop(\"from_\")\n res = requests.get(URL_BASE, params=params).json()\n return str(res[\"amount\"]) if res[\"error\"] == 0 else res[\"error_message\"]\n\n\nif __name__ == \"__main__\":\n print(\n convert_currency(\n input(\"Enter from currency: \").strip(),\n input(\"Enter to currency: \").strip(),\n float(input(\"Enter the amount: \").strip()),\n )\n )\n", "path": "web_programming/currency_converter.py"}]}
| 3,744 | 438 |
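The golden diff above moves the `AMDOREN_API_KEY` check out of import time and into the `__main__` block so that test collection no longer raises. A condensed sketch of that pattern follows; it mirrors the diff rather than reproducing the full module, and the long currency table is omitted.

```python
# Import-time vs. run-time validation, as applied in the golden diff above:
# importing this module never raises; the API key is only required when the
# script is actually executed.
import os

import requests

URL_BASE = "https://www.amdoren.com/api/currency.php"


def convert_currency(from_: str = "USD", to: str = "INR",
                     amount: float = 1.0, api_key: str = "") -> str:
    params = locals()
    params["from"] = params.pop("from_")  # "from" is a reserved keyword
    res = requests.get(URL_BASE, params=params).json()
    return str(res["amount"]) if res["error"] == 0 else res["error_message"]


if __name__ == "__main__":
    api_key = os.getenv("AMDOREN_API_KEY", "")
    if not api_key and not os.getenv("CI", ""):
        raise KeyError(
            "API key must be provided in the 'AMDOREN_API_KEY' environment variable."
        )
    print(convert_currency("USD", "INR", 1.0, api_key))
```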
gh_patches_debug_19061
|
rasdani/github-patches
|
git_diff
|
ycm-core__ycmd-623
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Processing .ycm_extra_conf.py creates __pycache__ directory
I'm not sure if this is the intended behaviour. When YCM reads configuration creates a compiled version in `__pycache__`. I know that this behaviour can be disabled passing to `python` the `-B` argument or setting `PYTHONDONTWRITEBYTECODE=1` environmental variable. I don't want to disable global bytecode generation but I want to disable for `.ycm_extra_conf.py` because I feel it pollutes my project directory.
Is there a easy/reliable way to disable it in the YCM config?
</issue>
<code>
[start of ycmd/extra_conf_store.py]
1 # Copyright (C) 2011, 2012 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 # NOTE: This module is used as a Singleton
19
20 from __future__ import unicode_literals
21 from __future__ import print_function
22 from __future__ import division
23 from __future__ import absolute_import
24 from future import standard_library
25 standard_library.install_aliases()
26 from builtins import * # noqa
27
28 import os
29 import random
30 import string
31 import sys
32 import logging
33 from threading import Lock
34 from ycmd import user_options_store
35 from ycmd.responses import UnknownExtraConf, YCM_EXTRA_CONF_FILENAME
36 from ycmd.utils import LoadPythonSource, PathsToAllParentFolders
37 from fnmatch import fnmatch
38
39
40 # Singleton variables
41 _module_for_module_file = {}
42 _module_for_module_file_lock = Lock()
43 _module_file_for_source_file = {}
44 _module_file_for_source_file_lock = Lock()
45
46
47 def Reset():
48 global _module_for_module_file, _module_file_for_source_file
49 _module_for_module_file = {}
50 _module_file_for_source_file = {}
51
52
53 def ModuleForSourceFile( filename ):
54 return Load( ModuleFileForSourceFile( filename ) )
55
56
57 def ModuleFileForSourceFile( filename ):
58 """This will try all files returned by _ExtraConfModuleSourceFilesForFile in
59 order and return the filename of the first module that was allowed to load.
60 If no module was found or allowed to load, None is returned."""
61
62 with _module_file_for_source_file_lock:
63 if filename not in _module_file_for_source_file:
64 for module_file in _ExtraConfModuleSourceFilesForFile( filename ):
65 if Load( module_file ):
66 _module_file_for_source_file[ filename ] = module_file
67 break
68
69 return _module_file_for_source_file.setdefault( filename )
70
71
72 def CallGlobalExtraConfYcmCorePreloadIfExists():
73 _CallGlobalExtraConfMethod( 'YcmCorePreload' )
74
75
76 def Shutdown():
77 # VimClose is for the sake of backwards compatibility; it's a no-op when it
78 # doesn't exist.
79 _CallGlobalExtraConfMethod( 'VimClose' )
80 _CallGlobalExtraConfMethod( 'Shutdown' )
81
82
83 def _CallGlobalExtraConfMethod( function_name ):
84 logger = _Logger()
85 global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
86 if not ( global_ycm_extra_conf and
87 os.path.exists( global_ycm_extra_conf ) ):
88 logger.debug( 'No global extra conf, not calling method ' + function_name )
89 return
90
91 module = Load( global_ycm_extra_conf, force = True )
92 if not module or not hasattr( module, function_name ):
93 logger.debug( 'Global extra conf not loaded or no function ' +
94 function_name )
95 return
96
97 logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(
98 function_name, global_ycm_extra_conf ) )
99 getattr( module, function_name )()
100
101
102 def Disable( module_file ):
103 """Disables the loading of a module for the current session."""
104 with _module_for_module_file_lock:
105 _module_for_module_file[ module_file ] = None
106
107
108 def _ShouldLoad( module_file ):
109 """Checks if a module is safe to be loaded. By default this will try to
110 decide using a white-/blacklist and ask the user for confirmation as a
111 fallback."""
112
113 if ( module_file == _GlobalYcmExtraConfFileLocation() or
114 not user_options_store.Value( 'confirm_extra_conf' ) ):
115 return True
116
117 globlist = user_options_store.Value( 'extra_conf_globlist' )
118 for glob in globlist:
119 is_blacklisted = glob[0] == '!'
120 if _MatchesGlobPattern( module_file, glob.lstrip('!') ):
121 return not is_blacklisted
122
123 raise UnknownExtraConf( module_file )
124
125
126 def Load( module_file, force = False ):
127 """Load and return the module contained in a file.
128 Using force = True the module will be loaded regardless
129 of the criteria in _ShouldLoad.
130 This will return None if the module was not allowed to be loaded."""
131
132 if not module_file:
133 return None
134
135 if not force:
136 with _module_for_module_file_lock:
137 if module_file in _module_for_module_file:
138 return _module_for_module_file[ module_file ]
139
140 if not _ShouldLoad( module_file ):
141 Disable( module_file )
142 return None
143
144 # This has to be here because a long time ago, the ycm_extra_conf.py files
145 # used to import clang_helpers.py from the cpp folder. This is not needed
146 # anymore, but there are a lot of old ycm_extra_conf.py files that we don't
147 # want to break.
148 sys.path.insert( 0, _PathToCppCompleterFolder() )
149 module = LoadPythonSource( _RandomName(), module_file )
150 del sys.path[ 0 ]
151
152 with _module_for_module_file_lock:
153 _module_for_module_file[ module_file ] = module
154 return module
155
156
157 def _MatchesGlobPattern( filename, glob ):
158 """Returns true if a filename matches a given pattern. A '~' in glob will be
159 expanded to the home directory and checking will be performed using absolute
160 paths. See the documentation of fnmatch for the supported patterns."""
161
162 abspath = os.path.abspath( filename )
163 return fnmatch( abspath, os.path.abspath( os.path.expanduser( glob ) ) )
164
165
166 def _ExtraConfModuleSourceFilesForFile( filename ):
167 """For a given filename, search all parent folders for YCM_EXTRA_CONF_FILENAME
168 files that will compute the flags necessary to compile the file.
169 If _GlobalYcmExtraConfFileLocation() exists it is returned as a fallback."""
170
171 for folder in PathsToAllParentFolders( filename ):
172 candidate = os.path.join( folder, YCM_EXTRA_CONF_FILENAME )
173 if os.path.exists( candidate ):
174 yield candidate
175 global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()
176 if ( global_ycm_extra_conf
177 and os.path.exists( global_ycm_extra_conf ) ):
178 yield global_ycm_extra_conf
179
180
181 def _PathToCppCompleterFolder():
182 """Returns the path to the 'cpp' completer folder. This is necessary
183 because ycm_extra_conf files need it on the path."""
184 return os.path.join( _DirectoryOfThisScript(), 'completers', 'cpp' )
185
186
187 def _DirectoryOfThisScript():
188 return os.path.dirname( os.path.abspath( __file__ ) )
189
190
191 def _RandomName():
192 """Generates a random module name."""
193 return ''.join( random.choice( string.ascii_lowercase ) for x in range( 15 ) )
194
195
196 def _GlobalYcmExtraConfFileLocation():
197 return os.path.expanduser(
198 user_options_store.Value( 'global_ycm_extra_conf' ) )
199
200
201 def _Logger():
202 return logging.getLogger( __name__ )
203
[end of ycmd/extra_conf_store.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ycmd/extra_conf_store.py b/ycmd/extra_conf_store.py
--- a/ycmd/extra_conf_store.py
+++ b/ycmd/extra_conf_store.py
@@ -146,7 +146,21 @@
# anymore, but there are a lot of old ycm_extra_conf.py files that we don't
# want to break.
sys.path.insert( 0, _PathToCppCompleterFolder() )
- module = LoadPythonSource( _RandomName(), module_file )
+
+ # By default, the Python interpreter compiles source files into bytecode to
+ # load them faster next time they are run. These *.pyc files are generated
+ # along the source files prior to Python 3.2 or in a __pycache__ folder for
+ # newer versions. We disable the generation of these files when loading
+ # ycm_extra_conf.py files as users do not want them inside their projects.
+ # The drawback is negligible since ycm_extra_conf.py files are generally small
+ # files thus really fast to compile and only loaded once by editing session.
+ old_dont_write_bytecode = sys.dont_write_bytecode
+ sys.dont_write_bytecode = True
+ try:
+ module = LoadPythonSource( _RandomName(), module_file )
+ finally:
+ sys.dont_write_bytecode = old_dont_write_bytecode
+
del sys.path[ 0 ]
with _module_for_module_file_lock:
|
{"golden_diff": "diff --git a/ycmd/extra_conf_store.py b/ycmd/extra_conf_store.py\n--- a/ycmd/extra_conf_store.py\n+++ b/ycmd/extra_conf_store.py\n@@ -146,7 +146,21 @@\n # anymore, but there are a lot of old ycm_extra_conf.py files that we don't\n # want to break.\n sys.path.insert( 0, _PathToCppCompleterFolder() )\n- module = LoadPythonSource( _RandomName(), module_file )\n+\n+ # By default, the Python interpreter compiles source files into bytecode to\n+ # load them faster next time they are run. These *.pyc files are generated\n+ # along the source files prior to Python 3.2 or in a __pycache__ folder for\n+ # newer versions. We disable the generation of these files when loading\n+ # ycm_extra_conf.py files as users do not want them inside their projects.\n+ # The drawback is negligible since ycm_extra_conf.py files are generally small\n+ # files thus really fast to compile and only loaded once by editing session.\n+ old_dont_write_bytecode = sys.dont_write_bytecode\n+ sys.dont_write_bytecode = True\n+ try:\n+ module = LoadPythonSource( _RandomName(), module_file )\n+ finally:\n+ sys.dont_write_bytecode = old_dont_write_bytecode\n+\n del sys.path[ 0 ]\n \n with _module_for_module_file_lock:\n", "issue": "Processing .ycm_extra_conf.py creates __pycache__ directory\nI'm not sure if this is the intended behaviour. When YCM reads configuration creates a compiled version in `__pycache__`. I know that this behaviour can be disabled passing to `python` the `-B` argument or setting `PYTHONDONTWRITEBYTECODE=1` environmental variable. I don't want to disable global bytecode generation but I want to disable for `.ycm_extra_conf.py` because I feel it pollutes my project directory. \n\nIs there a easy/reliable way to disable it in the YCM config?\n\n", "before_files": [{"content": "# Copyright (C) 2011, 2012 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. 
If not, see <http://www.gnu.org/licenses/>.\n\n# NOTE: This module is used as a Singleton\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport os\nimport random\nimport string\nimport sys\nimport logging\nfrom threading import Lock\nfrom ycmd import user_options_store\nfrom ycmd.responses import UnknownExtraConf, YCM_EXTRA_CONF_FILENAME\nfrom ycmd.utils import LoadPythonSource, PathsToAllParentFolders\nfrom fnmatch import fnmatch\n\n\n# Singleton variables\n_module_for_module_file = {}\n_module_for_module_file_lock = Lock()\n_module_file_for_source_file = {}\n_module_file_for_source_file_lock = Lock()\n\n\ndef Reset():\n global _module_for_module_file, _module_file_for_source_file\n _module_for_module_file = {}\n _module_file_for_source_file = {}\n\n\ndef ModuleForSourceFile( filename ):\n return Load( ModuleFileForSourceFile( filename ) )\n\n\ndef ModuleFileForSourceFile( filename ):\n \"\"\"This will try all files returned by _ExtraConfModuleSourceFilesForFile in\n order and return the filename of the first module that was allowed to load.\n If no module was found or allowed to load, None is returned.\"\"\"\n\n with _module_file_for_source_file_lock:\n if filename not in _module_file_for_source_file:\n for module_file in _ExtraConfModuleSourceFilesForFile( filename ):\n if Load( module_file ):\n _module_file_for_source_file[ filename ] = module_file\n break\n\n return _module_file_for_source_file.setdefault( filename )\n\n\ndef CallGlobalExtraConfYcmCorePreloadIfExists():\n _CallGlobalExtraConfMethod( 'YcmCorePreload' )\n\n\ndef Shutdown():\n # VimClose is for the sake of backwards compatibility; it's a no-op when it\n # doesn't exist.\n _CallGlobalExtraConfMethod( 'VimClose' )\n _CallGlobalExtraConfMethod( 'Shutdown' )\n\n\ndef _CallGlobalExtraConfMethod( function_name ):\n logger = _Logger()\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if not ( global_ycm_extra_conf and\n os.path.exists( global_ycm_extra_conf ) ):\n logger.debug( 'No global extra conf, not calling method ' + function_name )\n return\n\n module = Load( global_ycm_extra_conf, force = True )\n if not module or not hasattr( module, function_name ):\n logger.debug( 'Global extra conf not loaded or no function ' +\n function_name )\n return\n\n logger.info( 'Calling global extra conf method {0} on conf file {1}'.format(\n function_name, global_ycm_extra_conf ) )\n getattr( module, function_name )()\n\n\ndef Disable( module_file ):\n \"\"\"Disables the loading of a module for the current session.\"\"\"\n with _module_for_module_file_lock:\n _module_for_module_file[ module_file ] = None\n\n\ndef _ShouldLoad( module_file ):\n \"\"\"Checks if a module is safe to be loaded. 
By default this will try to\n decide using a white-/blacklist and ask the user for confirmation as a\n fallback.\"\"\"\n\n if ( module_file == _GlobalYcmExtraConfFileLocation() or\n not user_options_store.Value( 'confirm_extra_conf' ) ):\n return True\n\n globlist = user_options_store.Value( 'extra_conf_globlist' )\n for glob in globlist:\n is_blacklisted = glob[0] == '!'\n if _MatchesGlobPattern( module_file, glob.lstrip('!') ):\n return not is_blacklisted\n\n raise UnknownExtraConf( module_file )\n\n\ndef Load( module_file, force = False ):\n \"\"\"Load and return the module contained in a file.\n Using force = True the module will be loaded regardless\n of the criteria in _ShouldLoad.\n This will return None if the module was not allowed to be loaded.\"\"\"\n\n if not module_file:\n return None\n\n if not force:\n with _module_for_module_file_lock:\n if module_file in _module_for_module_file:\n return _module_for_module_file[ module_file ]\n\n if not _ShouldLoad( module_file ):\n Disable( module_file )\n return None\n\n # This has to be here because a long time ago, the ycm_extra_conf.py files\n # used to import clang_helpers.py from the cpp folder. This is not needed\n # anymore, but there are a lot of old ycm_extra_conf.py files that we don't\n # want to break.\n sys.path.insert( 0, _PathToCppCompleterFolder() )\n module = LoadPythonSource( _RandomName(), module_file )\n del sys.path[ 0 ]\n\n with _module_for_module_file_lock:\n _module_for_module_file[ module_file ] = module\n return module\n\n\ndef _MatchesGlobPattern( filename, glob ):\n \"\"\"Returns true if a filename matches a given pattern. A '~' in glob will be\n expanded to the home directory and checking will be performed using absolute\n paths. See the documentation of fnmatch for the supported patterns.\"\"\"\n\n abspath = os.path.abspath( filename )\n return fnmatch( abspath, os.path.abspath( os.path.expanduser( glob ) ) )\n\n\ndef _ExtraConfModuleSourceFilesForFile( filename ):\n \"\"\"For a given filename, search all parent folders for YCM_EXTRA_CONF_FILENAME\n files that will compute the flags necessary to compile the file.\n If _GlobalYcmExtraConfFileLocation() exists it is returned as a fallback.\"\"\"\n\n for folder in PathsToAllParentFolders( filename ):\n candidate = os.path.join( folder, YCM_EXTRA_CONF_FILENAME )\n if os.path.exists( candidate ):\n yield candidate\n global_ycm_extra_conf = _GlobalYcmExtraConfFileLocation()\n if ( global_ycm_extra_conf\n and os.path.exists( global_ycm_extra_conf ) ):\n yield global_ycm_extra_conf\n\n\ndef _PathToCppCompleterFolder():\n \"\"\"Returns the path to the 'cpp' completer folder. This is necessary\n because ycm_extra_conf files need it on the path.\"\"\"\n return os.path.join( _DirectoryOfThisScript(), 'completers', 'cpp' )\n\n\ndef _DirectoryOfThisScript():\n return os.path.dirname( os.path.abspath( __file__ ) )\n\n\ndef _RandomName():\n \"\"\"Generates a random module name.\"\"\"\n return ''.join( random.choice( string.ascii_lowercase ) for x in range( 15 ) )\n\n\ndef _GlobalYcmExtraConfFileLocation():\n return os.path.expanduser(\n user_options_store.Value( 'global_ycm_extra_conf' ) )\n\n\ndef _Logger():\n return logging.getLogger( __name__ )\n", "path": "ycmd/extra_conf_store.py"}]}
| 2,850 | 336 |
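The golden diff above suppresses `__pycache__` creation by saving `sys.dont_write_bytecode`, setting it to `True` around the module load, and restoring it in a `finally` block. The same idea packaged as a small context manager is sketched below; the context-manager wrapper is illustrative framing, not ycmd's actual code.

```python
# Save/restore sys.dont_write_bytecode around a module load, as in the golden
# diff above, expressed as a reusable context manager.
import contextlib
import sys


@contextlib.contextmanager
def no_bytecode():
    old = sys.dont_write_bytecode
    sys.dont_write_bytecode = True  # suppress *.pyc / __pycache__ generation
    try:
        yield
    finally:
        sys.dont_write_bytecode = old  # restore whatever the interpreter had


# Example: load a config script without leaving __pycache__ behind.
# with no_bytecode():
#     import runpy
#     conf_globals = runpy.run_path(".ycm_extra_conf.py")
```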
gh_patches_debug_18735
|
rasdani/github-patches
|
git_diff
|
openfun__marsha-1060
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create XAPI statements for live video
## Feature Request
**Is your feature request related to a problem or unsupported use case? Please describe.**
When a video is a live all the existing XAPI statement are sent like a regular videos. Some events should not be sent and some data can't be computed
**Describe the solution you'd like**
Change the activity-type to `http://id.tincanapi.com/activitytype/webinar`
Send statement for those events :
- initialized
- play
- pause
- interacted
Also, do not send video length info, we can't have it. The completion threshold can not be computed too.
</issue>
<code>
[start of src/backend/marsha/core/xapi.py]
1 """XAPI module."""
2 import re
3 import uuid
4
5 from django.conf import settings
6 from django.utils import timezone
7 from django.utils.translation import to_locale
8
9 import requests
10
11
12 class XAPIStatement:
13 """Object to work on a XAPI Statement."""
14
15 statement = None
16
17 def __init__(self, video, statement, lti_user):
18 """Compute a valid xapi satement.
19
20 Parameters
21 ----------
22 video : Type[.models/videos]
23 The video object used in the xAPI statement
24
25 statement : dictionary
26 Statement containing base information to send to the LRS
27 An example of expected statement:
28 {
29 "verb": {
30 "id": "http://adlnet.gov/expapi/verbs/initialized",
31 "display": {
32 "en-US": "initialized"
33 }
34 },
35 "context": {
36 "extensions": {
37 "https://w3id.org/xapi/video/extensions/volume": 1,
38 "https://w3id.org/xapi/video/extensions/video-playback-size": "640x264",
39 }
40 }
41 }
42
43 lti_user : Type[lti.LTIUser]
44 Object representing data stored in the JWT Token and related to the user authenticated
45 with LTI
46
47 """
48 try:
49 user_id = lti_user.user.get("id")
50 except AttributeError:
51 user_id = lti_user.session_id
52
53 homepage = video.playlist.consumer_site.domain
54
55 if re.match(r"^http(s?):\/\/.*", homepage) is None:
56 homepage = f"http://{homepage}"
57
58 if "id" not in statement:
59 statement["id"] = str(uuid.uuid4())
60
61 statement["timestamp"] = timezone.now().isoformat()
62 statement["context"].update(
63 {"contextActivities": {"category": [{"id": "https://w3id.org/xapi/video"}]}}
64 )
65
66 statement["actor"] = {
67 "objectType": "Agent",
68 "account": {"name": user_id, "homePage": homepage},
69 }
70
71 statement["object"] = {
72 "definition": {
73 "type": "https://w3id.org/xapi/video/activity-type/video",
74 "name": {
75 to_locale(settings.LANGUAGE_CODE).replace("_", "-"): video.title
76 },
77 },
78 "id": "uuid://{id}".format(id=str(video.id)),
79 "objectType": "Activity",
80 }
81
82 object_extensions = {}
83 if lti_user.course.get("school_name") is not None:
84 object_extensions[
85 "https://w3id.org/xapi/acrossx/extensions/school"
86 ] = lti_user.course["school_name"]
87
88 if lti_user.course.get("course_name") is not None:
89 object_extensions[
90 "http://adlnet.gov/expapi/activities/course"
91 ] = lti_user.course["course_name"]
92
93 if lti_user.course.get("course_run") is not None:
94 object_extensions[
95 "http://adlnet.gov/expapi/activities/module"
96 ] = lti_user.course["course_run"]
97
98 if object_extensions:
99 statement["object"]["definition"]["extensions"] = object_extensions
100
101 self.statement = statement
102
103 def get_statement(self):
104 """Return the enriched statement."""
105 return self.statement
106
107
108 class XAPI:
109 """The XAPI object compute statements and send them to a LRS."""
110
111 def __init__(self, url, auth_token, xapi_version="1.0.3"):
112 """Initialize the XAPI module.
113
114 Parameters
115 ----------
116 url: string
117 The LRS endpoint to fetch
118
119 auth_token: string
120 The basic_auth token used to authenticate on the LRS
121
122 xapi_version: string
123 The xAPI version used.
124
125 """
126 self.url = url
127 self.auth_token = auth_token
128 self.xapi_version = xapi_version
129
130 def send(self, xapi_statement):
131 """Send the statement to a LRS.
132
133 Parameters
134 ----------
135 statement : Type[.XAPIStatement]
136
137 """
138 headers = {
139 "Authorization": self.auth_token,
140 "Content-Type": "application/json",
141 "X-Experience-API-Version": self.xapi_version,
142 }
143
144 response = requests.post(
145 self.url, json=xapi_statement.get_statement(), headers=headers
146 )
147
148 response.raise_for_status()
149
[end of src/backend/marsha/core/xapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/backend/marsha/core/xapi.py b/src/backend/marsha/core/xapi.py
--- a/src/backend/marsha/core/xapi.py
+++ b/src/backend/marsha/core/xapi.py
@@ -52,6 +52,12 @@
homepage = video.playlist.consumer_site.domain
+ activity_type = "https://w3id.org/xapi/video/activity-type/video"
+
+ # When the video is a live we change the activity to webinar
+ if video.live_state is not None:
+ activity_type = "http://id.tincanapi.com/activitytype/webinar"
+
if re.match(r"^http(s?):\/\/.*", homepage) is None:
homepage = f"http://{homepage}"
@@ -70,7 +76,7 @@
statement["object"] = {
"definition": {
- "type": "https://w3id.org/xapi/video/activity-type/video",
+ "type": activity_type,
"name": {
to_locale(settings.LANGUAGE_CODE).replace("_", "-"): video.title
},
|
{"golden_diff": "diff --git a/src/backend/marsha/core/xapi.py b/src/backend/marsha/core/xapi.py\n--- a/src/backend/marsha/core/xapi.py\n+++ b/src/backend/marsha/core/xapi.py\n@@ -52,6 +52,12 @@\n \n homepage = video.playlist.consumer_site.domain\n \n+ activity_type = \"https://w3id.org/xapi/video/activity-type/video\"\n+\n+ # When the video is a live we change the activity to webinar\n+ if video.live_state is not None:\n+ activity_type = \"http://id.tincanapi.com/activitytype/webinar\"\n+\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n \n@@ -70,7 +76,7 @@\n \n statement[\"object\"] = {\n \"definition\": {\n- \"type\": \"https://w3id.org/xapi/video/activity-type/video\",\n+ \"type\": activity_type,\n \"name\": {\n to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\"): video.title\n },\n", "issue": "Create XAPI statements for live video\n## Feature Request\r\n\r\n**Is your feature request related to a problem or unsupported use case? Please describe.**\r\n\r\nWhen a video is a live all the existing XAPI statement are sent like a regular videos. Some events should not be sent and some data can't be computed\r\n\r\n**Describe the solution you'd like**\r\n\r\nChange the activity-type to `http://id.tincanapi.com/activitytype/webinar`\r\nSend statement for those events : \r\n- initialized\r\n- play\r\n- pause\r\n- interacted\r\n\r\nAlso, do not send video length info, we can't have it. The completion threshold can not be computed too.\r\n\n", "before_files": [{"content": "\"\"\"XAPI module.\"\"\"\nimport re\nimport uuid\n\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.translation import to_locale\n\nimport requests\n\n\nclass XAPIStatement:\n \"\"\"Object to work on a XAPI Statement.\"\"\"\n\n statement = None\n\n def __init__(self, video, statement, lti_user):\n \"\"\"Compute a valid xapi satement.\n\n Parameters\n ----------\n video : Type[.models/videos]\n The video object used in the xAPI statement\n\n statement : dictionary\n Statement containing base information to send to the LRS\n An example of expected statement:\n {\n \"verb\": {\n \"id\": \"http://adlnet.gov/expapi/verbs/initialized\",\n \"display\": {\n \"en-US\": \"initialized\"\n }\n },\n \"context\": {\n \"extensions\": {\n \"https://w3id.org/xapi/video/extensions/volume\": 1,\n \"https://w3id.org/xapi/video/extensions/video-playback-size\": \"640x264\",\n }\n }\n }\n\n lti_user : Type[lti.LTIUser]\n Object representing data stored in the JWT Token and related to the user authenticated\n with LTI\n\n \"\"\"\n try:\n user_id = lti_user.user.get(\"id\")\n except AttributeError:\n user_id = lti_user.session_id\n\n homepage = video.playlist.consumer_site.domain\n\n if re.match(r\"^http(s?):\\/\\/.*\", homepage) is None:\n homepage = f\"http://{homepage}\"\n\n if \"id\" not in statement:\n statement[\"id\"] = str(uuid.uuid4())\n\n statement[\"timestamp\"] = timezone.now().isoformat()\n statement[\"context\"].update(\n {\"contextActivities\": {\"category\": [{\"id\": \"https://w3id.org/xapi/video\"}]}}\n )\n\n statement[\"actor\"] = {\n \"objectType\": \"Agent\",\n \"account\": {\"name\": user_id, \"homePage\": homepage},\n }\n\n statement[\"object\"] = {\n \"definition\": {\n \"type\": \"https://w3id.org/xapi/video/activity-type/video\",\n \"name\": {\n to_locale(settings.LANGUAGE_CODE).replace(\"_\", \"-\"): video.title\n },\n },\n \"id\": \"uuid://{id}\".format(id=str(video.id)),\n \"objectType\": \"Activity\",\n }\n\n object_extensions = {}\n 
if lti_user.course.get(\"school_name\") is not None:\n object_extensions[\n \"https://w3id.org/xapi/acrossx/extensions/school\"\n ] = lti_user.course[\"school_name\"]\n\n if lti_user.course.get(\"course_name\") is not None:\n object_extensions[\n \"http://adlnet.gov/expapi/activities/course\"\n ] = lti_user.course[\"course_name\"]\n\n if lti_user.course.get(\"course_run\") is not None:\n object_extensions[\n \"http://adlnet.gov/expapi/activities/module\"\n ] = lti_user.course[\"course_run\"]\n\n if object_extensions:\n statement[\"object\"][\"definition\"][\"extensions\"] = object_extensions\n\n self.statement = statement\n\n def get_statement(self):\n \"\"\"Return the enriched statement.\"\"\"\n return self.statement\n\n\nclass XAPI:\n \"\"\"The XAPI object compute statements and send them to a LRS.\"\"\"\n\n def __init__(self, url, auth_token, xapi_version=\"1.0.3\"):\n \"\"\"Initialize the XAPI module.\n\n Parameters\n ----------\n url: string\n The LRS endpoint to fetch\n\n auth_token: string\n The basic_auth token used to authenticate on the LRS\n\n xapi_version: string\n The xAPI version used.\n\n \"\"\"\n self.url = url\n self.auth_token = auth_token\n self.xapi_version = xapi_version\n\n def send(self, xapi_statement):\n \"\"\"Send the statement to a LRS.\n\n Parameters\n ----------\n statement : Type[.XAPIStatement]\n\n \"\"\"\n headers = {\n \"Authorization\": self.auth_token,\n \"Content-Type\": \"application/json\",\n \"X-Experience-API-Version\": self.xapi_version,\n }\n\n response = requests.post(\n self.url, json=xapi_statement.get_statement(), headers=headers\n )\n\n response.raise_for_status()\n", "path": "src/backend/marsha/core/xapi.py"}]}
| 1,973 | 238 |
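The golden diff above adds one branch: when `video.live_state` is not `None`, the xAPI activity type becomes the webinar type instead of the video type. A minimal restatement is below; the `Video` dataclass is a stand-in carrying only the attribute that matters, not marsha's real model.

```python
# Activity-type selection from the golden diff above, with a stand-in Video.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Video:
    live_state: Optional[str] = None


def activity_type_for(video: Video) -> str:
    # A live video is reported as a webinar activity.
    if video.live_state is not None:
        return "http://id.tincanapi.com/activitytype/webinar"
    return "https://w3id.org/xapi/video/activity-type/video"


assert activity_type_for(Video()) == "https://w3id.org/xapi/video/activity-type/video"
assert activity_type_for(Video(live_state="running")) == (
    "http://id.tincanapi.com/activitytype/webinar"
)
```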
gh_patches_debug_34117
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-1090
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
case.lt_archive
lt_archive script has several problems preventing functionality.
</issue>
<code>
[start of utils/python/CIME/case_lt_archive.py]
1 from CIME.XML.standard_module_setup import *
2 from CIME.utils import expect, does_file_have_string, append_status
3 from CIME.XML.lt_archive import LTArchive
4
5 import time
6
7 logger = logging.getLogger(__name__)
8
9 ###############################################################################
10 def case_lt_archive(case):
11 ###############################################################################
12 caseroot = case.get_value("CASEROOT")
13
14 # max number of threads needed by scripts
15 os.environ["maxthrds"] = 1
16
17 # document start
18 append_status("lt_archive starting",caseroot=caseroot,sfile="CaseStatus")
19
20 # determine status of run and short term archiving
21 runComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
22 "run SUCCESSFUL")
23 staComplete = does_file_have_string(os.path.join(caseroot, "stArchiveStatus"),
24 "st_archive_complete")
25
26 # set up envrionment vars and call the lt_archive.sh script
27 if runComplete and staComplete:
28 os.environ["DOUT_S_ROOT"] = case.get_value("DOUT_S_ROOT")
29 os.environ["DOUT_L_MSROOT"] = case.get_value("DOUT_L_MSROOT")
30 os.environ["DOUT_L_HPSS_ACCNT"] = case.get_value("DOUT_L_HPSS_ACCNT")
31
32 lid = time.strftime("%y%m%d-%H%M%S")
33 lt_archive = LTArchive(case.get_value("MACH"))
34 lt_archive_args = lt_archive.get_lt_archive_args()
35 cmd = os.path.join(caseroot, "Tools/lt_archive.sh") \
36 + lt_archive_args + "ltArchiveStatus." + lid + " 2>&1"
37 run_cmd_no_fail(cmd, from_dir=caseroot)
38 else:
39 expect(False,
40 "lt_archive: run or st_archive is not yet complete or was not successful."
41 "Unable to perform long term archive...")
42
43 # document completion
44 append_status("lt_archive completed" ,caseroot=caseroot, sfile="CaseStatus")
45
46 return True
47
[end of utils/python/CIME/case_lt_archive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/utils/python/CIME/case_lt_archive.py b/utils/python/CIME/case_lt_archive.py
--- a/utils/python/CIME/case_lt_archive.py
+++ b/utils/python/CIME/case_lt_archive.py
@@ -12,17 +12,16 @@
caseroot = case.get_value("CASEROOT")
# max number of threads needed by scripts
- os.environ["maxthrds"] = 1
+ os.environ["maxthrds"] = "1"
# document start
append_status("lt_archive starting",caseroot=caseroot,sfile="CaseStatus")
# determine status of run and short term archiving
runComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
- "run SUCCESSFUL")
- staComplete = does_file_have_string(os.path.join(caseroot, "stArchiveStatus"),
- "st_archive_complete")
-
+ "Run SUCCESSFUL")
+ staComplete = does_file_have_string(os.path.join(caseroot, "CaseStatus"),
+ "st_archiving completed")
# set up envrionment vars and call the lt_archive.sh script
if runComplete and staComplete:
os.environ["DOUT_S_ROOT"] = case.get_value("DOUT_S_ROOT")
@@ -32,10 +31,13 @@
lid = time.strftime("%y%m%d-%H%M%S")
lt_archive = LTArchive(case.get_value("MACH"))
lt_archive_args = lt_archive.get_lt_archive_args()
- cmd = os.path.join(caseroot, "Tools/lt_archive.sh") \
+ if lt_archive_args is None:
+ lt_archive_args = " "
+ cmd = os.path.join(caseroot, "Tools", "lt_archive.sh") \
+ lt_archive_args + "ltArchiveStatus." + lid + " 2>&1"
run_cmd_no_fail(cmd, from_dir=caseroot)
else:
+ logger.warn("runComplete %s staComplete %s"%(runComplete, staComplete))
expect(False,
"lt_archive: run or st_archive is not yet complete or was not successful."
"Unable to perform long term archive...")
|
{"golden_diff": "diff --git a/utils/python/CIME/case_lt_archive.py b/utils/python/CIME/case_lt_archive.py\n--- a/utils/python/CIME/case_lt_archive.py\n+++ b/utils/python/CIME/case_lt_archive.py\n@@ -12,17 +12,16 @@\n caseroot = case.get_value(\"CASEROOT\")\n \n # max number of threads needed by scripts\n- os.environ[\"maxthrds\"] = 1\n+ os.environ[\"maxthrds\"] = \"1\"\n \n # document start\n append_status(\"lt_archive starting\",caseroot=caseroot,sfile=\"CaseStatus\")\n \n # determine status of run and short term archiving\n runComplete = does_file_have_string(os.path.join(caseroot, \"CaseStatus\"),\n- \"run SUCCESSFUL\")\n- staComplete = does_file_have_string(os.path.join(caseroot, \"stArchiveStatus\"),\n- \"st_archive_complete\")\n-\n+ \"Run SUCCESSFUL\")\n+ staComplete = does_file_have_string(os.path.join(caseroot, \"CaseStatus\"),\n+ \"st_archiving completed\")\n # set up envrionment vars and call the lt_archive.sh script\n if runComplete and staComplete:\n os.environ[\"DOUT_S_ROOT\"] = case.get_value(\"DOUT_S_ROOT\")\n@@ -32,10 +31,13 @@\n lid = time.strftime(\"%y%m%d-%H%M%S\")\n lt_archive = LTArchive(case.get_value(\"MACH\"))\n lt_archive_args = lt_archive.get_lt_archive_args()\n- cmd = os.path.join(caseroot, \"Tools/lt_archive.sh\") \\\n+ if lt_archive_args is None:\n+ lt_archive_args = \" \"\n+ cmd = os.path.join(caseroot, \"Tools\", \"lt_archive.sh\") \\\n + lt_archive_args + \"ltArchiveStatus.\" + lid + \" 2>&1\"\n run_cmd_no_fail(cmd, from_dir=caseroot)\n else:\n+ logger.warn(\"runComplete %s staComplete %s\"%(runComplete, staComplete))\n expect(False,\n \"lt_archive: run or st_archive is not yet complete or was not successful.\"\n \"Unable to perform long term archive...\")\n", "issue": "case.lt_archive\nlt_archive script has several problems preventing functionality. 
\n", "before_files": [{"content": "from CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, does_file_have_string, append_status\nfrom CIME.XML.lt_archive import LTArchive\n\nimport time\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef case_lt_archive(case):\n###############################################################################\n caseroot = case.get_value(\"CASEROOT\")\n\n # max number of threads needed by scripts\n os.environ[\"maxthrds\"] = 1\n\n # document start\n append_status(\"lt_archive starting\",caseroot=caseroot,sfile=\"CaseStatus\")\n\n # determine status of run and short term archiving\n runComplete = does_file_have_string(os.path.join(caseroot, \"CaseStatus\"),\n \"run SUCCESSFUL\")\n staComplete = does_file_have_string(os.path.join(caseroot, \"stArchiveStatus\"),\n \"st_archive_complete\")\n\n # set up envrionment vars and call the lt_archive.sh script\n if runComplete and staComplete:\n os.environ[\"DOUT_S_ROOT\"] = case.get_value(\"DOUT_S_ROOT\")\n os.environ[\"DOUT_L_MSROOT\"] = case.get_value(\"DOUT_L_MSROOT\")\n os.environ[\"DOUT_L_HPSS_ACCNT\"] = case.get_value(\"DOUT_L_HPSS_ACCNT\")\n\n lid = time.strftime(\"%y%m%d-%H%M%S\")\n lt_archive = LTArchive(case.get_value(\"MACH\"))\n lt_archive_args = lt_archive.get_lt_archive_args()\n cmd = os.path.join(caseroot, \"Tools/lt_archive.sh\") \\\n + lt_archive_args + \"ltArchiveStatus.\" + lid + \" 2>&1\"\n run_cmd_no_fail(cmd, from_dir=caseroot)\n else:\n expect(False,\n \"lt_archive: run or st_archive is not yet complete or was not successful.\"\n \"Unable to perform long term archive...\")\n\n # document completion\n append_status(\"lt_archive completed\" ,caseroot=caseroot, sfile=\"CaseStatus\")\n\n return True\n", "path": "utils/python/CIME/case_lt_archive.py"}]}
| 1,076 | 481 |
gh_patches_debug_14320 | rasdani/github-patches | git_diff | dynaconf__dynaconf-875 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug][Documentation] Exporting: write() got an unexpected keyword argument 'merge'
**Describe the bug**
Following the example in the documentation on [exporting](https://www.dynaconf.com/advanced/#exporting) Dynaconf data to a file raises an exception with the `merge` argument.
**To Reproduce**
~~~Python
loaders.write("/a/b/c", DynaBox(config).to_dict(), merge=False)
~~~
**Expected behavior**
The file should have been written
**Actual Behavior**
`TypeError: write() got an unexpected keyword argument 'merge'`
Just a quick documentation fix,
thanks !
</issue>
<code>
[start of dynaconf/loaders/__init__.py]
1 from __future__ import annotations
2
3 import importlib
4 import os
5
6 from dynaconf import constants as ct
7 from dynaconf import default_settings
8 from dynaconf.loaders import ini_loader
9 from dynaconf.loaders import json_loader
10 from dynaconf.loaders import py_loader
11 from dynaconf.loaders import toml_loader
12 from dynaconf.loaders import yaml_loader
13 from dynaconf.utils import deduplicate
14 from dynaconf.utils import ensure_a_list
15 from dynaconf.utils.boxing import DynaBox
16 from dynaconf.utils.files import get_local_filename
17 from dynaconf.utils.parse_conf import false_values
18
19
20 def default_loader(obj, defaults=None):
21 """Loads default settings and check if there are overridings
22 exported as environment variables"""
23 defaults = defaults or {}
24 default_settings_values = {
25 key: value
26 for key, value in default_settings.__dict__.items() # noqa
27 if key.isupper()
28 }
29
30 all_keys = deduplicate(
31 list(defaults.keys()) + list(default_settings_values.keys())
32 )
33
34 for key in all_keys:
35 if not obj.exists(key):
36 value = defaults.get(key, default_settings_values.get(key))
37 obj.set(key, value)
38
39 # start dotenv to get default env vars from there
40 # check overrides in env vars
41 if obj.get("load_dotenv") is True:
42 default_settings.start_dotenv(obj)
43
44 # Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED
45 # Example: Flask and Django Extensions
46 env_switcher = defaults.get(
47 "ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
48 )
49
50 for key in all_keys:
51 if key not in default_settings_values.keys():
52 continue
53
54 env_value = obj.get_environ(
55 env_switcher if key == "ENV_FOR_DYNACONF" else key,
56 default="_not_found",
57 )
58
59 if env_value != "_not_found":
60 obj.set(key, env_value, tomlfy=True)
61
62
63 def _run_hook_module(hook, hook_module, obj, key=None):
64 """Run the hook function from the settings obj.
65
66 given a hook name, a hook_module and a settings object
67 load the function and execute if found.
68 """
69 if hook in obj._loaded_hooks.get(hook_module.__file__, {}):
70 # already loaded
71 return
72
73 if hook_module and getattr(hook_module, "_error", False):
74 if not isinstance(hook_module._error, FileNotFoundError):
75 raise hook_module._error
76
77 hook_func = getattr(hook_module, hook, None)
78 if hook_func:
79 hook_dict = hook_func(obj.dynaconf.clone())
80 if hook_dict:
81 merge = hook_dict.pop(
82 "dynaconf_merge", hook_dict.pop("DYNACONF_MERGE", False)
83 )
84 if key and key in hook_dict:
85 obj.set(key, hook_dict[key], tomlfy=False, merge=merge)
86 elif not key:
87 obj.update(hook_dict, tomlfy=False, merge=merge)
88 obj._loaded_hooks[hook_module.__file__][hook] = hook_dict
89
90
91 def execute_hooks(
92 hook, obj, env=None, silent=True, key=None, modules=None, files=None
93 ):
94 """Execute dynaconf_hooks from module or filepath."""
95 if hook not in ["post"]:
96 raise ValueError(f"hook {hook} not supported yet.")
97
98 # try to load hooks using python module __name__
99 modules = modules or obj._loaded_py_modules
100 for loaded_module in modules:
101 hook_module_name = ".".join(
102 loaded_module.split(".")[:-1] + ["dynaconf_hooks"]
103 )
104 try:
105 hook_module = importlib.import_module(hook_module_name)
106 except (ImportError, TypeError):
107 # There was no hook on the same path as a python module
108 continue
109 else:
110 _run_hook_module(
111 hook=hook,
112 hook_module=hook_module,
113 obj=obj,
114 key=key,
115 )
116
117 # Try to load from python filename path
118 files = files or obj._loaded_files
119 for loaded_file in files:
120 hook_file = os.path.join(
121 os.path.dirname(loaded_file), "dynaconf_hooks.py"
122 )
123 hook_module = py_loader.import_from_filename(
124 obj, hook_file, silent=silent
125 )
126 if not hook_module:
127 # There was no hook on the same path as a python file
128 continue
129 _run_hook_module(
130 hook=hook,
131 hook_module=hook_module,
132 obj=obj,
133 key=key,
134 )
135
136
137 def settings_loader(
138 obj, settings_module=None, env=None, silent=True, key=None, filename=None
139 ):
140 """Loads from defined settings module
141
142 :param obj: A dynaconf instance
143 :param settings_module: A path or a list of paths e.g settings.toml
144 :param env: Env to look for data defaults: development
145 :param silent: Boolean to raise loading errors
146 :param key: Load a single key if provided
147 :param filename: optional filename to override the settings_module
148 """
149 if filename is None:
150 settings_module = settings_module or obj.settings_module
151 if not settings_module: # pragma: no cover
152 return
153 files = ensure_a_list(settings_module)
154 else:
155 files = ensure_a_list(filename)
156
157 files.extend(ensure_a_list(obj.get("SECRETS_FOR_DYNACONF", None)))
158
159 found_files = []
160 modules_names = []
161 for item in files:
162 item = str(item) # Ensure str in case of LocalPath/Path is passed.
163 if item.endswith(ct.ALL_EXTENSIONS + (".py",)):
164 p_root = obj._root_path or (
165 os.path.dirname(found_files[0]) if found_files else None
166 )
167 found = obj.find_file(item, project_root=p_root)
168 if found:
169 found_files.append(found)
170 else:
171 # a bare python module name w/o extension
172 modules_names.append(item)
173
174 enabled_core_loaders = [
175 item.upper() for item in obj.get("CORE_LOADERS_FOR_DYNACONF") or []
176 ]
177
178 # add `.local.` to found_files list to search for local files.
179 found_files.extend(
180 [
181 get_local_filename(item)
182 for item in found_files
183 if ".local." not in str(item)
184 ]
185 )
186
187 for mod_file in modules_names + found_files:
188 # can be set to multiple files settings.py,settings.yaml,...
189
190 # Cascade all loaders
191 loaders = [
192 {"ext": ct.YAML_EXTENSIONS, "name": "YAML", "loader": yaml_loader},
193 {"ext": ct.TOML_EXTENSIONS, "name": "TOML", "loader": toml_loader},
194 {"ext": ct.INI_EXTENSIONS, "name": "INI", "loader": ini_loader},
195 {"ext": ct.JSON_EXTENSIONS, "name": "JSON", "loader": json_loader},
196 ]
197
198 for loader in loaders:
199 if loader["name"] not in enabled_core_loaders:
200 continue
201
202 if mod_file.endswith(loader["ext"]):
203 loader["loader"].load(
204 obj, filename=mod_file, env=env, silent=silent, key=key
205 )
206 continue
207
208 if mod_file.endswith(ct.ALL_EXTENSIONS):
209 continue
210
211 if "PY" not in enabled_core_loaders:
212 # pyloader is disabled
213 continue
214
215 # must be Python file or module
216 # load from default defined module settings.py or .secrets.py if exists
217 py_loader.load(obj, mod_file, key=key)
218
219 # load from the current env e.g: development_settings.py
220 env = env or obj.current_env
221 if mod_file.endswith(".py"):
222 if ".secrets.py" == mod_file:
223 tmpl = ".{0}_{1}{2}"
224 mod_file = "secrets.py"
225 else:
226 tmpl = "{0}_{1}{2}"
227
228 dirname = os.path.dirname(mod_file)
229 filename, extension = os.path.splitext(os.path.basename(mod_file))
230 new_filename = tmpl.format(env.lower(), filename, extension)
231 env_mod_file = os.path.join(dirname, new_filename)
232 global_filename = tmpl.format("global", filename, extension)
233 global_mod_file = os.path.join(dirname, global_filename)
234 else:
235 env_mod_file = f"{env.lower()}_{mod_file}"
236 global_mod_file = f"global_{mod_file}"
237
238 py_loader.load(
239 obj,
240 env_mod_file,
241 identifier=f"py_{env.upper()}",
242 silent=True,
243 key=key,
244 )
245
246 # load from global_settings.py
247 py_loader.load(
248 obj, global_mod_file, identifier="py_global", silent=True, key=key
249 )
250
251
252 def enable_external_loaders(obj):
253 """Enable external service loaders like `VAULT_` and `REDIS_`
254 looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`
255 """
256 for name, loader in ct.EXTERNAL_LOADERS.items():
257 enabled = getattr(obj, f"{name.upper()}_ENABLED_FOR_DYNACONF", False)
258 if (
259 enabled
260 and enabled not in false_values
261 and loader not in obj.LOADERS_FOR_DYNACONF
262 ): # noqa
263 obj.LOADERS_FOR_DYNACONF.insert(0, loader)
264
265
266 def write(filename, data, env=None):
267 """Writes `data` to `filename` infers format by file extension."""
268 loader_name = f"{filename.rpartition('.')[-1]}_loader"
269 loader = globals().get(loader_name)
270 if not loader:
271 raise OSError(f"{loader_name} cannot be found.")
272
273 data = DynaBox(data, box_settings={}).to_dict()
274 if loader is not py_loader and env and env not in data:
275 data = {env: data}
276
277 loader.write(filename, data, merge=False)
278
[end of dynaconf/loaders/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dynaconf/loaders/__init__.py b/dynaconf/loaders/__init__.py
--- a/dynaconf/loaders/__init__.py
+++ b/dynaconf/loaders/__init__.py
@@ -263,7 +263,7 @@
obj.LOADERS_FOR_DYNACONF.insert(0, loader)
-def write(filename, data, env=None):
+def write(filename, data, env=None, merge=False):
"""Writes `data` to `filename` infers format by file extension."""
loader_name = f"{filename.rpartition('.')[-1]}_loader"
loader = globals().get(loader_name)
@@ -274,4 +274,4 @@
if loader is not py_loader and env and env not in data:
data = {env: data}
- loader.write(filename, data, merge=False)
+ loader.write(filename, data, merge=merge)
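For context, a minimal sketch of how the patched helper could be called once `merge` is accepted; the target path and settings data below are made up for illustration:

```python
from dynaconf import loaders

config = {"name": "example", "debug": True}  # hypothetical settings data

# With the patch applied, the documented call no longer raises TypeError;
# `merge` is simply forwarded to the underlying format loader, and `write`
# wraps plain dicts in a DynaBox internally before dumping them.
loaders.write("settings.toml", config, merge=False)
```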
|
{"golden_diff": "diff --git a/dynaconf/loaders/__init__.py b/dynaconf/loaders/__init__.py\n--- a/dynaconf/loaders/__init__.py\n+++ b/dynaconf/loaders/__init__.py\n@@ -263,7 +263,7 @@\n obj.LOADERS_FOR_DYNACONF.insert(0, loader)\n \n \n-def write(filename, data, env=None):\n+def write(filename, data, env=None, merge=False):\n \"\"\"Writes `data` to `filename` infers format by file extension.\"\"\"\n loader_name = f\"{filename.rpartition('.')[-1]}_loader\"\n loader = globals().get(loader_name)\n@@ -274,4 +274,4 @@\n if loader is not py_loader and env and env not in data:\n data = {env: data}\n \n- loader.write(filename, data, merge=False)\n+ loader.write(filename, data, merge=merge)\n", "issue": "[bug][Documentation] Exporting: write() got an unexpected keyword argument 'merge'\n**Describe the bug**\r\nFollowing the example on the documentation to [export](https://www.dynaconf.com/advanced/#exporting) Dynaconf data to a file raises an exception with the `merge` argument\r\n\r\n**To Reproduce**\r\n~~~Python\r\nloaders.write(\"/a/b/c\", DynaBox(config).to_dict(), merge=False)\r\n~~~\r\n\r\n**Expected behavior**\r\nThe file should have been written\r\n\r\n**Actual Behavior**\r\n`TypeError: write() got an unexpected keyword argument 'merge'`\r\n\r\nJust a quick documentation fix,\r\nthanks !\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport importlib\nimport os\n\nfrom dynaconf import constants as ct\nfrom dynaconf import default_settings\nfrom dynaconf.loaders import ini_loader\nfrom dynaconf.loaders import json_loader\nfrom dynaconf.loaders import py_loader\nfrom dynaconf.loaders import toml_loader\nfrom dynaconf.loaders import yaml_loader\nfrom dynaconf.utils import deduplicate\nfrom dynaconf.utils import ensure_a_list\nfrom dynaconf.utils.boxing import DynaBox\nfrom dynaconf.utils.files import get_local_filename\nfrom dynaconf.utils.parse_conf import false_values\n\n\ndef default_loader(obj, defaults=None):\n \"\"\"Loads default settings and check if there are overridings\n exported as environment variables\"\"\"\n defaults = defaults or {}\n default_settings_values = {\n key: value\n for key, value in default_settings.__dict__.items() # noqa\n if key.isupper()\n }\n\n all_keys = deduplicate(\n list(defaults.keys()) + list(default_settings_values.keys())\n )\n\n for key in all_keys:\n if not obj.exists(key):\n value = defaults.get(key, default_settings_values.get(key))\n obj.set(key, value)\n\n # start dotenv to get default env vars from there\n # check overrides in env vars\n if obj.get(\"load_dotenv\") is True:\n default_settings.start_dotenv(obj)\n\n # Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED\n # Example: Flask and Django Extensions\n env_switcher = defaults.get(\n \"ENV_SWITCHER_FOR_DYNACONF\", \"ENV_FOR_DYNACONF\"\n )\n\n for key in all_keys:\n if key not in default_settings_values.keys():\n continue\n\n env_value = obj.get_environ(\n env_switcher if key == \"ENV_FOR_DYNACONF\" else key,\n default=\"_not_found\",\n )\n\n if env_value != \"_not_found\":\n obj.set(key, env_value, tomlfy=True)\n\n\ndef _run_hook_module(hook, hook_module, obj, key=None):\n \"\"\"Run the hook function from the settings obj.\n\n given a hook name, a hook_module and a settings object\n load the function and execute if found.\n \"\"\"\n if hook in obj._loaded_hooks.get(hook_module.__file__, {}):\n # already loaded\n return\n\n if hook_module and getattr(hook_module, \"_error\", False):\n if not isinstance(hook_module._error, FileNotFoundError):\n raise 
hook_module._error\n\n hook_func = getattr(hook_module, hook, None)\n if hook_func:\n hook_dict = hook_func(obj.dynaconf.clone())\n if hook_dict:\n merge = hook_dict.pop(\n \"dynaconf_merge\", hook_dict.pop(\"DYNACONF_MERGE\", False)\n )\n if key and key in hook_dict:\n obj.set(key, hook_dict[key], tomlfy=False, merge=merge)\n elif not key:\n obj.update(hook_dict, tomlfy=False, merge=merge)\n obj._loaded_hooks[hook_module.__file__][hook] = hook_dict\n\n\ndef execute_hooks(\n hook, obj, env=None, silent=True, key=None, modules=None, files=None\n):\n \"\"\"Execute dynaconf_hooks from module or filepath.\"\"\"\n if hook not in [\"post\"]:\n raise ValueError(f\"hook {hook} not supported yet.\")\n\n # try to load hooks using python module __name__\n modules = modules or obj._loaded_py_modules\n for loaded_module in modules:\n hook_module_name = \".\".join(\n loaded_module.split(\".\")[:-1] + [\"dynaconf_hooks\"]\n )\n try:\n hook_module = importlib.import_module(hook_module_name)\n except (ImportError, TypeError):\n # There was no hook on the same path as a python module\n continue\n else:\n _run_hook_module(\n hook=hook,\n hook_module=hook_module,\n obj=obj,\n key=key,\n )\n\n # Try to load from python filename path\n files = files or obj._loaded_files\n for loaded_file in files:\n hook_file = os.path.join(\n os.path.dirname(loaded_file), \"dynaconf_hooks.py\"\n )\n hook_module = py_loader.import_from_filename(\n obj, hook_file, silent=silent\n )\n if not hook_module:\n # There was no hook on the same path as a python file\n continue\n _run_hook_module(\n hook=hook,\n hook_module=hook_module,\n obj=obj,\n key=key,\n )\n\n\ndef settings_loader(\n obj, settings_module=None, env=None, silent=True, key=None, filename=None\n):\n \"\"\"Loads from defined settings module\n\n :param obj: A dynaconf instance\n :param settings_module: A path or a list of paths e.g settings.toml\n :param env: Env to look for data defaults: development\n :param silent: Boolean to raise loading errors\n :param key: Load a single key if provided\n :param filename: optional filename to override the settings_module\n \"\"\"\n if filename is None:\n settings_module = settings_module or obj.settings_module\n if not settings_module: # pragma: no cover\n return\n files = ensure_a_list(settings_module)\n else:\n files = ensure_a_list(filename)\n\n files.extend(ensure_a_list(obj.get(\"SECRETS_FOR_DYNACONF\", None)))\n\n found_files = []\n modules_names = []\n for item in files:\n item = str(item) # Ensure str in case of LocalPath/Path is passed.\n if item.endswith(ct.ALL_EXTENSIONS + (\".py\",)):\n p_root = obj._root_path or (\n os.path.dirname(found_files[0]) if found_files else None\n )\n found = obj.find_file(item, project_root=p_root)\n if found:\n found_files.append(found)\n else:\n # a bare python module name w/o extension\n modules_names.append(item)\n\n enabled_core_loaders = [\n item.upper() for item in obj.get(\"CORE_LOADERS_FOR_DYNACONF\") or []\n ]\n\n # add `.local.` to found_files list to search for local files.\n found_files.extend(\n [\n get_local_filename(item)\n for item in found_files\n if \".local.\" not in str(item)\n ]\n )\n\n for mod_file in modules_names + found_files:\n # can be set to multiple files settings.py,settings.yaml,...\n\n # Cascade all loaders\n loaders = [\n {\"ext\": ct.YAML_EXTENSIONS, \"name\": \"YAML\", \"loader\": yaml_loader},\n {\"ext\": ct.TOML_EXTENSIONS, \"name\": \"TOML\", \"loader\": toml_loader},\n {\"ext\": ct.INI_EXTENSIONS, \"name\": \"INI\", \"loader\": ini_loader},\n {\"ext\": 
ct.JSON_EXTENSIONS, \"name\": \"JSON\", \"loader\": json_loader},\n ]\n\n for loader in loaders:\n if loader[\"name\"] not in enabled_core_loaders:\n continue\n\n if mod_file.endswith(loader[\"ext\"]):\n loader[\"loader\"].load(\n obj, filename=mod_file, env=env, silent=silent, key=key\n )\n continue\n\n if mod_file.endswith(ct.ALL_EXTENSIONS):\n continue\n\n if \"PY\" not in enabled_core_loaders:\n # pyloader is disabled\n continue\n\n # must be Python file or module\n # load from default defined module settings.py or .secrets.py if exists\n py_loader.load(obj, mod_file, key=key)\n\n # load from the current env e.g: development_settings.py\n env = env or obj.current_env\n if mod_file.endswith(\".py\"):\n if \".secrets.py\" == mod_file:\n tmpl = \".{0}_{1}{2}\"\n mod_file = \"secrets.py\"\n else:\n tmpl = \"{0}_{1}{2}\"\n\n dirname = os.path.dirname(mod_file)\n filename, extension = os.path.splitext(os.path.basename(mod_file))\n new_filename = tmpl.format(env.lower(), filename, extension)\n env_mod_file = os.path.join(dirname, new_filename)\n global_filename = tmpl.format(\"global\", filename, extension)\n global_mod_file = os.path.join(dirname, global_filename)\n else:\n env_mod_file = f\"{env.lower()}_{mod_file}\"\n global_mod_file = f\"global_{mod_file}\"\n\n py_loader.load(\n obj,\n env_mod_file,\n identifier=f\"py_{env.upper()}\",\n silent=True,\n key=key,\n )\n\n # load from global_settings.py\n py_loader.load(\n obj, global_mod_file, identifier=\"py_global\", silent=True, key=key\n )\n\n\ndef enable_external_loaders(obj):\n \"\"\"Enable external service loaders like `VAULT_` and `REDIS_`\n looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`\n \"\"\"\n for name, loader in ct.EXTERNAL_LOADERS.items():\n enabled = getattr(obj, f\"{name.upper()}_ENABLED_FOR_DYNACONF\", False)\n if (\n enabled\n and enabled not in false_values\n and loader not in obj.LOADERS_FOR_DYNACONF\n ): # noqa\n obj.LOADERS_FOR_DYNACONF.insert(0, loader)\n\n\ndef write(filename, data, env=None):\n \"\"\"Writes `data` to `filename` infers format by file extension.\"\"\"\n loader_name = f\"{filename.rpartition('.')[-1]}_loader\"\n loader = globals().get(loader_name)\n if not loader:\n raise OSError(f\"{loader_name} cannot be found.\")\n\n data = DynaBox(data, box_settings={}).to_dict()\n if loader is not py_loader and env and env not in data:\n data = {env: data}\n\n loader.write(filename, data, merge=False)\n", "path": "dynaconf/loaders/__init__.py"}]}
| 3,577 | 205 |
gh_patches_debug_34113 | rasdani/github-patches | git_diff | pyca__cryptography-1777 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: initializer for ctype 'HMAC_CTX *
I sometimes get the below exception when using Fernet within an Apache mod_wsgi runtime. It does not occur when using a non-Apache environment (Eventlet).
Is this cryptography module thread safe? Or is there maybe an issue in how I used the module?
TypeError: initializer for ctype 'HMAC_CTX *' must be a pointer to same type, not cdata 'HMAC_CTX *'
Cryptography version: 0.7.2
Sample code:
```
def __init__(self):
    key = self._load_key()
    self.fernet = Fernet(key)

def encode(self, input, errors='strict'):
    return (self.fernet.encrypt(input), len(input))

def decode(self, input, errors='strict'):
    return (self.fernet.decrypt(input), len(input))

def _load_key(self):
    # Load the key from a file
    username = pwd.getpwuid(os.getuid()).pw_name
    filename = '/etc/fernet/%s.key' % username
    try:
        with open(filename) as f:
            key = f.read()
            return key
    except IOError:
        raise UnicodeEncodeError()
```
```
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File "/usr/lib/python2.7/encodings/fernet.py", line 19, in decode
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi return (self.fernet.decrypt(input), len(input))
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File "/usr/local/lib/python2.7/dist-packages/cryptography/fernet.py", line 96, in decrypt
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File "/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/primitives/hmac.py", line 32, in __init__
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi self._ctx = self._backend.create_hmac_ctx(key, self.algorithm)
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File "/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/backends/multibackend.py", line 99, in create_hmac_ctx
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi return b.create_hmac_ctx(key, algorithm)
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File "/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/backends/openssl/backend.py", line 140, in create_hmac_ctx
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi return _HMACContext(self, key, algorithm)
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File "/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/backends/openssl/hmac.py", line 24, in __init__
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi self._backend._lib.HMAC_CTX_init(ctx)
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi TypeError: initializer for ctype 'HMAC_CTX *' must be a pointer to same type, not cdata 'HMAC_CTX *'
2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi
```
</issue>
<code>
[start of src/cryptography/hazmat/bindings/commoncrypto/binding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from cryptography.hazmat.bindings.utils import (
8 build_ffi_for_binding, load_library_for_binding,
9 )
10
11
12 class Binding(object):
13 """
14 CommonCrypto API wrapper.
15 """
16 _module_prefix = "cryptography.hazmat.bindings.commoncrypto."
17 _modules = [
18 "cf",
19 "common_digest",
20 "common_hmac",
21 "common_key_derivation",
22 "common_cryptor",
23 "common_symmetric_key_wrap",
24 "secimport",
25 "secitem",
26 "seckey",
27 "seckeychain",
28 "sectransform",
29 ]
30
31 ffi = build_ffi_for_binding(
32 module_prefix=_module_prefix,
33 modules=_modules,
34 extra_link_args=[
35 "-framework", "Security", "-framework", "CoreFoundation"
36 ],
37 )
38 lib = None
39
40 def __init__(self):
41 self._ensure_ffi_initialized()
42
43 @classmethod
44 def _ensure_ffi_initialized(cls):
45 if cls.lib is not None:
46 return
47
48 cls.lib = load_library_for_binding(
49 cls.ffi,
50 module_prefix=cls._module_prefix,
51 modules=cls._modules,
52 )
53
[end of src/cryptography/hazmat/bindings/commoncrypto/binding.py]
[start of src/cryptography/hazmat/bindings/openssl/binding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import os
8 import sys
9 import threading
10
11 from cryptography.hazmat.bindings.utils import (
12 build_ffi_for_binding, load_library_for_binding,
13 )
14
15
16 _OSX_PRE_INCLUDE = """
17 #ifdef __APPLE__
18 #include <AvailabilityMacros.h>
19 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
20 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
21 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
22 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
23 #endif
24 """
25
26 _OSX_POST_INCLUDE = """
27 #ifdef __APPLE__
28 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
29 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
30 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
31 #endif
32 """
33
34
35 def _get_libraries(platform):
36 # OpenSSL goes by a different library name on different operating systems.
37 if platform != "win32":
38 # In some circumstances, the order in which these libs are
39 # specified on the linker command-line is significant;
40 # libssl must come before libcrypto
41 # (http://marc.info/?l=openssl-users&m=135361825921871)
42 return ["ssl", "crypto"]
43 else:
44 link_type = os.environ.get("PYCA_WINDOWS_LINK_TYPE", "static")
45 return _get_windows_libraries(link_type)
46
47
48 def _get_windows_libraries(link_type):
49 if link_type == "dynamic":
50 return ["libeay32", "ssleay32", "advapi32"]
51 elif link_type == "static" or link_type == "":
52 return ["libeay32mt", "ssleay32mt", "advapi32",
53 "crypt32", "gdi32", "user32", "ws2_32"]
54 else:
55 raise ValueError(
56 "PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'"
57 )
58
59
60 class Binding(object):
61 """
62 OpenSSL API wrapper.
63 """
64 _module_prefix = "cryptography.hazmat.bindings.openssl."
65 _modules = [
66 "aes",
67 "asn1",
68 "bignum",
69 "bio",
70 "cmac",
71 "cms",
72 "conf",
73 "crypto",
74 "dh",
75 "dsa",
76 "ec",
77 "ecdh",
78 "ecdsa",
79 "engine",
80 "err",
81 "evp",
82 "hmac",
83 "nid",
84 "objects",
85 "opensslv",
86 "osrandom_engine",
87 "pem",
88 "pkcs7",
89 "pkcs12",
90 "rand",
91 "rsa",
92 "ssl",
93 "x509",
94 "x509name",
95 "x509v3",
96 "x509_vfy"
97 ]
98
99 _locks = None
100 _lock_cb_handle = None
101 _lock_init_lock = threading.Lock()
102
103 ffi = build_ffi_for_binding(
104 module_prefix=_module_prefix,
105 modules=_modules,
106 pre_include=_OSX_PRE_INCLUDE,
107 post_include=_OSX_POST_INCLUDE,
108 libraries=_get_libraries(sys.platform)
109 )
110 lib = None
111
112 def __init__(self):
113 self._ensure_ffi_initialized()
114
115 @classmethod
116 def _ensure_ffi_initialized(cls):
117 if cls.lib is not None:
118 return
119
120 cls.lib = load_library_for_binding(
121 cls.ffi,
122 cls._module_prefix,
123 cls._modules,
124 )
125
126 res = cls.lib.Cryptography_add_osrandom_engine()
127 assert res != 0
128
129 @classmethod
130 def init_static_locks(cls):
131 with cls._lock_init_lock:
132 cls._ensure_ffi_initialized()
133
134 if not cls._lock_cb_handle:
135 cls._lock_cb_handle = cls.ffi.callback(
136 "void(int, int, const char *, int)",
137 cls._lock_cb
138 )
139
140 # Use Python's implementation if available, importing _ssl triggers
141 # the setup for this.
142 __import__("_ssl")
143
144 if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
145 return
146
147 # If nothing else has setup a locking callback already, we set up
148 # our own
149 num_locks = cls.lib.CRYPTO_num_locks()
150 cls._locks = [threading.Lock() for n in range(num_locks)]
151
152 cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
153
154 @classmethod
155 def _lock_cb(cls, mode, n, file, line):
156 lock = cls._locks[n]
157
158 if mode & cls.lib.CRYPTO_LOCK:
159 lock.acquire()
160 elif mode & cls.lib.CRYPTO_UNLOCK:
161 lock.release()
162 else:
163 raise RuntimeError(
164 "Unknown lock mode {0}: lock={1}, file={2}, line={3}.".format(
165 mode, n, file, line
166 )
167 )
168
[end of src/cryptography/hazmat/bindings/openssl/binding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cryptography/hazmat/bindings/commoncrypto/binding.py b/src/cryptography/hazmat/bindings/commoncrypto/binding.py
--- a/src/cryptography/hazmat/bindings/commoncrypto/binding.py
+++ b/src/cryptography/hazmat/bindings/commoncrypto/binding.py
@@ -4,6 +4,8 @@
from __future__ import absolute_import, division, print_function
+import threading
+
from cryptography.hazmat.bindings.utils import (
build_ffi_for_binding, load_library_for_binding,
)
@@ -36,6 +38,7 @@
],
)
lib = None
+ _init_lock = threading.Lock()
def __init__(self):
self._ensure_ffi_initialized()
@@ -45,8 +48,10 @@
if cls.lib is not None:
return
- cls.lib = load_library_for_binding(
- cls.ffi,
- module_prefix=cls._module_prefix,
- modules=cls._modules,
- )
+ with cls._init_lock:
+ if cls.lib is None:
+ cls.lib = load_library_for_binding(
+ cls.ffi,
+ module_prefix=cls._module_prefix,
+ modules=cls._modules,
+ )
diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -98,6 +98,7 @@
_locks = None
_lock_cb_handle = None
+ _init_lock = threading.Lock()
_lock_init_lock = threading.Lock()
ffi = build_ffi_for_binding(
@@ -117,14 +118,16 @@
if cls.lib is not None:
return
- cls.lib = load_library_for_binding(
- cls.ffi,
- cls._module_prefix,
- cls._modules,
- )
+ with cls._init_lock:
+ if cls.lib is None:
+ cls.lib = load_library_for_binding(
+ cls.ffi,
+ cls._module_prefix,
+ cls._modules,
+ )
- res = cls.lib.Cryptography_add_osrandom_engine()
- assert res != 0
+ res = cls.lib.Cryptography_add_osrandom_engine()
+ assert res != 0
@classmethod
def init_static_locks(cls):
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/commoncrypto/binding.py b/src/cryptography/hazmat/bindings/commoncrypto/binding.py\n--- a/src/cryptography/hazmat/bindings/commoncrypto/binding.py\n+++ b/src/cryptography/hazmat/bindings/commoncrypto/binding.py\n@@ -4,6 +4,8 @@\n \n from __future__ import absolute_import, division, print_function\n \n+import threading\n+\n from cryptography.hazmat.bindings.utils import (\n build_ffi_for_binding, load_library_for_binding,\n )\n@@ -36,6 +38,7 @@\n ],\n )\n lib = None\n+ _init_lock = threading.Lock()\n \n def __init__(self):\n self._ensure_ffi_initialized()\n@@ -45,8 +48,10 @@\n if cls.lib is not None:\n return\n \n- cls.lib = load_library_for_binding(\n- cls.ffi,\n- module_prefix=cls._module_prefix,\n- modules=cls._modules,\n- )\n+ with cls._init_lock:\n+ if cls.lib is None:\n+ cls.lib = load_library_for_binding(\n+ cls.ffi,\n+ module_prefix=cls._module_prefix,\n+ modules=cls._modules,\n+ )\ndiff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -98,6 +98,7 @@\n \n _locks = None\n _lock_cb_handle = None\n+ _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n \n ffi = build_ffi_for_binding(\n@@ -117,14 +118,16 @@\n if cls.lib is not None:\n return\n \n- cls.lib = load_library_for_binding(\n- cls.ffi,\n- cls._module_prefix,\n- cls._modules,\n- )\n+ with cls._init_lock:\n+ if cls.lib is None:\n+ cls.lib = load_library_for_binding(\n+ cls.ffi,\n+ cls._module_prefix,\n+ cls._modules,\n+ )\n \n- res = cls.lib.Cryptography_add_osrandom_engine()\n- assert res != 0\n+ res = cls.lib.Cryptography_add_osrandom_engine()\n+ assert res != 0\n \n @classmethod\n def init_static_locks(cls):\n", "issue": "TypeError: initializer for ctype 'HMAC_CTX *\nI sometimes get the below exception when using Fernet within an Apache mod_wsgi runtime. It does not occur when using a non-Apache environment (Eventlet).\n\nIs this cryptography module thread safe? 
Or maybe is there an issue in how I used the module?\n\nTypeError: initializer for ctype 'HMAC_CTX *' must be a pointer to same type, not cdata 'HMAC_CTX *'\nCryptography version: 0.7.2\n\nSample code:\n\n```\ndef __init__(self):\n key = self._load_key()\n self.fernet = Fernet(key)\n\ndef encode(self, input, errors='strict'):\n return (self.fernet.encrypt(input), len(input))\n\ndef decode(self, input, errors='strict'):\n return (self.fernet.decrypt(input), len(input))\n\ndef _load_key(self):\n # Load the key from a file\n username = pwd.getpwuid(os.getuid()).pw_name\n filename = '/etc/fernet/%s.key' % username\n try:\n with open(filename) as f:\n key = f.read()\n return key\n except IOError:\n raise UnicodeEncodeError()\n```\n\n```\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File \"/usr/lib/python2.7/encodings/fernet.py\", line 19, in decode\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi return (self.fernet.decrypt(input), len(input))\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File \"/usr/local/lib/python2.7/dist-packages/cryptography/fernet.py\", line 96, in decrypt\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File \"/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/primitives/hmac.py\", line 32, in __init__\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi self._ctx = self._backend.create_hmac_ctx(key, self.algorithm)\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File \"/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/backends/multibackend.py\", line 99, in create_hmac_ctx\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi return b.create_hmac_ctx(key, algorithm)\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File \"/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/backends/openssl/backend.py\", line 140, in create_hmac_ctx\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi return _HMACContext(self, key, algorithm)\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi File \"/usr/local/lib/python2.7/dist-packages/cryptography/hazmat/backends/openssl/hmac.py\", line 24, in __init__\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi self._backend._lib.HMAC_CTX_init(ctx)\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi TypeError: initializer for ctype 'HMAC_CTX *' must be a pointer to same type, not cdata 'HMAC_CTX *'\n2015-03-18 22:55:08.512 6509 TRACE keystone.common.wsgi \n```\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography.hazmat.bindings.utils import (\n build_ffi_for_binding, load_library_for_binding,\n)\n\n\nclass Binding(object):\n \"\"\"\n CommonCrypto API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.commoncrypto.\"\n _modules = [\n \"cf\",\n \"common_digest\",\n \"common_hmac\",\n \"common_key_derivation\",\n \"common_cryptor\",\n \"common_symmetric_key_wrap\",\n \"secimport\",\n \"secitem\",\n \"seckey\",\n \"seckeychain\",\n \"sectransform\",\n ]\n\n ffi = build_ffi_for_binding(\n module_prefix=_module_prefix,\n modules=_modules,\n extra_link_args=[\n \"-framework\", \"Security\", \"-framework\", \"CoreFoundation\"\n ],\n )\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.lib is not None:\n return\n\n cls.lib = load_library_for_binding(\n cls.ffi,\n module_prefix=cls._module_prefix,\n modules=cls._modules,\n )\n", "path": "src/cryptography/hazmat/bindings/commoncrypto/binding.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport threading\n\nfrom cryptography.hazmat.bindings.utils import (\n build_ffi_for_binding, load_library_for_binding,\n)\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\ndef _get_libraries(platform):\n # OpenSSL goes by a different library name on different operating systems.\n if platform != \"win32\":\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (http://marc.info/?l=openssl-users&m=135361825921871)\n return [\"ssl\", \"crypto\"]\n else:\n link_type = os.environ.get(\"PYCA_WINDOWS_LINK_TYPE\", \"static\")\n return _get_windows_libraries(link_type)\n\n\ndef _get_windows_libraries(link_type):\n if link_type == \"dynamic\":\n return [\"libeay32\", \"ssleay32\", \"advapi32\"]\n elif link_type == \"static\" or link_type == \"\":\n return [\"libeay32mt\", \"ssleay32mt\", \"advapi32\",\n \"crypt32\", \"gdi32\", \"user32\", \"ws2_32\"]\n else:\n raise ValueError(\n \"PYCA_WINDOWS_LINK_TYPE must be 'static' or 'dynamic'\"\n )\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"cms\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\"\n ]\n\n 
_locks = None\n _lock_cb_handle = None\n _lock_init_lock = threading.Lock()\n\n ffi = build_ffi_for_binding(\n module_prefix=_module_prefix,\n modules=_modules,\n pre_include=_OSX_PRE_INCLUDE,\n post_include=_OSX_POST_INCLUDE,\n libraries=_get_libraries(sys.platform)\n )\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.lib is not None:\n return\n\n cls.lib = load_library_for_binding(\n cls.ffi,\n cls._module_prefix,\n cls._modules,\n )\n\n res = cls.lib.Cryptography_add_osrandom_engine()\n assert res != 0\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n cls._lock_cb_handle = cls.ffi.callback(\n \"void(int, int, const char *, int)\",\n cls._lock_cb\n )\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]}
| 3,609 | 555 |
gh_patches_debug_4891 | rasdani/github-patches | git_diff | qutip__qutip-949 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Optimization flags in setup.py should be completely avoided
Hard-coding the compiler flag `-march=native` in setup.py completely destroys the possibility of setting up Qutip on a heterogeneous cluster. In general, it brings a lot of problems for people who don't have much experience debugging "illegal instruction" errors, which often happen if you compile the module on a different machine than the one you run it on.
If you are sure you need an optimized build for localhost, you might use
```
export CFLAGS="-O3 -march=native"
export CXXFLAGS="$CFLAGS"
pip install qutip
```
instead, or provide a separate option for the setup.py script.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """QuTiP: The Quantum Toolbox in Python
3
4 QuTiP is open-source software for simulating the dynamics of closed and open
5 quantum systems. The QuTiP library depends on the excellent Numpy, Scipy, and
6 Cython numerical packages. In addition, graphical output is provided by
7 Matplotlib. QuTiP aims to provide user-friendly and efficient numerical
8 simulations of a wide variety of quantum mechanical problems, including those
9 with Hamiltonians and/or collapse operators with arbitrary time-dependence,
10 commonly found in a wide range of physics applications. QuTiP is freely
11 available for use and/or modification on all common platforms. Being free of
12 any licensing fees, QuTiP is ideal for exploring quantum mechanics in research
13 as well as in the classroom.
14 """
15
16 DOCLINES = __doc__.split('\n')
17
18 CLASSIFIERS = """\
19 Development Status :: 4 - Beta
20 Intended Audience :: Science/Research
21 License :: OSI Approved :: BSD License
22 Programming Language :: Python
23 Programming Language :: Python :: 3
24 Topic :: Scientific/Engineering
25 Operating System :: MacOS
26 Operating System :: POSIX
27 Operating System :: Unix
28 Operating System :: Microsoft :: Windows
29 """
30
31 # import statements
32 import os
33 import sys
34 # The following is required to get unit tests up and running.
35 # If the user doesn't have, then that's OK, we'll just skip unit tests.
36 try:
37 from setuptools import setup, Extension
38 TEST_SUITE = 'nose.collector'
39 TESTS_REQUIRE = ['nose']
40 EXTRA_KWARGS = {
41 'test_suite': TEST_SUITE,
42 'tests_require': TESTS_REQUIRE
43 }
44 except:
45 from distutils.core import setup
46 from distutils.extension import Extension
47 EXTRA_KWARGS = {}
48
49 try:
50 import numpy as np
51 except:
52 np = None
53
54 from Cython.Build import cythonize
55 from Cython.Distutils import build_ext
56
57 # all information about QuTiP goes here
58 MAJOR = 4
59 MINOR = 4
60 MICRO = 0
61 ISRELEASED = False
62 VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
63 REQUIRES = ['numpy (>=1.8)', 'scipy (>=0.15)', 'cython (>=0.21)']
64 INSTALL_REQUIRES = ['numpy>=1.8', 'scipy>=0.15', 'cython>=0.21']
65 PACKAGES = ['qutip', 'qutip/ui', 'qutip/cy', 'qutip/cy/src',
66 'qutip/qip', 'qutip/qip/models',
67 'qutip/qip/algorithms', 'qutip/control', 'qutip/nonmarkov',
68 'qutip/_mkl', 'qutip/tests', 'qutip/legacy',
69 'qutip/cy/openmp', 'qutip/cy/openmp/src']
70 PACKAGE_DATA = {
71 'qutip': ['configspec.ini'],
72 'qutip/tests': ['*.ini'],
73 'qutip/cy': ['*.pxi', '*.pxd', '*.pyx'],
74 'qutip/cy/src': ['*.cpp', '*.hpp'],
75 'qutip/control': ['*.pyx'],
76 'qutip/cy/openmp': ['*.pxd', '*.pyx'],
77 'qutip/cy/openmp/src': ['*.cpp', '*.hpp']
78 }
79 # If we're missing numpy, exclude import directories until we can
80 # figure them out properly.
81 INCLUDE_DIRS = [np.get_include()] if np is not None else []
82 # ajgpitch Mar 2017:
83 # This HEADERS did not work, but I will leave it in anyway, as it is supposed to.
84 # I had to do the nasty thing with PACKAGES and PACKAGE_DATA above.
85 HEADERS = ['qutip/cy/src/zspmv.hpp', 'qutip/cy/openmp/src/zspmv_openmp.hpp']
86 NAME = "qutip"
87 AUTHOR = ("Alexander Pitchford, Paul D. Nation, Robert J. Johansson, "
88 "Chris Granade, Arne Grimsmo")
89 AUTHOR_EMAIL = ("[email protected], [email protected], "
90 "[email protected], [email protected], "
91 "[email protected]")
92 LICENSE = "BSD"
93 DESCRIPTION = DOCLINES[0]
94 LONG_DESCRIPTION = "\n".join(DOCLINES[2:])
95 KEYWORDS = "quantum physics dynamics"
96 URL = "http://qutip.org"
97 CLASSIFIERS = [_f for _f in CLASSIFIERS.split('\n') if _f]
98 PLATFORMS = ["Linux", "Mac OSX", "Unix", "Windows"]
99
100
101 def git_short_hash():
102 try:
103 git_str = "+" + os.popen('git log -1 --format="%h"').read().strip()
104 except:
105 git_str = ""
106 else:
107 if git_str == '+': #fixes setuptools PEP issues with versioning
108 git_str = ''
109 return git_str
110
111 FULLVERSION = VERSION
112 if not ISRELEASED:
113 FULLVERSION += '.dev'+str(MICRO)+git_short_hash()
114
115 # NumPy's distutils reads in versions differently than
116 # our fallback. To make sure that versions are added to
117 # egg-info correctly, we need to add FULLVERSION to
118 # EXTRA_KWARGS if NumPy wasn't imported correctly.
119 if np is None:
120 EXTRA_KWARGS['version'] = FULLVERSION
121
122
123 def write_version_py(filename='qutip/version.py'):
124 cnt = """\
125 # THIS FILE IS GENERATED FROM QUTIP SETUP.PY
126 short_version = '%(version)s'
127 version = '%(fullversion)s'
128 release = %(isrelease)s
129 """
130 a = open(filename, 'w')
131 try:
132 a.write(cnt % {'version': VERSION, 'fullversion':
133 FULLVERSION, 'isrelease': str(ISRELEASED)})
134 finally:
135 a.close()
136
137 local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
138 os.chdir(local_path)
139 sys.path.insert(0, local_path)
140 sys.path.insert(0, os.path.join(local_path, 'qutip')) # to retrive _version
141
142 # always rewrite _version
143 if os.path.exists('qutip/version.py'):
144 os.remove('qutip/version.py')
145
146 write_version_py()
147
148 # Add Cython extensions here
149 cy_exts = ['spmatfuncs', 'stochastic', 'sparse_utils', 'graph_utils', 'interpolate',
150 'spmath', 'heom', 'math', 'spconvert', 'ptrace', 'checks', 'brtools',
151 'brtools_checks', 'br_tensor', 'inter', 'cqobjevo', 'cqobjevo_factor', 'piqs']
152
153 # Extra link args
154 _link_flags = []
155
156 # If on Win and Python version >= 3.5 and not in MSYS2 (i.e. Visual studio compile)
157 if (sys.platform == 'win32' and int(str(sys.version_info[0])+str(sys.version_info[1])) >= 35
158 and os.environ.get('MSYSTEM') is None):
159 _compiler_flags = ['/w', '/Ox']
160 # Everything else
161 else:
162 _compiler_flags = ['-w', '-O3', '-march=native', '-funroll-loops']
163 if sys.platform == 'darwin':
164 # These are needed for compiling on OSX 10.14+
165 _compiler_flags.append('-mmacosx-version-min=10.9')
166 _link_flags.append('-mmacosx-version-min=10.9')
167
168
169
170 EXT_MODULES =[]
171 # Add Cython files from qutip/cy
172 for ext in cy_exts:
173 _mod = Extension('qutip.cy.'+ext,
174 sources = ['qutip/cy/'+ext+'.pyx', 'qutip/cy/src/zspmv.cpp'],
175 include_dirs = [np.get_include()],
176 extra_compile_args=_compiler_flags,
177 extra_link_args=_link_flags,
178 language='c++')
179 EXT_MODULES.append(_mod)
180
181 # Add Cython files from qutip/control
182 _mod = Extension('qutip.control.cy_grape',
183 sources = ['qutip/control/cy_grape.pyx'],
184 include_dirs = [np.get_include()],
185 extra_compile_args=_compiler_flags,
186 extra_link_args=_link_flags,
187 language='c++')
188 EXT_MODULES.append(_mod)
189
190
191 # Add optional ext modules here
192 if "--with-openmp" in sys.argv:
193 sys.argv.remove("--with-openmp")
194 if (sys.platform == 'win32'
195 and int(str(sys.version_info[0])+str(sys.version_info[1])) >= 35):
196 omp_flags = ['/openmp']
197 omp_args = []
198 else:
199 omp_flags = ['-fopenmp']
200 omp_args = omp_flags
201 _mod = Extension('qutip.cy.openmp.parfuncs',
202 sources = ['qutip/cy/openmp/parfuncs.pyx',
203 'qutip/cy/openmp/src/zspmv_openmp.cpp'],
204 include_dirs = [np.get_include()],
205 extra_compile_args=_compiler_flags+omp_flags,
206 extra_link_args=omp_args+_link_flags,
207 language='c++')
208 EXT_MODULES.append(_mod)
209 # Add benchmark pyx
210 _mod = Extension('qutip.cy.openmp.benchmark',
211 sources = ['qutip/cy/openmp/benchmark.pyx'],
212 include_dirs = [np.get_include()],
213 extra_compile_args=_compiler_flags,
214 extra_link_args=_link_flags,
215 language='c++')
216 EXT_MODULES.append(_mod)
217
218 # Add brtools_omp
219 _mod = Extension('qutip.cy.openmp.br_omp',
220 sources = ['qutip/cy/openmp/br_omp.pyx'],
221 include_dirs = [np.get_include()],
222 extra_compile_args=_compiler_flags,
223 extra_link_args=_link_flags,
224 language='c++')
225 EXT_MODULES.append(_mod)
226
227 # Add omp_sparse_utils
228 _mod = Extension('qutip.cy.openmp.omp_sparse_utils',
229 sources = ['qutip/cy/openmp/omp_sparse_utils.pyx'],
230 include_dirs = [np.get_include()],
231 extra_compile_args=_compiler_flags+omp_flags,
232 extra_link_args=omp_args+_link_flags,
233 language='c++')
234 EXT_MODULES.append(_mod)
235
236 # Add cqobjevo_omp
237 _mod = Extension('qutip.cy.openmp.cqobjevo_omp',
238 sources = ['qutip/cy/openmp/cqobjevo_omp.pyx'],
239 include_dirs = [np.get_include()],
240 extra_compile_args=_compiler_flags+omp_flags,
241 extra_link_args=omp_args,
242 language='c++')
243 EXT_MODULES.append(_mod)
244
245
246 # Remove -Wstrict-prototypes from cflags
247 import distutils.sysconfig
248 cfg_vars = distutils.sysconfig.get_config_vars()
249 if "CFLAGS" in cfg_vars:
250 cfg_vars["CFLAGS"] = cfg_vars["CFLAGS"].replace("-Wstrict-prototypes", "")
251
252
253 # Setup commands go here
254 setup(
255 name = NAME,
256 version = FULLVERSION,
257 packages = PACKAGES,
258 include_package_data=True,
259 include_dirs = INCLUDE_DIRS,
260 headers = HEADERS,
261 ext_modules = cythonize(EXT_MODULES),
262 cmdclass = {'build_ext': build_ext},
263 author = AUTHOR,
264 author_email = AUTHOR_EMAIL,
265 license = LICENSE,
266 description = DESCRIPTION,
267 long_description = LONG_DESCRIPTION,
268 keywords = KEYWORDS,
269 url = URL,
270 classifiers = CLASSIFIERS,
271 platforms = PLATFORMS,
272 requires = REQUIRES,
273 package_data = PACKAGE_DATA,
274 zip_safe = False,
275 install_requires=INSTALL_REQUIRES,
276 **EXTRA_KWARGS
277 )
278 _cite = """\
279 ==============================================================================
280 Installation complete
281 Please cite QuTiP in your publication.
282 ==============================================================================
283 For your convenience a bibtex reference can be easily generated using
284 `qutip.cite()`"""
285 print(_cite)
286
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -159,7 +159,7 @@
_compiler_flags = ['/w', '/Ox']
# Everything else
else:
- _compiler_flags = ['-w', '-O3', '-march=native', '-funroll-loops']
+ _compiler_flags = ['-w', '-O3', '-funroll-loops']
if sys.platform == 'darwin':
# These are needed for compiling on OSX 10.14+
_compiler_flags.append('-mmacosx-version-min=10.9')
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -159,7 +159,7 @@\n _compiler_flags = ['/w', '/Ox']\n # Everything else\n else:\n- _compiler_flags = ['-w', '-O3', '-march=native', '-funroll-loops']\n+ _compiler_flags = ['-w', '-O3', '-funroll-loops']\n if sys.platform == 'darwin':\n # These are needed for compiling on OSX 10.14+\n _compiler_flags.append('-mmacosx-version-min=10.9')\n", "issue": "Optimization flags in setup.py should be completely avoided\nHard-coding compiler flag `-march=native` in setup.py completely destroys possibility to set up Qutip on heterogeneous cluster. In general, it brings a lot of problems for people that don't have a good experience in debugging \"illegal instruction\" errors, that often happen, if you compile the module on different machine than you use.\r\n\r\nIf you are sure you need optimized build for localhost, you might use\r\n```\r\nexport CFLAGS=\"-O3 -march=native\"\r\nexport CXXFLAGS=\"$CFLAGS\"\r\npip install qutip\r\n```\r\ninstead or provide separate option for setup.py script.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"QuTiP: The Quantum Toolbox in Python\n\nQuTiP is open-source software for simulating the dynamics of closed and open\nquantum systems. The QuTiP library depends on the excellent Numpy, Scipy, and\nCython numerical packages. In addition, graphical output is provided by\nMatplotlib. QuTiP aims to provide user-friendly and efficient numerical\nsimulations of a wide variety of quantum mechanical problems, including those\nwith Hamiltonians and/or collapse operators with arbitrary time-dependence,\ncommonly found in a wide range of physics applications. QuTiP is freely\navailable for use and/or modification on all common platforms. Being free of\nany licensing fees, QuTiP is ideal for exploring quantum mechanics in research\nas well as in the classroom.\n\"\"\"\n\nDOCLINES = __doc__.split('\\n')\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 4 - Beta\nIntended Audience :: Science/Research\nLicense :: OSI Approved :: BSD License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nTopic :: Scientific/Engineering\nOperating System :: MacOS\nOperating System :: POSIX\nOperating System :: Unix\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n# import statements\nimport os\nimport sys\n# The following is required to get unit tests up and running.\n# If the user doesn't have, then that's OK, we'll just skip unit tests.\ntry:\n from setuptools import setup, Extension\n TEST_SUITE = 'nose.collector'\n TESTS_REQUIRE = ['nose']\n EXTRA_KWARGS = {\n 'test_suite': TEST_SUITE,\n 'tests_require': TESTS_REQUIRE\n }\nexcept:\n from distutils.core import setup\n from distutils.extension import Extension\n EXTRA_KWARGS = {}\n\ntry:\n import numpy as np\nexcept:\n np = None\n\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n\n# all information about QuTiP goes here\nMAJOR = 4\nMINOR = 4\nMICRO = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\nREQUIRES = ['numpy (>=1.8)', 'scipy (>=0.15)', 'cython (>=0.21)']\nINSTALL_REQUIRES = ['numpy>=1.8', 'scipy>=0.15', 'cython>=0.21']\nPACKAGES = ['qutip', 'qutip/ui', 'qutip/cy', 'qutip/cy/src',\n 'qutip/qip', 'qutip/qip/models',\n 'qutip/qip/algorithms', 'qutip/control', 'qutip/nonmarkov',\n 'qutip/_mkl', 'qutip/tests', 'qutip/legacy',\n 'qutip/cy/openmp', 'qutip/cy/openmp/src']\nPACKAGE_DATA = {\n 'qutip': ['configspec.ini'],\n 'qutip/tests': ['*.ini'],\n 'qutip/cy': ['*.pxi', '*.pxd', 
'*.pyx'],\n 'qutip/cy/src': ['*.cpp', '*.hpp'],\n 'qutip/control': ['*.pyx'],\n 'qutip/cy/openmp': ['*.pxd', '*.pyx'],\n 'qutip/cy/openmp/src': ['*.cpp', '*.hpp']\n}\n# If we're missing numpy, exclude import directories until we can\n# figure them out properly.\nINCLUDE_DIRS = [np.get_include()] if np is not None else []\n# ajgpitch Mar 2017:\n# This HEADERS did not work, but I will leave it in anyway, as it is supposed to.\n# I had to do the nasty thing with PACKAGES and PACKAGE_DATA above.\nHEADERS = ['qutip/cy/src/zspmv.hpp', 'qutip/cy/openmp/src/zspmv_openmp.hpp']\nNAME = \"qutip\"\nAUTHOR = (\"Alexander Pitchford, Paul D. Nation, Robert J. Johansson, \"\n \"Chris Granade, Arne Grimsmo\")\nAUTHOR_EMAIL = (\"[email protected], [email protected], \"\n \"[email protected], [email protected], \"\n \"[email protected]\")\nLICENSE = \"BSD\"\nDESCRIPTION = DOCLINES[0]\nLONG_DESCRIPTION = \"\\n\".join(DOCLINES[2:])\nKEYWORDS = \"quantum physics dynamics\"\nURL = \"http://qutip.org\"\nCLASSIFIERS = [_f for _f in CLASSIFIERS.split('\\n') if _f]\nPLATFORMS = [\"Linux\", \"Mac OSX\", \"Unix\", \"Windows\"]\n\n\ndef git_short_hash():\n try:\n git_str = \"+\" + os.popen('git log -1 --format=\"%h\"').read().strip()\n except:\n git_str = \"\"\n else:\n if git_str == '+': #fixes setuptools PEP issues with versioning\n git_str = ''\n return git_str\n\nFULLVERSION = VERSION\nif not ISRELEASED:\n FULLVERSION += '.dev'+str(MICRO)+git_short_hash()\n\n# NumPy's distutils reads in versions differently than\n# our fallback. To make sure that versions are added to\n# egg-info correctly, we need to add FULLVERSION to\n# EXTRA_KWARGS if NumPy wasn't imported correctly.\nif np is None:\n EXTRA_KWARGS['version'] = FULLVERSION\n\n\ndef write_version_py(filename='qutip/version.py'):\n cnt = \"\"\"\\\n# THIS FILE IS GENERATED FROM QUTIP SETUP.PY\nshort_version = '%(version)s'\nversion = '%(fullversion)s'\nrelease = %(isrelease)s\n\"\"\"\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION, 'fullversion':\n FULLVERSION, 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\nlocal_path = os.path.dirname(os.path.abspath(sys.argv[0]))\nos.chdir(local_path)\nsys.path.insert(0, local_path)\nsys.path.insert(0, os.path.join(local_path, 'qutip')) # to retrive _version\n\n# always rewrite _version\nif os.path.exists('qutip/version.py'):\n os.remove('qutip/version.py')\n\nwrite_version_py()\n\n# Add Cython extensions here\ncy_exts = ['spmatfuncs', 'stochastic', 'sparse_utils', 'graph_utils', 'interpolate',\n 'spmath', 'heom', 'math', 'spconvert', 'ptrace', 'checks', 'brtools',\n 'brtools_checks', 'br_tensor', 'inter', 'cqobjevo', 'cqobjevo_factor', 'piqs']\n\n# Extra link args\n_link_flags = []\n\n# If on Win and Python version >= 3.5 and not in MSYS2 (i.e. 
Visual studio compile)\nif (sys.platform == 'win32' and int(str(sys.version_info[0])+str(sys.version_info[1])) >= 35\n and os.environ.get('MSYSTEM') is None):\n _compiler_flags = ['/w', '/Ox']\n# Everything else\nelse:\n _compiler_flags = ['-w', '-O3', '-march=native', '-funroll-loops']\n if sys.platform == 'darwin':\n # These are needed for compiling on OSX 10.14+\n _compiler_flags.append('-mmacosx-version-min=10.9')\n _link_flags.append('-mmacosx-version-min=10.9')\n\n\n\nEXT_MODULES =[]\n# Add Cython files from qutip/cy\nfor ext in cy_exts:\n _mod = Extension('qutip.cy.'+ext,\n sources = ['qutip/cy/'+ext+'.pyx', 'qutip/cy/src/zspmv.cpp'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags,\n extra_link_args=_link_flags,\n language='c++')\n EXT_MODULES.append(_mod)\n\n# Add Cython files from qutip/control\n_mod = Extension('qutip.control.cy_grape',\n sources = ['qutip/control/cy_grape.pyx'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags,\n extra_link_args=_link_flags,\n language='c++')\nEXT_MODULES.append(_mod)\n\n\n# Add optional ext modules here\nif \"--with-openmp\" in sys.argv:\n sys.argv.remove(\"--with-openmp\")\n if (sys.platform == 'win32'\n and int(str(sys.version_info[0])+str(sys.version_info[1])) >= 35):\n omp_flags = ['/openmp']\n omp_args = []\n else:\n omp_flags = ['-fopenmp']\n omp_args = omp_flags\n _mod = Extension('qutip.cy.openmp.parfuncs',\n sources = ['qutip/cy/openmp/parfuncs.pyx',\n 'qutip/cy/openmp/src/zspmv_openmp.cpp'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags+omp_flags,\n extra_link_args=omp_args+_link_flags,\n language='c++')\n EXT_MODULES.append(_mod)\n # Add benchmark pyx\n _mod = Extension('qutip.cy.openmp.benchmark',\n sources = ['qutip/cy/openmp/benchmark.pyx'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags,\n extra_link_args=_link_flags,\n language='c++')\n EXT_MODULES.append(_mod)\n\n # Add brtools_omp\n _mod = Extension('qutip.cy.openmp.br_omp',\n sources = ['qutip/cy/openmp/br_omp.pyx'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags,\n extra_link_args=_link_flags,\n language='c++')\n EXT_MODULES.append(_mod)\n\n # Add omp_sparse_utils\n _mod = Extension('qutip.cy.openmp.omp_sparse_utils',\n sources = ['qutip/cy/openmp/omp_sparse_utils.pyx'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags+omp_flags,\n extra_link_args=omp_args+_link_flags,\n language='c++')\n EXT_MODULES.append(_mod)\n\n # Add cqobjevo_omp\n _mod = Extension('qutip.cy.openmp.cqobjevo_omp',\n sources = ['qutip/cy/openmp/cqobjevo_omp.pyx'],\n include_dirs = [np.get_include()],\n extra_compile_args=_compiler_flags+omp_flags,\n extra_link_args=omp_args,\n language='c++')\n EXT_MODULES.append(_mod)\n\n\n# Remove -Wstrict-prototypes from cflags\nimport distutils.sysconfig\ncfg_vars = distutils.sysconfig.get_config_vars()\nif \"CFLAGS\" in cfg_vars:\n cfg_vars[\"CFLAGS\"] = cfg_vars[\"CFLAGS\"].replace(\"-Wstrict-prototypes\", \"\")\n\n\n# Setup commands go here\nsetup(\n name = NAME,\n version = FULLVERSION,\n packages = PACKAGES,\n include_package_data=True,\n include_dirs = INCLUDE_DIRS,\n headers = HEADERS,\n ext_modules = cythonize(EXT_MODULES),\n cmdclass = {'build_ext': build_ext},\n author = AUTHOR,\n author_email = AUTHOR_EMAIL,\n license = LICENSE,\n description = DESCRIPTION,\n long_description = LONG_DESCRIPTION,\n keywords = KEYWORDS,\n url = URL,\n classifiers = CLASSIFIERS,\n platforms = PLATFORMS,\n requires = 
REQUIRES,\n package_data = PACKAGE_DATA,\n zip_safe = False,\n install_requires=INSTALL_REQUIRES,\n **EXTRA_KWARGS\n)\n_cite = \"\"\"\\\n==============================================================================\nInstallation complete\nPlease cite QuTiP in your publication.\n==============================================================================\nFor your convenience a bibtex reference can be easily generated using\n`qutip.cite()`\"\"\"\nprint(_cite)\n", "path": "setup.py"}]}
| 4,045 | 137 |
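As an illustrative sketch of the fix direction discussed in the record above (a hard-coded `-march=native` breaking builds on heterogeneous clusters), the snippet below keeps portable default flags and only adds `-march=native` when explicitly requested through an opt-in environment variable. The `QUTIP_NATIVE_FLAGS` name is a hypothetical placeholder, not an actual QuTiP option, and this is not part of the record's golden diff.

```python
import os
import sys


def default_compiler_flags():
    """Conservative flags; add -march=native only when explicitly requested."""
    if sys.platform == "win32":
        return ["/w", "/Ox"]
    flags = ["-w", "-O3", "-funroll-loops"]
    if sys.platform == "darwin":
        # Needed for compiling on OSX 10.14+, as in the original setup.py.
        flags.append("-mmacosx-version-min=10.9")
    # Opt-in only, so default builds stay portable across machines.
    if os.environ.get("QUTIP_NATIVE_FLAGS") == "1":
        flags.append("-march=native")
    return flags


if __name__ == "__main__":
    print(default_compiler_flags())
```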
gh_patches_debug_15742
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-939
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hyphens in search query are normalized differently than in ElasticSearch
If I have the substring "fooo-baar" in the text of one of the indexed fields for a Page-derived model instance, I'd expect to see that page when I search for "fooo-baar", but I don't.
This seems to be because `wagtailsearch` normalizes "fooo-baar" to "fooobaar", while ElasticSearch treats the hyphen as a whitespace character.
Failing test: add
```
"Hello-world",
```
to `test_queries:45`.
Suggested fix: normalize to "fooo baar" instead.
</issue>
<code>
[start of wagtail/wagtailsearch/backends/base.py]
1 from six import text_type
2
3 from django.db import models
4 from django.db.models.query import QuerySet
5 from django.db.models.lookups import Lookup
6 from django.db.models.sql.where import SubqueryConstraint, WhereNode
7 from django.core.exceptions import ImproperlyConfigured
8
9 from wagtail.wagtailsearch.index import class_is_indexed
10 from wagtail.wagtailsearch.utils import normalise_query_string
11
12
13 class FilterError(Exception):
14 pass
15
16
17 class FieldError(Exception):
18 pass
19
20
21 class BaseSearchQuery(object):
22 def __init__(self, queryset, query_string, fields=None):
23 self.queryset = queryset
24 self.query_string = query_string
25 self.fields = fields
26
27 def _get_searchable_field(self, field_attname):
28 # Get field
29 field = dict(
30 (field.get_attname(self.queryset.model), field)
31 for field in self.queryset.model.get_searchable_search_fields()
32 ).get(field_attname, None)
33
34 return field
35
36 def _get_filterable_field(self, field_attname):
37 # Get field
38 field = dict(
39 (field.get_attname(self.queryset.model), field)
40 for field in self.queryset.model.get_filterable_search_fields()
41 ).get(field_attname, None)
42
43 return field
44
45 def _process_lookup(self, field, lookup, value):
46 raise NotImplementedError
47
48 def _connect_filters(self, filters, connector, negated):
49 raise NotImplementedError
50
51 def _process_filter(self, field_attname, lookup, value):
52 # Get the field
53 field = self._get_filterable_field(field_attname)
54
55 if field is None:
56 raise FieldError('Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' + field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.')
57
58 # Process the lookup
59 result = self._process_lookup(field, lookup, value)
60
61 if result is None:
62 raise FilterError('Could not apply filter on search results: "' + field_attname + '__' + lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '"" not recognosed.')
63
64 return result
65
66 def _get_filters_from_where_node(self, where_node):
67 # Check if this is a leaf node
68 if isinstance(where_node, Lookup):
69 field_attname = where_node.lhs.target.attname
70 lookup = where_node.lookup_name
71 value = where_node.rhs
72
73 # Process the filter
74 return self._process_filter(field_attname, lookup, value)
75
76 elif isinstance(where_node, SubqueryConstraint):
77 raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
78
79 elif isinstance(where_node, WhereNode):
80 # Get child filters
81 connector = where_node.connector
82 child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
83 child_filters = [child_filter for child_filter in child_filters if child_filter]
84
85 return self._connect_filters(child_filters, connector, where_node.negated)
86
87 else:
88 raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
89
90 def _get_filters_from_queryset(self):
91 return self._get_filters_from_where_node(self.queryset.query.where)
92
93
94 class BaseSearchResults(object):
95 def __init__(self, backend, query, prefetch_related=None):
96 self.backend = backend
97 self.query = query
98 self.prefetch_related = prefetch_related
99 self.start = 0
100 self.stop = None
101 self._results_cache = None
102 self._count_cache = None
103
104 def _set_limits(self, start=None, stop=None):
105 if stop is not None:
106 if self.stop is not None:
107 self.stop = min(self.stop, self.start + stop)
108 else:
109 self.stop = self.start + stop
110
111 if start is not None:
112 if self.stop is not None:
113 self.start = min(self.stop, self.start + start)
114 else:
115 self.start = self.start + start
116
117 def _clone(self):
118 klass = self.__class__
119 new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
120 new.start = self.start
121 new.stop = self.stop
122 return new
123
124 def _do_search(self):
125 raise NotImplementedError
126
127 def _do_count(self):
128 raise NotImplementedError
129
130 def results(self):
131 if self._results_cache is None:
132 self._results_cache = self._do_search()
133 return self._results_cache
134
135 def count(self):
136 if self._count_cache is None:
137 if self._results_cache is not None:
138 self._count_cache = len(self._results_cache)
139 else:
140 self._count_cache = self._do_count()
141 return self._count_cache
142
143 def __getitem__(self, key):
144 new = self._clone()
145
146 if isinstance(key, slice):
147 # Set limits
148 start = int(key.start) if key.start else None
149 stop = int(key.stop) if key.stop else None
150 new._set_limits(start, stop)
151
152 # Copy results cache
153 if self._results_cache is not None:
154 new._results_cache = self._results_cache[key]
155
156 return new
157 else:
158 if self._results_cache is not None:
159 return self._results_cache[key]
160
161 new.start = key
162 new.stop = key + 1
163 return list(new)[0]
164
165 def __iter__(self):
166 return iter(self.results())
167
168 def __len__(self):
169 return len(self.results())
170
171 def __repr__(self):
172 data = list(self[:21])
173 if len(data) > 20:
174 data[-1] = "...(remaining elements truncated)..."
175 return repr(data)
176
177
178 class BaseSearch(object):
179 def __init__(self, params):
180 pass
181
182 def reset_index(self):
183 raise NotImplementedError
184
185 def add_type(self, model):
186 raise NotImplementedError
187
188 def refresh_index(self):
189 raise NotImplementedError
190
191 def add(self, obj):
192 raise NotImplementedError
193
194 def add_bulk(self, model, obj_list):
195 raise NotImplementedError
196
197 def delete(self, obj):
198 raise NotImplementedError
199
200 def _search(self, queryset, query_string, fields=None):
201 raise NotImplementedError
202
203 def search(self, query_string, model_or_queryset, fields=None, filters=None, prefetch_related=None):
204 # Find model/queryset
205 if isinstance(model_or_queryset, QuerySet):
206 model = model_or_queryset.model
207 queryset = model_or_queryset
208 else:
209 model = model_or_queryset
210 queryset = model_or_queryset.objects.all()
211
212 # Model must be a class that is in the index
213 if not class_is_indexed(model):
214 return []
215
216 # Normalise query string
217 if query_string is not None:
218 query_string = normalise_query_string(query_string)
219
220 # Check that theres still a query string after the clean up
221 if query_string == "":
222 return []
223
224 # Apply filters to queryset
225 if filters:
226 queryset = queryset.filter(**filters)
227
228 # Prefetch related
229 if prefetch_related:
230 for prefetch in prefetch_related:
231 queryset = queryset.prefetch_related(prefetch)
232
233 # Search
234 return self._search(queryset, query_string, fields=fields)
235
[end of wagtail/wagtailsearch/backends/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/wagtailsearch/backends/base.py b/wagtail/wagtailsearch/backends/base.py
--- a/wagtail/wagtailsearch/backends/base.py
+++ b/wagtail/wagtailsearch/backends/base.py
@@ -7,7 +7,6 @@
from django.core.exceptions import ImproperlyConfigured
from wagtail.wagtailsearch.index import class_is_indexed
-from wagtail.wagtailsearch.utils import normalise_query_string
class FilterError(Exception):
@@ -213,10 +212,6 @@
if not class_is_indexed(model):
return []
- # Normalise query string
- if query_string is not None:
- query_string = normalise_query_string(query_string)
-
# Check that theres still a query string after the clean up
if query_string == "":
return []
|
{"golden_diff": "diff --git a/wagtail/wagtailsearch/backends/base.py b/wagtail/wagtailsearch/backends/base.py\n--- a/wagtail/wagtailsearch/backends/base.py\n+++ b/wagtail/wagtailsearch/backends/base.py\n@@ -7,7 +7,6 @@\n from django.core.exceptions import ImproperlyConfigured\n \n from wagtail.wagtailsearch.index import class_is_indexed\n-from wagtail.wagtailsearch.utils import normalise_query_string\n \n \n class FilterError(Exception):\n@@ -213,10 +212,6 @@\n if not class_is_indexed(model):\n return []\n \n- # Normalise query string\n- if query_string is not None:\n- query_string = normalise_query_string(query_string)\n-\n # Check that theres still a query string after the clean up\n if query_string == \"\":\n return []\n", "issue": "Hyphens in search query are normalized differently than in ElasticSearch\nIf I have the substring \"fooo-baar\" in the text of one of the indexed fields for a Page-derived model instance, I'd expect to be see that page when I search for \"fooo-baar\", but I don't.\n\nThis seems to be because `wagtailsearch` normalizes \"fooo-baar\" to \"fooobaar\", while ElasticSearch treats the hyphen as a whitespace character.\n\nFailing test: add \n\n```\n\"Hello-world\",\n```\n\nto `test_queries:45`.\n\nSuggested fix: normalize to \"fooo baar\" instead.\n\n", "before_files": [{"content": "from six import text_type\n\nfrom django.db import models\nfrom django.db.models.query import QuerySet\nfrom django.db.models.lookups import Lookup\nfrom django.db.models.sql.where import SubqueryConstraint, WhereNode\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom wagtail.wagtailsearch.index import class_is_indexed\nfrom wagtail.wagtailsearch.utils import normalise_query_string\n\n\nclass FilterError(Exception):\n pass\n\n\nclass FieldError(Exception):\n pass\n\n\nclass BaseSearchQuery(object):\n def __init__(self, queryset, query_string, fields=None):\n self.queryset = queryset\n self.query_string = query_string\n self.fields = fields\n\n def _get_searchable_field(self, field_attname):\n # Get field\n field = dict(\n (field.get_attname(self.queryset.model), field)\n for field in self.queryset.model.get_searchable_search_fields()\n ).get(field_attname, None)\n\n return field\n\n def _get_filterable_field(self, field_attname):\n # Get field\n field = dict(\n (field.get_attname(self.queryset.model), field)\n for field in self.queryset.model.get_filterable_search_fields()\n ).get(field_attname, None)\n\n return field\n\n def _process_lookup(self, field, lookup, value):\n raise NotImplementedError\n\n def _connect_filters(self, filters, connector, negated):\n raise NotImplementedError\n\n def _process_filter(self, field_attname, lookup, value):\n # Get the field\n field = self._get_filterable_field(field_attname)\n\n if field is None:\n raise FieldError('Cannot filter search results with field \"' + field_attname + '\". Please add index.FilterField(\\'' + field_attname + '\\') to ' + self.queryset.model.__name__ + '.search_fields.')\n\n # Process the lookup\n result = self._process_lookup(field, lookup, value)\n\n if result is None:\n raise FilterError('Could not apply filter on search results: \"' + field_attname + '__' + lookup + ' = ' + text_type(value) + '\". 
Lookup \"' + lookup + '\"\" not recognosed.')\n\n return result\n\n def _get_filters_from_where_node(self, where_node):\n # Check if this is a leaf node\n if isinstance(where_node, Lookup):\n field_attname = where_node.lhs.target.attname\n lookup = where_node.lookup_name\n value = where_node.rhs\n\n # Process the filter\n return self._process_filter(field_attname, lookup, value)\n\n elif isinstance(where_node, SubqueryConstraint):\n raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')\n\n elif isinstance(where_node, WhereNode):\n # Get child filters\n connector = where_node.connector\n child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]\n child_filters = [child_filter for child_filter in child_filters if child_filter]\n\n return self._connect_filters(child_filters, connector, where_node.negated)\n\n else:\n raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))\n\n def _get_filters_from_queryset(self):\n return self._get_filters_from_where_node(self.queryset.query.where)\n\n\nclass BaseSearchResults(object):\n def __init__(self, backend, query, prefetch_related=None):\n self.backend = backend\n self.query = query\n self.prefetch_related = prefetch_related\n self.start = 0\n self.stop = None\n self._results_cache = None\n self._count_cache = None\n\n def _set_limits(self, start=None, stop=None):\n if stop is not None:\n if self.stop is not None:\n self.stop = min(self.stop, self.start + stop)\n else:\n self.stop = self.start + stop\n\n if start is not None:\n if self.stop is not None:\n self.start = min(self.stop, self.start + start)\n else:\n self.start = self.start + start\n\n def _clone(self):\n klass = self.__class__\n new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)\n new.start = self.start\n new.stop = self.stop\n return new\n\n def _do_search(self):\n raise NotImplementedError\n\n def _do_count(self):\n raise NotImplementedError\n\n def results(self):\n if self._results_cache is None:\n self._results_cache = self._do_search()\n return self._results_cache\n\n def count(self):\n if self._count_cache is None:\n if self._results_cache is not None:\n self._count_cache = len(self._results_cache)\n else:\n self._count_cache = self._do_count()\n return self._count_cache\n\n def __getitem__(self, key):\n new = self._clone()\n\n if isinstance(key, slice):\n # Set limits\n start = int(key.start) if key.start else None\n stop = int(key.stop) if key.stop else None\n new._set_limits(start, stop)\n\n # Copy results cache\n if self._results_cache is not None:\n new._results_cache = self._results_cache[key]\n\n return new\n else:\n if self._results_cache is not None:\n return self._results_cache[key]\n\n new.start = key\n new.stop = key + 1\n return list(new)[0]\n\n def __iter__(self):\n return iter(self.results())\n\n def __len__(self):\n return len(self.results())\n\n def __repr__(self):\n data = list(self[:21])\n if len(data) > 20:\n data[-1] = \"...(remaining elements truncated)...\"\n return repr(data)\n\n\nclass BaseSearch(object):\n def __init__(self, params):\n pass\n\n def reset_index(self):\n raise NotImplementedError\n\n def add_type(self, model):\n raise NotImplementedError\n\n def refresh_index(self):\n raise NotImplementedError\n\n def add(self, obj):\n raise NotImplementedError\n\n def add_bulk(self, model, obj_list):\n raise NotImplementedError\n\n def delete(self, obj):\n raise NotImplementedError\n\n def _search(self, 
queryset, query_string, fields=None):\n raise NotImplementedError\n\n def search(self, query_string, model_or_queryset, fields=None, filters=None, prefetch_related=None):\n # Find model/queryset\n if isinstance(model_or_queryset, QuerySet):\n model = model_or_queryset.model\n queryset = model_or_queryset\n else:\n model = model_or_queryset\n queryset = model_or_queryset.objects.all()\n\n # Model must be a class that is in the index\n if not class_is_indexed(model):\n return []\n\n # Normalise query string\n if query_string is not None:\n query_string = normalise_query_string(query_string)\n\n # Check that theres still a query string after the clean up\n if query_string == \"\":\n return []\n\n # Apply filters to queryset\n if filters:\n queryset = queryset.filter(**filters)\n\n # Prefetch related\n if prefetch_related:\n for prefetch in prefetch_related:\n queryset = queryset.prefetch_related(prefetch)\n\n # Search\n return self._search(queryset, query_string, fields=fields)\n", "path": "wagtail/wagtailsearch/backends/base.py"}]}
| 2,885 | 192 |
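As a minimal sketch of the normalisation suggested in the record above (mapping hyphens to whitespace so that "fooo-baar" matches ElasticSearch's tokenisation), assuming a plain-Python helper rather than Wagtail's actual `normalise_query_string` implementation:

```python
import re

_punct = re.compile(r"[^\w\s]", re.UNICODE)


def normalise_query_string(query_string):
    # Turn punctuation (including hyphens) into spaces instead of deleting it,
    # then lower-case and collapse repeated whitespace.
    cleaned = _punct.sub(" ", query_string)
    return " ".join(cleaned.lower().split())


assert normalise_query_string("Hello-world") == "hello world"
assert normalise_query_string("fooo-baar") == "fooo baar"
```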
gh_patches_debug_27176
|
rasdani/github-patches
|
git_diff
|
microsoft__hi-ml-717
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MockHistoDataGenerator generates corrupted tiff files
Update tiffwriter arguments after upgrade in #691
</issue>
<code>
[start of hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py]
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5 from enum import Enum
6 from pathlib import Path
7 from typing import Any, Optional, Tuple, List, Union
8
9 import numpy as np
10 import pandas as pd
11 import torch
12 from tifffile import TiffWriter
13 from torch import Tensor
14 from health_cpath.datasets.panda_dataset import PandaDataset
15 from testhisto.mocks.base_data_generator import MockHistoDataGenerator, MockHistoDataType, PANDA_N_CLASSES
16
17
18 class TilesPositioningType(Enum):
19 DIAGONAL = 0
20 RANDOM = 1
21
22
23 class MockPandaSlidesGenerator(MockHistoDataGenerator):
24 """Generator class to create mock WSI on the fly.
25 If tiles positioning is diagonal, a mock WSI resembles to:
26 [** ]
27 [ ** ]
28 [ ** ]
29 [ **]
30 where * represents 2 tiles stitched along the Y axis.
31 If tiles positioning is random, tiles are positioned randomly on the WSI grid.
32 """
33
34 ISUP_GRADE = "isup_grade"
35
36 def __init__(
37 self,
38 n_levels: int = 3,
39 n_repeat_diag: int = 4,
40 n_repeat_tile: int = 2,
41 background_val: Union[int, float] = 255,
42 tiles_pos_type: TilesPositioningType = TilesPositioningType.DIAGONAL,
43 n_tiles_list: Optional[List[int]] = None,
44 **kwargs: Any,
45 ) -> None:
46 """
47 :param n_levels: Number of levels for multi resolution WSI.
48 :param n_repeat_diag: Number of repeat time along the diagonal axis, defaults to 4.
49 :param n_repeat_tile: Number of repeat times of a tile along both Y and X axes, defaults to 2.
50 :param background_val: A value to assign to the background, defaults to 255.
51 :param tiles_pos_type: The tiles positioning type to define how tiles should be positioned within the WSI grid,
52 defaults to TilesPositioningType.DIAGONAL.
53 :param n_tiles_list: A list to use different n_tiles per slide for randomly positioned tiles.
54 :param kwargs: Same params passed to MockHistoDataGenerator.
55 """
56 super().__init__(**kwargs)
57
58 self.n_levels = n_levels
59 self.n_repeat_diag = n_repeat_diag
60 self.n_repeat_tile = n_repeat_tile
61 self.background_val = background_val
62 self.tiles_pos_type = tiles_pos_type
63
64 self.step_size = self.tile_size * self.n_repeat_tile
65 self._dtype = np.uint8 if type(background_val) == int else np.float32
66 self.img_size: int = self.n_repeat_diag * self.n_repeat_tile * self.tile_size
67 self.n_tiles_list = n_tiles_list
68
69 if self.n_tiles_list:
70 assert len(self.n_tiles_list) == self.n_slides, "n_tiles_list length should be equal to n_slides"
71 assert self.tiles_pos_type == TilesPositioningType.RANDOM, "different n_tiles enabled only for randomly "
72 "positionned tiles."
73
74 def validate(self) -> None:
75 assert (
76 self.n_slides >= PANDA_N_CLASSES
77 ), f"The number of slides should be >= PANDA_N_CLASSES (i.e., {PANDA_N_CLASSES})"
78
79 def create_mock_metadata_dataframe(self) -> pd.DataFrame:
80 """Create a mock dataframe with random metadata."""
81 isup_grades = np.tile(list(self.ISUP_GRADE_MAPPING.keys()), self.n_slides // PANDA_N_CLASSES + 1,)
82 mock_metadata: dict = {
83 col: [] for col in [PandaDataset.SLIDE_ID_COLUMN, PandaDataset.MASK_COLUMN, *PandaDataset.METADATA_COLUMNS]
84 }
85 for slide_id in range(self.n_slides):
86 mock_metadata[PandaDataset.SLIDE_ID_COLUMN].append(f"_{slide_id}")
87 mock_metadata[PandaDataset.MASK_COLUMN].append(f"_{slide_id}_mask")
88 mock_metadata[self.DATA_PROVIDER].append(np.random.choice(self.DATA_PROVIDERS_VALUES))
89 mock_metadata[self.ISUP_GRADE].append(isup_grades[slide_id])
90 mock_metadata[self.GLEASON_SCORE].append(np.random.choice(self.ISUP_GRADE_MAPPING[isup_grades[slide_id]]))
91 df = pd.DataFrame(data=mock_metadata)
92 csv_filename = self.dest_data_path / PandaDataset.DEFAULT_CSV_FILENAME
93 df.to_csv(csv_filename, index=False)
94
95 def create_mock_wsi(self, tiles: Tensor) -> Tuple[np.ndarray, Optional[np.ndarray]]:
96 if self.tiles_pos_type == TilesPositioningType.DIAGONAL:
97 return self._create_wsi_from_stitched_tiles_along_diagonal_axis(tiles)
98 elif self.tiles_pos_type == TilesPositioningType.RANDOM:
99 return self._create_wsi_from_randomly_positioned_tiles(tiles), None
100 else:
101 raise NotImplementedError
102
103 def _create_wsi_from_stitched_tiles_along_diagonal_axis(self, tiles: Tensor) -> Tuple[np.ndarray, np.ndarray]:
104 """Create a whole slide image by stitching tiles along the diagonal axis.
105
106 :param tiles: A tensor of tiles of shape (n_tiles, n_channels, tile_size, tile_size).
107 :return: returns a wsi of shape (img_size, img_size, n_channels) and the tiles used to create it.
108 The image is in channels_last format so that it can save by TiffWriter.
109 """
110 mock_image = np.full(
111 shape=(self.n_channels, self.img_size, self.img_size), fill_value=self.background_val, dtype=self._dtype
112 )
113 dump_tiles = []
114 for i in range(self.n_repeat_diag):
115 if self.mock_type == MockHistoDataType.PATHMNIST:
116 if i == 0 or self.n_tiles > 1:
117 tile = (
118 (tiles[i % self.n_tiles].numpy()).astype(self._dtype)
119 if self._dtype == np.uint8
120 else tiles[i % self.n_tiles].numpy()
121 )
122 # fill the square diagonal with tile repeated n_repeat_tile times along X and Y axis.
123 fill_square = np.tile(tile, (self.n_repeat_tile, self.n_repeat_tile))
124 dump_tiles.append(tile)
125
126 elif self.mock_type == MockHistoDataType.FAKE:
127 if i == 0 or self.n_tiles > 1:
128 # pick a random fake value to fill in the square diagonal.
129 fill_square = np.random.uniform(0, self.background_val / (self.n_repeat_diag + 1) * (i + 1))
130 dump_tiles.append(
131 np.full(
132 shape=(self.n_channels, self.tile_size, self.tile_size),
133 fill_value=fill_square,
134 dtype=self._dtype,
135 )
136 )
137 else:
138 raise NotImplementedError
139 mock_image[
140 :, self.step_size * i: self.step_size * (i + 1), self.step_size * i: self.step_size * (i + 1)
141 ] = fill_square
142 return np.transpose(mock_image, (1, 2, 0)), np.array(dump_tiles) # switch to channels_last.
143
144 def _create_wsi_from_randomly_positioned_tiles(self, tiles: Tensor) -> np.ndarray:
145 """Create a whole slide image by positioning tiles randomly in the whole slide image grid.
146
147 :param tiles: A tensor of tiles of shape (n_tiles, n_channels, tile_size, tile_size).
148 :return: returns a wsi of shape (img_size, img_size, n_channels) in channels_last format so that it can save by
149 TiffWriter.
150 """
151 mock_image = np.full(
152 shape=(self.n_channels, self.img_size, self.img_size), fill_value=self.background_val, dtype=self._dtype
153 )
154
155 n_tiles_side = self.img_size // self.tile_size
156 total_n_tiles = n_tiles_side ** 2
157 coords = [
158 (k // n_tiles_side, k % n_tiles_side)
159 for k in np.random.choice(total_n_tiles, size=self.n_tiles, replace=False)
160 ]
161 for i in range(self.n_tiles):
162 x, y = self.tile_size * np.array(coords[i])
163 if self.mock_type == MockHistoDataType.PATHMNIST:
164 new_tile = tiles[i].numpy()
165 elif self.mock_type == MockHistoDataType.FAKE:
166 new_tile = np.random.uniform(0, self.background_val / (self.n_repeat_diag + 1) * (i + 1))
167 else:
168 raise NotImplementedError
169 mock_image[:, x: x + self.tile_size, y: y + self.tile_size] = new_tile
170 return np.transpose(mock_image, (1, 2, 0))
171
172 @staticmethod
173 def _save_mock_wsi_as_tiff_file(file_path: Path, wsi_levels: List[np.ndarray]) -> None:
174 """Save a mock whole slide image as a tiff file of pyramidal levels.
175 Warning: this function expects images to be in channels_last format (H, W, C).
176
177 :param file_name: The tiff file name path.
178 :param wsi_levels: List of whole slide images of different resolution levels in channels_last format.
179 """
180 with TiffWriter(file_path, bigtiff=True) as tif:
181 options = dict(photometric="rgb", compression="zlib")
182 for i, wsi_level in enumerate(wsi_levels):
183 # the subfiletype parameter is a bitfield that determines if the wsi_level is a reduced version of
184 # another image.
185 tif.write(wsi_level, **options, subfiletype=int(i > 0))
186
187 def _create_multi_resolution_wsi(self, mock_image: np.ndarray) -> List[np.ndarray]:
188 """Create multi resolution versions of a mock image via 2 factor downsampling.
189
190 :param mock_image: A mock image in channels_last format (H, W, 3).
191 :return: Returns a list of n_levels downsampled versions of the original mock image.
192 """
193 levels = [mock_image[:: 2 ** i, :: 2 ** i] for i in range(self.n_levels)]
194 return levels
195
196 def generate_mock_histo_data(self) -> None:
197 """Create mock wsi and save them as tiff files"""
198 iterator = iter(self.dataloader) if self.dataloader else None
199
200 slide_dir = self.dest_data_path / "train_images"
201 slide_dir.mkdir(parents=True, exist_ok=True)
202 tile_dir = self.dest_data_path / "dump_tiles"
203 tile_dir.mkdir(parents=True, exist_ok=True)
204
205 for slide_counter in range(self.n_slides):
206
207 if self.n_tiles_list:
208 self.total_tiles = self.n_tiles_list[slide_counter]
209 self.n_tiles: int = self.n_tiles_list[slide_counter]
210 self.dataloader: torch.utils.data.DataLoader = self.get_dataloader()
211 iterator = iter(self.dataloader)
212
213 tiles, _ = next(iterator) if iterator else (None, None)
214 mock_image, dump_tiles = self.create_mock_wsi(tiles)
215 wsi_levels = self._create_multi_resolution_wsi(mock_image)
216
217 slide_tiff_filename = self.dest_data_path / "train_images" / f"_{slide_counter}.tiff"
218 self._save_mock_wsi_as_tiff_file(slide_tiff_filename, wsi_levels)
219
220 if dump_tiles is not None:
221 dump_tiles_filename = self.dest_data_path / "dump_tiles" / f"_{slide_counter}.npy"
222 np.save(dump_tiles_filename, dump_tiles)
223
[end of hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py b/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py
--- a/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py
+++ b/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py
@@ -9,7 +9,7 @@
import numpy as np
import pandas as pd
import torch
-from tifffile import TiffWriter
+from tifffile.tifffile import TiffWriter, PHOTOMETRIC, COMPRESSION
from torch import Tensor
from health_cpath.datasets.panda_dataset import PandaDataset
from testhisto.mocks.base_data_generator import MockHistoDataGenerator, MockHistoDataType, PANDA_N_CLASSES
@@ -178,7 +178,13 @@
:param wsi_levels: List of whole slide images of different resolution levels in channels_last format.
"""
with TiffWriter(file_path, bigtiff=True) as tif:
- options = dict(photometric="rgb", compression="zlib")
+ options = dict(
+ software='tifffile',
+ metadata={'axes': 'YXC'},
+ photometric=PHOTOMETRIC.RGB,
+ compression=COMPRESSION.ADOBE_DEFLATE, # ADOBE_DEFLATE aka ZLIB lossless compression
+ tile=(16, 16),
+ )
for i, wsi_level in enumerate(wsi_levels):
# the subfiletype parameter is a bitfield that determines if the wsi_level is a reduced version of
# another image.
|
{"golden_diff": "diff --git a/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py b/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py\n--- a/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py\n+++ b/hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py\n@@ -9,7 +9,7 @@\n import numpy as np\n import pandas as pd\n import torch\n-from tifffile import TiffWriter\n+from tifffile.tifffile import TiffWriter, PHOTOMETRIC, COMPRESSION\n from torch import Tensor\n from health_cpath.datasets.panda_dataset import PandaDataset\n from testhisto.mocks.base_data_generator import MockHistoDataGenerator, MockHistoDataType, PANDA_N_CLASSES\n@@ -178,7 +178,13 @@\n :param wsi_levels: List of whole slide images of different resolution levels in channels_last format.\n \"\"\"\n with TiffWriter(file_path, bigtiff=True) as tif:\n- options = dict(photometric=\"rgb\", compression=\"zlib\")\n+ options = dict(\n+ software='tifffile',\n+ metadata={'axes': 'YXC'},\n+ photometric=PHOTOMETRIC.RGB,\n+ compression=COMPRESSION.ADOBE_DEFLATE, # ADOBE_DEFLATE aka ZLIB lossless compression\n+ tile=(16, 16),\n+ )\n for i, wsi_level in enumerate(wsi_levels):\n # the subfiletype parameter is a bitfield that determines if the wsi_level is a reduced version of\n # another image.\n", "issue": "MockHistoDataGenerator generates corrupted tiff files\nUpdate tiffwriter arguments after upgrade in #691 \n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any, Optional, Tuple, List, Union\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tifffile import TiffWriter\nfrom torch import Tensor\nfrom health_cpath.datasets.panda_dataset import PandaDataset\nfrom testhisto.mocks.base_data_generator import MockHistoDataGenerator, MockHistoDataType, PANDA_N_CLASSES\n\n\nclass TilesPositioningType(Enum):\n DIAGONAL = 0\n RANDOM = 1\n\n\nclass MockPandaSlidesGenerator(MockHistoDataGenerator):\n \"\"\"Generator class to create mock WSI on the fly.\n If tiles positioning is diagonal, a mock WSI resembles to:\n [** ]\n [ ** ]\n [ ** ]\n [ **]\n where * represents 2 tiles stitched along the Y axis.\n If tiles positioning is random, tiles are positioned randomly on the WSI grid.\n \"\"\"\n\n ISUP_GRADE = \"isup_grade\"\n\n def __init__(\n self,\n n_levels: int = 3,\n n_repeat_diag: int = 4,\n n_repeat_tile: int = 2,\n background_val: Union[int, float] = 255,\n tiles_pos_type: TilesPositioningType = TilesPositioningType.DIAGONAL,\n n_tiles_list: Optional[List[int]] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"\n :param n_levels: Number of levels for multi resolution WSI.\n :param n_repeat_diag: Number of repeat time along the diagonal axis, defaults to 4.\n :param n_repeat_tile: Number of repeat times of a tile along both Y and X axes, defaults to 2.\n :param background_val: A value to assign to the background, defaults to 255.\n :param tiles_pos_type: The tiles positioning type to define how tiles should be positioned within the WSI grid,\n defaults to TilesPositioningType.DIAGONAL.\n :param n_tiles_list: A list to use different n_tiles per slide for randomly positioned tiles.\n :param kwargs: Same params passed to 
MockHistoDataGenerator.\n \"\"\"\n super().__init__(**kwargs)\n\n self.n_levels = n_levels\n self.n_repeat_diag = n_repeat_diag\n self.n_repeat_tile = n_repeat_tile\n self.background_val = background_val\n self.tiles_pos_type = tiles_pos_type\n\n self.step_size = self.tile_size * self.n_repeat_tile\n self._dtype = np.uint8 if type(background_val) == int else np.float32\n self.img_size: int = self.n_repeat_diag * self.n_repeat_tile * self.tile_size\n self.n_tiles_list = n_tiles_list\n\n if self.n_tiles_list:\n assert len(self.n_tiles_list) == self.n_slides, \"n_tiles_list length should be equal to n_slides\"\n assert self.tiles_pos_type == TilesPositioningType.RANDOM, \"different n_tiles enabled only for randomly \"\n \"positionned tiles.\"\n\n def validate(self) -> None:\n assert (\n self.n_slides >= PANDA_N_CLASSES\n ), f\"The number of slides should be >= PANDA_N_CLASSES (i.e., {PANDA_N_CLASSES})\"\n\n def create_mock_metadata_dataframe(self) -> pd.DataFrame:\n \"\"\"Create a mock dataframe with random metadata.\"\"\"\n isup_grades = np.tile(list(self.ISUP_GRADE_MAPPING.keys()), self.n_slides // PANDA_N_CLASSES + 1,)\n mock_metadata: dict = {\n col: [] for col in [PandaDataset.SLIDE_ID_COLUMN, PandaDataset.MASK_COLUMN, *PandaDataset.METADATA_COLUMNS]\n }\n for slide_id in range(self.n_slides):\n mock_metadata[PandaDataset.SLIDE_ID_COLUMN].append(f\"_{slide_id}\")\n mock_metadata[PandaDataset.MASK_COLUMN].append(f\"_{slide_id}_mask\")\n mock_metadata[self.DATA_PROVIDER].append(np.random.choice(self.DATA_PROVIDERS_VALUES))\n mock_metadata[self.ISUP_GRADE].append(isup_grades[slide_id])\n mock_metadata[self.GLEASON_SCORE].append(np.random.choice(self.ISUP_GRADE_MAPPING[isup_grades[slide_id]]))\n df = pd.DataFrame(data=mock_metadata)\n csv_filename = self.dest_data_path / PandaDataset.DEFAULT_CSV_FILENAME\n df.to_csv(csv_filename, index=False)\n\n def create_mock_wsi(self, tiles: Tensor) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n if self.tiles_pos_type == TilesPositioningType.DIAGONAL:\n return self._create_wsi_from_stitched_tiles_along_diagonal_axis(tiles)\n elif self.tiles_pos_type == TilesPositioningType.RANDOM:\n return self._create_wsi_from_randomly_positioned_tiles(tiles), None\n else:\n raise NotImplementedError\n\n def _create_wsi_from_stitched_tiles_along_diagonal_axis(self, tiles: Tensor) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Create a whole slide image by stitching tiles along the diagonal axis.\n\n :param tiles: A tensor of tiles of shape (n_tiles, n_channels, tile_size, tile_size).\n :return: returns a wsi of shape (img_size, img_size, n_channels) and the tiles used to create it.\n The image is in channels_last format so that it can save by TiffWriter.\n \"\"\"\n mock_image = np.full(\n shape=(self.n_channels, self.img_size, self.img_size), fill_value=self.background_val, dtype=self._dtype\n )\n dump_tiles = []\n for i in range(self.n_repeat_diag):\n if self.mock_type == MockHistoDataType.PATHMNIST:\n if i == 0 or self.n_tiles > 1:\n tile = (\n (tiles[i % self.n_tiles].numpy()).astype(self._dtype)\n if self._dtype == np.uint8\n else tiles[i % self.n_tiles].numpy()\n )\n # fill the square diagonal with tile repeated n_repeat_tile times along X and Y axis.\n fill_square = np.tile(tile, (self.n_repeat_tile, self.n_repeat_tile))\n dump_tiles.append(tile)\n\n elif self.mock_type == MockHistoDataType.FAKE:\n if i == 0 or self.n_tiles > 1:\n # pick a random fake value to fill in the square diagonal.\n fill_square = np.random.uniform(0, self.background_val / (self.n_repeat_diag + 
1) * (i + 1))\n dump_tiles.append(\n np.full(\n shape=(self.n_channels, self.tile_size, self.tile_size),\n fill_value=fill_square,\n dtype=self._dtype,\n )\n )\n else:\n raise NotImplementedError\n mock_image[\n :, self.step_size * i: self.step_size * (i + 1), self.step_size * i: self.step_size * (i + 1)\n ] = fill_square\n return np.transpose(mock_image, (1, 2, 0)), np.array(dump_tiles) # switch to channels_last.\n\n def _create_wsi_from_randomly_positioned_tiles(self, tiles: Tensor) -> np.ndarray:\n \"\"\"Create a whole slide image by positioning tiles randomly in the whole slide image grid.\n\n :param tiles: A tensor of tiles of shape (n_tiles, n_channels, tile_size, tile_size).\n :return: returns a wsi of shape (img_size, img_size, n_channels) in channels_last format so that it can save by\n TiffWriter.\n \"\"\"\n mock_image = np.full(\n shape=(self.n_channels, self.img_size, self.img_size), fill_value=self.background_val, dtype=self._dtype\n )\n\n n_tiles_side = self.img_size // self.tile_size\n total_n_tiles = n_tiles_side ** 2\n coords = [\n (k // n_tiles_side, k % n_tiles_side)\n for k in np.random.choice(total_n_tiles, size=self.n_tiles, replace=False)\n ]\n for i in range(self.n_tiles):\n x, y = self.tile_size * np.array(coords[i])\n if self.mock_type == MockHistoDataType.PATHMNIST:\n new_tile = tiles[i].numpy()\n elif self.mock_type == MockHistoDataType.FAKE:\n new_tile = np.random.uniform(0, self.background_val / (self.n_repeat_diag + 1) * (i + 1))\n else:\n raise NotImplementedError\n mock_image[:, x: x + self.tile_size, y: y + self.tile_size] = new_tile\n return np.transpose(mock_image, (1, 2, 0))\n\n @staticmethod\n def _save_mock_wsi_as_tiff_file(file_path: Path, wsi_levels: List[np.ndarray]) -> None:\n \"\"\"Save a mock whole slide image as a tiff file of pyramidal levels.\n Warning: this function expects images to be in channels_last format (H, W, C).\n\n :param file_name: The tiff file name path.\n :param wsi_levels: List of whole slide images of different resolution levels in channels_last format.\n \"\"\"\n with TiffWriter(file_path, bigtiff=True) as tif:\n options = dict(photometric=\"rgb\", compression=\"zlib\")\n for i, wsi_level in enumerate(wsi_levels):\n # the subfiletype parameter is a bitfield that determines if the wsi_level is a reduced version of\n # another image.\n tif.write(wsi_level, **options, subfiletype=int(i > 0))\n\n def _create_multi_resolution_wsi(self, mock_image: np.ndarray) -> List[np.ndarray]:\n \"\"\"Create multi resolution versions of a mock image via 2 factor downsampling.\n\n :param mock_image: A mock image in channels_last format (H, W, 3).\n :return: Returns a list of n_levels downsampled versions of the original mock image.\n \"\"\"\n levels = [mock_image[:: 2 ** i, :: 2 ** i] for i in range(self.n_levels)]\n return levels\n\n def generate_mock_histo_data(self) -> None:\n \"\"\"Create mock wsi and save them as tiff files\"\"\"\n iterator = iter(self.dataloader) if self.dataloader else None\n\n slide_dir = self.dest_data_path / \"train_images\"\n slide_dir.mkdir(parents=True, exist_ok=True)\n tile_dir = self.dest_data_path / \"dump_tiles\"\n tile_dir.mkdir(parents=True, exist_ok=True)\n\n for slide_counter in range(self.n_slides):\n\n if self.n_tiles_list:\n self.total_tiles = self.n_tiles_list[slide_counter]\n self.n_tiles: int = self.n_tiles_list[slide_counter]\n self.dataloader: torch.utils.data.DataLoader = self.get_dataloader()\n iterator = iter(self.dataloader)\n\n tiles, _ = next(iterator) if iterator else (None, None)\n 
mock_image, dump_tiles = self.create_mock_wsi(tiles)\n wsi_levels = self._create_multi_resolution_wsi(mock_image)\n\n slide_tiff_filename = self.dest_data_path / \"train_images\" / f\"_{slide_counter}.tiff\"\n self._save_mock_wsi_as_tiff_file(slide_tiff_filename, wsi_levels)\n\n if dump_tiles is not None:\n dump_tiles_filename = self.dest_data_path / \"dump_tiles\" / f\"_{slide_counter}.npy\"\n np.save(dump_tiles_filename, dump_tiles)\n", "path": "hi-ml-cpath/testhisto/testhisto/mocks/slides_generator.py"}]}
| 3,673 | 373 |
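As a minimal, self-contained sketch of the TIFF-writing change applied in the record above (explicit photometric and compression enums plus a tile size when saving pyramidal levels), assuming a recent `tifffile` where these enums are importable from `tifffile.tifffile`; the output file name and pyramid sizes are arbitrary examples:

```python
import numpy as np
from tifffile import TiffWriter
from tifffile.tifffile import PHOTOMETRIC, COMPRESSION

# Three mock resolution levels in channels_last format (H, W, 3).
levels = [
    np.random.randint(0, 255, size=(256 // 2 ** i, 256 // 2 ** i, 3), dtype=np.uint8)
    for i in range(3)
]

with TiffWriter("mock_slide.tiff", bigtiff=True) as tif:
    options = dict(
        photometric=PHOTOMETRIC.RGB,
        compression=COMPRESSION.ADOBE_DEFLATE,  # lossless zlib/deflate
        tile=(16, 16),
    )
    for i, level in enumerate(levels):
        # subfiletype=1 marks a level as a reduced-resolution copy of the base image.
        tif.write(level, subfiletype=int(i > 0), **options)
```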
gh_patches_debug_30955
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-1595
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TQDMProgressBar not working in TF-2.2.0rc1
**System information**
- OS Platform and Distribution: Linux Ubuntu 18.04
- TensorFlow version and how it was installed (source or binary): TF-2.2.0rc1 (wheel compiled from source)
- TensorFlow-Addons version and how it was installed (source or binary): 0.8.3 installed via pip
- Python version: 3.7.6
- Is GPU used? (yes/no): Yes
**Describe the bug**
Executing `model.fit()` with the `TQDMProgressBar()` callback results in `KeyError: 'metrics'` because of a change in TF-2.2 that moves initialization of `model.metrics` (and `model.metrics_names`) from compile stage to train stage.
**Code to reproduce the issue**
```python
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
x = np.random.random((5,1,5))
y = np.random.random((5,1,5))
inputs = tf.keras.layers.Input(shape=(3,))
outputs = tf.keras.layers.Dense(2, name="out_1")(inputs)
model = tf.keras.models.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer="Adam", loss="mse", metrics=["acc"])
pg = tfa.callbacks.TQDMProgressBar()
model_callbacks = [pg, ]
VERBOSE=0
history = model.fit(
x,
y,
epochs=100,
verbose=VERBOSE,
callbacks=model_callbacks
)
```
**Other info / logs**
```python
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-23-fdbb03f574a1> in <module>
48 # class_weight=class_weights,
49 verbose=VERBOSE,
---> 50 callbacks=model_callbacks,
51 )
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
63 def _method_wrapper(self, *args, **kwargs):
64 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 65 return method(self, *args, **kwargs)
66
67 # Running inside `run_distribute_coordinator` already.
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
763 self.stop_training = False
764 train_function = self.make_train_function()
--> 765 callbacks.on_train_begin()
766 # Handle fault-tolerance for multi-worker.
767 # TODO(omalleyt): Fix the ordering issues that mean this has to
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/callbacks.py in on_train_begin(self, logs)
445 logs = self._process_logs(logs)
446 for callback in self.callbacks:
--> 447 callback.on_train_begin(logs)
448
449 def on_train_end(self, logs=None):
~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow_addons/callbacks/tqdm_progress_bar.py in on_train_begin(self, logs)
100 def on_train_begin(self, logs=None):
101 self.num_epochs = self.params["epochs"]
--> 102 self.metrics = self.params["metrics"]
103
104 if self.show_overall_progress:
KeyError: 'metrics'
```
</issue>
<code>
[start of tensorflow_addons/callbacks/tqdm_progress_bar.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TQDM Progress Bar."""
16
17 import time
18 import tensorflow as tf
19 from collections import defaultdict
20 from typeguard import typechecked
21
22 from tensorflow.keras.callbacks import Callback
23
24
25 @tf.keras.utils.register_keras_serializable(package="Addons")
26 class TQDMProgressBar(Callback):
27 """TQDM Progress Bar for Tensorflow Keras.
28
29 Args:
30 metrics_separator: Custom separator between metrics.
31 Defaults to ' - '.
32 overall_bar_format: Custom bar format for overall
33 (outer) progress bar, see https://github.com/tqdm/tqdm#parameters
34 for more detail.
35 epoch_bar_format: Custom bar format for epoch
36 (inner) progress bar, see https://github.com/tqdm/tqdm#parameters
37 for more detail.
38 update_per_second: Maximum number of updates in the epochs bar
39 per second, this is to prevent small batches from slowing down
40 training. Defaults to 10.
41 metrics_format: Custom format for how metrics are formatted.
42 See https://github.com/tqdm/tqdm#parameters for more detail.
43 leave_epoch_progress: True to leave epoch progress bars.
44 leave_overall_progress: True to leave overall progress bar.
45 show_epoch_progress: False to hide epoch progress bars.
46 show_overall_progress: False to hide overall progress bar.
47 """
48
49 @typechecked
50 def __init__(
51 self,
52 metrics_separator: str = " - ",
53 overall_bar_format: str = "{l_bar}{bar} {n_fmt}/{total_fmt} ETA: "
54 "{remaining}s, {rate_fmt}{postfix}",
55 epoch_bar_format: str = "{n_fmt}/{total_fmt}{bar} ETA: "
56 "{remaining}s - {desc}",
57 metrics_format: str = "{name}: {value:0.4f}",
58 update_per_second: int = 10,
59 leave_epoch_progress: bool = True,
60 leave_overall_progress: bool = True,
61 show_epoch_progress: bool = True,
62 show_overall_progress: bool = True,
63 ):
64
65 try:
66 # import tqdm here because tqdm is not a required package
67 # for addons
68 import tqdm
69
70 version_message = "Please update your TQDM version to >= 4.36.1, "
71 "you have version {}. To update, run !pip install -U tqdm"
72 assert tqdm.__version__ >= "4.36.1", version_message.format(
73 tqdm.__version__
74 )
75 from tqdm.auto import tqdm
76
77 self.tqdm = tqdm
78 except ImportError:
79 raise ImportError("Please install tqdm via pip install tqdm")
80
81 self.metrics_separator = metrics_separator
82 self.overall_bar_format = overall_bar_format
83 self.epoch_bar_format = epoch_bar_format
84 self.leave_epoch_progress = leave_epoch_progress
85 self.leave_overall_progress = leave_overall_progress
86 self.show_epoch_progress = show_epoch_progress
87 self.show_overall_progress = show_overall_progress
88 self.metrics_format = metrics_format
89
90 # compute update interval (inverse of update per second)
91 self.update_interval = 1 / update_per_second
92
93 self.last_update_time = time.time()
94 self.overall_progress_tqdm = None
95 self.epoch_progress_tqdm = None
96 self.num_epochs = None
97 self.logs = None
98 self.metrics = None
99
100 def on_train_begin(self, logs=None):
101 self.num_epochs = self.params["epochs"]
102 self.metrics = self.params["metrics"]
103
104 if self.show_overall_progress:
105 self.overall_progress_tqdm = self.tqdm(
106 desc="Training",
107 total=self.num_epochs,
108 bar_format=self.overall_bar_format,
109 leave=self.leave_overall_progress,
110 dynamic_ncols=True,
111 unit="epochs",
112 )
113
114 # set counting mode
115 if "samples" in self.params:
116 self.mode = "samples"
117 self.total_steps = self.params["samples"]
118 else:
119 self.mode = "steps"
120 self.total_steps = self.params["steps"]
121
122 def on_train_end(self, logs={}):
123 if self.show_overall_progress:
124 self.overall_progress_tqdm.close()
125
126 def on_epoch_begin(self, epoch, logs={}):
127 current_epoch_description = "Epoch {epoch}/{num_epochs}".format(
128 epoch=epoch + 1, num_epochs=self.num_epochs
129 )
130
131 if self.show_epoch_progress:
132 print(current_epoch_description)
133 self.epoch_progress_tqdm = self.tqdm(
134 total=self.total_steps,
135 bar_format=self.epoch_bar_format,
136 leave=self.leave_epoch_progress,
137 dynamic_ncols=True,
138 unit=self.mode,
139 )
140
141 self.num_samples_seen = 0
142 self.steps_to_update = 0
143 self.steps_so_far = 0
144 self.logs = defaultdict(float)
145
146 def on_epoch_end(self, epoch, logs={}):
147
148 if self.show_epoch_progress:
149 metrics = self.format_metrics(logs)
150 self.epoch_progress_tqdm.desc = metrics
151
152 # set miniters and mininterval to 0 so last update displays
153 self.epoch_progress_tqdm.miniters = 0
154 self.epoch_progress_tqdm.mininterval = 0
155
156 # update the rest of the steps in epoch progress bar
157 self.epoch_progress_tqdm.update(
158 self.total_steps - self.epoch_progress_tqdm.n
159 )
160 self.epoch_progress_tqdm.close()
161
162 if self.show_overall_progress:
163 self.overall_progress_tqdm.update(1)
164
165 def on_batch_end(self, batch, logs={}):
166 if self.mode == "samples":
167 batch_size = logs["size"]
168 else:
169 batch_size = 1
170
171 self.num_samples_seen += batch_size
172 self.steps_to_update += 1
173 self.steps_so_far += 1
174
175 if self.steps_so_far < self.total_steps:
176
177 for metric, value in logs.items():
178 self.logs[metric] += value * batch_size
179
180 now = time.time()
181 time_diff = now - self.last_update_time
182 if self.show_epoch_progress and time_diff >= self.update_interval:
183
184 # update the epoch progress bar
185 metrics = self.format_metrics(self.logs, self.num_samples_seen)
186 self.epoch_progress_tqdm.desc = metrics
187 self.epoch_progress_tqdm.update(self.steps_to_update)
188
189 # reset steps to update
190 self.steps_to_update = 0
191
192 # update timestamp for last update
193 self.last_update_time = now
194
195 def format_metrics(self, logs={}, factor=1):
196 """Format metrics in logs into a string.
197
198 Arguments:
199 logs: dictionary of metrics and their values. Defaults to
200 empty dictionary.
201 factor (int): The factor we want to divide the metrics in logs
202 by, useful when we are computing the logs after each batch.
203 Defaults to 1.
204
205 Returns:
206 metrics_string: a string displaying metrics using the given
207 formators passed in through the constructor.
208 """
209
210 metric_value_pairs = []
211 for metric in self.metrics:
212 if metric in logs:
213 value = logs[metric] / factor
214 pair = self.metrics_format.format(name=metric, value=value)
215 metric_value_pairs.append(pair)
216 metrics_string = self.metrics_separator.join(metric_value_pairs)
217 return metrics_string
218
219 def get_config(self):
220 config = {
221 "metrics_separator": self.metrics_separator,
222 "overall_bar_format": self.overall_bar_format,
223 "epoch_bar_format": self.epoch_bar_format,
224 "leave_epoch_progress": self.leave_epoch_progress,
225 "leave_overall_progress": self.leave_overall_progress,
226 "show_epoch_progress": self.show_epoch_progress,
227 "show_overall_progress": self.show_overall_progress,
228 }
229
230 base_config = super().get_config()
231 return {**base_config, **config}
232
[end of tensorflow_addons/callbacks/tqdm_progress_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensorflow_addons/callbacks/tqdm_progress_bar.py b/tensorflow_addons/callbacks/tqdm_progress_bar.py
--- a/tensorflow_addons/callbacks/tqdm_progress_bar.py
+++ b/tensorflow_addons/callbacks/tqdm_progress_bar.py
@@ -95,11 +95,10 @@
self.epoch_progress_tqdm = None
self.num_epochs = None
self.logs = None
- self.metrics = None
+ super().__init__()
def on_train_begin(self, logs=None):
self.num_epochs = self.params["epochs"]
- self.metrics = self.params["metrics"]
if self.show_overall_progress:
self.overall_progress_tqdm = self.tqdm(
@@ -112,12 +111,8 @@
)
# set counting mode
- if "samples" in self.params:
- self.mode = "samples"
- self.total_steps = self.params["samples"]
- else:
- self.mode = "steps"
- self.total_steps = self.params["steps"]
+ self.mode = "steps"
+ self.total_steps = self.params["steps"]
def on_train_end(self, logs={}):
if self.show_overall_progress:
@@ -208,11 +203,11 @@
"""
metric_value_pairs = []
- for metric in self.metrics:
- if metric in logs:
- value = logs[metric] / factor
- pair = self.metrics_format.format(name=metric, value=value)
- metric_value_pairs.append(pair)
+ for key, value in logs.items():
+ if key in ["batch", "size"]:
+ continue
+ pair = self.metrics_format.format(name=key, value=value / factor)
+ metric_value_pairs.append(pair)
metrics_string = self.metrics_separator.join(metric_value_pairs)
return metrics_string
|
{"golden_diff": "diff --git a/tensorflow_addons/callbacks/tqdm_progress_bar.py b/tensorflow_addons/callbacks/tqdm_progress_bar.py\n--- a/tensorflow_addons/callbacks/tqdm_progress_bar.py\n+++ b/tensorflow_addons/callbacks/tqdm_progress_bar.py\n@@ -95,11 +95,10 @@\n self.epoch_progress_tqdm = None\n self.num_epochs = None\n self.logs = None\n- self.metrics = None\n+ super().__init__()\n \n def on_train_begin(self, logs=None):\n self.num_epochs = self.params[\"epochs\"]\n- self.metrics = self.params[\"metrics\"]\n \n if self.show_overall_progress:\n self.overall_progress_tqdm = self.tqdm(\n@@ -112,12 +111,8 @@\n )\n \n # set counting mode\n- if \"samples\" in self.params:\n- self.mode = \"samples\"\n- self.total_steps = self.params[\"samples\"]\n- else:\n- self.mode = \"steps\"\n- self.total_steps = self.params[\"steps\"]\n+ self.mode = \"steps\"\n+ self.total_steps = self.params[\"steps\"]\n \n def on_train_end(self, logs={}):\n if self.show_overall_progress:\n@@ -208,11 +203,11 @@\n \"\"\"\n \n metric_value_pairs = []\n- for metric in self.metrics:\n- if metric in logs:\n- value = logs[metric] / factor\n- pair = self.metrics_format.format(name=metric, value=value)\n- metric_value_pairs.append(pair)\n+ for key, value in logs.items():\n+ if key in [\"batch\", \"size\"]:\n+ continue\n+ pair = self.metrics_format.format(name=key, value=value / factor)\n+ metric_value_pairs.append(pair)\n metrics_string = self.metrics_separator.join(metric_value_pairs)\n return metrics_string\n", "issue": "TQDMProgressBar not working in TF-2.2.0rc1\n**System information**\r\n- OS Platform and Distribution: Linux Ubuntu 18.04\r\n- TensorFlow version and how it was installed (source or binary): TF-2.2.0rc1 (wheel compiled from source)\r\n- TensorFlow-Addons version and how it was installed (source or binary): 0.8.3 installed via pip\r\n- Python version: 3.7.6\r\n- Is GPU used? 
(yes/no): Yes\r\n\r\n**Describe the bug**\r\n\r\nExecuting `model.fit()` with the `TQDMProgressBar()` callback results in `KeyError: 'metrics'` because of a change in TF-2.2 that moves initialization of `model.metrics` (and `model.metrics_names`) from compile stage to train stage.\r\n\r\n**Code to reproduce the issue**\r\n\r\n```python\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\n\r\nx = np.random.random((5,1,5))\r\ny = np.random.random((5,1,5))\r\n\r\ninputs = tf.keras.layers.Input(shape=(3,))\r\noutputs = tf.keras.layers.Dense(2, name=\"out_1\")(inputs)\r\nmodel = tf.keras.models.Model(inputs=inputs, outputs=outputs)\r\nmodel.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"acc\"])\r\n\r\npg = tfa.callbacks.TQDMProgressBar()\r\nmodel_callbacks = [pg, ]\r\nVERBOSE=0\r\nhistory = model.fit(\r\n x,\r\n y,\r\n epochs=100,\r\n verbose=VERBOSE,\r\n callbacks=model_callbacks\r\n)\r\n````\r\n\r\n**Other info / logs**\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-23-fdbb03f574a1> in <module>\r\n 48 # class_weight=class_weights,\r\n 49 verbose=VERBOSE,\r\n---> 50 callbacks=model_callbacks,\r\n 51 )\r\n\r\n~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)\r\n 63 def _method_wrapper(self, *args, **kwargs):\r\n 64 if not self._in_multi_worker_mode(): # pylint: disable=protected-access\r\n---> 65 return method(self, *args, **kwargs)\r\n 66 \r\n 67 # Running inside `run_distribute_coordinator` already.\r\n\r\n~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)\r\n 763 self.stop_training = False\r\n 764 train_function = self.make_train_function()\r\n--> 765 callbacks.on_train_begin()\r\n 766 # Handle fault-tolerance for multi-worker.\r\n 767 # TODO(omalleyt): Fix the ordering issues that mean this has to\r\n\r\n~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow/python/keras/callbacks.py in on_train_begin(self, logs)\r\n 445 logs = self._process_logs(logs)\r\n 446 for callback in self.callbacks:\r\n--> 447 callback.on_train_begin(logs)\r\n 448 \r\n 449 def on_train_end(self, logs=None):\r\n\r\n~/.pyenv/versions/3.7.6/lib/python3.7/site-packages/tensorflow_addons/callbacks/tqdm_progress_bar.py in on_train_begin(self, logs)\r\n 100 def on_train_begin(self, logs=None):\r\n 101 self.num_epochs = self.params[\"epochs\"]\r\n--> 102 self.metrics = self.params[\"metrics\"]\r\n 103 \r\n 104 if self.show_overall_progress:\r\n\r\nKeyError: 'metrics'\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TQDM Progress Bar.\"\"\"\n\nimport time\nimport tensorflow as tf\nfrom collections import defaultdict\nfrom typeguard import typechecked\n\nfrom tensorflow.keras.callbacks import Callback\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass TQDMProgressBar(Callback):\n \"\"\"TQDM Progress Bar for Tensorflow Keras.\n\n Args:\n metrics_separator: Custom separator between metrics.\n Defaults to ' - '.\n overall_bar_format: Custom bar format for overall\n (outer) progress bar, see https://github.com/tqdm/tqdm#parameters\n for more detail.\n epoch_bar_format: Custom bar format for epoch\n (inner) progress bar, see https://github.com/tqdm/tqdm#parameters\n for more detail.\n update_per_second: Maximum number of updates in the epochs bar\n per second, this is to prevent small batches from slowing down\n training. Defaults to 10.\n metrics_format: Custom format for how metrics are formatted.\n See https://github.com/tqdm/tqdm#parameters for more detail.\n leave_epoch_progress: True to leave epoch progress bars.\n leave_overall_progress: True to leave overall progress bar.\n show_epoch_progress: False to hide epoch progress bars.\n show_overall_progress: False to hide overall progress bar.\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n metrics_separator: str = \" - \",\n overall_bar_format: str = \"{l_bar}{bar} {n_fmt}/{total_fmt} ETA: \"\n \"{remaining}s, {rate_fmt}{postfix}\",\n epoch_bar_format: str = \"{n_fmt}/{total_fmt}{bar} ETA: \"\n \"{remaining}s - {desc}\",\n metrics_format: str = \"{name}: {value:0.4f}\",\n update_per_second: int = 10,\n leave_epoch_progress: bool = True,\n leave_overall_progress: bool = True,\n show_epoch_progress: bool = True,\n show_overall_progress: bool = True,\n ):\n\n try:\n # import tqdm here because tqdm is not a required package\n # for addons\n import tqdm\n\n version_message = \"Please update your TQDM version to >= 4.36.1, \"\n \"you have version {}. 
To update, run !pip install -U tqdm\"\n assert tqdm.__version__ >= \"4.36.1\", version_message.format(\n tqdm.__version__\n )\n from tqdm.auto import tqdm\n\n self.tqdm = tqdm\n except ImportError:\n raise ImportError(\"Please install tqdm via pip install tqdm\")\n\n self.metrics_separator = metrics_separator\n self.overall_bar_format = overall_bar_format\n self.epoch_bar_format = epoch_bar_format\n self.leave_epoch_progress = leave_epoch_progress\n self.leave_overall_progress = leave_overall_progress\n self.show_epoch_progress = show_epoch_progress\n self.show_overall_progress = show_overall_progress\n self.metrics_format = metrics_format\n\n # compute update interval (inverse of update per second)\n self.update_interval = 1 / update_per_second\n\n self.last_update_time = time.time()\n self.overall_progress_tqdm = None\n self.epoch_progress_tqdm = None\n self.num_epochs = None\n self.logs = None\n self.metrics = None\n\n def on_train_begin(self, logs=None):\n self.num_epochs = self.params[\"epochs\"]\n self.metrics = self.params[\"metrics\"]\n\n if self.show_overall_progress:\n self.overall_progress_tqdm = self.tqdm(\n desc=\"Training\",\n total=self.num_epochs,\n bar_format=self.overall_bar_format,\n leave=self.leave_overall_progress,\n dynamic_ncols=True,\n unit=\"epochs\",\n )\n\n # set counting mode\n if \"samples\" in self.params:\n self.mode = \"samples\"\n self.total_steps = self.params[\"samples\"]\n else:\n self.mode = \"steps\"\n self.total_steps = self.params[\"steps\"]\n\n def on_train_end(self, logs={}):\n if self.show_overall_progress:\n self.overall_progress_tqdm.close()\n\n def on_epoch_begin(self, epoch, logs={}):\n current_epoch_description = \"Epoch {epoch}/{num_epochs}\".format(\n epoch=epoch + 1, num_epochs=self.num_epochs\n )\n\n if self.show_epoch_progress:\n print(current_epoch_description)\n self.epoch_progress_tqdm = self.tqdm(\n total=self.total_steps,\n bar_format=self.epoch_bar_format,\n leave=self.leave_epoch_progress,\n dynamic_ncols=True,\n unit=self.mode,\n )\n\n self.num_samples_seen = 0\n self.steps_to_update = 0\n self.steps_so_far = 0\n self.logs = defaultdict(float)\n\n def on_epoch_end(self, epoch, logs={}):\n\n if self.show_epoch_progress:\n metrics = self.format_metrics(logs)\n self.epoch_progress_tqdm.desc = metrics\n\n # set miniters and mininterval to 0 so last update displays\n self.epoch_progress_tqdm.miniters = 0\n self.epoch_progress_tqdm.mininterval = 0\n\n # update the rest of the steps in epoch progress bar\n self.epoch_progress_tqdm.update(\n self.total_steps - self.epoch_progress_tqdm.n\n )\n self.epoch_progress_tqdm.close()\n\n if self.show_overall_progress:\n self.overall_progress_tqdm.update(1)\n\n def on_batch_end(self, batch, logs={}):\n if self.mode == \"samples\":\n batch_size = logs[\"size\"]\n else:\n batch_size = 1\n\n self.num_samples_seen += batch_size\n self.steps_to_update += 1\n self.steps_so_far += 1\n\n if self.steps_so_far < self.total_steps:\n\n for metric, value in logs.items():\n self.logs[metric] += value * batch_size\n\n now = time.time()\n time_diff = now - self.last_update_time\n if self.show_epoch_progress and time_diff >= self.update_interval:\n\n # update the epoch progress bar\n metrics = self.format_metrics(self.logs, self.num_samples_seen)\n self.epoch_progress_tqdm.desc = metrics\n self.epoch_progress_tqdm.update(self.steps_to_update)\n\n # reset steps to update\n self.steps_to_update = 0\n\n # update timestamp for last update\n self.last_update_time = now\n\n def format_metrics(self, logs={}, 
factor=1):\n \"\"\"Format metrics in logs into a string.\n\n Arguments:\n logs: dictionary of metrics and their values. Defaults to\n empty dictionary.\n factor (int): The factor we want to divide the metrics in logs\n by, useful when we are computing the logs after each batch.\n Defaults to 1.\n\n Returns:\n metrics_string: a string displaying metrics using the given\n formators passed in through the constructor.\n \"\"\"\n\n metric_value_pairs = []\n for metric in self.metrics:\n if metric in logs:\n value = logs[metric] / factor\n pair = self.metrics_format.format(name=metric, value=value)\n metric_value_pairs.append(pair)\n metrics_string = self.metrics_separator.join(metric_value_pairs)\n return metrics_string\n\n def get_config(self):\n config = {\n \"metrics_separator\": self.metrics_separator,\n \"overall_bar_format\": self.overall_bar_format,\n \"epoch_bar_format\": self.epoch_bar_format,\n \"leave_epoch_progress\": self.leave_epoch_progress,\n \"leave_overall_progress\": self.leave_overall_progress,\n \"show_epoch_progress\": self.show_epoch_progress,\n \"show_overall_progress\": self.show_overall_progress,\n }\n\n base_config = super().get_config()\n return {**base_config, **config}\n", "path": "tensorflow_addons/callbacks/tqdm_progress_bar.py"}]}
| 3,882 | 422 |
gh_patches_debug_37636
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1222
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Enhancement request] Meaningful error on labels naming conflict
Feature description
---------
Try renaming a label to an existing name.
You get a 500 error.
Desired: a meaningful error.
Related: #601, #826.
</issue>
<code>
[start of app/api/views/label.py]
1 import json
2
3 from django.db import IntegrityError, transaction
4 from django.shortcuts import get_object_or_404
5 from rest_framework import generics, status
6 from rest_framework.exceptions import ParseError
7 from rest_framework.parsers import MultiPartParser
8 from rest_framework.permissions import IsAuthenticated
9 from rest_framework.response import Response
10 from rest_framework.views import APIView
11
12 from ..models import Label, Project
13 from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin
14 from ..serializers import LabelSerializer
15
16
17 class LabelList(generics.ListCreateAPIView):
18 serializer_class = LabelSerializer
19 pagination_class = None
20 permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]
21
22 def get_queryset(self):
23 project = get_object_or_404(Project, pk=self.kwargs['project_id'])
24 return project.labels
25
26 def perform_create(self, serializer):
27 project = get_object_or_404(Project, pk=self.kwargs['project_id'])
28 serializer.save(project=project)
29
30
31 class LabelDetail(generics.RetrieveUpdateDestroyAPIView):
32 queryset = Label.objects.all()
33 serializer_class = LabelSerializer
34 lookup_url_kwarg = 'label_id'
35 permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]
36
37
38 class LabelUploadAPI(APIView):
39 parser_classes = (MultiPartParser,)
40 permission_classes = [IsAuthenticated & IsProjectAdmin]
41
42 @transaction.atomic
43 def post(self, request, *args, **kwargs):
44 if 'file' not in request.data:
45 raise ParseError('Empty content')
46 labels = json.load(request.data['file'])
47 project = get_object_or_404(Project, pk=kwargs['project_id'])
48 try:
49 for label in labels:
50 serializer = LabelSerializer(data=label)
51 serializer.is_valid(raise_exception=True)
52 serializer.save(project=project)
53 return Response(status=status.HTTP_201_CREATED)
54 except IntegrityError:
55 content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}
56 return Response(content, status=status.HTTP_400_BAD_REQUEST)
57
[end of app/api/views/label.py]
[start of app/api/exceptions.py]
1 from rest_framework import status
2 from rest_framework.exceptions import (APIException, PermissionDenied,
3 ValidationError)
4
5
6 class FileParseException(APIException):
7 status_code = status.HTTP_400_BAD_REQUEST
8 default_detail = 'Invalid file format, line {}: {}'
9 default_code = 'invalid'
10
11 def __init__(self, line_num, line, code=None):
12 detail = self.default_detail.format(line_num, line)
13 super().__init__(detail, code)
14
15
16 class AutoLabelingException(APIException):
17 status_code = status.HTTP_400_BAD_REQUEST
18 default_detail = 'Auto labeling not allowed for the document with labels.'
19
20
21 class AutoLabeliingPermissionDenied(PermissionDenied):
22 default_detail = 'You do not have permission to perform auto labeling.' \
23 'Please ask the project administrators to add you.'
24
25
26 class URLConnectionError(ValidationError):
27 default_detail = 'Failed to establish a connection. Please check the URL or network.'
28
29
30 class AWSTokenError(ValidationError):
31 default_detail = 'The security token included in the request is invalid.'
32
33
34 class SampleDataException(ValidationError):
35 default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \
36 'Please specify another sample data which returns at least one label.'
37
[end of app/api/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/exceptions.py b/app/api/exceptions.py
--- a/app/api/exceptions.py
+++ b/app/api/exceptions.py
@@ -34,3 +34,8 @@
class SampleDataException(ValidationError):
default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \
'Please specify another sample data which returns at least one label.'
+
+
+class LabelValidationError(APIException):
+ status_code = status.HTTP_400_BAD_REQUEST
+ default_detail = 'You cannot create a label with same name or shortcut key.'
diff --git a/app/api/views/label.py b/app/api/views/label.py
--- a/app/api/views/label.py
+++ b/app/api/views/label.py
@@ -9,6 +9,7 @@
from rest_framework.response import Response
from rest_framework.views import APIView
+from ..exceptions import LabelValidationError
from ..models import Label, Project
from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin
from ..serializers import LabelSerializer
@@ -27,6 +28,11 @@
project = get_object_or_404(Project, pk=self.kwargs['project_id'])
serializer.save(project=project)
+ def delete(self, request, *args, **kwargs):
+ delete_ids = request.data['ids']
+ Label.objects.filter(pk__in=delete_ids).delete()
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
class LabelDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Label.objects.all()
@@ -43,14 +49,14 @@
def post(self, request, *args, **kwargs):
if 'file' not in request.data:
raise ParseError('Empty content')
- labels = json.load(request.data['file'])
project = get_object_or_404(Project, pk=kwargs['project_id'])
try:
- for label in labels:
- serializer = LabelSerializer(data=label)
- serializer.is_valid(raise_exception=True)
- serializer.save(project=project)
+ labels = json.load(request.data['file'])
+ serializer = LabelSerializer(data=labels, many=True)
+ serializer.is_valid(raise_exception=True)
+ serializer.save(project=project)
return Response(status=status.HTTP_201_CREATED)
+ except json.decoder.JSONDecodeError:
+ raise ParseError('The file format is invalid.')
except IntegrityError:
- content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}
- return Response(content, status=status.HTTP_400_BAD_REQUEST)
+ raise LabelValidationError
|
{"golden_diff": "diff --git a/app/api/exceptions.py b/app/api/exceptions.py\n--- a/app/api/exceptions.py\n+++ b/app/api/exceptions.py\n@@ -34,3 +34,8 @@\n class SampleDataException(ValidationError):\n default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \\\n 'Please specify another sample data which returns at least one label.'\n+\n+\n+class LabelValidationError(APIException):\n+ status_code = status.HTTP_400_BAD_REQUEST\n+ default_detail = 'You cannot create a label with same name or shortcut key.'\ndiff --git a/app/api/views/label.py b/app/api/views/label.py\n--- a/app/api/views/label.py\n+++ b/app/api/views/label.py\n@@ -9,6 +9,7 @@\n from rest_framework.response import Response\n from rest_framework.views import APIView\n \n+from ..exceptions import LabelValidationError\n from ..models import Label, Project\n from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin\n from ..serializers import LabelSerializer\n@@ -27,6 +28,11 @@\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n serializer.save(project=project)\n \n+ def delete(self, request, *args, **kwargs):\n+ delete_ids = request.data['ids']\n+ Label.objects.filter(pk__in=delete_ids).delete()\n+ return Response(status=status.HTTP_204_NO_CONTENT)\n+\n \n class LabelDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Label.objects.all()\n@@ -43,14 +49,14 @@\n def post(self, request, *args, **kwargs):\n if 'file' not in request.data:\n raise ParseError('Empty content')\n- labels = json.load(request.data['file'])\n project = get_object_or_404(Project, pk=kwargs['project_id'])\n try:\n- for label in labels:\n- serializer = LabelSerializer(data=label)\n- serializer.is_valid(raise_exception=True)\n- serializer.save(project=project)\n+ labels = json.load(request.data['file'])\n+ serializer = LabelSerializer(data=labels, many=True)\n+ serializer.is_valid(raise_exception=True)\n+ serializer.save(project=project)\n return Response(status=status.HTTP_201_CREATED)\n+ except json.decoder.JSONDecodeError:\n+ raise ParseError('The file format is invalid.')\n except IntegrityError:\n- content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}\n- return Response(content, status=status.HTTP_400_BAD_REQUEST)\n+ raise LabelValidationError\n", "issue": "[Enhancement request] Meaningful error on labels naming conflict\nFeature description\r\n---------\r\nTry rename a label to an existing name.\r\n\r\nYou get a 500 error.\r\n\r\nDesired: a meaningful error.\r\n\r\nRelated: #601, #826.\n", "before_files": [{"content": "import json\n\nfrom django.db import IntegrityError, transaction\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics, status\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom ..models import Label, Project\nfrom ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin\nfrom ..serializers import LabelSerializer\n\n\nclass LabelList(generics.ListCreateAPIView):\n serializer_class = LabelSerializer\n pagination_class = None\n permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]\n\n def get_queryset(self):\n project = get_object_or_404(Project, pk=self.kwargs['project_id'])\n return project.labels\n\n def perform_create(self, serializer):\n project = get_object_or_404(Project, 
pk=self.kwargs['project_id'])\n serializer.save(project=project)\n\n\nclass LabelDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Label.objects.all()\n serializer_class = LabelSerializer\n lookup_url_kwarg = 'label_id'\n permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]\n\n\nclass LabelUploadAPI(APIView):\n parser_classes = (MultiPartParser,)\n permission_classes = [IsAuthenticated & IsProjectAdmin]\n\n @transaction.atomic\n def post(self, request, *args, **kwargs):\n if 'file' not in request.data:\n raise ParseError('Empty content')\n labels = json.load(request.data['file'])\n project = get_object_or_404(Project, pk=kwargs['project_id'])\n try:\n for label in labels:\n serializer = LabelSerializer(data=label)\n serializer.is_valid(raise_exception=True)\n serializer.save(project=project)\n return Response(status=status.HTTP_201_CREATED)\n except IntegrityError:\n content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n", "path": "app/api/views/label.py"}, {"content": "from rest_framework import status\nfrom rest_framework.exceptions import (APIException, PermissionDenied,\n ValidationError)\n\n\nclass FileParseException(APIException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = 'Invalid file format, line {}: {}'\n default_code = 'invalid'\n\n def __init__(self, line_num, line, code=None):\n detail = self.default_detail.format(line_num, line)\n super().__init__(detail, code)\n\n\nclass AutoLabelingException(APIException):\n status_code = status.HTTP_400_BAD_REQUEST\n default_detail = 'Auto labeling not allowed for the document with labels.'\n\n\nclass AutoLabeliingPermissionDenied(PermissionDenied):\n default_detail = 'You do not have permission to perform auto labeling.' \\\n 'Please ask the project administrators to add you.'\n\n\nclass URLConnectionError(ValidationError):\n default_detail = 'Failed to establish a connection. Please check the URL or network.'\n\n\nclass AWSTokenError(ValidationError):\n default_detail = 'The security token included in the request is invalid.'\n\n\nclass SampleDataException(ValidationError):\n default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \\\n 'Please specify another sample data which returns at least one label.'\n", "path": "app/api/exceptions.py"}]}
| 1,499 | 577 |
gh_patches_debug_37307
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-464
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scanning IAM policy only takes First SID in json rather than looping through
**Describe the bug**
It seems that when specifying more than one SID in a policy JSON, the check does not loop through each statement; rather, it just looks at the first one and stops.
**To Reproduce**
Steps to reproduce the behavior:
1. Create policy with more than one SID
`{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SqsAllow",
"Effect": "Allow",
"Action": [
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"sqs:ListDeadLetterSourceQueues",
"sqs:ListQueues",
"sqs:ReceiveMessage",
"sqs:SendMessage",
"sqs:SendMessageBatch"
],
"Resource": "*"
},
{
"Sid": "ALL",
"Effect": "Allow",
"Action": [ "*"
],
"Resource": ["*"]
},`
2. Run Checkov against policy
**Expected behavior**
I would expect the scan to check each statement within the policy JSON rather than only the first one
**Desktop (please complete the following information):**
- OS: Mac
- Checkov Version: 1.0.442
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 import json
4
5
6 class IAMStarActionPolicyDocument(BaseResourceCheck):
7
8 def __init__(self):
9 name = "Ensure no IAM policies documents allow \"*\" as a statement's actions"
10 id = "CKV_AWS_63"
11 supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']
12 categories = [CheckCategories.IAM]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf):
16 if 'policy' in conf.keys():
17 try:
18 policy_block = json.loads(conf['policy'][0])
19 if 'Statement' in policy_block.keys():
20 if 'Action' in policy_block['Statement'][0] and \
21 policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
22 policy_block['Statement'][0]['Action'][0] == "*":
23 return CheckResult.FAILED
24 except: # nosec
25 pass
26 return CheckResult.PASSED
27
28
29 check = IAMStarActionPolicyDocument()
30
[end of checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py]
[start of checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 import json
4
5
6 class IAMAdminPolicyDocument(BaseResourceCheck):
7
8 def __init__(self):
9 name = "Ensure IAM policies that allow full \"*-*\" administrative privileges are not created"
10 id = "CKV_AWS_62"
11 supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']
12 categories = [CheckCategories.IAM]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf):
16 if 'policy' in conf.keys():
17 try:
18 policy_block = json.loads(conf['policy'][0])
19 if 'Statement' in policy_block.keys():
20 if 'Action' in policy_block['Statement'][0] and \
21 policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
22 policy_block['Statement'][0]['Action'][0] == "*" and \
23 'Resource' in policy_block['Statement'][0] and \
24 policy_block['Statement'][0]['Resource'] == '*':
25 return CheckResult.FAILED
26 except: # nosec
27 pass
28 return CheckResult.PASSED
29
30
31 check = IAMAdminPolicyDocument()
32
[end of checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py]
[start of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py]
1 from checkov.terraform.checks.data.base_check import BaseDataCheck
2 from checkov.common.models.enums import CheckResult, CheckCategories
3
4
5 class StarActionPolicyDocument(BaseDataCheck):
6 def __init__(self):
7 name = "Ensure no IAM policies documents allow \"*\" as a statement's actions"
8 id = "CKV_AWS_49"
9 supported_data = ['aws_iam_policy_document']
10 categories = [CheckCategories.IAM]
11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
12
13 def scan_data_conf(self, conf):
14 """
15 validates iam policy document
16 https://learn.hashicorp.com/terraform/aws/iam-policy
17 :param conf: aws_kms_key configuration
18 :return: <CheckResult>
19 """
20 key = 'statement'
21 if key in conf.keys():
22 for statement in conf['statement']:
23 if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26
27
28 check = StarActionPolicyDocument()
29
[end of checkov/terraform/checks/data/aws/StarActionPolicyDocument.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
@@ -19,7 +19,7 @@
"""
key = 'statement'
if key in conf.keys():
- for statement in conf['statement']:
+ for statement in conf[key]:
if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':
return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py
--- a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py
+++ b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py
@@ -17,13 +17,13 @@
try:
policy_block = json.loads(conf['policy'][0])
if 'Statement' in policy_block.keys():
- if 'Action' in policy_block['Statement'][0] and \
- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
- policy_block['Statement'][0]['Action'][0] == "*" and \
- 'Resource' in policy_block['Statement'][0] and \
- policy_block['Statement'][0]['Resource'] == '*':
+ for statement in policy_block['Statement']:
+ if 'Action' in statement and \
+ statement.get('Effect', ['Allow']) == 'Allow' and \
+ '*' in statement.get('Action', ['']) and \
+ '*' in statement.get('Resource', ['']):
return CheckResult.FAILED
- except: # nosec
+ except: # nosec
pass
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py
--- a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py
+++ b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py
@@ -17,9 +17,10 @@
try:
policy_block = json.loads(conf['policy'][0])
if 'Statement' in policy_block.keys():
- if 'Action' in policy_block['Statement'][0] and \
- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \
- policy_block['Statement'][0]['Action'][0] == "*":
+ for statement in policy_block['Statement']:
+ if 'Action' in statement and \
+ statement.get('Effect', ['Allow']) == 'Allow' and \
+ '*' in statement.get('Action', ['']):
return CheckResult.FAILED
except: # nosec
pass
|
{"golden_diff": "diff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n@@ -19,7 +19,7 @@\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n- for statement in conf['statement']:\n+ for statement in conf[key]:\n if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\ndiff --git a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py\n--- a/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py\n+++ b/checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py\n@@ -17,13 +17,13 @@\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n- if 'Action' in policy_block['Statement'][0] and \\\n- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n- policy_block['Statement'][0]['Action'][0] == \"*\" and \\\n- 'Resource' in policy_block['Statement'][0] and \\\n- policy_block['Statement'][0]['Resource'] == '*':\n+ for statement in policy_block['Statement']:\n+ if 'Action' in statement and \\\n+ statement.get('Effect', ['Allow']) == 'Allow' and \\\n+ '*' in statement.get('Action', ['']) and \\\n+ '*' in statement.get('Resource', ['']):\n return CheckResult.FAILED\n- except: # nosec\n+ except: # nosec\n pass\n return CheckResult.PASSED\n \ndiff --git a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py\n--- a/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py\n@@ -17,9 +17,10 @@\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n- if 'Action' in policy_block['Statement'][0] and \\\n- policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n- policy_block['Statement'][0]['Action'][0] == \"*\":\n+ for statement in policy_block['Statement']:\n+ if 'Action' in statement and \\\n+ statement.get('Effect', ['Allow']) == 'Allow' and \\\n+ '*' in statement.get('Action', ['']):\n return CheckResult.FAILED\n except: # nosec\n pass\n", "issue": "Scanning IAM policy only takes First SID in json rather than looping through\n**Describe the bug**\r\nIt seems when specifying more than one SID in a json, the policies do not loop through each one rather it just looks at the first one and ends. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create policy with more than one SID\r\n`{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Sid\": \"SqsAllow\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [\r\n \"sqs:GetQueueAttributes\",\r\n \"sqs:GetQueueUrl\",\r\n \"sqs:ListDeadLetterSourceQueues\",\r\n \"sqs:ListQueues\",\r\n \"sqs:ReceiveMessage\",\r\n \"sqs:SendMessage\",\r\n \"sqs:SendMessageBatch\"\r\n ],\r\n \"Resource\": \"*\"\r\n },\r\n {\r\n \"Sid\": \"ALL\",\r\n \"Effect\": \"Allow\",\r\n \"Action\": [ \"*\"\r\n ],\r\n \"Resource\": [\"*\"]\r\n },`\r\n2. 
Run Checkov against policy\r\n\r\n\r\n**Expected behavior**\r\nI would expect the scan to check each json within the policy rather than the first one\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Mac\r\n - Checkov Version: 1.0.442\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nimport json\n\n\nclass IAMStarActionPolicyDocument(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_63\"\n supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'policy' in conf.keys():\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n if 'Action' in policy_block['Statement'][0] and \\\n policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n policy_block['Statement'][0]['Action'][0] == \"*\":\n return CheckResult.FAILED\n except: # nosec\n pass\n return CheckResult.PASSED\n\n\ncheck = IAMStarActionPolicyDocument()\n", "path": "checkov/terraform/checks/resource/aws/IAMStarActionPolicyDocument.py"}, {"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nimport json\n\n\nclass IAMAdminPolicyDocument(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created\"\n id = \"CKV_AWS_62\"\n supported_resources = ['aws_iam_role_policy', 'aws_iam_user_policy', 'aws_iam_group_policy', 'aws_iam_policy']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'policy' in conf.keys():\n try:\n policy_block = json.loads(conf['policy'][0])\n if 'Statement' in policy_block.keys():\n if 'Action' in policy_block['Statement'][0] and \\\n policy_block['Statement'][0].get('Effect', ['Allow']) == 'Allow' and \\\n policy_block['Statement'][0]['Action'][0] == \"*\" and \\\n 'Resource' in policy_block['Statement'][0] and \\\n policy_block['Statement'][0]['Resource'] == '*':\n return CheckResult.FAILED\n except: # nosec\n pass\n return CheckResult.PASSED\n\n\ncheck = IAMAdminPolicyDocument()\n", "path": "checkov/terraform/checks/resource/aws/IAMAdminPolicyDocument.py"}, {"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass StarActionPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_49\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n for statement in 
conf['statement']:\n if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = StarActionPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/StarActionPolicyDocument.py"}]}
| 1,891 | 675 |
gh_patches_debug_20479
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-3235
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The ticks for the HGS overlay on map plots are white and invisible by default
Also, the HPC ticks are on all four axes.
</issue>
<code>
[start of sunpy/visualization/wcsaxes_compat.py]
1 """
2 This module provides functions to make WCSAxes work in SunPy.
3 """
4 import matplotlib.pyplot as plt
5
6 import astropy.units as u
7 from astropy.visualization import wcsaxes
8
9 # Force is put here to enable disabling all checks in this module.
10 # It should only be used by tests and other such hacks.
11 _FORCE_NO_WCSAXES = False
12
13 __all__ = ["is_wcsaxes", "gca_wcs", "get_world_transform",
14 "default_wcs_grid", "wcsaxes_heliographic_overlay"]
15
16
17 def is_wcsaxes(axes):
18 """
19 Tests a `matplotlib.axes.Axes` object to see if it is an instance of
20 `~astropy.visualization.wcsaxes.WCSAxes`.
21
22 Parameters
23 ----------
24 axes : `matplotlib.axes`
25 Axes to test.
26
27 Returns
28 -------
29 `bool`
30 Result of the test.
31 """
32 if not _FORCE_NO_WCSAXES:
33 return isinstance(axes, wcsaxes.WCSAxes)
34 else:
35 return False
36
37
38 def gca_wcs(wcs, fig=None, slices=None):
39 """
40 Get the current axes, and return a `~astropy.visualization.wcsaxes.WCSAxes`
41 if possible.
42
43 Parameters
44 ----------
45 wcs : `astropy.wcs.WCS`
46 A `~astropy.wcs.WCS` object used to create a new axes.
47 fig : `matplotlib.figure.Figure`
48 The figure in which to check for the axes.
49 slices : `tuple`
50 ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe
51 which two dimensions of the `~astropy.wcs.WCS` object are being plotted.
52 This slices the multidimensional wcs object in the way it needs to be sliced.
53
54 Returns
55 -------
56 `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`
57 The current axes, or a new one if created.
58 """
59 if not fig:
60 fig = plt.gcf()
61
62 if not len(fig.get_axes()):
63 if not _FORCE_NO_WCSAXES:
64 ax = plt.gca(projection=wcs, slices=slices)
65 else:
66 ax = plt.gca()
67 else:
68 ax = plt.gca()
69
70 return ax
71
72
73 def get_world_transform(axes):
74 """
75 Get the transformation to world coordinates.
76
77 If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this
78 returns the transform to the "world" coordinates, otherwise it returns
79 the transform to the matplotlib data coordinates, which are assumed to be in
80 world coordinates.
81
82 Parameters
83 ----------
84 axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`
85 The axes to get the transform from.
86
87 Returns
88 -------
89 `~matplotlib.transforms.CompositeGenericTransform`
90 The transformation object.
91 """
92 if is_wcsaxes(axes):
93 transform = axes.get_transform('world')
94 else:
95 transform = axes.transData
96
97 return transform
98
99
100 def default_wcs_grid(axes):
101 """
102 Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid
103 formatting.
104
105 Parameters
106 ----------
107 axes : `~astropy.visualization.wcsaxes.WCSAxes`
108 The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world
109 coordinate grid on.
110 """
111 axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',
112 linewidth=0.5)
113
114
115 @u.quantity_input
116 def wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10*u.deg, **kwargs):
117 """
118 Create a heliographic overlay using
119 `~astropy.visualization.wcsaxes.WCSAxes`.
120
121 Will draw a grid and label the top axes.
122
123 Parameters
124 ----------
125 axes : `~astropy.visualization.wcsaxes.WCSAxes`
126 The `~astropy.visualization.wcsaxes.WCSAxes` object to create the HGS overlay on.
127 grid_spacing: `~astropy.units.Quantity`
128 Spacing for longitude and latitude grid in degrees.
129
130 Returns
131 -------
132 `~astropy.visualization.wcsaxes.WCSAxes`
133 The overlay object.
134
135 Notes
136 -----
137 Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.
138 """
139 # Unpack spacing
140 if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:
141 lon_space = lat_space = grid_spacing
142 elif grid_spacing.size == 2:
143 lon_space, lat_space = grid_spacing
144 else:
145 raise ValueError("grid_spacing must be a Quantity of length one or two.")
146
147 overlay = axes.get_coords_overlay('heliographic_stonyhurst')
148
149 lon = overlay[0]
150 lat = overlay[1]
151
152 lon.coord_wrap = 180
153 lon.set_major_formatter('dd')
154
155 lon.set_axislabel('Solar Longitude', minpad=0.8)
156 lat.set_axislabel('Solar Latitude', minpad=0.9)
157
158 lon.set_ticks_position('tr')
159 lat.set_ticks_position('tr')
160
161 grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
162 grid_kw.update(kwargs)
163
164 lon.set_ticks(spacing=lon_space, color=grid_kw['color'])
165 lat.set_ticks(spacing=lat_space, color=grid_kw['color'])
166
167 overlay.grid(**grid_kw)
168
169 if axes.title:
170 x, y = axes.title.get_position()
171 axes.title.set_position([x, y + 0.08])
172
173 return overlay
174
[end of sunpy/visualization/wcsaxes_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sunpy/visualization/wcsaxes_compat.py b/sunpy/visualization/wcsaxes_compat.py
--- a/sunpy/visualization/wcsaxes_compat.py
+++ b/sunpy/visualization/wcsaxes_compat.py
@@ -144,6 +144,12 @@
else:
raise ValueError("grid_spacing must be a Quantity of length one or two.")
+ # Set the native coordinates to be bottom and left only so they don't share
+ # axes with the overlay.
+ c1, c2 = axes.coords
+ c1.set_ticks_position('bl')
+ c2.set_ticks_position('bl')
+
overlay = axes.get_coords_overlay('heliographic_stonyhurst')
lon = overlay[0]
@@ -161,8 +167,10 @@
grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}
grid_kw.update(kwargs)
- lon.set_ticks(spacing=lon_space, color=grid_kw['color'])
- lat.set_ticks(spacing=lat_space, color=grid_kw['color'])
+ # Don't plot white ticks by default (only if explicitly asked)
+ tick_color = grid_kw['color'] if 'color' in kwargs else 'k'
+ lon.set_ticks(spacing=lon_space, color=tick_color)
+ lat.set_ticks(spacing=lat_space, color=tick_color)
overlay.grid(**grid_kw)
|
{"golden_diff": "diff --git a/sunpy/visualization/wcsaxes_compat.py b/sunpy/visualization/wcsaxes_compat.py\n--- a/sunpy/visualization/wcsaxes_compat.py\n+++ b/sunpy/visualization/wcsaxes_compat.py\n@@ -144,6 +144,12 @@\n else:\n raise ValueError(\"grid_spacing must be a Quantity of length one or two.\")\n \n+ # Set the native coordinates to be bottom and left only so they don't share\n+ # axes with the overlay.\n+ c1, c2 = axes.coords\n+ c1.set_ticks_position('bl')\n+ c2.set_ticks_position('bl')\n+\n overlay = axes.get_coords_overlay('heliographic_stonyhurst')\n \n lon = overlay[0]\n@@ -161,8 +167,10 @@\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n \n- lon.set_ticks(spacing=lon_space, color=grid_kw['color'])\n- lat.set_ticks(spacing=lat_space, color=grid_kw['color'])\n+ # Don't plot white ticks by default (only if explicitly asked)\n+ tick_color = grid_kw['color'] if 'color' in kwargs else 'k'\n+ lon.set_ticks(spacing=lon_space, color=tick_color)\n+ lat.set_ticks(spacing=lat_space, color=tick_color)\n \n overlay.grid(**grid_kw)\n", "issue": "The ticks for the HGS overlay on map plots are white and invisible by default\nAlso the HPC ticks are on all four axes.\n", "before_files": [{"content": "\"\"\"\nThis module provides functions to make WCSAxes work in SunPy.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\nfrom astropy.visualization import wcsaxes\n\n# Force is put here to enable disabling all checks in this module.\n# It should only be used by tests and other such hacks.\n_FORCE_NO_WCSAXES = False\n\n__all__ = [\"is_wcsaxes\", \"gca_wcs\", \"get_world_transform\",\n \"default_wcs_grid\", \"wcsaxes_heliographic_overlay\"]\n\n\ndef is_wcsaxes(axes):\n \"\"\"\n Tests a `matplotlib.axes.Axes` object to see if it is an instance of\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Parameters\n ----------\n axes : `matplotlib.axes`\n Axes to test.\n\n Returns\n -------\n `bool`\n Result of the test.\n \"\"\"\n if not _FORCE_NO_WCSAXES:\n return isinstance(axes, wcsaxes.WCSAxes)\n else:\n return False\n\n\ndef gca_wcs(wcs, fig=None, slices=None):\n \"\"\"\n Get the current axes, and return a `~astropy.visualization.wcsaxes.WCSAxes`\n if possible.\n\n Parameters\n ----------\n wcs : `astropy.wcs.WCS`\n A `~astropy.wcs.WCS` object used to create a new axes.\n fig : `matplotlib.figure.Figure`\n The figure in which to check for the axes.\n slices : `tuple`\n ``slices`` is passed to `~astropy.visualization.wcsaxes.WCSAxes` to describe\n which two dimensions of the `~astropy.wcs.WCS` object are being plotted.\n This slices the multidimensional wcs object in the way it needs to be sliced.\n\n Returns\n -------\n `matplotlib.axes.Axes` or `~astropy.visualization.wcsaxes.WCSAxes`\n The current axes, or a new one if created.\n \"\"\"\n if not fig:\n fig = plt.gcf()\n\n if not len(fig.get_axes()):\n if not _FORCE_NO_WCSAXES:\n ax = plt.gca(projection=wcs, slices=slices)\n else:\n ax = plt.gca()\n else:\n ax = plt.gca()\n\n return ax\n\n\ndef get_world_transform(axes):\n \"\"\"\n Get the transformation to world coordinates.\n\n If the axes is a `~astropy.visualization.wcsaxes.WCSAxes` instance this\n returns the transform to the \"world\" coordinates, otherwise it returns\n the transform to the matplotlib data coordinates, which are assumed to be in\n world coordinates.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes` or `~matplotlib.axes.Axes`\n The axes to get the transform from.\n\n Returns\n -------\n 
`~matplotlib.transforms.CompositeGenericTransform`\n The transformation object.\n \"\"\"\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n\n return transform\n\n\ndef default_wcs_grid(axes):\n \"\"\"\n Apply some default `~astropy.visualization.wcsaxes.WCSAxes` grid\n formatting.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to draw the world\n coordinate grid on.\n \"\"\"\n axes.coords.grid(color='white', alpha=0.6, linestyle='dotted',\n linewidth=0.5)\n\n\[email protected]_input\ndef wcsaxes_heliographic_overlay(axes, grid_spacing: u.deg = 10*u.deg, **kwargs):\n \"\"\"\n Create a heliographic overlay using\n `~astropy.visualization.wcsaxes.WCSAxes`.\n\n Will draw a grid and label the top axes.\n\n Parameters\n ----------\n axes : `~astropy.visualization.wcsaxes.WCSAxes`\n The `~astropy.visualization.wcsaxes.WCSAxes` object to create the HGS overlay on.\n grid_spacing: `~astropy.units.Quantity`\n Spacing for longitude and latitude grid in degrees.\n\n Returns\n -------\n `~astropy.visualization.wcsaxes.WCSAxes`\n The overlay object.\n\n Notes\n -----\n Keywords are passed to `~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap.grid`.\n \"\"\"\n # Unpack spacing\n if isinstance(grid_spacing, u.Quantity) and grid_spacing.size == 1:\n lon_space = lat_space = grid_spacing\n elif grid_spacing.size == 2:\n lon_space, lat_space = grid_spacing\n else:\n raise ValueError(\"grid_spacing must be a Quantity of length one or two.\")\n\n overlay = axes.get_coords_overlay('heliographic_stonyhurst')\n\n lon = overlay[0]\n lat = overlay[1]\n\n lon.coord_wrap = 180\n lon.set_major_formatter('dd')\n\n lon.set_axislabel('Solar Longitude', minpad=0.8)\n lat.set_axislabel('Solar Latitude', minpad=0.9)\n\n lon.set_ticks_position('tr')\n lat.set_ticks_position('tr')\n\n grid_kw = {'color': 'white', 'zorder': 100, 'alpha': 0.5}\n grid_kw.update(kwargs)\n\n lon.set_ticks(spacing=lon_space, color=grid_kw['color'])\n lat.set_ticks(spacing=lat_space, color=grid_kw['color'])\n\n overlay.grid(**grid_kw)\n\n if axes.title:\n x, y = axes.title.get_position()\n axes.title.set_position([x, y + 0.08])\n\n return overlay\n", "path": "sunpy/visualization/wcsaxes_compat.py"}]}
| 2,229 | 330 |
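A minimal, dependency-free sketch of the keyword-detection rule at the heart of the sunpy patch in the row above: the overlay grid stays white by default, while tick labels fall back to black unless the caller explicitly passed a colour. The function name and return shape below are illustrative only, not sunpy API.

```python
# Illustrative stand-in for the patched colour handling in
# wcsaxes_heliographic_overlay: the grid colour defaults to white, but the
# tick colour only follows it when 'color' was passed in explicitly.
def resolve_overlay_style(**kwargs):
    grid_kw = {"color": "white", "zorder": 100, "alpha": 0.5}
    grid_kw.update(kwargs)
    # Don't plot white ticks by default (only if explicitly asked).
    tick_color = grid_kw["color"] if "color" in kwargs else "k"
    return grid_kw, tick_color


if __name__ == "__main__":
    print(resolve_overlay_style())             # white grid, black ticks
    print(resolve_overlay_style(color="red"))  # explicit colour wins for both
```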
gh_patches_debug_39145
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5027
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test_pipeline_images.py fails with "TypeError: Skipped expected string as 'msg' parameter, got 'bool' instead."
See e.g. https://github.com/scrapy/scrapy/pull/5019/checks?check_run_id=2012658916
This should be related to the skip attribute, though I'm not sure why it started happening now.
</issue>
<code>
[start of scrapy/pipelines/images.py]
1 """
2 Images Pipeline
3
4 See documentation in topics/media-pipeline.rst
5 """
6 import functools
7 import hashlib
8 from contextlib import suppress
9 from io import BytesIO
10
11 from itemadapter import ItemAdapter
12 from PIL import Image
13
14 from scrapy.exceptions import DropItem
15 from scrapy.http import Request
16 from scrapy.pipelines.files import FileException, FilesPipeline
17 # TODO: from scrapy.pipelines.media import MediaPipeline
18 from scrapy.settings import Settings
19 from scrapy.utils.misc import md5sum
20 from scrapy.utils.python import to_bytes
21
22
23 class NoimagesDrop(DropItem):
24 """Product with no images exception"""
25
26
27 class ImageException(FileException):
28 """General image error exception"""
29
30
31 class ImagesPipeline(FilesPipeline):
32 """Abstract pipeline that implement the image thumbnail generation logic
33
34 """
35
36 MEDIA_NAME = 'image'
37
38 # Uppercase attributes kept for backward compatibility with code that subclasses
39 # ImagesPipeline. They may be overridden by settings.
40 MIN_WIDTH = 0
41 MIN_HEIGHT = 0
42 EXPIRES = 90
43 THUMBS = {}
44 DEFAULT_IMAGES_URLS_FIELD = 'image_urls'
45 DEFAULT_IMAGES_RESULT_FIELD = 'images'
46
47 def __init__(self, store_uri, download_func=None, settings=None):
48 super().__init__(store_uri, settings=settings, download_func=download_func)
49
50 if isinstance(settings, dict) or settings is None:
51 settings = Settings(settings)
52
53 resolve = functools.partial(self._key_for_pipe,
54 base_class_name="ImagesPipeline",
55 settings=settings)
56 self.expires = settings.getint(
57 resolve("IMAGES_EXPIRES"), self.EXPIRES
58 )
59
60 if not hasattr(self, "IMAGES_RESULT_FIELD"):
61 self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD
62 if not hasattr(self, "IMAGES_URLS_FIELD"):
63 self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD
64
65 self.images_urls_field = settings.get(
66 resolve('IMAGES_URLS_FIELD'),
67 self.IMAGES_URLS_FIELD
68 )
69 self.images_result_field = settings.get(
70 resolve('IMAGES_RESULT_FIELD'),
71 self.IMAGES_RESULT_FIELD
72 )
73 self.min_width = settings.getint(
74 resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH
75 )
76 self.min_height = settings.getint(
77 resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT
78 )
79 self.thumbs = settings.get(
80 resolve('IMAGES_THUMBS'), self.THUMBS
81 )
82
83 @classmethod
84 def from_settings(cls, settings):
85 s3store = cls.STORE_SCHEMES['s3']
86 s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
87 s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
88 s3store.AWS_ENDPOINT_URL = settings['AWS_ENDPOINT_URL']
89 s3store.AWS_REGION_NAME = settings['AWS_REGION_NAME']
90 s3store.AWS_USE_SSL = settings['AWS_USE_SSL']
91 s3store.AWS_VERIFY = settings['AWS_VERIFY']
92 s3store.POLICY = settings['IMAGES_STORE_S3_ACL']
93
94 gcs_store = cls.STORE_SCHEMES['gs']
95 gcs_store.GCS_PROJECT_ID = settings['GCS_PROJECT_ID']
96 gcs_store.POLICY = settings['IMAGES_STORE_GCS_ACL'] or None
97
98 ftp_store = cls.STORE_SCHEMES['ftp']
99 ftp_store.FTP_USERNAME = settings['FTP_USER']
100 ftp_store.FTP_PASSWORD = settings['FTP_PASSWORD']
101 ftp_store.USE_ACTIVE_MODE = settings.getbool('FEED_STORAGE_FTP_ACTIVE')
102
103 store_uri = settings['IMAGES_STORE']
104 return cls(store_uri, settings=settings)
105
106 def file_downloaded(self, response, request, info, *, item=None):
107 return self.image_downloaded(response, request, info, item=item)
108
109 def image_downloaded(self, response, request, info, *, item=None):
110 checksum = None
111 for path, image, buf in self.get_images(response, request, info, item=item):
112 if checksum is None:
113 buf.seek(0)
114 checksum = md5sum(buf)
115 width, height = image.size
116 self.store.persist_file(
117 path, buf, info,
118 meta={'width': width, 'height': height},
119 headers={'Content-Type': 'image/jpeg'})
120 return checksum
121
122 def get_images(self, response, request, info, *, item=None):
123 path = self.file_path(request, response=response, info=info, item=item)
124 orig_image = Image.open(BytesIO(response.body))
125
126 width, height = orig_image.size
127 if width < self.min_width or height < self.min_height:
128 raise ImageException("Image too small "
129 f"({width}x{height} < "
130 f"{self.min_width}x{self.min_height})")
131
132 image, buf = self.convert_image(orig_image)
133 yield path, image, buf
134
135 for thumb_id, size in self.thumbs.items():
136 thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)
137 thumb_image, thumb_buf = self.convert_image(image, size)
138 yield thumb_path, thumb_image, thumb_buf
139
140 def convert_image(self, image, size=None):
141 if image.format == 'PNG' and image.mode == 'RGBA':
142 background = Image.new('RGBA', image.size, (255, 255, 255))
143 background.paste(image, image)
144 image = background.convert('RGB')
145 elif image.mode == 'P':
146 image = image.convert("RGBA")
147 background = Image.new('RGBA', image.size, (255, 255, 255))
148 background.paste(image, image)
149 image = background.convert('RGB')
150 elif image.mode != 'RGB':
151 image = image.convert('RGB')
152
153 if size:
154 image = image.copy()
155 image.thumbnail(size, Image.ANTIALIAS)
156
157 buf = BytesIO()
158 image.save(buf, 'JPEG')
159 return image, buf
160
161 def get_media_requests(self, item, info):
162 urls = ItemAdapter(item).get(self.images_urls_field, [])
163 return [Request(u) for u in urls]
164
165 def item_completed(self, results, item, info):
166 with suppress(KeyError):
167 ItemAdapter(item)[self.images_result_field] = [x for ok, x in results if ok]
168 return item
169
170 def file_path(self, request, response=None, info=None, *, item=None):
171 image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
172 return f'full/{image_guid}.jpg'
173
174 def thumb_path(self, request, thumb_id, response=None, info=None):
175 thumb_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
176 return f'thumbs/{thumb_id}/{thumb_guid}.jpg'
177
[end of scrapy/pipelines/images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/pipelines/images.py b/scrapy/pipelines/images.py
--- a/scrapy/pipelines/images.py
+++ b/scrapy/pipelines/images.py
@@ -9,9 +9,8 @@
from io import BytesIO
from itemadapter import ItemAdapter
-from PIL import Image
-from scrapy.exceptions import DropItem
+from scrapy.exceptions import DropItem, NotConfigured
from scrapy.http import Request
from scrapy.pipelines.files import FileException, FilesPipeline
# TODO: from scrapy.pipelines.media import MediaPipeline
@@ -45,6 +44,14 @@
DEFAULT_IMAGES_RESULT_FIELD = 'images'
def __init__(self, store_uri, download_func=None, settings=None):
+ try:
+ from PIL import Image
+ self._Image = Image
+ except ImportError:
+ raise NotConfigured(
+ 'ImagesPipeline requires installing Pillow 4.0.0 or later'
+ )
+
super().__init__(store_uri, settings=settings, download_func=download_func)
if isinstance(settings, dict) or settings is None:
@@ -121,7 +128,7 @@
def get_images(self, response, request, info, *, item=None):
path = self.file_path(request, response=response, info=info, item=item)
- orig_image = Image.open(BytesIO(response.body))
+ orig_image = self._Image.open(BytesIO(response.body))
width, height = orig_image.size
if width < self.min_width or height < self.min_height:
@@ -139,12 +146,12 @@
def convert_image(self, image, size=None):
if image.format == 'PNG' and image.mode == 'RGBA':
- background = Image.new('RGBA', image.size, (255, 255, 255))
+ background = self._Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode == 'P':
image = image.convert("RGBA")
- background = Image.new('RGBA', image.size, (255, 255, 255))
+ background = self._Image.new('RGBA', image.size, (255, 255, 255))
background.paste(image, image)
image = background.convert('RGB')
elif image.mode != 'RGB':
@@ -152,7 +159,7 @@
if size:
image = image.copy()
- image.thumbnail(size, Image.ANTIALIAS)
+ image.thumbnail(size, self._Image.ANTIALIAS)
buf = BytesIO()
image.save(buf, 'JPEG')
|
{"golden_diff": "diff --git a/scrapy/pipelines/images.py b/scrapy/pipelines/images.py\n--- a/scrapy/pipelines/images.py\n+++ b/scrapy/pipelines/images.py\n@@ -9,9 +9,8 @@\n from io import BytesIO\n \n from itemadapter import ItemAdapter\n-from PIL import Image\n \n-from scrapy.exceptions import DropItem\n+from scrapy.exceptions import DropItem, NotConfigured\n from scrapy.http import Request\n from scrapy.pipelines.files import FileException, FilesPipeline\n # TODO: from scrapy.pipelines.media import MediaPipeline\n@@ -45,6 +44,14 @@\n DEFAULT_IMAGES_RESULT_FIELD = 'images'\n \n def __init__(self, store_uri, download_func=None, settings=None):\n+ try:\n+ from PIL import Image\n+ self._Image = Image\n+ except ImportError:\n+ raise NotConfigured(\n+ 'ImagesPipeline requires installing Pillow 4.0.0 or later'\n+ )\n+\n super().__init__(store_uri, settings=settings, download_func=download_func)\n \n if isinstance(settings, dict) or settings is None:\n@@ -121,7 +128,7 @@\n \n def get_images(self, response, request, info, *, item=None):\n path = self.file_path(request, response=response, info=info, item=item)\n- orig_image = Image.open(BytesIO(response.body))\n+ orig_image = self._Image.open(BytesIO(response.body))\n \n width, height = orig_image.size\n if width < self.min_width or height < self.min_height:\n@@ -139,12 +146,12 @@\n \n def convert_image(self, image, size=None):\n if image.format == 'PNG' and image.mode == 'RGBA':\n- background = Image.new('RGBA', image.size, (255, 255, 255))\n+ background = self._Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode == 'P':\n image = image.convert(\"RGBA\")\n- background = Image.new('RGBA', image.size, (255, 255, 255))\n+ background = self._Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode != 'RGB':\n@@ -152,7 +159,7 @@\n \n if size:\n image = image.copy()\n- image.thumbnail(size, Image.ANTIALIAS)\n+ image.thumbnail(size, self._Image.ANTIALIAS)\n \n buf = BytesIO()\n image.save(buf, 'JPEG')\n", "issue": "test_pipeline_images.py fails with \"TypeError: Skipped expected string as 'msg' parameter, got 'bool' instead.\"\nSee e.g. https://github.com/scrapy/scrapy/pull/5019/checks?check_run_id=2012658916\r\n\r\nThis should be related to the skip attribute, though I'm not sure why did it start happening now.\n", "before_files": [{"content": "\"\"\"\nImages Pipeline\n\nSee documentation in topics/media-pipeline.rst\n\"\"\"\nimport functools\nimport hashlib\nfrom contextlib import suppress\nfrom io import BytesIO\n\nfrom itemadapter import ItemAdapter\nfrom PIL import Image\n\nfrom scrapy.exceptions import DropItem\nfrom scrapy.http import Request\nfrom scrapy.pipelines.files import FileException, FilesPipeline\n# TODO: from scrapy.pipelines.media import MediaPipeline\nfrom scrapy.settings import Settings\nfrom scrapy.utils.misc import md5sum\nfrom scrapy.utils.python import to_bytes\n\n\nclass NoimagesDrop(DropItem):\n \"\"\"Product with no images exception\"\"\"\n\n\nclass ImageException(FileException):\n \"\"\"General image error exception\"\"\"\n\n\nclass ImagesPipeline(FilesPipeline):\n \"\"\"Abstract pipeline that implement the image thumbnail generation logic\n\n \"\"\"\n\n MEDIA_NAME = 'image'\n\n # Uppercase attributes kept for backward compatibility with code that subclasses\n # ImagesPipeline. 
They may be overridden by settings.\n MIN_WIDTH = 0\n MIN_HEIGHT = 0\n EXPIRES = 90\n THUMBS = {}\n DEFAULT_IMAGES_URLS_FIELD = 'image_urls'\n DEFAULT_IMAGES_RESULT_FIELD = 'images'\n\n def __init__(self, store_uri, download_func=None, settings=None):\n super().__init__(store_uri, settings=settings, download_func=download_func)\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n\n resolve = functools.partial(self._key_for_pipe,\n base_class_name=\"ImagesPipeline\",\n settings=settings)\n self.expires = settings.getint(\n resolve(\"IMAGES_EXPIRES\"), self.EXPIRES\n )\n\n if not hasattr(self, \"IMAGES_RESULT_FIELD\"):\n self.IMAGES_RESULT_FIELD = self.DEFAULT_IMAGES_RESULT_FIELD\n if not hasattr(self, \"IMAGES_URLS_FIELD\"):\n self.IMAGES_URLS_FIELD = self.DEFAULT_IMAGES_URLS_FIELD\n\n self.images_urls_field = settings.get(\n resolve('IMAGES_URLS_FIELD'),\n self.IMAGES_URLS_FIELD\n )\n self.images_result_field = settings.get(\n resolve('IMAGES_RESULT_FIELD'),\n self.IMAGES_RESULT_FIELD\n )\n self.min_width = settings.getint(\n resolve('IMAGES_MIN_WIDTH'), self.MIN_WIDTH\n )\n self.min_height = settings.getint(\n resolve('IMAGES_MIN_HEIGHT'), self.MIN_HEIGHT\n )\n self.thumbs = settings.get(\n resolve('IMAGES_THUMBS'), self.THUMBS\n )\n\n @classmethod\n def from_settings(cls, settings):\n s3store = cls.STORE_SCHEMES['s3']\n s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']\n s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']\n s3store.AWS_ENDPOINT_URL = settings['AWS_ENDPOINT_URL']\n s3store.AWS_REGION_NAME = settings['AWS_REGION_NAME']\n s3store.AWS_USE_SSL = settings['AWS_USE_SSL']\n s3store.AWS_VERIFY = settings['AWS_VERIFY']\n s3store.POLICY = settings['IMAGES_STORE_S3_ACL']\n\n gcs_store = cls.STORE_SCHEMES['gs']\n gcs_store.GCS_PROJECT_ID = settings['GCS_PROJECT_ID']\n gcs_store.POLICY = settings['IMAGES_STORE_GCS_ACL'] or None\n\n ftp_store = cls.STORE_SCHEMES['ftp']\n ftp_store.FTP_USERNAME = settings['FTP_USER']\n ftp_store.FTP_PASSWORD = settings['FTP_PASSWORD']\n ftp_store.USE_ACTIVE_MODE = settings.getbool('FEED_STORAGE_FTP_ACTIVE')\n\n store_uri = settings['IMAGES_STORE']\n return cls(store_uri, settings=settings)\n\n def file_downloaded(self, response, request, info, *, item=None):\n return self.image_downloaded(response, request, info, item=item)\n\n def image_downloaded(self, response, request, info, *, item=None):\n checksum = None\n for path, image, buf in self.get_images(response, request, info, item=item):\n if checksum is None:\n buf.seek(0)\n checksum = md5sum(buf)\n width, height = image.size\n self.store.persist_file(\n path, buf, info,\n meta={'width': width, 'height': height},\n headers={'Content-Type': 'image/jpeg'})\n return checksum\n\n def get_images(self, response, request, info, *, item=None):\n path = self.file_path(request, response=response, info=info, item=item)\n orig_image = Image.open(BytesIO(response.body))\n\n width, height = orig_image.size\n if width < self.min_width or height < self.min_height:\n raise ImageException(\"Image too small \"\n f\"({width}x{height} < \"\n f\"{self.min_width}x{self.min_height})\")\n\n image, buf = self.convert_image(orig_image)\n yield path, image, buf\n\n for thumb_id, size in self.thumbs.items():\n thumb_path = self.thumb_path(request, thumb_id, response=response, info=info)\n thumb_image, thumb_buf = self.convert_image(image, size)\n yield thumb_path, thumb_image, thumb_buf\n\n def convert_image(self, image, size=None):\n if image.format == 'PNG' and 
image.mode == 'RGBA':\n background = Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode == 'P':\n image = image.convert(\"RGBA\")\n background = Image.new('RGBA', image.size, (255, 255, 255))\n background.paste(image, image)\n image = background.convert('RGB')\n elif image.mode != 'RGB':\n image = image.convert('RGB')\n\n if size:\n image = image.copy()\n image.thumbnail(size, Image.ANTIALIAS)\n\n buf = BytesIO()\n image.save(buf, 'JPEG')\n return image, buf\n\n def get_media_requests(self, item, info):\n urls = ItemAdapter(item).get(self.images_urls_field, [])\n return [Request(u) for u in urls]\n\n def item_completed(self, results, item, info):\n with suppress(KeyError):\n ItemAdapter(item)[self.images_result_field] = [x for ok, x in results if ok]\n return item\n\n def file_path(self, request, response=None, info=None, *, item=None):\n image_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()\n return f'full/{image_guid}.jpg'\n\n def thumb_path(self, request, thumb_id, response=None, info=None):\n thumb_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()\n return f'thumbs/{thumb_id}/{thumb_guid}.jpg'\n", "path": "scrapy/pipelines/images.py"}]}
| 2,515 | 611 |
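The accepted scrapy patch above defers the Pillow import from module level into `ImagesPipeline.__init__` and raises `NotConfigured` when it is missing. The sketch below shows the same lazy optional-dependency pattern in isolation; the class and exception names are placeholders, not scrapy API.

```python
# Lazy optional-import pattern, mirroring the fix: the heavy dependency is
# imported only when the component is instantiated, and a clear error is
# raised if it is not installed.
class OptionalDependencyError(RuntimeError):
    """Stand-in for scrapy.exceptions.NotConfigured."""


class ThumbnailMaker:
    def __init__(self):
        try:
            from PIL import Image  # not imported at module import time
        except ImportError:
            raise OptionalDependencyError(
                "ThumbnailMaker requires installing Pillow"
            )
        self._Image = Image  # keep a handle instead of a module-level global

    def blank(self, size=(8, 8)):
        return self._Image.new("RGB", size, (255, 255, 255))


if __name__ == "__main__":
    try:
        print(ThumbnailMaker().blank().size)
    except OptionalDependencyError as exc:
        print(f"not configured: {exc}")
```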
gh_patches_debug_23676
|
rasdani/github-patches
|
git_diff
|
benoitc__gunicorn-929
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error handling all requests on master - attempt to read attribute of NoneType
I'm trying to run Gunicorn on the master branch (latest commit is 4c601ce447fafbeed27f0f0a238e0e48c928b6f9). But every incoming request generates this error:
```
[2014-10-27 20:36:55 +0000] [22663] [ERROR] Error handling request
Traceback (most recent call last):
File "/mnt/runscope/.virtualenvs/embedcurl/src/gunicorn/gunicorn/workers/async.py", line 41, in handle
proxy_protocol_info = req.proxy_protocol_info
AttributeError: 'NoneType' object has no attribute 'proxy_protocol_info'
```
This is my gunicorn command line:
```
/usr/local/runscope/.virtualenvs/embedcurl/bin/gunicorn \
--name embedcurl -k gevent --workers=2 --bind 0.0.0.0:3002 \
--error-logfile /var/log/runscope/embedcurl.error.log \
--access-logfile /var/log/runscope/embedcurl.access.log \
--access-logformat '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s %(T)s %(D)s "%(f)s" "%(a)s"' \
--max-requests 50000 \
--max-requests-jitter 500 \
--statsd-prefix service.embedcurl.test004. \
-D "embedcurl:app" --pid /var/run/embedcurl.pid
```
I think the problem comes from commit adf353f213d12994cc36ecbbcb6a084baf1fda12. See the file https://github.com/benoitc/gunicorn/blob/adf353f213d12994cc36ecbbcb6a084baf1fda12/gunicorn/workers/async.py -- line 41 reads from `req.proxy_protocol_info` but `req` is always None when that line runs. I'm not familiar with `proxy_protocol_info` myself so I'm not quite sure what the fix is.
Error handling all requests on master - attempt to read attribute of NoneType
I'm trying to run Gunicorn on the master branch (latest commit is 4c601ce447fafbeed27f0f0a238e0e48c928b6f9). But every incoming request generates this error:
```
[2014-10-27 20:36:55 +0000] [22663] [ERROR] Error handling request
Traceback (most recent call last):
File "/mnt/runscope/.virtualenvs/embedcurl/src/gunicorn/gunicorn/workers/async.py", line 41, in handle
proxy_protocol_info = req.proxy_protocol_info
AttributeError: 'NoneType' object has no attribute 'proxy_protocol_info'
```
This is my gunicorn command line:
```
/usr/local/runscope/.virtualenvs/embedcurl/bin/gunicorn \
--name embedcurl -k gevent --workers=2 --bind 0.0.0.0:3002 \
--error-logfile /var/log/runscope/embedcurl.error.log \
--access-logfile /var/log/runscope/embedcurl.access.log \
--access-logformat '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s %(T)s %(D)s "%(f)s" "%(a)s"' \
--max-requests 50000 \
--max-requests-jitter 500 \
--statsd-prefix service.embedcurl.test004. \
-D "embedcurl:app" --pid /var/run/embedcurl.pid
```
I think the problem comes from commit adf353f213d12994cc36ecbbcb6a084baf1fda12. See the file https://github.com/benoitc/gunicorn/blob/adf353f213d12994cc36ecbbcb6a084baf1fda12/gunicorn/workers/async.py -- line 41 reads from `req.proxy_protocol_info` but `req` is always None when that line runs. I'm not familiar with `proxy_protocol_info` myself so I'm not quite sure what the fix is.
</issue>
<code>
[start of gunicorn/workers/async.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import errno
8 import socket
9 import ssl
10 import sys
11
12 import gunicorn.http as http
13 import gunicorn.http.wsgi as wsgi
14 import gunicorn.util as util
15 import gunicorn.workers.base as base
16 from gunicorn import six
17
18 ALREADY_HANDLED = object()
19
20
21 class AsyncWorker(base.Worker):
22
23 def __init__(self, *args, **kwargs):
24 super(AsyncWorker, self).__init__(*args, **kwargs)
25 self.worker_connections = self.cfg.worker_connections
26
27 def timeout_ctx(self):
28 raise NotImplementedError()
29
30 def handle(self, listener, client, addr):
31 req = None
32 try:
33 parser = http.RequestParser(self.cfg, client)
34 try:
35 listener_name = listener.getsockname()
36 if not self.cfg.keepalive:
37 req = six.next(parser)
38 self.handle_request(listener_name, req, client, addr)
39 else:
40 # keepalive loop
41 proxy_protocol_info = req.proxy_protocol_info
42 while True:
43 req = None
44 with self.timeout_ctx():
45 req = six.next(parser)
46 if not req:
47 break
48 req.proxy_protocol_info = proxy_protocol_info
49 self.handle_request(listener_name, req, client, addr)
50 except http.errors.NoMoreData as e:
51 self.log.debug("Ignored premature client disconnection. %s", e)
52 except StopIteration as e:
53 self.log.debug("Closing connection. %s", e)
54 except ssl.SSLError:
55 exc_info = sys.exc_info()
56 # pass to next try-except level
57 six.reraise(exc_info[0], exc_info[1], exc_info[2])
58 except socket.error:
59 exc_info = sys.exc_info()
60 # pass to next try-except level
61 six.reraise(exc_info[0], exc_info[1], exc_info[2])
62 except Exception as e:
63 self.handle_error(req, client, addr, e)
64 except ssl.SSLError as e:
65 if e.args[0] == ssl.SSL_ERROR_EOF:
66 self.log.debug("ssl connection closed")
67 client.close()
68 else:
69 self.log.debug("Error processing SSL request.")
70 self.handle_error(req, client, addr, e)
71 except socket.error as e:
72 if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
73 self.log.exception("Socket error processing request.")
74 else:
75 if e.args[0] == errno.ECONNRESET:
76 self.log.debug("Ignoring connection reset")
77 else:
78 self.log.debug("Ignoring EPIPE")
79 except Exception as e:
80 self.handle_error(req, client, addr, e)
81 finally:
82 util.close(client)
83
84 def handle_request(self, listener_name, req, sock, addr):
85 request_start = datetime.now()
86 environ = {}
87 resp = None
88 try:
89 self.cfg.pre_request(self, req)
90 resp, environ = wsgi.create(req, sock, addr,
91 listener_name, self.cfg)
92 environ["wsgi.multithread"] = True
93 self.nr += 1
94 if self.alive and self.nr >= self.max_requests:
95 self.log.info("Autorestarting worker after current request.")
96 resp.force_close()
97 self.alive = False
98
99 if not self.cfg.keepalive:
100 resp.force_close()
101
102 respiter = self.wsgi(environ, resp.start_response)
103 if respiter == ALREADY_HANDLED:
104 return False
105 try:
106 if isinstance(respiter, environ['wsgi.file_wrapper']):
107 resp.write_file(respiter)
108 else:
109 for item in respiter:
110 resp.write(item)
111 resp.close()
112 request_time = datetime.now() - request_start
113 self.log.access(resp, req, environ, request_time)
114 finally:
115 if hasattr(respiter, "close"):
116 respiter.close()
117 if resp.should_close():
118 raise StopIteration()
119 except StopIteration:
120 raise
121 except Exception:
122 if resp and resp.headers_sent:
123 # If the requests have already been sent, we should close the
124 # connection to indicate the error.
125 self.log.exception("Error handling request")
126 try:
127 sock.shutdown(socket.SHUT_RDWR)
128 sock.close()
129 except socket.error:
130 pass
131 raise StopIteration()
132 raise
133 finally:
134 try:
135 self.cfg.post_request(self, req, environ, resp)
136 except Exception:
137 self.log.exception("Exception in post_request hook")
138 return True
139
[end of gunicorn/workers/async.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py
--- a/gunicorn/workers/async.py
+++ b/gunicorn/workers/async.py
@@ -38,14 +38,17 @@
self.handle_request(listener_name, req, client, addr)
else:
# keepalive loop
- proxy_protocol_info = req.proxy_protocol_info
+ proxy_protocol_info = {}
while True:
req = None
with self.timeout_ctx():
req = six.next(parser)
if not req:
break
- req.proxy_protocol_info = proxy_protocol_info
+ if req.proxy_protocol_info:
+ proxy_protocol_info = req.proxy_protocol_info
+ else:
+ req.proxy_protocol_info = proxy_protocol_info
self.handle_request(listener_name, req, client, addr)
except http.errors.NoMoreData as e:
self.log.debug("Ignored premature client disconnection. %s", e)
|
{"golden_diff": "diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py\n--- a/gunicorn/workers/async.py\n+++ b/gunicorn/workers/async.py\n@@ -38,14 +38,17 @@\n self.handle_request(listener_name, req, client, addr)\n else:\n # keepalive loop\n- proxy_protocol_info = req.proxy_protocol_info\n+ proxy_protocol_info = {}\n while True:\n req = None\n with self.timeout_ctx():\n req = six.next(parser)\n if not req:\n break\n- req.proxy_protocol_info = proxy_protocol_info\n+ if req.proxy_protocol_info:\n+ proxy_protocol_info = req.proxy_protocol_info\n+ else:\n+ req.proxy_protocol_info = proxy_protocol_info\n self.handle_request(listener_name, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n", "issue": "Error handling all requests on master - attempt to read attribute of NoneType\nI'm trying to run Gunicorn on the master branch (latest commit is 4c601ce447fafbeed27f0f0a238e0e48c928b6f9). But every incoming request generates this error:\n\n```\n[2014-10-27 20:36:55 +0000] [22663] [ERROR] Error handling request\nTraceback (most recent call last):\n File \"/mnt/runscope/.virtualenvs/embedcurl/src/gunicorn/gunicorn/workers/async.py\", line 41, in handle\n proxy_protocol_info = req.proxy_protocol_info\nAttributeError: 'NoneType' object has no attribute 'proxy_protocol_info'\n```\n\nThis is my gunicorn command line:\n\n```\n/usr/local/runscope/.virtualenvs/embedcurl/bin/gunicorn \\\n --name embedcurl -k gevent --workers=2 --bind 0.0.0.0:3002 \\\n --error-logfile /var/log/runscope/embedcurl.error.log \\\n --access-logfile /var/log/runscope/embedcurl.access.log \\\n --access-logformat '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s %(T)s %(D)s \"%(f)s\" \"%(a)s\"' \\\n --max-requests 50000 \\\n --max-requests-jitter 500 \\\n --statsd-prefix service.embedcurl.test004. \\\n -D \"embedcurl:app\" --pid /var/run/embedcurl.pid\n```\n\nI think the problem comes from commit adf353f213d12994cc36ecbbcb6a084baf1fda12. See the file https://github.com/benoitc/gunicorn/blob/adf353f213d12994cc36ecbbcb6a084baf1fda12/gunicorn/workers/async.py -- line 41 reads from `req.proxy_protocol_info` but `req` is always None when that line runs. I'm not familiar with `proxy_protocol_info` myself so I'm not quite sure what the fix is.\n\nError handling all requests on master - attempt to read attribute of NoneType\nI'm trying to run Gunicorn on the master branch (latest commit is 4c601ce447fafbeed27f0f0a238e0e48c928b6f9). But every incoming request generates this error:\n\n```\n[2014-10-27 20:36:55 +0000] [22663] [ERROR] Error handling request\nTraceback (most recent call last):\n File \"/mnt/runscope/.virtualenvs/embedcurl/src/gunicorn/gunicorn/workers/async.py\", line 41, in handle\n proxy_protocol_info = req.proxy_protocol_info\nAttributeError: 'NoneType' object has no attribute 'proxy_protocol_info'\n```\n\nThis is my gunicorn command line:\n\n```\n/usr/local/runscope/.virtualenvs/embedcurl/bin/gunicorn \\\n --name embedcurl -k gevent --workers=2 --bind 0.0.0.0:3002 \\\n --error-logfile /var/log/runscope/embedcurl.error.log \\\n --access-logfile /var/log/runscope/embedcurl.access.log \\\n --access-logformat '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s %(T)s %(D)s \"%(f)s\" \"%(a)s\"' \\\n --max-requests 50000 \\\n --max-requests-jitter 500 \\\n --statsd-prefix service.embedcurl.test004. \\\n -D \"embedcurl:app\" --pid /var/run/embedcurl.pid\n```\n\nI think the problem comes from commit adf353f213d12994cc36ecbbcb6a084baf1fda12. 
See the file https://github.com/benoitc/gunicorn/blob/adf353f213d12994cc36ecbbcb6a084baf1fda12/gunicorn/workers/async.py -- line 41 reads from `req.proxy_protocol_info` but `req` is always None when that line runs. I'm not familiar with `proxy_protocol_info` myself so I'm not quite sure what the fix is.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport errno\nimport socket\nimport ssl\nimport sys\n\nimport gunicorn.http as http\nimport gunicorn.http.wsgi as wsgi\nimport gunicorn.util as util\nimport gunicorn.workers.base as base\nfrom gunicorn import six\n\nALREADY_HANDLED = object()\n\n\nclass AsyncWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super(AsyncWorker, self).__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n\n def timeout_ctx(self):\n raise NotImplementedError()\n\n def handle(self, listener, client, addr):\n req = None\n try:\n parser = http.RequestParser(self.cfg, client)\n try:\n listener_name = listener.getsockname()\n if not self.cfg.keepalive:\n req = six.next(parser)\n self.handle_request(listener_name, req, client, addr)\n else:\n # keepalive loop\n proxy_protocol_info = req.proxy_protocol_info\n while True:\n req = None\n with self.timeout_ctx():\n req = six.next(parser)\n if not req:\n break\n req.proxy_protocol_info = proxy_protocol_info\n self.handle_request(listener_name, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n except StopIteration as e:\n self.log.debug(\"Closing connection. %s\", e)\n except ssl.SSLError:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except socket.error:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except Exception as e:\n self.handle_error(req, client, addr, e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n client.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.args[0] == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n else:\n self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n util.close(client)\n\n def handle_request(self, listener_name, req, sock, addr):\n request_start = datetime.now()\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n resp, environ = wsgi.create(req, sock, addr,\n listener_name, self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.log.info(\"Autorestarting worker after current request.\")\n resp.force_close()\n self.alive = False\n\n if not self.cfg.keepalive:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n if respiter == ALREADY_HANDLED:\n return False\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n resp.close()\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n finally:\n 
if hasattr(respiter, \"close\"):\n respiter.close()\n if resp.should_close():\n raise StopIteration()\n except StopIteration:\n raise\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n self.log.exception(\"Error handling request\")\n try:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n except socket.error:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n return True\n", "path": "gunicorn/workers/async.py"}]}
| 2,866 | 209 |
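The traceback in the gunicorn row above comes from reading `req.proxy_protocol_info` while `req` is still `None`; the patch seeds a connection-level default before the keepalive loop and then carries the value across requests. A dependency-free sketch of that control flow follows; `FakeRequest` and `keepalive_loop` are illustrative names, not gunicorn code.

```python
# Control-flow sketch of the keepalive fix: seed an empty default before any
# request exists, refresh it from requests that carry the info, and re-attach
# it to requests that do not.
class FakeRequest:
    def __init__(self, proxy_protocol_info=None):
        self.proxy_protocol_info = proxy_protocol_info


def keepalive_loop(requests):
    proxy_protocol_info = {}  # previously read from `req` while req was None
    handled = []
    for req in requests:
        if req.proxy_protocol_info:
            proxy_protocol_info = req.proxy_protocol_info
        else:
            req.proxy_protocol_info = proxy_protocol_info
        handled.append(req.proxy_protocol_info)
    return handled


if __name__ == "__main__":
    reqs = [FakeRequest({"src_addr": "1.2.3.4"}), FakeRequest(), FakeRequest()]
    # The first request's info is reused by the later keepalive requests.
    print(keepalive_loop(reqs))
```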
gh_patches_debug_12976
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-2042
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
urllib3 logo is unreadable in docs in dark mode
This is a recent Furo addition, you can see it in this pull request build: https://urllib3--2026.org.readthedocs.build/en/2026/index.html. Here's what I see (with Firefox on macOS with dark mode enabled):
<img width="237" alt="urllib3 logo in dark mode in docs" src="https://user-images.githubusercontent.com/42327/96408490-ad2c8300-11f4-11eb-8054-661fb38a6c23.png">
I'm not sure what the correct fix is here. The obvious one would be to force a white background. I guess we could also... add a dark mode urllib3 logo, by switching black letters to white?
(The rest of the content looks good, even if the contrast seems low to me.)
</issue>
<code>
[start of docs/conf.py]
1 import os
2 import sys
3 from datetime import date
4
5 # If extensions (or modules to document with autodoc) are in another directory,
6 # add these directories to sys.path here. If the directory is relative to the
7 # documentation root, use os.path.abspath to make it absolute, like shown here.
8
9 root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
10 sys.path.insert(0, root_path)
11
12 # Mock some expensive/platform-specific modules so build will work.
13 # (https://read-the-docs.readthedocs.io/en/latest/faq.html#\
14 # i-get-import-errors-on-libraries-that-depend-on-c-modules)
15 from unittest import mock
16
17
18 class MockModule(mock.Mock):
19 @classmethod
20 def __getattr__(cls, name):
21 return MockModule()
22
23
24 MOCK_MODULES = ("ntlm",)
25
26 sys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)
27
28
29 import urllib3
30
31 # -- General configuration -----------------------------------------------------
32
33
34 # Add any Sphinx extension module names here, as strings. They can be extensions
35 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.doctest",
39 "sphinx.ext.intersphinx",
40 ]
41
42 # Test code blocks only when explicitly specified
43 doctest_test_doctest_blocks = ""
44
45 # Add any paths that contain templates here, relative to this directory.
46 templates_path = ["_templates"]
47
48 # The suffix of source filenames.
49 source_suffix = ".rst"
50
51 # The master toctree document.
52 master_doc = "index"
53
54 # General information about the project.
55 project = "urllib3"
56 copyright = f"{date.today().year}, Andrey Petrov"
57
58 # The short X.Y version.
59 version = urllib3.__version__
60 # The full version, including alpha/beta/rc tags.
61 release = version
62
63 # List of patterns, relative to source directory, that match files and
64 # directories to ignore when looking for source files.
65 exclude_patterns = ["_build"]
66
67 # The name of the Pygments (syntax highlighting) style to use.
68 pygments_style = "friendly"
69
70 # The theme to use for HTML and HTML Help pages. See the documentation for
71 # a list of builtin themes.
72 html_theme = "furo"
73 html_favicon = "images/favicon.png"
74 html_logo = "images/banner.svg"
75
76 html_theme_options = {
77 "announcement": """
78 <a style=\"text-decoration: none; color: white;\"
79 href=\"https://opencollective.com/urllib3\">
80 <img src=\"/en/latest/_static/favicon.png\"/> Sponsor urllib3 v2.0 on Open Collective
81 </a>
82 """,
83 "sidebar_hide_name": True,
84 }
85
86 intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
87
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -73,8 +73,8 @@
# a list of builtin themes.
html_theme = "furo"
html_favicon = "images/favicon.png"
-html_logo = "images/banner.svg"
+html_static_path = ["_static"]
html_theme_options = {
"announcement": """
<a style=\"text-decoration: none; color: white;\"
@@ -83,6 +83,8 @@
</a>
""",
"sidebar_hide_name": True,
+ "light_logo": "banner.svg",
+ "dark_logo": "dark-logo.svg",
}
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -73,8 +73,8 @@\n # a list of builtin themes.\n html_theme = \"furo\"\n html_favicon = \"images/favicon.png\"\n-html_logo = \"images/banner.svg\"\n \n+html_static_path = [\"_static\"]\n html_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n@@ -83,6 +83,8 @@\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n+ \"light_logo\": \"banner.svg\",\n+ \"dark_logo\": \"dark-logo.svg\",\n }\n \n intersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n", "issue": "urllib3 logo is unreadable in docs in dark mode\nThis is a recent Furo addition, you can see it in this pull request build: https://urllib3--2026.org.readthedocs.build/en/2026/index.html. Here's what I see (with Firefox on macOS with dark mode enabled):\r\n\r\n<img width=\"237\" alt=\"urllib3 logo in dark mode in docs\" src=\"https://user-images.githubusercontent.com/42327/96408490-ad2c8300-11f4-11eb-8054-661fb38a6c23.png\">\r\n\r\nI'm not sure what the correct fix is here. The obvious one would be to force a white background. I guess we could also... add a dark mode urllib3 logo, by switching black letters to white?\r\n\r\n(The rest of the content looks good, even if the contrast seems low to me.)\n", "before_files": [{"content": "import os\nimport sys\nfrom datetime import date\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nroot_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\nsys.path.insert(0, root_path)\n\n# Mock some expensive/platform-specific modules so build will work.\n# (https://read-the-docs.readthedocs.io/en/latest/faq.html#\\\n# i-get-import-errors-on-libraries-that-depend-on-c-modules)\nfrom unittest import mock\n\n\nclass MockModule(mock.Mock):\n @classmethod\n def __getattr__(cls, name):\n return MockModule()\n\n\nMOCK_MODULES = (\"ntlm\",)\n\nsys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)\n\n\nimport urllib3\n\n# -- General configuration -----------------------------------------------------\n\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n]\n\n# Test code blocks only when explicitly specified\ndoctest_test_doctest_blocks = \"\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"urllib3\"\ncopyright = f\"{date.today().year}, Andrey Petrov\"\n\n# The short X.Y version.\nversion = urllib3.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"friendly\"\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"furo\"\nhtml_favicon = \"images/favicon.png\"\nhtml_logo = \"images/banner.svg\"\n\nhtml_theme_options = {\n \"announcement\": \"\"\"\n <a style=\\\"text-decoration: none; color: white;\\\" \n href=\\\"https://opencollective.com/urllib3\\\">\n <img src=\\\"/en/latest/_static/favicon.png\\\"/> Sponsor urllib3 v2.0 on Open Collective\n </a>\n \"\"\",\n \"sidebar_hide_name\": True,\n}\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n", "path": "docs/conf.py"}]}
| 1,516 | 171 |
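The urllib3 fix above swaps the single `html_logo` for furo's paired light/dark logo options. A minimal `conf.py` fragment showing just that part is below; the file names are placeholders for whatever SVG assets a project actually ships, while the option names come straight from the patch.

```python
# Sphinx conf.py fragment: furo picks the logo per colour scheme, so a
# white-lettered variant can be served in dark mode.
html_theme = "furo"
html_static_path = ["_static"]  # both SVGs live here
html_theme_options = {
    "light_logo": "banner.svg",    # dark letters on a light background
    "dark_logo": "dark-logo.svg",  # light letters for dark mode
}
```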
gh_patches_debug_31822
|
rasdani/github-patches
|
git_diff
|
TencentBlueKing__bk-user-805
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Startup command: have gunicorn print a stack trace before exiting
When gunicorn exits abruptly, the approach described at https://stackoverflow.com/questions/57167240/is-it-possible-to-get-a-stack-trace-when-a-gunicorn-worker-hits-a-timeout can be used for debugging: print the stack trace before the worker exits, so the location of the problematic code can be inferred
</issue>
<code>
[start of src/login/wsgi.py]
1 # -*- coding: utf-8 -*-
2 """
3 Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS
4 Community Edition) available.
5 Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
6 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at http://opensource.org/licenses/MIT
8 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10 specific language governing permissions and limitations under the License.
11 """
12
13 import os
14
15 from dj_static import Cling
16 from django.core.wsgi import get_wsgi_application
17
18 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bklogin.config.prod")
19
20 application = Cling(get_wsgi_application())
21
[end of src/login/wsgi.py]
[start of src/saas/wsgi.py]
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 import os
12
13 from django.core.wsgi import get_wsgi_application
14
15 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_shell.config.overlays.prod")
16
17 application = get_wsgi_application()
18
[end of src/saas/wsgi.py]
[start of src/api/wsgi.py]
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 import os
12
13 from django.core.wsgi import get_wsgi_application
14
15 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_core.config.overlays.prod")
16
17 application = get_wsgi_application()
18
[end of src/api/wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/api/wsgi.py b/src/api/wsgi.py
--- a/src/api/wsgi.py
+++ b/src/api/wsgi.py
@@ -8,10 +8,13 @@
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
+import faulthandler
import os
from django.core.wsgi import get_wsgi_application
+faulthandler.enable()
+
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_core.config.overlays.prod")
application = get_wsgi_application()
diff --git a/src/login/wsgi.py b/src/login/wsgi.py
--- a/src/login/wsgi.py
+++ b/src/login/wsgi.py
@@ -10,11 +10,14 @@
specific language governing permissions and limitations under the License.
"""
+import faulthandler
import os
from dj_static import Cling
from django.core.wsgi import get_wsgi_application
+faulthandler.enable()
+
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bklogin.config.prod")
application = Cling(get_wsgi_application())
diff --git a/src/saas/wsgi.py b/src/saas/wsgi.py
--- a/src/saas/wsgi.py
+++ b/src/saas/wsgi.py
@@ -8,10 +8,13 @@
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
+import faulthandler
import os
from django.core.wsgi import get_wsgi_application
+faulthandler.enable()
+
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bkuser_shell.config.overlays.prod")
application = get_wsgi_application()
|
{"golden_diff": "diff --git a/src/api/wsgi.py b/src/api/wsgi.py\n--- a/src/api/wsgi.py\n+++ b/src/api/wsgi.py\n@@ -8,10 +8,13 @@\n an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n \"\"\"\n+import faulthandler\n import os\n \n from django.core.wsgi import get_wsgi_application\n \n+faulthandler.enable()\n+\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_core.config.overlays.prod\")\n \n application = get_wsgi_application()\ndiff --git a/src/login/wsgi.py b/src/login/wsgi.py\n--- a/src/login/wsgi.py\n+++ b/src/login/wsgi.py\n@@ -10,11 +10,14 @@\n specific language governing permissions and limitations under the License.\n \"\"\"\n \n+import faulthandler\n import os\n \n from dj_static import Cling\n from django.core.wsgi import get_wsgi_application\n \n+faulthandler.enable()\n+\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bklogin.config.prod\")\n \n application = Cling(get_wsgi_application())\ndiff --git a/src/saas/wsgi.py b/src/saas/wsgi.py\n--- a/src/saas/wsgi.py\n+++ b/src/saas/wsgi.py\n@@ -8,10 +8,13 @@\n an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n \"\"\"\n+import faulthandler\n import os\n \n from django.core.wsgi import get_wsgi_application\n \n+faulthandler.enable()\n+\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_shell.config.overlays.prod\")\n \n application = get_wsgi_application()\n", "issue": "\u542f\u52a8\u547d\u4ee4: gunicorn \u652f\u6301\u9000\u51fa\u524d\u6253\u5370\u5806\u6808\n\u9047\u5230gunicorn \u76f4\u63a5\u9000\u51fa\u7684\u60c5\u51b5, \u53ef\u4ee5\u4f7f\u7528 https://stackoverflow.com/questions/57167240/is-it-possible-to-get-a-stack-trace-when-a-gunicorn-worker-hits-a-timeout \u65b9\u5f0f\u8c03\u8bd5, \u6253\u5370\u9000\u51fa\u524d\u5806\u6808, \u4ece\u800c\u63a8\u65ad\u95ee\u9898\u4ee3\u7801\u4f4d\u7f6e\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91PaaS\u5e73\u53f0\u793e\u533a\u7248 (BlueKing PaaS\nCommunity Edition) available.\nCopyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\nimport os\n\nfrom dj_static import Cling\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bklogin.config.prod\")\n\napplication = Cling(get_wsgi_application())\n", "path": "src/login/wsgi.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. 
All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_shell.config.overlays.prod\")\n\napplication = get_wsgi_application()\n", "path": "src/saas/wsgi.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport os\n\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"bkuser_core.config.overlays.prod\")\n\napplication = get_wsgi_application()\n", "path": "src/api/wsgi.py"}]}
| 1,369 | 399 |
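The bk-user patch above enables `faulthandler` in each WSGI entry point so a worker that dies prints the stack of every thread first, which is the technique the linked Stack Overflow answer describes. The snippet below is a stand-alone illustration of that technique, not the project's actual `wsgi.py`.

```python
# Enable faulthandler as early as possible; on a fatal signal (SIGSEGV,
# SIGABRT, ...) the interpreter then dumps all thread stacks to stderr,
# which is usually enough to locate the code a silently exiting worker died in.
import faulthandler
import sys

faulthandler.enable()

# It can also be triggered on demand while debugging a hung process:
faulthandler.dump_traceback(file=sys.stderr, all_threads=True)
```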
gh_patches_debug_17087
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-17675
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
median
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/stat.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 to_ivy_arrays_and_back,
6 )
7
8
9 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
10 @to_ivy_arrays_and_back
11 def mean(input, axis=None, keepdim=False, out=None):
12 ret = ivy.mean(input, axis=axis, keepdims=keepdim, out=out)
13 ret = ivy.expand_dims(ret, axis=-1) if ret.ndim == 0 else ret
14 return ret
15
16
17 @with_unsupported_dtypes({"2.5.0 and below": ("complex", "int8")}, "paddle")
18 @to_ivy_arrays_and_back
19 def numel(x, name=None):
20 prod = ivy.prod(x.size, dtype=ivy.int64)
21 try:
22 length = len(x)
23 except (ValueError, TypeError):
24 length = 1 # if 0 dimensional tensor with 1 element
25 return ivy.array([prod if prod > 0 else ivy.array(length, dtype=ivy.int64)])
26
27
28 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
29 @to_ivy_arrays_and_back
30 def nanquantile(a, q, axis=None, keepdims=False, interpolation="linear", out=None):
31 return ivy.nanquantile(
32 a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out
33 )
34
[end of ivy/functional/frontends/paddle/tensor/stat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/tensor/stat.py b/ivy/functional/frontends/paddle/tensor/stat.py
--- a/ivy/functional/frontends/paddle/tensor/stat.py
+++ b/ivy/functional/frontends/paddle/tensor/stat.py
@@ -1,6 +1,6 @@
# global
import ivy
-from ivy.func_wrapper import with_unsupported_dtypes
+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@@ -31,3 +31,17 @@
return ivy.nanquantile(
a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out
)
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("bool", "float16", "float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def median(x, axis=None, keepdim=False, name=None):
+ x = (
+ ivy.astype(x, ivy.float64)
+ if ivy.dtype(x) == "float64"
+ else ivy.astype(x, ivy.float32)
+ )
+ return ivy.median(x, axis=axis, keepdims=keepdim)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/stat.py b/ivy/functional/frontends/paddle/tensor/stat.py\n--- a/ivy/functional/frontends/paddle/tensor/stat.py\n+++ b/ivy/functional/frontends/paddle/tensor/stat.py\n@@ -1,6 +1,6 @@\n # global\n import ivy\n-from ivy.func_wrapper import with_unsupported_dtypes\n+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\n from ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n )\n@@ -31,3 +31,17 @@\n return ivy.nanquantile(\n a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out\n )\n+\n+\n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float16\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def median(x, axis=None, keepdim=False, name=None):\n+ x = (\n+ ivy.astype(x, ivy.float64)\n+ if ivy.dtype(x) == \"float64\"\n+ else ivy.astype(x, ivy.float32)\n+ )\n+ return ivy.median(x, axis=axis, keepdims=keepdim)\n", "issue": "median\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef mean(input, axis=None, keepdim=False, out=None):\n ret = ivy.mean(input, axis=axis, keepdims=keepdim, out=out)\n ret = ivy.expand_dims(ret, axis=-1) if ret.ndim == 0 else ret\n return ret\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"complex\", \"int8\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef numel(x, name=None):\n prod = ivy.prod(x.size, dtype=ivy.int64)\n try:\n length = len(x)\n except (ValueError, TypeError):\n length = 1 # if 0 dimensional tensor with 1 element\n return ivy.array([prod if prod > 0 else ivy.array(length, dtype=ivy.int64)])\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef nanquantile(a, q, axis=None, keepdims=False, interpolation=\"linear\", out=None):\n return ivy.nanquantile(\n a, q, axis=axis, keepdims=keepdims, interpolation=interpolation, out=out\n )\n", "path": "ivy/functional/frontends/paddle/tensor/stat.py"}]}
| num_tokens_prompt: 967 | num_tokens_diff: 321 |
| problem_id: gh_patches_debug_2051 | source: rasdani/github-patches | task_type: git_diff | in_source_id: microsoft__playwright-python-13 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: page.getAttribute returns None
Actual:
```py
import asyncio
from playwright_web import chromium
async def run():
browser = await chromium.launch(headless=False)
context = await browser.newContext(viewport=0) # 0 stands for no viewport
page = await context.newPage()
await page.setContent(""""
<input id="kekstar"/>
""")
await page.fill("#kekstar", "Foobar")
print(await page.getAttribute("#kekstar", 'value'))
await browser.close()
asyncio.get_event_loop().run_until_complete(run())
```
Expected: Returns Foobar
On Try Playwright, it works: https://try.playwright.tech/?s=dzmwi
</issue>
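The reported behaviour is what an `async` method produces when it computes a value but never `return`s it. The sketch below is a minimal, Playwright-free illustration (the function name and value are made up) of why such a coroutine always resolves to `None`:

```python
import asyncio


async def get_attribute_without_return():
    # Stand-in for the real channel round-trip: the value is produced
    # here but never handed back to the caller.
    await asyncio.sleep(0)
    _value = "Foobar"  # computed, then silently dropped


async def main():
    print(await get_attribute_without_return())  # prints: None


asyncio.run(main())
```

Once the awaited result is actually returned, the caller sees the attribute value, which is what the one-line fix further down in this entry does.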
<code>
[start of playwright_web/frame.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 from playwright_web.connection import Channel, ChannelOwner, ConnectionScope, from_channel, from_nullable_channel
17 from playwright_web.element_handle import ElementHandle, convertSelectOptionValues, ValuesToSelect
18 from playwright_web.helper import ConsoleMessageLocation, FilePayload, SelectOption, is_function_body, locals_to_params
19 from playwright_web.js_handle import JSHandle, parse_result, serialize_argument
20 from playwright_web.network import Request, Response, Route
21 from typing import Any, Awaitable, Dict, List, Optional, Union
22
23 class Frame(ChannelOwner):
24
25 def __init__(self, scope: ConnectionScope, guid: str, initializer: Dict) -> None:
26 super().__init__(scope, guid, initializer)
27 self._parent_frame = from_nullable_channel(initializer['parentFrame'])
28 if self._parent_frame:
29 self._parent_frame._child_frames.append(self)
30 self._name = initializer['name']
31 self._url = initializer['url']
32 self._detached = False
33 self._child_frames: List[Frame] = list()
34 self._page: Optional['Page']
35
36 async def goto(self,
37 url: str,
38 timeout: int = None,
39 waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,
40 referer: str = None) -> Optional[Response]:
41 return from_nullable_channel(await self._channel.send('goto', locals_to_params(locals())))
42
43 async def waitForNavigation(self,
44 timeout: int = None,
45 waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,
46 url: str = None # TODO: add url, callback
47 ) -> Optional[Response]:
48 return from_nullable_channel(await self._channel.send('waitForNavigation', locals_to_params(locals())))
49
50 async def waitForLoadState(self,
51 state: str = 'load',
52 timeout: int = None) -> None:
53 await self._channel.send('waitForLoadState', locals_to_params(locals()))
54
55 async def frameElement(self) -> ElementHandle:
56 return from_channel(await self._channel.send('frameElement'))
57
58 async def evaluate(self, expression: str, arg: Any = None, force_expr: bool = False) -> Any:
59 if not is_function_body(expression):
60 force_expr = True
61 return parse_result(await self._channel.send('evaluateExpression', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))
62
63 async def evaluateHandle(self, expression: str, arg: Any = None, force_expr: bool = False) -> JSHandle:
64 if not is_function_body(expression):
65 force_expr = True
66 return from_channel(await self._channel.send('evaluateExpressionHandle', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))
67
68 async def querySelector(self, selector: str) -> Optional[ElementHandle]:
69 return from_nullable_channel(await self._channel.send('querySelector', dict(selector=selector)))
70
71 async def waitForSelector(self,
72 selector: str,
73 timeout: int = None,
74 state: str = None, # Literal['attached', 'detached', 'visible', 'hidden'] = None
75 ) -> Optional[ElementHandle]:
76 return from_nullable_channel(await self._channel.send('waitForSelector', locals_to_params(locals())))
77
78 async def dispatchEvent(self,
79 selector: str,
80 type: str,
81 eventInit: Dict = None,
82 timeout: int = None) -> None:
83 await self._channel.send('dispatchEvent', dict(selector=selector, type=type, eventInit=eventInit))
84
85 async def evalOnSelector(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:
86 return parse_result(await self._channel.send('evalOnSelector', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))
87
88 async def evalOnSelectorAll(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:
89 return parse_result(await self._channel.send('evalOnSelectorAll', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))
90
91 async def content(self) -> str:
92 return await self._channel.send('content')
93
94 async def setContent(self,
95 html: str, timeout: int = None,
96 waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None
97 ) -> None:
98 await self._channel.send('setContent', locals_to_params(locals()))
99
100 @property
101 def name(self) -> str:
102 return self._name or ''
103
104 @property
105 def url(self) -> str:
106 return self._url or ''
107
108 @property
109 def parentFrame(self) -> Optional['Frame']:
110 return self._parent_frame
111
112 @property
113 def childFrames(self) -> List['Frame']:
114 return self._child_frames.copy()
115
116 def isDetached(self) -> bool:
117 return self._detached
118
119 async def addScriptTag(self,
120 url: str = None,
121 path: str = None,
122 content: str = None) -> ElementHandle:
123 return from_channel(await self._channel.send('addScriptTag', locals_to_params(locals())))
124
125 async def addStyleTag(self,
126 url: str = None,
127 path: str = None,
128 content: str = None) -> ElementHandle:
129 return from_channel(await self._channel.send('addStyleTag', locals_to_params(locals())))
130
131 async def click(self,
132 selector: str,
133 modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,
134 position: Dict = None,
135 delay: int = None,
136 button: str = None, # Literal['left', 'right', 'middle'] = None,
137 clickCount: int = None,
138 timeout: int = None,
139 force: bool = None,
140 noWaitAfter: bool = None) -> None:
141 await self._channel.send('click', locals_to_params(locals()))
142
143 async def dblclick(self,
144 selector: str,
145 modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,
146 position: Dict = None,
147 delay: int = None,
148 button: str = None, # Literal['left', 'right', 'middle'] = None,
149 timeout: int = None,
150 force: bool = None) -> None:
151 await self._channel.send('dblclick', locals_to_params(locals()))
152
153 async def fill(self,
154 selector: str,
155 value: str,
156 timeout: int = None,
157 noWaitAfter: bool = None) -> None:
158 await self._channel.send('fill', locals_to_params(locals()))
159
160 async def focus(self,
161 selector: str,
162 timeout: int = None) -> None:
163 await self._channel.send('focus', locals_to_params(locals()))
164
165 async def textContent(self,
166 selector: str,
167 timeout: int = None) -> str:
168 return await self._channel.send('textContent', locals_to_params(locals()))
169
170 async def innerText(self,
171 selector: str,
172 timeout: int = None) -> str:
173 return await self._channel.send('innerText', locals_to_params(locals()))
174
175 async def innerHTML(self,
176 selector: str,
177 timeout: int = None) -> str:
178 return await self._channel.send('innerHTML', locals_to_params(locals()))
179
180 async def getAttribute(self,
181 selector: str,
182 name: str,
183 timeout: int = None) -> str:
184 await self._channel.send('getAttribute', locals_to_params(locals()))
185
186 async def hover(self,
187 selector: str,
188 modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,
189 position: Dict = None,
190 timeout: int = None,
191 force: bool = None) -> None:
192 await self._channel.send('hover', locals_to_params(locals()))
193
194 async def selectOption(self,
195 selector: str,
196 values: ValuesToSelect,
197 timeout: int = None,
198 noWaitAfter: bool = None) -> None:
199 await self._channel.send('selectOption', dict(selector=selector, values=convertSelectOptionValues(values), timeout=timeout, noWaitAfter=noWaitAfter))
200
201 async def setInputFiles(self,
202 selector: str,
203 files: Union[str, FilePayload, List[str], List[FilePayload]],
204 timeout: int = None,
205 noWaitAfter: bool = None) -> None:
206 await self._channel.send('setInputFiles', locals_to_params(locals()))
207
208 async def type(self,
209 selector: str,
210 text: str,
211 delay: int = None,
212 timeout: int = None,
213 noWaitAfter: bool = None) -> None:
214 await self._channel.send('type', locals_to_params(locals()))
215
216 async def press(self,
217 selector: str,
218 key: str,
219 delay: int = None,
220 timeout: int = None,
221 noWaitAfter: bool = None) -> None:
222 await self._channel.send('press', locals_to_params(locals()))
223
224 async def check(self,
225 selector: str,
226 timeout: int = None,
227 force: bool = None,
228 noWaitAfter: bool = None) -> None:
229 await self._channel.send('check', locals_to_params(locals()))
230
231 async def uncheck(self,
232 selector: str,
233 timeout: int = None,
234 force: bool = None,
235 noWaitAfter: bool = None) -> None:
236 await self._channel.send('uncheck', locals_to_params(locals()))
237
238 async def waitForTimeout(self, timeout: int) -> Awaitable[None]:
239 return self._scope._loop.create_task(asyncio.sleep(timeout / 1000))
240
241 async def waitForFunction(self,
242 expression: str,
243 arg: Any = None,
244 force_expr: bool = False,
245 timeout: int = None,
246 polling: Union[int, str] = None # Union[int, Literal["raf"]]
247 ) -> JSHandle:
248 if not is_function_body(expression):
249 force_expr = True
250 params = locals_to_params(locals())
251 params['isFunction'] = not(force_expr)
252 params['arg'] = serialize_argument(arg)
253 return from_channel(await self._channel.send('waitForFunction', params))
254
255 async def title(self) -> str:
256 return await self._channel.send('title')
257
[end of playwright_web/frame.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/playwright_web/frame.py b/playwright_web/frame.py
--- a/playwright_web/frame.py
+++ b/playwright_web/frame.py
@@ -181,7 +181,7 @@
selector: str,
name: str,
timeout: int = None) -> str:
- await self._channel.send('getAttribute', locals_to_params(locals()))
+ return await self._channel.send('getAttribute', locals_to_params(locals()))
async def hover(self,
selector: str,
|
{"golden_diff": "diff --git a/playwright_web/frame.py b/playwright_web/frame.py\n--- a/playwright_web/frame.py\n+++ b/playwright_web/frame.py\n@@ -181,7 +181,7 @@\n selector: str,\n name: str,\n timeout: int = None) -> str:\n- await self._channel.send('getAttribute', locals_to_params(locals()))\n+ return await self._channel.send('getAttribute', locals_to_params(locals()))\n \n async def hover(self,\n selector: str,\n", "issue": "[BUG]: page.getAttribute returns None\nActual:\r\n\r\n```py\r\nimport asyncio\r\nfrom playwright_web import chromium\r\n\r\n\r\nasync def run():\r\n browser = await chromium.launch(headless=False)\r\n context = await browser.newContext(viewport=0) # 0 stands for no viewport\r\n page = await context.newPage()\r\n\r\n await page.setContent(\"\"\"\"\r\n <input id=\"kekstar\"/>\r\n \"\"\")\r\n\r\n await page.fill(\"#kekstar\", \"Foobar\")\r\n\r\n print(await page.getAttribute(\"#kekstar\", 'value'))\r\n\r\n await browser.close()\r\n\r\n\r\nasyncio.get_event_loop().run_until_complete(run())\r\n\r\n```\r\n\r\nExpected: Returns Foobar\r\n\r\nOn Try Playwright, it works: https://try.playwright.tech/?s=dzmwi\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nfrom playwright_web.connection import Channel, ChannelOwner, ConnectionScope, from_channel, from_nullable_channel\nfrom playwright_web.element_handle import ElementHandle, convertSelectOptionValues, ValuesToSelect\nfrom playwright_web.helper import ConsoleMessageLocation, FilePayload, SelectOption, is_function_body, locals_to_params\nfrom playwright_web.js_handle import JSHandle, parse_result, serialize_argument\nfrom playwright_web.network import Request, Response, Route\nfrom typing import Any, Awaitable, Dict, List, Optional, Union\n\nclass Frame(ChannelOwner):\n\n def __init__(self, scope: ConnectionScope, guid: str, initializer: Dict) -> None:\n super().__init__(scope, guid, initializer)\n self._parent_frame = from_nullable_channel(initializer['parentFrame'])\n if self._parent_frame:\n self._parent_frame._child_frames.append(self)\n self._name = initializer['name']\n self._url = initializer['url']\n self._detached = False\n self._child_frames: List[Frame] = list()\n self._page: Optional['Page']\n\n async def goto(self,\n url: str,\n timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,\n referer: str = None) -> Optional[Response]:\n return from_nullable_channel(await self._channel.send('goto', locals_to_params(locals())))\n\n async def waitForNavigation(self,\n timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None,\n url: str = None # TODO: add url, callback\n ) -> Optional[Response]:\n return from_nullable_channel(await self._channel.send('waitForNavigation', locals_to_params(locals())))\n\n async def waitForLoadState(self,\n state: str = 'load',\n timeout: int = None) -> None:\n await self._channel.send('waitForLoadState', 
locals_to_params(locals()))\n\n async def frameElement(self) -> ElementHandle:\n return from_channel(await self._channel.send('frameElement'))\n\n async def evaluate(self, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n if not is_function_body(expression):\n force_expr = True\n return parse_result(await self._channel.send('evaluateExpression', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def evaluateHandle(self, expression: str, arg: Any = None, force_expr: bool = False) -> JSHandle:\n if not is_function_body(expression):\n force_expr = True\n return from_channel(await self._channel.send('evaluateExpressionHandle', dict(expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def querySelector(self, selector: str) -> Optional[ElementHandle]:\n return from_nullable_channel(await self._channel.send('querySelector', dict(selector=selector)))\n\n async def waitForSelector(self,\n selector: str,\n timeout: int = None,\n state: str = None, # Literal['attached', 'detached', 'visible', 'hidden'] = None\n ) -> Optional[ElementHandle]:\n return from_nullable_channel(await self._channel.send('waitForSelector', locals_to_params(locals())))\n\n async def dispatchEvent(self,\n selector: str,\n type: str,\n eventInit: Dict = None,\n timeout: int = None) -> None:\n await self._channel.send('dispatchEvent', dict(selector=selector, type=type, eventInit=eventInit))\n\n async def evalOnSelector(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n return parse_result(await self._channel.send('evalOnSelector', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def evalOnSelectorAll(self, selector: str, expression: str, arg: Any = None, force_expr: bool = False) -> Any:\n return parse_result(await self._channel.send('evalOnSelectorAll', dict(selector=selector, expression=expression, isFunction=not(force_expr), arg=serialize_argument(arg))))\n\n async def content(self) -> str:\n return await self._channel.send('content')\n\n async def setContent(self,\n html: str, timeout: int = None,\n waitUntil: str = None, # Literal['load', 'domcontentloaded', 'networkidle'] = None\n ) -> None:\n await self._channel.send('setContent', locals_to_params(locals()))\n\n @property\n def name(self) -> str:\n return self._name or ''\n\n @property\n def url(self) -> str:\n return self._url or ''\n\n @property\n def parentFrame(self) -> Optional['Frame']:\n return self._parent_frame\n\n @property\n def childFrames(self) -> List['Frame']:\n return self._child_frames.copy()\n\n def isDetached(self) -> bool:\n return self._detached\n\n async def addScriptTag(self,\n url: str = None,\n path: str = None,\n content: str = None) -> ElementHandle:\n return from_channel(await self._channel.send('addScriptTag', locals_to_params(locals())))\n\n async def addStyleTag(self,\n url: str = None,\n path: str = None,\n content: str = None) -> ElementHandle:\n return from_channel(await self._channel.send('addStyleTag', locals_to_params(locals())))\n\n async def click(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n delay: int = None,\n button: str = None, # Literal['left', 'right', 'middle'] = None,\n clickCount: int = None,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('click', 
locals_to_params(locals()))\n\n async def dblclick(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n delay: int = None,\n button: str = None, # Literal['left', 'right', 'middle'] = None,\n timeout: int = None,\n force: bool = None) -> None:\n await self._channel.send('dblclick', locals_to_params(locals()))\n\n async def fill(self,\n selector: str,\n value: str,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('fill', locals_to_params(locals()))\n\n async def focus(self,\n selector: str,\n timeout: int = None) -> None:\n await self._channel.send('focus', locals_to_params(locals()))\n\n async def textContent(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('textContent', locals_to_params(locals()))\n\n async def innerText(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('innerText', locals_to_params(locals()))\n\n async def innerHTML(self,\n selector: str,\n timeout: int = None) -> str:\n return await self._channel.send('innerHTML', locals_to_params(locals()))\n\n async def getAttribute(self,\n selector: str,\n name: str,\n timeout: int = None) -> str:\n await self._channel.send('getAttribute', locals_to_params(locals()))\n\n async def hover(self,\n selector: str,\n modifiers: List[str] = None, # Literal['Alt', 'Control', 'Meta', 'Shift']] = None,\n position: Dict = None,\n timeout: int = None,\n force: bool = None) -> None:\n await self._channel.send('hover', locals_to_params(locals()))\n\n async def selectOption(self,\n selector: str,\n values: ValuesToSelect,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('selectOption', dict(selector=selector, values=convertSelectOptionValues(values), timeout=timeout, noWaitAfter=noWaitAfter))\n\n async def setInputFiles(self,\n selector: str,\n files: Union[str, FilePayload, List[str], List[FilePayload]],\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('setInputFiles', locals_to_params(locals()))\n\n async def type(self,\n selector: str,\n text: str,\n delay: int = None,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('type', locals_to_params(locals()))\n\n async def press(self,\n selector: str,\n key: str,\n delay: int = None,\n timeout: int = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('press', locals_to_params(locals()))\n\n async def check(self,\n selector: str,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('check', locals_to_params(locals()))\n\n async def uncheck(self,\n selector: str,\n timeout: int = None,\n force: bool = None,\n noWaitAfter: bool = None) -> None:\n await self._channel.send('uncheck', locals_to_params(locals()))\n\n async def waitForTimeout(self, timeout: int) -> Awaitable[None]:\n return self._scope._loop.create_task(asyncio.sleep(timeout / 1000))\n\n async def waitForFunction(self,\n expression: str,\n arg: Any = None,\n force_expr: bool = False,\n timeout: int = None,\n polling: Union[int, str] = None # Union[int, Literal[\"raf\"]]\n ) -> JSHandle:\n if not is_function_body(expression):\n force_expr = True\n params = locals_to_params(locals())\n params['isFunction'] = not(force_expr)\n params['arg'] = serialize_argument(arg)\n return from_channel(await self._channel.send('waitForFunction', params))\n\n async def title(self) 
-> str:\n return await self._channel.send('title')\n", "path": "playwright_web/frame.py"}]}
| num_tokens_prompt: 3,773 | num_tokens_diff: 111 |
| problem_id: gh_patches_debug_33834 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pwndbg__pwndbg-1407 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
System calls display is wrong for x86 syscalls on x86-64
When we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.
</issue>
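The root of the confusion is that i386 and x86-64 assign different meanings to the same syscall number, and `int 0x80` always goes through the i386 table even inside a 64-bit process. The snippet below is a rough, self-contained sketch with truncated, hand-copied tables (not pwndbg's own constants):

```python
# Truncated Linux syscall tables, hand-copied for illustration only.
I386_SYSCALLS = {1: "exit", 3: "read", 4: "write", 5: "open", 11: "execve"}
AMD64_SYSCALLS = {0: "read", 1: "write", 2: "open", 11: "munmap", 59: "execve", 60: "exit"}


def syscall_name(number: int, table: dict) -> str:
    return table.get(number, "<unk_%d>" % number)


# A 64-bit process executing `int 0x80` with eax = 11 is asking for execve
# via the i386 ABI, so resolving the number against the x86-64 table
# reports a completely different call.
print(syscall_name(11, I386_SYSCALLS))   # execve  (what the kernel runs)
print(syscall_name(11, AMD64_SYSCALLS))  # munmap  (what gets displayed)
```

Resolving the number against the table that matches the instruction's ABI, as the accepted diff below does, removes the mismatch.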
<code>
[start of pwndbg/constants/__init__.py]
1 import pwndbg.gdblib.arch
2
3 from . import aarch64
4 from . import amd64
5 from . import arm
6 from . import i386
7 from . import mips
8 from . import thumb
9
10 arches = {"arm": arm, "armcm": arm, "i386": i386, "mips": mips, "x86-64": amd64, "aarch64": aarch64}
11
12
13 def syscall(value):
14 """
15 Given a value for a syscall number (e.g. execve == 11), return
16 the *name* of the syscall.
17 """
18 arch = arches.get(pwndbg.gdblib.arch.current, None)
19
20 if not arch:
21 return None
22
23 prefix = "__NR_"
24
25 for k, v in arch.__dict__.items():
26 if v != value:
27 continue
28
29 if not k.startswith(prefix):
30 continue
31
32 return k[len(prefix) :].lower()
33
34 return None
35
[end of pwndbg/constants/__init__.py]
[start of pwndbg/arguments.py]
1 """
2 Allows describing functions, specifically enumerating arguments which
3 may be passed in a combination of registers and stack values.
4 """
5 import gdb
6 from capstone import CS_GRP_CALL
7 from capstone import CS_GRP_INT
8
9 import pwndbg.chain
10 import pwndbg.constants
11 import pwndbg.disasm
12 import pwndbg.gdblib.arch
13 import pwndbg.gdblib.memory
14 import pwndbg.gdblib.regs
15 import pwndbg.gdblib.symbol
16 import pwndbg.gdblib.typeinfo
17 import pwndbg.ida
18 import pwndbg.lib.abi
19 import pwndbg.lib.funcparser
20 import pwndbg.lib.functions
21 from pwndbg.commands.nearpc import c as N
22
23 ida_replacements = {
24 "__int64": "signed long long int",
25 "__int32": "signed int",
26 "__int16": "signed short",
27 "__int8": "signed char",
28 "__uint64": "unsigned long long int",
29 "__uint32": "unsigned int",
30 "__uint16": "unsigned short",
31 "__uint8": "unsigned char",
32 "_BOOL_1": "unsigned char",
33 "_BOOL_2": "unsigned short",
34 "_BOOL_4": "unsigned int",
35 "_BYTE": "unsigned char",
36 "_WORD": "unsigned short",
37 "_DWORD": "unsigned int",
38 "_QWORD": "unsigned long long",
39 "__pure": "",
40 "__hidden": "",
41 "__return_ptr": "",
42 "__struct_ptr": "",
43 "__array_ptr": "",
44 "__fastcall": "",
45 "__cdecl": "",
46 "__thiscall": "",
47 "__userpurge": "",
48 }
49
50
51 def get_syscall_name(instruction):
52 if CS_GRP_INT not in instruction.groups:
53 return None
54
55 syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register
56
57 # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80
58 if syscall_register in ("eax", "rax"):
59 mnemonic = instruction.mnemonic
60 if not (mnemonic == "syscall" or (mnemonic == "int" and instruction.op_str == "0x80")):
61 return None
62
63 syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)
64 return pwndbg.constants.syscall(syscall_number) or "<unk_%d>" % syscall_number
65
66
67 def get(instruction):
68 """
69 Returns an array containing the arguments to the current function,
70 if $pc is a 'call' or 'bl' type instruction.
71
72 Otherwise, returns None.
73 """
74 n_args_default = 4
75
76 if instruction is None:
77 return []
78
79 if instruction.address != pwndbg.gdblib.regs.pc:
80 return []
81
82 if CS_GRP_CALL in instruction.groups:
83 try:
84 abi = pwndbg.lib.abi.ABI.default()
85 except KeyError:
86 return []
87
88 # Not sure of any OS which allows multiple operands on
89 # a call instruction.
90 assert len(instruction.operands) == 1
91
92 target = instruction.operands[0].int
93
94 if not target:
95 return []
96
97 name = pwndbg.gdblib.symbol.get(target)
98 if not name:
99 return []
100 elif CS_GRP_INT in instruction.groups:
101 # Get the syscall number and name
102 name = get_syscall_name(instruction)
103 abi = pwndbg.lib.abi.ABI.syscall()
104 target = None
105
106 if name is None:
107 return []
108 else:
109 return []
110
111 result = []
112 name = name or ""
113
114 sym = gdb.lookup_symbol(name)
115 name = name.replace("isoc99_", "") # __isoc99_sscanf
116 name = name.replace("@plt", "") # getpwiod@plt
117
118 # If we have particular `XXX_chk` function in our database, we use it.
119 # Otherwise, we show args for its unchecked version.
120 # We also lstrip `_` in here, as e.g. `__printf_chk` needs the underscores.
121 if name not in pwndbg.lib.functions.functions:
122 name = name.replace("_chk", "")
123 name = name.strip().lstrip("_") # _malloc
124
125 func = pwndbg.lib.functions.functions.get(name, None)
126
127 # Try to extract the data from GDB.
128 # Note that this is currently broken, pending acceptance of
129 # my patch: https://sourceware.org/ml/gdb-patches/2015-06/msg00268.html
130 if sym and sym[0]:
131 try:
132 n_args_default = len(sym[0].type.fields())
133 except TypeError:
134 pass
135
136 # Try to grab the data out of IDA
137 if not func and target:
138 typename = pwndbg.ida.GetType(target)
139
140 if typename:
141 typename += ";"
142
143 # GetType() does not include the name.
144 typename = typename.replace("(", " function_name(", 1)
145
146 for k, v in ida_replacements.items():
147 typename = typename.replace(k, v)
148
149 func = pwndbg.lib.funcparser.ExtractFuncDeclFromSource(typename + ";")
150
151 if func:
152 args = func.args
153 else:
154 args = (
155 pwndbg.lib.functions.Argument("int", 0, argname(i, abi)) for i in range(n_args_default)
156 )
157
158 for i, arg in enumerate(args):
159 result.append((arg, argument(i, abi)))
160
161 return result
162
163
164 def argname(n, abi=None):
165 abi = abi or pwndbg.lib.abi.ABI.default()
166 regs = abi.register_arguments
167
168 if n < len(regs):
169 return regs[n]
170
171 return "arg[%i]" % n
172
173
174 def argument(n, abi=None):
175 """
176 Returns the nth argument, as if $pc were a 'call' or 'bl' type
177 instruction.
178 Works only for ABIs that use registers for arguments.
179 """
180 abi = abi or pwndbg.lib.abi.ABI.default()
181 regs = abi.register_arguments
182
183 if n < len(regs):
184 return getattr(pwndbg.gdblib.regs, regs[n])
185
186 n -= len(regs)
187
188 sp = pwndbg.gdblib.regs.sp + (n * pwndbg.gdblib.arch.ptrsize)
189
190 return int(pwndbg.gdblib.memory.poi(pwndbg.gdblib.typeinfo.ppvoid, sp))
191
192
193 def arguments(abi=None):
194 """
195 Yields (arg_name, arg_value) tuples for arguments from a given ABI.
196 Works only for ABIs that use registers for arguments.
197 """
198 abi = abi or pwndbg.lib.abi.ABI.default()
199 regs = abi.register_arguments
200
201 for i in range(len(regs)):
202 yield argname(i, abi), argument(i, abi)
203
204
205 def format_args(instruction):
206 result = []
207 for arg, value in get(instruction):
208 code = arg.type != "char"
209 pretty = pwndbg.chain.format(value, code=code)
210
211 # Enhance args display
212 if arg.name == "fd" and isinstance(value, int):
213 path = pwndbg.gdblib.file.readlink("/proc/%d/fd/%d" % (pwndbg.gdblib.proc.pid, value))
214 if path:
215 pretty += " (%s)" % path
216
217 result.append("%-10s %s" % (N.argument(arg.name) + ":", pretty))
218 return result
219
[end of pwndbg/arguments.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/arguments.py b/pwndbg/arguments.py
--- a/pwndbg/arguments.py
+++ b/pwndbg/arguments.py
@@ -53,15 +53,26 @@
return None
syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register
+ syscall_arch = pwndbg.gdblib.arch.current
- # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80
+ # On x86/x64 `syscall` and `int <value>` instructions are in CS_GRP_INT
+ # but only `syscall` and `int 0x80` actually execute syscalls on Linux.
+ # So here, we return no syscall name for other instructions and we also
+ # handle a case when 32-bit syscalls are executed on x64
if syscall_register in ("eax", "rax"):
mnemonic = instruction.mnemonic
- if not (mnemonic == "syscall" or (mnemonic == "int" and instruction.op_str == "0x80")):
+
+ is_32bit = mnemonic == "int" and instruction.op_str == "0x80"
+ if not (mnemonic == "syscall" or is_32bit):
return None
+ # On x64 the int 0x80 instruction executes 32-bit syscalls from i386
+ # On x86, the syscall_arch is already i386, so its all fine
+ if is_32bit:
+ syscall_arch = "i386"
+
syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)
- return pwndbg.constants.syscall(syscall_number) or "<unk_%d>" % syscall_number
+ return pwndbg.constants.syscall(syscall_number, syscall_arch) or "<unk_%d>" % syscall_number
def get(instruction):
diff --git a/pwndbg/constants/__init__.py b/pwndbg/constants/__init__.py
--- a/pwndbg/constants/__init__.py
+++ b/pwndbg/constants/__init__.py
@@ -10,20 +10,20 @@
arches = {"arm": arm, "armcm": arm, "i386": i386, "mips": mips, "x86-64": amd64, "aarch64": aarch64}
-def syscall(value):
+def syscall(number, arch):
"""
- Given a value for a syscall number (e.g. execve == 11), return
- the *name* of the syscall.
+ Given a syscall number and architecture, returns the name of the syscall.
+ E.g. execve == 59 on x86-64
"""
- arch = arches.get(pwndbg.gdblib.arch.current, None)
+ arch = arches.get(arch, None)
- if not arch:
+ if arch is None:
return None
prefix = "__NR_"
for k, v in arch.__dict__.items():
- if v != value:
+ if v != number:
continue
if not k.startswith(prefix):
|
{"golden_diff": "diff --git a/pwndbg/arguments.py b/pwndbg/arguments.py\n--- a/pwndbg/arguments.py\n+++ b/pwndbg/arguments.py\n@@ -53,15 +53,26 @@\n return None\n \n syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register\n+ syscall_arch = pwndbg.gdblib.arch.current\n \n- # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80\n+ # On x86/x64 `syscall` and `int <value>` instructions are in CS_GRP_INT\n+ # but only `syscall` and `int 0x80` actually execute syscalls on Linux.\n+ # So here, we return no syscall name for other instructions and we also\n+ # handle a case when 32-bit syscalls are executed on x64\n if syscall_register in (\"eax\", \"rax\"):\n mnemonic = instruction.mnemonic\n- if not (mnemonic == \"syscall\" or (mnemonic == \"int\" and instruction.op_str == \"0x80\")):\n+\n+ is_32bit = mnemonic == \"int\" and instruction.op_str == \"0x80\"\n+ if not (mnemonic == \"syscall\" or is_32bit):\n return None\n \n+ # On x64 the int 0x80 instruction executes 32-bit syscalls from i386\n+ # On x86, the syscall_arch is already i386, so its all fine\n+ if is_32bit:\n+ syscall_arch = \"i386\"\n+\n syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)\n- return pwndbg.constants.syscall(syscall_number) or \"<unk_%d>\" % syscall_number\n+ return pwndbg.constants.syscall(syscall_number, syscall_arch) or \"<unk_%d>\" % syscall_number\n \n \n def get(instruction):\ndiff --git a/pwndbg/constants/__init__.py b/pwndbg/constants/__init__.py\n--- a/pwndbg/constants/__init__.py\n+++ b/pwndbg/constants/__init__.py\n@@ -10,20 +10,20 @@\n arches = {\"arm\": arm, \"armcm\": arm, \"i386\": i386, \"mips\": mips, \"x86-64\": amd64, \"aarch64\": aarch64}\n \n \n-def syscall(value):\n+def syscall(number, arch):\n \"\"\"\n- Given a value for a syscall number (e.g. execve == 11), return\n- the *name* of the syscall.\n+ Given a syscall number and architecture, returns the name of the syscall.\n+ E.g. execve == 59 on x86-64\n \"\"\"\n- arch = arches.get(pwndbg.gdblib.arch.current, None)\n+ arch = arches.get(arch, None)\n \n- if not arch:\n+ if arch is None:\n return None\n \n prefix = \"__NR_\"\n \n for k, v in arch.__dict__.items():\n- if v != value:\n+ if v != number:\n continue\n \n if not k.startswith(prefix):\n", "issue": "System calls display is wrong for x86 syscalls on x86-64\nWhen we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.\nSystem calls display is wrong for x86 syscalls on x86-64\nWhen we step on `int 0x80` in a 64-bit (x86-64) program, we display wrong syscall number and arguments.\n", "before_files": [{"content": "import pwndbg.gdblib.arch\n\nfrom . import aarch64\nfrom . import amd64\nfrom . import arm\nfrom . import i386\nfrom . import mips\nfrom . import thumb\n\narches = {\"arm\": arm, \"armcm\": arm, \"i386\": i386, \"mips\": mips, \"x86-64\": amd64, \"aarch64\": aarch64}\n\n\ndef syscall(value):\n \"\"\"\n Given a value for a syscall number (e.g. 
execve == 11), return\n the *name* of the syscall.\n \"\"\"\n arch = arches.get(pwndbg.gdblib.arch.current, None)\n\n if not arch:\n return None\n\n prefix = \"__NR_\"\n\n for k, v in arch.__dict__.items():\n if v != value:\n continue\n\n if not k.startswith(prefix):\n continue\n\n return k[len(prefix) :].lower()\n\n return None\n", "path": "pwndbg/constants/__init__.py"}, {"content": "\"\"\"\nAllows describing functions, specifically enumerating arguments which\nmay be passed in a combination of registers and stack values.\n\"\"\"\nimport gdb\nfrom capstone import CS_GRP_CALL\nfrom capstone import CS_GRP_INT\n\nimport pwndbg.chain\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.regs\nimport pwndbg.gdblib.symbol\nimport pwndbg.gdblib.typeinfo\nimport pwndbg.ida\nimport pwndbg.lib.abi\nimport pwndbg.lib.funcparser\nimport pwndbg.lib.functions\nfrom pwndbg.commands.nearpc import c as N\n\nida_replacements = {\n \"__int64\": \"signed long long int\",\n \"__int32\": \"signed int\",\n \"__int16\": \"signed short\",\n \"__int8\": \"signed char\",\n \"__uint64\": \"unsigned long long int\",\n \"__uint32\": \"unsigned int\",\n \"__uint16\": \"unsigned short\",\n \"__uint8\": \"unsigned char\",\n \"_BOOL_1\": \"unsigned char\",\n \"_BOOL_2\": \"unsigned short\",\n \"_BOOL_4\": \"unsigned int\",\n \"_BYTE\": \"unsigned char\",\n \"_WORD\": \"unsigned short\",\n \"_DWORD\": \"unsigned int\",\n \"_QWORD\": \"unsigned long long\",\n \"__pure\": \"\",\n \"__hidden\": \"\",\n \"__return_ptr\": \"\",\n \"__struct_ptr\": \"\",\n \"__array_ptr\": \"\",\n \"__fastcall\": \"\",\n \"__cdecl\": \"\",\n \"__thiscall\": \"\",\n \"__userpurge\": \"\",\n}\n\n\ndef get_syscall_name(instruction):\n if CS_GRP_INT not in instruction.groups:\n return None\n\n syscall_register = pwndbg.lib.abi.ABI.syscall().syscall_register\n\n # If we are on x86/x64, return no syscall name for other instructions than syscall and int 0x80\n if syscall_register in (\"eax\", \"rax\"):\n mnemonic = instruction.mnemonic\n if not (mnemonic == \"syscall\" or (mnemonic == \"int\" and instruction.op_str == \"0x80\")):\n return None\n\n syscall_number = getattr(pwndbg.gdblib.regs, syscall_register)\n return pwndbg.constants.syscall(syscall_number) or \"<unk_%d>\" % syscall_number\n\n\ndef get(instruction):\n \"\"\"\n Returns an array containing the arguments to the current function,\n if $pc is a 'call' or 'bl' type instruction.\n\n Otherwise, returns None.\n \"\"\"\n n_args_default = 4\n\n if instruction is None:\n return []\n\n if instruction.address != pwndbg.gdblib.regs.pc:\n return []\n\n if CS_GRP_CALL in instruction.groups:\n try:\n abi = pwndbg.lib.abi.ABI.default()\n except KeyError:\n return []\n\n # Not sure of any OS which allows multiple operands on\n # a call instruction.\n assert len(instruction.operands) == 1\n\n target = instruction.operands[0].int\n\n if not target:\n return []\n\n name = pwndbg.gdblib.symbol.get(target)\n if not name:\n return []\n elif CS_GRP_INT in instruction.groups:\n # Get the syscall number and name\n name = get_syscall_name(instruction)\n abi = pwndbg.lib.abi.ABI.syscall()\n target = None\n\n if name is None:\n return []\n else:\n return []\n\n result = []\n name = name or \"\"\n\n sym = gdb.lookup_symbol(name)\n name = name.replace(\"isoc99_\", \"\") # __isoc99_sscanf\n name = name.replace(\"@plt\", \"\") # getpwiod@plt\n\n # If we have particular `XXX_chk` function in our database, we use it.\n # Otherwise, we show args for 
its unchecked version.\n # We also lstrip `_` in here, as e.g. `__printf_chk` needs the underscores.\n if name not in pwndbg.lib.functions.functions:\n name = name.replace(\"_chk\", \"\")\n name = name.strip().lstrip(\"_\") # _malloc\n\n func = pwndbg.lib.functions.functions.get(name, None)\n\n # Try to extract the data from GDB.\n # Note that this is currently broken, pending acceptance of\n # my patch: https://sourceware.org/ml/gdb-patches/2015-06/msg00268.html\n if sym and sym[0]:\n try:\n n_args_default = len(sym[0].type.fields())\n except TypeError:\n pass\n\n # Try to grab the data out of IDA\n if not func and target:\n typename = pwndbg.ida.GetType(target)\n\n if typename:\n typename += \";\"\n\n # GetType() does not include the name.\n typename = typename.replace(\"(\", \" function_name(\", 1)\n\n for k, v in ida_replacements.items():\n typename = typename.replace(k, v)\n\n func = pwndbg.lib.funcparser.ExtractFuncDeclFromSource(typename + \";\")\n\n if func:\n args = func.args\n else:\n args = (\n pwndbg.lib.functions.Argument(\"int\", 0, argname(i, abi)) for i in range(n_args_default)\n )\n\n for i, arg in enumerate(args):\n result.append((arg, argument(i, abi)))\n\n return result\n\n\ndef argname(n, abi=None):\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n if n < len(regs):\n return regs[n]\n\n return \"arg[%i]\" % n\n\n\ndef argument(n, abi=None):\n \"\"\"\n Returns the nth argument, as if $pc were a 'call' or 'bl' type\n instruction.\n Works only for ABIs that use registers for arguments.\n \"\"\"\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n if n < len(regs):\n return getattr(pwndbg.gdblib.regs, regs[n])\n\n n -= len(regs)\n\n sp = pwndbg.gdblib.regs.sp + (n * pwndbg.gdblib.arch.ptrsize)\n\n return int(pwndbg.gdblib.memory.poi(pwndbg.gdblib.typeinfo.ppvoid, sp))\n\n\ndef arguments(abi=None):\n \"\"\"\n Yields (arg_name, arg_value) tuples for arguments from a given ABI.\n Works only for ABIs that use registers for arguments.\n \"\"\"\n abi = abi or pwndbg.lib.abi.ABI.default()\n regs = abi.register_arguments\n\n for i in range(len(regs)):\n yield argname(i, abi), argument(i, abi)\n\n\ndef format_args(instruction):\n result = []\n for arg, value in get(instruction):\n code = arg.type != \"char\"\n pretty = pwndbg.chain.format(value, code=code)\n\n # Enhance args display\n if arg.name == \"fd\" and isinstance(value, int):\n path = pwndbg.gdblib.file.readlink(\"/proc/%d/fd/%d\" % (pwndbg.gdblib.proc.pid, value))\n if path:\n pretty += \" (%s)\" % path\n\n result.append(\"%-10s %s\" % (N.argument(arg.name) + \":\", pretty))\n return result\n", "path": "pwndbg/arguments.py"}]}
| num_tokens_prompt: 3,158 | num_tokens_diff: 732 |
| problem_id: gh_patches_debug_2019 | source: rasdani/github-patches | task_type: git_diff | in_source_id: litestar-org__litestar-1005 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: openapi render for multiple tags isn't consistent
**Describe the bug**
When the openapi renders tags from both a controller and a route it is not deterministic. This may not be a bug? But it surprised me so thought I'd raise it.
I'm unsure if I'm doing something crazy but for a project, we check in the generated json openapi schema so we can browse the API live in gitlab. I've recently added a tag to both a controller and a route in it. But because the order of the tags isn't consistent they are going to keep flip flopping as we have a pre-commit that generates the json to make sure it's up to date. I hope that ramble makes sense...
**To Reproduce**
```python
from typing import Dict
from starlite import Starlite, Controller, get
class TestController(Controller):
tags = ["a"]
@get("/", tags=["b"])
def hello_world(self) -> Dict[str, str]:
"""Handler function that returns a greeting dictionary."""
return {"hello": "world"}
app = Starlite(route_handlers=[TestController])
print(app.openapi_schema.paths["/"].get.tags)
```
If you run that multiple times, you will see you get either:
```python
['a', 'b']
```
or
```python
['b', 'a']
```
**Additional context**
I believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59) as it forces construction of a new set. Sorting them before returning would be viable as there shouldn't be _too many_ tags and it's a one time thing I believe?
But as I said, it may not be a problem you care about as I could be doing something silly.
</issue>
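The flip-flopping can be reproduced without Starlite at all: iteration order of a `set` of strings depends on hash randomization, which changes between interpreter runs, whereas sorting is stable. A minimal sketch:

```python
# Run this script several times; PYTHONHASHSEED is randomized per run by
# default, so the set's iteration order over the two strings can differ
# from one run to the next.
tags = ["a", "b"]

print(list(set(tags)))    # sometimes ['a', 'b'], sometimes ['b', 'a']
print(sorted(set(tags)))  # always ['a', 'b']
```

That is exactly the `sorted(set(...))` change the accepted diff below makes.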
<code>
[start of starlite/openapi/path_item.py]
1 from inspect import cleandoc
2 from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast
3
4 from pydantic_openapi_schema.v3_1_0.operation import Operation
5 from pydantic_openapi_schema.v3_1_0.path_item import PathItem
6
7 from starlite.openapi.parameters import create_parameter_for_handler
8 from starlite.openapi.request_body import create_request_body
9 from starlite.openapi.responses import create_responses
10 from starlite.utils.helpers import unwrap_partial
11
12 if TYPE_CHECKING:
13 from pydantic import BaseModel
14 from pydantic_openapi_schema.v3_1_0 import SecurityRequirement
15
16 from starlite.handlers import HTTPRouteHandler
17 from starlite.plugins.base import PluginProtocol
18 from starlite.routes import HTTPRoute
19
20
21 def get_description_for_handler(route_handler: "HTTPRouteHandler", use_handler_docstrings: bool) -> Optional[str]:
22 """Produce the operation description for a route handler, either by using the description value if provided,
23
24 or the docstring - if config is enabled.
25
26 Args:
27 route_handler: A route handler instance.
28 use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped
29 handler function.
30
31 Returns:
32 An optional description string
33 """
34 handler_description = route_handler.description
35 if handler_description is None and use_handler_docstrings:
36 fn = unwrap_partial(route_handler.fn.value)
37 return cleandoc(fn.__doc__) if fn.__doc__ else None
38 return handler_description
39
40
41 def extract_layered_values(
42 route_handler: "HTTPRouteHandler",
43 ) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:
44 """Extract the tags and security values from the route handler layers.
45
46 Args:
47 route_handler: A Route Handler instance.
48
49 Returns:
50 A tuple of optional lists.
51 """
52 tags: List[str] = []
53 security: List["SecurityRequirement"] = []
54 for layer in route_handler.ownership_layers:
55 if layer.tags:
56 tags.extend(layer.tags)
57 if layer.security:
58 security.extend(layer.security)
59 return list(set(tags)) if tags else None, security or None
60
61
62 def create_path_item(
63 route: "HTTPRoute", create_examples: bool, plugins: List["PluginProtocol"], use_handler_docstrings: bool
64 ) -> PathItem:
65 """Create a PathItem model for the given route parsing all http_methods into Operation Models."""
66 path_item = PathItem()
67 for http_method, handler_tuple in route.route_handler_map.items():
68 route_handler, _ = handler_tuple
69 if route_handler.include_in_schema:
70 handler_fields = cast("BaseModel", route_handler.signature_model).__fields__
71 parameters = (
72 create_parameter_for_handler(
73 route_handler=route_handler,
74 handler_fields=handler_fields,
75 path_parameters=route.path_parameters,
76 generate_examples=create_examples,
77 )
78 or None
79 )
80 raises_validation_error = bool("data" in handler_fields or path_item.parameters or parameters)
81 handler_name = unwrap_partial(route_handler.handler_name).replace("_", " ").title()
82 request_body = None
83 if "data" in handler_fields:
84 request_body = create_request_body(
85 field=handler_fields["data"], generate_examples=create_examples, plugins=plugins
86 )
87
88 tags, security = extract_layered_values(route_handler)
89 operation = Operation(
90 operationId=route_handler.operation_id or handler_name,
91 tags=tags,
92 summary=route_handler.summary,
93 description=get_description_for_handler(route_handler, use_handler_docstrings),
94 deprecated=route_handler.deprecated,
95 responses=create_responses(
96 route_handler=route_handler,
97 raises_validation_error=raises_validation_error,
98 generate_examples=create_examples,
99 plugins=plugins,
100 ),
101 requestBody=request_body,
102 parameters=parameters, # type: ignore[arg-type]
103 security=security,
104 )
105 setattr(path_item, http_method.lower(), operation)
106 return path_item
107
[end of starlite/openapi/path_item.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py
--- a/starlite/openapi/path_item.py
+++ b/starlite/openapi/path_item.py
@@ -56,7 +56,7 @@
tags.extend(layer.tags)
if layer.security:
security.extend(layer.security)
- return list(set(tags)) if tags else None, security or None
+ return sorted(set(tags)) if tags else None, security or None
def create_path_item(
|
{"golden_diff": "diff --git a/starlite/openapi/path_item.py b/starlite/openapi/path_item.py\n--- a/starlite/openapi/path_item.py\n+++ b/starlite/openapi/path_item.py\n@@ -56,7 +56,7 @@\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n- return list(set(tags)) if tags else None, security or None\n+ return sorted(set(tags)) if tags else None, security or None\n \n \n def create_path_item(\n", "issue": "Bug: openapi render for multiple tags isn't consistent\n**Describe the bug**\r\nWhen the openapi renders tags from both a controller and a route it is not deterministic. This may not be a bug? But it surprised me so thought I'd raise it.\r\n\r\nI'm unsure if I'm doing something crazy but for a project, we check in the generated json openapi schema so we can browse the API live in gitlab. I've recently added a tag to both a controller and a route in it. But because the order of the tags isn't consistent they are going to keep flip flopping as we have a pre-commit that generates the json to make sure it's up to date. I hope that ramble makes sense...\r\n\r\n**To Reproduce**\r\n```python\r\nfrom typing import Dict \r\n \r\nfrom starlite import Starlite, Controller, get \r\n \r\nclass TestController(Controller): \r\n tags = [\"a\"] \r\n \r\n @get(\"/\", tags=[\"b\"]) \r\n def hello_world(self) -> Dict[str, str]: \r\n \"\"\"Handler function that returns a greeting dictionary.\"\"\"\r\n return {\"hello\": \"world\"} \r\n \r\n \r\napp = Starlite(route_handlers=[TestController]) \r\nprint(app.openapi_schema.paths[\"/\"].get.tags) \r\n```\r\nIf you run that multiple times, you will see you get either:\r\n```python\r\n['a', 'b']\r\n```\r\nor\r\n```python\r\n['b', 'a']\r\n```\r\n\r\n**Additional context**\r\nI believe the problem is [here](https://github.com/starlite-api/starlite/blob/835749112e8364c1516f45973c924774aca22ca9/starlite/openapi/path_item.py#L59) as it forces construction of a new set. 
Sorting them before returning would be viable as there shouldn't be _too many_ tags and it's a one time thing I believe?\r\n\r\nBut as I said, it may not be a problem you care about as I could be doing something silly.\r\n\n", "before_files": [{"content": "from inspect import cleandoc\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast\n\nfrom pydantic_openapi_schema.v3_1_0.operation import Operation\nfrom pydantic_openapi_schema.v3_1_0.path_item import PathItem\n\nfrom starlite.openapi.parameters import create_parameter_for_handler\nfrom starlite.openapi.request_body import create_request_body\nfrom starlite.openapi.responses import create_responses\nfrom starlite.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from pydantic import BaseModel\n from pydantic_openapi_schema.v3_1_0 import SecurityRequirement\n\n from starlite.handlers import HTTPRouteHandler\n from starlite.plugins.base import PluginProtocol\n from starlite.routes import HTTPRoute\n\n\ndef get_description_for_handler(route_handler: \"HTTPRouteHandler\", use_handler_docstrings: bool) -> Optional[str]:\n \"\"\"Produce the operation description for a route handler, either by using the description value if provided,\n\n or the docstring - if config is enabled.\n\n Args:\n route_handler: A route handler instance.\n use_handler_docstrings: If `True` and `route_handler.description` is `None` returns docstring of wrapped\n handler function.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn.value)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef extract_layered_values(\n route_handler: \"HTTPRouteHandler\",\n) -> Tuple[Optional[List[str]], Optional[List[Dict[str, List[str]]]]]:\n \"\"\"Extract the tags and security values from the route handler layers.\n\n Args:\n route_handler: A Route Handler instance.\n\n Returns:\n A tuple of optional lists.\n \"\"\"\n tags: List[str] = []\n security: List[\"SecurityRequirement\"] = []\n for layer in route_handler.ownership_layers:\n if layer.tags:\n tags.extend(layer.tags)\n if layer.security:\n security.extend(layer.security)\n return list(set(tags)) if tags else None, security or None\n\n\ndef create_path_item(\n route: \"HTTPRoute\", create_examples: bool, plugins: List[\"PluginProtocol\"], use_handler_docstrings: bool\n) -> PathItem:\n \"\"\"Create a PathItem model for the given route parsing all http_methods into Operation Models.\"\"\"\n path_item = PathItem()\n for http_method, handler_tuple in route.route_handler_map.items():\n route_handler, _ = handler_tuple\n if route_handler.include_in_schema:\n handler_fields = cast(\"BaseModel\", route_handler.signature_model).__fields__\n parameters = (\n create_parameter_for_handler(\n route_handler=route_handler,\n handler_fields=handler_fields,\n path_parameters=route.path_parameters,\n generate_examples=create_examples,\n )\n or None\n )\n raises_validation_error = bool(\"data\" in handler_fields or path_item.parameters or parameters)\n handler_name = unwrap_partial(route_handler.handler_name).replace(\"_\", \" \").title()\n request_body = None\n if \"data\" in handler_fields:\n request_body = create_request_body(\n field=handler_fields[\"data\"], generate_examples=create_examples, plugins=plugins\n )\n\n tags, security = extract_layered_values(route_handler)\n operation = Operation(\n 
operationId=route_handler.operation_id or handler_name,\n tags=tags,\n summary=route_handler.summary,\n description=get_description_for_handler(route_handler, use_handler_docstrings),\n deprecated=route_handler.deprecated,\n responses=create_responses(\n route_handler=route_handler,\n raises_validation_error=raises_validation_error,\n generate_examples=create_examples,\n plugins=plugins,\n ),\n requestBody=request_body,\n parameters=parameters, # type: ignore[arg-type]\n security=security,\n )\n setattr(path_item, http_method.lower(), operation)\n return path_item\n", "path": "starlite/openapi/path_item.py"}]}
| 2,040 | 107 |
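To make the fix in the record above concrete: `list(set(tags))` is unstable because string hashing is randomized per interpreter process, so two runs of the schema generator can emit the merged controller/route tags in different orders, while `sorted(set(tags))` always yields the same list. The sketch below is a standalone illustration with made-up tag values, not code taken from the Starlite repository.

```python
# Why deduplicating with a bare set gives an unstable OpenAPI document:
# str hashes are randomized per process (PYTHONHASHSEED), so the iteration
# order of a set of tag names can change between runs of the generator.

def dedupe_unstable(tags):
    # Order depends on the hash seed of the current interpreter process.
    return list(set(tags))

def dedupe_stable(tags):
    # Sorting after deduplication makes the output reproducible,
    # which keeps a checked-in schema from flip-flopping between runs.
    return sorted(set(tags))

if __name__ == "__main__":
    layered_tags = ["a", "b", "a"]  # e.g. a controller tag plus a route tag
    print("unstable:", dedupe_unstable(layered_tags))
    print("stable:  ", dedupe_stable(layered_tags))
```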
gh_patches_debug_3584
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-220
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Только часть id до дефиса выделена когда тебя @тэгнули (in English: only the part of the id before the hyphen is highlighted when you are @-mentioned)
https://vas3k.club/post/2295/#comment-8177cee9-5bef-49bf-bade-44deea61e5d5
</issue>
<code>
[start of common/regexp.py]
1 import re
2
3 USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
4 IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
5 VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
6 YOUTUBE_RE = re.compile(
7 r"http(?:s?):\/\/(?:www\.)?youtu(?:be\.com\/watch\?v=|\.be\/)([\w\-\_]*)(&(amp;)?[\w\?=]*)?"
8 )
9 TWITTER_RE = re.compile(r"(https?:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/[\d]+)")
10 FAVICON_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png|ico)")
11
[end of common/regexp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/common/regexp.py b/common/regexp.py
--- a/common/regexp.py
+++ b/common/regexp.py
@@ -1,6 +1,6 @@
import re
-USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")
+USERNAME_RE = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")
IMAGE_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:jpg|jpeg|gif|png)")
VIDEO_RE = re.compile(r"(http(s?):)([/|.|\w|\s|-])*\.(?:mov|mp4)")
YOUTUBE_RE = re.compile(
|
{"golden_diff": "diff --git a/common/regexp.py b/common/regexp.py\n--- a/common/regexp.py\n+++ b/common/regexp.py\n@@ -1,6 +1,6 @@\n import re\n \n-USERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_]{3,})\")\n+USERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_-]{3,})\")\n IMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\n VIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\n YOUTUBE_RE = re.compile(\n", "issue": "\u0422\u043e\u043b\u044c\u043a\u043e \u0447\u0430\u0441\u0442\u044c id \u0434\u043e \u0434\u0435\u0444\u0438\u0441\u0430 \u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0430 \u043a\u043e\u0433\u0434\u0430 \u0442\u0435\u0431\u044f @\u0442\u044d\u0433\u043d\u0443\u043b\u0438\n\r\nhttps://vas3k.club/post/2295/#comment-8177cee9-5bef-49bf-bade-44deea61e5d5\r\n\r\n\r\n\n", "before_files": [{"content": "import re\n\nUSERNAME_RE = re.compile(r\"(?:\\s|\\n|^)@([A-Za-z0-9_]{3,})\")\nIMAGE_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png)\")\nVIDEO_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:mov|mp4)\")\nYOUTUBE_RE = re.compile(\n r\"http(?:s?):\\/\\/(?:www\\.)?youtu(?:be\\.com\\/watch\\?v=|\\.be\\/)([\\w\\-\\_]*)(&(amp;)?\u200c\u200b[\\w\\?\u200c\u200b=]*)?\"\n)\nTWITTER_RE = re.compile(r\"(https?:\\/\\/twitter.com\\/[a-zA-Z0-9_]+\\/status\\/[\\d]+)\")\nFAVICON_RE = re.compile(r\"(http(s?):)([/|.|\\w|\\s|-])*\\.(?:jpg|jpeg|gif|png|ico)\")\n", "path": "common/regexp.py"}]}
| 882 | 164 |
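The single-character fix in this record adds `-` to the allowed username characters so that hyphenated ids match in full. A self-contained before/after check (the sample text and username are invented; the two patterns mirror the diff):

```python
import re

# Before the fix: "-" is not in the character class, so a mention of
# "@ivan-petrov" is captured only up to "ivan".
USERNAME_RE_OLD = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_]{3,})")

# After the fix: the hyphen is allowed, so the whole id is captured.
USERNAME_RE_NEW = re.compile(r"(?:\s|\n|^)@([A-Za-z0-9_-]{3,})")

text = "thanks @ivan-petrov for the write-up"
print(USERNAME_RE_OLD.findall(text))  # ['ivan']
print(USERNAME_RE_NEW.findall(text))  # ['ivan-petrov']
```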
gh_patches_debug_10369
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-3235
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash when editing str-options
##### Steps to reproduce the problem:
1. Start `mitmproxy` in interactive mode
2. Type `O` to switch to the options view
3. Edit a random `str`-option
4. See `mitmproxy` crash with the following stacktrace:
```
Traceback (most recent call last):
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/master.py", line 86, in run_loop
loop()
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 286, in run
self._run()
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 384, in _run
self.event_loop.run()
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 1484, in run
reraise(*exc_info)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/compat.py", line 58, in reraise
raise value
File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py", line 404, in <lambda>
event_loop, callback, self.get_available_raw_input())
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py", line 502, in parse_input
callback(processed, processed_codes)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 411, in _update
self.process_input(keys)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py", line 511, in process_input
k = self._topmost_widget.keypress(self.screen_size, k)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py", line 309, in keypress
k = super().keypress(size, k)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py", line 1131, in keypress
return self.body.keypress( (maxcol, remaining), key )
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py", line 44, in keypress
ret = super().keypress(size, key)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py", line 1131, in keypress
return self.body.keypress( (maxcol, remaining), key )
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py", line 283, in keypress
return self.focus_item.keypress(tsize, key)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py", line 177, in keypress
d = self.master.options.parse_setval(foc.opt.name, v)
File "/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/optmanager.py", line 332, in parse_setval
if o.typespec in (str, typing.Optional[str]):
AttributeError: 'str' object has no attribute 'typespec'
mitmproxy has crashed!
Please lodge a bug report at:
https://github.com/mitmproxy/mitmproxy
```
##### System information
```
Mitmproxy: 4.0.3
Python: 3.6.6
OpenSSL: OpenSSL 1.1.0h 27 Mar 2018
Platform: Linux-4.17.0-x86_64-with-debian-buster-sid
```
</issue>
<code>
[start of mitmproxy/tools/console/options.py]
1 import urwid
2 import blinker
3 import textwrap
4 import pprint
5 from typing import Optional, Sequence
6
7 from mitmproxy import exceptions
8 from mitmproxy import optmanager
9 from mitmproxy.tools.console import layoutwidget
10 from mitmproxy.tools.console import signals
11 from mitmproxy.tools.console import overlay
12
13 HELP_HEIGHT = 5
14
15
16 def can_edit_inplace(opt):
17 if opt.choices:
18 return False
19 if opt.typespec in [str, int, Optional[str], Optional[int]]:
20 return True
21
22
23 def fcol(s, width, attr):
24 s = str(s)
25 return (
26 "fixed",
27 width,
28 urwid.Text((attr, s))
29 )
30
31
32 option_focus_change = blinker.Signal()
33
34
35 class OptionItem(urwid.WidgetWrap):
36 def __init__(self, walker, opt, focused, namewidth, editing):
37 self.walker, self.opt, self.focused = walker, opt, focused
38 self.namewidth = namewidth
39 self.editing = editing
40 super().__init__(None)
41 self._w = self.get_widget()
42
43 def get_widget(self):
44 val = self.opt.current()
45 if self.opt.typespec == bool:
46 displayval = "true" if val else "false"
47 elif not val:
48 displayval = ""
49 elif self.opt.typespec == Sequence[str]:
50 displayval = pprint.pformat(val, indent=1)
51 else:
52 displayval = str(val)
53
54 changed = self.walker.master.options.has_changed(self.opt.name)
55 if self.focused:
56 valstyle = "option_active_selected" if changed else "option_selected"
57 else:
58 valstyle = "option_active" if changed else "text"
59
60 if self.editing:
61 valw = urwid.Edit(edit_text=displayval)
62 else:
63 valw = urwid.AttrMap(
64 urwid.Padding(
65 urwid.Text([(valstyle, displayval)])
66 ),
67 valstyle
68 )
69
70 return urwid.Columns(
71 [
72 (
73 self.namewidth,
74 urwid.Text([("title", self.opt.name.ljust(self.namewidth))])
75 ),
76 valw
77 ],
78 dividechars=2,
79 focus_column=1
80 )
81
82 def get_edit_text(self):
83 return self._w[1].get_edit_text()
84
85 def selectable(self):
86 return True
87
88 def keypress(self, size, key):
89 if self.editing:
90 self._w[1].keypress(size, key)
91 return
92 return key
93
94
95 class OptionListWalker(urwid.ListWalker):
96 def __init__(self, master):
97 self.master = master
98
99 self.index = 0
100 self.focusobj = None
101
102 self.opts = sorted(master.options.keys())
103 self.maxlen = max(len(i) for i in self.opts)
104 self.editing = False
105 self.set_focus(0)
106 self.master.options.changed.connect(self.sig_mod)
107
108 def sig_mod(self, *args, **kwargs):
109 self.opts = sorted(self.master.options.keys())
110 self.maxlen = max(len(i) for i in self.opts)
111 self._modified()
112 self.set_focus(self.index)
113
114 def start_editing(self):
115 self.editing = True
116 self.focus_obj = self._get(self.index, True)
117 self._modified()
118
119 def stop_editing(self):
120 self.editing = False
121 self.focus_obj = self._get(self.index, False)
122 self.set_focus(self.index)
123 self._modified()
124
125 def get_edit_text(self):
126 return self.focus_obj.get_edit_text()
127
128 def _get(self, pos, editing):
129 name = self.opts[pos]
130 opt = self.master.options._options[name]
131 return OptionItem(
132 self, opt, pos == self.index, self.maxlen, editing
133 )
134
135 def get_focus(self):
136 return self.focus_obj, self.index
137
138 def set_focus(self, index):
139 self.editing = False
140 name = self.opts[index]
141 opt = self.master.options._options[name]
142 self.index = index
143 self.focus_obj = self._get(self.index, self.editing)
144 option_focus_change.send(opt.help)
145
146 def get_next(self, pos):
147 if pos >= len(self.opts) - 1:
148 return None, None
149 pos = pos + 1
150 return self._get(pos, False), pos
151
152 def get_prev(self, pos):
153 pos = pos - 1
154 if pos < 0:
155 return None, None
156 return self._get(pos, False), pos
157
158
159 class OptionsList(urwid.ListBox):
160 def __init__(self, master):
161 self.master = master
162 self.walker = OptionListWalker(master)
163 super().__init__(self.walker)
164
165 def save_config(self, path):
166 try:
167 optmanager.save(self.master.options, path)
168 except exceptions.OptionsError as e:
169 signals.status_message.send(message=str(e))
170
171 def keypress(self, size, key):
172 if self.walker.editing:
173 if key == "enter":
174 foc, idx = self.get_focus()
175 v = self.walker.get_edit_text()
176 try:
177 d = self.master.options.parse_setval(foc.opt.name, v)
178 self.master.options.update(**{foc.opt.name: d})
179 except exceptions.OptionsError as v:
180 signals.status_message.send(message=str(v))
181 self.walker.stop_editing()
182 return None
183 elif key == "esc":
184 self.walker.stop_editing()
185 return None
186 else:
187 if key == "m_start":
188 self.set_focus(0)
189 self.walker._modified()
190 elif key == "m_end":
191 self.set_focus(len(self.walker.opts) - 1)
192 self.walker._modified()
193 elif key == "m_select":
194 foc, idx = self.get_focus()
195 if foc.opt.typespec == bool:
196 self.master.options.toggler(foc.opt.name)()
197 # Bust the focus widget cache
198 self.set_focus(self.walker.index)
199 elif can_edit_inplace(foc.opt):
200 self.walker.start_editing()
201 self.walker._modified()
202 elif foc.opt.choices:
203 self.master.overlay(
204 overlay.Chooser(
205 self.master,
206 foc.opt.name,
207 foc.opt.choices,
208 foc.opt.current(),
209 self.master.options.setter(foc.opt.name)
210 )
211 )
212 elif foc.opt.typespec == Sequence[str]:
213 self.master.overlay(
214 overlay.OptionsOverlay(
215 self.master,
216 foc.opt.name,
217 foc.opt.current(),
218 HELP_HEIGHT + 5
219 ),
220 valign="top"
221 )
222 else:
223 raise NotImplementedError()
224 return super().keypress(size, key)
225
226
227 class OptionHelp(urwid.Frame):
228 def __init__(self, master):
229 self.master = master
230 super().__init__(self.widget(""))
231 self.set_active(False)
232 option_focus_change.connect(self.sig_mod)
233
234 def set_active(self, val):
235 h = urwid.Text("Option Help")
236 style = "heading" if val else "heading_inactive"
237 self.header = urwid.AttrWrap(h, style)
238
239 def widget(self, txt):
240 cols, _ = self.master.ui.get_cols_rows()
241 return urwid.ListBox(
242 [urwid.Text(i) for i in textwrap.wrap(txt, cols)]
243 )
244
245 def sig_mod(self, txt):
246 self.set_body(self.widget(txt))
247
248
249 class Options(urwid.Pile, layoutwidget.LayoutWidget):
250 title = "Options"
251 keyctx = "options"
252
253 def __init__(self, master):
254 oh = OptionHelp(master)
255 self.optionslist = OptionsList(master)
256 super().__init__(
257 [
258 self.optionslist,
259 (HELP_HEIGHT, oh),
260 ]
261 )
262 self.master = master
263
264 def current_name(self):
265 foc, idx = self.optionslist.get_focus()
266 return foc.opt.name
267
268 def keypress(self, size, key):
269 if key == "m_next":
270 self.focus_position = (
271 self.focus_position + 1
272 ) % len(self.widget_list)
273 self.widget_list[1].set_active(self.focus_position == 1)
274 key = None
275
276 # This is essentially a copypasta from urwid.Pile's keypress handler.
277 # So much for "closed for modification, but open for extension".
278 item_rows = None
279 if len(size) == 2:
280 item_rows = self.get_item_rows(size, focus = True)
281 i = self.widget_list.index(self.focus_item)
282 tsize = self.get_item_size(size, i, True, item_rows)
283 return self.focus_item.keypress(tsize, key)
284
[end of mitmproxy/tools/console/options.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/tools/console/options.py b/mitmproxy/tools/console/options.py
--- a/mitmproxy/tools/console/options.py
+++ b/mitmproxy/tools/console/options.py
@@ -174,7 +174,7 @@
foc, idx = self.get_focus()
v = self.walker.get_edit_text()
try:
- d = self.master.options.parse_setval(foc.opt.name, v)
+ d = self.master.options.parse_setval(foc.opt, v)
self.master.options.update(**{foc.opt.name: d})
except exceptions.OptionsError as v:
signals.status_message.send(message=str(v))
|
{"golden_diff": "diff --git a/mitmproxy/tools/console/options.py b/mitmproxy/tools/console/options.py\n--- a/mitmproxy/tools/console/options.py\n+++ b/mitmproxy/tools/console/options.py\n@@ -174,7 +174,7 @@\n foc, idx = self.get_focus()\n v = self.walker.get_edit_text()\n try:\n- d = self.master.options.parse_setval(foc.opt.name, v)\n+ d = self.master.options.parse_setval(foc.opt, v)\n self.master.options.update(**{foc.opt.name: d})\n except exceptions.OptionsError as v:\n signals.status_message.send(message=str(v))\n", "issue": "Crash when editing str-options\n##### Steps to reproduce the problem:\r\n\r\n1. Start `mitmproxy` in interactive mode\r\n2. Type `O` to switch to the options view\r\n3. Edit a random `str`-option\r\n4. See `mitmproxy` crash with the following stacktrace:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/master.py\", line 86, in run_loop\r\n loop()\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 286, in run\r\n self._run()\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 384, in _run\r\n self.event_loop.run()\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 1484, in run\r\n reraise(*exc_info)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/compat.py\", line 58, in reraise\r\n raise value\r\n File \"/usr/lib/python3.6/asyncio/events.py\", line 145, in _run\r\n self._callback(*self._args)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py\", line 404, in <lambda>\r\n event_loop, callback, self.get_available_raw_input())\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/raw_display.py\", line 502, in parse_input\r\n callback(processed, processed_codes)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 411, in _update\r\n self.process_input(keys)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/main_loop.py\", line 511, in process_input\r\n k = self._topmost_widget.keypress(self.screen_size, k)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py\", line 309, in keypress\r\n k = super().keypress(size, k)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py\", line 1131, in keypress\r\n return self.body.keypress( (maxcol, remaining), key )\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/window.py\", line 44, in keypress\r\n ret = super().keypress(size, key)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/urwid/container.py\", line 1131, in keypress\r\n return self.body.keypress( (maxcol, remaining), key )\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py\", line 283, in keypress\r\n return self.focus_item.keypress(tsize, key)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/tools/console/options.py\", line 177, in keypress\r\n d = self.master.options.parse_setval(foc.opt.name, v)\r\n File \"/tmp/mitmproxy-crash/lib/python3.6/site-packages/mitmproxy/optmanager.py\", line 332, in parse_setval\r\n if o.typespec in (str, typing.Optional[str]):\r\nAttributeError: 'str' object has no attribute 'typespec'\r\n\r\nmitmproxy has crashed!\r\nPlease lodge a bug report at:\r\n https://github.com/mitmproxy/mitmproxy\r\n```\r\n\r\n\r\n\r\n##### System information\r\n\r\n```\r\nMitmproxy: 4.0.3\r\nPython: 
3.6.6\r\nOpenSSL: OpenSSL 1.1.0h 27 Mar 2018\r\nPlatform: Linux-4.17.0-x86_64-with-debian-buster-sid\r\n```\n", "before_files": [{"content": "import urwid\nimport blinker\nimport textwrap\nimport pprint\nfrom typing import Optional, Sequence\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import optmanager\nfrom mitmproxy.tools.console import layoutwidget\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.tools.console import overlay\n\nHELP_HEIGHT = 5\n\n\ndef can_edit_inplace(opt):\n if opt.choices:\n return False\n if opt.typespec in [str, int, Optional[str], Optional[int]]:\n return True\n\n\ndef fcol(s, width, attr):\n s = str(s)\n return (\n \"fixed\",\n width,\n urwid.Text((attr, s))\n )\n\n\noption_focus_change = blinker.Signal()\n\n\nclass OptionItem(urwid.WidgetWrap):\n def __init__(self, walker, opt, focused, namewidth, editing):\n self.walker, self.opt, self.focused = walker, opt, focused\n self.namewidth = namewidth\n self.editing = editing\n super().__init__(None)\n self._w = self.get_widget()\n\n def get_widget(self):\n val = self.opt.current()\n if self.opt.typespec == bool:\n displayval = \"true\" if val else \"false\"\n elif not val:\n displayval = \"\"\n elif self.opt.typespec == Sequence[str]:\n displayval = pprint.pformat(val, indent=1)\n else:\n displayval = str(val)\n\n changed = self.walker.master.options.has_changed(self.opt.name)\n if self.focused:\n valstyle = \"option_active_selected\" if changed else \"option_selected\"\n else:\n valstyle = \"option_active\" if changed else \"text\"\n\n if self.editing:\n valw = urwid.Edit(edit_text=displayval)\n else:\n valw = urwid.AttrMap(\n urwid.Padding(\n urwid.Text([(valstyle, displayval)])\n ),\n valstyle\n )\n\n return urwid.Columns(\n [\n (\n self.namewidth,\n urwid.Text([(\"title\", self.opt.name.ljust(self.namewidth))])\n ),\n valw\n ],\n dividechars=2,\n focus_column=1\n )\n\n def get_edit_text(self):\n return self._w[1].get_edit_text()\n\n def selectable(self):\n return True\n\n def keypress(self, size, key):\n if self.editing:\n self._w[1].keypress(size, key)\n return\n return key\n\n\nclass OptionListWalker(urwid.ListWalker):\n def __init__(self, master):\n self.master = master\n\n self.index = 0\n self.focusobj = None\n\n self.opts = sorted(master.options.keys())\n self.maxlen = max(len(i) for i in self.opts)\n self.editing = False\n self.set_focus(0)\n self.master.options.changed.connect(self.sig_mod)\n\n def sig_mod(self, *args, **kwargs):\n self.opts = sorted(self.master.options.keys())\n self.maxlen = max(len(i) for i in self.opts)\n self._modified()\n self.set_focus(self.index)\n\n def start_editing(self):\n self.editing = True\n self.focus_obj = self._get(self.index, True)\n self._modified()\n\n def stop_editing(self):\n self.editing = False\n self.focus_obj = self._get(self.index, False)\n self.set_focus(self.index)\n self._modified()\n\n def get_edit_text(self):\n return self.focus_obj.get_edit_text()\n\n def _get(self, pos, editing):\n name = self.opts[pos]\n opt = self.master.options._options[name]\n return OptionItem(\n self, opt, pos == self.index, self.maxlen, editing\n )\n\n def get_focus(self):\n return self.focus_obj, self.index\n\n def set_focus(self, index):\n self.editing = False\n name = self.opts[index]\n opt = self.master.options._options[name]\n self.index = index\n self.focus_obj = self._get(self.index, self.editing)\n option_focus_change.send(opt.help)\n\n def get_next(self, pos):\n if pos >= len(self.opts) - 1:\n return None, None\n pos = pos + 1\n return self._get(pos, 
False), pos\n\n def get_prev(self, pos):\n pos = pos - 1\n if pos < 0:\n return None, None\n return self._get(pos, False), pos\n\n\nclass OptionsList(urwid.ListBox):\n def __init__(self, master):\n self.master = master\n self.walker = OptionListWalker(master)\n super().__init__(self.walker)\n\n def save_config(self, path):\n try:\n optmanager.save(self.master.options, path)\n except exceptions.OptionsError as e:\n signals.status_message.send(message=str(e))\n\n def keypress(self, size, key):\n if self.walker.editing:\n if key == \"enter\":\n foc, idx = self.get_focus()\n v = self.walker.get_edit_text()\n try:\n d = self.master.options.parse_setval(foc.opt.name, v)\n self.master.options.update(**{foc.opt.name: d})\n except exceptions.OptionsError as v:\n signals.status_message.send(message=str(v))\n self.walker.stop_editing()\n return None\n elif key == \"esc\":\n self.walker.stop_editing()\n return None\n else:\n if key == \"m_start\":\n self.set_focus(0)\n self.walker._modified()\n elif key == \"m_end\":\n self.set_focus(len(self.walker.opts) - 1)\n self.walker._modified()\n elif key == \"m_select\":\n foc, idx = self.get_focus()\n if foc.opt.typespec == bool:\n self.master.options.toggler(foc.opt.name)()\n # Bust the focus widget cache\n self.set_focus(self.walker.index)\n elif can_edit_inplace(foc.opt):\n self.walker.start_editing()\n self.walker._modified()\n elif foc.opt.choices:\n self.master.overlay(\n overlay.Chooser(\n self.master,\n foc.opt.name,\n foc.opt.choices,\n foc.opt.current(),\n self.master.options.setter(foc.opt.name)\n )\n )\n elif foc.opt.typespec == Sequence[str]:\n self.master.overlay(\n overlay.OptionsOverlay(\n self.master,\n foc.opt.name,\n foc.opt.current(),\n HELP_HEIGHT + 5\n ),\n valign=\"top\"\n )\n else:\n raise NotImplementedError()\n return super().keypress(size, key)\n\n\nclass OptionHelp(urwid.Frame):\n def __init__(self, master):\n self.master = master\n super().__init__(self.widget(\"\"))\n self.set_active(False)\n option_focus_change.connect(self.sig_mod)\n\n def set_active(self, val):\n h = urwid.Text(\"Option Help\")\n style = \"heading\" if val else \"heading_inactive\"\n self.header = urwid.AttrWrap(h, style)\n\n def widget(self, txt):\n cols, _ = self.master.ui.get_cols_rows()\n return urwid.ListBox(\n [urwid.Text(i) for i in textwrap.wrap(txt, cols)]\n )\n\n def sig_mod(self, txt):\n self.set_body(self.widget(txt))\n\n\nclass Options(urwid.Pile, layoutwidget.LayoutWidget):\n title = \"Options\"\n keyctx = \"options\"\n\n def __init__(self, master):\n oh = OptionHelp(master)\n self.optionslist = OptionsList(master)\n super().__init__(\n [\n self.optionslist,\n (HELP_HEIGHT, oh),\n ]\n )\n self.master = master\n\n def current_name(self):\n foc, idx = self.optionslist.get_focus()\n return foc.opt.name\n\n def keypress(self, size, key):\n if key == \"m_next\":\n self.focus_position = (\n self.focus_position + 1\n ) % len(self.widget_list)\n self.widget_list[1].set_active(self.focus_position == 1)\n key = None\n\n # This is essentially a copypasta from urwid.Pile's keypress handler.\n # So much for \"closed for modification, but open for extension\".\n item_rows = None\n if len(size) == 2:\n item_rows = self.get_item_rows(size, focus = True)\n i = self.widget_list.index(self.focus_item)\n tsize = self.get_item_size(size, i, True, item_rows)\n return self.focus_item.keypress(tsize, key)\n", "path": "mitmproxy/tools/console/options.py"}]}
| 4,079 | 140 |
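The crash in this record is the common "passed the name, needed the object" bug: `parse_setval` dereferences `o.typespec`, so handing it `foc.opt.name` (a plain `str`) raises the `AttributeError` seen in the traceback, and the fix passes `foc.opt` itself. The sketch below uses a deliberately simplified stand-in `Option` class rather than mitmproxy's real optmanager API, purely to show the failure mode and the shape of the fix.

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Option:
    # Simplified stand-in for an options entry: a name plus type metadata.
    name: str
    typespec: object

def parse_setval(o: Option, v: str):
    # Mirrors the failing check: it needs the option object, not its name.
    if o.typespec in (str, Optional[str]):
        return v
    if o.typespec is int:
        return int(v)
    raise ValueError(f"unsupported type for option {o.name!r}")

opt = Option(name="listen_host", typespec=str)

# Buggy call site: passes the name string, which has no .typespec attribute.
try:
    parse_setval(opt.name, "127.0.0.1")
except AttributeError as e:
    print("crashes like the report:", e)

# Fixed call site: pass the object that actually carries the metadata.
print("fixed:", parse_setval(opt, "127.0.0.1"))
```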
gh_patches_debug_5253
|
rasdani/github-patches
|
git_diff
|
statsmodels__statsmodels-3430
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SARIMAX predict does not actually allow for datetime start/end
Docs indicate it does, but TimeSeriesModel._get_predict_start requires either a string or an int.
</issue>
<code>
[start of statsmodels/tsa/base/tsa_model.py]
1 from statsmodels.compat.python import lrange, long
2 from statsmodels.compat.pandas import is_numeric_dtype
3
4 import datetime
5
6 from pandas import to_datetime, DatetimeIndex, Period, PeriodIndex, Timestamp
7
8 from statsmodels.base import data
9 import statsmodels.base.model as base
10 import statsmodels.base.wrapper as wrap
11 from statsmodels.tsa.base import datetools
12
13 _freq_to_pandas = datetools._freq_to_pandas
14
15 _tsa_doc = """
16 %(model)s
17
18 Parameters
19 ----------
20 %(params)s
21 dates : array-like of datetime, optional
22 An array-like object of datetime objects. If a pandas object is given
23 for endog or exog, it is assumed to have a DateIndex.
24 freq : str, optional
25 The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
26 'M', 'A', or 'Q'. This is optional if dates are given.
27 %(extra_params)s
28 %(extra_sections)s
29 """
30
31 _model_doc = "Timeseries model base class"
32
33 _generic_params = base._model_params_doc
34 _missing_param_doc = base._missing_param_doc
35
36 class TimeSeriesModel(base.LikelihoodModel):
37
38 __doc__ = _tsa_doc % {"model" : _model_doc, "params" : _generic_params,
39 "extra_params" : _missing_param_doc,
40 "extra_sections" : ""}
41
42 def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):
43 super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)
44 self._init_dates(dates, freq)
45
46 def _init_dates(self, dates, freq):
47 if dates is None:
48 dates = self.data.row_labels
49
50 if dates is not None:
51 if (not datetools._is_datetime_index(dates) and
52 isinstance(self.data, data.PandasData)):
53 try:
54 if is_numeric_dtype(dates):
55 raise ValueError
56 dates = to_datetime(dates)
57 except ValueError:
58 raise ValueError("Given a pandas object and the index does "
59 "not contain dates")
60 if not freq:
61 try:
62 freq = datetools._infer_freq(dates)
63 except:
64 raise ValueError("Frequency inference failed. Use `freq` "
65 "keyword.")
66
67 if isinstance(dates[0], datetime.datetime):
68 dates = DatetimeIndex(dates)
69 else: # preserve PeriodIndex
70 dates = PeriodIndex(dates)
71 self.data.dates = dates
72 self.data.freq = freq
73
74 # Test for nanoseconds in early pandas versions
75 if freq is not None and _freq_to_pandas[freq].freqstr == 'N':
76 from distutils.version import LooseVersion
77 from pandas import __version__ as pd_version
78 if LooseVersion(pd_version) < '0.14':
79 raise NotImplementedError('Nanosecond index not available in'
80 ' Pandas < 0.14')
81
82
83 def _get_exog_names(self):
84 return self.data.xnames
85
86 def _set_exog_names(self, vals):
87 if not isinstance(vals, list):
88 vals = [vals]
89 self.data.xnames = vals
90
91 #overwrite with writable property for (V)AR models
92 exog_names = property(_get_exog_names, _set_exog_names)
93
94 def _get_dates_loc(self, dates, date):
95 date = dates.get_loc(date)
96 return date
97
98 def _str_to_date(self, date):
99 """
100 Takes a string and returns a datetime object
101 """
102 if isinstance(self.data.dates, PeriodIndex):
103 return Period(date)
104 else:
105 return datetools.date_parser(date)
106
107 def _set_predict_start_date(self, start):
108 dates = self.data.dates
109 if dates is None:
110 return
111 if start > len(dates):
112 raise ValueError("Start must be <= len(endog)")
113 if start == len(dates):
114 self.data.predict_start = datetools._date_from_idx(dates[-1],
115 1, self.data.freq)
116 elif start < len(dates):
117 self.data.predict_start = dates[start]
118 else:
119 raise ValueError("Start must be <= len(dates)")
120
121 def _get_predict_start(self, start):
122 """
123 Returns the index of the given start date. Subclasses should define
124 default behavior for start = None. That isn't handled here.
125
126 Start can be a string or an integer if self.data.dates is None.
127 """
128 dates = self.data.dates
129 if not isinstance(start, (int, long)):
130 start = str(start)
131 if dates is None:
132 raise ValueError("Got a string for start and dates is None")
133 dtstart = self._str_to_date(start)
134 self.data.predict_start = dtstart
135 try:
136 start = self._get_dates_loc(dates, dtstart)
137 except KeyError:
138 raise ValueError("Start must be in dates. Got %s | %s" %
139 (str(start), str(dtstart)))
140
141 self._set_predict_start_date(start)
142 return start
143
144
145 def _get_predict_end(self, end):
146 """
147 See _get_predict_start for more information. Subclasses do not
148 need to define anything for this.
149 """
150
151 out_of_sample = 0 # will be overwritten if needed
152 if end is None: # use data for ARIMA - endog changes
153 end = len(self.data.endog) - 1
154
155 dates = self.data.dates
156 freq = self.data.freq
157
158 if isinstance(end, str) or (dates is not None
159 and isinstance(end, type(dates[0]))):
160 if dates is None:
161 raise ValueError("Got a string or date for `end` and `dates` is None")
162
163 if isinstance(end, str):
164 dtend = self._str_to_date(end)
165 else:
166 dtend = end # end could be a pandas TimeStamp not a datetime
167
168 self.data.predict_end = dtend
169 try:
170 end = self._get_dates_loc(dates, dtend)
171 except KeyError as err: # end is greater than dates[-1]...probably
172 if dtend > self.data.dates[-1]:
173 end = len(self.data.endog) - 1
174 freq = self.data.freq
175 out_of_sample = datetools._idx_from_dates(dates[-1], dtend,
176 freq)
177 else:
178 if freq is None:
179 raise ValueError("There is no frequency for these "
180 "dates and date %s is not in dates "
181 "index. Try giving a date that is in "
182 "the dates index or use an integer."
183 % dtend)
184 else: #pragma: no cover
185 raise err # should never get here
186 self._make_predict_dates() # attaches self.data.predict_dates
187
188 elif isinstance(end, (int, long)) and dates is not None:
189 try:
190 self.data.predict_end = dates[end]
191 except IndexError as err:
192 nobs = len(self.data.endog) - 1 # as an index
193 out_of_sample = end - nobs
194 end = nobs
195 if freq is not None:
196 self.data.predict_end = datetools._date_from_idx(dates[-1],
197 out_of_sample, freq)
198 elif out_of_sample <= 0: # have no frequency but are in sample
199 #TODO: what error to catch here to make sure dates is
200 #on the index?
201 try:
202 self.data.predict_end = self._get_dates_loc(dates, end)
203 except KeyError:
204 raise
205 else:
206 self.data.predict_end = end + out_of_sample
207 self.data.predict_start = self._get_dates_loc(dates,
208 self.data.predict_start)
209
210 self._make_predict_dates()
211
212 elif isinstance(end, (int, long)):
213 nobs = len(self.data.endog) - 1 # is an index
214 if end > nobs:
215 out_of_sample = end - nobs
216 end = nobs
217
218 elif freq is None: # should have a date with freq = None
219 print('#'*80)
220 print(freq)
221 print(type(freq))
222 print('#'*80)
223 raise ValueError("When freq is None, you must give an integer "
224 "index for end.")
225
226 else:
227 print('#'*80)
228 print(freq)
229 print(type(freq))
230 print('#'*80)
231 raise ValueError("no rule for interpreting end")
232
233 return end, out_of_sample
234
235 def _make_predict_dates(self):
236 data = self.data
237 dtstart = data.predict_start
238 dtend = data.predict_end
239 freq = data.freq
240
241 if freq is not None:
242 pandas_freq = _freq_to_pandas[freq]
243 # preserve PeriodIndex or DatetimeIndex
244 dates = self.data.dates.__class__(start=dtstart,
245 end=dtend,
246 freq=pandas_freq)
247
248 if pandas_freq.freqstr == 'N':
249 _dtend = dtend
250 if isinstance(dates[-1], Period):
251 _dtend = pd.to_datetime(_dtend).to_period(dates.freq)
252 if not dates[-1] == _dtend:
253 # TODO: this is a hack because a DatetimeIndex with
254 # nanosecond frequency does not include "end"
255 dtend = Timestamp(dtend.value + 1)
256 dates = self.data.dates.__class__(start=dtstart,
257 end=dtend,
258 freq=pandas_freq)
259 # handle
260 elif freq is None and (isinstance(dtstart, (int, long)) and
261 isinstance(dtend, (int, long))):
262 from pandas import Index
263 dates = Index(lrange(dtstart, dtend+1))
264 # if freq is None and dtstart and dtend aren't integers, we're
265 # in sample
266 else:
267 dates = self.data.dates
268 start = self._get_dates_loc(dates, dtstart)
269 end = self._get_dates_loc(dates, dtend)
270 dates = dates[start:end+1] # is this index inclusive?
271 self.data.predict_dates = dates
272
273 class TimeSeriesModelResults(base.LikelihoodModelResults):
274 def __init__(self, model, params, normalized_cov_params, scale=1.):
275 self.data = model.data
276 super(TimeSeriesModelResults,
277 self).__init__(model, params, normalized_cov_params, scale)
278
279 class TimeSeriesResultsWrapper(wrap.ResultsWrapper):
280 _attrs = {}
281 _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,
282 _attrs)
283 _methods = {'predict' : 'dates'}
284 _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,
285 _methods)
286 wrap.populate_wrapper(TimeSeriesResultsWrapper,
287 TimeSeriesModelResults)
288
289 if __name__ == "__main__":
290 import statsmodels.api as sm
291 import pandas
292
293 data = sm.datasets.macrodata.load()
294
295 #make a DataFrame
296 #TODO: attach a DataFrame to some of the datasets, for quicker use
297 dates = [str(int(x[0])) +':'+ str(int(x[1])) \
298 for x in data.data[['year','quarter']]]
299
300 df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)
301 ex_mod = TimeSeriesModel(df)
302
[end of statsmodels/tsa/base/tsa_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/statsmodels/tsa/base/tsa_model.py b/statsmodels/tsa/base/tsa_model.py
--- a/statsmodels/tsa/base/tsa_model.py
+++ b/statsmodels/tsa/base/tsa_model.py
@@ -155,6 +155,9 @@
dates = self.data.dates
freq = self.data.freq
+ if isinstance(end, datetime.datetime):
+ end = self._str_to_date(str(end))
+
if isinstance(end, str) or (dates is not None
and isinstance(end, type(dates[0]))):
if dates is None:
|
{"golden_diff": "diff --git a/statsmodels/tsa/base/tsa_model.py b/statsmodels/tsa/base/tsa_model.py\n--- a/statsmodels/tsa/base/tsa_model.py\n+++ b/statsmodels/tsa/base/tsa_model.py\n@@ -155,6 +155,9 @@\n dates = self.data.dates\n freq = self.data.freq\n \n+ if isinstance(end, datetime.datetime):\n+ end = self._str_to_date(str(end))\n+\n if isinstance(end, str) or (dates is not None\n and isinstance(end, type(dates[0]))):\n if dates is None:\n", "issue": "SARIMAX predict does not actually allow for datetime start/end\nDocs indicate it does, but TimeSeriesModel._get_predict_start requires either a string or an int.\n\n", "before_files": [{"content": "from statsmodels.compat.python import lrange, long\nfrom statsmodels.compat.pandas import is_numeric_dtype\n\nimport datetime\n\nfrom pandas import to_datetime, DatetimeIndex, Period, PeriodIndex, Timestamp\n\nfrom statsmodels.base import data\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.tsa.base import datetools\n\n_freq_to_pandas = datetools._freq_to_pandas\n\n_tsa_doc = \"\"\"\n %(model)s\n\n Parameters\n ----------\n %(params)s\n dates : array-like of datetime, optional\n An array-like object of datetime objects. If a pandas object is given\n for endog or exog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. This is optional if dates are given.\n %(extra_params)s\n %(extra_sections)s\n\"\"\"\n\n_model_doc = \"Timeseries model base class\"\n\n_generic_params = base._model_params_doc\n_missing_param_doc = base._missing_param_doc\n\nclass TimeSeriesModel(base.LikelihoodModel):\n\n __doc__ = _tsa_doc % {\"model\" : _model_doc, \"params\" : _generic_params,\n \"extra_params\" : _missing_param_doc,\n \"extra_sections\" : \"\"}\n\n def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):\n super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)\n self._init_dates(dates, freq)\n\n def _init_dates(self, dates, freq):\n if dates is None:\n dates = self.data.row_labels\n\n if dates is not None:\n if (not datetools._is_datetime_index(dates) and\n isinstance(self.data, data.PandasData)):\n try:\n if is_numeric_dtype(dates):\n raise ValueError\n dates = to_datetime(dates)\n except ValueError:\n raise ValueError(\"Given a pandas object and the index does \"\n \"not contain dates\")\n if not freq:\n try:\n freq = datetools._infer_freq(dates)\n except:\n raise ValueError(\"Frequency inference failed. 
Use `freq` \"\n \"keyword.\")\n\n if isinstance(dates[0], datetime.datetime):\n dates = DatetimeIndex(dates)\n else: # preserve PeriodIndex\n dates = PeriodIndex(dates)\n self.data.dates = dates\n self.data.freq = freq\n\n # Test for nanoseconds in early pandas versions\n if freq is not None and _freq_to_pandas[freq].freqstr == 'N':\n from distutils.version import LooseVersion\n from pandas import __version__ as pd_version\n if LooseVersion(pd_version) < '0.14':\n raise NotImplementedError('Nanosecond index not available in'\n ' Pandas < 0.14')\n\n\n def _get_exog_names(self):\n return self.data.xnames\n\n def _set_exog_names(self, vals):\n if not isinstance(vals, list):\n vals = [vals]\n self.data.xnames = vals\n\n #overwrite with writable property for (V)AR models\n exog_names = property(_get_exog_names, _set_exog_names)\n\n def _get_dates_loc(self, dates, date):\n date = dates.get_loc(date)\n return date\n\n def _str_to_date(self, date):\n \"\"\"\n Takes a string and returns a datetime object\n \"\"\"\n if isinstance(self.data.dates, PeriodIndex):\n return Period(date)\n else:\n return datetools.date_parser(date)\n\n def _set_predict_start_date(self, start):\n dates = self.data.dates\n if dates is None:\n return\n if start > len(dates):\n raise ValueError(\"Start must be <= len(endog)\")\n if start == len(dates):\n self.data.predict_start = datetools._date_from_idx(dates[-1],\n 1, self.data.freq)\n elif start < len(dates):\n self.data.predict_start = dates[start]\n else:\n raise ValueError(\"Start must be <= len(dates)\")\n\n def _get_predict_start(self, start):\n \"\"\"\n Returns the index of the given start date. Subclasses should define\n default behavior for start = None. That isn't handled here.\n\n Start can be a string or an integer if self.data.dates is None.\n \"\"\"\n dates = self.data.dates\n if not isinstance(start, (int, long)):\n start = str(start)\n if dates is None:\n raise ValueError(\"Got a string for start and dates is None\")\n dtstart = self._str_to_date(start)\n self.data.predict_start = dtstart\n try:\n start = self._get_dates_loc(dates, dtstart)\n except KeyError:\n raise ValueError(\"Start must be in dates. Got %s | %s\" %\n (str(start), str(dtstart)))\n\n self._set_predict_start_date(start)\n return start\n\n\n def _get_predict_end(self, end):\n \"\"\"\n See _get_predict_start for more information. Subclasses do not\n need to define anything for this.\n \"\"\"\n\n out_of_sample = 0 # will be overwritten if needed\n if end is None: # use data for ARIMA - endog changes\n end = len(self.data.endog) - 1\n\n dates = self.data.dates\n freq = self.data.freq\n\n if isinstance(end, str) or (dates is not None\n and isinstance(end, type(dates[0]))):\n if dates is None:\n raise ValueError(\"Got a string or date for `end` and `dates` is None\")\n\n if isinstance(end, str):\n dtend = self._str_to_date(end)\n else:\n dtend = end # end could be a pandas TimeStamp not a datetime\n\n self.data.predict_end = dtend\n try:\n end = self._get_dates_loc(dates, dtend)\n except KeyError as err: # end is greater than dates[-1]...probably\n if dtend > self.data.dates[-1]:\n end = len(self.data.endog) - 1\n freq = self.data.freq\n out_of_sample = datetools._idx_from_dates(dates[-1], dtend,\n freq)\n else:\n if freq is None:\n raise ValueError(\"There is no frequency for these \"\n \"dates and date %s is not in dates \"\n \"index. 
Try giving a date that is in \"\n \"the dates index or use an integer.\"\n % dtend)\n else: #pragma: no cover\n raise err # should never get here\n self._make_predict_dates() # attaches self.data.predict_dates\n\n elif isinstance(end, (int, long)) and dates is not None:\n try:\n self.data.predict_end = dates[end]\n except IndexError as err:\n nobs = len(self.data.endog) - 1 # as an index\n out_of_sample = end - nobs\n end = nobs\n if freq is not None:\n self.data.predict_end = datetools._date_from_idx(dates[-1],\n out_of_sample, freq)\n elif out_of_sample <= 0: # have no frequency but are in sample\n #TODO: what error to catch here to make sure dates is\n #on the index?\n try:\n self.data.predict_end = self._get_dates_loc(dates, end)\n except KeyError:\n raise\n else:\n self.data.predict_end = end + out_of_sample\n self.data.predict_start = self._get_dates_loc(dates,\n self.data.predict_start)\n\n self._make_predict_dates()\n\n elif isinstance(end, (int, long)):\n nobs = len(self.data.endog) - 1 # is an index\n if end > nobs:\n out_of_sample = end - nobs\n end = nobs\n\n elif freq is None: # should have a date with freq = None\n print('#'*80)\n print(freq)\n print(type(freq))\n print('#'*80)\n raise ValueError(\"When freq is None, you must give an integer \"\n \"index for end.\")\n\n else:\n print('#'*80)\n print(freq)\n print(type(freq))\n print('#'*80)\n raise ValueError(\"no rule for interpreting end\")\n\n return end, out_of_sample\n\n def _make_predict_dates(self):\n data = self.data\n dtstart = data.predict_start\n dtend = data.predict_end\n freq = data.freq\n\n if freq is not None:\n pandas_freq = _freq_to_pandas[freq]\n # preserve PeriodIndex or DatetimeIndex\n dates = self.data.dates.__class__(start=dtstart,\n end=dtend,\n freq=pandas_freq)\n\n if pandas_freq.freqstr == 'N':\n _dtend = dtend\n if isinstance(dates[-1], Period):\n _dtend = pd.to_datetime(_dtend).to_period(dates.freq)\n if not dates[-1] == _dtend:\n # TODO: this is a hack because a DatetimeIndex with\n # nanosecond frequency does not include \"end\"\n dtend = Timestamp(dtend.value + 1)\n dates = self.data.dates.__class__(start=dtstart,\n end=dtend,\n freq=pandas_freq)\n # handle\n elif freq is None and (isinstance(dtstart, (int, long)) and\n isinstance(dtend, (int, long))):\n from pandas import Index\n dates = Index(lrange(dtstart, dtend+1))\n # if freq is None and dtstart and dtend aren't integers, we're\n # in sample\n else:\n dates = self.data.dates\n start = self._get_dates_loc(dates, dtstart)\n end = self._get_dates_loc(dates, dtend)\n dates = dates[start:end+1] # is this index inclusive?\n self.data.predict_dates = dates\n\nclass TimeSeriesModelResults(base.LikelihoodModelResults):\n def __init__(self, model, params, normalized_cov_params, scale=1.):\n self.data = model.data\n super(TimeSeriesModelResults,\n self).__init__(model, params, normalized_cov_params, scale)\n\nclass TimeSeriesResultsWrapper(wrap.ResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {'predict' : 'dates'}\n _wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(TimeSeriesResultsWrapper,\n TimeSeriesModelResults)\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n import pandas\n\n data = sm.datasets.macrodata.load()\n\n #make a DataFrame\n #TODO: attach a DataFrame to some of the datasets, for quicker use\n dates = [str(int(x[0])) +':'+ str(int(x[1])) \\\n for x in 
data.data[['year','quarter']]]\n\n df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)\n ex_mod = TimeSeriesModel(df)\n", "path": "statsmodels/tsa/base/tsa_model.py"}]}
| 3,874 | 131 |
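The one-line fix in this record folds a `datetime.datetime` `end` into the string branch that already works, via `self._str_to_date(str(end))`. Below is a minimal, library-free sketch of that normalization pattern; the toy index and parser are invented placeholders, not the statsmodels internals.

```python
import datetime

# Toy stand-in for self.data.dates: ten consecutive calendar days.
DATES = [datetime.date(2000, 1, 1) + datetime.timedelta(days=i) for i in range(10)]

def _str_to_date(s: str) -> datetime.date:
    # Placeholder parser; statsmodels delegates this to its datetools module.
    return datetime.datetime.strptime(s[:10], "%Y-%m-%d").date()

def get_predict_end(end):
    """Accept an int index, a date string, or a datetime object."""
    # The essence of the fix: route datetime input through the string branch.
    if isinstance(end, datetime.datetime):
        end = _str_to_date(str(end))
    if isinstance(end, str):
        end = _str_to_date(end)
    if isinstance(end, datetime.date):
        return DATES.index(end)
    if isinstance(end, int):
        return end
    raise TypeError(f"no rule for interpreting end={end!r}")

print(get_predict_end(3))                                  # 3
print(get_predict_end("2000-01-05"))                       # 4
print(get_predict_end(datetime.datetime(2000, 1, 7, 12)))  # 6
```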
gh_patches_debug_13278
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-1882
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incomplete download of MNIST data
### Description
`make test-examples` fails to fully download mnist data dependency for the air example
### Details
On `osx==10.14.1` and `docker==18.09.2`
```
git checkout 3ac5a02e0e6b0a11ae796707413c11df2c14ff6b
make build pyro_branch=dev pytorch_branch=release python_verion=3.6
make run pyro_branch=dev pytorch_branch=release python_verion=3.6
cd pyro
pip install numpy==1.15
pip install scipy==1.2
make test-examples > output 2>&1
```
[Output](https://github.com/pyro-ppl/pyro/files/3204055/make_test_examples.txt)
Resulting files in `.data`
```
ls -lh -rw-r--r-- 1 matt staff 19M May 21 15:03 train-images-idx3-ubyte.gz.part
```
</issue>
<code>
[start of setup.py]
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import subprocess
5 import sys
6
7 from setuptools import find_packages, setup
8
9 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
10 VERSION = """
11 # This file is auto-generated with the version information during setup.py installation.
12
13 __version__ = '{}'
14 """
15
16 # Find pyro version.
17 for line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):
18 if line.startswith('version_prefix = '):
19 version = line.strip().split()[2][1:-1]
20
21 # Append current commit sha to version
22 commit_sha = ''
23 try:
24 current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],
25 cwd=PROJECT_PATH).decode('ascii').strip()
26 # only add sha if HEAD does not point to the release tag
27 if not current_tag == version:
28 commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
29 cwd=PROJECT_PATH).decode('ascii').strip()
30 # catch all exception to be safe
31 except Exception:
32 pass # probably not a git repo
33
34 # Write version to _version.py
35 if commit_sha:
36 version += '+{}'.format(commit_sha)
37 with open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:
38 f.write(VERSION.format(version))
39
40 # Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl
41 # When releasing on pypi, make sure pandoc is on your system:
42 # $ brew install pandoc # OS X
43 # $ sudo apt-get install pandoc # Ubuntu Linux
44 try:
45 import pypandoc
46 long_description = pypandoc.convert('README.md', 'rst')
47 print(long_description)
48 except Exception as e:
49 sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
50 sys.stderr.flush()
51 long_description = open('README.md').read()
52
53 # Remove badges since they will always be obsolete.
54 # This assumes the first 10 lines contain badge info.
55 long_description = '\n'.join([str(line) for line in long_description.split('\n')[10:]])
56
57 # examples/tutorials
58 EXTRAS_REQUIRE = [
59 'jupyter>=1.0.0',
60 'matplotlib>=1.3',
61 'observations>=0.1.4',
62 'pillow',
63 'torchvision>=0.2.2',
64 'visdom>=0.1.4',
65 'pandas',
66 'seaborn',
67 'wget',
68 ]
69
70 if sys.version_info[0] == 2:
71 EXTRAS_REQUIRE.append('functools32')
72
73 setup(
74 name='pyro-ppl',
75 version=version,
76 description='A Python library for probabilistic modeling and inference',
77 long_description=long_description,
78 packages=find_packages(include=['pyro', 'pyro.*']),
79 url='http://pyro.ai',
80 author='Uber AI Labs',
81 author_email='[email protected]',
82 install_requires=[
83 # if you add any additional libraries, please also
84 # add them to `docs/requirements.txt`
85 'contextlib2',
86 'graphviz>=0.8',
87 # numpy is necessary for some functionality of PyTorch
88 'numpy>=1.7',
89 'opt_einsum>=2.3.2',
90 'six>=1.10.0',
91 'torch>=1.1.0',
92 'tqdm>=4.31',
93 ],
94 extras_require={
95 'extras': EXTRAS_REQUIRE,
96 'test': EXTRAS_REQUIRE + [
97 'nbval',
98 'pytest>=4.1',
99 'pytest-cov',
100 # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871
101 # is fixed.
102 'scipy>=1.1, <1.3',
103 ],
104 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],
105 'dev': EXTRAS_REQUIRE + [
106 'flake8',
107 'isort',
108 'nbformat',
109 'nbsphinx>=0.3.2',
110 'nbstripout',
111 'nbval',
112 'pypandoc',
113 'pytest>=4.1',
114 'pytest-xdist',
115 # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871
116 # is fixed.
117 'scipy>=1.1, < 1.3',
118 'sphinx',
119 'sphinx_rtd_theme',
120 'yapf',
121 ],
122 },
123 tests_require=['flake8', 'pytest>=4.1'],
124 keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
125 license='MIT License',
126 classifiers=[
127 'Intended Audience :: Developers',
128 'Intended Audience :: Education',
129 'Intended Audience :: Science/Research',
130 'Operating System :: POSIX :: Linux',
131 'Operating System :: MacOS :: MacOS X',
132 'Programming Language :: Python :: 2.7',
133 'Programming Language :: Python :: 3.6',
134 ],
135 # yapf
136 )
137
[end of setup.py]
[start of pyro/contrib/examples/multi_mnist.py]
1 """
2 This script generates a dataset similar to the Multi-MNIST dataset
3 described in [1].
4
5 [1] Eslami, SM Ali, et al. "Attend, infer, repeat: Fast scene
6 understanding with generative models." Advances in Neural Information
7 Processing Systems. 2016.
8 """
9
10 import os
11
12 import numpy as np
13 from PIL import Image
14
15 from pyro.contrib.examples.util import get_data_loader
16
17
18 def imresize(arr, size):
19 return np.array(Image.fromarray(arr).resize(size))
20
21
22 def sample_one(canvas_size, mnist):
23 i = np.random.randint(mnist['digits'].shape[0])
24 digit = mnist['digits'][i]
25 label = mnist['labels'][i].item()
26 scale = 0.1 * np.random.randn() + 1.3
27 new_size = tuple(int(s / scale) for s in digit.shape)
28 resized = imresize(digit, new_size)
29 w = resized.shape[0]
30 assert w == resized.shape[1]
31 padding = canvas_size - w
32 pad_l = np.random.randint(0, padding)
33 pad_r = np.random.randint(0, padding)
34 pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))
35 positioned = np.pad(resized, pad_width, 'constant', constant_values=0)
36 return positioned, label
37
38
39 def sample_multi(num_digits, canvas_size, mnist):
40 canvas = np.zeros((canvas_size, canvas_size))
41 labels = []
42 for _ in range(num_digits):
43 positioned_digit, label = sample_one(canvas_size, mnist)
44 canvas += positioned_digit
45 labels.append(label)
46 # Crude check for overlapping digits.
47 if np.max(canvas) > 255:
48 return sample_multi(num_digits, canvas_size, mnist)
49 else:
50 return canvas, labels
51
52
53 def mk_dataset(n, mnist, max_digits, canvas_size):
54 x = []
55 y = []
56 for _ in range(n):
57 num_digits = np.random.randint(max_digits + 1)
58 canvas, labels = sample_multi(num_digits, canvas_size, mnist)
59 x.append(canvas)
60 y.append(labels)
61 return np.array(x, dtype=np.uint8), y
62
63
64 def load_mnist(root_path):
65 loader = get_data_loader('MNIST', root_path)
66 return {
67 'digits': loader.dataset.data.cpu().numpy(),
68 'labels': loader.dataset.targets
69 }
70
71
72 def load(root_path):
73 file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')
74 if os.path.exists(file_path):
75 data = np.load(file_path)
76 return data['x'], data['y']
77 else:
78 # Set RNG to known state.
79 rng_state = np.random.get_state()
80 np.random.seed(681307)
81 mnist = load_mnist(root_path)
82 print('Generating multi-MNIST dataset...')
83 x, y = mk_dataset(60000, mnist, 2, 50)
84 # Revert RNG state.
85 np.random.set_state(rng_state)
86 # Crude checksum.
87 # assert x.sum() == 883114919, 'Did not generate the expected data.'
88 with open(file_path, 'wb') as f:
89 np.savez_compressed(f, x=x, y=y)
90 print('Done!')
91 return x, y
92
[end of pyro/contrib/examples/multi_mnist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/contrib/examples/multi_mnist.py b/pyro/contrib/examples/multi_mnist.py
--- a/pyro/contrib/examples/multi_mnist.py
+++ b/pyro/contrib/examples/multi_mnist.py
@@ -72,7 +72,7 @@
def load(root_path):
file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')
if os.path.exists(file_path):
- data = np.load(file_path)
+ data = np.load(file_path, allow_pickle=True)
return data['x'], data['y']
else:
# Set RNG to known state.
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,6 @@
EXTRAS_REQUIRE = [
'jupyter>=1.0.0',
'matplotlib>=1.3',
- 'observations>=0.1.4',
'pillow',
'torchvision>=0.2.2',
'visdom>=0.1.4',
|
{"golden_diff": "diff --git a/pyro/contrib/examples/multi_mnist.py b/pyro/contrib/examples/multi_mnist.py\n--- a/pyro/contrib/examples/multi_mnist.py\n+++ b/pyro/contrib/examples/multi_mnist.py\n@@ -72,7 +72,7 @@\n def load(root_path):\n file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')\n if os.path.exists(file_path):\n- data = np.load(file_path)\n+ data = np.load(file_path, allow_pickle=True)\n return data['x'], data['y']\n else:\n # Set RNG to known state.\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,6 @@\n EXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'matplotlib>=1.3',\n- 'observations>=0.1.4',\n 'pillow',\n 'torchvision>=0.2.2',\n 'visdom>=0.1.4',\n", "issue": "Incomplete download of MNIST data\n### Description\r\n\r\n`make test-examples` fails to fully download mnist data dependency for the air example\r\n\r\n### Details\r\n\r\nOn `osx==10.14.1` and `docker==18.09.2`\r\n\r\n```\r\ngit checkout 3ac5a02e0e6b0a11ae796707413c11df2c14ff6b\r\nmake build pyro_branch=dev pytorch_branch=release python_verion=3.6\r\nmake run pyro_branch=dev pytorch_branch=release python_verion=3.6\r\ncd pyro\r\npip install numpy==1.15\r\npip install scipy==1.2\r\nmake test-examples > output 2>&1\r\n```\r\n\r\n[Output](https://github.com/pyro-ppl/pyro/files/3204055/make_test_examples.txt)\r\n\r\nResulting files in `.data`\r\n```\r\nls -lh -rw-r--r-- 1 matt staff 19M May 21 15:03 train-images-idx3-ubyte.gz.part\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport subprocess\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nVERSION = \"\"\"\n# This file is auto-generated with the version information during setup.py installation.\n\n__version__ = '{}'\n\"\"\"\n\n# Find pyro version.\nfor line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):\n if line.startswith('version_prefix = '):\n version = line.strip().split()[2][1:-1]\n\n# Append current commit sha to version\ncommit_sha = ''\ntry:\n current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n # only add sha if HEAD does not point to the release tag\n if not current_tag == version:\n commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n# catch all exception to be safe\nexcept Exception:\n pass # probably not a git repo\n\n# Write version to _version.py\nif commit_sha:\n version += '+{}'.format(commit_sha)\nwith open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:\n f.write(VERSION.format(version))\n\n# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\n print(long_description)\nexcept Exception as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\n# This assumes the first 10 lines contain badge info.\nlong_description = '\\n'.join([str(line) for line in long_description.split('\\n')[10:]])\n\n# examples/tutorials\nEXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'matplotlib>=1.3',\n 'observations>=0.1.4',\n 'pillow',\n 
'torchvision>=0.2.2',\n 'visdom>=0.1.4',\n 'pandas',\n 'seaborn',\n 'wget',\n]\n\nif sys.version_info[0] == 2:\n EXTRAS_REQUIRE.append('functools32')\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(include=['pyro', 'pyro.*']),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n # if you add any additional libraries, please also\n # add them to `docs/requirements.txt`\n 'contextlib2',\n 'graphviz>=0.8',\n # numpy is necessary for some functionality of PyTorch\n 'numpy>=1.7',\n 'opt_einsum>=2.3.2',\n 'six>=1.10.0',\n 'torch>=1.1.0',\n 'tqdm>=4.31',\n ],\n extras_require={\n 'extras': EXTRAS_REQUIRE,\n 'test': EXTRAS_REQUIRE + [\n 'nbval',\n 'pytest>=4.1',\n 'pytest-cov',\n # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871\n # is fixed.\n 'scipy>=1.1, <1.3',\n ],\n 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],\n 'dev': EXTRAS_REQUIRE + [\n 'flake8',\n 'isort',\n 'nbformat',\n 'nbsphinx>=0.3.2',\n 'nbstripout',\n 'nbval',\n 'pypandoc',\n 'pytest>=4.1',\n 'pytest-xdist',\n # TODO: remove once https://github.com/pyro-ppl/pyro/issues/1871\n # is fixed.\n 'scipy>=1.1, < 1.3',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n ],\n },\n tests_require=['flake8', 'pytest>=4.1'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.6',\n ],\n # yapf\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nThis script generates a dataset similar to the Multi-MNIST dataset\ndescribed in [1].\n\n[1] Eslami, SM Ali, et al. \"Attend, infer, repeat: Fast scene\nunderstanding with generative models.\" Advances in Neural Information\nProcessing Systems. 
2016.\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\n\nfrom pyro.contrib.examples.util import get_data_loader\n\n\ndef imresize(arr, size):\n return np.array(Image.fromarray(arr).resize(size))\n\n\ndef sample_one(canvas_size, mnist):\n i = np.random.randint(mnist['digits'].shape[0])\n digit = mnist['digits'][i]\n label = mnist['labels'][i].item()\n scale = 0.1 * np.random.randn() + 1.3\n new_size = tuple(int(s / scale) for s in digit.shape)\n resized = imresize(digit, new_size)\n w = resized.shape[0]\n assert w == resized.shape[1]\n padding = canvas_size - w\n pad_l = np.random.randint(0, padding)\n pad_r = np.random.randint(0, padding)\n pad_width = ((pad_l, padding - pad_l), (pad_r, padding - pad_r))\n positioned = np.pad(resized, pad_width, 'constant', constant_values=0)\n return positioned, label\n\n\ndef sample_multi(num_digits, canvas_size, mnist):\n canvas = np.zeros((canvas_size, canvas_size))\n labels = []\n for _ in range(num_digits):\n positioned_digit, label = sample_one(canvas_size, mnist)\n canvas += positioned_digit\n labels.append(label)\n # Crude check for overlapping digits.\n if np.max(canvas) > 255:\n return sample_multi(num_digits, canvas_size, mnist)\n else:\n return canvas, labels\n\n\ndef mk_dataset(n, mnist, max_digits, canvas_size):\n x = []\n y = []\n for _ in range(n):\n num_digits = np.random.randint(max_digits + 1)\n canvas, labels = sample_multi(num_digits, canvas_size, mnist)\n x.append(canvas)\n y.append(labels)\n return np.array(x, dtype=np.uint8), y\n\n\ndef load_mnist(root_path):\n loader = get_data_loader('MNIST', root_path)\n return {\n 'digits': loader.dataset.data.cpu().numpy(),\n 'labels': loader.dataset.targets\n }\n\n\ndef load(root_path):\n file_path = os.path.join(root_path, 'multi_mnist_uint8.npz')\n if os.path.exists(file_path):\n data = np.load(file_path)\n return data['x'], data['y']\n else:\n # Set RNG to known state.\n rng_state = np.random.get_state()\n np.random.seed(681307)\n mnist = load_mnist(root_path)\n print('Generating multi-MNIST dataset...')\n x, y = mk_dataset(60000, mnist, 2, 50)\n # Revert RNG state.\n np.random.set_state(rng_state)\n # Crude checksum.\n # assert x.sum() == 883114919, 'Did not generate the expected data.'\n with open(file_path, 'wb') as f:\n np.savez_compressed(f, x=x, y=y)\n print('Done!')\n return x, y\n", "path": "pyro/contrib/examples/multi_mnist.py"}]}
| 3,154 | 234 |
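The `allow_pickle=True` change in the patch above is needed because the cached `y` array holds variable-length label lists, which NumPy can only store as an object array, and recent NumPy releases default `np.load(..., allow_pickle=False)` and refuse to unpickle object arrays. A self-contained sketch of that behaviour (the file name is arbitrary):

```python
import numpy as np

y = np.array([[3, 5], [7], []], dtype=object)   # ragged labels, like mk_dataset's y
np.savez_compressed('demo.npz', y=y)

data = np.load('demo.npz')
# data['y']  -> ValueError: Object arrays cannot be loaded when allow_pickle=False

data = np.load('demo.npz', allow_pickle=True)   # mirrors the patch above
print(data['y'])
```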
gh_patches_debug_1395
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-5710
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Irreproducible results with `MultiRocketMultivariate`
`random_state` does not guarantee the same results for each run.
```python
rng = np.random.default_rng()
X = pd.DataFrame([
pd.Series([
pd.Series(rng.integers(0, 10, 100)).astype(float),
pd.Series(rng.integers(0, 10, 100)).astype(float),
]),
pd.Series([
pd.Series(rng.integers(0, 10, 100)).astype(float),
pd.Series(rng.integers(0, 10, 100)).astype(float),
]),
])
MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X) - MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X)
```
The output should always be a `DataFrame` of zeros, but this is not the case.
<details>
<!--
Please run the following code snippet and paste the output here:
from sktime import show_versions; show_versions()
-->
System:
python: 3.9.6 (default, Aug 11 2023, 19:44:49) [Clang 15.0.0 (clang-1500.0.40.1)]
executable: /Users/temp/sktime/venv/bin/python
machine: macOS-14.1.2-arm64-arm-64bit
Python dependencies:
pip: 23.3.1
sktime: 0.25.0
sklearn: 1.3.2
skbase: 0.4.6
numpy: 1.26.1
scipy: 1.11.3
pandas: 2.1.4
matplotlib: 3.8.1
joblib: 1.3.2
numba: 0.58.1
statsmodels: 0.14.0
pmdarima: None
statsforecast: None
tsfresh: 0.20.1
tslearn: 0.6.3
torch: 2.1.0
tensorflow: None
tensorflow_probability: None
</details>
<!-- Thanks for contributing! -->
</issue>
<code>
[start of sktime/transformations/panel/rocket/_multirocket_multivariate.py]
1 import multiprocessing
2
3 import numpy as np
4 import pandas as pd
5
6 from sktime.transformations.base import BaseTransformer
7
8
9 class MultiRocketMultivariate(BaseTransformer):
10 """Multi RandOm Convolutional KErnel Transform (MultiRocket).
11
12     MultiRocket [1]_ uses the same set of kernels as MiniRocket on both the raw
13 series and the first order differenced series representation. It uses a different
14     set of dilations for each representation. In addition to percentage of
15 positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive
16 Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of
17 Positive Values (LSPV). This version is the multivariate version.
18
19     This transformer fits one set of parameters per individual series,
20 and applies the transform with fitted parameter i to the i-th series in transform.
21 Vanilla use requires same number of series in fit and transform.
22
23 To fit and transform series at the same time,
24 without an identification of fit/transform instances,
25 wrap this transformer in ``FitInTransform``,
26 from ``sktime.transformations.compose``.
27
28 Parameters
29 ----------
30 num_kernels : int, default=6,250
31 number of random convolutional kernels. The calculated number of features is the
32 nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000
33 (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).
34 max_dilations_per_kernel : int, default=32
35 maximum number of dilations per kernel.
36 n_features_per_kernel : int, default =4
37 number of features per kernel.
38 normalise : bool, default False
39 n_jobs : int, default=1
40 The number of jobs to run in parallel for `transform`. ``-1`` means using all
41 processors.
42 random_state : None or int, default = None
43
44 Attributes
45 ----------
46 parameter : tuple
47 parameter (dilations, num_features_per_dilation, biases) for
48 transformation of input X
49 parameter1 : tuple
50 parameter (dilations, num_features_per_dilation, biases) for
51 transformation of input X1 = np.diff(X, 1)
52
53 See Also
54 --------
55 MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket
56
57 References
58 ----------
59 .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and
60 Webb, Geoffrey I, "MultiRocket: Multiple pooling operators and transformations
61 for fast and effective time series classification",2022,
62 https://link.springer.com/article/10.1007/s10618-022-00844-1
63 https://arxiv.org/abs/2102.00457
64
65 Examples
66 --------
67 >>> from sktime.transformations.panel.rocket import Rocket
68 >>> from sktime.datasets import load_basic_motions
69 >>> X_train, y_train = load_basic_motions(split="train") # doctest: +SKIP
70 >>> X_test, y_test = load_basic_motions(split="test") # doctest: +SKIP
71 >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP
72 >>> trf.fit(X_train) # doctest: +SKIP
73 MultiRocketMultivariate(...)
74 >>> X_train = trf.transform(X_train) # doctest: +SKIP
75 >>> X_test = trf.transform(X_test) # doctest: +SKIP
76 """
77
78 _tags = {
79 "univariate-only": False,
80 "fit_is_empty": False,
81 "scitype:transform-input": "Series",
82 # what is the scitype of X: Series, or Panel
83 "scitype:transform-output": "Primitives",
84 # what is the scitype of y: None (not needed), Primitives, Series, Panel
85 "scitype:instancewise": False, # is this an instance-wise transform?
86 "X_inner_mtype": "numpy3D", # which mtypes do _fit/_predict support for X?
87         "y_inner_mtype": "None",  # which mtypes do _fit/_predict support for y?
88 "python_dependencies": "numba",
89 }
90
91 def __init__(
92 self,
93 num_kernels=6_250,
94 max_dilations_per_kernel=32,
95 n_features_per_kernel=4,
96 normalise=False,
97 n_jobs=1,
98 random_state=None,
99 ):
100 self.max_dilations_per_kernel = max_dilations_per_kernel
101 self.n_features_per_kernel = n_features_per_kernel
102 self.num_kernels = num_kernels
103 self.normalise = normalise
104 self.n_jobs = n_jobs
105 self.random_state = random_state if isinstance(random_state, int) else None
106
107 self.parameter = None
108 self.parameter1 = None
109
110 super().__init__()
111
112 def _fit(self, X, y=None):
113 """Fit dilations and biases to input time series.
114
115 Parameters
116 ----------
117 X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
118 panel of time series to transform
119 y : ignored argument for interface compatibility
120
121 Returns
122 -------
123 self
124 """
125 if self.normalise:
126 X = (X - X.mean(axis=-1, keepdims=True)) / (
127 X.std(axis=-1, keepdims=True) + 1e-8
128 )
129
130 if X.shape[2] < 10:
131 # handling very short series (like PensDigit from the MTSC archive)
132 # series have to be at least a length of 10 (including differencing)
133 _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)
134 _X1[:, :, : X.shape[2]] = X
135 X = _X1
136 del _X1
137
138 X = X.astype(np.float64)
139
140 self.parameter = self._get_parameter(X)
141 _X1 = np.diff(X, 1)
142
143 self.parameter1 = self._get_parameter(_X1)
144
145 return self
146
147 def _transform(self, X, y=None):
148 """Transform input time series using random convolutional kernels.
149
150 Parameters
151 ----------
152 X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
153 panel of time series to transform
154 y : ignored argument for interface compatibility
155
156 Returns
157 -------
158 pandas DataFrame, transformed features
159 """
160 from numba import get_num_threads, set_num_threads
161
162 from sktime.transformations.panel.rocket._multirocket_multi_numba import (
163 _transform,
164 )
165
166 if self.normalise:
167 X = (X - X.mean(axis=-1, keepdims=True)) / (
168 X.std(axis=-1, keepdims=True) + 1e-8
169 )
170
171 _X1 = np.diff(X, 1)
172
173 # change n_jobs depended on value and existing cores
174 prev_threads = get_num_threads()
175 if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():
176 n_jobs = multiprocessing.cpu_count()
177 else:
178 n_jobs = self.n_jobs
179 set_num_threads(n_jobs)
180
181 X = _transform(
182 X,
183 _X1,
184 self.parameter,
185 self.parameter1,
186 self.n_features_per_kernel,
187 )
188 X = np.nan_to_num(X)
189
190 set_num_threads(prev_threads)
191
192 return pd.DataFrame(X)
193
194 def _get_parameter(self, X):
195 from sktime.transformations.panel.rocket._multirocket_multi_numba import (
196 _fit_biases,
197 _fit_dilations,
198 _quantiles,
199 )
200
201 _, num_channels, input_length = X.shape
202
203 num_kernels = 84
204
205 dilations, num_features_per_dilation = _fit_dilations(
206 input_length, self.num_kernels, self.max_dilations_per_kernel
207 )
208
209 num_features_per_kernel = np.sum(num_features_per_dilation)
210
211 quantiles = _quantiles(num_kernels * num_features_per_kernel)
212
213 num_dilations = len(dilations)
214 num_combinations = num_kernels * num_dilations
215
216 max_num_channels = min(num_channels, 9)
217 max_exponent = np.log2(max_num_channels + 1)
218
219 num_channels_per_combination = (
220 2 ** np.random.uniform(0, max_exponent, num_combinations)
221 ).astype(np.int32)
222
223 channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)
224
225 num_channels_start = 0
226 for combination_index in range(num_combinations):
227 num_channels_this_combination = num_channels_per_combination[
228 combination_index
229 ]
230 num_channels_end = num_channels_start + num_channels_this_combination
231 channel_indices[num_channels_start:num_channels_end] = np.random.choice(
232 num_channels, num_channels_this_combination, replace=False
233 )
234
235 num_channels_start = num_channels_end
236
237 biases = _fit_biases(
238 X,
239 num_channels_per_combination,
240 channel_indices,
241 dilations,
242 num_features_per_dilation,
243 quantiles,
244 self.random_state,
245 )
246
247 return (
248 num_channels_per_combination,
249 channel_indices,
250 dilations,
251 num_features_per_dilation,
252 biases,
253 )
254
[end of sktime/transformations/panel/rocket/_multirocket_multivariate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sktime/transformations/panel/rocket/_multirocket_multivariate.py b/sktime/transformations/panel/rocket/_multirocket_multivariate.py
--- a/sktime/transformations/panel/rocket/_multirocket_multivariate.py
+++ b/sktime/transformations/panel/rocket/_multirocket_multivariate.py
@@ -198,6 +198,9 @@
_quantiles,
)
+ if self.random_state is not None:
+ np.random.seed(self.random_state)
+
_, num_channels, input_length = X.shape
num_kernels = 84
|
{"golden_diff": "diff --git a/sktime/transformations/panel/rocket/_multirocket_multivariate.py b/sktime/transformations/panel/rocket/_multirocket_multivariate.py\n--- a/sktime/transformations/panel/rocket/_multirocket_multivariate.py\n+++ b/sktime/transformations/panel/rocket/_multirocket_multivariate.py\n@@ -198,6 +198,9 @@\n _quantiles,\n )\n \n+ if self.random_state is not None:\n+ np.random.seed(self.random_state)\n+\n _, num_channels, input_length = X.shape\n \n num_kernels = 84\n", "issue": "[BUG] Irreproducible results with `MultiRocketMultivariate`\n`random_state` does guarantee the same results for each run.\r\n\r\n```python\r\nrng = np.random.default_rng()\r\n\r\nX = pd.DataFrame([\r\n pd.Series([\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n ]),\r\n pd.Series([\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n pd.Series(rng.integers(0, 10, 100)).astype(float),\r\n ]),\r\n])\r\n\r\nMultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X) - MultiRocketMultivariate(random_state=42, num_kernels=84).fit_transform(X)\r\n```\r\n\r\nThe output should always be a `DataFrame` of zeros, but this is not the case.\r\n\r\n<details>\r\n\r\n<!--\r\nPlease run the following code snippet and paste the output here:\r\n\r\nfrom sktime import show_versions; show_versions()\r\n-->\r\n\r\nSystem:\r\n python: 3.9.6 (default, Aug 11 2023, 19:44:49) [Clang 15.0.0 (clang-1500.0.40.1)]\r\nexecutable: /Users/temp/sktime/venv/bin/python\r\n machine: macOS-14.1.2-arm64-arm-64bit\r\n\r\nPython dependencies:\r\n pip: 23.3.1\r\n sktime: 0.25.0\r\n sklearn: 1.3.2\r\n skbase: 0.4.6\r\n numpy: 1.26.1\r\n scipy: 1.11.3\r\n pandas: 2.1.4\r\n matplotlib: 3.8.1\r\n joblib: 1.3.2\r\n numba: 0.58.1\r\n statsmodels: 0.14.0\r\n pmdarima: None\r\nstatsforecast: None\r\n tsfresh: 0.20.1\r\n tslearn: 0.6.3\r\n torch: 2.1.0\r\n tensorflow: None\r\ntensorflow_probability: None\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! -->\r\n\n", "before_files": [{"content": "import multiprocessing\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.transformations.base import BaseTransformer\n\n\nclass MultiRocketMultivariate(BaseTransformer):\n \"\"\"Multi RandOm Convolutional KErnel Transform (MultiRocket).\n\n MultiRocket [1]_ is uses the same set of kernels as MiniRocket on both the raw\n series and the first order differenced series representation. It uses a different\n set of dilations and used for each representation. In addition to percentage of\n positive values (PPV) MultiRocket adds 3 pooling operators: Mean of Positive\n Values (MPV); Mean of Indices of Positive Values (MIPV); and Longest Stretch of\n Positive Values (LSPV). This version is the multivariate version.\n\n This transformer fits one set of paramereters per individual series,\n and applies the transform with fitted parameter i to the i-th series in transform.\n Vanilla use requires same number of series in fit and transform.\n\n To fit and transform series at the same time,\n without an identification of fit/transform instances,\n wrap this transformer in ``FitInTransform``,\n from ``sktime.transformations.compose``.\n\n Parameters\n ----------\n num_kernels : int, default=6,250\n number of random convolutional kernels. 
The calculated number of features is the\n nearest multiple of n_features_per_kernel(default 4)*84=336 < 50,000\n (2*n_features_per_kernel(default 4)*num_kernels(default 6,250)).\n max_dilations_per_kernel : int, default=32\n maximum number of dilations per kernel.\n n_features_per_kernel : int, default =4\n number of features per kernel.\n normalise : bool, default False\n n_jobs : int, default=1\n The number of jobs to run in parallel for `transform`. ``-1`` means using all\n processors.\n random_state : None or int, default = None\n\n Attributes\n ----------\n parameter : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X\n parameter1 : tuple\n parameter (dilations, num_features_per_dilation, biases) for\n transformation of input X1 = np.diff(X, 1)\n\n See Also\n --------\n MultiRocketMultivariate, MiniRocket, MiniRocketMultivariate, Rocket\n\n References\n ----------\n .. [1] Tan, Chang Wei and Dempster, Angus and Bergmeir, Christoph and\n Webb, Geoffrey I, \"MultiRocket: Multiple pooling operators and transformations\n for fast and effective time series classification\",2022,\n https://link.springer.com/article/10.1007/s10618-022-00844-1\n https://arxiv.org/abs/2102.00457\n\n Examples\n --------\n >>> from sktime.transformations.panel.rocket import Rocket\n >>> from sktime.datasets import load_basic_motions\n >>> X_train, y_train = load_basic_motions(split=\"train\") # doctest: +SKIP\n >>> X_test, y_test = load_basic_motions(split=\"test\") # doctest: +SKIP\n >>> trf = MultiRocketMultivariate(num_kernels=512) # doctest: +SKIP\n >>> trf.fit(X_train) # doctest: +SKIP\n MultiRocketMultivariate(...)\n >>> X_train = trf.transform(X_train) # doctest: +SKIP\n >>> X_test = trf.transform(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"univariate-only\": False,\n \"fit_is_empty\": False,\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Primitives\",\n # what is the scitype of y: None (not needed), Primitives, Series, Panel\n \"scitype:instancewise\": False, # is this an instance-wise transform?\n \"X_inner_mtype\": \"numpy3D\", # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for X?\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n num_kernels=6_250,\n max_dilations_per_kernel=32,\n n_features_per_kernel=4,\n normalise=False,\n n_jobs=1,\n random_state=None,\n ):\n self.max_dilations_per_kernel = max_dilations_per_kernel\n self.n_features_per_kernel = n_features_per_kernel\n self.num_kernels = num_kernels\n self.normalise = normalise\n self.n_jobs = n_jobs\n self.random_state = random_state if isinstance(random_state, int) else None\n\n self.parameter = None\n self.parameter1 = None\n\n super().__init__()\n\n def _fit(self, X, y=None):\n \"\"\"Fit dilations and biases to input time series.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n self\n \"\"\"\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n if X.shape[2] < 10:\n # handling very short series (like PensDigit from the MTSC archive)\n # series have to be at least a length of 10 (including differencing)\n _X1 = np.zeros((X.shape[0], X.shape[1], 10), dtype=X.dtype)\n _X1[:, :, : X.shape[2]] = X\n X = _X1\n 
del _X1\n\n X = X.astype(np.float64)\n\n self.parameter = self._get_parameter(X)\n _X1 = np.diff(X, 1)\n\n self.parameter1 = self._get_parameter(_X1)\n\n return self\n\n def _transform(self, X, y=None):\n \"\"\"Transform input time series using random convolutional kernels.\n\n Parameters\n ----------\n X : 3D np.ndarray of shape = [n_instances, n_dimensions, series_length]\n panel of time series to transform\n y : ignored argument for interface compatibility\n\n Returns\n -------\n pandas DataFrame, transformed features\n \"\"\"\n from numba import get_num_threads, set_num_threads\n\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _transform,\n )\n\n if self.normalise:\n X = (X - X.mean(axis=-1, keepdims=True)) / (\n X.std(axis=-1, keepdims=True) + 1e-8\n )\n\n _X1 = np.diff(X, 1)\n\n # change n_jobs depended on value and existing cores\n prev_threads = get_num_threads()\n if self.n_jobs < 1 or self.n_jobs > multiprocessing.cpu_count():\n n_jobs = multiprocessing.cpu_count()\n else:\n n_jobs = self.n_jobs\n set_num_threads(n_jobs)\n\n X = _transform(\n X,\n _X1,\n self.parameter,\n self.parameter1,\n self.n_features_per_kernel,\n )\n X = np.nan_to_num(X)\n\n set_num_threads(prev_threads)\n\n return pd.DataFrame(X)\n\n def _get_parameter(self, X):\n from sktime.transformations.panel.rocket._multirocket_multi_numba import (\n _fit_biases,\n _fit_dilations,\n _quantiles,\n )\n\n _, num_channels, input_length = X.shape\n\n num_kernels = 84\n\n dilations, num_features_per_dilation = _fit_dilations(\n input_length, self.num_kernels, self.max_dilations_per_kernel\n )\n\n num_features_per_kernel = np.sum(num_features_per_dilation)\n\n quantiles = _quantiles(num_kernels * num_features_per_kernel)\n\n num_dilations = len(dilations)\n num_combinations = num_kernels * num_dilations\n\n max_num_channels = min(num_channels, 9)\n max_exponent = np.log2(max_num_channels + 1)\n\n num_channels_per_combination = (\n 2 ** np.random.uniform(0, max_exponent, num_combinations)\n ).astype(np.int32)\n\n channel_indices = np.zeros(num_channels_per_combination.sum(), dtype=np.int32)\n\n num_channels_start = 0\n for combination_index in range(num_combinations):\n num_channels_this_combination = num_channels_per_combination[\n combination_index\n ]\n num_channels_end = num_channels_start + num_channels_this_combination\n channel_indices[num_channels_start:num_channels_end] = np.random.choice(\n num_channels, num_channels_this_combination, replace=False\n )\n\n num_channels_start = num_channels_end\n\n biases = _fit_biases(\n X,\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n quantiles,\n self.random_state,\n )\n\n return (\n num_channels_per_combination,\n channel_indices,\n dilations,\n num_features_per_dilation,\n biases,\n )\n", "path": "sktime/transformations/panel/rocket/_multirocket_multivariate.py"}]}
| 3,840 | 138 |
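The patch above works because `_get_parameter` draws its channel combinations from NumPy's global RNG (`np.random.uniform` and `np.random.choice`) without ever seeding it, so two transformers built with the same `random_state` still sample different channels; seeding the global RNG before those draws makes repeated fits identical. A self-contained sketch of the mechanism, simplified and not sktime's actual API:

```python
import numpy as np

def draw_channel_combinations(num_channels=2, num_combinations=4, random_state=None):
    # Simplified version of _get_parameter's channel sampling.
    if random_state is not None:
        np.random.seed(random_state)
    max_exponent = np.log2(min(num_channels, 9) + 1)
    counts = (2 ** np.random.uniform(0, max_exponent, num_combinations)).astype(np.int32)
    return [np.random.choice(num_channels, c, replace=False) for c in counts]

a = draw_channel_combinations(random_state=42)
b = draw_channel_combinations(random_state=42)
assert all(np.array_equal(i, j) for i, j in zip(a, b))   # identical once seeded
```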
gh_patches_debug_39703
|
rasdani/github-patches
|
git_diff
|
hydroshare__hydroshare-1594
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failed to create geographic feature resource
The files are from this resource https://www.hydroshare.org/resource/822fe5a7a83a49668c77a85179c3a7ee/.
The error is shown below. Thanks for looking into this issue.

</issue>
<code>
[start of hs_geographic_feature_resource/parse_lib.py]
1 import os
2 import xmltodict
3 import re
4 from osgeo import ogr, osr
5 try:
6 # Python 2.6-2.7
7 from HTMLParser import HTMLParser
8 except ImportError:
9 # Python 3
10 from html.parser import HTMLParser
11
12 UNKNOWN_STR = "unknown"
13
14
15 def parse_shp(file_path):
16 # output dictionary format
17 # shp_metadata_dict["origin_projection_string"]: original projection string
18 # shp_metadata_dict["origin_projection_name"]: origin_projection_name
19 # shp_metadata_dict["origin_datum"]: origin_datum
20 # shp_metadata_dict["origin_unit"]: origin_unit
21 # shp_metadata_dict["field_meta_dict"]["field_list"]: list [fieldname1, fieldname2...]
22     # shp_metadata_dict["field_meta_dict"]["field_attr_dict"]:
23 # dict {"fieldname": dict {
24 # "fieldName":fieldName,
25 # "fieldTypeCode":fieldTypeCode,
26 # "fieldType":fieldType,
27 # "fieldWidth:fieldWidth,
28 # "fieldPrecision:fieldPrecision"
29 # }
30 # }
31 # shp_metadata_dict["feature_count"]: feature count
32 # shp_metadata_dict["geometry_type"]: geometry_type
33 # shp_metadata_dict["origin_extent_dict"]:
34 # dict{"west": east, "north":north, "east":east, "south":south}
35 # shp_metadata_dict["wgs84_extent_dict"]:
36 # dict{"west": east, "north":north, "east":east, "south":south}
37
38 shp_metadata_dict = {}
39 # read shapefile
40 driver = ogr.GetDriverByName('ESRI Shapefile')
41 dataset = driver.Open(file_path)
42
43 # get layer
44 layer = dataset.GetLayer()
45 # get spatialRef from layer
46 spatialRef_from_layer = layer.GetSpatialRef()
47
48 if spatialRef_from_layer is not None:
49 shp_metadata_dict["origin_projection_string"] = str(spatialRef_from_layer)
50 prj_name = spatialRef_from_layer.GetAttrValue('projcs')
51 if prj_name is None:
52 prj_name = spatialRef_from_layer.GetAttrValue('geogcs')
53 shp_metadata_dict["origin_projection_name"] = prj_name
54
55 shp_metadata_dict["origin_datum"] = spatialRef_from_layer.GetAttrValue('datum')
56 shp_metadata_dict["origin_unit"] = spatialRef_from_layer.GetAttrValue('unit')
57 else:
58 shp_metadata_dict["origin_projection_string"] = UNKNOWN_STR
59 shp_metadata_dict["origin_projection_name"] = UNKNOWN_STR
60 shp_metadata_dict["origin_datum"] = UNKNOWN_STR
61 shp_metadata_dict["origin_unit"] = UNKNOWN_STR
62
63 field_list = []
64 filed_attr_dic = {}
65 field_meta_dict = {"field_list": field_list, "field_attr_dict": filed_attr_dic}
66 shp_metadata_dict["field_meta_dict"] = field_meta_dict
67 # get Attributes
68 layerDefinition = layer.GetLayerDefn()
69 for i in range(layerDefinition.GetFieldCount()):
70 fieldName = layerDefinition.GetFieldDefn(i).GetName()
71 field_list.append(fieldName)
72 attr_dict = {}
73 field_meta_dict["field_attr_dict"][fieldName] = attr_dict
74
75 attr_dict["fieldName"] = fieldName
76 fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
77 attr_dict["fieldTypeCode"] = fieldTypeCode
78 fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
79 attr_dict["fieldType"] = fieldType
80 fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
81 attr_dict["fieldWidth"] = fieldWidth
82 fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
83 attr_dict["fieldPrecision"] = fieldPrecision
84
85 # get layer extent
86 layer_extent = layer.GetExtent()
87
88 # get feature count
89 featureCount = layer.GetFeatureCount()
90 shp_metadata_dict["feature_count"] = featureCount
91
92 # get a feature from layer
93 feature = layer.GetNextFeature()
94
95 # get geometry from feature
96 geom = feature.GetGeometryRef()
97
98 # get geometry name
99 shp_metadata_dict["geometry_type"] = geom.GetGeometryName()
100
101 # reproject layer extent
102 # source SpatialReference
103 source = spatialRef_from_layer
104 # target SpatialReference
105 target = osr.SpatialReference()
106 target.ImportFromEPSG(4326)
107
108 # create two key points from layer extent
109 left_upper_point = ogr.Geometry(ogr.wkbPoint)
110 left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper
111 right_lower_point = ogr.Geometry(ogr.wkbPoint)
112 right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower
113
114 # source map always has extent, even projection is unknown
115 shp_metadata_dict["origin_extent_dict"] = {}
116 shp_metadata_dict["origin_extent_dict"]["westlimit"] = layer_extent[0]
117 shp_metadata_dict["origin_extent_dict"]["northlimit"] = layer_extent[3]
118 shp_metadata_dict["origin_extent_dict"]["eastlimit"] = layer_extent[1]
119 shp_metadata_dict["origin_extent_dict"]["southlimit"] = layer_extent[2]
120
121 # reproject to WGS84
122 shp_metadata_dict["wgs84_extent_dict"] = {}
123
124 if source is not None:
125 # define CoordinateTransformation obj
126 transform = osr.CoordinateTransformation(source, target)
127 # project two key points
128 left_upper_point.Transform(transform)
129 right_lower_point.Transform(transform)
130 shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = left_upper_point.GetX()
131 shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = left_upper_point.GetY()
132 shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = right_lower_point.GetX()
133 shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = right_lower_point.GetY()
134 shp_metadata_dict["wgs84_extent_dict"]["projection"] = "WGS 84 EPSG:4326"
135 shp_metadata_dict["wgs84_extent_dict"]["units"] = "Decimal degrees"
136 else:
137 shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = UNKNOWN_STR
138 shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = UNKNOWN_STR
139 shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = UNKNOWN_STR
140 shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = UNKNOWN_STR
141 shp_metadata_dict["wgs84_extent_dict"]["projection"] = UNKNOWN_STR
142 shp_metadata_dict["wgs84_extent_dict"]["units"] = UNKNOWN_STR
143
144 return shp_metadata_dict
145
146
147 def parse_shp_xml(shp_xml_full_path):
148 """
149 Parse ArcGIS 10.X ESRI Shapefile Metadata XML.
150 :param shp_xml_full_path: Expected fullpath to the .shp.xml file
151 :return: a list of metadata dict
152 """
153 metadata = []
154
155 try:
156 if os.path.isfile(shp_xml_full_path):
157 with open(shp_xml_full_path) as fd:
158 xml_dict = xmltodict.parse(fd.read())
159 if 'metadata' in xml_dict:
160 if 'dataIdInfo' in xml_dict['metadata']:
161 dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
162 if 'idAbs' in dataIdInfo_dict:
163 description_value = clean_text(dataIdInfo_dict['idAbs'])
164 description = {'description': {'abstract': description_value}}
165 metadata.append(description)
166 if 'idPurp' in xml_dict['metadata']['dataIdInfo']:
167 title_value = clean_text(dataIdInfo_dict['idPurp'])
168 title = {'title': {'value': title_value}}
169 metadata.append(title)
170 if 'searchKeys' in dataIdInfo_dict:
171 searchKeys_dict = dataIdInfo_dict['searchKeys']
172 if 'keyword' in searchKeys_dict:
173 keyword_list = []
174 if type(searchKeys_dict["keyword"]) is list:
175 keyword_list += searchKeys_dict["keyword"]
176 else:
177 keyword_list.append(searchKeys_dict["keyword"])
178 for k in keyword_list:
179 metadata.append({'subject': {'value': k}})
180
181 except Exception:
182 # Catch any exception silently and return an empty list
183 # Due to the variant format of ESRI Shapefile Metadata XML
184 # among different ArcGIS versions, an empty list will be returned
185 # if any exception occurs
186 metadata = []
187 finally:
188 return metadata
189
190
191 def clean_text(text):
192 # Decode html
193
194 h = HTMLParser()
195 return h.unescape(clean_html(text))
196
197
198 def clean_html(raw_html):
199 # Remove html tag from raw_html
200
201 cleanr = re.compile('<.*?>')
202 cleantext = re.sub(cleanr, '', raw_html)
203 return cleantext
204
[end of hs_geographic_feature_resource/parse_lib.py]
</code>
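A brief usage sketch of `parse_shp` from the listing above; it requires the GDAL/OGR bindings and a real shapefile on disk, and the path here is purely illustrative:

```python
# The keys printed below are the ones documented in parse_shp's docstring.
shp_meta = parse_shp('/tmp/watersheds.shp')   # illustrative path
print(shp_meta['geometry_type'], shp_meta['feature_count'])
print(shp_meta['origin_projection_name'])
print(shp_meta['wgs84_extent_dict'])          # westlimit/northlimit/eastlimit/southlimit, projection, units
```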
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hs_geographic_feature_resource/parse_lib.py b/hs_geographic_feature_resource/parse_lib.py
--- a/hs_geographic_feature_resource/parse_lib.py
+++ b/hs_geographic_feature_resource/parse_lib.py
@@ -9,6 +9,9 @@
# Python 3
from html.parser import HTMLParser
+from hs_core.models import Title
+
+
UNKNOWN_STR = "unknown"
@@ -156,27 +159,36 @@
if os.path.isfile(shp_xml_full_path):
with open(shp_xml_full_path) as fd:
xml_dict = xmltodict.parse(fd.read())
- if 'metadata' in xml_dict:
- if 'dataIdInfo' in xml_dict['metadata']:
- dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
- if 'idAbs' in dataIdInfo_dict:
- description_value = clean_text(dataIdInfo_dict['idAbs'])
- description = {'description': {'abstract': description_value}}
- metadata.append(description)
- if 'idPurp' in xml_dict['metadata']['dataIdInfo']:
- title_value = clean_text(dataIdInfo_dict['idPurp'])
- title = {'title': {'value': title_value}}
- metadata.append(title)
- if 'searchKeys' in dataIdInfo_dict:
- searchKeys_dict = dataIdInfo_dict['searchKeys']
- if 'keyword' in searchKeys_dict:
- keyword_list = []
- if type(searchKeys_dict["keyword"]) is list:
- keyword_list += searchKeys_dict["keyword"]
- else:
- keyword_list.append(searchKeys_dict["keyword"])
- for k in keyword_list:
- metadata.append({'subject': {'value': k}})
+
+ dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
+ if 'idCitation' in dataIdInfo_dict:
+ if 'resTitle' in dataIdInfo_dict['idCitation']:
+ if '#text' in dataIdInfo_dict['idCitation']['resTitle']:
+ title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']
+ else:
+ title_value = dataIdInfo_dict['idCitation']['resTitle']
+
+ title_max_length = Title._meta.get_field('value').max_length
+ if len(title_value) > title_max_length:
+ title_value = title_value[:title_max_length-1]
+ title = {'title': {'value': title_value}}
+ metadata.append(title)
+
+ if 'idAbs' in dataIdInfo_dict:
+ description_value = clean_text(dataIdInfo_dict['idAbs'])
+ description = {'description': {'abstract': description_value}}
+ metadata.append(description)
+
+ if 'searchKeys' in dataIdInfo_dict:
+ searchKeys_dict = dataIdInfo_dict['searchKeys']
+ if 'keyword' in searchKeys_dict:
+ keyword_list = []
+ if type(searchKeys_dict["keyword"]) is list:
+ keyword_list += searchKeys_dict["keyword"]
+ else:
+ keyword_list.append(searchKeys_dict["keyword"])
+ for k in keyword_list:
+ metadata.append({'subject': {'value': k}})
except Exception:
# Catch any exception silently and return an empty list
|
{"golden_diff": "diff --git a/hs_geographic_feature_resource/parse_lib.py b/hs_geographic_feature_resource/parse_lib.py\n--- a/hs_geographic_feature_resource/parse_lib.py\n+++ b/hs_geographic_feature_resource/parse_lib.py\n@@ -9,6 +9,9 @@\n # Python 3\n from html.parser import HTMLParser\n \n+from hs_core.models import Title\n+\n+\n UNKNOWN_STR = \"unknown\"\n \n \n@@ -156,27 +159,36 @@\n if os.path.isfile(shp_xml_full_path):\n with open(shp_xml_full_path) as fd:\n xml_dict = xmltodict.parse(fd.read())\n- if 'metadata' in xml_dict:\n- if 'dataIdInfo' in xml_dict['metadata']:\n- dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n- if 'idAbs' in dataIdInfo_dict:\n- description_value = clean_text(dataIdInfo_dict['idAbs'])\n- description = {'description': {'abstract': description_value}}\n- metadata.append(description)\n- if 'idPurp' in xml_dict['metadata']['dataIdInfo']:\n- title_value = clean_text(dataIdInfo_dict['idPurp'])\n- title = {'title': {'value': title_value}}\n- metadata.append(title)\n- if 'searchKeys' in dataIdInfo_dict:\n- searchKeys_dict = dataIdInfo_dict['searchKeys']\n- if 'keyword' in searchKeys_dict:\n- keyword_list = []\n- if type(searchKeys_dict[\"keyword\"]) is list:\n- keyword_list += searchKeys_dict[\"keyword\"]\n- else:\n- keyword_list.append(searchKeys_dict[\"keyword\"])\n- for k in keyword_list:\n- metadata.append({'subject': {'value': k}})\n+\n+ dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n+ if 'idCitation' in dataIdInfo_dict:\n+ if 'resTitle' in dataIdInfo_dict['idCitation']:\n+ if '#text' in dataIdInfo_dict['idCitation']['resTitle']:\n+ title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']\n+ else:\n+ title_value = dataIdInfo_dict['idCitation']['resTitle']\n+\n+ title_max_length = Title._meta.get_field('value').max_length\n+ if len(title_value) > title_max_length:\n+ title_value = title_value[:title_max_length-1]\n+ title = {'title': {'value': title_value}}\n+ metadata.append(title)\n+\n+ if 'idAbs' in dataIdInfo_dict:\n+ description_value = clean_text(dataIdInfo_dict['idAbs'])\n+ description = {'description': {'abstract': description_value}}\n+ metadata.append(description)\n+\n+ if 'searchKeys' in dataIdInfo_dict:\n+ searchKeys_dict = dataIdInfo_dict['searchKeys']\n+ if 'keyword' in searchKeys_dict:\n+ keyword_list = []\n+ if type(searchKeys_dict[\"keyword\"]) is list:\n+ keyword_list += searchKeys_dict[\"keyword\"]\n+ else:\n+ keyword_list.append(searchKeys_dict[\"keyword\"])\n+ for k in keyword_list:\n+ metadata.append({'subject': {'value': k}})\n \n except Exception:\n # Catch any exception silently and return an empty list\n", "issue": "Failed to create geographic feature resource\nThe files are from this resource https://www.hydroshare.org/resource/822fe5a7a83a49668c77a85179c3a7ee/. \r\n\r\nThe error is shown as below. 
Thanks for looking into this issue \r\n\r\n\n", "before_files": [{"content": "import os\nimport xmltodict\nimport re\nfrom osgeo import ogr, osr\ntry:\n # Python 2.6-2.7\n from HTMLParser import HTMLParser\nexcept ImportError:\n # Python 3\n from html.parser import HTMLParser\n\nUNKNOWN_STR = \"unknown\"\n\n\ndef parse_shp(file_path):\n # output dictionary format\n # shp_metadata_dict[\"origin_projection_string\"]: original projection string\n # shp_metadata_dict[\"origin_projection_name\"]: origin_projection_name\n # shp_metadata_dict[\"origin_datum\"]: origin_datum\n # shp_metadata_dict[\"origin_unit\"]: origin_unit\n # shp_metadata_dict[\"field_meta_dict\"][\"field_list\"]: list [fieldname1, fieldname2...]\n # shp_metadata_dict[\"field_meta_dict\"][\"field_attr_dic\"]:\n # dict {\"fieldname\": dict {\n # \"fieldName\":fieldName,\n # \"fieldTypeCode\":fieldTypeCode,\n # \"fieldType\":fieldType,\n # \"fieldWidth:fieldWidth,\n # \"fieldPrecision:fieldPrecision\"\n # }\n # }\n # shp_metadata_dict[\"feature_count\"]: feature count\n # shp_metadata_dict[\"geometry_type\"]: geometry_type\n # shp_metadata_dict[\"origin_extent_dict\"]:\n # dict{\"west\": east, \"north\":north, \"east\":east, \"south\":south}\n # shp_metadata_dict[\"wgs84_extent_dict\"]:\n # dict{\"west\": east, \"north\":north, \"east\":east, \"south\":south}\n\n shp_metadata_dict = {}\n # read shapefile\n driver = ogr.GetDriverByName('ESRI Shapefile')\n dataset = driver.Open(file_path)\n\n # get layer\n layer = dataset.GetLayer()\n # get spatialRef from layer\n spatialRef_from_layer = layer.GetSpatialRef()\n\n if spatialRef_from_layer is not None:\n shp_metadata_dict[\"origin_projection_string\"] = str(spatialRef_from_layer)\n prj_name = spatialRef_from_layer.GetAttrValue('projcs')\n if prj_name is None:\n prj_name = spatialRef_from_layer.GetAttrValue('geogcs')\n shp_metadata_dict[\"origin_projection_name\"] = prj_name\n\n shp_metadata_dict[\"origin_datum\"] = spatialRef_from_layer.GetAttrValue('datum')\n shp_metadata_dict[\"origin_unit\"] = spatialRef_from_layer.GetAttrValue('unit')\n else:\n shp_metadata_dict[\"origin_projection_string\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_projection_name\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_datum\"] = UNKNOWN_STR\n shp_metadata_dict[\"origin_unit\"] = UNKNOWN_STR\n\n field_list = []\n filed_attr_dic = {}\n field_meta_dict = {\"field_list\": field_list, \"field_attr_dict\": filed_attr_dic}\n shp_metadata_dict[\"field_meta_dict\"] = field_meta_dict\n # get Attributes\n layerDefinition = layer.GetLayerDefn()\n for i in range(layerDefinition.GetFieldCount()):\n fieldName = layerDefinition.GetFieldDefn(i).GetName()\n field_list.append(fieldName)\n attr_dict = {}\n field_meta_dict[\"field_attr_dict\"][fieldName] = attr_dict\n\n attr_dict[\"fieldName\"] = fieldName\n fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()\n attr_dict[\"fieldTypeCode\"] = fieldTypeCode\n fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)\n attr_dict[\"fieldType\"] = fieldType\n fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()\n attr_dict[\"fieldWidth\"] = fieldWidth\n fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()\n attr_dict[\"fieldPrecision\"] = fieldPrecision\n\n # get layer extent\n layer_extent = layer.GetExtent()\n\n # get feature count\n featureCount = layer.GetFeatureCount()\n shp_metadata_dict[\"feature_count\"] = featureCount\n\n # get a feature from layer\n feature = layer.GetNextFeature()\n\n # get geometry from feature\n geom = 
feature.GetGeometryRef()\n\n # get geometry name\n shp_metadata_dict[\"geometry_type\"] = geom.GetGeometryName()\n\n # reproject layer extent\n # source SpatialReference\n source = spatialRef_from_layer\n # target SpatialReference\n target = osr.SpatialReference()\n target.ImportFromEPSG(4326)\n\n # create two key points from layer extent\n left_upper_point = ogr.Geometry(ogr.wkbPoint)\n left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper\n right_lower_point = ogr.Geometry(ogr.wkbPoint)\n right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower\n\n # source map always has extent, even projection is unknown\n shp_metadata_dict[\"origin_extent_dict\"] = {}\n shp_metadata_dict[\"origin_extent_dict\"][\"westlimit\"] = layer_extent[0]\n shp_metadata_dict[\"origin_extent_dict\"][\"northlimit\"] = layer_extent[3]\n shp_metadata_dict[\"origin_extent_dict\"][\"eastlimit\"] = layer_extent[1]\n shp_metadata_dict[\"origin_extent_dict\"][\"southlimit\"] = layer_extent[2]\n\n # reproject to WGS84\n shp_metadata_dict[\"wgs84_extent_dict\"] = {}\n\n if source is not None:\n # define CoordinateTransformation obj\n transform = osr.CoordinateTransformation(source, target)\n # project two key points\n left_upper_point.Transform(transform)\n right_lower_point.Transform(transform)\n shp_metadata_dict[\"wgs84_extent_dict\"][\"westlimit\"] = left_upper_point.GetX()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"northlimit\"] = left_upper_point.GetY()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"eastlimit\"] = right_lower_point.GetX()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"southlimit\"] = right_lower_point.GetY()\n shp_metadata_dict[\"wgs84_extent_dict\"][\"projection\"] = \"WGS 84 EPSG:4326\"\n shp_metadata_dict[\"wgs84_extent_dict\"][\"units\"] = \"Decimal degrees\"\n else:\n shp_metadata_dict[\"wgs84_extent_dict\"][\"westlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"northlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"eastlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"southlimit\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"projection\"] = UNKNOWN_STR\n shp_metadata_dict[\"wgs84_extent_dict\"][\"units\"] = UNKNOWN_STR\n\n return shp_metadata_dict\n\n\ndef parse_shp_xml(shp_xml_full_path):\n \"\"\"\n Parse ArcGIS 10.X ESRI Shapefile Metadata XML.\n :param shp_xml_full_path: Expected fullpath to the .shp.xml file\n :return: a list of metadata dict\n \"\"\"\n metadata = []\n\n try:\n if os.path.isfile(shp_xml_full_path):\n with open(shp_xml_full_path) as fd:\n xml_dict = xmltodict.parse(fd.read())\n if 'metadata' in xml_dict:\n if 'dataIdInfo' in xml_dict['metadata']:\n dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']\n if 'idAbs' in dataIdInfo_dict:\n description_value = clean_text(dataIdInfo_dict['idAbs'])\n description = {'description': {'abstract': description_value}}\n metadata.append(description)\n if 'idPurp' in xml_dict['metadata']['dataIdInfo']:\n title_value = clean_text(dataIdInfo_dict['idPurp'])\n title = {'title': {'value': title_value}}\n metadata.append(title)\n if 'searchKeys' in dataIdInfo_dict:\n searchKeys_dict = dataIdInfo_dict['searchKeys']\n if 'keyword' in searchKeys_dict:\n keyword_list = []\n if type(searchKeys_dict[\"keyword\"]) is list:\n keyword_list += searchKeys_dict[\"keyword\"]\n else:\n keyword_list.append(searchKeys_dict[\"keyword\"])\n for k in keyword_list:\n metadata.append({'subject': {'value': k}})\n\n except Exception:\n # Catch any exception silently 
and return an empty list\n # Due to the variant format of ESRI Shapefile Metadata XML\n # among different ArcGIS versions, an empty list will be returned\n # if any exception occurs\n metadata = []\n finally:\n return metadata\n\n\ndef clean_text(text):\n # Decode html\n\n h = HTMLParser()\n return h.unescape(clean_html(text))\n\n\ndef clean_html(raw_html):\n # Remove html tag from raw_html\n\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n", "path": "hs_geographic_feature_resource/parse_lib.py"}]}
| 3,162 | 729 |
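The accepted fix reads the title from `dataIdInfo/idCitation/resTitle`, allowing for xmltodict returning either a plain string or a dict with a `#text` key when the element carries attributes, and truncates it to the `Title` field's `max_length`. A small sketch of that access pattern; the sample XML and the length limit are made up:

```python
import xmltodict

xml = """<metadata><dataIdInfo>
  <idCitation><resTitle Sync="TRUE">Watershed boundaries of the Logan River</resTitle></idCitation>
</dataIdInfo></metadata>"""

doc = xmltodict.parse(xml)
res_title = doc['metadata']['dataIdInfo']['idCitation']['resTitle']
# With attributes present, xmltodict yields {'@Sync': 'TRUE', '#text': '...'}.
title_value = res_title['#text'] if isinstance(res_title, dict) else res_title

TITLE_MAX_LENGTH = 300   # stand-in for Title._meta.get_field('value').max_length
if len(title_value) > TITLE_MAX_LENGTH:
    title_value = title_value[:TITLE_MAX_LENGTH - 1]
print({'title': {'value': title_value}})
```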
gh_patches_debug_34408
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-12008
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Quizzes - The 'missing resource' alert is not displayed at the Home page
## Observed behavior
Observed while testing https://github.com/learningequality/kolibri/pull/11426 - quizzes with missing resources are not showing an alert on the home page.
## Expected behavior
To be further discussed.
## Steps to reproduce the issue
1. Install the latest build from https://github.com/learningequality/kolibri/pull/11426 and set up a full facility and a learn-only device.
2. Assign a quiz to the learner on the LOD.
3. Sign in as the learner and delete some of the quiz resources by going to Device > Channel
4. Go to the quiz and observe that it correctly alerts for missing resources.
5. Go to the learner's home page and observe that the alert 'Some resources are missing or not supported..' is missing.
## More information
Video with replication steps - the original was more than 10 minutes long, so I had to shorten it:
https://github.com/learningequality/kolibri/assets/79847249/8fb9dcb4-8419-4e01-8f8f-180d39a3ad91
## Usage Details
Kolibri 0.16beta6
Ubuntu - Chrome, Firefox
</issue>
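In the viewset below, a quiz is flagged as missing resources when any entry in its `question_sources` references an `exercise_id` that is not among the locally available `ContentNode` ids. A stripped-down sketch of that check outside Django, with made-up ids:

```python
def exam_missing_resource(question_sources, available_node_ids):
    # Same test as in LearnerClassroomViewset.consolidate below.
    return any(q["exercise_id"] not in available_node_ids for q in question_sources)

available_node_ids = {"1f3a", "9c2e"}                       # available nodes (made-up ids)
quiz = [{"exercise_id": "1f3a"}, {"exercise_id": "77b0"}]   # the second exercise was deleted
print(exam_missing_resource(quiz, available_node_ids))      # True -> the alert should be shown
```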
<code>
[start of kolibri/plugins/learn/viewsets.py]
1 from django.db.models import Count
2 from django.db.models import OuterRef
3 from django.db.models import Q
4 from django.db.models import Subquery
5 from django.db.models import Sum
6 from django.db.models.fields import IntegerField
7 from rest_framework.permissions import IsAuthenticated
8 from rest_framework.response import Response
9 from rest_framework.views import APIView
10
11 from kolibri.core.api import ReadOnlyValuesViewset
12 from kolibri.core.auth.models import Classroom
13 from kolibri.core.auth.models import Facility
14 from kolibri.core.content.api import ContentNodeProgressViewset
15 from kolibri.core.content.api import ContentNodeViewset
16 from kolibri.core.content.api import UserContentNodeViewset
17 from kolibri.core.content.models import ContentNode
18 from kolibri.core.exams.models import Exam
19 from kolibri.core.lessons.models import Lesson
20 from kolibri.core.logger.models import AttemptLog
21 from kolibri.core.logger.models import MasteryLog
22
23
24 contentnode_progress_viewset = ContentNodeProgressViewset()
25 contentnode_viewset = ContentNodeViewset()
26 user_contentnode_viewset = UserContentNodeViewset()
27
28
29 class LearnStateView(APIView):
30 def get(self, request, format=None):
31 """
32 Returns some configuration variables applicable to users navigating learn.
33 - in_classes: Whether the user is in any classes
34 - can_download_externally: Whether the user can download content externally from Kolibri
35 """
36 if request.user.is_anonymous:
37 default_facility = Facility.get_default_facility()
38 can_download_externally = (
39 default_facility.dataset.show_download_button_in_learn
40 if default_facility
41 else True
42 )
43 return Response(
44 {
45 "in_classes": False,
46 "can_download_externally": can_download_externally,
47 }
48 )
49 return Response(
50 {
51 "in_classes": request.user.memberships.exists(),
52 "can_download_externally": request.user.dataset.show_download_button_in_learn,
53 }
54 )
55
56
57 def _consolidate_lessons_data(request, lessons):
58 lesson_contentnode_ids = set()
59 for lesson in lessons:
60 lesson_contentnode_ids |= {
61 resource["contentnode_id"] for resource in lesson["resources"]
62 }
63
64 contentnode_progress = (
65 contentnode_progress_viewset.serialize_list(
66 request, {"ids": lesson_contentnode_ids}
67 )
68 if lesson_contentnode_ids
69 else []
70 )
71
72 contentnodes = (
73 contentnode_viewset.serialize_list(request, {"ids": lesson_contentnode_ids})
74 if lesson_contentnode_ids
75 else []
76 )
77
78 progress_map = {l["content_id"]: l["progress"] for l in contentnode_progress}
79
80 contentnode_map = {c["id"]: c for c in contentnodes}
81
82 for lesson in lessons:
83 lesson["progress"] = {
84 "resource_progress": sum(
85 (
86 progress_map[resource["content_id"]]
87 for resource in lesson["resources"]
88 if resource["content_id"] in progress_map
89 )
90 ),
91 "total_resources": len(lesson["resources"]),
92 }
93 missing_resource = False
94 for resource in lesson["resources"]:
95 resource["progress"] = progress_map.get(resource["content_id"], 0)
96 resource["contentnode"] = contentnode_map.get(
97 resource["contentnode_id"], None
98 )
99 missing_resource = missing_resource or not resource["contentnode"]
100 lesson["missing_resource"] = missing_resource
101
102
103 class LearnerClassroomViewset(ReadOnlyValuesViewset):
104 """
105 Returns all Classrooms for which the requesting User is a member,
106 along with all associated assignments.
107 """
108
109 permission_classes = (IsAuthenticated,)
110
111 values = ("id", "name")
112
113 def get_queryset(self):
114 if self.request.user.is_anonymous:
115 return Classroom.objects.none()
116 return Classroom.objects.filter(membership__user=self.request.user)
117
118 def consolidate(self, items, queryset):
119 if not items:
120 return items
121 lessons = (
122 Lesson.objects.filter(
123 lesson_assignments__collection__membership__user=self.request.user,
124 is_active=True,
125 collection__in=(c["id"] for c in items),
126 )
127 .distinct()
128 .values(
129 "description", "id", "is_active", "title", "resources", "collection"
130 )
131 )
132 _consolidate_lessons_data(self.request, lessons)
133
134 user_masterylog_content_ids = MasteryLog.objects.filter(
135 user=self.request.user
136 ).values("summarylog__content_id")
137
138 exams = (
139 Exam.objects.filter(
140 assignments__collection__membership__user=self.request.user,
141 collection__in=(c["id"] for c in items),
142 )
143 .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))
144 .annotate(
145 closed=Subquery(
146 MasteryLog.objects.filter(
147 summarylog__content_id=OuterRef("id"), user=self.request.user
148 ).values("complete")[:1]
149 ),
150 score=Subquery(
151 AttemptLog.objects.filter(
152 sessionlog__content_id=OuterRef("id"), user=self.request.user
153 )
154 .order_by()
155 .values_list("item")
156 .distinct()
157 .values("masterylog")
158 .annotate(total_correct=Sum("correct"))
159 .values("total_correct"),
160 output_field=IntegerField(),
161 ),
162 answer_count=Subquery(
163 AttemptLog.objects.filter(
164 sessionlog__content_id=OuterRef("id"), user=self.request.user
165 )
166 .order_by()
167 .values_list("item")
168 .distinct()
169 .values("masterylog")
170 .annotate(total_complete=Count("id"))
171 .values("total_complete"),
172 output_field=IntegerField(),
173 ),
174 )
175 .distinct()
176 .values(
177 "collection",
178 "active",
179 "archive",
180 "id",
181 "question_count",
182 "title",
183 "closed",
184 "answer_count",
185 "score",
186 "question_sources",
187 )
188 )
189 exam_node_ids = set()
190 for exam in exams:
191 exam_node_ids |= {
192 question["exercise_id"] for question in exam.get("question_sources")
193 }
194
195 available_exam_ids = set(
196 ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(
197 "id", flat=True
198 )
199 )
200
201 for exam in exams:
202 closed = exam.pop("closed")
203 score = exam.pop("score")
204 answer_count = exam.pop("answer_count")
205 if closed is not None:
206 exam["progress"] = {
207 "closed": closed,
208 "score": score,
209 "answer_count": answer_count,
210 "started": True,
211 }
212 else:
213 exam["progress"] = {
214 "score": None,
215 "answer_count": None,
216 "closed": None,
217 "started": False,
218 }
219 exam["missing_resource"] = any(
220 question["exercise_id"] not in available_exam_ids
221 for question in exam.get("question_sources")
222 )
223 out_items = []
224 for item in items:
225 item["assignments"] = {
226 "exams": [exam for exam in exams if exam["collection"] == item["id"]],
227 "lessons": [
228 lesson for lesson in lessons if lesson["collection"] == item["id"]
229 ],
230 }
231 out_items.append(item)
232 return out_items
233
234
235 learner_classroom_viewset = LearnerClassroomViewset()
236
237
238 def _resumable_resources(classrooms):
239 for classroom in classrooms:
240 for lesson in classroom["assignments"]["lessons"]:
241 for resource in lesson["resources"]:
242 yield 0 < resource["progress"] < 1
243
244
245 class LearnHomePageHydrationView(APIView):
246 def get(self, request, format=None):
247 classrooms = []
248 resumable_resources = []
249 resumable_resources_progress = []
250 if not request.user.is_anonymous:
251 classrooms = learner_classroom_viewset.serialize_list(request)
252 if not classrooms or not any(_resumable_resources(classrooms)):
253 resumable_resources = user_contentnode_viewset.serialize_list(
254 request,
255 {"resume": True, "max_results": 12, "ordering": "-last_interacted"},
256 )
257 resumable_resources_progress = (
258 contentnode_progress_viewset.serialize_list(
259 request,
260 {
261 "resume": True,
262 "max_results": 12,
263 "ordering": "-last_interacted",
264 },
265 )
266 )
267
268 return Response(
269 {
270 "classrooms": classrooms,
271 "resumable_resources": resumable_resources,
272 "resumable_resources_progress": resumable_resources_progress,
273 }
274 )
275
276
277 def _map_lesson_classroom(item):
278 return {
279 "id": item.pop("collection__id"),
280 "name": item.pop("collection__name"),
281 "parent": item.pop("collection__parent_id"),
282 }
283
284
285 class LearnerLessonViewset(ReadOnlyValuesViewset):
286 """
287 Special Viewset for Learners to view Lessons to which they are assigned.
288 The core Lesson Viewset is locked down to Admin users only.
289 """
290
291 permission_classes = (IsAuthenticated,)
292
293 values = (
294 "id",
295 "title",
296 "description",
297 "resources",
298 "is_active",
299 "collection",
300 "collection__id",
301 "collection__name",
302 "collection__parent_id",
303 )
304
305 field_map = {"classroom": _map_lesson_classroom}
306
307 def get_queryset(self):
308 if self.request.user.is_anonymous:
309 return Lesson.objects.none()
310 return Lesson.objects.filter(
311 lesson_assignments__collection__membership__user=self.request.user,
312 is_active=True,
313 )
314
315 def consolidate(self, items, queryset):
316 if not items:
317 return items
318
319 _consolidate_lessons_data(self.request, items)
320
321 return items
322
[end of kolibri/plugins/learn/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py
--- a/kolibri/plugins/learn/viewsets.py
+++ b/kolibri/plugins/learn/viewsets.py
@@ -54,6 +54,16 @@
)
+def _map_contentnodes(request, content_ids):
+ contentnodes = (
+ contentnode_viewset.serialize_list(request, {"ids": content_ids})
+ if content_ids
+ else []
+ )
+ contentnode_map = {c["id"]: c for c in contentnodes}
+ return contentnode_map
+
+
def _consolidate_lessons_data(request, lessons):
lesson_contentnode_ids = set()
for lesson in lessons:
@@ -69,16 +79,10 @@
else []
)
- contentnodes = (
- contentnode_viewset.serialize_list(request, {"ids": lesson_contentnode_ids})
- if lesson_contentnode_ids
- else []
- )
+ contentnode_map = _map_contentnodes(request, lesson_contentnode_ids)
progress_map = {l["content_id"]: l["progress"] for l in contentnode_progress}
- contentnode_map = {c["id"]: c for c in contentnodes}
-
for lesson in lessons:
lesson["progress"] = {
"resource_progress": sum(
@@ -198,6 +202,8 @@
)
)
+ contentnode_map = _map_contentnodes(self.request, available_exam_ids)
+
for exam in exams:
closed = exam.pop("closed")
score = exam.pop("score")
@@ -216,10 +222,12 @@
"closed": None,
"started": False,
}
- exam["missing_resource"] = any(
- question["exercise_id"] not in available_exam_ids
- for question in exam.get("question_sources")
- )
+ missing_resource = False
+ for question_source in exam["question_sources"]:
+ if question_source["exercise_id"] not in contentnode_map:
+ missing_resource = True
+ break
+ exam["missing_resource"] = missing_resource
out_items = []
for item in items:
item["assignments"] = {
|
{"golden_diff": "diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py\n--- a/kolibri/plugins/learn/viewsets.py\n+++ b/kolibri/plugins/learn/viewsets.py\n@@ -54,6 +54,16 @@\n )\n \n \n+def _map_contentnodes(request, content_ids):\n+ contentnodes = (\n+ contentnode_viewset.serialize_list(request, {\"ids\": content_ids})\n+ if content_ids\n+ else []\n+ )\n+ contentnode_map = {c[\"id\"]: c for c in contentnodes}\n+ return contentnode_map\n+\n+\n def _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n@@ -69,16 +79,10 @@\n else []\n )\n \n- contentnodes = (\n- contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n- if lesson_contentnode_ids\n- else []\n- )\n+ contentnode_map = _map_contentnodes(request, lesson_contentnode_ids)\n \n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n \n- contentnode_map = {c[\"id\"]: c for c in contentnodes}\n-\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n@@ -198,6 +202,8 @@\n )\n )\n \n+ contentnode_map = _map_contentnodes(self.request, available_exam_ids)\n+\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n@@ -216,10 +222,12 @@\n \"closed\": None,\n \"started\": False,\n }\n- exam[\"missing_resource\"] = any(\n- question[\"exercise_id\"] not in available_exam_ids\n- for question in exam.get(\"question_sources\")\n- )\n+ missing_resource = False\n+ for question_source in exam[\"question_sources\"]:\n+ if question_source[\"exercise_id\"] not in contentnode_map:\n+ missing_resource = True\n+ break\n+ exam[\"missing_resource\"] = missing_resource\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n", "issue": "Quizzes - The 'missing resource' alert is not displayed at the Home page\n## Observed behavior\r\n\r\nObserved while testing https://github.com/learningequality/kolibri/pull/11426 - quizzes with missing resources are not showing an alert on the home page.\r\n\r\n## Expected behavior\r\nTo be further discussed.\r\n\r\n## Steps to reproduce the issue\r\n1. Install the latest build from https://github.com/learningequality/kolibri/pull/11426 and setup a full facility and a learn-only device.\r\n2. Assign a quiz to the learner on the LOD.\r\n3. Sign in as the learner and delete some of the quiz resources by going to Device > Channel\r\n4. Go to the quiz an observe that it correctly alerts for missing resources.\r\n5. Go to the learner's home page and observe that there the alert 'Some resources are missing or not supported..' 
is missing.\r\n\r\n## More information\r\n\r\nVideo with replications steps - the original was more than 10 minutes long, so I had to shorten it:\r\n\r\nhttps://github.com/learningequality/kolibri/assets/79847249/8fb9dcb4-8419-4e01-8f8f-180d39a3ad91\r\n\r\n## Usage Details\r\nKolibri 0.16beta6\r\nUbuntu - Chrome, Firefox\n", "before_files": [{"content": "from django.db.models import Count\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models.fields import IntegerField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.content.api import ContentNodeProgressViewset\nfrom kolibri.core.content.api import ContentNodeViewset\nfrom kolibri.core.content.api import UserContentNodeViewset\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import AttemptLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncontentnode_progress_viewset = ContentNodeProgressViewset()\ncontentnode_viewset = ContentNodeViewset()\nuser_contentnode_viewset = UserContentNodeViewset()\n\n\nclass LearnStateView(APIView):\n def get(self, request, format=None):\n \"\"\"\n Returns some configuration variables applicable to users navigating learn.\n - in_classes: Whether the user is in any classes\n - can_download_externally: Whether the user can download content externally from Kolibri\n \"\"\"\n if request.user.is_anonymous:\n default_facility = Facility.get_default_facility()\n can_download_externally = (\n default_facility.dataset.show_download_button_in_learn\n if default_facility\n else True\n )\n return Response(\n {\n \"in_classes\": False,\n \"can_download_externally\": can_download_externally,\n }\n )\n return Response(\n {\n \"in_classes\": request.user.memberships.exists(),\n \"can_download_externally\": request.user.dataset.show_download_button_in_learn,\n }\n )\n\n\ndef _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n lesson_contentnode_ids |= {\n resource[\"contentnode_id\"] for resource in lesson[\"resources\"]\n }\n\n contentnode_progress = (\n contentnode_progress_viewset.serialize_list(\n request, {\"ids\": lesson_contentnode_ids}\n )\n if lesson_contentnode_ids\n else []\n )\n\n contentnodes = (\n contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n if lesson_contentnode_ids\n else []\n )\n\n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n\n contentnode_map = {c[\"id\"]: c for c in contentnodes}\n\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n (\n progress_map[resource[\"content_id\"]]\n for resource in lesson[\"resources\"]\n if resource[\"content_id\"] in progress_map\n )\n ),\n \"total_resources\": len(lesson[\"resources\"]),\n }\n missing_resource = False\n for resource in lesson[\"resources\"]:\n resource[\"progress\"] = progress_map.get(resource[\"content_id\"], 0)\n resource[\"contentnode\"] = contentnode_map.get(\n resource[\"contentnode_id\"], None\n )\n missing_resource = missing_resource or not resource[\"contentnode\"]\n 
lesson[\"missing_resource\"] = missing_resource\n\n\nclass LearnerClassroomViewset(ReadOnlyValuesViewset):\n \"\"\"\n Returns all Classrooms for which the requesting User is a member,\n along with all associated assignments.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\"id\", \"name\")\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Classroom.objects.none()\n return Classroom.objects.filter(membership__user=self.request.user)\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n lessons = (\n Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n collection__in=(c[\"id\"] for c in items),\n )\n .distinct()\n .values(\n \"description\", \"id\", \"is_active\", \"title\", \"resources\", \"collection\"\n )\n )\n _consolidate_lessons_data(self.request, lessons)\n\n user_masterylog_content_ids = MasteryLog.objects.filter(\n user=self.request.user\n ).values(\"summarylog__content_id\")\n\n exams = (\n Exam.objects.filter(\n assignments__collection__membership__user=self.request.user,\n collection__in=(c[\"id\"] for c in items),\n )\n .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))\n .annotate(\n closed=Subquery(\n MasteryLog.objects.filter(\n summarylog__content_id=OuterRef(\"id\"), user=self.request.user\n ).values(\"complete\")[:1]\n ),\n score=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_correct=Sum(\"correct\"))\n .values(\"total_correct\"),\n output_field=IntegerField(),\n ),\n answer_count=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_complete=Count(\"id\"))\n .values(\"total_complete\"),\n output_field=IntegerField(),\n ),\n )\n .distinct()\n .values(\n \"collection\",\n \"active\",\n \"archive\",\n \"id\",\n \"question_count\",\n \"title\",\n \"closed\",\n \"answer_count\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n for exam in exams:\n exam_node_ids |= {\n question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n }\n\n available_exam_ids = set(\n ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(\n \"id\", flat=True\n )\n )\n\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n answer_count = exam.pop(\"answer_count\")\n if closed is not None:\n exam[\"progress\"] = {\n \"closed\": closed,\n \"score\": score,\n \"answer_count\": answer_count,\n \"started\": True,\n }\n else:\n exam[\"progress\"] = {\n \"score\": None,\n \"answer_count\": None,\n \"closed\": None,\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n question[\"exercise_id\"] not in available_exam_ids\n for question in exam.get(\"question_sources\")\n )\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n \"exams\": [exam for exam in exams if exam[\"collection\"] == item[\"id\"]],\n \"lessons\": [\n lesson for lesson in lessons if lesson[\"collection\"] == item[\"id\"]\n ],\n }\n out_items.append(item)\n return out_items\n\n\nlearner_classroom_viewset = LearnerClassroomViewset()\n\n\ndef _resumable_resources(classrooms):\n for classroom in classrooms:\n for lesson in classroom[\"assignments\"][\"lessons\"]:\n for resource in lesson[\"resources\"]:\n 
yield 0 < resource[\"progress\"] < 1\n\n\nclass LearnHomePageHydrationView(APIView):\n def get(self, request, format=None):\n classrooms = []\n resumable_resources = []\n resumable_resources_progress = []\n if not request.user.is_anonymous:\n classrooms = learner_classroom_viewset.serialize_list(request)\n if not classrooms or not any(_resumable_resources(classrooms)):\n resumable_resources = user_contentnode_viewset.serialize_list(\n request,\n {\"resume\": True, \"max_results\": 12, \"ordering\": \"-last_interacted\"},\n )\n resumable_resources_progress = (\n contentnode_progress_viewset.serialize_list(\n request,\n {\n \"resume\": True,\n \"max_results\": 12,\n \"ordering\": \"-last_interacted\",\n },\n )\n )\n\n return Response(\n {\n \"classrooms\": classrooms,\n \"resumable_resources\": resumable_resources,\n \"resumable_resources_progress\": resumable_resources_progress,\n }\n )\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LearnerLessonViewset(ReadOnlyValuesViewset):\n \"\"\"\n Special Viewset for Learners to view Lessons to which they are assigned.\n The core Lesson Viewset is locked down to Admin users only.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\",\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n )\n\n field_map = {\"classroom\": _map_lesson_classroom}\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Lesson.objects.none()\n return Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n )\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n\n _consolidate_lessons_data(self.request, items)\n\n return items\n", "path": "kolibri/plugins/learn/viewsets.py"}]}
| 3,790 | 500 |
gh_patches_debug_13661
|
rasdani/github-patches
|
git_diff
|
jupyterhub__zero-to-jupyterhub-k8s-2769
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move changelog to docs website
I suggest we make the changelog part of the rendered documentation instead of a dedicated markdown file read from GitHub, and that we let CHANGELOG.md point to this rendered view.
Note for reference that jupyterhub/jupyterhub repo only has rendered website documentation currently, [see it here](https://jupyterhub.readthedocs.io/en/stable/changelog.html).

---
I think there are two key benefits of this:
1. We can get navigation assistance out of the box
2. We can make use of myst formatting, such as note/warning callouts
</issue>
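For context, one minimal way to surface the changelog in the rendered docs is to follow the same pattern this conf.py already uses when it generates resources/reference.md at build time: copy the repository-level CHANGELOG.md into the Sphinx source tree so MyST renders it and it can be listed in a toctree. The paths below (`../../CHANGELOG.md`, `docs/source/changelog.md`) are assumptions for illustration, not the change actually made for this issue.

```python
# Hypothetical addition to docs/source/conf.py (illustrative sketch only):
# copy the repo-level CHANGELOG.md next to the other sources so Sphinx/MyST
# renders it as a normal page named "changelog".
import os
import shutil

_docs_source = os.path.dirname(os.path.abspath(__file__))
_repo_changelog = os.path.join(_docs_source, "..", "..", "CHANGELOG.md")  # assumed location
_rendered_changelog = os.path.join(_docs_source, "changelog.md")

if os.path.exists(_repo_changelog):
    # Overwrite on every build so the rendered page tracks the markdown file.
    shutil.copyfile(_repo_changelog, _rendered_changelog)
```

CHANGELOG.md itself could then be reduced to a short pointer at the rendered page, as the issue suggests.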
<code>
[start of docs/source/conf.py]
1 # Configuration file for Sphinx to build our documentation to HTML.
2 #
3 # Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html
4 #
5
6 # -- Path setup --------------------------------------------------------------
7
8 # If extensions (or modules to document with autodoc) are in another directory,
9 # add these directories to sys.path here. If the directory is relative to the
10 # documentation root, use os.path.abspath to make it absolute, like shown here.
11 #
12 # import os
13 # import sys
14 # sys.path.insert(0, os.path.abspath('.'))
15
16
17 # -- Project specific imports ------------------------------------------------
18
19 import datetime
20 import os
21 import re
22 import subprocess
23
24 import yaml
25
26 # -- Sphinx setup function ---------------------------------------------------
27 # ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function
28
29
30 def setup(app):
31 app.add_css_file("custom.css")
32
33
34 # -- Referencable variables --------------------------------------------------
35
36
37 def _get_git_ref_from_chartpress_based_version(version):
38 """
39 Get a git ref from a chartpress set version of format like
40 1.2.3-beta.1.n123.h1234567, 1.2.3-n123.h1234567, or 1.2.3.
41 """
42 tag_hash_split = re.split(r"[\.|-]n\d\d\d\.h", version)
43 if len(tag_hash_split) == 2:
44 return tag_hash_split[1]
45 else:
46 return tag_hash_split[0]
47
48
49 # FIXME: Stop relying on chartpress to modify Chart.yaml (and values.yaml) by
50 # creating a new feature of chartpress that allows us to directly acquire
51 # the dynamically set chart version from Chart.yaml. This would be
52 # similar to the --list-images feature of chartpress.
53 subprocess.run(["chartpress", "--skip-build"], cwd=os.path.abspath("../.."))
54 with open("../../jupyterhub/Chart.yaml") as f:
55 chart = yaml.safe_load(f)
56 subprocess.run(["chartpress", "--reset"], cwd=os.path.abspath("../.."))
57
58 # These substitution variables only work in markdown contexts, and does not work
59 # within links etc. Reference using {{ variable_name }}
60 #
61 # myst_substitutions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html#substitutions-with-jinja2
62 myst_substitutions = {
63 "chart_version": chart["version"],
64 "jupyterhub_version": chart["appVersion"],
65 # FIXME: kubeVersion contain >=, but by having > in the string we substitute
66 # we run into this issue:
67 # https://github.com/executablebooks/MyST-Parser/issues/282
68 "kube_version": chart["kubeVersion"].split("-", 1)[0][2:],
69 "helm_version": "3.5",
70 "requirements": f"[hub/images/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{_get_git_ref_from_chartpress_based_version(chart['version'])}/images/hub/requirements.txt)",
71 }
72
73
74 # -- General MyST configuration -----------------------------------------------------
75
76 # myst_enable_extensions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html
77 myst_enable_extensions = [
78 "substitution",
79 ]
80
81
82 # -- Project information -----------------------------------------------------
83 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
84
85 project = "Zero to JupyterHub with Kubernetes"
86 copyright = f"{datetime.date.today().year}, Project Jupyter Contributors"
87 author = "Project Jupyter Contributors"
88
89
90 # -- General Sphinx configuration ---------------------------------------------------
91 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
92
93 # Set the default role so we can use `foo` instead of ``foo``
94 default_role = "literal"
95
96 # Add any Sphinx extension module names here, as strings. They can be
97 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
98 # ones.
99 extensions = [
100 "sphinx.ext.mathjax",
101 "sphinx_copybutton",
102 "myst_parser",
103 "sphinxext.rediraffe",
104 "sphinxext.opengraph",
105 ]
106
107 # List of patterns, relative to source directory, that match files and
108 # directories to ignore when looking for source files.
109 # This pattern also affects html_static_path and html_extra_path.
110 exclude_patterns = []
111
112 # The root toctree document.
113 root_doc = master_doc = "index"
114
115 # The suffix(es) of source filenames.
116 source_suffix = [".md", ".rst"]
117
118 # Rediraffe redirects to ensure proper redirection
119 rediraffe_redirects = {
120 "customizing/user-management": "jupyterhub/customizing/user-management",
121 "customizing/user-storage": "jupyterhub/customizing/user-storage",
122 "customizing/user-resources": "jupyterhub/customizing/user-resources",
123 "customizing/user-environment": "jupyterhub/customizing/user-environment",
124 "customizing/extending-jupyterhub": "jupyterhub/customizing/extending-jupyterhub",
125 "reference/glossary": "resources/glossary",
126 "reference/tools": "resources/tools",
127 "reference/reference-docs": "resources/reference-docs",
128 "reference/reference": "resources/reference",
129 "community/additional-resources": "resources/community",
130 "community/users-list": "resources/community",
131 "community/tips": "resources/community",
132 "setup-jupyterhub/turn-off": "jupyterhub/uninstall",
133 "setup-jupyterhub/setup-jupyterhub": "jupyterhub/installation",
134 "setup-jupyterhub/setup-helm": "kubernetes/setup-helm",
135 "ovh/step-zero-ovh": "kubernetes/ovh/step-zero-ovh",
136 "digital-ocean/step-zero-digital-ocean": "kubernetes/digital-ocean/step-zero-digital-ocean",
137 "ibm/step-zero-ibm": "kubernetes/ibm/step-zero-ibm",
138 "redhat/step-zero-openshift": "kubernetes/redhat/step-zero-openshift",
139 "amazon/step-zero-aws-eks": "kubernetes/amazon/step-zero-aws-eks",
140 "amazon/step-zero-aws": "kubernetes/amazon/step-zero-aws",
141 "microsoft/step-zero-azure-autoscale": "kubernetes/microsoft/step-zero-azure-autoscale",
142 "microsoft/step-zero-azure": "kubernetes/microsoft/step-zero-azure",
143 "google/step-zero-gcp": "kubernetes/google/step-zero-gcp",
144 "create-k8s-cluster": "kubernetes/setup-kubernetes",
145 "turn-off": "jupyterhub/uninstall",
146 "setup-jupyterhub": "jupyterhub/index",
147 "setup-helm": "kubernetes/setup-helm",
148 "index-setup-jupyterhub": "jupyterhub/index",
149 "tools": "reference/tools",
150 "reference-docs": "reference/reference-docs",
151 "index-reference": "resources/reference",
152 "glossary": "reference/glossary",
153 "user-storage": "customizing/user-storage",
154 "user-resources": "customizing/user-resources",
155 "user-management": "customizing/user-management",
156 "user-environment": "customizing/user-environment",
157 "index-customization-guide": "jupyterhub/customization",
158 "extending-jupyterhub": "customizing/extending-jupyterhub",
159 "users-list": "community/users-list",
160 "tips": "community/tips",
161 "index-community-resources": "resources/community",
162 "additional-resources": "resources/community",
163 "upgrading": "administrator/upgrading",
164 "troubleshooting": "administrator/troubleshooting",
165 "security": "administrator/security",
166 "optimization": "administrator/optimization",
167 "index-administrator-guide": "administrator/index",
168 "debug": "administrator/debug",
169 "cost": "administrator/cost",
170 "authentication": "administrator/authentication",
171 "architecture": "administrator/architecture",
172 "advanced": "administrator/advanced",
173 }
174
175 # opengraph configuration
176 # ogp_site_url/prefix is set automatically by RTD
177 ogp_image = "_static/logo.png"
178 ogp_use_first_image = True
179
180 # -- Generate the Helm chart configuration reference from a schema file ------
181
182 # header
183 with open("resources/reference.txt") as f:
184 header_md = f.readlines()
185 header_md = header_md[1:]
186 header_md = [ln.strip("\n") for ln in header_md]
187
188 # schema
189 with open("../../jupyterhub/schema.yaml") as f:
190 data = yaml.safe_load(f)
191
192
193 def parse_schema(d, md=[], depth=0, pre=""):
194 """
195 Generate markdown headers from a passed python dictionary created by
196 parsing a schema.yaml file.
197 """
198 if "then" in d:
199 d = d["then"]
200
201 if "properties" in d:
202 depth += 1
203 # Create markdown headers for each schema level
204 for key, val in d["properties"].items():
205 md.append(f"(schema_{pre}{key})=")
206 md.append("#" * (depth + 1) + f" {pre}{key}")
207 md.append("")
208 if "description" in val:
209 for ln in val["description"].split("\n"):
210 md.append(ln)
211 md.append("")
212
213 parse_schema(val, md, depth, f"{pre}{key}.")
214 depth -= 1
215 return md
216
217
218 schema_md = parse_schema(data)
219
220 # reference = header + schema
221 reference_md = header_md + schema_md
222 with open("resources/reference.md", "w") as f:
223 f.write("\n".join(reference_md))
224
225
226 # -- Options for linkcheck builder -------------------------------------------
227 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder
228 linkcheck_ignore = [
229 r"(.*)github\.com(.*)#", # javascript based anchors
230 r"(.*)/#%21(.*)/(.*)", # /#!forum/jupyter - encoded anchor edge case
231 "https://your-domain.com", # example
232 "https://your-domain-name.com", # example
233 "https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works
234 "https://cloud.ibm.com/kubernetes/catalog/create", # works
235 "https://portal.azure.com", # sign-in redirect noise
236 "https://console.cloud.google.com", # sign-in redirect noise
237 "https://console.developers.google.com", # sign-in redirect noise
238 ]
239 linkcheck_anchors_ignore = [
240 "/#!",
241 "/#%21",
242 ]
243
244
245 # -- Options for HTML output -------------------------------------------------
246 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
247
248 # The theme to use for HTML and HTML Help pages. See the documentation for
249 # a list of builtin themes.
250 #
251
252 html_theme = "pydata_sphinx_theme"
253 html_theme_options = {
254 "github_url": "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/",
255 "use_edit_page_button": True,
256 }
257 html_context = {
258 "github_user": "jupyterhub",
259 "github_repo": "zero-to-jupyterhub-k8s",
260 "github_version": "main",
261 "doc_path": "docs/source",
262 }
263
264 html_favicon = "_static/images/logo/favicon.ico"
265 html_logo = "_static/images/logo/logo.png"
266
267 # Add any paths that contain custom static files (such as style sheets) here,
268 # relative to this directory. They are copied after the builtin static files,
269 # so a file named "default.css" will overwrite the builtin "default.css".
270 html_static_path = ["_static"]
271
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -228,6 +228,9 @@
linkcheck_ignore = [
r"(.*)github\.com(.*)#", # javascript based anchors
r"(.*)/#%21(.*)/(.*)", # /#!forum/jupyter - encoded anchor edge case
+ r"https://github.com/[^/]*$", # too many github usernames / searches in changelog
+ "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/", # too many PRs in changelog
+ "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/compare/", # too many comparisons in changelog
"https://your-domain.com", # example
"https://your-domain-name.com", # example
"https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -228,6 +228,9 @@\n linkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n+ r\"https://github.com/[^/]*$\", # too many github usernames / searches in changelog\n+ \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/\", # too many PRs in changelog\n+ \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/compare/\", # too many comparisons in changelog\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n", "issue": "Move changelog to docs website\nI suggest we make the changelog become part of the rendered documentation instead of a dedicated markdown file to be read from GitHub, and that we let CHANGELOG.md point to this rendered view.\r\n\r\nNote for reference that jupyterhub/jupyterhub repo only has rendered website documentation currently, [see it here](https://jupyterhub.readthedocs.io/en/stable/changelog.html).\r\n\r\n\r\n\r\n---\r\n\r\nI think there are two key benefits of this:\r\n1. We can get navigation assistance out of the box\r\n2. We can make use of myst formatting, such as note/warning callouts\n", "before_files": [{"content": "# Configuration file for Sphinx to build our documentation to HTML.\n#\n# Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nimport datetime\nimport os\nimport re\nimport subprocess\n\nimport yaml\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n\n\n# -- Referencable variables --------------------------------------------------\n\n\ndef _get_git_ref_from_chartpress_based_version(version):\n \"\"\"\n Get a git ref from a chartpress set version of format like\n 1.2.3-beta.1.n123.h1234567, 1.2.3-n123.h1234567, or 1.2.3.\n \"\"\"\n tag_hash_split = re.split(r\"[\\.|-]n\\d\\d\\d\\.h\", version)\n if len(tag_hash_split) == 2:\n return tag_hash_split[1]\n else:\n return tag_hash_split[0]\n\n\n# FIXME: Stop relying on chartpress to modify Chart.yaml (and values.yaml) by\n# creating a new feature of chartpress that allows us to directly acquire\n# the dynamically set chart version from Chart.yaml. This would be\n# similar to the --list-images feature of chartpress.\nsubprocess.run([\"chartpress\", \"--skip-build\"], cwd=os.path.abspath(\"../..\"))\nwith open(\"../../jupyterhub/Chart.yaml\") as f:\n chart = yaml.safe_load(f)\nsubprocess.run([\"chartpress\", \"--reset\"], cwd=os.path.abspath(\"../..\"))\n\n# These substitution variables only work in markdown contexts, and does not work\n# within links etc. 
Reference using {{ variable_name }}\n#\n# myst_substitutions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html#substitutions-with-jinja2\nmyst_substitutions = {\n \"chart_version\": chart[\"version\"],\n \"jupyterhub_version\": chart[\"appVersion\"],\n # FIXME: kubeVersion contain >=, but by having > in the string we substitute\n # we run into this issue:\n # https://github.com/executablebooks/MyST-Parser/issues/282\n \"kube_version\": chart[\"kubeVersion\"].split(\"-\", 1)[0][2:],\n \"helm_version\": \"3.5\",\n \"requirements\": f\"[hub/images/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{_get_git_ref_from_chartpress_based_version(chart['version'])}/images/hub/requirements.txt)\",\n}\n\n\n# -- General MyST configuration -----------------------------------------------------\n\n# myst_enable_extensions ref: https://myst-parser.readthedocs.io/en/latest/using/syntax-optional.html\nmyst_enable_extensions = [\n \"substitution\",\n]\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"Zero to JupyterHub with Kubernetes\"\ncopyright = f\"{datetime.date.today().year}, Project Jupyter Contributors\"\nauthor = \"Project Jupyter Contributors\"\n\n\n# -- General Sphinx configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = \"literal\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.mathjax\",\n \"sphinx_copybutton\",\n \"myst_parser\",\n \"sphinxext.rediraffe\",\n \"sphinxext.opengraph\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The root toctree document.\nroot_doc = master_doc = \"index\"\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".md\", \".rst\"]\n\n# Rediraffe redirects to ensure proper redirection\nrediraffe_redirects = {\n \"customizing/user-management\": \"jupyterhub/customizing/user-management\",\n \"customizing/user-storage\": \"jupyterhub/customizing/user-storage\",\n \"customizing/user-resources\": \"jupyterhub/customizing/user-resources\",\n \"customizing/user-environment\": \"jupyterhub/customizing/user-environment\",\n \"customizing/extending-jupyterhub\": \"jupyterhub/customizing/extending-jupyterhub\",\n \"reference/glossary\": \"resources/glossary\",\n \"reference/tools\": \"resources/tools\",\n \"reference/reference-docs\": \"resources/reference-docs\",\n \"reference/reference\": \"resources/reference\",\n \"community/additional-resources\": \"resources/community\",\n \"community/users-list\": \"resources/community\",\n \"community/tips\": \"resources/community\",\n \"setup-jupyterhub/turn-off\": \"jupyterhub/uninstall\",\n \"setup-jupyterhub/setup-jupyterhub\": \"jupyterhub/installation\",\n \"setup-jupyterhub/setup-helm\": \"kubernetes/setup-helm\",\n \"ovh/step-zero-ovh\": \"kubernetes/ovh/step-zero-ovh\",\n \"digital-ocean/step-zero-digital-ocean\": \"kubernetes/digital-ocean/step-zero-digital-ocean\",\n \"ibm/step-zero-ibm\": \"kubernetes/ibm/step-zero-ibm\",\n 
\"redhat/step-zero-openshift\": \"kubernetes/redhat/step-zero-openshift\",\n \"amazon/step-zero-aws-eks\": \"kubernetes/amazon/step-zero-aws-eks\",\n \"amazon/step-zero-aws\": \"kubernetes/amazon/step-zero-aws\",\n \"microsoft/step-zero-azure-autoscale\": \"kubernetes/microsoft/step-zero-azure-autoscale\",\n \"microsoft/step-zero-azure\": \"kubernetes/microsoft/step-zero-azure\",\n \"google/step-zero-gcp\": \"kubernetes/google/step-zero-gcp\",\n \"create-k8s-cluster\": \"kubernetes/setup-kubernetes\",\n \"turn-off\": \"jupyterhub/uninstall\",\n \"setup-jupyterhub\": \"jupyterhub/index\",\n \"setup-helm\": \"kubernetes/setup-helm\",\n \"index-setup-jupyterhub\": \"jupyterhub/index\",\n \"tools\": \"reference/tools\",\n \"reference-docs\": \"reference/reference-docs\",\n \"index-reference\": \"resources/reference\",\n \"glossary\": \"reference/glossary\",\n \"user-storage\": \"customizing/user-storage\",\n \"user-resources\": \"customizing/user-resources\",\n \"user-management\": \"customizing/user-management\",\n \"user-environment\": \"customizing/user-environment\",\n \"index-customization-guide\": \"jupyterhub/customization\",\n \"extending-jupyterhub\": \"customizing/extending-jupyterhub\",\n \"users-list\": \"community/users-list\",\n \"tips\": \"community/tips\",\n \"index-community-resources\": \"resources/community\",\n \"additional-resources\": \"resources/community\",\n \"upgrading\": \"administrator/upgrading\",\n \"troubleshooting\": \"administrator/troubleshooting\",\n \"security\": \"administrator/security\",\n \"optimization\": \"administrator/optimization\",\n \"index-administrator-guide\": \"administrator/index\",\n \"debug\": \"administrator/debug\",\n \"cost\": \"administrator/cost\",\n \"authentication\": \"administrator/authentication\",\n \"architecture\": \"administrator/architecture\",\n \"advanced\": \"administrator/advanced\",\n}\n\n# opengraph configuration\n# ogp_site_url/prefix is set automatically by RTD\nogp_image = \"_static/logo.png\"\nogp_use_first_image = True\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open(\"resources/reference.txt\") as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip(\"\\n\") for ln in header_md]\n\n# schema\nwith open(\"../../jupyterhub/schema.yaml\") as f:\n data = yaml.safe_load(f)\n\n\ndef parse_schema(d, md=[], depth=0, pre=\"\"):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if \"then\" in d:\n d = d[\"then\"]\n\n if \"properties\" in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d[\"properties\"].items():\n md.append(f\"(schema_{pre}{key})=\")\n md.append(\"#\" * (depth + 1) + f\" {pre}{key}\")\n md.append(\"\")\n if \"description\" in val:\n for ln in val[\"description\"].split(\"\\n\"):\n md.append(ln)\n md.append(\"\")\n\n parse_schema(val, md, depth, f\"{pre}{key}.\")\n depth -= 1\n return md\n\n\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open(\"resources/reference.md\", \"w\") as f:\n f.write(\"\\n\".join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n 
\"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n \"https://portal.azure.com\", # sign-in redirect noise\n \"https://console.cloud.google.com\", # sign-in redirect noise\n \"https://console.developers.google.com\", # sign-in redirect noise\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = \"pydata_sphinx_theme\"\nhtml_theme_options = {\n \"github_url\": \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/\",\n \"use_edit_page_button\": True,\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"zero-to-jupyterhub-k8s\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source\",\n}\n\nhtml_favicon = \"_static/images/logo/favicon.ico\"\nhtml_logo = \"_static/images/logo/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n", "path": "docs/source/conf.py"}]}
| 3,973 | 221 |
gh_patches_debug_10581
|
rasdani/github-patches
|
git_diff
|
pytorch__rl-598
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] timeit profiling class does not correctly track how many times a function has been called.
## Describe the bug
In the file **pytorch/rl/torchrl/_utils.py**, in the **timeit** class, the code starting at line 31 is the following:
```
count = self._REG[self.name][1]
self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
self._REG[self.name][1] = self._REG[self.name][1] + t
self._REG[self.name][2] = count + 1
```
I understand self._REG[self.name][2] tracks the number of times a function has been called.
In that case, shouldn't the first line be changed to
```
count = self._REG[self.name][2]
```
## Checklist
- [X] I have checked that there is no similar issue in the repo (**required**)
- [ ] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)
- [ ] I have provided a minimal working example to reproduce the bug (**required**)
</issue>
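To make the reported bug concrete, here is a small standalone sketch of the intended register layout [running mean, total time, call count], with the count read from index 2 as the reporter suggests. The `record` helper and the sample values are illustrative only and are not part of the torchrl code.

```python
# Illustrative sketch: per-name register of [running mean, total, call count],
# updating the mean with the call count (index 2) rather than the total (index 1).
_REG = {}

def record(name, t):
    mean, total, count = _REG.setdefault(name, [0.0, 0.0, 0])
    _REG[name][0] = (mean * count + t) / (count + 1)  # running mean over count + 1 calls
    _REG[name][1] = total + t                         # accumulated time
    _REG[name][2] = count + 1                         # number of calls so far

for t in (0.25, 0.75, 0.5):
    record("step", t)

print(_REG["step"])  # [0.5, 1.5, 3] -> mean, total seconds, call count
```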
<code>
[start of torchrl/_utils.py]
1 import collections
2 import math
3 import time
4
5 import numpy as np
6
7
8 class timeit:
9 """A dirty but easy to use decorator for profiling code."""
10
11 _REG = {}
12
13 def __init__(self, name):
14 self.name = name
15
16 def __call__(self, fn):
17 def decorated_fn(*args, **kwargs):
18 with self:
19 out = fn(*args, **kwargs)
20 return out
21
22 return decorated_fn
23
24 def __enter__(self):
25 self.t0 = time.time()
26
27 def __exit__(self, exc_type, exc_val, exc_tb):
28 t = time.time() - self.t0
29 self._REG.setdefault(self.name, [0.0, 0.0, 0])
30
31 count = self._REG[self.name][1]
32 self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
33 self._REG[self.name][1] = self._REG[self.name][1] + t
34 self._REG[self.name][2] = count + 1
35
36 @staticmethod
37 def print(prefix=None):
38 keys = list(timeit._REG)
39 keys.sort()
40 for name in keys:
41 strings = []
42 if prefix:
43 strings.append(prefix)
44 strings.append(
45 f"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)"
46 )
47 print(" -- ".join(strings))
48
49 @staticmethod
50 def erase():
51 for k in timeit._REG:
52 timeit._REG[k] = [0.0, 0.0, 0]
53
54
55 def _check_for_faulty_process(processes):
56 terminate = False
57 for p in processes:
58 if not p.is_alive():
59 terminate = True
60 for _p in processes:
61 if _p.is_alive():
62 _p.terminate()
63 if terminate:
64 break
65 if terminate:
66 raise RuntimeError(
67 "At least one process failed. Check for more infos in the log."
68 )
69
70
71 def seed_generator(seed):
72 """A seed generator function.
73
74 Given a seeding integer, generates a deterministic next seed to be used in a
75 seeding sequence.
76
77 Args:
78 seed (int): initial seed.
79
80 Returns: Next seed of the chain.
81
82 """
83 max_seed_val = (
84 2 ** 32 - 1
85 ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688
86 rng = np.random.default_rng(seed)
87 seed = int.from_bytes(rng.bytes(8), "big")
88 return seed % max_seed_val
89
90
91 class KeyDependentDefaultDict(collections.defaultdict):
92 """A key-dependent default dict.
93
94 Examples:
95 >>> my_dict = KeyDependentDefaultDict(lambda key: "foo_" + key)
96 >>> print(my_dict["bar"])
97 foo_bar
98 """
99
100 def __init__(self, fun):
101 self.fun = fun
102 super().__init__()
103
104 def __missing__(self, key):
105 value = self.fun(key)
106 self[key] = value
107 return value
108
109
110 def prod(sequence):
111 """General prod function, that generalised usage across math and np.
112
113 Created for multiple python versions compatibility).
114
115 """
116 if hasattr(math, "prod"):
117 return math.prod(sequence)
118 else:
119 return int(np.prod(sequence))
120
[end of torchrl/_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchrl/_utils.py b/torchrl/_utils.py
--- a/torchrl/_utils.py
+++ b/torchrl/_utils.py
@@ -26,12 +26,13 @@
def __exit__(self, exc_type, exc_val, exc_tb):
t = time.time() - self.t0
- self._REG.setdefault(self.name, [0.0, 0.0, 0])
+ val = self._REG.setdefault(self.name, [0.0, 0.0, 0])
- count = self._REG[self.name][1]
- self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)
- self._REG[self.name][1] = self._REG[self.name][1] + t
- self._REG[self.name][2] = count + 1
+ count = val[2]
+ N = count + 1
+ val[0] = val[0] * (count / N) + t / N
+ val[1] += t
+ val[2] = N
@staticmethod
def print(prefix=None):
|
{"golden_diff": "diff --git a/torchrl/_utils.py b/torchrl/_utils.py\n--- a/torchrl/_utils.py\n+++ b/torchrl/_utils.py\n@@ -26,12 +26,13 @@\n \n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n- self._REG.setdefault(self.name, [0.0, 0.0, 0])\n+ val = self._REG.setdefault(self.name, [0.0, 0.0, 0])\n \n- count = self._REG[self.name][1]\n- self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\n- self._REG[self.name][1] = self._REG[self.name][1] + t\n- self._REG[self.name][2] = count + 1\n+ count = val[2]\n+ N = count + 1\n+ val[0] = val[0] * (count / N) + t / N\n+ val[1] += t\n+ val[2] = N\n \n @staticmethod\n def print(prefix=None):\n", "issue": "[BUG] timeit profiling class does not correctly track how many times a function has been called.\n## Describe the bug\r\n\r\nIn file **pytorch/rl/torchrl/_utils.py**, in the class **timeit**, starting at line 31, the code is the following\r\n\r\n```\r\ncount = self._REG[self.name][1]\r\nself._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\r\nself._REG[self.name][1] = self._REG[self.name][1] + t\r\nself._REG[self.name][2] = count + 1\r\n```\r\n\r\nI understand self._REG[self.name][2] tracks the number of times a function has been called.\r\nIn that case, shouldn't the first line be changed to \r\n\r\n```\r\ncount = self._REG[self.name][2]\r\n```\r\n\r\n## Checklist\r\n\r\n- [X] I have checked that there is no similar issue in the repo (**required**)\r\n- [ ] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)\r\n- [ ] I have provided a minimal working example to reproduce the bug (**required**)\r\n\n", "before_files": [{"content": "import collections\nimport math\nimport time\n\nimport numpy as np\n\n\nclass timeit:\n \"\"\"A dirty but easy to use decorator for profiling code.\"\"\"\n\n _REG = {}\n\n def __init__(self, name):\n self.name = name\n\n def __call__(self, fn):\n def decorated_fn(*args, **kwargs):\n with self:\n out = fn(*args, **kwargs)\n return out\n\n return decorated_fn\n\n def __enter__(self):\n self.t0 = time.time()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time.time() - self.t0\n self._REG.setdefault(self.name, [0.0, 0.0, 0])\n\n count = self._REG[self.name][1]\n self._REG[self.name][0] = (self._REG[self.name][0] * count + t) / (count + 1)\n self._REG[self.name][1] = self._REG[self.name][1] + t\n self._REG[self.name][2] = count + 1\n\n @staticmethod\n def print(prefix=None):\n keys = list(timeit._REG)\n keys.sort()\n for name in keys:\n strings = []\n if prefix:\n strings.append(prefix)\n strings.append(\n f\"{name} took {timeit._REG[name][0] * 1000:4.4} msec (total = {timeit._REG[name][1]} sec)\"\n )\n print(\" -- \".join(strings))\n\n @staticmethod\n def erase():\n for k in timeit._REG:\n timeit._REG[k] = [0.0, 0.0, 0]\n\n\ndef _check_for_faulty_process(processes):\n terminate = False\n for p in processes:\n if not p.is_alive():\n terminate = True\n for _p in processes:\n if _p.is_alive():\n _p.terminate()\n if terminate:\n break\n if terminate:\n raise RuntimeError(\n \"At least one process failed. 
Check for more infos in the log.\"\n )\n\n\ndef seed_generator(seed):\n \"\"\"A seed generator function.\n\n Given a seeding integer, generates a deterministic next seed to be used in a\n seeding sequence.\n\n Args:\n seed (int): initial seed.\n\n Returns: Next seed of the chain.\n\n \"\"\"\n max_seed_val = (\n 2 ** 32 - 1\n ) # https://discuss.pytorch.org/t/what-is-the-max-seed-you-can-set-up/145688\n rng = np.random.default_rng(seed)\n seed = int.from_bytes(rng.bytes(8), \"big\")\n return seed % max_seed_val\n\n\nclass KeyDependentDefaultDict(collections.defaultdict):\n \"\"\"A key-dependent default dict.\n\n Examples:\n >>> my_dict = KeyDependentDefaultDict(lambda key: \"foo_\" + key)\n >>> print(my_dict[\"bar\"])\n foo_bar\n \"\"\"\n\n def __init__(self, fun):\n self.fun = fun\n super().__init__()\n\n def __missing__(self, key):\n value = self.fun(key)\n self[key] = value\n return value\n\n\ndef prod(sequence):\n \"\"\"General prod function, that generalised usage across math and np.\n\n Created for multiple python versions compatibility).\n\n \"\"\"\n if hasattr(math, \"prod\"):\n return math.prod(sequence)\n else:\n return int(np.prod(sequence))\n", "path": "torchrl/_utils.py"}]}
| 1,814 | 274 |
gh_patches_debug_6725
|
rasdani/github-patches
|
git_diff
|
weni-ai__bothub-engine-197
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REST API just generates HTTP URLs
next links (List Views) and absolute URLs are generated with the HTTP protocol because all requests are made on port 80.
</issue>
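Some background on this class of problem: Django and DRF build absolute URLs (including paginated `next` links) from the incoming request, so when TLS terminates at a proxy and the application itself only sees plain HTTP on port 80, every generated link comes out as `http://`. A common remedy is to tell Django which forwarded headers to trust. The sketch below assumes a TLS-terminating proxy that sets `X-Forwarded-Proto` and `X-Forwarded-Port`; it is offered as background, not necessarily the change that closed this issue.

```python
# Hypothetical additions to bothub/settings.py; only safe when every request
# really passes through a proxy that sets these headers.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')  # makes request.is_secure() True
USE_X_FORWARDED_HOST = True   # trust the Host header forwarded by the proxy
USE_X_FORWARDED_PORT = True   # use the original port instead of the internal 80
```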
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3
4 setup(
5 name='bothub',
6 version='1.15.0',
7 description='bothub',
8 packages=find_packages(),
9 install_requires=[
10 'python-decouple',
11 'requests',
12 'django==2.0.6',
13 'djangorestframework==3.7.7',
14 'whitenoise',
15 'dj-database-url',
16 'django-cors-headers',
17 'django-filter',
18 'coreapi',
19 ],
20 python_requires='>=3.6',
21 )
22
[end of setup.py]
[start of bothub/settings.py]
1 import os
2 import dj_database_url
3
4 from decouple import config
5 from django.utils.log import DEFAULT_LOGGING
6
7 from .utils import cast_supported_languages
8 from .utils import cast_empty_str_to_none
9
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
14
15 # SECURITY WARNING: keep the secret key used in production secret!
16 SECRET_KEY = config('SECRET_KEY')
17
18 # SECURITY WARNING: don't run with debug turned on in production!
19 DEBUG = config('DEBUG', default=False, cast=bool)
20
21 ALLOWED_HOSTS = config(
22 'ALLOWED_HOSTS',
23 default='*',
24 cast=lambda v: [s.strip() for s in v.split(',')])
25
26
27 # Application definition
28
29 INSTALLED_APPS = [
30 'django.contrib.admin',
31 'django.contrib.auth',
32 'django.contrib.contenttypes',
33 'django.contrib.sessions',
34 'django.contrib.messages',
35 'django.contrib.staticfiles',
36 'rest_framework',
37 'rest_framework.authtoken',
38 'django_filters',
39 'corsheaders',
40 'bothub.authentication',
41 'bothub.common',
42 'bothub.api',
43 ]
44
45 MIDDLEWARE = [
46 'django.middleware.security.SecurityMiddleware',
47 'whitenoise.middleware.WhiteNoiseMiddleware',
48 'django.contrib.sessions.middleware.SessionMiddleware',
49 'corsheaders.middleware.CorsMiddleware',
50 'django.middleware.common.CommonMiddleware',
51 'django.middleware.csrf.CsrfViewMiddleware',
52 'django.contrib.auth.middleware.AuthenticationMiddleware',
53 'django.contrib.messages.middleware.MessageMiddleware',
54 'django.middleware.clickjacking.XFrameOptionsMiddleware',
55 ]
56
57 ROOT_URLCONF = 'bothub.urls'
58
59 TEMPLATES = [
60 {
61 'BACKEND': 'django.template.backends.django.DjangoTemplates',
62 'DIRS': [],
63 'APP_DIRS': True,
64 'OPTIONS': {
65 'context_processors': [
66 'django.template.context_processors.debug',
67 'django.template.context_processors.request',
68 'django.contrib.auth.context_processors.auth',
69 'django.contrib.messages.context_processors.messages',
70 ],
71 },
72 },
73 ]
74
75 WSGI_APPLICATION = 'bothub.wsgi.application'
76
77
78 # Database
79
80 DATABASES = {}
81 DATABASES['default'] = dj_database_url.parse(
82 config(
83 'DEFAULT_DATABASE',
84 default='sqlite:///db.sqlite3'))
85
86
87 # Auth
88
89 AUTH_USER_MODEL = 'authentication.User'
90
91
92 # Password validation
93
94 AUTH_PASSWORD_VALIDATORS = [
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'UserAttributeSimilarityValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'MinimumLengthValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'CommonPasswordValidator',
106 },
107 {
108 'NAME': 'django.contrib.auth.password_validation.' +
109 'NumericPasswordValidator',
110 },
111 ]
112
113
114 # Internationalization
115
116 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
117
118 TIME_ZONE = config('TIME_ZONE', default='UTC')
119
120 USE_I18N = True
121
122 USE_L10N = True
123
124 USE_TZ = True
125
126
127 # Static files (CSS, JavaScript, Images)
128
129 STATIC_URL = config('STATIC_URL', default='/static/')
130
131 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
132
133 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
134
135
136 # rest framework
137
138 REST_FRAMEWORK = {
139 'DEFAULT_AUTHENTICATION_CLASSES': [
140 'rest_framework.authentication.TokenAuthentication',
141 ],
142 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
143 'LimitOffsetPagination',
144 'PAGE_SIZE': 20,
145 'DEFAULT_FILTER_BACKENDS': [
146 'django_filters.rest_framework.DjangoFilterBackend',
147 ],
148 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
149 }
150
151
152 # cors headers
153
154 CORS_ORIGIN_ALLOW_ALL = True
155
156
157 # mail
158
159 envvar_EMAIL_HOST = config(
160 'EMAIL_HOST',
161 default=None,
162 cast=cast_empty_str_to_none)
163
164 ADMINS = config(
165 'ADMINS',
166 default='',
167 cast=lambda v: [
168 (
169 s.strip().split('|')[0],
170 s.strip().split('|')[1],
171 ) for s in v.split(',')] if v else [])
172 EMAIL_SUBJECT_PREFIX = '[bothub] '
173 DEFAULT_FROM_EMAIL = config(
174 'DEFAULT_FROM_EMAIL',
175 default='webmaster@localhost')
176 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
177
178 if envvar_EMAIL_HOST:
179 EMAIL_HOST = envvar_EMAIL_HOST
180 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
181 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
182 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
183 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
184 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
185 else:
186 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
187
188 SEND_EMAILS = config('SEND_EMAILS', default=True, cast=bool)
189
190
191 # webapp
192
193 BOTHUB_WEBAPP_BASE_URL = config(
194 'BOTHUB_WEBAPP_BASE_URL',
195 default='http://localhost:8080/')
196
197
198 # NLP
199
200 BOTHUB_NLP_BASE_URL = config(
201 'BOTHUB_NLP_BASE_URL',
202 default='http://localhost:2657/')
203
204
205 # CSRF
206
207 CSRF_COOKIE_DOMAIN = config(
208 'CSRF_COOKIE_DOMAIN',
209 default=None,
210 cast=cast_empty_str_to_none)
211
212 CSRF_COOKIE_SECURE = config(
213 'CSRF_COOKIE_SECURE',
214 default=False,
215 cast=bool)
216
217
218 # Logging
219
220 LOGGING = DEFAULT_LOGGING
221 LOGGING['formatters']['bothub.health'] = {
222 'format': '[bothub.health] {message}',
223 'style': '{',
224 }
225 LOGGING['handlers']['bothub.health'] = {
226 'level': 'DEBUG',
227 'class': 'logging.StreamHandler',
228 'formatter': 'bothub.health',
229 }
230 LOGGING['loggers']['bothub.health.checks'] = {
231 'handlers': ['bothub.health'],
232 'level': 'DEBUG',
233 }
234
235
236 # Supported Languages
237
238 SUPPORTED_LANGUAGES = config(
239 'SUPPORTED_LANGUAGES',
240 default='en|pt',
241 cast=cast_supported_languages)
242
[end of bothub/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -239,3 +239,8 @@
'SUPPORTED_LANGUAGES',
default='en|pt',
cast=cast_supported_languages)
+
+
+# SECURE PROXY SSL HEADER
+
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
setup(
name='bothub',
- version='1.15.0',
+ version='1.15.1',
description='bothub',
packages=find_packages(),
install_requires=[
|
{"golden_diff": "diff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -239,3 +239,8 @@\n 'SUPPORTED_LANGUAGES',\n default='en|pt',\n cast=cast_supported_languages)\n+\n+\n+# SECURE PROXY SSL HEADER\n+\n+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,7 @@\n \n setup(\n name='bothub',\n- version='1.15.0',\n+ version='1.15.1',\n description='bothub',\n packages=find_packages(),\n install_requires=[\n", "issue": "Rest API just generate HTTP urls\nnext links (List Views) and absolute urls are generated to HTTP protocol because the all requests are make in port 80.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\nsetup(\n name='bothub',\n version='1.15.0',\n description='bothub',\n packages=find_packages(),\n install_requires=[\n 'python-decouple',\n 'requests',\n 'django==2.0.6',\n 'djangorestframework==3.7.7',\n 'whitenoise',\n 'dj-database-url',\n 'django-cors-headers',\n 'django-filter',\n 'coreapi',\n ],\n python_requires='>=3.6',\n)\n", "path": "setup.py"}, {"content": "import os\nimport dj_database_url\n\nfrom decouple import config\nfrom django.utils.log import DEFAULT_LOGGING\n\nfrom .utils import cast_supported_languages\nfrom .utils import cast_empty_str_to_none\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\n\n\n# mail\n\nenvvar_EMAIL_HOST = config(\n 'EMAIL_HOST',\n default=None,\n cast=cast_empty_str_to_none)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nSEND_EMAILS = config('SEND_EMAILS', default=True, cast=bool)\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:2657/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None,\n cast=cast_empty_str_to_none)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n\n\n# Logging\n\nLOGGING = DEFAULT_LOGGING\nLOGGING['formatters']['bothub.health'] = {\n 'format': '[bothub.health] {message}',\n 'style': '{',\n}\nLOGGING['handlers']['bothub.health'] = {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'bothub.health',\n}\nLOGGING['loggers']['bothub.health.checks'] = {\n 'handlers': ['bothub.health'],\n 'level': 'DEBUG',\n}\n\n\n# Supported Languages\n\nSUPPORTED_LANGUAGES = config(\n 'SUPPORTED_LANGUAGES',\n default='en|pt',\n cast=cast_supported_languages)\n", "path": "bothub/settings.py"}]}
| 2,720 | 172 |
gh_patches_debug_4394
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-3371
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError from contract errback
When running a contract with a URL that returns a non-200 response, I get the following:
```
2018-08-09 14:40:23 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.bureauxlocaux.com/annonce/a-louer-bureaux-a-louer-a-nantes--1289-358662> (referer: None)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/local/lib/python3.6/site-packages/scrapy/contracts/__init__.py", line 89, in eb_wrapper
results.addError(case, exc_info)
File "/usr/local/lib/python3.6/unittest/runner.py", line 67, in addError
super(TextTestResult, self).addError(test, err)
File "/usr/local/lib/python3.6/unittest/result.py", line 17, in inner
return method(self, *args, **kw)
File "/usr/local/lib/python3.6/unittest/result.py", line 115, in addError
self.errors.append((test, self._exc_info_to_string(err, test)))
File "/usr/local/lib/python3.6/unittest/result.py", line 186, in _exc_info_to_string
exctype, value, tb, limit=length, capture_locals=self.tb_locals)
File "/usr/local/lib/python3.6/traceback.py", line 470, in __init__
exc_value.__cause__.__traceback__,
AttributeError: 'getset_descriptor' object has no attribute '__traceback__'
```
Here is what `exc_info` looks like:
```
(HttpError('Ignoring non-200 response',), <class 'scrapy.spidermiddlewares.httperror.HttpError'>, <traceback object at 0x7f4bdca1d948>)
```
</issue>
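The traceback comes down to tuple ordering: `unittest`'s `TestResult.addError` expects the same `(type, value, traceback)` ordering that `sys.exc_info()` returns, while the errback wrapper builds `(value, type, traceback)`, as the printed `exc_info` above shows. A minimal sketch of the corrected construction from a Twisted `Failure`, mirroring the one-line fix shown further down in this entry:

```python
from twisted.python.failure import Failure

def exc_info_from_failure(failure: Failure):
    # Match sys.exc_info(): (exception type, exception instance, traceback).
    return failure.type, failure.value, failure.getTracebackObject()
```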
<code>
[start of scrapy/contracts/__init__.py]
1 import sys
2 import re
3 from functools import wraps
4 from unittest import TestCase
5
6 from scrapy.http import Request
7 from scrapy.utils.spider import iterate_spider_output
8 from scrapy.utils.python import get_spec
9
10
11 class ContractsManager(object):
12 contracts = {}
13
14 def __init__(self, contracts):
15 for contract in contracts:
16 self.contracts[contract.name] = contract
17
18 def tested_methods_from_spidercls(self, spidercls):
19 methods = []
20 for key, value in vars(spidercls).items():
21 if (callable(value) and value.__doc__ and
22 re.search(r'^\s*@', value.__doc__, re.MULTILINE)):
23 methods.append(key)
24
25 return methods
26
27 def extract_contracts(self, method):
28 contracts = []
29 for line in method.__doc__.split('\n'):
30 line = line.strip()
31
32 if line.startswith('@'):
33 name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
34 args = re.split(r'\s+', args)
35
36 contracts.append(self.contracts[name](method, *args))
37
38 return contracts
39
40 def from_spider(self, spider, results):
41 requests = []
42 for method in self.tested_methods_from_spidercls(type(spider)):
43 bound_method = spider.__getattribute__(method)
44 requests.append(self.from_method(bound_method, results))
45
46 return requests
47
48 def from_method(self, method, results):
49 contracts = self.extract_contracts(method)
50 if contracts:
51 # calculate request args
52 args, kwargs = get_spec(Request.__init__)
53 kwargs['callback'] = method
54 for contract in contracts:
55 kwargs = contract.adjust_request_args(kwargs)
56
57 # create and prepare request
58 args.remove('self')
59 if set(args).issubset(set(kwargs)):
60 request = Request(**kwargs)
61
62 # execute pre and post hooks in order
63 for contract in reversed(contracts):
64 request = contract.add_pre_hook(request, results)
65 for contract in contracts:
66 request = contract.add_post_hook(request, results)
67
68 self._clean_req(request, method, results)
69 return request
70
71 def _clean_req(self, request, method, results):
72 """ stop the request from returning objects and records any errors """
73
74 cb = request.callback
75
76 @wraps(cb)
77 def cb_wrapper(response):
78 try:
79 output = cb(response)
80 output = list(iterate_spider_output(output))
81 except:
82 case = _create_testcase(method, 'callback')
83 results.addError(case, sys.exc_info())
84
85 def eb_wrapper(failure):
86 case = _create_testcase(method, 'errback')
87 exc_info = failure.value, failure.type, failure.getTracebackObject()
88 results.addError(case, exc_info)
89
90 request.callback = cb_wrapper
91 request.errback = eb_wrapper
92
93
94 class Contract(object):
95 """ Abstract class for contracts """
96
97 def __init__(self, method, *args):
98 self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
99 self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)
100 self.args = args
101
102 def add_pre_hook(self, request, results):
103 if hasattr(self, 'pre_process'):
104 cb = request.callback
105
106 @wraps(cb)
107 def wrapper(response):
108 try:
109 results.startTest(self.testcase_pre)
110 self.pre_process(response)
111 results.stopTest(self.testcase_pre)
112 except AssertionError:
113 results.addFailure(self.testcase_pre, sys.exc_info())
114 except Exception:
115 results.addError(self.testcase_pre, sys.exc_info())
116 else:
117 results.addSuccess(self.testcase_pre)
118 finally:
119 return list(iterate_spider_output(cb(response)))
120
121 request.callback = wrapper
122
123 return request
124
125 def add_post_hook(self, request, results):
126 if hasattr(self, 'post_process'):
127 cb = request.callback
128
129 @wraps(cb)
130 def wrapper(response):
131 output = list(iterate_spider_output(cb(response)))
132 try:
133 results.startTest(self.testcase_post)
134 self.post_process(output)
135 results.stopTest(self.testcase_post)
136 except AssertionError:
137 results.addFailure(self.testcase_post, sys.exc_info())
138 except Exception:
139 results.addError(self.testcase_post, sys.exc_info())
140 else:
141 results.addSuccess(self.testcase_post)
142 finally:
143 return output
144
145 request.callback = wrapper
146
147 return request
148
149 def adjust_request_args(self, args):
150 return args
151
152
153 def _create_testcase(method, desc):
154 spider = method.__self__.name
155
156 class ContractTestCase(TestCase):
157 def __str__(_self):
158 return "[%s] %s (%s)" % (spider, method.__name__, desc)
159
160 name = '%s_%s' % (spider, method.__name__)
161 setattr(ContractTestCase, name, lambda x: x)
162 return ContractTestCase(name)
163
[end of scrapy/contracts/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py
--- a/scrapy/contracts/__init__.py
+++ b/scrapy/contracts/__init__.py
@@ -84,7 +84,7 @@
def eb_wrapper(failure):
case = _create_testcase(method, 'errback')
- exc_info = failure.value, failure.type, failure.getTracebackObject()
+ exc_info = failure.type, failure.value, failure.getTracebackObject()
results.addError(case, exc_info)
request.callback = cb_wrapper
|
{"golden_diff": "diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py\n--- a/scrapy/contracts/__init__.py\n+++ b/scrapy/contracts/__init__.py\n@@ -84,7 +84,7 @@\n \n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n- exc_info = failure.value, failure.type, failure.getTracebackObject()\n+ exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n \n request.callback = cb_wrapper\n", "issue": "AttributeError from contract errback\nWhen running a contract with a URL that returns non-200 response, I get the following:\r\n```\r\n2018-08-09 14:40:23 [scrapy.core.scraper] ERROR: Spider error processing <GET https://www.bureauxlocaux.com/annonce/a-louer-bureaux-a-louer-a-nantes--1289-358662> (referer: None)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n current.result = callback(current.result, *args, **kw)\r\n File \"/usr/local/lib/python3.6/site-packages/scrapy/contracts/__init__.py\", line 89, in eb_wrapper\r\n results.addError(case, exc_info)\r\n File \"/usr/local/lib/python3.6/unittest/runner.py\", line 67, in addError\r\n super(TextTestResult, self).addError(test, err)\r\n File \"/usr/local/lib/python3.6/unittest/result.py\", line 17, in inner\r\n return method(self, *args, **kw)\r\n File \"/usr/local/lib/python3.6/unittest/result.py\", line 115, in addError\r\n self.errors.append((test, self._exc_info_to_string(err, test)))\r\n File \"/usr/local/lib/python3.6/unittest/result.py\", line 186, in _exc_info_to_string\r\n exctype, value, tb, limit=length, capture_locals=self.tb_locals)\r\n File \"/usr/local/lib/python3.6/traceback.py\", line 470, in __init__\r\n exc_value.__cause__.__traceback__,\r\nAttributeError: 'getset_descriptor' object has no attribute '__traceback__'\r\n```\r\n\r\nHere is how `exc_info` looks like:\r\n```\r\n(HttpError('Ignoring non-200 response',), <class 'scrapy.spidermiddlewares.httperror.HttpError'>, <traceback object at 0x7f4bdca1d948>)\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in vars(spidercls).items():\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n # calculate request args\n args, kwargs = get_spec(Request.__init__)\n kwargs['callback'] = method\n for contract 
in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n # create and prepare request\n args.remove('self')\n if set(args).issubset(set(kwargs)):\n request = Request(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.value, failure.type, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}]}
| 2,472 | 128 |
gh_patches_debug_39579
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-2071
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Infinite loop from multidimensional array in calldata
### Version Information
* vyper Version (output of `vyper --version`): latest master
* OS: linux
* Python Version: `3.8.2`
### What's your issue about?
Using a multidimensional array in the function inputs, where the length of the >=2nd dimension is 6 or more, causes an infinite loop when calling the function.
For example, each of the following methods will compile but attempting to call them fails with out of gas:
```python
@public
def foo(a: uint256[1][6]):
pass
@public
def bar(a: uint256[1][1][6]):
pass
```
For comparison, these methods execute as expected:
```python
@public
def foo(a: uint256[6][1]):
pass
@public
def bar(a: uint256[100][5][5][5]):
pass
```
#### Some observations:
* The length of the first array element has no effect.
* The data type has no effect.
* The location of the array within calldata, and total number of arguments, has no effect.
* The number of dimensions, or dimension that exceeds a length of 5, has no effect.
### How can it be fixed?
Unsure at this time.
</issue>
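Reading `make_arg_clamper` in the listing below suggests why the generated code never terminates for a type like `uint256[1][6]`: the outer list loop and the recursively generated clamper for the inner list both use the hard-coded scratch slot 288 as their loop counter, so every pass through the inner clamper resets the outer counter before it can reach its bound. A rough Python simulation of that interaction (purely illustrative, not Vyper or LLL code):

```python
mem_288 = 0                      # outer loop init: ["mstore", 288, 0]
outer_bound = 1 * 32 * (6 - 1)   # mem_to computed for uint256[1][6] -> 160
for _ in range(4):               # bounded here only so the sketch terminates
    mem_288 = 0                  # inner clamper re-initialises the shared slot
    mem_288 += 32                # inner loop increment for its single element
    mem_288 += 32                # outer loop increment
    print(mem_288, "<", outer_bound)  # stuck at 64, so the exit test never fails
```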
<code>
[start of vyper/parser/arg_clamps.py]
1 import functools
2 import uuid
3
4 from vyper.parser.lll_node import LLLnode
5 from vyper.types.types import (
6 ByteArrayLike,
7 ListType,
8 get_size_of_type,
9 is_base_type,
10 )
11 from vyper.utils import MemoryPositions
12
13
14 def _mk_calldatacopy_copier(pos, sz, mempos):
15 return ["calldatacopy", mempos, ["add", 4, pos], sz]
16
17
18 def _mk_codecopy_copier(pos, sz, mempos):
19 return ["codecopy", mempos, ["add", "~codelen", pos], sz]
20
21
22 def make_arg_clamper(datapos, mempos, typ, is_init=False):
23 """
24 Clamps argument to type limits.
25 """
26
27 if not is_init:
28 data_decl = ["calldataload", ["add", 4, datapos]]
29 copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)
30 else:
31 data_decl = ["codeload", ["add", "~codelen", datapos]]
32 copier = functools.partial(_mk_codecopy_copier, mempos=mempos)
33 # Numbers: make sure they're in range
34 if is_base_type(typ, "int128"):
35 return LLLnode.from_list(
36 [
37 "clamp",
38 ["mload", MemoryPositions.MINNUM],
39 data_decl,
40 ["mload", MemoryPositions.MAXNUM],
41 ],
42 typ=typ,
43 annotation="checking int128 input",
44 )
45 # Booleans: make sure they're zero or one
46 elif is_base_type(typ, "bool"):
47 return LLLnode.from_list(
48 ["uclamplt", data_decl, 2], typ=typ, annotation="checking bool input",
49 )
50 # Addresses: make sure they're in range
51 elif is_base_type(typ, "address"):
52 return LLLnode.from_list(
53 ["uclamplt", data_decl, ["mload", MemoryPositions.ADDRSIZE]],
54 typ=typ,
55 annotation="checking address input",
56 )
57 # Bytes: make sure they have the right size
58 elif isinstance(typ, ByteArrayLike):
59 return LLLnode.from_list(
60 [
61 "seq",
62 copier(data_decl, 32 + typ.maxlen),
63 ["assert", ["le", ["calldataload", ["add", 4, data_decl]], typ.maxlen]],
64 ],
65 typ=None,
66 annotation="checking bytearray input",
67 )
68 # Lists: recurse
69 elif isinstance(typ, ListType):
70 if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
71 subtype_size = get_size_of_type(typ.subtype)
72 i_incr = subtype_size * 32
73
74 mem_to = subtype_size * 32 * (typ.count - 1)
75 loop_label = f"_check_list_loop_{str(uuid.uuid4())}"
76
77 offset = 288
78 o = [
79 ["mstore", offset, 0], # init loop
80 ["label", loop_label],
81 make_arg_clamper(
82 ["add", datapos, ["mload", offset]],
83 ["add", mempos, ["mload", offset]],
84 typ.subtype,
85 is_init,
86 ),
87 ["mstore", offset, ["add", ["mload", offset], i_incr]],
88 ["if", ["lt", ["mload", offset], mem_to], ["goto", loop_label]],
89 ]
90 else:
91 o = []
92 for i in range(typ.count):
93 offset = get_size_of_type(typ.subtype) * 32 * i
94 o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
95 return LLLnode.from_list(["seq"] + o, typ=None, annotation="checking list input")
96 # Otherwise don't make any checks
97 else:
98 return LLLnode.from_list("pass")
99
[end of vyper/parser/arg_clamps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vyper/parser/arg_clamps.py b/vyper/parser/arg_clamps.py
--- a/vyper/parser/arg_clamps.py
+++ b/vyper/parser/arg_clamps.py
@@ -22,6 +22,22 @@
def make_arg_clamper(datapos, mempos, typ, is_init=False):
"""
Clamps argument to type limits.
+
+ Arguments
+ ---------
+ datapos : int | LLLnode
+ Calldata offset of the value being clamped
+ mempos : int | LLLnode
+ Memory offset that the value is stored at during clamping
+ typ : vyper.types.types.BaseType
+ Type of the value
+ is_init : bool, optional
+ Boolean indicating if we are generating init bytecode
+
+ Returns
+ -------
+ LLLnode
+ Arg clamper LLL
"""
if not is_init:
@@ -68,31 +84,45 @@
# Lists: recurse
elif isinstance(typ, ListType):
if typ.count > 5 or (type(datapos) is list and type(mempos) is list):
- subtype_size = get_size_of_type(typ.subtype)
- i_incr = subtype_size * 32
+ # find ultimate base type
+ subtype = typ.subtype
+ while hasattr(subtype, "subtype"):
+ subtype = subtype.subtype
+
+ # make arg clamper for the base type
+ offset = MemoryPositions.FREE_LOOP_INDEX
+ clamper = make_arg_clamper(
+ ["add", datapos, ["mload", offset]],
+ ["add", mempos, ["mload", offset]],
+ subtype,
+ is_init,
+ )
+ if clamper.value == "pass":
+ # no point looping if the base type doesn't require clamping
+ return clamper
+
+ # loop the entire array at once, even if it's multidimensional
+ type_size = get_size_of_type(typ)
+ i_incr = get_size_of_type(subtype) * 32
- mem_to = subtype_size * 32 * (typ.count - 1)
+ mem_to = type_size * 32
loop_label = f"_check_list_loop_{str(uuid.uuid4())}"
- offset = 288
- o = [
+ lll_node = [
["mstore", offset, 0], # init loop
["label", loop_label],
- make_arg_clamper(
- ["add", datapos, ["mload", offset]],
- ["add", mempos, ["mload", offset]],
- typ.subtype,
- is_init,
- ),
+ clamper,
["mstore", offset, ["add", ["mload", offset], i_incr]],
["if", ["lt", ["mload", offset], mem_to], ["goto", loop_label]],
]
else:
- o = []
+ lll_node = []
for i in range(typ.count):
offset = get_size_of_type(typ.subtype) * 32 * i
- o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))
- return LLLnode.from_list(["seq"] + o, typ=None, annotation="checking list input")
+ lll_node.append(
+ make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)
+ )
+ return LLLnode.from_list(["seq"] + lll_node, typ=None, annotation="checking list input")
# Otherwise don't make any checks
else:
return LLLnode.from_list("pass")
|
{"golden_diff": "diff --git a/vyper/parser/arg_clamps.py b/vyper/parser/arg_clamps.py\n--- a/vyper/parser/arg_clamps.py\n+++ b/vyper/parser/arg_clamps.py\n@@ -22,6 +22,22 @@\n def make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n+\n+ Arguments\n+ ---------\n+ datapos : int | LLLnode\n+ Calldata offset of the value being clamped\n+ mempos : int | LLLnode\n+ Memory offset that the value is stored at during clamping\n+ typ : vyper.types.types.BaseType\n+ Type of the value\n+ is_init : bool, optional\n+ Boolean indicating if we are generating init bytecode\n+\n+ Returns\n+ -------\n+ LLLnode\n+ Arg clamper LLL\n \"\"\"\n \n if not is_init:\n@@ -68,31 +84,45 @@\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n- subtype_size = get_size_of_type(typ.subtype)\n- i_incr = subtype_size * 32\n+ # find ultimate base type\n+ subtype = typ.subtype\n+ while hasattr(subtype, \"subtype\"):\n+ subtype = subtype.subtype\n+\n+ # make arg clamper for the base type\n+ offset = MemoryPositions.FREE_LOOP_INDEX\n+ clamper = make_arg_clamper(\n+ [\"add\", datapos, [\"mload\", offset]],\n+ [\"add\", mempos, [\"mload\", offset]],\n+ subtype,\n+ is_init,\n+ )\n+ if clamper.value == \"pass\":\n+ # no point looping if the base type doesn't require clamping\n+ return clamper\n+\n+ # loop the entire array at once, even if it's multidimensional\n+ type_size = get_size_of_type(typ)\n+ i_incr = get_size_of_type(subtype) * 32\n \n- mem_to = subtype_size * 32 * (typ.count - 1)\n+ mem_to = type_size * 32\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n \n- offset = 288\n- o = [\n+ lll_node = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n- make_arg_clamper(\n- [\"add\", datapos, [\"mload\", offset]],\n- [\"add\", mempos, [\"mload\", offset]],\n- typ.subtype,\n- is_init,\n- ),\n+ clamper,\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n- o = []\n+ lll_node = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n- o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))\n- return LLLnode.from_list([\"seq\"] + o, typ=None, annotation=\"checking list input\")\n+ lll_node.append(\n+ make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init)\n+ )\n+ return LLLnode.from_list([\"seq\"] + lll_node, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "issue": "Infinite loop from multidimensional array in calldata\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master\r\n* OS: linux\r\n* Python Version: `3.8.2`\r\n\r\n### What's your issue about?\r\n\r\nUsing a multidimensional array in the function inputs, where the length of the >=2nd dimension is 6 or more, causes an infinite loop when calling the function.\r\n\r\nFor example, each of the following methods will compile but attempting to call them fails with out of gas:\r\n\r\n```python\r\n@public\r\ndef foo(a: uint256[1][6]):\r\n pass\r\n\r\n@public\r\ndef bar(a: uint256[1][1][6]):\r\n pass\r\n```\r\n\r\nFor comparison, these methods execute as expected:\r\n\r\n```python\r\n@public\r\ndef foo(a: uint256[6][1]):\r\n pass\r\n\r\n@public\r\ndef bar(a: uint256[100][5][5][5]):\r\n pass\r\n```\r\n\r\n#### Some observations:\r\n\r\n* The length of the 
first array element has no effect.\r\n* The data type has no effect.\r\n* The location of the array within calldata, and total number of arguments, has no effect.\r\n* The number of dimensions, or dimension that exceeds a length of 5, has no effect.\r\n\r\n### How can it be fixed?\r\nUnsure at this time.\n", "before_files": [{"content": "import functools\nimport uuid\n\nfrom vyper.parser.lll_node import LLLnode\nfrom vyper.types.types import (\n ByteArrayLike,\n ListType,\n get_size_of_type,\n is_base_type,\n)\nfrom vyper.utils import MemoryPositions\n\n\ndef _mk_calldatacopy_copier(pos, sz, mempos):\n return [\"calldatacopy\", mempos, [\"add\", 4, pos], sz]\n\n\ndef _mk_codecopy_copier(pos, sz, mempos):\n return [\"codecopy\", mempos, [\"add\", \"~codelen\", pos], sz]\n\n\ndef make_arg_clamper(datapos, mempos, typ, is_init=False):\n \"\"\"\n Clamps argument to type limits.\n \"\"\"\n\n if not is_init:\n data_decl = [\"calldataload\", [\"add\", 4, datapos]]\n copier = functools.partial(_mk_calldatacopy_copier, mempos=mempos)\n else:\n data_decl = [\"codeload\", [\"add\", \"~codelen\", datapos]]\n copier = functools.partial(_mk_codecopy_copier, mempos=mempos)\n # Numbers: make sure they're in range\n if is_base_type(typ, \"int128\"):\n return LLLnode.from_list(\n [\n \"clamp\",\n [\"mload\", MemoryPositions.MINNUM],\n data_decl,\n [\"mload\", MemoryPositions.MAXNUM],\n ],\n typ=typ,\n annotation=\"checking int128 input\",\n )\n # Booleans: make sure they're zero or one\n elif is_base_type(typ, \"bool\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, 2], typ=typ, annotation=\"checking bool input\",\n )\n # Addresses: make sure they're in range\n elif is_base_type(typ, \"address\"):\n return LLLnode.from_list(\n [\"uclamplt\", data_decl, [\"mload\", MemoryPositions.ADDRSIZE]],\n typ=typ,\n annotation=\"checking address input\",\n )\n # Bytes: make sure they have the right size\n elif isinstance(typ, ByteArrayLike):\n return LLLnode.from_list(\n [\n \"seq\",\n copier(data_decl, 32 + typ.maxlen),\n [\"assert\", [\"le\", [\"calldataload\", [\"add\", 4, data_decl]], typ.maxlen]],\n ],\n typ=None,\n annotation=\"checking bytearray input\",\n )\n # Lists: recurse\n elif isinstance(typ, ListType):\n if typ.count > 5 or (type(datapos) is list and type(mempos) is list):\n subtype_size = get_size_of_type(typ.subtype)\n i_incr = subtype_size * 32\n\n mem_to = subtype_size * 32 * (typ.count - 1)\n loop_label = f\"_check_list_loop_{str(uuid.uuid4())}\"\n\n offset = 288\n o = [\n [\"mstore\", offset, 0], # init loop\n [\"label\", loop_label],\n make_arg_clamper(\n [\"add\", datapos, [\"mload\", offset]],\n [\"add\", mempos, [\"mload\", offset]],\n typ.subtype,\n is_init,\n ),\n [\"mstore\", offset, [\"add\", [\"mload\", offset], i_incr]],\n [\"if\", [\"lt\", [\"mload\", offset], mem_to], [\"goto\", loop_label]],\n ]\n else:\n o = []\n for i in range(typ.count):\n offset = get_size_of_type(typ.subtype) * 32 * i\n o.append(make_arg_clamper(datapos + offset, mempos + offset, typ.subtype, is_init))\n return LLLnode.from_list([\"seq\"] + o, typ=None, annotation=\"checking list input\")\n # Otherwise don't make any checks\n else:\n return LLLnode.from_list(\"pass\")\n", "path": "vyper/parser/arg_clamps.py"}]}
| 1,884 | 837 |
gh_patches_debug_1907
|
rasdani/github-patches
|
git_diff
|
google__flax-628
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After update from 0.2.0: AttributeError: module 'jax.core' has no attribute 'eval_context'
After updating from flax 0.2.0 to flax 0.2.2 I get the above error message. Downgrading to 0.2.0 solves this, so the error source is located. I'm working with the now deprecated flax.nn package if backward-compatibility might be the reason for this issue.
The Issue is encountered in a custom RNN, when using the init_by_shape function in conjunction with jax.lax.scan.
</issue>
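The error points at a version mismatch rather than a bug in the RNN itself: flax 0.2.2 relies on `jax.core.eval_context`, which older jax releases do not provide, and the accepted fix further down simply raises flax's minimum jax requirement. A sketch of the corresponding constraint, with the 0.1.77 floor taken from that patch (pinning a matching jax/jaxlib pair in the application is an equally workable user-side workaround):

```python
# setup.py sketch: require a jax new enough to expose the internals that
# flax 0.2.2 (including the deprecated flax.nn path) calls into.
install_requires = [
    "numpy>=1.12",
    "jax>=0.1.77",
]
```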
<code>
[start of setup.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """setup.py for Flax."""
16
17 import os
18 from setuptools import find_packages
19 from setuptools import setup
20
21 here = os.path.abspath(os.path.dirname(__file__))
22 try:
23 README = open(os.path.join(here, "README.md"), encoding='utf-8').read()
24 except IOError:
25 README = ""
26
27 install_requires = [
28 "numpy>=1.12",
29 "jax>=0.1.59",
30 "matplotlib", # only needed for tensorboard export
31 "dataclasses;python_version<'3.7'", # will only install on py3.6
32 "msgpack",
33 ]
34
35 tests_require = [
36 "atari-py",
37 "clu", # All examples.
38 "gym",
39 "jaxlib",
40 "ml-collections",
41 "opencv-python",
42 "pytest",
43 "pytest-cov",
44 "pytest-xdist==1.34.0", # upgrading to 2.0 broke tests, need to investigate
45 "sentencepiece", # WMT example.
46 "svn",
47 "tensorflow",
48 "tensorflow_text", # WMT example.
49 "tensorflow_datasets",
50 ]
51
52 __version__ = None
53
54 with open('flax/version.py') as f:
55 exec(f.read(), globals())
56
57 setup(
58 name="flax",
59 version=__version__,
60 description="Flax: A neural network library for JAX designed for flexibility",
61 long_description="\n\n".join([README]),
62 long_description_content_type='text/markdown',
63 classifiers=[
64 "Development Status :: 3 - Alpha",
65 "Intended Audience :: Developers",
66 "Intended Audience :: Science/Research",
67 "License :: OSI Approved :: Apache Software License",
68 "Programming Language :: Python :: 3.7",
69 "Topic :: Scientific/Engineering :: Artificial Intelligence",
70 ],
71 keywords="",
72 author="Flax team",
73 author_email="[email protected]",
74 url="https://github.com/google/flax",
75 packages=find_packages(),
76 include_package_data=False,
77 zip_safe=False,
78 install_requires=install_requires,
79 extras_require={
80 "testing": tests_require,
81 },
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
install_requires = [
"numpy>=1.12",
- "jax>=0.1.59",
+ "jax>=0.1.77",
"matplotlib", # only needed for tensorboard export
"dataclasses;python_version<'3.7'", # will only install on py3.6
"msgpack",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \n install_requires = [\n \"numpy>=1.12\",\n- \"jax>=0.1.59\",\n+ \"jax>=0.1.77\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n", "issue": "After update from 0.2.0: AttributeError: module 'jax.core' has no attribute 'eval_context'\nAfter updating from flax 0.2.0 to flax 0.2.2 I get the above error message. Downgrading to 0.2.0 solves this, so the error source is located. I'm working with the now deprecated flax.nn package if backward-compatibility might be the reason for this issue.\r\nThe Issue is encountered in a custom RNN, when using the init_by_shape function in conjunction with jax.lax.scan.\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding='utf-8').read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n]\n\ntests_require = [\n \"atari-py\",\n \"clu\", # All examples.\n \"gym\",\n \"jaxlib\",\n \"ml-collections\",\n \"opencv-python\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist==1.34.0\", # upgrading to 2.0 broke tests, need to investigate\n \"sentencepiece\", # WMT example.\n \"svn\",\n \"tensorflow\",\n \"tensorflow_text\", # WMT example.\n \"tensorflow_datasets\",\n]\n\n__version__ = None\n\nwith open('flax/version.py') as f:\n exec(f.read(), globals())\n\nsetup(\n name=\"flax\",\n version=__version__,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n packages=find_packages(),\n include_package_data=False,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]}
| 1,402 | 110 |
gh_patches_debug_5356
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-1093
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
threading.setDaemon has been deprecated in favor of setting the daemon attribute directly in Python 3.10
Ref: https://github.com/python/cpython/pull/25174
https://github.com/getsentry/sentry-python/blob/927903e3b354a42e427d91129c399d64d480a6b9/sentry_sdk/worker.py#L69
</issue>
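The deprecation is a pure spelling change on `threading.Thread`: the `setDaemon`/`isDaemon` methods are superseded by the `daemon` property, which has been available since Python 2.6, so switching costs nothing on older interpreters. A minimal sketch of the before and after, matching the shape of the one-line fix further down in this entry:

```python
import threading

def target() -> None:
    print("background work")

worker = threading.Thread(target=target, name="example-worker")
# Deprecated spelling, warns on Python 3.10+:
#     worker.setDaemon(True)
# Preferred spelling:
worker.daemon = True
worker.start()
worker.join()
```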
<code>
[start of sentry_sdk/worker.py]
1 import os
2 import threading
3
4 from time import sleep, time
5 from sentry_sdk._compat import check_thread_support
6 from sentry_sdk._queue import Queue, Full
7 from sentry_sdk.utils import logger
8 from sentry_sdk.consts import DEFAULT_QUEUE_SIZE
9
10 from sentry_sdk._types import MYPY
11
12 if MYPY:
13 from typing import Any
14 from typing import Optional
15 from typing import Callable
16
17
18 _TERMINATOR = object()
19
20
21 class BackgroundWorker(object):
22 def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):
23 # type: (int) -> None
24 check_thread_support()
25 self._queue = Queue(queue_size) # type: Queue
26 self._lock = threading.Lock()
27 self._thread = None # type: Optional[threading.Thread]
28 self._thread_for_pid = None # type: Optional[int]
29
30 @property
31 def is_alive(self):
32 # type: () -> bool
33 if self._thread_for_pid != os.getpid():
34 return False
35 if not self._thread:
36 return False
37 return self._thread.is_alive()
38
39 def _ensure_thread(self):
40 # type: () -> None
41 if not self.is_alive:
42 self.start()
43
44 def _timed_queue_join(self, timeout):
45 # type: (float) -> bool
46 deadline = time() + timeout
47 queue = self._queue
48
49 queue.all_tasks_done.acquire()
50
51 try:
52 while queue.unfinished_tasks:
53 delay = deadline - time()
54 if delay <= 0:
55 return False
56 queue.all_tasks_done.wait(timeout=delay)
57
58 return True
59 finally:
60 queue.all_tasks_done.release()
61
62 def start(self):
63 # type: () -> None
64 with self._lock:
65 if not self.is_alive:
66 self._thread = threading.Thread(
67 target=self._target, name="raven-sentry.BackgroundWorker"
68 )
69 self._thread.setDaemon(True)
70 self._thread.start()
71 self._thread_for_pid = os.getpid()
72
73 def kill(self):
74 # type: () -> None
75 """
76 Kill worker thread. Returns immediately. Not useful for
77 waiting on shutdown for events, use `flush` for that.
78 """
79 logger.debug("background worker got kill request")
80 with self._lock:
81 if self._thread:
82 try:
83 self._queue.put_nowait(_TERMINATOR)
84 except Full:
85 logger.debug("background worker queue full, kill failed")
86
87 self._thread = None
88 self._thread_for_pid = None
89
90 def flush(self, timeout, callback=None):
91 # type: (float, Optional[Any]) -> None
92 logger.debug("background worker got flush request")
93 with self._lock:
94 if self.is_alive and timeout > 0.0:
95 self._wait_flush(timeout, callback)
96 logger.debug("background worker flushed")
97
98 def _wait_flush(self, timeout, callback):
99 # type: (float, Optional[Any]) -> None
100 initial_timeout = min(0.1, timeout)
101 if not self._timed_queue_join(initial_timeout):
102 pending = self._queue.qsize() + 1
103 logger.debug("%d event(s) pending on flush", pending)
104 if callback is not None:
105 callback(pending, timeout)
106
107 if not self._timed_queue_join(timeout - initial_timeout):
108 pending = self._queue.qsize() + 1
109 logger.error("flush timed out, dropped %s events", pending)
110
111 def submit(self, callback):
112 # type: (Callable[[], None]) -> bool
113 self._ensure_thread()
114 try:
115 self._queue.put_nowait(callback)
116 return True
117 except Full:
118 return False
119
120 def _target(self):
121 # type: () -> None
122 while True:
123 callback = self._queue.get()
124 try:
125 if callback is _TERMINATOR:
126 break
127 try:
128 callback()
129 except Exception:
130 logger.error("Failed processing job", exc_info=True)
131 finally:
132 self._queue.task_done()
133 sleep(0)
134
[end of sentry_sdk/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py
--- a/sentry_sdk/worker.py
+++ b/sentry_sdk/worker.py
@@ -66,7 +66,7 @@
self._thread = threading.Thread(
target=self._target, name="raven-sentry.BackgroundWorker"
)
- self._thread.setDaemon(True)
+ self._thread.daemon = True
self._thread.start()
self._thread_for_pid = os.getpid()
|
{"golden_diff": "diff --git a/sentry_sdk/worker.py b/sentry_sdk/worker.py\n--- a/sentry_sdk/worker.py\n+++ b/sentry_sdk/worker.py\n@@ -66,7 +66,7 @@\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n- self._thread.setDaemon(True)\n+ self._thread.daemon = True\n self._thread.start()\n self._thread_for_pid = os.getpid()\n", "issue": "threading.setDaemon has been deprecated in favor of setting daemon attribute directly in Python 3.10\nRef : https://github.com/python/cpython/pull/25174\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/927903e3b354a42e427d91129c399d64d480a6b9/sentry_sdk/worker.py#L69\n", "before_files": [{"content": "import os\nimport threading\n\nfrom time import sleep, time\nfrom sentry_sdk._compat import check_thread_support\nfrom sentry_sdk._queue import Queue, Full\nfrom sentry_sdk.utils import logger\nfrom sentry_sdk.consts import DEFAULT_QUEUE_SIZE\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n\n_TERMINATOR = object()\n\n\nclass BackgroundWorker(object):\n def __init__(self, queue_size=DEFAULT_QUEUE_SIZE):\n # type: (int) -> None\n check_thread_support()\n self._queue = Queue(queue_size) # type: Queue\n self._lock = threading.Lock()\n self._thread = None # type: Optional[threading.Thread]\n self._thread_for_pid = None # type: Optional[int]\n\n @property\n def is_alive(self):\n # type: () -> bool\n if self._thread_for_pid != os.getpid():\n return False\n if not self._thread:\n return False\n return self._thread.is_alive()\n\n def _ensure_thread(self):\n # type: () -> None\n if not self.is_alive:\n self.start()\n\n def _timed_queue_join(self, timeout):\n # type: (float) -> bool\n deadline = time() + timeout\n queue = self._queue\n\n queue.all_tasks_done.acquire()\n\n try:\n while queue.unfinished_tasks:\n delay = deadline - time()\n if delay <= 0:\n return False\n queue.all_tasks_done.wait(timeout=delay)\n\n return True\n finally:\n queue.all_tasks_done.release()\n\n def start(self):\n # type: () -> None\n with self._lock:\n if not self.is_alive:\n self._thread = threading.Thread(\n target=self._target, name=\"raven-sentry.BackgroundWorker\"\n )\n self._thread.setDaemon(True)\n self._thread.start()\n self._thread_for_pid = os.getpid()\n\n def kill(self):\n # type: () -> None\n \"\"\"\n Kill worker thread. Returns immediately. 
Not useful for\n waiting on shutdown for events, use `flush` for that.\n \"\"\"\n logger.debug(\"background worker got kill request\")\n with self._lock:\n if self._thread:\n try:\n self._queue.put_nowait(_TERMINATOR)\n except Full:\n logger.debug(\"background worker queue full, kill failed\")\n\n self._thread = None\n self._thread_for_pid = None\n\n def flush(self, timeout, callback=None):\n # type: (float, Optional[Any]) -> None\n logger.debug(\"background worker got flush request\")\n with self._lock:\n if self.is_alive and timeout > 0.0:\n self._wait_flush(timeout, callback)\n logger.debug(\"background worker flushed\")\n\n def _wait_flush(self, timeout, callback):\n # type: (float, Optional[Any]) -> None\n initial_timeout = min(0.1, timeout)\n if not self._timed_queue_join(initial_timeout):\n pending = self._queue.qsize() + 1\n logger.debug(\"%d event(s) pending on flush\", pending)\n if callback is not None:\n callback(pending, timeout)\n\n if not self._timed_queue_join(timeout - initial_timeout):\n pending = self._queue.qsize() + 1\n logger.error(\"flush timed out, dropped %s events\", pending)\n\n def submit(self, callback):\n # type: (Callable[[], None]) -> bool\n self._ensure_thread()\n try:\n self._queue.put_nowait(callback)\n return True\n except Full:\n return False\n\n def _target(self):\n # type: () -> None\n while True:\n callback = self._queue.get()\n try:\n if callback is _TERMINATOR:\n break\n try:\n callback()\n except Exception:\n logger.error(\"Failed processing job\", exc_info=True)\n finally:\n self._queue.task_done()\n sleep(0)\n", "path": "sentry_sdk/worker.py"}]}
| 1,827 | 109 |
gh_patches_debug_13158
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-2850
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_144 false negative after updating to AWS Provider 4.0
**Describe the issue**
After updating our AWS Provider to ~>4.0, we started getting a failure on `CKV_AWS_144` in our bucket module, despite having a properly configured `aws_s3_bucket_lifecycle_configuration` block.
**Examples**
Sample code:
```hcl
provider "aws" {
alias = "aws-primary"
region = "us-east-1"
}
provider "aws" {
alias = "aws-dr"
region = "us-west-2"
}
resource "aws_s3_bucket" "test_bucket" {
bucket = var.bucket_name
}
resource "aws_s3_bucket" "test_dr_bucket" {
provider = aws.aws-dr
bucket = "${var.bucket_name}-dr"
}
resource "aws_s3_bucket_versioning" "test_bucket_versioning" {
bucket = aws_s3_bucket.test_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket_versioning" "test_dr_bucket_versioning" {
provider = aws.aws-dr
bucket = aws_s3_bucket.test_dr_bucket.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_iam_role" "dr_replication" {
name_prefix = "replication"
description = "Allow S3 to assume the role for replication"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "s3ReplicationAssume",
"Effect": "Allow",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
resource "aws_iam_policy" "dr_replication" {
name_prefix = "replication"
description = "Allows reading for replication."
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.test_bucket.arn}"
]
},
{
"Action": [
"s3:GetObjectVersion",
"s3:GetObjectVersionForReplication",
"s3:GetObjectVersionAcl"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.test_bucket.arn}/*"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateTags",
"s3:ObjectOwnerOverrideToBucketOwner"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.test_dr_bucket.arn}/*"
}
]
}
POLICY
}
resource "aws_iam_policy_attachment" "dr_replication" {
name = "replication"
roles = [aws_iam_role.dr_replication.name]
policy_arn = aws_iam_policy.dr_replication.arn
}
resource "aws_s3_bucket_replication_configuration" "dr_bucket_replication" {
# Must have bucket versioning enabled first
depends_on = [
aws_s3_bucket_versioning.test_bucket_versioning,
aws_s3_bucket_versioning.test_dr_bucket_versioning,
]
role = aws_iam_role.dr_replication.arn
bucket = aws_s3_bucket.test_bucket.id
rule {
id = "entire_bucket"
status = "Enabled"
destination {
bucket = aws_s3_bucket.test_dr_bucket.arn
storage_class = "DEEP_ARCHIVE"
}
}
}
```
Expected: `CKV_AWS_144` will pass.
Actual:
```
Check: CKV_AWS_144: "Ensure that S3 bucket has cross-region replication enabled"
FAILED for resource: aws_s3_bucket.test_bucket
File: /dr_test.tf:11-13
Guide: https://docs.bridgecrew.io/docs/ensure-that-s3-bucket-has-cross-region-replication-enabled
11 | resource "aws_s3_bucket" "test_bucket" {
12 | bucket = var.bucket_name
13 | }
```
**Desktop (please complete the following information):**
- OS: MacOS 10.14.6
- Checkov Version 2.0.1074
**Additional context**
On the surface, this looks related to https://github.com/bridgecrewio/checkov/issues/2399 and https://github.com/bridgecrewio/checkov/pull/2724, but for the `CKV_AWS_144` rule.
</issue>
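To see why a key-based check misses the provider 4.x layout, here is a small stand-alone sketch (plain dictionaries, not checkov's internal representation; the shapes are illustrative assumptions) mimicking the lookup implied by the inspected key `replication_configuration/[0]/role` in the check shown below:

```python
# AWS provider 3.x: replication lives inside the aws_s3_bucket block.
bucket_conf_v3 = {
    "bucket": ["my-bucket"],
    "replication_configuration": [{"role": ["arn:aws:iam::123456789012:role/replication"]}],
}
# AWS provider 4.x: the bucket block is bare; replication moved to the separate
# aws_s3_bucket_replication_configuration resource.
bucket_conf_v4 = {"bucket": ["my-bucket"]}

def has_replication_role(conf: dict) -> bool:
    blocks = conf.get("replication_configuration") or []
    return bool(blocks and blocks[0].get("role"))

print(has_replication_role(bucket_conf_v3))  # True  -> check passes
print(has_replication_role(bucket_conf_v4))  # False -> reported as FAILED above
```

Linking the bucket to its separate replication resource needs a cross-resource lookup rather than a per-resource attribute check, which is presumably why the fix further down simply removes the attribute-based Python check.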
<code>
[start of checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py]
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3 from checkov.common.models.enums import CheckCategories
4
5
6 class S3BucketReplicationConfiguration(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that S3 bucket has cross-region replication enabled"
9 id = "CKV_AWS_144"
10 supported_resources = ['aws_s3_bucket']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "replication_configuration/[0]/role"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = S3BucketReplicationConfiguration()
22
[end of checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py b/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from checkov.common.models.consts import ANY_VALUE
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
-
-
-class S3BucketReplicationConfiguration(BaseResourceValueCheck):
- def __init__(self):
- name = "Ensure that S3 bucket has cross-region replication enabled"
- id = "CKV_AWS_144"
- supported_resources = ['aws_s3_bucket']
- categories = [CheckCategories.GENERAL_SECURITY]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self):
- return "replication_configuration/[0]/role"
-
- def get_expected_value(self):
- return ANY_VALUE
-
-
-check = S3BucketReplicationConfiguration()
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py b/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-from checkov.common.models.consts import ANY_VALUE\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class S3BucketReplicationConfiguration(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that S3 bucket has cross-region replication enabled\"\n- id = \"CKV_AWS_144\"\n- supported_resources = ['aws_s3_bucket']\n- categories = [CheckCategories.GENERAL_SECURITY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"replication_configuration/[0]/role\"\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n-\n-\n-check = S3BucketReplicationConfiguration()\n", "issue": "CKV_AWS_144 false negative after updating to AWS Provider 4.0 \n**Describe the issue**\r\nAfter updating our AWS Provider to ~>4.0, we started getting a failure on `CKV_AWS_144` in our bucket module, despite having a properly configured `aws_s3_bucket_lifecycle_configuration` block.\r\n\r\n**Examples**\r\nSample code:\r\n\r\n```hcl\r\nprovider \"aws\" {\r\n alias = \"aws-primary\"\r\n region = \"us-east-1\"\r\n}\r\n\r\nprovider \"aws\" {\r\n alias = \"aws-dr\"\r\n region = \"us-west-2\"\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"test_bucket\" {\r\n bucket = var.bucket_name\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"test_dr_bucket\" {\r\n provider = aws.aws-dr\r\n bucket = \"${var.bucket_name}-dr\"\r\n}\r\n\r\nresource \"aws_s3_bucket_versioning\" \"test_bucket_versioning\" {\r\n bucket = aws_s3_bucket.test_bucket.id\r\n versioning_configuration {\r\n status = \"Enabled\"\r\n }\r\n}\r\n\r\nresource \"aws_s3_bucket_versioning\" \"test_dr_bucket_versioning\" {\r\n provider = aws.aws-dr\r\n bucket = aws_s3_bucket.test_dr_bucket.id\r\n versioning_configuration {\r\n status = \"Enabled\"\r\n }\r\n}\r\n\r\nresource \"aws_iam_role\" \"dr_replication\" {\r\n name_prefix = \"replication\"\r\n description = \"Allow S3 to assume the role for replication\"\r\n\r\n assume_role_policy = <<POLICY\r\n{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Sid\": \"s3ReplicationAssume\",\r\n \"Effect\": \"Allow\",\r\n \"Principal\": {\r\n \"Service\": \"s3.amazonaws.com\"\r\n },\r\n \"Action\": \"sts:AssumeRole\"\r\n }\r\n ]\r\n}\r\nPOLICY\r\n}\r\n\r\nresource \"aws_iam_policy\" \"dr_replication\" {\r\n name_prefix = \"replication\"\r\n description = \"Allows reading for replication.\"\r\n\r\n policy = <<POLICY\r\n{\r\n \"Version\": \"2012-10-17\",\r\n \"Statement\": [\r\n {\r\n \"Action\": [\r\n \"s3:GetReplicationConfiguration\",\r\n \"s3:ListBucket\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": [\r\n \"${aws_s3_bucket.test_bucket.arn}\"\r\n ]\r\n },\r\n {\r\n \"Action\": [\r\n \"s3:GetObjectVersion\",\r\n \"s3:GetObjectVersionForReplication\",\r\n \"s3:GetObjectVersionAcl\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": [\r\n \"${aws_s3_bucket.test_bucket.arn}/*\"\r\n ]\r\n },\r\n {\r\n \"Action\": [\r\n \"s3:ReplicateObject\",\r\n \"s3:ReplicateTags\",\r\n \"s3:ObjectOwnerOverrideToBucketOwner\"\r\n ],\r\n \"Effect\": \"Allow\",\r\n \"Resource\": 
\"${aws_s3_bucket.test_dr_bucket.arn}/*\"\r\n }\r\n ]\r\n}\r\nPOLICY\r\n}\r\n\r\nresource \"aws_iam_policy_attachment\" \"dr_replication\" {\r\n name = \"replication\"\r\n roles = [aws_iam_role.dr_replication.name]\r\n policy_arn = aws_iam_policy.dr_replication.arn\r\n}\r\n\r\nresource \"aws_s3_bucket_replication_configuration\" \"dr_bucket_replication\" {\r\n\r\n # Must have bucket versioning enabled first\r\n depends_on = [\r\n aws_s3_bucket_versioning.test_bucket_versioning,\r\n aws_s3_bucket_versioning.test_dr_bucket_versioning,\r\n ]\r\n\r\n role = aws_iam_role.dr_replication.arn\r\n bucket = aws_s3_bucket.test_bucket.id\r\n\r\n rule {\r\n id = \"entire_bucket\"\r\n status = \"Enabled\"\r\n\r\n destination {\r\n bucket = aws_s3_bucket.test_dr_bucket.arn\r\n storage_class = \"DEEP_ARCHIVE\"\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\nExpected: `CKV_AWS_144` will pass.\r\n\r\nActual: \r\n```\r\nCheck: CKV_AWS_144: \"Ensure that S3 bucket has cross-region replication enabled\"\r\n FAILED for resource: aws_s3_bucket.test_bucket\r\n File: /dr_test.tf:11-13\r\n Guide: https://docs.bridgecrew.io/docs/ensure-that-s3-bucket-has-cross-region-replication-enabled\r\n\r\n 11 | resource \"aws_s3_bucket\" \"test_bucket\" {\r\n 12 | bucket = var.bucket_name\r\n 13 | }\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS 10.14.6\r\n - Checkov Version 2.0.1074\r\n\r\n**Additional context**\r\nOn the surface, this looks like related to https://github.com/bridgecrewio/checkov/issues/2399 and https://github.com/bridgecrewio/checkov/pull/2724, but to the `CKV_AWS_144` rule.\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass S3BucketReplicationConfiguration(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that S3 bucket has cross-region replication enabled\"\n id = \"CKV_AWS_144\"\n supported_resources = ['aws_s3_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"replication_configuration/[0]/role\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = S3BucketReplicationConfiguration()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketReplicationConfiguration.py"}]}
| 1,832 | 265 |
gh_patches_debug_38571
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-1659
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Save configuration" Improve UX
*Feature Request*
I have some time in the next week, so I may try to implement this depending on how many roadblocks I hit. Currently, to save a configuration, you are prompted to enter a directory. If it isn't valid, you have to scratch your head wondering what the exact paths are, because you can't discover any directory without exiting `archinstall`.
It would be great if there was a sort of tab completion, or even filesystem traversal similar to `ncdu` to find a directory to save the configuration in.
</issue>
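As a rough illustration of the requested tab completion (this is not archinstall's implementation; the names and behaviour here are assumptions, and it relies on GNU readline being available):

```python
import readline
from pathlib import Path

def _complete_dir(text: str, state: int):
    p = Path(text).expanduser() if text else Path(".")
    if not text or text.endswith("/"):
        directory, prefix = p, ""
    else:
        directory, prefix = p.parent, p.name
    try:
        matches = sorted(
            f"{child}/" for child in directory.glob(prefix + "*") if child.is_dir()
        )
    except OSError:
        matches = []
    return matches[state] if state < len(matches) else None

readline.set_completer_delims(" \t\n")
readline.set_completer(_complete_dir)
readline.parse_and_bind("tab: complete")  # libedit builds need "bind ^I rl_complete"

dest_path = Path(input("Enter a directory for the configuration(s) to be saved: ").strip())
```

The merged fix below takes a different route: it enumerates candidate directories with `find` and presents them in a selection menu, as the diff further down shows.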
<code>
[start of archinstall/lib/user_interaction/save_conf.py]
1 from __future__ import annotations
2
3 from pathlib import Path
4 from typing import Any, Dict, TYPE_CHECKING
5
6 from ..configuration import ConfigurationOutput
7 from ..menu import Menu
8 from ..menu.menu import MenuSelectionType
9 from ..output import log
10
11 if TYPE_CHECKING:
12 _: Any
13
14
15 def save_config(config: Dict):
16
17 def preview(selection: str):
18 if options['user_config'] == selection:
19 json_config = config_output.user_config_to_json()
20 return f'{config_output.user_configuration_file}\n{json_config}'
21 elif options['user_creds'] == selection:
22 if json_config := config_output.user_credentials_to_json():
23 return f'{config_output.user_credentials_file}\n{json_config}'
24 else:
25 return str(_('No configuration'))
26 elif options['disk_layout'] == selection:
27 if json_config := config_output.disk_layout_to_json():
28 return f'{config_output.disk_layout_file}\n{json_config}'
29 else:
30 return str(_('No configuration'))
31 elif options['all'] == selection:
32 output = f'{config_output.user_configuration_file}\n'
33 if json_config := config_output.user_credentials_to_json():
34 output += f'{config_output.user_credentials_file}\n'
35 if json_config := config_output.disk_layout_to_json():
36 output += f'{config_output.disk_layout_file}\n'
37 return output[:-1]
38 return None
39
40 config_output = ConfigurationOutput(config)
41
42 options = {
43 'user_config': str(_('Save user configuration')),
44 'user_creds': str(_('Save user credentials')),
45 'disk_layout': str(_('Save disk layout')),
46 'all': str(_('Save all'))
47 }
48
49 choice = Menu(
50 _('Choose which configuration to save'),
51 list(options.values()),
52 sort=False,
53 skip=True,
54 preview_size=0.75,
55 preview_command=preview
56 ).run()
57
58 if choice.type_ == MenuSelectionType.Skip:
59 return
60
61 while True:
62 path = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')
63 dest_path = Path(path)
64 if dest_path.exists() and dest_path.is_dir():
65 break
66 log(_('Not a valid directory: {}').format(dest_path), fg='red')
67
68 if options['user_config'] == choice.value:
69 config_output.save_user_config(dest_path)
70 elif options['user_creds'] == choice.value:
71 config_output.save_user_creds(dest_path)
72 elif options['disk_layout'] == choice.value:
73 config_output.save_disk_layout(dest_path)
74 elif options['all'] == choice.value:
75 config_output.save_user_config(dest_path)
76 config_output.save_user_creds(dest_path)
77 config_output.save_disk_layout(dest_path)
78
[end of archinstall/lib/user_interaction/save_conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/archinstall/lib/user_interaction/save_conf.py b/archinstall/lib/user_interaction/save_conf.py
--- a/archinstall/lib/user_interaction/save_conf.py
+++ b/archinstall/lib/user_interaction/save_conf.py
@@ -1,9 +1,12 @@
from __future__ import annotations
+import logging
+
from pathlib import Path
from typing import Any, Dict, TYPE_CHECKING
from ..configuration import ConfigurationOutput
+from ..general import SysCommand
from ..menu import Menu
from ..menu.menu import MenuSelectionType
from ..output import log
@@ -58,20 +61,75 @@
if choice.type_ == MenuSelectionType.Skip:
return
- while True:
- path = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')
- dest_path = Path(path)
- if dest_path.exists() and dest_path.is_dir():
- break
- log(_('Not a valid directory: {}').format(dest_path), fg='red')
-
- if options['user_config'] == choice.value:
- config_output.save_user_config(dest_path)
- elif options['user_creds'] == choice.value:
- config_output.save_user_creds(dest_path)
- elif options['disk_layout'] == choice.value:
- config_output.save_disk_layout(dest_path)
- elif options['all'] == choice.value:
- config_output.save_user_config(dest_path)
- config_output.save_user_creds(dest_path)
- config_output.save_disk_layout(dest_path)
+ dirs_to_exclude = [
+ '/bin',
+ '/dev',
+ '/lib',
+ '/lib64',
+ '/lost+found',
+ '/opt',
+ '/proc',
+ '/run',
+ '/sbin',
+ '/srv',
+ '/sys',
+ '/usr',
+ '/var',
+ ]
+ log(
+ _('When picking a directory to save configuration files to,'
+ ' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),
+ level=logging.DEBUG
+ )
+
+ log(_('Finding possible directories to save configuration files ...'), level=logging.INFO)
+
+ find_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '
+ file_picker_command = f'find / {find_exclude} -o -type d -print0'
+ possible_save_dirs = list(
+ filter(None, SysCommand(file_picker_command).decode().split('\x00'))
+ )
+
+ selection = Menu(
+ _('Select directory (or directories) for saving configuration files'),
+ possible_save_dirs,
+ multi=True,
+ skip=True,
+ allow_reset=False,
+ ).run()
+
+ match selection.type_:
+ case MenuSelectionType.Skip:
+ return
+ case _:
+ save_dirs = selection.value
+
+ prompt = _('Do you want to save {} configuration file(s) in the following locations?\n\n{}').format(
+ list(options.keys())[list(options.values()).index(choice.value)],
+ save_dirs
+ )
+ save_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()
+ if save_confirmation == Menu.no():
+ return
+
+ log(
+ _('Saving {} configuration files to {}').format(
+ list(options.keys())[list(options.values()).index(choice.value)],
+ save_dirs
+ ),
+ level=logging.DEBUG
+ )
+
+ if save_dirs is not None:
+ for save_dir_str in save_dirs:
+ save_dir = Path(save_dir_str)
+ if options['user_config'] == choice.value:
+ config_output.save_user_config(save_dir)
+ elif options['user_creds'] == choice.value:
+ config_output.save_user_creds(save_dir)
+ elif options['disk_layout'] == choice.value:
+ config_output.save_disk_layout(save_dir)
+ elif options['all'] == choice.value:
+ config_output.save_user_config(save_dir)
+ config_output.save_user_creds(save_dir)
+ config_output.save_disk_layout(save_dir)
|
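For reference, the `find` invocation assembled in the diff above expands roughly as follows (abbreviated exclude list; only the string construction is reproduced here):

```python
dirs_to_exclude = ["/proc", "/run", "/sys"]  # abbreviated
find_exclude = "-path " + " -prune -o -path ".join(dirs_to_exclude) + " -prune "
file_picker_command = f"find / {find_exclude} -o -type d -print0"
print(file_picker_command)
# find / -path /proc -prune -o -path /run -prune -o ... -o -type d -print0
```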
{"golden_diff": "diff --git a/archinstall/lib/user_interaction/save_conf.py b/archinstall/lib/user_interaction/save_conf.py\n--- a/archinstall/lib/user_interaction/save_conf.py\n+++ b/archinstall/lib/user_interaction/save_conf.py\n@@ -1,9 +1,12 @@\n from __future__ import annotations\n \n+import logging\n+\n from pathlib import Path\n from typing import Any, Dict, TYPE_CHECKING\n \n from ..configuration import ConfigurationOutput\n+from ..general import SysCommand\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n from ..output import log\n@@ -58,20 +61,75 @@\n \tif choice.type_ == MenuSelectionType.Skip:\n \t\treturn\n \n-\twhile True:\n-\t\tpath = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')\n-\t\tdest_path = Path(path)\n-\t\tif dest_path.exists() and dest_path.is_dir():\n-\t\t\tbreak\n-\t\tlog(_('Not a valid directory: {}').format(dest_path), fg='red')\n-\n-\tif options['user_config'] == choice.value:\n-\t\tconfig_output.save_user_config(dest_path)\n-\telif options['user_creds'] == choice.value:\n-\t\tconfig_output.save_user_creds(dest_path)\n-\telif options['disk_layout'] == choice.value:\n-\t\tconfig_output.save_disk_layout(dest_path)\n-\telif options['all'] == choice.value:\n-\t\tconfig_output.save_user_config(dest_path)\n-\t\tconfig_output.save_user_creds(dest_path)\n-\t\tconfig_output.save_disk_layout(dest_path)\n+\tdirs_to_exclude = [\n+\t\t'/bin',\n+\t\t'/dev',\n+\t\t'/lib',\n+\t\t'/lib64',\n+\t\t'/lost+found',\n+\t\t'/opt',\n+\t\t'/proc',\n+\t\t'/run',\n+\t\t'/sbin',\n+\t\t'/srv',\n+\t\t'/sys',\n+\t\t'/usr',\n+\t\t'/var',\n+\t]\n+\tlog(\n+\t\t_('When picking a directory to save configuration files to,'\n+\t\t' by default we will ignore the following folders: ') + ','.join(dirs_to_exclude),\n+\t\tlevel=logging.DEBUG\n+\t)\n+\n+\tlog(_('Finding possible directories to save configuration files ...'), level=logging.INFO)\n+\t\n+\tfind_exclude = '-path ' + ' -prune -o -path '.join(dirs_to_exclude) + ' -prune '\n+\tfile_picker_command = f'find / {find_exclude} -o -type d -print0'\n+\tpossible_save_dirs = list(\n+\t\tfilter(None, SysCommand(file_picker_command).decode().split('\\x00'))\n+\t)\n+\n+\tselection = Menu(\n+\t\t_('Select directory (or directories) for saving configuration files'),\n+\t\tpossible_save_dirs,\n+\t\tmulti=True,\n+\t\tskip=True,\n+\t\tallow_reset=False,\n+\t).run()\n+\n+\tmatch selection.type_:\n+\t\tcase MenuSelectionType.Skip:\n+\t\t\treturn\n+\t\tcase _:\n+\t\t\tsave_dirs = selection.value\n+\n+\tprompt = _('Do you want to save {} configuration file(s) in the following locations?\\n\\n{}').format(\n+\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n+\t\tsave_dirs\n+\t)\n+\tsave_confirmation = Menu(prompt, Menu.yes_no(), default_option=Menu.yes()).run()\n+\tif save_confirmation == Menu.no():\n+\t\treturn\n+\t\n+\tlog(\n+\t\t_('Saving {} configuration files to {}').format(\n+\t\t\tlist(options.keys())[list(options.values()).index(choice.value)],\n+\t\t\tsave_dirs\n+\t\t),\n+\t\tlevel=logging.DEBUG\n+\t)\n+\t\n+\tif save_dirs is not None:\n+\t\tfor save_dir_str in save_dirs:\n+\t\t\tsave_dir = Path(save_dir_str)\n+\t\t\tif options['user_config'] == choice.value:\n+\t\t\t\tconfig_output.save_user_config(save_dir)\n+\t\t\telif options['user_creds'] == choice.value:\n+\t\t\t\tconfig_output.save_user_creds(save_dir)\n+\t\t\telif options['disk_layout'] == choice.value:\n+\t\t\t\tconfig_output.save_disk_layout(save_dir)\n+\t\t\telif options['all'] == 
choice.value:\n+\t\t\t\tconfig_output.save_user_config(save_dir)\n+\t\t\t\tconfig_output.save_user_creds(save_dir)\n+\t\t\t\tconfig_output.save_disk_layout(save_dir)\n", "issue": "\"Save configuration\" Improve UX\n*Feature Request*\r\n\r\nI have some time in the next week, I may try and implement this depending on how many roadblocks I hit. Currently, to save a configuration, you are prompted to enter a directory. If it isn't valid, you have to scratch your head wondering what the exact paths are, because you can't discover any directory without exiting `archinstall`. \r\n\r\nIt would be great if there was a sort of tab completion, or even filesystem traversal similar to `ncdu` to find a directory to save the configuration in.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any, Dict, TYPE_CHECKING\n\nfrom ..configuration import ConfigurationOutput\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef save_config(config: Dict):\n\n\tdef preview(selection: str):\n\t\tif options['user_config'] == selection:\n\t\t\tjson_config = config_output.user_config_to_json()\n\t\t\treturn f'{config_output.user_configuration_file}\\n{json_config}'\n\t\telif options['user_creds'] == selection:\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\treturn f'{config_output.user_credentials_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['disk_layout'] == selection:\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\treturn f'{config_output.disk_layout_file}\\n{json_config}'\n\t\t\telse:\n\t\t\t\treturn str(_('No configuration'))\n\t\telif options['all'] == selection:\n\t\t\toutput = f'{config_output.user_configuration_file}\\n'\n\t\t\tif json_config := config_output.user_credentials_to_json():\n\t\t\t\toutput += f'{config_output.user_credentials_file}\\n'\n\t\t\tif json_config := config_output.disk_layout_to_json():\n\t\t\t\toutput += f'{config_output.disk_layout_file}\\n'\n\t\t\treturn output[:-1]\n\t\treturn None\n\n\tconfig_output = ConfigurationOutput(config)\n\n\toptions = {\n\t\t'user_config': str(_('Save user configuration')),\n\t\t'user_creds': str(_('Save user credentials')),\n\t\t'disk_layout': str(_('Save disk layout')),\n\t\t'all': str(_('Save all'))\n\t}\n\n\tchoice = Menu(\n\t\t_('Choose which configuration to save'),\n\t\tlist(options.values()),\n\t\tsort=False,\n\t\tskip=True,\n\t\tpreview_size=0.75,\n\t\tpreview_command=preview\n\t).run()\n\n\tif choice.type_ == MenuSelectionType.Skip:\n\t\treturn\n\n\twhile True:\n\t\tpath = input(_('Enter a directory for the configuration(s) to be saved: ')).strip(' ')\n\t\tdest_path = Path(path)\n\t\tif dest_path.exists() and dest_path.is_dir():\n\t\t\tbreak\n\t\tlog(_('Not a valid directory: {}').format(dest_path), fg='red')\n\n\tif options['user_config'] == choice.value:\n\t\tconfig_output.save_user_config(dest_path)\n\telif options['user_creds'] == choice.value:\n\t\tconfig_output.save_user_creds(dest_path)\n\telif options['disk_layout'] == choice.value:\n\t\tconfig_output.save_disk_layout(dest_path)\n\telif options['all'] == choice.value:\n\t\tconfig_output.save_user_config(dest_path)\n\t\tconfig_output.save_user_creds(dest_path)\n\t\tconfig_output.save_disk_layout(dest_path)\n", "path": "archinstall/lib/user_interaction/save_conf.py"}]}
| 1,395 | 923 |
gh_patches_debug_14642
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-6819
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mitmdump does not exit
#### Problem Description
Mitmdump does not exit automatically when executing:
`mitmdump -nr infile -w outfile
`
Until version 10.0.0 this was working properly: when running mitmdump with "-n", the process finished automatically once the outfile was written.
#### Steps to reproduce the behavior:
1. Generate a mitm file
2. Execute mitmdump -nr infile -w outfile
#### System Information
Mitmproxy: 10.3.0 binary
Python: 3.12.3
OpenSSL: OpenSSL 3.2.1 30 Jan 2024
Platform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35
</issue>
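The fix shown later in this record hinges on how the add-on reports whether it is still reading. A self-contained asyncio sketch of that pattern (illustrative only, not mitmproxy code): report a background read task as finished once it is done, instead of relying on the reference being cleared.

```python
import asyncio

class Reader:
    def __init__(self) -> None:
        self._read_task: asyncio.Task | None = None

    async def _do_read(self) -> None:
        await asyncio.sleep(0.1)  # stand-in for reading flows from a file

    def start(self) -> None:
        self._read_task = asyncio.create_task(self._do_read())

    def reading(self) -> bool:
        # Completed tasks no longer count as pending work.
        return bool(self._read_task and not self._read_task.done())

async def main() -> None:
    r = Reader()
    r.start()
    print(r.reading())   # True while the read is in flight
    await asyncio.sleep(0.2)
    print(r.reading())   # False once it has finished

asyncio.run(main())
```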
<code>
[start of mitmproxy/addons/readfile.py]
1 import asyncio
2 import logging
3 import os.path
4 import sys
5 from typing import BinaryIO
6 from typing import Optional
7
8 from mitmproxy import command
9 from mitmproxy import ctx
10 from mitmproxy import exceptions
11 from mitmproxy import flowfilter
12 from mitmproxy import io
13
14 logger = logging.getLogger(__name__)
15
16
17 class ReadFile:
18 """
19 An addon that handles reading from file on startup.
20 """
21
22 def __init__(self):
23 self.filter = None
24 self._read_task: asyncio.Task | None = None
25
26 def load(self, loader):
27 loader.add_option("rfile", Optional[str], None, "Read flows from file.")
28 loader.add_option(
29 "readfile_filter", Optional[str], None, "Read only matching flows."
30 )
31
32 def configure(self, updated):
33 if "readfile_filter" in updated:
34 if ctx.options.readfile_filter:
35 try:
36 self.filter = flowfilter.parse(ctx.options.readfile_filter)
37 except ValueError as e:
38 raise exceptions.OptionsError(str(e)) from e
39 else:
40 self.filter = None
41
42 async def load_flows(self, fo: BinaryIO) -> int:
43 cnt = 0
44 freader = io.FlowReader(fo)
45 try:
46 for flow in freader.stream():
47 if self.filter and not self.filter(flow):
48 continue
49 await ctx.master.load_flow(flow)
50 cnt += 1
51 except (OSError, exceptions.FlowReadException) as e:
52 if cnt:
53 logging.warning("Flow file corrupted - loaded %i flows." % cnt)
54 else:
55 logging.error("Flow file corrupted.")
56 raise exceptions.FlowReadException(str(e)) from e
57 else:
58 return cnt
59
60 async def load_flows_from_path(self, path: str) -> int:
61 path = os.path.expanduser(path)
62 try:
63 with open(path, "rb") as f:
64 return await self.load_flows(f)
65 except OSError as e:
66 logging.error(f"Cannot load flows: {e}")
67 raise exceptions.FlowReadException(str(e)) from e
68
69 async def doread(self, rfile: str) -> None:
70 try:
71 await self.load_flows_from_path(rfile)
72 except exceptions.FlowReadException as e:
73 logger.exception(f"Failed to read {ctx.options.rfile}: {e}")
74 finally:
75 self._read_task = None
76
77 def running(self):
78 if ctx.options.rfile:
79 self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))
80
81 @command.command("readfile.reading")
82 def reading(self) -> bool:
83 return bool(self._read_task)
84
85
86 class ReadFileStdin(ReadFile):
87 """Support the special case of "-" for reading from stdin"""
88
89 async def load_flows_from_path(self, path: str) -> int:
90 if path == "-": # pragma: no cover
91 # Need to think about how to test this. This function is scheduled
92 # onto the event loop, where a sys.stdin mock has no effect.
93 return await self.load_flows(sys.stdin.buffer)
94 else:
95 return await super().load_flows_from_path(path)
96
[end of mitmproxy/addons/readfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/addons/readfile.py b/mitmproxy/addons/readfile.py
--- a/mitmproxy/addons/readfile.py
+++ b/mitmproxy/addons/readfile.py
@@ -71,8 +71,6 @@
await self.load_flows_from_path(rfile)
except exceptions.FlowReadException as e:
logger.exception(f"Failed to read {ctx.options.rfile}: {e}")
- finally:
- self._read_task = None
def running(self):
if ctx.options.rfile:
@@ -80,7 +78,7 @@
@command.command("readfile.reading")
def reading(self) -> bool:
- return bool(self._read_task)
+ return bool(self._read_task and not self._read_task.done())
class ReadFileStdin(ReadFile):
|
{"golden_diff": "diff --git a/mitmproxy/addons/readfile.py b/mitmproxy/addons/readfile.py\n--- a/mitmproxy/addons/readfile.py\n+++ b/mitmproxy/addons/readfile.py\n@@ -71,8 +71,6 @@\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n- finally:\n- self._read_task = None\n \n def running(self):\n if ctx.options.rfile:\n@@ -80,7 +78,7 @@\n \n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n- return bool(self._read_task)\n+ return bool(self._read_task and not self._read_task.done())\n \n \n class ReadFileStdin(ReadFile):\n", "issue": "Mitmdump does not exit\n#### Problem Description\r\nMitmdump does not exit automatically when executing:\r\n\r\n`mitmdump -nr infile -w outfile\r\n`\r\n\r\nUntil version 10.0.0 it was working properly and when running mitmdump with \"-n\" the process finished automatically once the outfile was written.\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Generate a mitm file\r\n2. Execute mitmdump -nr infile -w outfile\r\n\r\n#### System Information\r\nMitmproxy: 10.3.0 binary\r\nPython: 3.12.3\r\nOpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\nPlatform: Linux-6.5.0-27-generic-x86_64-with-glibc2.35\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nimport os.path\nimport sys\nfrom typing import BinaryIO\nfrom typing import Optional\n\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReadFile:\n \"\"\"\n An addon that handles reading from file on startup.\n \"\"\"\n\n def __init__(self):\n self.filter = None\n self._read_task: asyncio.Task | None = None\n\n def load(self, loader):\n loader.add_option(\"rfile\", Optional[str], None, \"Read flows from file.\")\n loader.add_option(\n \"readfile_filter\", Optional[str], None, \"Read only matching flows.\"\n )\n\n def configure(self, updated):\n if \"readfile_filter\" in updated:\n if ctx.options.readfile_filter:\n try:\n self.filter = flowfilter.parse(ctx.options.readfile_filter)\n except ValueError as e:\n raise exceptions.OptionsError(str(e)) from e\n else:\n self.filter = None\n\n async def load_flows(self, fo: BinaryIO) -> int:\n cnt = 0\n freader = io.FlowReader(fo)\n try:\n for flow in freader.stream():\n if self.filter and not self.filter(flow):\n continue\n await ctx.master.load_flow(flow)\n cnt += 1\n except (OSError, exceptions.FlowReadException) as e:\n if cnt:\n logging.warning(\"Flow file corrupted - loaded %i flows.\" % cnt)\n else:\n logging.error(\"Flow file corrupted.\")\n raise exceptions.FlowReadException(str(e)) from e\n else:\n return cnt\n\n async def load_flows_from_path(self, path: str) -> int:\n path = os.path.expanduser(path)\n try:\n with open(path, \"rb\") as f:\n return await self.load_flows(f)\n except OSError as e:\n logging.error(f\"Cannot load flows: {e}\")\n raise exceptions.FlowReadException(str(e)) from e\n\n async def doread(self, rfile: str) -> None:\n try:\n await self.load_flows_from_path(rfile)\n except exceptions.FlowReadException as e:\n logger.exception(f\"Failed to read {ctx.options.rfile}: {e}\")\n finally:\n self._read_task = None\n\n def running(self):\n if ctx.options.rfile:\n self._read_task = asyncio.create_task(self.doread(ctx.options.rfile))\n\n @command.command(\"readfile.reading\")\n def reading(self) -> bool:\n return bool(self._read_task)\n\n\nclass ReadFileStdin(ReadFile):\n 
\"\"\"Support the special case of \"-\" for reading from stdin\"\"\"\n\n async def load_flows_from_path(self, path: str) -> int:\n if path == \"-\": # pragma: no cover\n # Need to think about how to test this. This function is scheduled\n # onto the event loop, where a sys.stdin mock has no effect.\n return await self.load_flows(sys.stdin.buffer)\n else:\n return await super().load_flows_from_path(path)\n", "path": "mitmproxy/addons/readfile.py"}]}
| 1,587 | 184 |
gh_patches_debug_33189
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-1221
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistent capitalization in `plot_hdi` docstring
**Describe the bug**
Docs have inconsistent capitalization

**To Reproduce**
https://arviz-devs.github.io/arviz/generated/arviz.plot_hdi.html#arviz.plot_hdi
**Expected behavior**
Consistency across arg docstrings
</issue>
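For context, a minimal usage sketch of the function whose docstring is being cleaned up (synthetic data; the call follows the signature in the listing below):

```python
import numpy as np
import arviz as az

x = np.linspace(0, 1, 50)
# y is assumed to have shape (chain, draw, *x.shape), as the docstring states.
y = 2 + 3 * x + np.random.normal(0, 0.5, size=(4, 500, 50))
az.plot_hdi(x, y, hdi_prob=0.94, color="C1", smooth=True)
```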
<code>
[start of arviz/plots/hdiplot.py]
1 """Plot highest density intervals for regression data."""
2 import warnings
3
4 import numpy as np
5 from scipy.interpolate import griddata
6 from scipy.signal import savgol_filter
7
8 from ..stats import hdi
9 from .plot_utils import get_plotting_function, matplotlib_kwarg_dealiaser
10 from ..rcparams import rcParams
11 from ..utils import credible_interval_warning
12
13
14 def plot_hdi(
15 x,
16 y,
17 hdi_prob=None,
18 color="C1",
19 circular=False,
20 smooth=True,
21 smooth_kwargs=None,
22 fill_kwargs=None,
23 plot_kwargs=None,
24 ax=None,
25 backend=None,
26 backend_kwargs=None,
27 show=None,
28 credible_interval=None,
29 ):
30 r"""
31 Plot hdi intervals for regression data.
32
33 Parameters
34 ----------
35 x : array-like
36 Values to plot
37 y : array-like
38 values from which to compute the hdi. Assumed shape (chain, draw, \*shape).
39 hdi_prob : float, optional
40 Probability for the highest density interval. Defaults to 0.94.
41 color : str
42 Color used for the limits of the hdi and fill. Should be a valid matplotlib color
43 circular : bool, optional
44 Whether to compute the hdi taking into account `x` is a circular variable
45 (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
46 smooth : boolean
47 If True the result will be smoothed by first computing a linear interpolation of the data
48 over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
49 Defaults to True.
50 smooth_kwargs : dict, optional
51 Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
52 details
53 fill_kwargs : dict
54 Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
55 plot_kwargs : dict
56 Keywords passed to hdi limits
57 ax: axes, optional
58 Matplotlib axes or bokeh figures.
59 backend: str, optional
60 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
61 backend_kwargs: bool, optional
62 These are kwargs specific to the backend being used. For additional documentation
63 check the plotting method of the backend.
64 show : bool, optional
65 Call backend show function.
66 credible_interval: float, optional
67 deprecated: Please see hdi_prob
68
69 Returns
70 -------
71 axes : matplotlib axes or bokeh figures
72 """
73 if credible_interval:
74 hdi_prob = credible_interval_warning(credible_interval, hdi_prob)
75
76 plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, "plot")
77 plot_kwargs.setdefault("color", color)
78 plot_kwargs.setdefault("alpha", 0)
79
80 fill_kwargs = matplotlib_kwarg_dealiaser(fill_kwargs, "hexbin")
81 fill_kwargs.setdefault("color", color)
82 fill_kwargs.setdefault("alpha", 0.5)
83
84 x = np.asarray(x)
85 y = np.asarray(y)
86
87 x_shape = x.shape
88 y_shape = y.shape
89 if y_shape[-len(x_shape) :] != x_shape:
90 msg = "Dimension mismatch for x: {} and y: {}."
91 msg += " y-dimensions should be (chain, draw, *x.shape) or"
92 msg += " (draw, *x.shape)"
93 raise TypeError(msg.format(x_shape, y_shape))
94
95 if len(y_shape[: -len(x_shape)]) > 1:
96 new_shape = tuple([-1] + list(x_shape))
97 y = y.reshape(new_shape)
98
99 if hdi_prob is None:
100 hdi_prob = rcParams["stats.hdi_prob"]
101 else:
102 if not 1 >= hdi_prob > 0:
103 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
104
105 hdi_ = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False)
106
107 if smooth:
108 if smooth_kwargs is None:
109 smooth_kwargs = {}
110 smooth_kwargs.setdefault("window_length", 55)
111 smooth_kwargs.setdefault("polyorder", 2)
112 x_data = np.linspace(x.min(), x.max(), 200)
113 x_data[0] = (x_data[0] + x_data[1]) / 2
114 hdi_interp = griddata(x, hdi_, x_data)
115 y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs)
116 else:
117 idx = np.argsort(x)
118 x_data = x[idx]
119 y_data = hdi_[idx]
120
121 hdiplot_kwargs = dict(
122 ax=ax,
123 x_data=x_data,
124 y_data=y_data,
125 plot_kwargs=plot_kwargs,
126 fill_kwargs=fill_kwargs,
127 backend_kwargs=backend_kwargs,
128 show=show,
129 )
130
131 if backend is None:
132 backend = rcParams["plot.backend"]
133 backend = backend.lower()
134
135 # TODO: Add backend kwargs
136 plot = get_plotting_function("plot_hdi", "hdiplot", backend)
137 ax = plot(**hdiplot_kwargs)
138 return ax
139
140
141 def plot_hpd(*args, **kwargs): # noqa: D103
142 warnings.warn("plot_hdi has been deprecated, please use plot_hdi", DeprecationWarning)
143 return plot_hdi(*args, **kwargs)
144
[end of arviz/plots/hdiplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/arviz/plots/hdiplot.py b/arviz/plots/hdiplot.py
--- a/arviz/plots/hdiplot.py
+++ b/arviz/plots/hdiplot.py
@@ -33,13 +33,13 @@
Parameters
----------
x : array-like
- Values to plot
+ Values to plot.
y : array-like
- values from which to compute the hdi. Assumed shape (chain, draw, \*shape).
+ Values from which to compute the hdi. Assumed shape (chain, draw, \*shape).
hdi_prob : float, optional
Probability for the highest density interval. Defaults to 0.94.
color : str
- Color used for the limits of the hdi and fill. Should be a valid matplotlib color
+ Color used for the limits of the hdi and fill. Should be a valid matplotlib color.
circular : bool, optional
Whether to compute the hdi taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
@@ -49,11 +49,11 @@
Defaults to True.
smooth_kwargs : dict, optional
Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for
- details
+ details.
fill_kwargs : dict
Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
- Keywords passed to hdi limits
+ Keywords passed to hdi limits.
ax: axes, optional
Matplotlib axes or bokeh figures.
backend: str, optional
@@ -64,7 +64,7 @@
show : bool, optional
Call backend show function.
credible_interval: float, optional
- deprecated: Please see hdi_prob
+ Deprecated: Please see hdi_prob
Returns
-------
|
{"golden_diff": "diff --git a/arviz/plots/hdiplot.py b/arviz/plots/hdiplot.py\n--- a/arviz/plots/hdiplot.py\n+++ b/arviz/plots/hdiplot.py\n@@ -33,13 +33,13 @@\n Parameters\n ----------\n x : array-like\n- Values to plot\n+ Values to plot.\n y : array-like\n- values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n+ Values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n hdi_prob : float, optional\n Probability for the highest density interval. Defaults to 0.94.\n color : str\n- Color used for the limits of the hdi and fill. Should be a valid matplotlib color\n+ Color used for the limits of the hdi and fill. Should be a valid matplotlib color.\n circular : bool, optional\n Whether to compute the hdi taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n@@ -49,11 +49,11 @@\n Defaults to True.\n smooth_kwargs : dict, optional\n Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for\n- details\n+ details.\n fill_kwargs : dict\n Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).\n plot_kwargs : dict\n- Keywords passed to hdi limits\n+ Keywords passed to hdi limits.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n@@ -64,7 +64,7 @@\n show : bool, optional\n Call backend show function.\n credible_interval: float, optional\n- deprecated: Please see hdi_prob\n+ Deprecated: Please see hdi_prob\n \n Returns\n -------\n", "issue": "Inconsistent capitalization in `plot_hdi` docstring\n**Describe the bug**\r\nDocs have inconsistent capitalization\r\n\r\n\r\n\r\n**To Reproduce**\r\nhttps://arviz-devs.github.io/arviz/generated/arviz.plot_hdi.html#arviz.plot_hdi\r\n\r\n**Expected behavior**\r\nConsistency across arg docstrings\r\n\n", "before_files": [{"content": "\"\"\"Plot highest density intervals for regression data.\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.interpolate import griddata\nfrom scipy.signal import savgol_filter\n\nfrom ..stats import hdi\nfrom .plot_utils import get_plotting_function, matplotlib_kwarg_dealiaser\nfrom ..rcparams import rcParams\nfrom ..utils import credible_interval_warning\n\n\ndef plot_hdi(\n x,\n y,\n hdi_prob=None,\n color=\"C1\",\n circular=False,\n smooth=True,\n smooth_kwargs=None,\n fill_kwargs=None,\n plot_kwargs=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n credible_interval=None,\n):\n r\"\"\"\n Plot hdi intervals for regression data.\n\n Parameters\n ----------\n x : array-like\n Values to plot\n y : array-like\n values from which to compute the hdi. Assumed shape (chain, draw, \\*shape).\n hdi_prob : float, optional\n Probability for the highest density interval. Defaults to 0.94.\n color : str\n Color used for the limits of the hdi and fill. Should be a valid matplotlib color\n circular : bool, optional\n Whether to compute the hdi taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n smooth : boolean\n If True the result will be smoothed by first computing a linear interpolation of the data\n over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.\n Defaults to True.\n smooth_kwargs : dict, optional\n Additional keywords modifying the Savitzky-Golay filter. 
See Scipy's documentation for\n details\n fill_kwargs : dict\n Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).\n plot_kwargs : dict\n Keywords passed to hdi limits\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n credible_interval: float, optional\n deprecated: Please see hdi_prob\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n \"\"\"\n if credible_interval:\n hdi_prob = credible_interval_warning(credible_interval, hdi_prob)\n\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"plot\")\n plot_kwargs.setdefault(\"color\", color)\n plot_kwargs.setdefault(\"alpha\", 0)\n\n fill_kwargs = matplotlib_kwarg_dealiaser(fill_kwargs, \"hexbin\")\n fill_kwargs.setdefault(\"color\", color)\n fill_kwargs.setdefault(\"alpha\", 0.5)\n\n x = np.asarray(x)\n y = np.asarray(y)\n\n x_shape = x.shape\n y_shape = y.shape\n if y_shape[-len(x_shape) :] != x_shape:\n msg = \"Dimension mismatch for x: {} and y: {}.\"\n msg += \" y-dimensions should be (chain, draw, *x.shape) or\"\n msg += \" (draw, *x.shape)\"\n raise TypeError(msg.format(x_shape, y_shape))\n\n if len(y_shape[: -len(x_shape)]) > 1:\n new_shape = tuple([-1] + list(x_shape))\n y = y.reshape(new_shape)\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n hdi_ = hdi(y, hdi_prob=hdi_prob, circular=circular, multimodal=False)\n\n if smooth:\n if smooth_kwargs is None:\n smooth_kwargs = {}\n smooth_kwargs.setdefault(\"window_length\", 55)\n smooth_kwargs.setdefault(\"polyorder\", 2)\n x_data = np.linspace(x.min(), x.max(), 200)\n x_data[0] = (x_data[0] + x_data[1]) / 2\n hdi_interp = griddata(x, hdi_, x_data)\n y_data = savgol_filter(hdi_interp, axis=0, **smooth_kwargs)\n else:\n idx = np.argsort(x)\n x_data = x[idx]\n y_data = hdi_[idx]\n\n hdiplot_kwargs = dict(\n ax=ax,\n x_data=x_data,\n y_data=y_data,\n plot_kwargs=plot_kwargs,\n fill_kwargs=fill_kwargs,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_hdi\", \"hdiplot\", backend)\n ax = plot(**hdiplot_kwargs)\n return ax\n\n\ndef plot_hpd(*args, **kwargs): # noqa: D103\n warnings.warn(\"plot_hdi has been deprecated, please use plot_hdi\", DeprecationWarning)\n return plot_hdi(*args, **kwargs)\n", "path": "arviz/plots/hdiplot.py"}]}
| 2,167 | 444 |
gh_patches_debug_13929
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1740
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Books on multiple shelves duplicated in the "all books" view
<img width="1172" alt="Screen Shot 2021-12-02 at 11 47 40 AM" src="https://user-images.githubusercontent.com/1807695/144495864-f94425a7-b11f-4df4-8ab7-c850d5a17996.png">
</issue>
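The duplication here looks like the classic one-to-many join problem, as the fix further down suggests: a book shelved twice matches two `shelfbook` rows, and once a per-shelf column such as `shelved_date` is selected, `DISTINCT` can no longer collapse them. A stand-alone illustration with sqlite3 (simplified schema, not bookwyrm's actual models):

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript(
    """
    CREATE TABLE book (id INTEGER PRIMARY KEY, title TEXT);
    CREATE TABLE shelfbook (book_id INTEGER, shelf TEXT, shelved_date TEXT);
    INSERT INTO book VALUES (1, 'Dune');
    INSERT INTO shelfbook VALUES (1, 'to-read', '2021-01-01'), (1, 'favourites', '2021-06-01');
    """
)

with_date = con.execute(
    "SELECT DISTINCT book.id, title, shelved_date "
    "FROM book JOIN shelfbook ON shelfbook.book_id = book.id"
).fetchall()
print(with_date)     # two rows for the same book, because the dates differ

without_date = con.execute(
    "SELECT DISTINCT book.id, title "
    "FROM book JOIN shelfbook ON shelfbook.book_id = book.id"
).fetchall()
print(without_date)  # a single row once the per-shelf column is dropped
```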
<code>
[start of bookwyrm/views/shelf/shelf.py]
1 """ shelf views """
2 from collections import namedtuple
3
4 from django.db.models import OuterRef, Subquery, F
5 from django.contrib.auth.decorators import login_required
6 from django.core.paginator import Paginator
7 from django.http import HttpResponseBadRequest
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.utils.decorators import method_decorator
11 from django.utils.translation import gettext_lazy as _
12 from django.views import View
13
14 from bookwyrm import forms, models
15 from bookwyrm.activitypub import ActivitypubResponse
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.views.helpers import is_api_request, get_user_from_username
18
19
20 # pylint: disable=no-self-use
21 class Shelf(View):
22 """shelf page"""
23
24 def get(self, request, username, shelf_identifier=None):
25 """display a shelf"""
26 user = get_user_from_username(request.user, username)
27
28 is_self = user == request.user
29
30 if is_self:
31 shelves = user.shelf_set.all()
32 else:
33 shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()
34
35 # get the shelf and make sure the logged in user should be able to see it
36 if shelf_identifier:
37 shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)
38 shelf.raise_visible_to_user(request.user)
39 books = shelf.books
40 else:
41 # this is a constructed "all books" view, with a fake "shelf" obj
42 FakeShelf = namedtuple(
43 "Shelf", ("identifier", "name", "user", "books", "privacy")
44 )
45 books = (
46 models.Edition.viewer_aware_objects(request.user)
47 .filter(
48 # privacy is ensured because the shelves are already filtered above
49 shelfbook__shelf__in=shelves
50 )
51 .distinct()
52 )
53 shelf = FakeShelf("all", _("All books"), user, books, "public")
54
55 if is_api_request(request) and shelf_identifier:
56 return ActivitypubResponse(shelf.to_activity(**request.GET))
57
58 reviews = models.Review.objects
59 if not is_self:
60 reviews = models.Review.privacy_filter(request.user)
61
62 reviews = reviews.filter(
63 user=user,
64 rating__isnull=False,
65 book__id=OuterRef("id"),
66 deleted=False,
67 ).order_by("-published_date")
68
69 reading = models.ReadThrough.objects
70
71 reading = reading.filter(user=user, book__id=OuterRef("id")).order_by(
72 "start_date"
73 )
74
75 books = books.annotate(
76 rating=Subquery(reviews.values("rating")[:1]),
77 shelved_date=F("shelfbook__shelved_date"),
78 start_date=Subquery(reading.values("start_date")[:1]),
79 finish_date=Subquery(reading.values("finish_date")[:1]),
80 author=Subquery(
81 models.Book.objects.filter(id=OuterRef("id")).values("authors__name")[
82 :1
83 ]
84 ),
85 ).prefetch_related("authors")
86
87 books = sort_books(books, request.GET.get("sort"))
88
89 paginated = Paginator(
90 books,
91 PAGE_LENGTH,
92 )
93 page = paginated.get_page(request.GET.get("page"))
94 data = {
95 "user": user,
96 "is_self": is_self,
97 "shelves": shelves,
98 "shelf": shelf,
99 "books": page,
100 "edit_form": forms.ShelfForm(instance=shelf if shelf_identifier else None),
101 "create_form": forms.ShelfForm(),
102 "sort": request.GET.get("sort"),
103 "page_range": paginated.get_elided_page_range(
104 page.number, on_each_side=2, on_ends=1
105 ),
106 }
107
108 return TemplateResponse(request, "shelf/shelf.html", data)
109
110 @method_decorator(login_required, name="dispatch")
111 # pylint: disable=unused-argument
112 def post(self, request, username, shelf_identifier):
113 """edit a shelf"""
114 user = get_user_from_username(request.user, username)
115 shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)
116 shelf.raise_not_editable(request.user)
117
118 # you can't change the name of the default shelves
119 if not shelf.editable and request.POST.get("name") != shelf.name:
120 return HttpResponseBadRequest()
121
122 form = forms.ShelfForm(request.POST, instance=shelf)
123 if not form.is_valid():
124 return redirect(shelf.local_path)
125 shelf = form.save()
126 return redirect(shelf.local_path)
127
128
129 def sort_books(books, sort):
130 """Books in shelf sorting"""
131 sort_fields = [
132 "title",
133 "author",
134 "shelved_date",
135 "start_date",
136 "finish_date",
137 "rating",
138 ]
139
140 if sort in sort_fields:
141 books = books.order_by(sort)
142 elif sort and sort[1:] in sort_fields:
143 books = books.order_by(F(sort[1:]).desc(nulls_last=True))
144 else:
145 books = books.order_by("-shelved_date")
146 return books
147
[end of bookwyrm/views/shelf/shelf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/views/shelf/shelf.py b/bookwyrm/views/shelf/shelf.py
--- a/bookwyrm/views/shelf/shelf.py
+++ b/bookwyrm/views/shelf/shelf.py
@@ -72,9 +72,13 @@
"start_date"
)
+ if shelf_identifier:
+ books = books.annotate(shelved_date=F("shelfbook__shelved_date"))
+ else:
+ # sorting by shelved date will cause duplicates in the "all books" view
+ books = books.annotate(shelved_date=F("updated_date"))
books = books.annotate(
rating=Subquery(reviews.values("rating")[:1]),
- shelved_date=F("shelfbook__shelved_date"),
start_date=Subquery(reading.values("start_date")[:1]),
finish_date=Subquery(reading.values("finish_date")[:1]),
author=Subquery(
|
{"golden_diff": "diff --git a/bookwyrm/views/shelf/shelf.py b/bookwyrm/views/shelf/shelf.py\n--- a/bookwyrm/views/shelf/shelf.py\n+++ b/bookwyrm/views/shelf/shelf.py\n@@ -72,9 +72,13 @@\n \"start_date\"\n )\n \n+ if shelf_identifier:\n+ books = books.annotate(shelved_date=F(\"shelfbook__shelved_date\"))\n+ else:\n+ # sorting by shelved date will cause duplicates in the \"all books\" view\n+ books = books.annotate(shelved_date=F(\"updated_date\"))\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n- shelved_date=F(\"shelfbook__shelved_date\"),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n", "issue": "Books on multiple shelves duplicated in the \"all books\" view\n<img width=\"1172\" alt=\"Screen Shot 2021-12-02 at 11 47 40 AM\" src=\"https://user-images.githubusercontent.com/1807695/144495864-f94425a7-b11f-4df4-8ab7-c850d5a17996.png\">\r\n\n", "before_files": [{"content": "\"\"\" shelf views \"\"\"\nfrom collections import namedtuple\n\nfrom django.db.models import OuterRef, Subquery, F\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.activitypub import ActivitypubResponse\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.views.helpers import is_api_request, get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Shelf(View):\n \"\"\"shelf page\"\"\"\n\n def get(self, request, username, shelf_identifier=None):\n \"\"\"display a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n\n is_self = user == request.user\n\n if is_self:\n shelves = user.shelf_set.all()\n else:\n shelves = models.Shelf.privacy_filter(request.user).filter(user=user).all()\n\n # get the shelf and make sure the logged in user should be able to see it\n if shelf_identifier:\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_visible_to_user(request.user)\n books = shelf.books\n else:\n # this is a constructed \"all books\" view, with a fake \"shelf\" obj\n FakeShelf = namedtuple(\n \"Shelf\", (\"identifier\", \"name\", \"user\", \"books\", \"privacy\")\n )\n books = (\n models.Edition.viewer_aware_objects(request.user)\n .filter(\n # privacy is ensured because the shelves are already filtered above\n shelfbook__shelf__in=shelves\n )\n .distinct()\n )\n shelf = FakeShelf(\"all\", _(\"All books\"), user, books, \"public\")\n\n if is_api_request(request) and shelf_identifier:\n return ActivitypubResponse(shelf.to_activity(**request.GET))\n\n reviews = models.Review.objects\n if not is_self:\n reviews = models.Review.privacy_filter(request.user)\n\n reviews = reviews.filter(\n user=user,\n rating__isnull=False,\n book__id=OuterRef(\"id\"),\n deleted=False,\n ).order_by(\"-published_date\")\n\n reading = models.ReadThrough.objects\n\n reading = reading.filter(user=user, book__id=OuterRef(\"id\")).order_by(\n \"start_date\"\n )\n\n books = books.annotate(\n rating=Subquery(reviews.values(\"rating\")[:1]),\n shelved_date=F(\"shelfbook__shelved_date\"),\n start_date=Subquery(reading.values(\"start_date\")[:1]),\n 
finish_date=Subquery(reading.values(\"finish_date\")[:1]),\n author=Subquery(\n models.Book.objects.filter(id=OuterRef(\"id\")).values(\"authors__name\")[\n :1\n ]\n ),\n ).prefetch_related(\"authors\")\n\n books = sort_books(books, request.GET.get(\"sort\"))\n\n paginated = Paginator(\n books,\n PAGE_LENGTH,\n )\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"user\": user,\n \"is_self\": is_self,\n \"shelves\": shelves,\n \"shelf\": shelf,\n \"books\": page,\n \"edit_form\": forms.ShelfForm(instance=shelf if shelf_identifier else None),\n \"create_form\": forms.ShelfForm(),\n \"sort\": request.GET.get(\"sort\"),\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"shelf/shelf.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username, shelf_identifier):\n \"\"\"edit a shelf\"\"\"\n user = get_user_from_username(request.user, username)\n shelf = get_object_or_404(user.shelf_set, identifier=shelf_identifier)\n shelf.raise_not_editable(request.user)\n\n # you can't change the name of the default shelves\n if not shelf.editable and request.POST.get(\"name\") != shelf.name:\n return HttpResponseBadRequest()\n\n form = forms.ShelfForm(request.POST, instance=shelf)\n if not form.is_valid():\n return redirect(shelf.local_path)\n shelf = form.save()\n return redirect(shelf.local_path)\n\n\ndef sort_books(books, sort):\n \"\"\"Books in shelf sorting\"\"\"\n sort_fields = [\n \"title\",\n \"author\",\n \"shelved_date\",\n \"start_date\",\n \"finish_date\",\n \"rating\",\n ]\n\n if sort in sort_fields:\n books = books.order_by(sort)\n elif sort and sort[1:] in sort_fields:\n books = books.order_by(F(sort[1:]).desc(nulls_last=True))\n else:\n books = books.order_by(\"-shelved_date\")\n return books\n", "path": "bookwyrm/views/shelf/shelf.py"}]}
| 2,085 | 206 |
gh_patches_debug_30254
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-436
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warn when mismatching cygwin git/python
See #352 and #353 for how this can manifest itself
</issue>
<code>
[start of pre_commit/git.py]
1 from __future__ import unicode_literals
2
3 import functools
4 import logging
5 import os.path
6 import re
7
8 from pre_commit.errors import FatalError
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11 from pre_commit.util import memoize_by_cwd
12
13
14 logger = logging.getLogger('pre_commit')
15
16
17 def get_root():
18 try:
19 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
20 except CalledProcessError:
21 raise FatalError(
22 'git failed. Is it installed, and are you in a Git repository '
23 'directory?'
24 )
25
26
27 def get_git_dir(git_root):
28 return os.path.normpath(os.path.join(
29 git_root,
30 cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),
31 ))
32
33
34 def is_in_merge_conflict():
35 git_dir = get_git_dir('.')
36 return (
37 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
38 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
39 )
40
41
42 def parse_merge_msg_for_conflicts(merge_msg):
43 # Conflicted files start with tabs
44 return [
45 line.lstrip('#').strip()
46 for line in merge_msg.splitlines()
47 # '#\t' for git 2.4.1
48 if line.startswith(('\t', '#\t'))
49 ]
50
51
52 @memoize_by_cwd
53 def get_conflicted_files():
54 logger.info('Checking merge-conflict files only.')
55 # Need to get the conflicted files from the MERGE_MSG because they could
56 # have resolved the conflict by choosing one side or the other
57 merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
58 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
59
60 # This will get the rest of the changes made after the merge.
61 # If they resolved the merge conflict by choosing a mesh of both sides
62 # this will also include the conflicted files
63 tree_hash = cmd_output('git', 'write-tree')[1].strip()
64 merge_diff_filenames = cmd_output(
65 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',
66 )[1].splitlines()
67 return set(merge_conflict_filenames) | set(merge_diff_filenames)
68
69
70 @memoize_by_cwd
71 def get_staged_files():
72 return cmd_output(
73 'git', 'diff', '--staged', '--name-only',
74 # Everything except for D
75 '--diff-filter=ACMRTUXB'
76 )[1].splitlines()
77
78
79 @memoize_by_cwd
80 def get_all_files():
81 return cmd_output('git', 'ls-files')[1].splitlines()
82
83
84 def get_files_matching(all_file_list_strategy):
85 @functools.wraps(all_file_list_strategy)
86 @memoize_by_cwd
87 def wrapper(include_expr, exclude_expr):
88 include_regex = re.compile(include_expr)
89 exclude_regex = re.compile(exclude_expr)
90 return set(
91 filename
92 for filename in all_file_list_strategy()
93 if (
94 include_regex.search(filename) and
95 not exclude_regex.search(filename) and
96 os.path.lexists(filename)
97 )
98 )
99 return wrapper
100
101
102 get_staged_files_matching = get_files_matching(get_staged_files)
103 get_all_files_matching = get_files_matching(get_all_files)
104 get_conflicted_files_matching = get_files_matching(get_conflicted_files)
105
[end of pre_commit/git.py]
[start of pre_commit/main.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import sys
6
7 import pkg_resources
8
9 from pre_commit import color
10 from pre_commit import five
11 from pre_commit import git
12 from pre_commit.commands.autoupdate import autoupdate
13 from pre_commit.commands.clean import clean
14 from pre_commit.commands.install_uninstall import install
15 from pre_commit.commands.install_uninstall import uninstall
16 from pre_commit.commands.run import run
17 from pre_commit.error_handler import error_handler
18 from pre_commit.logging_handler import add_logging_handler
19 from pre_commit.runner import Runner
20
21
22 # https://github.com/pre-commit/pre-commit/issues/217
23 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
24 # to install packages to the wrong place. We don't want anything to deal with
25 # pyvenv
26 os.environ.pop('__PYVENV_LAUNCHER__', None)
27
28
29 def _add_color_option(parser):
30 parser.add_argument(
31 '--color', default='auto', type=color.use_color,
32 metavar='{' + ','.join(color.COLOR_CHOICES) + '}',
33 help='Whether to use color in output. Defaults to `%(default)s`.',
34 )
35
36
37 def main(argv=None):
38 argv = argv if argv is not None else sys.argv[1:]
39 argv = [five.to_text(arg) for arg in argv]
40 parser = argparse.ArgumentParser()
41
42 # http://stackoverflow.com/a/8521644/812183
43 parser.add_argument(
44 '-V', '--version',
45 action='version',
46 version='%(prog)s {}'.format(
47 pkg_resources.get_distribution('pre-commit').version
48 )
49 )
50
51 subparsers = parser.add_subparsers(dest='command')
52
53 install_parser = subparsers.add_parser(
54 'install', help='Install the pre-commit script.',
55 )
56 _add_color_option(install_parser)
57 install_parser.add_argument(
58 '-f', '--overwrite', action='store_true',
59 help='Overwrite existing hooks / remove migration mode.',
60 )
61 install_parser.add_argument(
62 '--install-hooks', action='store_true',
63 help=(
64 'Whether to install hook environments for all environments '
65 'in the config file.'
66 ),
67 )
68 install_parser.add_argument(
69 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
70 default='pre-commit',
71 )
72
73 uninstall_parser = subparsers.add_parser(
74 'uninstall', help='Uninstall the pre-commit script.',
75 )
76 _add_color_option(uninstall_parser)
77 uninstall_parser.add_argument(
78 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
79 default='pre-commit',
80 )
81
82 clean_parser = subparsers.add_parser(
83 'clean', help='Clean out pre-commit files.',
84 )
85 _add_color_option(clean_parser)
86
87 autoupdate_parser = subparsers.add_parser(
88 'autoupdate',
89 help="Auto-update pre-commit config to the latest repos' versions.",
90 )
91 _add_color_option(autoupdate_parser)
92
93 run_parser = subparsers.add_parser('run', help='Run hooks.')
94 _add_color_option(run_parser)
95 run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
96 run_parser.add_argument(
97 '--no-stash', default=False, action='store_true',
98 help='Use this option to prevent auto stashing of unstaged files.',
99 )
100 run_parser.add_argument(
101 '--verbose', '-v', action='store_true', default=False,
102 )
103 run_parser.add_argument(
104 '--origin', '-o',
105 help="The origin branch's commit_id when using `git push`.",
106 )
107 run_parser.add_argument(
108 '--source', '-s',
109 help="The remote branch's commit_id when using `git push`.",
110 )
111 run_parser.add_argument(
112 '--allow-unstaged-config', default=False, action='store_true',
113 help=(
114 'Allow an unstaged config to be present. Note that this will '
115 'be stashed before parsing unless --no-stash is specified.'
116 ),
117 )
118 run_parser.add_argument(
119 '--hook-stage', choices=('commit', 'push'), default='commit',
120 help='The stage during which the hook is fired e.g. commit or push.',
121 )
122 run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
123 run_mutex_group.add_argument(
124 '--all-files', '-a', action='store_true', default=False,
125 help='Run on all the files in the repo. Implies --no-stash.',
126 )
127 run_mutex_group.add_argument(
128 '--files', nargs='*', default=[],
129 help='Specific filenames to run hooks on.',
130 )
131
132 help = subparsers.add_parser(
133 'help', help='Show help for a specific command.',
134 )
135 help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
136
137 # Argparse doesn't really provide a way to use a `default` subparser
138 if len(argv) == 0:
139 argv = ['run']
140 args = parser.parse_args(argv)
141 if args.command == 'run':
142 args.files = [
143 os.path.relpath(os.path.abspath(filename), git.get_root())
144 for filename in args.files
145 ]
146
147 if args.command == 'help':
148 if args.help_cmd:
149 parser.parse_args([args.help_cmd, '--help'])
150 else:
151 parser.parse_args(['--help'])
152
153 with error_handler():
154 add_logging_handler(args.color)
155 runner = Runner.create()
156
157 if args.command == 'install':
158 return install(
159 runner, overwrite=args.overwrite, hooks=args.install_hooks,
160 hook_type=args.hook_type,
161 )
162 elif args.command == 'uninstall':
163 return uninstall(runner, hook_type=args.hook_type)
164 elif args.command == 'clean':
165 return clean(runner)
166 elif args.command == 'autoupdate':
167 return autoupdate(runner)
168 elif args.command == 'run':
169 return run(runner, args)
170 else:
171 raise NotImplementedError(
172 'Command {} not implemented.'.format(args.command)
173 )
174
175 raise AssertionError(
176 'Command {} failed to exit with a returncode'.format(args.command)
177 )
178
179
180 if __name__ == '__main__':
181 exit(main())
182
[end of pre_commit/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -4,6 +4,7 @@
import logging
import os.path
import re
+import sys
from pre_commit.errors import FatalError
from pre_commit.util import CalledProcessError
@@ -102,3 +103,26 @@
get_staged_files_matching = get_files_matching(get_staged_files)
get_all_files_matching = get_files_matching(get_all_files)
get_conflicted_files_matching = get_files_matching(get_conflicted_files)
+
+
+def check_for_cygwin_mismatch():
+ """See https://github.com/pre-commit/pre-commit/issues/354"""
+ if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
+ is_cygwin_python = sys.platform == 'cygwin'
+ toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
+ is_cygwin_git = toplevel.startswith('/')
+
+ if is_cygwin_python ^ is_cygwin_git:
+ exe_type = {True: '(cygwin)', False: '(windows)'}
+ logger.warn(
+ 'pre-commit has detected a mix of cygwin python / git\n'
+ 'This combination is not supported, it is likely you will '
+ 'receive an error later in the program.\n'
+ 'Make sure to use cygwin git+python while using cygwin\n'
+ 'These can be installed through the cygwin installer.\n'
+ ' - python {}\n'
+ ' - git {}\n'.format(
+ exe_type[is_cygwin_python],
+ exe_type[is_cygwin_git],
+ )
+ )
diff --git a/pre_commit/main.py b/pre_commit/main.py
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -152,6 +152,7 @@
with error_handler():
add_logging_handler(args.color)
+ git.check_for_cygwin_mismatch()
runner = Runner.create()
if args.command == 'install':
|
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -4,6 +4,7 @@\n import logging\n import os.path\n import re\n+import sys\n \n from pre_commit.errors import FatalError\n from pre_commit.util import CalledProcessError\n@@ -102,3 +103,26 @@\n get_staged_files_matching = get_files_matching(get_staged_files)\n get_all_files_matching = get_files_matching(get_all_files)\n get_conflicted_files_matching = get_files_matching(get_conflicted_files)\n+\n+\n+def check_for_cygwin_mismatch():\n+ \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n+ if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n+ is_cygwin_python = sys.platform == 'cygwin'\n+ toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n+ is_cygwin_git = toplevel.startswith('/')\n+\n+ if is_cygwin_python ^ is_cygwin_git:\n+ exe_type = {True: '(cygwin)', False: '(windows)'}\n+ logger.warn(\n+ 'pre-commit has detected a mix of cygwin python / git\\n'\n+ 'This combination is not supported, it is likely you will '\n+ 'receive an error later in the program.\\n'\n+ 'Make sure to use cygwin git+python while using cygwin\\n'\n+ 'These can be installed through the cygwin installer.\\n'\n+ ' - python {}\\n'\n+ ' - git {}\\n'.format(\n+ exe_type[is_cygwin_python],\n+ exe_type[is_cygwin_git],\n+ )\n+ )\ndiff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -152,6 +152,7 @@\n \n with error_handler():\n add_logging_handler(args.color)\n+ git.check_for_cygwin_mismatch()\n runner = Runner.create()\n \n if args.command == 'install':\n", "issue": "Warn when mismatching cygwin git/python\nSee #352 and #353 for how this can manifest itself\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.logging_handler import add_logging_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default='auto', type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. 
Defaults to `%(default)s`.',\n )\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help=(\n 'Allow an unstaged config to be present. Note that this will '\n 'be stashed before parsing unless --no-stash is specified.'\n ),\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. commit or push.',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n if args.command == 'run':\n args.files = [\n os.path.relpath(os.path.abspath(filename), git.get_root())\n for filename in args.files\n ]\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n add_logging_handler(args.color)\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]}
| 3,340 | 477 |
gh_patches_debug_38807
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-2193
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PulpImport/Export of kickstart repos with subrepos broken
See https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.
</issue>
<code>
[start of pulpcore/app/importexport.py]
1 import os
2 import io
3 import json
4 import tarfile
5 import tempfile
6 import logging
7
8 from django.conf import settings
9 from django.db.models.query import QuerySet
10
11 from pulpcore.app.apps import get_plugin_config
12 from pulpcore.app.models.progress import ProgressReport
13 from pulpcore.app.models.repository import Repository
14 from pulpcore.app.modelresource import (
15 ArtifactResource,
16 ContentArtifactResource,
17 RepositoryResource,
18 )
19 from pulpcore.constants import TASK_STATES, EXPORT_BATCH_SIZE
20
21 log = logging.getLogger(__name__)
22
23
24 def _write_export(the_tarfile, resource, dest_dir=None):
25 """
26 Write the JSON export for the specified resource to the specified tarfile.
27
28 The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. If dest_dir
29 is None, the file will be added at the 'top level' of the_tarfile.
30
31 Export-files are UTF-8 encoded.
32
33 Args:
34 the_tarfile (tarfile.Tarfile): tarfile we are writing into
35 resource (import_export.resources.ModelResource): ModelResource to be exported
36 dest_dir str(directory-path): directory 'inside' the tarfile to write to
37 """
38 filename = "{}.{}.json".format(resource.__module__, type(resource).__name__)
39 if dest_dir:
40 dest_filename = os.path.join(dest_dir, filename)
41 else:
42 dest_filename = filename
43
44 # If the resource is the type of QuerySet, then export the data in batch to save memory.
45 # Otherwise, export all data in oneshot. This is because the underlying libraries
46 # (json; django-import-export) do not support to stream the output to file, we export
47 # the data in batches to memory and concatenate the json lists via string manipulation.
48 with tempfile.NamedTemporaryFile(dir=os.getcwd(), mode="w", encoding="utf8") as temp_file:
49 if isinstance(resource.queryset, QuerySet):
50 temp_file.write("[")
51 total = resource.queryset.count()
52 for i in range(0, total, EXPORT_BATCH_SIZE):
53 current_batch = i + EXPORT_BATCH_SIZE
54 dataset = resource.export(resource.queryset[i:current_batch])
55 # Strip "[" and "]" as we are writing the dataset in batch
56 temp_file.write(dataset.json.lstrip("[").rstrip("]"))
57 if current_batch < total:
58 # Write "," if not last loop
59 temp_file.write(", ")
60 temp_file.write("]")
61 else:
62 dataset = resource.export(resource.queryset)
63 temp_file.write(dataset.json)
64
65 temp_file.flush()
66 info = tarfile.TarInfo(name=dest_filename)
67 info.size = os.path.getsize(temp_file.name)
68 with open(temp_file.name, "rb") as fd:
69 the_tarfile.addfile(info, fd)
70
71
72 def export_versions(export, version_info):
73 """
74 Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile
75
76 Output format is [{"component": "<pluginname>", "version": "<pluginversion>"},...]
77
78 Args:
79 export (django.db.models.PulpExport): export instance that's doing the export
80 version_info (set): set of (distribution-label,version) tuples for repos in this export
81 """
82 # build the version-list from the distributions for each component
83 versions = [{"component": label, "version": version} for (label, version) in version_info]
84
85 version_json = json.dumps(versions).encode("utf8")
86 info = tarfile.TarInfo(name="versions.json")
87 info.size = len(version_json)
88 export.tarfile.addfile(info, io.BytesIO(version_json))
89
90
91 def export_artifacts(export, artifacts):
92 """
93 Export a set of Artifacts, ArtifactResources, and RepositoryResources
94
95 Args:
96 export (django.db.models.PulpExport): export instance that's doing the export
97 artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported
98
99 Raises:
100 ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting
101 """
102 data = dict(message="Exporting Artifacts", code="export.artifacts", total=len(artifacts))
103 with ProgressReport(**data) as pb:
104 for artifact in pb.iter(artifacts):
105 dest = artifact.file.name
106 if settings.DEFAULT_FILE_STORAGE != "pulpcore.app.models.storage.FileSystem":
107 with tempfile.TemporaryDirectory() as temp_dir:
108 with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:
109 temp_file.write(artifact.file.read())
110 temp_file.flush()
111 artifact.file.close()
112 export.tarfile.add(temp_file.name, dest)
113 else:
114 export.tarfile.add(artifact.file.path, dest)
115
116 resource = ArtifactResource()
117 resource.queryset = artifacts
118 _write_export(export.tarfile, resource)
119
120 resource = RepositoryResource()
121 resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())
122 _write_export(export.tarfile, resource)
123
124
125 def export_content(export, repository_version):
126 """
127 Export db-content, and the db-content of the owning repositories
128
129 Args:
130 export (django.db.models.PulpExport): export instance that's doing the export
131 repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported
132 """
133
134 def _combine_content_mappings(map1, map2):
135 """Combine two content mapping dicts into one by combining ids for for each key."""
136 result = {}
137 for key in map1.keys() | map2.keys():
138 result[key] = list(set(map1.get(key, []) + map2.get(key, [])))
139 return result
140
141 dest_dir = os.path.join(
142 "repository-{}_{}".format(
143 str(repository_version.repository.name), repository_version.number
144 )
145 )
146
147 # Export the connection between content and artifacts
148 resource = ContentArtifactResource(repository_version)
149 _write_export(export.tarfile, resource, dest_dir)
150
151 # content mapping is used by repo versions with subrepos (eg distribution tree repos)
152 content_mapping = {}
153
154 # find and export any ModelResource found in pulp_<repo-type>.app.modelresource
155 plugin_name = repository_version.repository.pulp_type.split(".")[0]
156 cfg = get_plugin_config(plugin_name)
157 if cfg.exportable_classes:
158 for cls in cfg.exportable_classes:
159 resource = cls(repository_version)
160 _write_export(export.tarfile, resource, dest_dir)
161
162 if hasattr(resource, "content_mapping") and resource.content_mapping:
163 content_mapping = _combine_content_mappings(
164 content_mapping, resource.content_mapping
165 )
166
167 msg = (
168 f"Exporting content for {plugin_name} "
169 f"repository-version {repository_version.repository.name}/{repository_version.number}"
170 )
171 content_count = repository_version.content.count()
172 data = dict(
173 message=msg,
174 code="export.repo.version.content",
175 total=content_count,
176 done=content_count,
177 state=TASK_STATES.COMPLETED,
178 )
179 pb = ProgressReport(**data)
180 pb.save()
181
182 if content_mapping:
183 # write the content mapping to tarfile
184 cm_json = json.dumps(content_mapping).encode("utf8")
185 info = tarfile.TarInfo(name=f"{dest_dir}/content_mapping.json")
186 info.size = len(cm_json)
187 export.tarfile.addfile(info, io.BytesIO(cm_json))
188
[end of pulpcore/app/importexport.py]
[start of pulpcore/app/modelresource.py]
1 from import_export import fields
2 from import_export.widgets import ForeignKeyWidget
3 from logging import getLogger
4
5 from pulpcore.app.models.content import (
6 Artifact,
7 Content,
8 ContentArtifact,
9 )
10 from pulpcore.app.models.repository import Repository
11 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS
12 from pulpcore.plugin.importexport import QueryModelResource
13
14
15 log = getLogger(__name__)
16
17
18 #
19 # Artifact and Repository are different from other import-export entities, in that they are not
20 # repo-version-specific.
21 #
22 class ArtifactResource(QueryModelResource):
23 """Resource for import/export of artifacts."""
24
25 def before_import_row(self, row, **kwargs):
26 """
27 Sets digests to None if they are blank strings.
28
29 Args:
30 row (tablib.Dataset row): incoming import-row representing a single Variant.
31 kwargs: args passed along from the import() call.
32
33 """
34 # the export converts None to blank strings but sha384 and sha512 have unique constraints
35 # that get triggered if they are blank. convert checksums back into None if they are blank.
36 for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:
37 if row[checksum] == "":
38 row[checksum] = None
39
40 class Meta:
41 model = Artifact
42 exclude = (
43 "pulp_id",
44 "pulp_created",
45 "pulp_last_updated",
46 )
47 import_id_fields = ("sha256",)
48
49
50 class RepositoryResource(QueryModelResource):
51 class Meta:
52 model = Repository
53 import_id_fields = ("name",)
54 exclude = (
55 "pulp_id",
56 "pulp_created",
57 "pulp_last_updated",
58 "content",
59 )
60
61
62 class ContentArtifactResource(QueryModelResource):
63 """
64 Handles import/export of the ContentArtifact model.
65
66 ContentArtifact is different from other import-export entities because it has no 'natural key'
67 other than a pulp_id, which aren't shared across instances. We do some magic to link up
68 ContentArtifacts to their matching (already-imported) Content.
69 """
70
71 artifact = fields.Field(
72 column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
73 )
74
75 def before_import_row(self, row, **kwargs):
76 """
77 Fixes the content-ptr of an incoming content-artifact row at import time.
78
79 Finds the 'original uuid' of the Content for this row, looks it up as the
80 'upstream_id' of imported Content, and then replaces the Content-pk with its
81 (new) uuid.
82
83 Args:
84 row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.
85 kwargs: args passed along from the import() call.
86
87 Returns:
88 (tablib.Dataset row): row that now points to the new downstream uuid for its content.
89 """
90
91 linked_content = Content.objects.get(upstream_id=row["content"])
92 row["content"] = str(linked_content.pulp_id)
93
94 def set_up_queryset(self):
95 return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
96 "content", "relative_path"
97 )
98
99 class Meta:
100 model = ContentArtifact
101 import_id_fields = (
102 "content",
103 "relative_path",
104 )
105 exclude = (
106 "pulp_created",
107 "pulp_last_updated",
108 "_artifacts",
109 "pulp_id",
110 )
111
[end of pulpcore/app/modelresource.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py
--- a/pulpcore/app/importexport.py
+++ b/pulpcore/app/importexport.py
@@ -144,10 +144,6 @@
)
)
- # Export the connection between content and artifacts
- resource = ContentArtifactResource(repository_version)
- _write_export(export.tarfile, resource, dest_dir)
-
# content mapping is used by repo versions with subrepos (eg distribution tree repos)
content_mapping = {}
@@ -164,6 +160,10 @@
content_mapping, resource.content_mapping
)
+ # Export the connection between content and artifacts
+ resource = ContentArtifactResource(repository_version, content_mapping)
+ _write_export(export.tarfile, resource, dest_dir)
+
msg = (
f"Exporting content for {plugin_name} "
f"repository-version {repository_version.repository.name}/{repository_version.number}"
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py
--- a/pulpcore/app/modelresource.py
+++ b/pulpcore/app/modelresource.py
@@ -66,12 +66,19 @@
ContentArtifact is different from other import-export entities because it has no 'natural key'
other than a pulp_id, which aren't shared across instances. We do some magic to link up
ContentArtifacts to their matching (already-imported) Content.
+
+ Some plugin-models have sub-repositories. We take advantage of the content-mapping
+ machinery to account for those contentartifacts as well.
"""
artifact = fields.Field(
column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256")
)
+ def __init__(self, repo_version=None, content_mapping=None):
+ self.content_mapping = content_mapping
+ super().__init__(repo_version)
+
def before_import_row(self, row, **kwargs):
"""
Fixes the content-ptr of an incoming content-artifact row at import time.
@@ -92,9 +99,15 @@
row["content"] = str(linked_content.pulp_id)
def set_up_queryset(self):
- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(
- "content", "relative_path"
- )
+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)
+ if self.content_mapping:
+ all_content = []
+ for content_ids in self.content_mapping.values():
+ all_content.extend(content_ids)
+ vers_content = vers_content.union(
+ ContentArtifact.objects.filter(content__in=all_content)
+ )
+ return vers_content.order_by("content", "relative_path")
class Meta:
model = ContentArtifact
|
{"golden_diff": "diff --git a/pulpcore/app/importexport.py b/pulpcore/app/importexport.py\n--- a/pulpcore/app/importexport.py\n+++ b/pulpcore/app/importexport.py\n@@ -144,10 +144,6 @@\n )\n )\n \n- # Export the connection between content and artifacts\n- resource = ContentArtifactResource(repository_version)\n- _write_export(export.tarfile, resource, dest_dir)\n-\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n \n@@ -164,6 +160,10 @@\n content_mapping, resource.content_mapping\n )\n \n+ # Export the connection between content and artifacts\n+ resource = ContentArtifactResource(repository_version, content_mapping)\n+ _write_export(export.tarfile, resource, dest_dir)\n+\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\ndiff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py\n--- a/pulpcore/app/modelresource.py\n+++ b/pulpcore/app/modelresource.py\n@@ -66,12 +66,19 @@\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n+\n+ Some plugin-models have sub-repositories. We take advantage of the content-mapping\n+ machinery to account for those contentartifacts as well.\n \"\"\"\n \n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n \n+ def __init__(self, repo_version=None, content_mapping=None):\n+ self.content_mapping = content_mapping\n+ super().__init__(repo_version)\n+\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n@@ -92,9 +99,15 @@\n row[\"content\"] = str(linked_content.pulp_id)\n \n def set_up_queryset(self):\n- return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(\n- \"content\", \"relative_path\"\n- )\n+ vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)\n+ if self.content_mapping:\n+ all_content = []\n+ for content_ids in self.content_mapping.values():\n+ all_content.extend(content_ids)\n+ vers_content = vers_content.union(\n+ ContentArtifact.objects.filter(content__in=all_content)\n+ )\n+ return vers_content.order_by(\"content\", \"relative_path\")\n \n class Meta:\n model = ContentArtifact\n", "issue": "PulpImport/Export of kickstart repos with subrepos broken\nSee https://bugzilla.redhat.com/show_bug.cgi?id=2040870 for details.\n", "before_files": [{"content": "import os\nimport io\nimport json\nimport tarfile\nimport tempfile\nimport logging\n\nfrom django.conf import settings\nfrom django.db.models.query import QuerySet\n\nfrom pulpcore.app.apps import get_plugin_config\nfrom pulpcore.app.models.progress import ProgressReport\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.app.modelresource import (\n ArtifactResource,\n ContentArtifactResource,\n RepositoryResource,\n)\nfrom pulpcore.constants import TASK_STATES, EXPORT_BATCH_SIZE\n\nlog = logging.getLogger(__name__)\n\n\ndef _write_export(the_tarfile, resource, dest_dir=None):\n \"\"\"\n Write the JSON export for the specified resource to the specified tarfile.\n\n The resulting file will be found at <dest_dir>/<resource.__class__.__name__>.json. 
If dest_dir\n is None, the file will be added at the 'top level' of the_tarfile.\n\n Export-files are UTF-8 encoded.\n\n Args:\n the_tarfile (tarfile.Tarfile): tarfile we are writing into\n resource (import_export.resources.ModelResource): ModelResource to be exported\n dest_dir str(directory-path): directory 'inside' the tarfile to write to\n \"\"\"\n filename = \"{}.{}.json\".format(resource.__module__, type(resource).__name__)\n if dest_dir:\n dest_filename = os.path.join(dest_dir, filename)\n else:\n dest_filename = filename\n\n # If the resource is the type of QuerySet, then export the data in batch to save memory.\n # Otherwise, export all data in oneshot. This is because the underlying libraries\n # (json; django-import-export) do not support to stream the output to file, we export\n # the data in batches to memory and concatenate the json lists via string manipulation.\n with tempfile.NamedTemporaryFile(dir=os.getcwd(), mode=\"w\", encoding=\"utf8\") as temp_file:\n if isinstance(resource.queryset, QuerySet):\n temp_file.write(\"[\")\n total = resource.queryset.count()\n for i in range(0, total, EXPORT_BATCH_SIZE):\n current_batch = i + EXPORT_BATCH_SIZE\n dataset = resource.export(resource.queryset[i:current_batch])\n # Strip \"[\" and \"]\" as we are writing the dataset in batch\n temp_file.write(dataset.json.lstrip(\"[\").rstrip(\"]\"))\n if current_batch < total:\n # Write \",\" if not last loop\n temp_file.write(\", \")\n temp_file.write(\"]\")\n else:\n dataset = resource.export(resource.queryset)\n temp_file.write(dataset.json)\n\n temp_file.flush()\n info = tarfile.TarInfo(name=dest_filename)\n info.size = os.path.getsize(temp_file.name)\n with open(temp_file.name, \"rb\") as fd:\n the_tarfile.addfile(info, fd)\n\n\ndef export_versions(export, version_info):\n \"\"\"\n Write a JSON list of plugins and their versions as 'versions.json' to export.tarfile\n\n Output format is [{\"component\": \"<pluginname>\", \"version\": \"<pluginversion>\"},...]\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n version_info (set): set of (distribution-label,version) tuples for repos in this export\n \"\"\"\n # build the version-list from the distributions for each component\n versions = [{\"component\": label, \"version\": version} for (label, version) in version_info]\n\n version_json = json.dumps(versions).encode(\"utf8\")\n info = tarfile.TarInfo(name=\"versions.json\")\n info.size = len(version_json)\n export.tarfile.addfile(info, io.BytesIO(version_json))\n\n\ndef export_artifacts(export, artifacts):\n \"\"\"\n Export a set of Artifacts, ArtifactResources, and RepositoryResources\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n artifacts (django.db.models.Artifacts): list of artifacts in all repos being exported\n\n Raises:\n ValidationError: When path is not in the ALLOWED_EXPORT_PATHS setting\n \"\"\"\n data = dict(message=\"Exporting Artifacts\", code=\"export.artifacts\", total=len(artifacts))\n with ProgressReport(**data) as pb:\n for artifact in pb.iter(artifacts):\n dest = artifact.file.name\n if settings.DEFAULT_FILE_STORAGE != \"pulpcore.app.models.storage.FileSystem\":\n with tempfile.TemporaryDirectory() as temp_dir:\n with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file:\n temp_file.write(artifact.file.read())\n temp_file.flush()\n artifact.file.close()\n export.tarfile.add(temp_file.name, dest)\n else:\n export.tarfile.add(artifact.file.path, dest)\n\n resource = 
ArtifactResource()\n resource.queryset = artifacts\n _write_export(export.tarfile, resource)\n\n resource = RepositoryResource()\n resource.queryset = Repository.objects.filter(pk__in=export.exporter.repositories.all())\n _write_export(export.tarfile, resource)\n\n\ndef export_content(export, repository_version):\n \"\"\"\n Export db-content, and the db-content of the owning repositories\n\n Args:\n export (django.db.models.PulpExport): export instance that's doing the export\n repository_version (django.db.models.RepositoryVersion): RepositoryVersion being exported\n \"\"\"\n\n def _combine_content_mappings(map1, map2):\n \"\"\"Combine two content mapping dicts into one by combining ids for for each key.\"\"\"\n result = {}\n for key in map1.keys() | map2.keys():\n result[key] = list(set(map1.get(key, []) + map2.get(key, [])))\n return result\n\n dest_dir = os.path.join(\n \"repository-{}_{}\".format(\n str(repository_version.repository.name), repository_version.number\n )\n )\n\n # Export the connection between content and artifacts\n resource = ContentArtifactResource(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n # content mapping is used by repo versions with subrepos (eg distribution tree repos)\n content_mapping = {}\n\n # find and export any ModelResource found in pulp_<repo-type>.app.modelresource\n plugin_name = repository_version.repository.pulp_type.split(\".\")[0]\n cfg = get_plugin_config(plugin_name)\n if cfg.exportable_classes:\n for cls in cfg.exportable_classes:\n resource = cls(repository_version)\n _write_export(export.tarfile, resource, dest_dir)\n\n if hasattr(resource, \"content_mapping\") and resource.content_mapping:\n content_mapping = _combine_content_mappings(\n content_mapping, resource.content_mapping\n )\n\n msg = (\n f\"Exporting content for {plugin_name} \"\n f\"repository-version {repository_version.repository.name}/{repository_version.number}\"\n )\n content_count = repository_version.content.count()\n data = dict(\n message=msg,\n code=\"export.repo.version.content\",\n total=content_count,\n done=content_count,\n state=TASK_STATES.COMPLETED,\n )\n pb = ProgressReport(**data)\n pb.save()\n\n if content_mapping:\n # write the content mapping to tarfile\n cm_json = json.dumps(content_mapping).encode(\"utf8\")\n info = tarfile.TarInfo(name=f\"{dest_dir}/content_mapping.json\")\n info.size = len(cm_json)\n export.tarfile.addfile(info, io.BytesIO(cm_json))\n", "path": "pulpcore/app/importexport.py"}, {"content": "from import_export import fields\nfrom import_export.widgets import ForeignKeyWidget\nfrom logging import getLogger\n\nfrom pulpcore.app.models.content import (\n Artifact,\n Content,\n ContentArtifact,\n)\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.importexport import QueryModelResource\n\n\nlog = getLogger(__name__)\n\n\n#\n# Artifact and Repository are different from other import-export entities, in that they are not\n# repo-version-specific.\n#\nclass ArtifactResource(QueryModelResource):\n \"\"\"Resource for import/export of artifacts.\"\"\"\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Sets digests to None if they are blank strings.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single Variant.\n kwargs: args passed along from the import() call.\n\n \"\"\"\n # the export converts None to blank strings but sha384 and sha512 have unique constraints\n # that get triggered if they are 
blank. convert checksums back into None if they are blank.\n for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:\n if row[checksum] == \"\":\n row[checksum] = None\n\n class Meta:\n model = Artifact\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n )\n import_id_fields = (\"sha256\",)\n\n\nclass RepositoryResource(QueryModelResource):\n class Meta:\n model = Repository\n import_id_fields = (\"name\",)\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"content\",\n )\n\n\nclass ContentArtifactResource(QueryModelResource):\n \"\"\"\n Handles import/export of the ContentArtifact model.\n\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n \"\"\"\n\n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n\n Finds the 'original uuid' of the Content for this row, looks it up as the\n 'upstream_id' of imported Content, and then replaces the Content-pk with its\n (new) uuid.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.\n kwargs: args passed along from the import() call.\n\n Returns:\n (tablib.Dataset row): row that now points to the new downstream uuid for its content.\n \"\"\"\n\n linked_content = Content.objects.get(upstream_id=row[\"content\"])\n row[\"content\"] = str(linked_content.pulp_id)\n\n def set_up_queryset(self):\n return ContentArtifact.objects.filter(content__in=self.repo_version.content).order_by(\n \"content\", \"relative_path\"\n )\n\n class Meta:\n model = ContentArtifact\n import_id_fields = (\n \"content\",\n \"relative_path\",\n )\n exclude = (\n \"pulp_created\",\n \"pulp_last_updated\",\n \"_artifacts\",\n \"pulp_id\",\n )\n", "path": "pulpcore/app/modelresource.py"}]}
| 3,580 | 624 |
gh_patches_debug_5645
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-447
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ensure OAuth redirect URLs are secure
We construct the OAuth redirect URLs using the Django [`reverse()`](https://docs.djangoproject.com/en/3.2/ref/urlresolvers/#reverse) helper and [`request.build_absolute_uri()`](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri).
This is done in e.g. the [`login()` view](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/views.py):
```python
ROUTE_AUTH = "oauth:authorize"
route = reverse(ROUTE_AUTH)
redirect_uri = request.build_absolute_uri(route)
```
The docs for `build_absolute_uri` say:
> Mixing HTTP and HTTPS on the same site is discouraged, therefore [build_absolute_uri()](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri) will always generate an absolute URI with the same scheme the current request has. If you need to redirect users to HTTPS, it’s best to let your Web server redirect all HTTP traffic to HTTPS.
When running in AWS, we have the Load Balancer sitting in front of all requests; this is the layer where HTTP --> HTTPS redirection happens and where the cert that the browser sees is installed.
Then the request gets to the application container, where it first hits `nginx`. This is where requests for static files end; application requests are forwarded through to `gunicorn` over a Unix socket. Strictly speaking, Django does not receive an HTTPS request.
## Potential solution
The [`SECURE_PROXY_SSL_HEADER`](https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header) Django setting seems to be aimed at helping this situation, where `nginx` sets a header with an indicated value on the request forwarded into the application via gunicorn, to tell the app if the request was originally secure or not.
In the app's [`nginx.conf`](https://github.com/cal-itp/benefits/blob/dev/nginx.conf#L55) we do set that header on forwarded requests.
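A minimal sketch of how these pieces fit together is below, assuming the proxy forwards an `X-Forwarded-Proto` header (which Django sees as the WSGI key `HTTP_X_FORWARDED_PROTO`); the fragment is illustrative and is not the project's actual `settings.py`.

```python
# Illustrative Django settings fragment (assumption: nginx terminates TLS and
# sets "proxy_set_header X-Forwarded-Proto $scheme;" on forwarded requests).
DEBUG = False

if not DEBUG:
    # Trust the proxy's header: when it equals "https", request.is_secure()
    # returns True and request.build_absolute_uri() emits an https:// URL.
    SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
```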
## Todo
* [x] Ensure the current `nginx.conf` setting overwrites any header value coming in with the request and/or clear that header first
* [x] Add the `SECURE_PROXY_SSL_HEADER` to `settings.py` when `DEBUG = False`
</issue>
<code>
[start of benefits/settings.py]
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6
7 def _filter_empty(ls):
8 return [s for s in ls if s]
9
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
14 # SECURITY WARNING: keep the secret key used in production secret!
15 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
16
17 # SECURITY WARNING: don't run with debug turned on in production!
18 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
19
20 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
21
22 ALLOWED_HOSTS = _filter_empty(os.environ["DJANGO_ALLOWED_HOSTS"].split(","))
23
24 # Application definition
25
26 INSTALLED_APPS = [
27 "django.contrib.messages",
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 "benefits.oauth",
34 ]
35
36 if ADMIN:
37 INSTALLED_APPS.extend(
38 [
39 "django.contrib.admin",
40 "django.contrib.auth",
41 "django.contrib.contenttypes",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.locale.LocaleMiddleware",
50 "benefits.core.middleware.Healthcheck",
51 "django.middleware.common.CommonMiddleware",
52 "django.middleware.csrf.CsrfViewMiddleware",
53 "django.middleware.clickjacking.XFrameOptionsMiddleware",
54 "csp.middleware.CSPMiddleware",
55 "benefits.core.middleware.ChangedLanguageEvent",
56 ]
57
58 if ADMIN:
59 MIDDLEWARE.extend(
60 [
61 "django.contrib.auth.middleware.AuthenticationMiddleware",
62 "django.contrib.messages.middleware.MessageMiddleware",
63 ]
64 )
65
66 if DEBUG:
67 MIDDLEWARE.extend(["benefits.core.middleware.DebugSession"])
68
69 CSRF_COOKIE_AGE = None
70 CSRF_COOKIE_SAMESITE = "Strict"
71 CSRF_COOKIE_HTTPONLY = True
72 CSRF_TRUSTED_ORIGINS = _filter_empty(os.environ["DJANGO_TRUSTED_ORIGINS"].split(","))
73
74 # With `Strict`, the user loses their Django session between leaving our app to
75 # sign in with OAuth, and coming back into our app from the OAuth redirect.
76 # This is because `Strict` disallows our cookie being sent from an external
77 # domain and so the session cookie is lost.
78 #
79 # `Lax` allows the cookie to travel with the user and be sent back to us by the
80 # OAuth server, as long as the request is "safe" i.e. GET
81 SESSION_COOKIE_SAMESITE = "Lax"
82 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
83 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
84 SESSION_COOKIE_NAME = "_benefitssessionid"
85
86 if not DEBUG:
87 CSRF_COOKIE_SECURE = True
88 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
89 SESSION_COOKIE_SECURE = True
90
91 SECURE_BROWSER_XSS_FILTER = True
92
93 ROOT_URLCONF = "benefits.urls"
94
95 template_ctx_processors = [
96 "django.template.context_processors.request",
97 "django.contrib.messages.context_processors.messages",
98 "benefits.core.context_processors.analytics",
99 "benefits.core.context_processors.recaptcha",
100 ]
101
102 if DEBUG:
103 template_ctx_processors.extend(
104 [
105 "django.template.context_processors.debug",
106 "benefits.core.context_processors.debug",
107 ]
108 )
109
110 if ADMIN:
111 template_ctx_processors.extend(
112 [
113 "django.contrib.auth.context_processors.auth",
114 "django.contrib.messages.context_processors.messages",
115 ]
116 )
117
118 TEMPLATES = [
119 {
120 "BACKEND": "django.template.backends.django.DjangoTemplates",
121 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
122 "APP_DIRS": True,
123 "OPTIONS": {
124 "context_processors": template_ctx_processors,
125 },
126 },
127 ]
128
129 WSGI_APPLICATION = "benefits.wsgi.application"
130
131 DATABASES = {
132 "default": {
133 "ENGINE": "django.db.backends.sqlite3",
134 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
135 }
136 }
137
138 # Password validation
139
140 AUTH_PASSWORD_VALIDATORS = []
141
142 if ADMIN:
143 AUTH_PASSWORD_VALIDATORS.extend(
144 [
145 {
146 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
147 },
148 {
149 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
150 },
151 {
152 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
153 },
154 {
155 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
156 },
157 ]
158 )
159
160 # OAuth configuration
161
162 OAUTH_CLIENT_NAME = os.environ.get("DJANGO_OAUTH_CLIENT_NAME")
163
164 if OAUTH_CLIENT_NAME:
165 AUTHLIB_OAUTH_CLIENTS = {
166 OAUTH_CLIENT_NAME: {
167 "client_id": os.environ.get("DJANGO_OAUTH_CLIENT_ID"),
168 "server_metadata_url": f"{os.environ.get('DJANGO_OAUTH_AUTHORITY')}/.well-known/openid-configuration",
169 "client_kwargs": {"code_challenge_method": "S256", "scope": os.environ.get("DJANGO_OAUTH_SCOPE")},
170 }
171 }
172
173 # Internationalization
174
175 LANGUAGE_CODE = "en"
176
177 LANGUAGE_COOKIE_HTTPONLY = True
178 LANGUAGE_COOKIE_SAMESITE = "Strict"
179 LANGUAGE_COOKIE_SECURE = True
180
181 LANGUAGES = [("en", "English"), ("es", "Español")]
182
183 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
184
185 USE_I18N = True
186 USE_L10N = True
187
188 TIME_ZONE = "UTC"
189 USE_TZ = True
190
191 # Static files (CSS, JavaScript, Images)
192
193 STATIC_URL = "/static/"
194 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
195 STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
196 STATIC_ROOT = os.path.join(BASE_DIR, "static")
197
198 # Logging configuration
199
200 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
201 LOGGING = {
202 "version": 1,
203 "disable_existing_loggers": False,
204 "formatters": {
205 "default": {
206 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
207 "datefmt": "%d/%b/%Y %H:%M:%S",
208 "style": "{",
209 },
210 },
211 "handlers": {
212 "default": {"class": "logging.StreamHandler", "formatter": "default"},
213 },
214 "root": {
215 "handlers": ["default"],
216 "level": LOG_LEVEL,
217 },
218 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
219 }
220
221 # Analytics configuration
222
223 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
224
225 # Content Security Policy
226 # Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
227
228 # In particular, note that the inner single-quotes are required!
229 # https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
230
231 CSP_DEFAULT_SRC = ["'self'"]
232
233 env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
234 CSP_CONNECT_SRC = ["'self'"]
235 CSP_CONNECT_SRC.extend(env_connect_src)
236
237 env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
238 CSP_FONT_SRC = list(env_font_src)
239
240 CSP_FRAME_ANCESTORS = ["'none'"]
241 CSP_FRAME_SRC = ["'none'"]
242 env_frame_src = _filter_empty(os.environ.get("DJANGO_CSP_FRAME_SRC", "").split(","))
243 if any(env_frame_src):
244 CSP_FRAME_SRC = list(env_frame_src)
245
246 env_script_src = _filter_empty(os.environ.get("DJANGO_CSP_SCRIPT_SRC", "").split(","))
247 CSP_SCRIPT_SRC = ["'unsafe-inline'"]
248 CSP_SCRIPT_SRC.extend(env_script_src)
249
250 env_style_src = _filter_empty(os.environ.get("DJANGO_CSP_STYLE_SRC", "").split(","))
251 CSP_STYLE_SRC = ["'self'", "'unsafe-inline'"]
252 CSP_STYLE_SRC.extend(env_style_src)
253
254 # rate limit configuration
255
256 # number of requests allowed in the given period
257 RATE_LIMIT = int(os.environ.get("DJANGO_RATE_LIMIT", 0))
258
259 # HTTP request methods to rate limit
260 RATE_LIMIT_METHODS = os.environ.get("DJANGO_RATE_LIMIT_METHODS", "").upper().split(",")
261
262 # number of seconds before additional requests are denied
263 RATE_LIMIT_PERIOD = int(os.environ.get("DJANGO_RATE_LIMIT_PERIOD", 0))
264
265 # reCAPTCHA configuration
266
267 RECAPTCHA_API_URL = os.environ.get("DJANGO_RECAPTCHA_API_URL")
268 RECAPTCHA_SITE_KEY = os.environ.get("DJANGO_RECAPTCHA_SITE_KEY")
269 RECAPTCHA_SECRET_KEY = os.environ.get("DJANGO_RECAPTCHA_SECRET_KEY")
270 RECAPTCHA_VERIFY_URL = os.environ.get("DJANGO_RECAPTCHA_VERIFY_URL")
271 RECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))
272
[end of benefits/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -90,6 +90,14 @@
SECURE_BROWSER_XSS_FILTER = True
+# the NGINX reverse proxy sits in front of the application in deployed environments
+# SSL terminates before getting to Django, and NGINX adds this header to indicate
+# if the original request was secure or not
+#
+# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header
+if not DEBUG:
+ SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
+
ROOT_URLCONF = "benefits.urls"
template_ctx_processors = [
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -90,6 +90,14 @@\n \n SECURE_BROWSER_XSS_FILTER = True\n \n+# the NGINX reverse proxy sits in front of the application in deployed environments\n+# SSL terminates before getting to Django, and NGINX adds this header to indicate\n+# if the original request was secure or not\n+#\n+# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header\n+if not DEBUG:\n+ SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n+\n ROOT_URLCONF = \"benefits.urls\"\n \n template_ctx_processors = [\n", "issue": "Ensure OAuth redirect URLs are secure\nWe construct the OAUth redirect URLs using the Django [`reverse()`](https://docs.djangoproject.com/en/3.2/ref/urlresolvers/#reverse) helper and [`request.build_absolute_uri()`](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri).\r\n\r\nThis is done in e.g. the [`login()` view](https://github.com/cal-itp/benefits/blob/dev/benefits/oauth/views.py):\r\n\r\n```python\r\nROUTE_AUTH = \"oauth:authorize\"\r\n\r\nroute = reverse(ROUTE_AUTH)\r\n\r\nredirect_uri = request.build_absolute_uri(route)\r\n```\r\n\r\nThe docs for `build_absolute_uri` say:\r\n\r\n> Mixing HTTP and HTTPS on the same site is discouraged, therefore [build_absolute_uri()](https://docs.djangoproject.com/en/3.2/ref/request-response/#django.http.HttpRequest.build_absolute_uri) will always generate an absolute URI with the same scheme the current request has. If you need to redirect users to HTTPS, it\u2019s best to let your Web server redirect all HTTP traffic to HTTPS.\r\n\r\nWhen running in AWS, we have the Load Balancer sitting in front of all requests; this is the layer where HTTP --> HTTPS redirection happens and where the cert that the browser sees is installed.\r\n\r\nThen the request gets to the application container, where it first hits `nginx`. This is where requests for static files end; application requests are forwarded through to `gunicorn` over a Unix socket. 
Strictly speaking, Django does not receive an HTTPS request.\r\n\r\n## Potential solution\r\n\r\nThe [`SECURE_PROXY_SSL_HEADER`](https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header) Django setting seems to be aimed at helping this situation, where `nginx` sets a header with an indicated value on the request forwarded into the application via gunicorn, to tell the app if the request was originally secure or not.\r\n\r\nIn the app's [`nginx.conf`](https://github.com/cal-itp/benefits/blob/dev/nginx.conf#L55) we do set that header on forwarded requests.\r\n\r\n## Todo\r\n\r\n* [x] Ensure the current `nginx.conf` setting overwrites any header value coming in with the request and/or clear that header first\r\n* [x] Add the `SECURE_PROXY_SSL_HEADER` to `settings.py` when `DEBUG = False`\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n MIDDLEWARE.extend([\"benefits.core.middleware.DebugSession\"])\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ[\"DJANGO_TRUSTED_ORIGINS\"].split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. 
GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.recaptcha\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# OAuth configuration\n\nOAUTH_CLIENT_NAME = os.environ.get(\"DJANGO_OAUTH_CLIENT_NAME\")\n\nif OAUTH_CLIENT_NAME:\n AUTHLIB_OAUTH_CLIENTS = {\n OAUTH_CLIENT_NAME: {\n \"client_id\": os.environ.get(\"DJANGO_OAUTH_CLIENT_ID\"),\n \"server_metadata_url\": f\"{os.environ.get('DJANGO_OAUTH_AUTHORITY')}/.well-known/openid-configuration\",\n \"client_kwargs\": {\"code_challenge_method\": \"S256\", \"scope\": os.environ.get(\"DJANGO_OAUTH_SCOPE\")},\n }\n }\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": 
LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC = [\"'self'\"]\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC = list(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nif any(env_frame_src):\n CSP_FRAME_SRC = list(env_frame_src)\n\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC = [\"'unsafe-inline'\"]\nCSP_SCRIPT_SRC.extend(env_script_src)\n\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC = [\"'self'\", \"'unsafe-inline'\"]\nCSP_STYLE_SRC.extend(env_style_src)\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 0))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n", "path": "benefits/settings.py"}]}
| 3,685 | 162 |
gh_patches_debug_34322
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-2525
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create bridges.py
Contains three simple functions for detection and retrieval of bridges or local bridges in an undirected network.
</issue>
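Since the issue text is only a summary, a minimal sketch of the two notions involved may help: a *bridge* is an edge whose removal increases the number of connected components, while a *local bridge* is an edge whose endpoints have no common neighbour (it lies on no triangle). The check below uses only the public NetworkX API and is illustrative — it is not the implementation added by this change.

```python
import networkx as nx

G = nx.barbell_graph(10, 0)      # two 10-cliques joined by the single edge (9, 10)

# Bridge: deleting the edge increases the number of connected components.
H = G.copy()
H.remove_edge(9, 10)
assert nx.number_connected_components(H) > nx.number_connected_components(G)

# Local bridge: the endpoints share no neighbours, i.e. the edge is on no triangle.
def is_local_bridge(graph, u, v):
    return not (set(graph[u]) & set(graph[v]))

assert is_local_bridge(G, 9, 10)       # the joining edge is a local bridge
assert not is_local_bridge(G, 0, 1)    # an edge inside a clique is not
```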
<code>
[start of networkx/algorithms/bridges.py]
1 # -*- coding: utf-8 -*-
2 # bridges.py - bridge-finding algorithms
3 #
4 # Copyright 2004-2016 NetworkX developers.
5 #
6 # This file is part of NetworkX.
7 #
8 # NetworkX is distributed under a BSD license; see LICENSE.txt for more
9 # information.
10 """Bridge-finding algorithms."""
11 from itertools import chain
12
13 import networkx as nx
14 from networkx.utils import not_implemented_for
15
16 __all__ = ['bridges', 'has_bridges']
17
18
19 @not_implemented_for('multigraph')
20 @not_implemented_for('directed')
21 def bridges(G, root=None):
22 """Generate all bridges in a graph.
23
24 A *bridge* in a graph is an edge whose removal causes the number of
25 connected components of the graph to increase.
26
27 Parameters
28 ----------
29 G : undirected graph
30
31 root : node (optional)
32 A node in the graph `G`. If specified, only the bridges in the
33 connected component containing this node will be returned.
34
35 Yields
36 ------
37 e : edge
38 An edge in the graph whose removal disconnects the graph (or
39 causes the number of connected components to increase).
40
41 Raises
42 ------
43 NodeNotFound
44 If `root` is not in the graph `G`.
45
46 Examples
47 --------
48 The barbell graph with parameter zero has a single bridge::
49
50 >>> G = nx.barbell_graph(10, 0)
51 >>> list(nx.bridges(G))
52 [(9, 10)]
53
54 Notes
55 -----
56 This implementation uses the :func:`networkx.chain_decomposition`
57 function, so it shares its worst-case time complexity, :math:`O(m +
58 n)`, ignoring polylogarithmic factors, where *n* is the number of
59 nodes in the graph and *m* is the number of edges.
60
61 """
62 chains = nx.chain_decomposition(G, root=root)
63 chain_edges = set(chain.from_iterable(chains))
64 for u, v in G.edges():
65 if (u, v) not in chain_edges and (v, u) not in chain_edges:
66 yield u, v
67
68
69 @not_implemented_for('multigraph')
70 @not_implemented_for('directed')
71 def has_bridges(G, root=None):
72 """Decide whether a graph has any bridges.
73
74 A *bridge* in a graph is an edge whose removal causes the number of
75 connected components of the graph to increase.
76
77 Parameters
78 ----------
79 G : undirected graph
80
81 root : node (optional)
82 A node in the graph `G`. If specified, only the bridges in the
83 connected component containing this node will be considered.
84
85 Returns
86 -------
87 bool
88 Whether the graph (or the connected component containing `root`)
89 has any bridges.
90
91 Raises
92 ------
93 NodeNotFound
94 If `root` is not in the graph `G`.
95
96 Examples
97 --------
98 The barbell graph with parameter zero has a single bridge::
99
100 >>> G = nx.barbell_graph(10, 0)
101 >>> nx.has_bridges(G)
102 True
103
104 On the other hand, the cycle graph has no bridges::
105
106 >>> G = nx.cycle_graph(5)
107 >>> nx.has_bridges(G)
108 False
109
110 Notes
111 -----
112 This implementation uses the :func:`networkx.bridges` function, so
113 it shares its worst-case time complexity, :math:`O(m + n)`, ignoring
114 polylogarithmic factors, where *n* is the number of nodes in the
115 graph and *m* is the number of edges.
116
117 """
118 try:
119 next(bridges(G))
120 except StopIteration:
121 return False
122 else:
123 return True
124
[end of networkx/algorithms/bridges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py
--- a/networkx/algorithms/bridges.py
+++ b/networkx/algorithms/bridges.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# bridges.py - bridge-finding algorithms
#
-# Copyright 2004-2016 NetworkX developers.
+# Copyright 2004-2017 NetworkX developers.
#
# This file is part of NetworkX.
#
@@ -13,7 +13,7 @@
import networkx as nx
from networkx.utils import not_implemented_for
-__all__ = ['bridges', 'has_bridges']
+__all__ = ['bridges', 'has_bridges', 'local_bridges']
@not_implemented_for('multigraph')
@@ -121,3 +121,61 @@
return False
else:
return True
+
+
+@not_implemented_for('multigraph')
+@not_implemented_for('directed')
+def local_bridges(G, with_span=True, weight=None):
+ """Iterate over local bridges of `G` optionally computing the span
+
+ A *local bridge* is an edge whose endpoints have no common neighbors.
+ That is, the edge is not part of a triangle in the graph.
+
+ The *span* of a *local bridge* is the shortest path length between
+ the endpoints if the local bridge is removed.
+
+ Parameters
+ ----------
+ G : undirected graph
+
+ with_span : bool
+ If True, yield a 3-tuple `(u, v, span)`
+
+ weight : function, string or None (default: None)
+ If function, used to compute edge weights for the span.
+ If string, the edge data attribute used in calculating span.
+ If None, all edges have weight 1.
+
+ Yields
+ ------
+ e : edge
+ The local bridges as an edge 2-tuple of nodes `(u, v)` or
+ as a 3-tuple `(u, v, span)` when `with_span is True`.
+
+ Examples
+ --------
+ A cycle graph has every edge a local bridge with span N-1.
+
+ >>> G = nx.cycle_graph(9)
+ >>> (0, 8, 8) in set(nx.local_bridges(G))
+ True
+ """
+ if with_span is not True:
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ yield u, v
+ else:
+ wt = nx.weighted._weight_function(G, weight)
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ enodes = {u, v}
+ def hide_edge(n, nbr, d):
+ if n not in enodes or nbr not in enodes:
+ return wt(n, nbr, d)
+ return None
+
+ try:
+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)
+ yield u, v, span
+ except nx.NetworkXNoPath:
+ yield u, v, float('inf')
|
{"golden_diff": "diff --git a/networkx/algorithms/bridges.py b/networkx/algorithms/bridges.py\n--- a/networkx/algorithms/bridges.py\n+++ b/networkx/algorithms/bridges.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n # bridges.py - bridge-finding algorithms\n #\n-# Copyright 2004-2016 NetworkX developers.\n+# Copyright 2004-2017 NetworkX developers.\n #\n # This file is part of NetworkX.\n #\n@@ -13,7 +13,7 @@\n import networkx as nx\n from networkx.utils import not_implemented_for\n \n-__all__ = ['bridges', 'has_bridges']\n+__all__ = ['bridges', 'has_bridges', 'local_bridges']\n \n \n @not_implemented_for('multigraph')\n@@ -121,3 +121,61 @@\n return False\n else:\n return True\n+\n+\n+@not_implemented_for('multigraph')\n+@not_implemented_for('directed')\n+def local_bridges(G, with_span=True, weight=None):\n+ \"\"\"Iterate over local bridges of `G` optionally computing the span\n+\n+ A *local bridge* is an edge whose endpoints have no common neighbors.\n+ That is, the edge is not part of a triangle in the graph.\n+\n+ The *span* of a *local bridge* is the shortest path length between\n+ the endpoints if the local bridge is removed.\n+\n+ Parameters\n+ ----------\n+ G : undirected graph\n+\n+ with_span : bool\n+ If True, yield a 3-tuple `(u, v, span)`\n+\n+ weight : function, string or None (default: None)\n+ If function, used to compute edge weights for the span.\n+ If string, the edge data attribute used in calculating span.\n+ If None, all edges have weight 1.\n+\n+ Yields\n+ ------\n+ e : edge\n+ The local bridges as an edge 2-tuple of nodes `(u, v)` or\n+ as a 3-tuple `(u, v, span)` when `with_span is True`.\n+\n+ Examples\n+ --------\n+ A cycle graph has every edge a local bridge with span N-1.\n+\n+ >>> G = nx.cycle_graph(9)\n+ >>> (0, 8, 8) in set(nx.local_bridges(G))\n+ True\n+ \"\"\"\n+ if with_span is not True:\n+ for u, v in G.edges:\n+ if not (set(G[u]) & set(G[v])):\n+ yield u, v\n+ else:\n+ wt = nx.weighted._weight_function(G, weight)\n+ for u, v in G.edges:\n+ if not (set(G[u]) & set(G[v])):\n+ enodes = {u, v}\n+ def hide_edge(n, nbr, d):\n+ if n not in enodes or nbr not in enodes:\n+ return wt(n, nbr, d)\n+ return None\n+\n+ try:\n+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)\n+ yield u, v, span\n+ except nx.NetworkXNoPath:\n+ yield u, v, float('inf')\n", "issue": "Create bridges.py\nContains three simple functions for detection and retrieval of bridges or local bridges in a undirected network.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# bridges.py - bridge-finding algorithms\n#\n# Copyright 2004-2016 NetworkX developers.\n#\n# This file is part of NetworkX.\n#\n# NetworkX is distributed under a BSD license; see LICENSE.txt for more\n# information.\n\"\"\"Bridge-finding algorithms.\"\"\"\nfrom itertools import chain\n\nimport networkx as nx\nfrom networkx.utils import not_implemented_for\n\n__all__ = ['bridges', 'has_bridges']\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef bridges(G, root=None):\n \"\"\"Generate all bridges in a graph.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. 
If specified, only the bridges in the\n connected component containing this node will be returned.\n\n Yields\n ------\n e : edge\n An edge in the graph whose removal disconnects the graph (or\n causes the number of connected components to increase).\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> list(nx.bridges(G))\n [(9, 10)]\n\n Notes\n -----\n This implementation uses the :func:`networkx.chain_decomposition`\n function, so it shares its worst-case time complexity, :math:`O(m +\n n)`, ignoring polylogarithmic factors, where *n* is the number of\n nodes in the graph and *m* is the number of edges.\n\n \"\"\"\n chains = nx.chain_decomposition(G, root=root)\n chain_edges = set(chain.from_iterable(chains))\n for u, v in G.edges():\n if (u, v) not in chain_edges and (v, u) not in chain_edges:\n yield u, v\n\n\n@not_implemented_for('multigraph')\n@not_implemented_for('directed')\ndef has_bridges(G, root=None):\n \"\"\"Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, :math:`O(m + n)`, ignoring\n polylogarithmic factors, where *n* is the number of nodes in the\n graph and *m* is the number of edges.\n\n \"\"\"\n try:\n next(bridges(G))\n except StopIteration:\n return False\n else:\n return True\n", "path": "networkx/algorithms/bridges.py"}]}
| 1,656 | 744 |
gh_patches_debug_37021
|
rasdani/github-patches
|
git_diff
|
Cloud-CV__EvalAI-855
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add nullable attribute to the Team Model
The following changes are required in the Team model:
- [x] Convert the URLFields to CharField with URL
- [x] Change the fields `email`, `github_url`, `linkedin_url`, `personal_website` to nullable fields
</issue>
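A minimal sketch of what the requested change usually looks like in Django is shown here — the field names and `max_length` values follow the Team model listed below, but treat the fragment as an illustration rather than the repository's final code (changing `null`/`blank` also requires a new migration).

```python
# Sketch: "nullable" URL-style fields declared as plain CharFields.
# null=True allows NULL in the database column; blank=True additionally lets
# forms and serializers accept an empty value.
from django.db import models


class Team(models.Model):
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=70, null=True, blank=True)
    github_url = models.CharField(max_length=200, null=True, blank=True)
    linkedin_url = models.CharField(max_length=200, null=True, blank=True)
    personal_website = models.CharField(max_length=200, null=True, blank=True)
```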
<code>
[start of apps/web/models.py]
1 from __future__ import unicode_literals
2
3 from django.db import models
4
5 from base.models import (TimeStampedModel, )
6
7
8 class Contact(TimeStampedModel):
9 """Model representing details of User submitting queries."""
10 name = models.CharField(max_length=100,)
11 email = models.EmailField(max_length=70,)
12 message = models.CharField(max_length=500,)
13
14 def __unicode__(self):
15 return "%s: %s: %s" % (self.name, self.email, self.message)
16
17 class Meta:
18 app_label = 'web'
19 db_table = 'contact'
20
21
22 class Team(models.Model):
23 """Model representing details of Team"""
24
25 # Team Type Options
26 CORE_TEAM = 'Core Team'
27 CONTRIBUTOR = 'Contributor'
28
29 TEAM_TYPE_OPTIONS = (
30 (CORE_TEAM, CORE_TEAM),
31 (CONTRIBUTOR, CONTRIBUTOR),
32 )
33
34 name = models.CharField(max_length=100)
35 email = models.EmailField(max_length=70, null=True)
36 description = models.TextField(null=True)
37 headshot = models.ImageField(upload_to="headshots", null=True, blank=True)
38 visible = models.BooleanField(default=True)
39 github_url = models.CharField(max_length=200, null=True)
40 linkedin_url = models.CharField(max_length=200, null=True)
41 personal_website = models.CharField(max_length=200, null=True)
42 background_image = models.ImageField(upload_to="bg-images", null=True, blank=True)
43 team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)
44
45 def __unicode__(self):
46 return self.name
47
48 class Meta:
49 app_label = 'web'
50 db_table = 'teams'
51
[end of apps/web/models.py]
[start of apps/web/migrations/0004_change_team_model_field_type.py]
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.10.2 on 2017-03-19 21:06
3 from __future__ import unicode_literals
4
5 from django.db import migrations, models
6
7
8 class Migration(migrations.Migration):
9
10 dependencies = [
11 ('web', '0003_added_description_and_background_image_to_team_model'),
12 ]
13
14 operations = [
15 migrations.AlterField(
16 model_name='team',
17 name='email',
18 field=models.EmailField(max_length=70, null=True),
19 ),
20 migrations.AlterField(
21 model_name='team',
22 name='github_url',
23 field=models.CharField(max_length=200, null=True),
24 ),
25 migrations.AlterField(
26 model_name='team',
27 name='linkedin_url',
28 field=models.CharField(max_length=200, null=True),
29 ),
30 migrations.AlterField(
31 model_name='team',
32 name='personal_website',
33 field=models.CharField(max_length=200, null=True),
34 ),
35 ]
36
[end of apps/web/migrations/0004_change_team_model_field_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/web/migrations/0004_change_team_model_field_type.py b/apps/web/migrations/0004_change_team_model_field_type.py
--- a/apps/web/migrations/0004_change_team_model_field_type.py
+++ b/apps/web/migrations/0004_change_team_model_field_type.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Generated by Django 1.10.2 on 2017-03-19 21:06
+# Generated by Django 1.10.2 on 2017-03-19 21:58
from __future__ import unicode_literals
from django.db import migrations, models
@@ -15,21 +15,21 @@
migrations.AlterField(
model_name='team',
name='email',
- field=models.EmailField(max_length=70, null=True),
+ field=models.EmailField(blank=True, max_length=70, null=True),
),
migrations.AlterField(
model_name='team',
name='github_url',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='team',
name='linkedin_url',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='team',
name='personal_website',
- field=models.CharField(max_length=200, null=True),
+ field=models.CharField(blank=True, max_length=200, null=True),
),
]
diff --git a/apps/web/models.py b/apps/web/models.py
--- a/apps/web/models.py
+++ b/apps/web/models.py
@@ -32,13 +32,13 @@
)
name = models.CharField(max_length=100)
- email = models.EmailField(max_length=70, null=True)
+ email = models.EmailField(max_length=70, null=True, blank=True)
description = models.TextField(null=True)
headshot = models.ImageField(upload_to="headshots", null=True, blank=True)
visible = models.BooleanField(default=True)
- github_url = models.CharField(max_length=200, null=True)
- linkedin_url = models.CharField(max_length=200, null=True)
- personal_website = models.CharField(max_length=200, null=True)
+ github_url = models.CharField(max_length=200, null=True, blank=True)
+ linkedin_url = models.CharField(max_length=200, null=True, blank=True)
+ personal_website = models.CharField(max_length=200, null=True, blank=True)
background_image = models.ImageField(upload_to="bg-images", null=True, blank=True)
team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)
|
{"golden_diff": "diff --git a/apps/web/migrations/0004_change_team_model_field_type.py b/apps/web/migrations/0004_change_team_model_field_type.py\n--- a/apps/web/migrations/0004_change_team_model_field_type.py\n+++ b/apps/web/migrations/0004_change_team_model_field_type.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Generated by Django 1.10.2 on 2017-03-19 21:06\n+# Generated by Django 1.10.2 on 2017-03-19 21:58\n from __future__ import unicode_literals\n \n from django.db import migrations, models\n@@ -15,21 +15,21 @@\n migrations.AlterField(\n model_name='team',\n name='email',\n- field=models.EmailField(max_length=70, null=True),\n+ field=models.EmailField(blank=True, max_length=70, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n- field=models.CharField(max_length=200, null=True),\n+ field=models.CharField(blank=True, max_length=200, null=True),\n ),\n ]\ndiff --git a/apps/web/models.py b/apps/web/models.py\n--- a/apps/web/models.py\n+++ b/apps/web/models.py\n@@ -32,13 +32,13 @@\n )\n \n name = models.CharField(max_length=100)\n- email = models.EmailField(max_length=70, null=True)\n+ email = models.EmailField(max_length=70, null=True, blank=True)\n description = models.TextField(null=True)\n headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n- github_url = models.CharField(max_length=200, null=True)\n- linkedin_url = models.CharField(max_length=200, null=True)\n- personal_website = models.CharField(max_length=200, null=True)\n+ github_url = models.CharField(max_length=200, null=True, blank=True)\n+ linkedin_url = models.CharField(max_length=200, null=True, blank=True)\n+ personal_website = models.CharField(max_length=200, null=True, blank=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n", "issue": "Add nullable attribute to the Team Model\nFollowing changes are required in the Team model: \r\n\r\n- [x] Convert the URLFields to CharField with URL\r\n\r\n- [x] Change the fields `email`, `github_url`, `linkedin_url`, `personal_website` to nullable fields\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom base.models import (TimeStampedModel, )\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Model representing details of User submitting queries.\"\"\"\n name = models.CharField(max_length=100,)\n email = models.EmailField(max_length=70,)\n message = models.CharField(max_length=500,)\n\n def __unicode__(self):\n return \"%s: %s: %s\" % (self.name, self.email, self.message)\n\n class Meta:\n app_label = 'web'\n db_table = 'contact'\n\n\nclass Team(models.Model):\n \"\"\"Model representing details of Team\"\"\"\n\n # Team Type Options\n CORE_TEAM = 'Core Team'\n CONTRIBUTOR = 'Contributor'\n\n TEAM_TYPE_OPTIONS = (\n (CORE_TEAM, CORE_TEAM),\n (CONTRIBUTOR, CONTRIBUTOR),\n )\n\n name = models.CharField(max_length=100)\n email = models.EmailField(max_length=70, null=True)\n description = models.TextField(null=True)\n 
headshot = models.ImageField(upload_to=\"headshots\", null=True, blank=True)\n visible = models.BooleanField(default=True)\n github_url = models.CharField(max_length=200, null=True)\n linkedin_url = models.CharField(max_length=200, null=True)\n personal_website = models.CharField(max_length=200, null=True)\n background_image = models.ImageField(upload_to=\"bg-images\", null=True, blank=True)\n team_type = models.CharField(choices=TEAM_TYPE_OPTIONS, max_length=50)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'web'\n db_table = 'teams'\n", "path": "apps/web/models.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2017-03-19 21:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0003_added_description_and_background_image_to_team_model'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='team',\n name='email',\n field=models.EmailField(max_length=70, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='github_url',\n field=models.CharField(max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='linkedin_url',\n field=models.CharField(max_length=200, null=True),\n ),\n migrations.AlterField(\n model_name='team',\n name='personal_website',\n field=models.CharField(max_length=200, null=True),\n ),\n ]\n", "path": "apps/web/migrations/0004_change_team_model_field_type.py"}]}
| 1,384 | 665 |
gh_patches_debug_39809
|
rasdani/github-patches
|
git_diff
|
dask__distributed-246
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Network send/recv to web UI plot
It would be nice to see network activity over time.
We already capture this data in the same way we capture the CPU/Memory data that we already plot, so the job of figuring out how to move the data into the `ColumnDataSource` isn't hard. What _is_ tricky is that we don't have a percentage, but instead have actual MB/s bandwidth counts. We would have to add a second Y-axis (this is doable) and think a bit about how best to set the axes.
This might be an interesting task for anyone looking to get into the Bokeh Web UI, notably @martindurant
</issue>
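The second-Y-axis idea mentioned above maps onto Bokeh's `extra_y_ranges` plus `LinearAxis` mechanism. A minimal sketch follows, with made-up column names and a fixed throughput range; it is not the code that was ultimately merged.

```python
# Two-axis Bokeh figure: utilisation fractions on the left axis,
# throughput in MB/s on a second axis attached on the right.
from bokeh.models import ColumnDataSource, LinearAxis, Range1d
from bokeh.plotting import figure

source = ColumnDataSource({"time": [1, 2, 3],
                           "cpu": [0.2, 0.5, 0.4],
                           "network-send": [1.5, 12.0, 7.5]})

p = figure(width=600, height=300, y_range=Range1d(0, 1))
p.line(x="time", y="cpu", source=source)

# Register a named extra range, then add a right-hand axis bound to it.
p.extra_y_ranges = {"send": Range1d(start=0, end=20)}
p.add_layout(LinearAxis(y_range_name="send", axis_label="MB/s"), "right")
p.line(x="time", y="network-send", source=source, y_range_name="send")
```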
<code>
[start of distributed/bokeh/worker_monitor.py]
1 from __future__ import print_function, division, absolute_import
2
3 from collections import defaultdict
4 from itertools import chain
5
6 from toolz import pluck
7
8 from ..utils import ignoring
9
10 with ignoring(ImportError):
11 from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,
12 NumeralTickFormatter)
13 from bokeh.palettes import Spectral9
14 from bokeh.plotting import figure
15
16
17 def resource_profile_plot(width=600, height=300):
18 names = ['time', 'cpu', 'memory-percent']
19 source = ColumnDataSource({k: [] for k in names})
20
21 x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
22 y_range = Range1d(0, 1)
23 p = figure(width=width, height=height, x_axis_type='datetime',
24 responsive=True, tools='xpan,xwheel_zoom,box_zoom,resize,reset',
25 x_range=x_range, y_range=y_range)
26 p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,
27 color=Spectral9[7], legend='Avg Memory Usage', source=source)
28 p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,
29 color=Spectral9[0], legend='Avg CPU Usage', source=source)
30 p.legend[0].location = 'top_left'
31 p.yaxis[0].formatter = NumeralTickFormatter(format="0 %")
32 p.min_border_right = 10
33
34 return source, p
35
36
37 def resource_profile_update(source, worker_buffer, times_buffer):
38 data = defaultdict(list)
39
40 workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
41
42 for name in ['cpu', 'memory-percent']:
43 data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
44 for msg in worker_buffer]
45 for w in workers]
46
47 data['workers'] = workers
48 data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'
49 for i, t in enumerate(times_buffer)]
50 for w in workers]
51
52 source.data.update(data)
53
54
55 def resource_append(lists, msg):
56 L = list(msg.values())
57 if not L:
58 return
59 for k in ['cpu', 'memory-percent']:
60 lists[k].append(mean(pluck(k, L)) / 100)
61
62 lists['time'].append(mean(pluck('time', L)) * 1000)
63
64
65 def mean(seq):
66 seq = list(seq)
67 return sum(seq) / len(seq)
68
[end of distributed/bokeh/worker_monitor.py]
[start of distributed/bokeh/status/server_lifecycle.py]
1 #!/usr/bin/env python
2 from __future__ import print_function, division, absolute_import
3
4 from collections import deque
5 import json
6 import os
7 from time import time
8
9 from tornado import gen
10 from tornado.httpclient import AsyncHTTPClient
11 from tornado.iostream import StreamClosedError
12 from tornado.ioloop import IOLoop
13
14 from distributed.core import read
15 from distributed.diagnostics.progress_stream import progress_stream
16 from distributed.bokeh.worker_monitor import resource_append
17 import distributed.bokeh
18 from distributed.utils import log_errors
19
20 client = AsyncHTTPClient()
21
22 messages = distributed.bokeh.messages # monkey-patching
23
24 if os.path.exists('.dask-web-ui.json'):
25 with open('.dask-web-ui.json', 'r') as f:
26 options = json.load(f)
27 else:
28 options = {'host': '127.0.0.1',
29 'tcp-port': 8786,
30 'http-port': 9786}
31
32
33 @gen.coroutine
34 def http_get(route):
35 """ Get data from JSON route, store in messages deques """
36 with log_errors():
37 try:
38 response = yield client.fetch(
39 'http://%(host)s:%(http-port)d/' % options
40 + route + '.json')
41 except ConnectionRefusedError:
42 import sys; sys.exit(0)
43 msg = json.loads(response.body.decode())
44 messages[route]['deque'].append(msg)
45 messages[route]['times'].append(time())
46
47
48 last_index = [0]
49 @gen.coroutine
50 def workers():
51 """ Get data from JSON route, store in messages deques """
52 with log_errors():
53 response = yield client.fetch(
54 'http://%(host)s:%(http-port)d/workers.json' % options)
55 msg = json.loads(response.body.decode())
56 if msg:
57 messages['workers']['deque'].append(msg)
58 messages['workers']['times'].append(time())
59 resource_append(messages['workers']['plot-data'], msg)
60 index = messages['workers']['index']
61 index.append(last_index[0] + 1)
62 last_index[0] += 1
63
64
65 @gen.coroutine
66 def progress():
67 with log_errors():
68 stream = yield progress_stream('%(host)s:%(tcp-port)d' % options, 0.050)
69 while True:
70 try:
71 msg = yield read(stream)
72 except StreamClosedError:
73 break
74 else:
75 messages['progress'] = msg
76
77
78 def on_server_loaded(server_context):
79 n = 60
80 messages['workers'] = {'interval': 500,
81 'deque': deque(maxlen=n),
82 'times': deque(maxlen=n),
83 'index': deque(maxlen=n),
84 'plot-data': {'time': deque(maxlen=n),
85 'cpu': deque(maxlen=n),
86 'memory-percent': deque(maxlen=n)}}
87 server_context.add_periodic_callback(workers, 500)
88
89 messages['tasks'] = {'interval': 100,
90 'deque': deque(maxlen=100),
91 'times': deque(maxlen=100)}
92 server_context.add_periodic_callback(lambda: http_get('tasks'), 100)
93
94 messages['progress'] = {'all': {}, 'in_memory': {},
95 'erred': {}, 'released': {}}
96
97 IOLoop.current().add_callback(progress)
98
[end of distributed/bokeh/status/server_lifecycle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/distributed/bokeh/status/server_lifecycle.py b/distributed/bokeh/status/server_lifecycle.py
--- a/distributed/bokeh/status/server_lifecycle.py
+++ b/distributed/bokeh/status/server_lifecycle.py
@@ -83,7 +83,8 @@
'index': deque(maxlen=n),
'plot-data': {'time': deque(maxlen=n),
'cpu': deque(maxlen=n),
- 'memory-percent': deque(maxlen=n)}}
+ 'memory-percent': deque(maxlen=n),
+ 'network-send': deque(maxlen=n)}}
server_context.add_periodic_callback(workers, 500)
messages['tasks'] = {'interval': 100,
diff --git a/distributed/bokeh/worker_monitor.py b/distributed/bokeh/worker_monitor.py
--- a/distributed/bokeh/worker_monitor.py
+++ b/distributed/bokeh/worker_monitor.py
@@ -9,13 +9,13 @@
with ignoring(ImportError):
from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,
- NumeralTickFormatter)
+ NumeralTickFormatter, LinearAxis)
from bokeh.palettes import Spectral9
from bokeh.plotting import figure
def resource_profile_plot(width=600, height=300):
- names = ['time', 'cpu', 'memory-percent']
+ names = ['time', 'cpu', 'memory-percent', 'network-send']
source = ColumnDataSource({k: [] for k in names})
x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
@@ -30,6 +30,14 @@
p.legend[0].location = 'top_left'
p.yaxis[0].formatter = NumeralTickFormatter(format="0 %")
p.min_border_right = 10
+ p.extra_y_ranges = {"send": DataRange1d(bounds=(0, None))}
+ p.add_layout(LinearAxis(y_range_name="send", axis_label="Throughput (MB/s)"),
+ 'right')
+ p.yaxis.axis_label_text_font_size = "10pt"
+
+ p.line(x='time', y='network-send', line_width=2, line_alpha=0.8,
+ color=Spectral9[2], legend='Network', source=source,
+ y_range_name="send")
return source, p
@@ -39,7 +47,7 @@
workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
- for name in ['cpu', 'memory-percent']:
+ for name in ['cpu', 'memory-percent', 'network-send']:
data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
for msg in worker_buffer]
for w in workers]
@@ -60,6 +68,13 @@
lists[k].append(mean(pluck(k, L)) / 100)
lists['time'].append(mean(pluck('time', L)) * 1000)
+ net = mean(pluck('network-send', L, 0))
+ if len(lists['time']) >= 2:
+ t1, t2 = lists['time'][-2], lists['time'][-1]
+ interval = (t2 - t1) / 1000
+ else:
+ interval = 0.5
+ lists['network-send'].append(net / 2**20 / interval)
def mean(seq):
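As an editorial sanity check on the unit conversion in the `resource_append` hunk above — assuming workers report `network-send` as bytes transferred since the previous sample — dividing by `2**20` converts bytes to MiB (labelled MB/s on the plot axis) and dividing by the sampling interval in seconds gives a rate:

```python
# Hypothetical numbers: 5 MiB sent between two samples taken 0.5 s apart.
net = 5 * 2**20                  # bytes since the last sample
t1, t2 = 1000.0, 1500.0          # timestamps in milliseconds, as in the plot data
interval = (t2 - t1) / 1000      # 0.5 seconds
print(net / 2**20 / interval)    # 10.0 -> 10 MiB/s
```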
|
{"golden_diff": "diff --git a/distributed/bokeh/status/server_lifecycle.py b/distributed/bokeh/status/server_lifecycle.py\n--- a/distributed/bokeh/status/server_lifecycle.py\n+++ b/distributed/bokeh/status/server_lifecycle.py\n@@ -83,7 +83,8 @@\n 'index': deque(maxlen=n),\n 'plot-data': {'time': deque(maxlen=n),\n 'cpu': deque(maxlen=n),\n- 'memory-percent': deque(maxlen=n)}}\n+ 'memory-percent': deque(maxlen=n),\n+ 'network-send': deque(maxlen=n)}}\n server_context.add_periodic_callback(workers, 500)\n \n messages['tasks'] = {'interval': 100,\ndiff --git a/distributed/bokeh/worker_monitor.py b/distributed/bokeh/worker_monitor.py\n--- a/distributed/bokeh/worker_monitor.py\n+++ b/distributed/bokeh/worker_monitor.py\n@@ -9,13 +9,13 @@\n \n with ignoring(ImportError):\n from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,\n- NumeralTickFormatter)\n+ NumeralTickFormatter, LinearAxis)\n from bokeh.palettes import Spectral9\n from bokeh.plotting import figure\n \n \n def resource_profile_plot(width=600, height=300):\n- names = ['time', 'cpu', 'memory-percent']\n+ names = ['time', 'cpu', 'memory-percent', 'network-send']\n source = ColumnDataSource({k: [] for k in names})\n \n x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)\n@@ -30,6 +30,14 @@\n p.legend[0].location = 'top_left'\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n p.min_border_right = 10\n+ p.extra_y_ranges = {\"send\": DataRange1d(bounds=(0, None))}\n+ p.add_layout(LinearAxis(y_range_name=\"send\", axis_label=\"Throughput (MB/s)\"),\n+ 'right')\n+ p.yaxis.axis_label_text_font_size = \"10pt\"\n+\n+ p.line(x='time', y='network-send', line_width=2, line_alpha=0.8,\n+ color=Spectral9[2], legend='Network', source=source,\n+ y_range_name=\"send\")\n \n return source, p\n \n@@ -39,7 +47,7 @@\n \n workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))\n \n- for name in ['cpu', 'memory-percent']:\n+ for name in ['cpu', 'memory-percent', 'network-send']:\n data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'\n for msg in worker_buffer]\n for w in workers]\n@@ -60,6 +68,13 @@\n lists[k].append(mean(pluck(k, L)) / 100)\n \n lists['time'].append(mean(pluck('time', L)) * 1000)\n+ net = mean(pluck('network-send', L, 0))\n+ if len(lists['time']) >= 2:\n+ t1, t2 = lists['time'][-2], lists['time'][-1]\n+ interval = (t2 - t1) / 1000\n+ else:\n+ interval = 0.5\n+ lists['network-send'].append(net / 2**20 / interval)\n \n \n def mean(seq):\n", "issue": "Add Network send/recv to web UI plot\nIt would be nice to see network activity over time.\n\nWe already capture this data in the same way we capture the CPU/Memory data that we already plot, so the job of figuring out how to move the data into the `ColumnDataSource` isn't hard. What _is_ tricky is that we don't have a percentage, but instead have actual MB/s bandwidth counts. 
We would have to add a second Y-axis (this is doable) and think a bit about how best to set the axes.\n\nThis might be an interesting task for anyone looking to get into the Bokeh Web UI, notably @martindurant \n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom collections import defaultdict\nfrom itertools import chain\n\nfrom toolz import pluck\n\nfrom ..utils import ignoring\n\nwith ignoring(ImportError):\n from bokeh.models import (ColumnDataSource, DataRange1d, Range1d,\n NumeralTickFormatter)\n from bokeh.palettes import Spectral9\n from bokeh.plotting import figure\n\n\ndef resource_profile_plot(width=600, height=300):\n names = ['time', 'cpu', 'memory-percent']\n source = ColumnDataSource({k: [] for k in names})\n\n x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)\n y_range = Range1d(0, 1)\n p = figure(width=width, height=height, x_axis_type='datetime',\n responsive=True, tools='xpan,xwheel_zoom,box_zoom,resize,reset',\n x_range=x_range, y_range=y_range)\n p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,\n color=Spectral9[7], legend='Avg Memory Usage', source=source)\n p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,\n color=Spectral9[0], legend='Avg CPU Usage', source=source)\n p.legend[0].location = 'top_left'\n p.yaxis[0].formatter = NumeralTickFormatter(format=\"0 %\")\n p.min_border_right = 10\n\n return source, p\n\n\ndef resource_profile_update(source, worker_buffer, times_buffer):\n data = defaultdict(list)\n\n workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))\n\n for name in ['cpu', 'memory-percent']:\n data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'\n for msg in worker_buffer]\n for w in workers]\n\n data['workers'] = workers\n data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'\n for i, t in enumerate(times_buffer)]\n for w in workers]\n\n source.data.update(data)\n\n\ndef resource_append(lists, msg):\n L = list(msg.values())\n if not L:\n return\n for k in ['cpu', 'memory-percent']:\n lists[k].append(mean(pluck(k, L)) / 100)\n\n lists['time'].append(mean(pluck('time', L)) * 1000)\n\n\ndef mean(seq):\n seq = list(seq)\n return sum(seq) / len(seq)\n", "path": "distributed/bokeh/worker_monitor.py"}, {"content": "#!/usr/bin/env python\nfrom __future__ import print_function, division, absolute_import\n\nfrom collections import deque\nimport json\nimport os\nfrom time import time\n\nfrom tornado import gen\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.iostream import StreamClosedError\nfrom tornado.ioloop import IOLoop\n\nfrom distributed.core import read\nfrom distributed.diagnostics.progress_stream import progress_stream\nfrom distributed.bokeh.worker_monitor import resource_append\nimport distributed.bokeh\nfrom distributed.utils import log_errors\n\nclient = AsyncHTTPClient()\n\nmessages = distributed.bokeh.messages # monkey-patching\n\nif os.path.exists('.dask-web-ui.json'):\n with open('.dask-web-ui.json', 'r') as f:\n options = json.load(f)\nelse:\n options = {'host': '127.0.0.1',\n 'tcp-port': 8786,\n 'http-port': 9786}\n\n\[email protected]\ndef http_get(route):\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n try:\n response = yield client.fetch(\n 'http://%(host)s:%(http-port)d/' % options\n + route + '.json')\n except ConnectionRefusedError:\n import sys; sys.exit(0)\n msg = json.loads(response.body.decode())\n 
messages[route]['deque'].append(msg)\n messages[route]['times'].append(time())\n\n\nlast_index = [0]\[email protected]\ndef workers():\n \"\"\" Get data from JSON route, store in messages deques \"\"\"\n with log_errors():\n response = yield client.fetch(\n 'http://%(host)s:%(http-port)d/workers.json' % options)\n msg = json.loads(response.body.decode())\n if msg:\n messages['workers']['deque'].append(msg)\n messages['workers']['times'].append(time())\n resource_append(messages['workers']['plot-data'], msg)\n index = messages['workers']['index']\n index.append(last_index[0] + 1)\n last_index[0] += 1\n\n\[email protected]\ndef progress():\n with log_errors():\n stream = yield progress_stream('%(host)s:%(tcp-port)d' % options, 0.050)\n while True:\n try:\n msg = yield read(stream)\n except StreamClosedError:\n break\n else:\n messages['progress'] = msg\n\n\ndef on_server_loaded(server_context):\n n = 60\n messages['workers'] = {'interval': 500,\n 'deque': deque(maxlen=n),\n 'times': deque(maxlen=n),\n 'index': deque(maxlen=n),\n 'plot-data': {'time': deque(maxlen=n),\n 'cpu': deque(maxlen=n),\n 'memory-percent': deque(maxlen=n)}}\n server_context.add_periodic_callback(workers, 500)\n\n messages['tasks'] = {'interval': 100,\n 'deque': deque(maxlen=100),\n 'times': deque(maxlen=100)}\n server_context.add_periodic_callback(lambda: http_get('tasks'), 100)\n\n messages['progress'] = {'all': {}, 'in_memory': {},\n 'erred': {}, 'released': {}}\n\n IOLoop.current().add_callback(progress)\n", "path": "distributed/bokeh/status/server_lifecycle.py"}]}
| 2,346 | 806 |
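The hunk above derives throughput by dividing a byte count by an interval computed from consecutive timestamps (stored in milliseconds), falling back to 0.5 s when fewer than two samples exist. A standalone sketch of that arithmetic, written for illustration rather than taken from dask/distributed:

```python
def mib_per_second(bytes_sent: float, t_start_ms: float, t_end_ms: float) -> float:
    """Convert a byte count measured between two millisecond timestamps to MiB/s."""
    interval_s = (t_end_ms - t_start_ms) / 1000 or 0.5  # same 0.5 s fallback as the hunk
    return bytes_sent / 2**20 / interval_s


# 64 MiB sent over a 500 ms window -> 128.0 MiB/s
print(mib_per_second(64 * 2**20, 1_000.0, 1_500.0))
```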
gh_patches_debug_15190
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-490
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release Ariadne 0.13
# TODO
- [x] Update dependencies
- [x] Fix linter errors on repo
- [x] #483
- [x] Update changelog
- [x] Write [release notes](https://github.com/mirumee/ariadne-website/pull/75)
- [x] Reach out to our amazing art team for tweet graphics
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.6",
12 "Programming Language :: Python :: 3.7",
13 "Programming Language :: Python :: 3.8",
14 "Topic :: Software Development :: Libraries :: Python Modules",
15 ]
16
17 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
18 with open(README_PATH, "r", encoding="utf8") as f:
19 README = f.read()
20
21 setup(
22 name="ariadne",
23 author="Mirumee Software",
24 author_email="[email protected]",
25 description="Ariadne is a Python library for implementing GraphQL servers.",
26 long_description=README,
27 long_description_content_type="text/markdown",
28 license="BSD",
29 version="0.12.0",
30 url="https://github.com/mirumee/ariadne",
31 packages=["ariadne"],
32 include_package_data=True,
33 install_requires=[
34 "graphql-core>=3.1.0",
35 "starlette<0.15",
36 "typing_extensions>=3.6.0",
37 ],
38 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
39 classifiers=CLASSIFIERS,
40 platforms=["any"],
41 zip_safe=False,
42 )
43
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
]
@@ -26,7 +27,7 @@
long_description=README,
long_description_content_type="text/markdown",
license="BSD",
- version="0.12.0",
+ version="0.13.0",
url="https://github.com/mirumee/ariadne",
packages=["ariadne"],
include_package_data=True,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,6 +11,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ]\n \n@@ -26,7 +27,7 @@\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n- version=\"0.12.0\",\n+ version=\"0.13.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n", "issue": "Release Ariadne 0.13\n# TODO\r\n\r\n- [x] Update dependencies\r\n- [x] Fix linters errors on repo\r\n- [x] #483 \r\n- [x] Update changelog\r\n- [x] Write [release notes](https://github.com/mirumee/ariadne-website/pull/75)\r\n- [x] Reach to our amazing art team for tweet graphics\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.12.0\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.1.0\",\n \"starlette<0.15\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
| 1,037 | 179 |
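After a version bump like the one in this diff, the installed distribution can be checked with the standard library alone; a small, hedged sanity check (not part of the Ariadne repository):

```python
from importlib.metadata import version  # available since Python 3.8

# Prints "0.13.0" once the release above is installed.
print(version("ariadne"))
```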
gh_patches_debug_25353
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-3759
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement Negation operation for FV HE Scheme
## Feature Description
The negation operation for the FV scheme needs to be implemented.
It should negate a ciphertext object and return the result in ciphertext form.
</issue>
<code>
[start of syft/frameworks/torch/he/fv/evaluator.py]
1 import copy
2
3 from syft.frameworks.torch.he.fv.util.operations import poly_add_mod
4 from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta
5 from syft.frameworks.torch.he.fv.ciphertext import CipherText
6 from syft.frameworks.torch.he.fv.plaintext import PlainText
7
8
9 class Evaluator:
10 def __init__(self, context):
11 self.context = context
12 self.coeff_modulus = context.param.coeff_modulus
13 self.plain_modulus = context.param.plain_modulus
14
15 def add(self, op1, op2):
16 """Adds two operands using FV scheme.
17
18 Args:
19 op1 (Ciphertext/Plaintext): First argument.
20 op2 (Ciphertext/Plaintext): Second argument.
21
22 Returns:
23 If both arguments are Plaintext elements then the result will be a Plaintext object
24 otherwise a Ciphertext object with value equivalent to the result of addition
25 operation of two provided arguments.
26 """
27 if isinstance(op1, CipherText) and isinstance(op2, CipherText):
28 return self._add_cipher_cipher(op1, op2)
29
30 elif isinstance(op1, PlainText) and isinstance(op2, PlainText):
31 return self._add_plain_plain(op1, op2)
32
33 elif isinstance(op1, PlainText) and isinstance(op2, CipherText):
34 return self._add_plain_cipher(op1, op2)
35
36 elif isinstance(op1, CipherText) and isinstance(op2, PlainText):
37 return self._add_plain_cipher(op2, op1)
38
39 else:
40 raise TypeError(f"Addition Operation not supported between {type(op1)} and {type(op2)}")
41
42 def _add_cipher_cipher(self, ct1, ct2):
43 """Adds two ciphertexts.
44
45 Args:
46 ct1 (Ciphertext): First argument.
47 ct2 (Ciphertext): Second argument.
48
49 Returns:
50 A Ciphertext object with value equivalent to result of addition of two provided
51 arguments.
52 """
53 ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)
54 result = ct2 if len(ct2) > len(ct1) else ct1
55
56 for i in range(min(len(ct1), len(ct2))):
57 for j in range(len(self.coeff_modulus)):
58 result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])
59
60 return CipherText(result)
61
62 def _add_plain_cipher(self, pt, ct):
63 """Adds a ciphertext and a plaintext.
64
65 Args:
66 pt (Plaintext): First argument.
67 ct (Ciphertext): Second argument.
68 Returns:
69 A Ciphertext object with value equivalent to result of addition of two provided
70 arguments.
71 """
72 ct = copy.deepcopy(ct)
73 return multiply_add_plain_with_delta(ct, pt, self.context)
74
75 def _add_plain_plain(self, pt1, pt2):
76 """Adds two plaintexts object.
77
78 Args:
79 pt1 (Plaintext): First argument.
80 pt2 (Plaintext): Second argument.
81
82 Returns:
83 A Plaintext object with value equivalent to result of addition of two provided
84 arguments.
85 """
86 pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)
87 return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))
88
[end of syft/frameworks/torch/he/fv/evaluator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/syft/frameworks/torch/he/fv/evaluator.py b/syft/frameworks/torch/he/fv/evaluator.py
--- a/syft/frameworks/torch/he/fv/evaluator.py
+++ b/syft/frameworks/torch/he/fv/evaluator.py
@@ -1,6 +1,7 @@
import copy
from syft.frameworks.torch.he.fv.util.operations import poly_add_mod
+from syft.frameworks.torch.he.fv.util.operations import negate_mod
from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta
from syft.frameworks.torch.he.fv.ciphertext import CipherText
from syft.frameworks.torch.he.fv.plaintext import PlainText
@@ -39,6 +40,24 @@
else:
raise TypeError(f"Addition Operation not supported between {type(op1)} and {type(op2)}")
+ def negate(self, ct):
+ """Negate a cipher i.e -(ct_value)
+
+ Args:
+ ct (Ciphertext): Ciphertext to be negated.
+
+ Returns:
+ A Ciphertext object with value equivalent to result of -(ct_value).
+ """
+ result = copy.deepcopy(ct.data)
+
+ for i in range(len(result)):
+ for j in range(len(result[i])):
+ for k in range(len(result[i][j])):
+ result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])
+
+ return CipherText(result)
+
def _add_cipher_cipher(self, ct1, ct2):
"""Adds two ciphertexts.
|
{"golden_diff": "diff --git a/syft/frameworks/torch/he/fv/evaluator.py b/syft/frameworks/torch/he/fv/evaluator.py\n--- a/syft/frameworks/torch/he/fv/evaluator.py\n+++ b/syft/frameworks/torch/he/fv/evaluator.py\n@@ -1,6 +1,7 @@\n import copy\n \n from syft.frameworks.torch.he.fv.util.operations import poly_add_mod\n+from syft.frameworks.torch.he.fv.util.operations import negate_mod\n from syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\n from syft.frameworks.torch.he.fv.ciphertext import CipherText\n from syft.frameworks.torch.he.fv.plaintext import PlainText\n@@ -39,6 +40,24 @@\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n \n+ def negate(self, ct):\n+ \"\"\"Negate a cipher i.e -(ct_value)\n+\n+ Args:\n+ ct (Ciphertext): Ciphertext to be negated.\n+\n+ Returns:\n+ A Ciphertext object with value equivalent to result of -(ct_value).\n+ \"\"\"\n+ result = copy.deepcopy(ct.data)\n+\n+ for i in range(len(result)):\n+ for j in range(len(result[i])):\n+ for k in range(len(result[i][j])):\n+ result[i][j][k] = negate_mod(ct.data[i][j][k], self.coeff_modulus[j])\n+\n+ return CipherText(result)\n+\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n", "issue": "Implement Negation operation for FV HE Scheme\n## Feature Description\r\nNegation operations of FV Scheme need to be implemented.\r\n\r\nIt should Negate a ciphertext object and return the result in ciphertext form.\n", "before_files": [{"content": "import copy\n\nfrom syft.frameworks.torch.he.fv.util.operations import poly_add_mod\nfrom syft.frameworks.torch.he.fv.util.operations import multiply_add_plain_with_delta\nfrom syft.frameworks.torch.he.fv.ciphertext import CipherText\nfrom syft.frameworks.torch.he.fv.plaintext import PlainText\n\n\nclass Evaluator:\n def __init__(self, context):\n self.context = context\n self.coeff_modulus = context.param.coeff_modulus\n self.plain_modulus = context.param.plain_modulus\n\n def add(self, op1, op2):\n \"\"\"Adds two operands using FV scheme.\n\n Args:\n op1 (Ciphertext/Plaintext): First argument.\n op2 (Ciphertext/Plaintext): Second argument.\n\n Returns:\n If both arguments are Plaintext elements then the result will be a Plaintext object\n otherwise a Ciphertext object with value equivalent to the result of addition\n operation of two provided arguments.\n \"\"\"\n if isinstance(op1, CipherText) and isinstance(op2, CipherText):\n return self._add_cipher_cipher(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, PlainText):\n return self._add_plain_plain(op1, op2)\n\n elif isinstance(op1, PlainText) and isinstance(op2, CipherText):\n return self._add_plain_cipher(op1, op2)\n\n elif isinstance(op1, CipherText) and isinstance(op2, PlainText):\n return self._add_plain_cipher(op2, op1)\n\n else:\n raise TypeError(f\"Addition Operation not supported between {type(op1)} and {type(op2)}\")\n\n def _add_cipher_cipher(self, ct1, ct2):\n \"\"\"Adds two ciphertexts.\n\n Args:\n ct1 (Ciphertext): First argument.\n ct2 (Ciphertext): Second argument.\n\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct1, ct2 = copy.deepcopy(ct1.data), copy.deepcopy(ct2.data)\n result = ct2 if len(ct2) > len(ct1) else ct1\n\n for i in range(min(len(ct1), len(ct2))):\n for j in range(len(self.coeff_modulus)):\n result[i][j] = poly_add_mod(ct1[i][j], ct2[i][j], self.coeff_modulus[j])\n\n return CipherText(result)\n\n def 
_add_plain_cipher(self, pt, ct):\n \"\"\"Adds a ciphertext and a plaintext.\n\n Args:\n pt (Plaintext): First argument.\n ct (Ciphertext): Second argument.\n Returns:\n A Ciphertext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n ct = copy.deepcopy(ct)\n return multiply_add_plain_with_delta(ct, pt, self.context)\n\n def _add_plain_plain(self, pt1, pt2):\n \"\"\"Adds two plaintexts object.\n\n Args:\n pt1 (Plaintext): First argument.\n pt2 (Plaintext): Second argument.\n\n Returns:\n A Plaintext object with value equivalent to result of addition of two provided\n arguments.\n \"\"\"\n pt1, pt2 = copy.deepcopy(pt1), copy.deepcopy(pt2)\n return PlainText(poly_add_mod(pt1.data, pt2.data, self.plain_modulus))\n", "path": "syft/frameworks/torch/he/fv/evaluator.py"}]}
| 1,501 | 368 |
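The golden diff above calls a `negate_mod` helper that the listing does not show. A minimal sketch of what coefficient-wise modular negation typically does, assumed for illustration rather than copied from PySyft:

```python
def negate_mod(x: int, q: int) -> int:
    """Return (-x) mod q, the additive inverse of x in Z_q."""
    return (q - (x % q)) % q


# In Z_17 the additive inverse of 5 is 12, since (5 + 12) % 17 == 0.
assert negate_mod(5, 17) == 12
assert negate_mod(0, 17) == 0
```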
gh_patches_debug_28241
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2589
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given
Sentry Issue: [CONCREXIT-JD](https://sentry.io/organizations/thalia/issues/3668103253/?referrer=github_integration)
```
TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given
(5 additional frame(s) were not displayed)
...
File "django/contrib/admin/sites.py", line 242, in inner
return view(request, *args, **kwargs)
File "django/utils/decorators.py", line 46, in _wrapper
return bound_method(*args, **kwargs)
File "django/utils/decorators.py", line 133, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "django/contrib/admin/options.py", line 1959, in changelist_view
response = self.response_action(
File "django/contrib/admin/options.py", line 1588, in response_action
response = func(self, request, queryset)
```
</issue>
<code>
[start of website/promotion/admin.py]
1 """Registers admin interfaces for the models defined in this module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4
5 from promotion.forms import PromotionRequestForm
6 from events.services import is_organiser
7
8 from .models import PromotionChannel, PromotionRequest
9
10
11 @admin.register(PromotionRequest)
12 class PromotionRequestAdmin(admin.ModelAdmin):
13 """This manages the admin interface for the model items."""
14
15 list_display = ("event", "publish_date", "channel", "assigned_to", "status")
16 list_filter = (
17 "publish_date",
18 "assigned_to",
19 "status",
20 )
21 date_hierarchy = "publish_date"
22 form = PromotionRequestForm
23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
24
25 def has_change_permission(self, request, obj=None):
26 if obj is not None and not is_organiser(request.member, obj.event):
27 return False
28 return super().has_change_permission(request, obj)
29
30 def mark_not_started(self, queryset):
31 """Change the status of the event to published."""
32 self._change_published(queryset, PromotionRequest.NOT_STARTED)
33
34 mark_not_started.short_description = "Mark requests as not started"
35
36 def mark_started(self, queryset):
37 """Change the status of the event to published."""
38 self._change_published(queryset, PromotionRequest.STARTED)
39
40 mark_started.short_description = "Mark requests as started"
41
42 def mark_finished(self, queryset):
43 """Change the status of the event to published."""
44 self._change_published(queryset, PromotionRequest.FINISHED)
45
46 mark_finished.short_description = "Mark requests as finished"
47
48 def mark_published(self, queryset):
49 """Change the status of the event to published."""
50 self._change_published(queryset, PromotionRequest.PUBLISHED)
51
52 mark_published.short_description = "Mark requests as published"
53
54 @staticmethod
55 def _change_published(queryset, status):
56 queryset.update(status=status)
57
58
59 @admin.register(PromotionChannel)
60 class PromotionChannelAdmin(ModelAdmin):
61 pass
62
[end of website/promotion/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/promotion/admin.py b/website/promotion/admin.py
--- a/website/promotion/admin.py
+++ b/website/promotion/admin.py
@@ -27,25 +27,25 @@
return False
return super().has_change_permission(request, obj)
- def mark_not_started(self, queryset):
+ def mark_not_started(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.NOT_STARTED)
mark_not_started.short_description = "Mark requests as not started"
- def mark_started(self, queryset):
+ def mark_started(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.STARTED)
mark_started.short_description = "Mark requests as started"
- def mark_finished(self, queryset):
+ def mark_finished(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.FINISHED)
mark_finished.short_description = "Mark requests as finished"
- def mark_published(self, queryset):
+ def mark_published(self, request, queryset):
"""Change the status of the event to published."""
self._change_published(queryset, PromotionRequest.PUBLISHED)
|
{"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -27,25 +27,25 @@\n return False\n return super().has_change_permission(request, obj)\n \n- def mark_not_started(self, queryset):\n+ def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n \n mark_not_started.short_description = \"Mark requests as not started\"\n \n- def mark_started(self, queryset):\n+ def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n \n mark_started.short_description = \"Mark requests as started\"\n \n- def mark_finished(self, queryset):\n+ def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n \n mark_finished.short_description = \"Mark requests as finished\"\n \n- def mark_published(self, queryset):\n+ def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n", "issue": "TypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given\nSentry Issue: [CONCREXIT-JD](https://sentry.io/organizations/thalia/issues/3668103253/?referrer=github_integration)\n\n```\nTypeError: PromotionRequestAdmin.mark_finished() takes 2 positional arguments but 3 were given\n(5 additional frame(s) were not displayed)\n...\n File \"django/contrib/admin/sites.py\", line 242, in inner\n return view(request, *args, **kwargs)\n File \"django/utils/decorators.py\", line 46, in _wrapper\n return bound_method(*args, **kwargs)\n File \"django/utils/decorators.py\", line 133, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"django/contrib/admin/options.py\", line 1959, in changelist_view\n response = self.response_action(\n File \"django/contrib/admin/options.py\", line 1588, in response_action\n response = func(self, request, queryset)\n```\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom promotion.forms import PromotionRequestForm\nfrom events.services import is_organiser\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and not is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, queryset):\n \"\"\"Change the status of the 
event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}]}
| 1,333 | 290 |
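For context, Django invokes admin actions as `action(modeladmin, request, queryset)`, which is why the fix adds `request` to every action method. A minimal standalone illustration with a hypothetical admin class and placeholder status value:

```python
from django.contrib import admin


class ExampleRequestAdmin(admin.ModelAdmin):
    actions = ["mark_finished"]

    def mark_finished(self, request, queryset):
        # The admin action protocol always passes the current HttpRequest.
        queryset.update(status="finished")  # "finished" is a placeholder value

    mark_finished.short_description = "Mark selected requests as finished"
```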
gh_patches_debug_13577
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-1397
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Service "s3" not yet available, retrying...
Hello there
After installing localstack and trying to start several services on my machine, s3 always failed to start
The command I am using to start them up is
`SERVICES=sqs,sns,s3,lambda DEBUG=1 localstack start`
(With `DEBUG=1` in place already for debugging)
First few lines of the output are:
```
2018-06-19T10:05:57:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:00:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:05:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:08:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:12:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:15:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:19:WARNING:infra.py: Service "s3" not yet available, retrying...
2018-06-19T10:06:22:ERROR:localstack.services.s3.s3_starter: S3 health check failed: An error occurred (ExpiredToken) when calling the AssumeRole operation: The security token included in the request is expired Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 344, in check_infra
raise e
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 341, in check_infra
plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)
File "/usr/local/lib/python3.6/site-packages/localstack/services/infra.py", line 80, in check
return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)
File "/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_starter.py", line 23, in check_s3
assert isinstance(out['Buckets'], list)
TypeError: 'NoneType' object is not subscriptable
During handling of the above exception, another exception occurred:
...
```
I have been trying to tackle this problem for a few hours already, without any success. I tried the latest version of localstack as well as 0.8.6.1, which works on another machine.
I am installing it through pip (`pip install localstack`)
Thanks for any help in advance!
┆Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-309) by [Unito](https://www.unito.io/learn-more)
</issue>
<code>
[start of localstack/services/s3/s3_starter.py]
1 import sys
2 import logging
3 import traceback
4 from moto.s3 import models as s3_models
5 from moto.server import main as moto_main
6 from localstack import config
7 from localstack.constants import DEFAULT_PORT_S3_BACKEND
8 from localstack.utils.aws import aws_stack
9 from localstack.utils.common import wait_for_port_open
10 from localstack.services.infra import (
11 get_service_protocol, start_proxy_for_service, do_run, setup_logging)
12
13 LOGGER = logging.getLogger(__name__)
14
15 # max file size for S3 objects (in MB)
16 S3_MAX_FILE_SIZE_MB = 128
17
18
19 def check_s3(expect_shutdown=False, print_error=False):
20 out = None
21 try:
22 # wait for port to be opened
23 wait_for_port_open(DEFAULT_PORT_S3_BACKEND)
24 # check S3
25 out = aws_stack.connect_to_service(service_name='s3').list_buckets()
26 except Exception as e:
27 if print_error:
28 LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))
29 if expect_shutdown:
30 assert out is None
31 else:
32 assert isinstance(out['Buckets'], list)
33
34
35 def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
36 port = port or config.PORT_S3
37 backend_port = DEFAULT_PORT_S3_BACKEND
38 cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
39 print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
40 start_proxy_for_service('s3', port, backend_port, update_listener)
41 env_vars = {'PYTHONPATH': ':'.join(sys.path)}
42 return do_run(cmd, asynchronous, env_vars=env_vars)
43
44
45 def apply_patches():
46 s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024
47
48 def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):
49 return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,
50 version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)
51
52 original_init = s3_models.FakeKey.__init__
53 s3_models.FakeKey.__init__ = init
54
55
56 def main():
57 setup_logging()
58 # patch moto implementation
59 apply_patches()
60 # start API
61 sys.exit(moto_main())
62
63
64 if __name__ == '__main__':
65 main()
66
[end of localstack/services/s3/s3_starter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py
--- a/localstack/services/s3/s3_starter.py
+++ b/localstack/services/s3/s3_starter.py
@@ -35,7 +35,7 @@
def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.PORT_S3
backend_port = DEFAULT_PORT_S3_BACKEND
- cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
+ cmd = '%s "%s" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)
print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
start_proxy_for_service('s3', port, backend_port, update_listener)
env_vars = {'PYTHONPATH': ':'.join(sys.path)}
|
{"golden_diff": "diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py\n--- a/localstack/services/s3/s3_starter.py\n+++ b/localstack/services/s3/s3_starter.py\n@@ -35,7 +35,7 @@\n def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n- cmd = 'python \"%s\" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)\n+ cmd = '%s \"%s\" s3 -p %s -H 0.0.0.0' % (sys.executable, __file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n", "issue": "Service \"s3\" not yet available, retrying...\nHello there\n\nAfter installing localstack and trying to start several services on my machine, s3 always failed to start\nThe command I am using to start them up is\n\n`SERVICES=sqs,sns,s3,lambda DEBUG=1 localstack start`\n(With `DEBUG=1` in place already for debugging)\n\nFirst few lines of the output are:\n\n```\n2018-06-19T10:05:57:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:00:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:05:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:08:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:12:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:15:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:19:WARNING:infra.py: Service \"s3\" not yet available, retrying...\n2018-06-19T10:06:22:ERROR:localstack.services.s3.s3_starter: S3 health check failed: An error occurred (ExpiredToken) when calling the AssumeRole operation: The security token included in the request is expired Traceback (most recent call last):\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 344, in check_infra\n raise e\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 341, in check_infra\n plugin.check(expect_shutdown=expect_shutdown, print_error=print_error)\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/infra.py\", line 80, in check\n return self.check_function(expect_shutdown=expect_shutdown, print_error=print_error)\n File \"/usr/local/lib/python3.6/site-packages/localstack/services/s3/s3_starter.py\", line 23, in check_s3\n assert isinstance(out['Buckets'], list)\nTypeError: 'NoneType' object is not subscriptable\n\nDuring handling of the above exception, another exception occurred:\n...\n```\n\nI have been trying to tackle this problem for a few hours already, without any success, I tried the latest verion of localstack as well as 0.8.6.1 which works on another machine\n\nI am installing it trough pip (`pip install localstack`)\n\nThanks for any help in advance!\n\n\n\n\u2506Issue is synchronized with this [Jira Bug](https://localstack.atlassian.net/browse/LOC-309) by [Unito](https://www.unito.io/learn-more)\n\n", "before_files": [{"content": "import sys\nimport logging\nimport traceback\nfrom moto.s3 import models as s3_models\nfrom moto.server import main as moto_main\nfrom localstack import config\nfrom localstack.constants import DEFAULT_PORT_S3_BACKEND\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import wait_for_port_open\nfrom localstack.services.infra import (\n get_service_protocol, 
start_proxy_for_service, do_run, setup_logging)\n\nLOGGER = logging.getLogger(__name__)\n\n# max file size for S3 objects (in MB)\nS3_MAX_FILE_SIZE_MB = 128\n\n\ndef check_s3(expect_shutdown=False, print_error=False):\n out = None\n try:\n # wait for port to be opened\n wait_for_port_open(DEFAULT_PORT_S3_BACKEND)\n # check S3\n out = aws_stack.connect_to_service(service_name='s3').list_buckets()\n except Exception as e:\n if print_error:\n LOGGER.error('S3 health check failed: %s %s' % (e, traceback.format_exc()))\n if expect_shutdown:\n assert out is None\n else:\n assert isinstance(out['Buckets'], list)\n\n\ndef start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):\n port = port or config.PORT_S3\n backend_port = DEFAULT_PORT_S3_BACKEND\n cmd = 'python \"%s\" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)\n print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))\n start_proxy_for_service('s3', port, backend_port, update_listener)\n env_vars = {'PYTHONPATH': ':'.join(sys.path)}\n return do_run(cmd, asynchronous, env_vars=env_vars)\n\n\ndef apply_patches():\n s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024\n\n def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):\n return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,\n version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)\n\n original_init = s3_models.FakeKey.__init__\n s3_models.FakeKey.__init__ = init\n\n\ndef main():\n setup_logging()\n # patch moto implementation\n apply_patches()\n # start API\n sys.exit(moto_main())\n\n\nif __name__ == '__main__':\n main()\n", "path": "localstack/services/s3/s3_starter.py"}]}
| 1,950 | 226 |
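The golden diff replaces the hard-coded `python` with `sys.executable`, so the backend subprocess runs under the same interpreter as LocalStack itself. A generic sketch of that pattern, unrelated to the LocalStack codebase:

```python
import subprocess
import sys

# sys.executable is the absolute path of the running interpreter, so the child
# process cannot accidentally resolve a different "python" from PATH.
subprocess.run([sys.executable, "-c", "import sys; print(sys.version)"], check=True)
```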
gh_patches_debug_1000
|
rasdani/github-patches
|
git_diff
|
replicate__cog-653
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support lists of `BaseModel` for outputs
The following model fails on Replicate.com with a cryptic `Can't pickle <class 'predict.Output'>: it's not the same object as predict.Output` error:
```python
class Output(BaseModel):
foo: str
bar: str
class Predictor(BasePredictor):
def predict(self) -> List[Output]:
return [Output(foo="foo", bar="bar")]
```
This is [documented deep in Cog's Python layer](https://github.com/replicate/cog/blob/main/python/cog/server/runner.py#L238).
We should support this type of output.
</issue>
<code>
[start of python/cog/server/runner.py]
1 import multiprocessing
2 import types
3 from enum import Enum
4 from multiprocessing.connection import Connection
5 from typing import Any, Dict, List, Optional
6
7 from pydantic import BaseModel
8
9 from ..predictor import load_config, load_predictor
10 from .log_capture import capture_log
11
12
13 class PredictionRunner:
14 PROCESSING_DONE = 1
15
16 class OutputType(Enum):
17 NOT_STARTED = 0
18 SINGLE = 1
19 GENERATOR = 2
20
21 def __init__(self) -> None:
22 self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(
23 duplex=False
24 )
25 (
26 self.prediction_input_pipe_reader,
27 self.prediction_input_pipe_writer,
28 ) = multiprocessing.Pipe(duplex=False)
29 self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(
30 duplex=False
31 )
32 self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(
33 duplex=False
34 )
35 self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(
36 duplex=False
37 )
38
39 def setup(self) -> None:
40 """
41 Sets up the predictor in a subprocess. Blocks until the predictor has
42 finished setup. To start a prediction after setup call `run()`.
43 """
44 # `multiprocessing.get_context("spawn")` returns the same API as
45 # `multiprocessing`, but will use the spawn method when creating any
46 # subprocess. Using the spawn method for the predictor subprocess is
47 # useful for compatibility with CUDA, which cannot run in a process
48 # that gets forked. If we can guarantee that all initialization happens
49 # within the subprocess, we could probably get away with using fork
50 # here instead.
51 self.predictor_process = multiprocessing.get_context("spawn").Process(
52 target=self._start_predictor_process
53 )
54
55 self._is_processing = True
56 self.predictor_process.start()
57
58 # poll with an infinite timeout to avoid burning resources in the loop
59 while self.done_pipe_reader.poll(timeout=None) and self.is_processing():
60 pass
61
62 def _start_predictor_process(self) -> None:
63 config = load_config()
64 self.predictor = load_predictor(config)
65 self.predictor.setup()
66
67 # tell the main process we've finished setup
68 self.done_pipe_writer.send(self.PROCESSING_DONE)
69
70 while True:
71 try:
72 prediction_input = self.prediction_input_pipe_reader.recv()
73 self._run_prediction(prediction_input)
74 except EOFError:
75 continue
76
77 def run(self, **prediction_input: Dict[str, Any]) -> None:
78 """
79 Starts running a prediction in the predictor subprocess, using the
80 inputs provided in `prediction_input`.
81
82 The subprocess will send prediction output and logs to pipes as soon as
83 they're available. You can check if the pipes have any data using
84 `has_output_waiting()` and `has_logs_waiting()`. You can read data from
85 the pipes using `read_output()` and `read_logs()`.
86
87 Use `is_processing()` to check whether more data is expected in the
88 pipe for prediction output.
89 """
90 # We're starting processing!
91 self._is_processing = True
92
93 # We don't know whether or not we've got a generator (progressive
94 # output) until we start getting output from the model
95 self._is_output_generator = self.OutputType.NOT_STARTED
96
97 # We haven't encountered an error yet
98 self._error = None
99
100 # Send prediction input through the pipe to the predictor subprocess
101 self.prediction_input_pipe_writer.send(prediction_input)
102
103 def is_processing(self) -> bool:
104 """
105 Returns True if the subprocess running the prediction is still
106 processing.
107 """
108 if self.done_pipe_reader.poll():
109 try:
110 if self.done_pipe_reader.recv() == self.PROCESSING_DONE:
111 self._is_processing = False
112 except EOFError:
113 pass
114
115 return self._is_processing
116
117 def has_output_waiting(self) -> bool:
118 return self.predictor_pipe_reader.poll()
119
120 def read_output(self) -> List[Any]:
121 if self._is_output_generator is self.OutputType.NOT_STARTED:
122 return []
123
124 output = []
125 while self.has_output_waiting():
126 try:
127 output.append(self.predictor_pipe_reader.recv())
128 except EOFError:
129 break
130 return output
131
132 def has_logs_waiting(self) -> bool:
133 return self.logs_pipe_reader.poll()
134
135 def read_logs(self) -> List[str]:
136 logs = []
137 while self.has_logs_waiting():
138 try:
139 logs.append(self.logs_pipe_reader.recv())
140 except EOFError:
141 break
142 return logs
143
144 def is_output_generator(self) -> Optional[bool]:
145 """
146 Returns `True` if the output is a generator, `False` if it's not, and
147 `None` if we don't know yet.
148 """
149 if self._is_output_generator is self.OutputType.NOT_STARTED:
150 if self.has_output_waiting():
151 # if there's output waiting use the first one to set whether
152 # we've got a generator, with a safety check
153 self._is_output_generator = self.predictor_pipe_reader.recv()
154 assert isinstance(self._is_output_generator, self.OutputType)
155
156 if self._is_output_generator is self.OutputType.NOT_STARTED:
157 return None
158 elif self._is_output_generator is self.OutputType.SINGLE:
159 return False
160 elif self._is_output_generator is self.OutputType.GENERATOR:
161 return True
162
163 def _run_prediction(self, prediction_input: Dict[str, Any]) -> None:
164 """
165 Sends a boolean first, to indicate whether the output is a generator.
166 After that it sends the output(s).
167
168 If the predictor raises an exception it'll send it to the error pipe
169 writer and then exit.
170
171 When the prediction is finished it'll send a token to the done pipe.
172 """
173 # Empty all the pipes before we start sending more messages to them
174 drain_pipe(self.logs_pipe_reader)
175 drain_pipe(self.predictor_pipe_reader)
176 drain_pipe(self.error_pipe_reader)
177 drain_pipe(self.done_pipe_reader)
178
179 with capture_log(self.logs_pipe_writer):
180 try:
181 output = self.predictor.predict(**prediction_input)
182
183 if isinstance(output, types.GeneratorType):
184 self.predictor_pipe_writer.send(self.OutputType.GENERATOR)
185 while True:
186 try:
187 self.predictor_pipe_writer.send(
188 next(make_pickleable(output))
189 )
190 except StopIteration:
191 break
192 else:
193 self.predictor_pipe_writer.send(self.OutputType.SINGLE)
194 self.predictor_pipe_writer.send(make_pickleable(output))
195 except Exception as e:
196 self.error_pipe_writer.send(e)
197
198 self.done_pipe_writer.send(self.PROCESSING_DONE)
199
200 def error(self) -> Optional[str]:
201 """
202 Returns the error encountered by the predictor, if one exists.
203 """
204 if self._error is None and self.error_pipe_reader.poll():
205 try:
206 self._error = self.error_pipe_reader.recv()
207 except EOFError:
208 # I don't know how this is reachable ¯\_(ツ)_/¯
209 pass
210
211 return self._error
212
213
214 def drain_pipe(pipe_reader: Connection) -> None:
215 """
216 Reads all available messages from a pipe and discards them. This serves to
217 clear the pipe for future usage.
218 """
219 while pipe_reader.poll():
220 try:
221 pipe_reader.recv()
222 except EOFError:
223 break
224
225
226 def make_pickleable(obj: Any) -> Any:
227 """
228 Returns a version of `obj` which can be pickled and therefore sent through
229 the pipe to the main process.
230
231 If the predictor uses a custom output like:
232
233 class Output(BaseModel):
234 text: str
235
236 then the output can't be sent through the pipe because:
237
238 > Can't pickle <class 'predict.Output'>: it's not the same object as
239 > 'predict.Output'
240
241 The way we're getting around this here will only work for singly-nested
242 outputs. If there's a complex object inside a complex object, it's likely
243 to fall over.
244
245 A better fix for this would be to work out why the pickling process is
246 getting a different class when loading `Output`, so the pickling Just
247 Works.
248 """
249 if isinstance(obj, BaseModel):
250 return obj.dict(exclude_unset=True)
251 else:
252 return obj
253
[end of python/cog/server/runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/cog/server/runner.py b/python/cog/server/runner.py
--- a/python/cog/server/runner.py
+++ b/python/cog/server/runner.py
@@ -248,5 +248,7 @@
"""
if isinstance(obj, BaseModel):
return obj.dict(exclude_unset=True)
+ elif isinstance(obj, List):
+ return [make_pickleable(item) for item in obj]
else:
return obj
|
{"golden_diff": "diff --git a/python/cog/server/runner.py b/python/cog/server/runner.py\n--- a/python/cog/server/runner.py\n+++ b/python/cog/server/runner.py\n@@ -248,5 +248,7 @@\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n+ elif isinstance(obj, List):\n+ return [make_pickleable(item) for item in obj]\n else:\n return obj\n", "issue": "Support lists of `BaseModel` for outputs\nThe following model fails on Replicate.com with a cryptic `Can't pickle <class 'predict.Output'>: it's not the same object as predict.Output` error:\r\n\r\n```python\r\nclass Output(BaseModel):\r\n foo: str\r\n bar: str\r\n\r\nclass Predictor(BasePredictor):\r\n def predict(self) -> List[Output]:\r\n return [Output(foo=\"foo\", bar=\"bar\")]\r\n```\r\n\r\nThis is [documented deep in Cog's Python layer](https://github.com/replicate/cog/blob/main/python/cog/server/runner.py#L238).\r\n\r\nWe should support this type of output.\n", "before_files": [{"content": "import multiprocessing\nimport types\nfrom enum import Enum\nfrom multiprocessing.connection import Connection\nfrom typing import Any, Dict, List, Optional\n\nfrom pydantic import BaseModel\n\nfrom ..predictor import load_config, load_predictor\nfrom .log_capture import capture_log\n\n\nclass PredictionRunner:\n PROCESSING_DONE = 1\n\n class OutputType(Enum):\n NOT_STARTED = 0\n SINGLE = 1\n GENERATOR = 2\n\n def __init__(self) -> None:\n self.logs_pipe_reader, self.logs_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n (\n self.prediction_input_pipe_reader,\n self.prediction_input_pipe_writer,\n ) = multiprocessing.Pipe(duplex=False)\n self.predictor_pipe_reader, self.predictor_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.error_pipe_reader, self.error_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n self.done_pipe_reader, self.done_pipe_writer = multiprocessing.Pipe(\n duplex=False\n )\n\n def setup(self) -> None:\n \"\"\"\n Sets up the predictor in a subprocess. Blocks until the predictor has\n finished setup. To start a prediction after setup call `run()`.\n \"\"\"\n # `multiprocessing.get_context(\"spawn\")` returns the same API as\n # `multiprocessing`, but will use the spawn method when creating any\n # subprocess. Using the spawn method for the predictor subprocess is\n # useful for compatibility with CUDA, which cannot run in a process\n # that gets forked. 
If we can guarantee that all initialization happens\n # within the subprocess, we could probably get away with using fork\n # here instead.\n self.predictor_process = multiprocessing.get_context(\"spawn\").Process(\n target=self._start_predictor_process\n )\n\n self._is_processing = True\n self.predictor_process.start()\n\n # poll with an infinite timeout to avoid burning resources in the loop\n while self.done_pipe_reader.poll(timeout=None) and self.is_processing():\n pass\n\n def _start_predictor_process(self) -> None:\n config = load_config()\n self.predictor = load_predictor(config)\n self.predictor.setup()\n\n # tell the main process we've finished setup\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n while True:\n try:\n prediction_input = self.prediction_input_pipe_reader.recv()\n self._run_prediction(prediction_input)\n except EOFError:\n continue\n\n def run(self, **prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Starts running a prediction in the predictor subprocess, using the\n inputs provided in `prediction_input`.\n\n The subprocess will send prediction output and logs to pipes as soon as\n they're available. You can check if the pipes have any data using\n `has_output_waiting()` and `has_logs_waiting()`. You can read data from\n the pipes using `read_output()` and `read_logs()`.\n\n Use `is_processing()` to check whether more data is expected in the\n pipe for prediction output.\n \"\"\"\n # We're starting processing!\n self._is_processing = True\n\n # We don't know whether or not we've got a generator (progressive\n # output) until we start getting output from the model\n self._is_output_generator = self.OutputType.NOT_STARTED\n\n # We haven't encountered an error yet\n self._error = None\n\n # Send prediction input through the pipe to the predictor subprocess\n self.prediction_input_pipe_writer.send(prediction_input)\n\n def is_processing(self) -> bool:\n \"\"\"\n Returns True if the subprocess running the prediction is still\n processing.\n \"\"\"\n if self.done_pipe_reader.poll():\n try:\n if self.done_pipe_reader.recv() == self.PROCESSING_DONE:\n self._is_processing = False\n except EOFError:\n pass\n\n return self._is_processing\n\n def has_output_waiting(self) -> bool:\n return self.predictor_pipe_reader.poll()\n\n def read_output(self) -> List[Any]:\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return []\n\n output = []\n while self.has_output_waiting():\n try:\n output.append(self.predictor_pipe_reader.recv())\n except EOFError:\n break\n return output\n\n def has_logs_waiting(self) -> bool:\n return self.logs_pipe_reader.poll()\n\n def read_logs(self) -> List[str]:\n logs = []\n while self.has_logs_waiting():\n try:\n logs.append(self.logs_pipe_reader.recv())\n except EOFError:\n break\n return logs\n\n def is_output_generator(self) -> Optional[bool]:\n \"\"\"\n Returns `True` if the output is a generator, `False` if it's not, and\n `None` if we don't know yet.\n \"\"\"\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n if self.has_output_waiting():\n # if there's output waiting use the first one to set whether\n # we've got a generator, with a safety check\n self._is_output_generator = self.predictor_pipe_reader.recv()\n assert isinstance(self._is_output_generator, self.OutputType)\n\n if self._is_output_generator is self.OutputType.NOT_STARTED:\n return None\n elif self._is_output_generator is self.OutputType.SINGLE:\n return False\n elif self._is_output_generator is self.OutputType.GENERATOR:\n return True\n\n def 
_run_prediction(self, prediction_input: Dict[str, Any]) -> None:\n \"\"\"\n Sends a boolean first, to indicate whether the output is a generator.\n After that it sends the output(s).\n\n If the predictor raises an exception it'll send it to the error pipe\n writer and then exit.\n\n When the prediction is finished it'll send a token to the done pipe.\n \"\"\"\n # Empty all the pipes before we start sending more messages to them\n drain_pipe(self.logs_pipe_reader)\n drain_pipe(self.predictor_pipe_reader)\n drain_pipe(self.error_pipe_reader)\n drain_pipe(self.done_pipe_reader)\n\n with capture_log(self.logs_pipe_writer):\n try:\n output = self.predictor.predict(**prediction_input)\n\n if isinstance(output, types.GeneratorType):\n self.predictor_pipe_writer.send(self.OutputType.GENERATOR)\n while True:\n try:\n self.predictor_pipe_writer.send(\n next(make_pickleable(output))\n )\n except StopIteration:\n break\n else:\n self.predictor_pipe_writer.send(self.OutputType.SINGLE)\n self.predictor_pipe_writer.send(make_pickleable(output))\n except Exception as e:\n self.error_pipe_writer.send(e)\n\n self.done_pipe_writer.send(self.PROCESSING_DONE)\n\n def error(self) -> Optional[str]:\n \"\"\"\n Returns the error encountered by the predictor, if one exists.\n \"\"\"\n if self._error is None and self.error_pipe_reader.poll():\n try:\n self._error = self.error_pipe_reader.recv()\n except EOFError:\n # I don't know how this is reachable \u00af\\_(\u30c4)_/\u00af\n pass\n\n return self._error\n\n\ndef drain_pipe(pipe_reader: Connection) -> None:\n \"\"\"\n Reads all available messages from a pipe and discards them. This serves to\n clear the pipe for future usage.\n \"\"\"\n while pipe_reader.poll():\n try:\n pipe_reader.recv()\n except EOFError:\n break\n\n\ndef make_pickleable(obj: Any) -> Any:\n \"\"\"\n Returns a version of `obj` which can be pickled and therefore sent through\n the pipe to the main process.\n\n If the predictor uses a custom output like:\n\n class Output(BaseModel):\n text: str\n\n then the output can't be sent through the pipe because:\n\n > Can't pickle <class 'predict.Output'>: it's not the same object as\n > 'predict.Output'\n\n The way we're getting around this here will only work for singly-nested\n outputs. If there's a complex object inside a complex object, it's likely\n to fall over.\n\n A better fix for this would be to work out why the pickling process is\n getting a different class when loading `Output`, so the pickling Just\n Works.\n \"\"\"\n if isinstance(obj, BaseModel):\n return obj.dict(exclude_unset=True)\n else:\n return obj\n", "path": "python/cog/server/runner.py"}]}
| 3,142 | 101 |
| gh_patches_debug_28231 | rasdani/github-patches | git_diff | pytorch__ignite-1197 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Distributed model initialisation - CIFAR 10 example
## ❓ Questions/Help/Support
In the cifar10 example, when a [model](https://github.com/pytorch/ignite/blob/master/examples/contrib/cifar10/main.py#L51) is defined inside each spawned process, is the model updated and shared across the `nproc_per_node` processes?
In most of the TPU examples I have seen they define the model before the `Parallel` process.
- [tpu-training-super-fast-xlmroberta](https://www.kaggle.com/shonenkov/tpu-training-super-fast-xlmroberta)
- [i-like-clean-tpu-training-kernels-i-can-not-lie](https://www.kaggle.com/abhishek/i-like-clean-tpu-training-kernels-i-can-not-lie)
Does the way ignite handles the model ensure the same model is used throughout the training?
</issue>
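For background on the question above: with a native torch distributed backend, constructing the model inside each spawned process is safe, because `DistributedDataParallel` broadcasts the rank-0 parameters when it wraps the module and all-reduces gradients afterwards, so every process trains identical weights. A minimal sketch (assuming an already-initialized process group and one GPU per process; the layer sizes are illustrative only):

```python
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP

def build_model(local_rank: int) -> nn.Module:
    # Each process builds its own copy of the model on its local device.
    model = nn.Linear(10, 2).to(local_rank)
    # DDP broadcasts the rank-0 weights at construction and synchronizes
    # gradients during backward, keeping all replicas identical.
    return DDP(model, device_ids=[local_rank])
```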
<code>
[start of ignite/distributed/auto.py]
1 import warnings
2
3 import torch
4 import torch.nn as nn
5 from torch.optim.optimizer import Optimizer
6 from torch.utils.data import DataLoader, Dataset
7 from torch.utils.data.distributed import DistributedSampler
8 from torch.utils.data.sampler import Sampler
9
10 from ignite.distributed import utils as idist
11 from ignite.distributed.comp_models import native as idist_native
12 from ignite.distributed.comp_models import xla as idist_xla
13 from ignite.utils import setup_logger
14
15 __all__ = ["auto_dataloader", "auto_model", "auto_optim", "DistributedProxySampler"]
16
17
18 def auto_dataloader(dataset, **kwargs):
19 """Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting
20 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
21
22 Internally, we create a dataloader with provided kwargs while applying the following updates:
23
24 - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.
25 - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.
26 - if no sampler provided by user, `torch DistributedSampler` is setup.
27 - if a sampler is provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.
28 - if the default device is 'cuda', `pin_memory` is automatically set to `True`.
29
30 .. warning::
31
32 Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch
33 sampler is compatible with distributed configuration.
34
35 Examples:
36
37 .. code-block:: python
38
39 import ignite.distribted as idist
40
41 train_loader = idist.auto_dataloader(
42 train_dataset,
43 batch_size=32,
44 num_workers=4,
45 shuffle=True,
46 pin_memory="cuda" in idist.device().type,
47 drop_last=True,
48 )
49
50 Args:
51 dataset (Dataset): input torch dataset
52 **kwargs: keyword arguments for `torch DataLoader`_.
53
54 Returns:
55 `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices
56
57 .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
58 .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178
59 .. _torch DistributedSampler:
60 https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
61 """
62 rank = idist.get_rank()
63 world_size = idist.get_world_size()
64
65 logger = setup_logger(__name__ + ".auto_dataloader")
66 if world_size > 1:
67 if "batch_size" in kwargs and kwargs["batch_size"] >= world_size:
68 kwargs["batch_size"] //= world_size
69
70 nproc = idist.get_nproc_per_node()
71 if "num_workers" in kwargs and kwargs["num_workers"] >= nproc:
72 kwargs["num_workers"] = (kwargs["num_workers"] + nproc - 1) // nproc
73
74 if "batch_sampler" not in kwargs:
75 if kwargs.get("sampler", None) is not None:
76 sampler = DistributedProxySampler(kwargs["sampler"], num_replicas=world_size, rank=rank)
77 else:
78 sampler = DistributedSampler(
79 dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get("shuffle", True)
80 )
81 # we need to remove "shuffle" from kwargs if sampler is used
82 if "shuffle" in kwargs:
83 del kwargs["shuffle"]
84
85 kwargs["sampler"] = sampler
86 else:
87 warnings.warn(
88 "Found batch_sampler in provided kwargs. Please, make sure that it is compatible "
89 "with distributed configuration"
90 )
91
92 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get("pin_memory", False):
93 # TODO: How about XLA GPU ?
94 warnings.warn(
95 "Found incompatible options: xla support and pin_memory args equal True. "
96 "Argument `pin_memory=False` will be used to construct data loader."
97 )
98 kwargs["pin_memory"] = False
99 else:
100 kwargs["pin_memory"] = kwargs.get("pin_memory", "cuda" in idist.device().type)
101
102 logger.info("Use data loader kwargs for dataset '{}': \n\t{}".format(repr(dataset)[:20].strip(), kwargs))
103 dataloader = DataLoader(dataset, **kwargs)
104
105 if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:
106
107 logger.info("DataLoader is wrapped by `MpDeviceLoader` on XLA")
108
109 mp_device_loader_cls = _MpDeviceLoader
110 try:
111 from torch_xla.distributed.parallel_loader import MpDeviceLoader
112
113 mp_device_loader_cls = MpDeviceLoader
114 except ImportError:
115 pass
116
117 sampler = dataloader.sampler
118 dataloader = mp_device_loader_cls(dataloader, idist.device())
119 dataloader.sampler = sampler
120
121 return dataloader
122
123
124 def auto_model(model: nn.Module) -> nn.Module:
125 """Helper method to adapt provided model for non-distributed and distributed configurations (supporting
126 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
127
128 Internally, we perform to following:
129
130 - send model to current :meth:`~ignite.distributed.utils.device()`.
131 - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1
132 - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
133
134 Examples:
135
136 .. code-block:: python
137
138 import ignite.distribted as idist
139
140 model = idist.auto_model(model)
141
142 Args:
143 model (torch.nn.Module): model to adapt.
144
145 Returns:
146 torch.nn.Module
147
148 .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel
149 .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel
150 """
151 logger = setup_logger(__name__ + ".auto_model")
152
153 model.to(idist.device())
154
155 # distributed data parallel model
156 if idist.get_world_size() > 1:
157 if idist.backend() == idist_native.NCCL:
158 lrank = idist.get_local_rank()
159 logger.info("Apply torch DistributedDataParallel on model, device id: {}".format(lrank))
160 model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])
161 elif idist.backend() == idist_native.GLOO:
162 logger.info("Apply torch DistributedDataParallel on model")
163 model = torch.nn.parallel.DistributedDataParallel(model)
164
165 # not distributed but multiple GPUs reachable so data parallel model
166 elif torch.cuda.device_count() > 1 and "cuda" in idist.device().type:
167 logger.info("Apply torch DataParallel on model")
168 model = torch.nn.parallel.DataParallel(model)
169
170 return model
171
172
173 def auto_optim(optimizer: Optimizer) -> Optimizer:
174 """Helper method to adapt optimizer for non-distributed and distributed configurations (supporting
175 all available backends from :meth:`~ignite.distributed.utils.available_backends()`).
176
177 Internally, this method is no-op for non-distributed and torch native distributed configuration.
178 For XLA distributed configuration, we create a new class that inherits from provided optimizer.
179 The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.
180
181 Examples:
182
183 .. code-block:: python
184
185 import ignite.distribted as idist
186
187 optimizer = idist.auto_optim(optimizer)
188
189
190 Args:
191 optimizer (Optimizer): input torch optimizer
192
193 Returns:
194 Optimizer
195
196 .. _xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step
197
198 """
199 if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):
200 return optimizer
201
202 cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))
203 return cls(optimizer)
204
205
206 class DistributedProxySampler(DistributedSampler):
207 """Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.
208
209 Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407
210
211
212 .. note::
213 Input sampler is assumed to have a constant size.
214
215 Args:
216 sampler (Sampler): Input torch data sampler.
217 num_replicas (int, optional): Number of processes participating in distributed training.
218 rank (int, optional): Rank of the current process within ``num_replicas``.
219
220 """
221
222 def __init__(self, sampler: Sampler, num_replicas=None, rank=None):
223
224 if not isinstance(sampler, Sampler):
225 raise TypeError("Argument sampler should be instance of torch Sampler, but given: {}".format(type(sampler)))
226
227 if not hasattr(sampler, "__len__"):
228 raise TypeError("Argument sampler should have length")
229
230 super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)
231 self.sampler = sampler
232
233 def __iter__(self):
234 # deterministically shuffle based on epoch
235 torch.manual_seed(self.epoch)
236
237 indices = []
238 while len(indices) < self.total_size:
239 indices += list(self.sampler)
240
241 if len(indices) > self.total_size:
242 indices = indices[: self.total_size]
243
244 # subsample
245 indices = indices[self.rank : self.total_size : self.num_replicas]
246 if len(indices) != self.num_samples:
247 raise RuntimeError("{} vs {}".format(len(indices), self.num_samples))
248
249 return iter(indices)
250
251
252 if idist.has_xla_support:
253
254 import torch_xla.core.xla_model as xm
255 from torch_xla.distributed.parallel_loader import ParallelLoader
256
257 class _MpDeviceLoader:
258 # https://github.com/pytorch/xla/pull/2117
259 # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available
260 def __init__(self, loader, device, **kwargs):
261 self._loader = loader
262 self._device = device
263 self._parallel_loader_kwargs = kwargs
264
265 def __iter__(self):
266 parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)
267 return parallel_loader.per_device_loader(self._device)
268
269 def __len__(self):
270 return len(self._loader)
271
272 class _XLADistributedOptimizer(Optimizer):
273 def __init__(self, optimizer):
274 super(self.__class__, self).__init__(optimizer.param_groups)
275 self.wrapped_optimizer = optimizer
276
277 def step(self, closure=None):
278 xm.optimizer_step(self.wrapped_optimizer, barrier=True)
279
[end of ignite/distributed/auto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py
--- a/ignite/distributed/auto.py
+++ b/ignite/distributed/auto.py
@@ -127,8 +127,8 @@
Internally, we perform to following:
- - send model to current :meth:`~ignite.distributed.utils.device()`.
- - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1
+ - send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.
+ - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.
- wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.
Examples:
@@ -139,6 +139,15 @@
model = idist.auto_model(model)
+ In addition with NVidia/Apex, it can be used in the following way:
+
+ .. code-block:: python
+
+ import ignite.distribted as idist
+
+ model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
+ model = idist.auto_model(model)
+
Args:
model (torch.nn.Module): model to adapt.
@@ -150,7 +159,10 @@
"""
logger = setup_logger(__name__ + ".auto_model")
- model.to(idist.device())
+ # Put model's parameters to device if its parameters are not on the device
+ device = idist.device()
+ if not all([p.device == device for p in model.parameters()]):
+ model.to(device)
# distributed data parallel model
if idist.get_world_size() > 1:
|
{"golden_diff": "diff --git a/ignite/distributed/auto.py b/ignite/distributed/auto.py\n--- a/ignite/distributed/auto.py\n+++ b/ignite/distributed/auto.py\n@@ -127,8 +127,8 @@\n \n Internally, we perform to following:\n \n- - send model to current :meth:`~ignite.distributed.utils.device()`.\n- - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n+ - send model to current :meth:`~ignite.distributed.utils.device()` if model's parameters are not on the device.\n+ - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1.\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n \n Examples:\n@@ -139,6 +139,15 @@\n \n model = idist.auto_model(model)\n \n+ In addition with NVidia/Apex, it can be used in the following way:\n+\n+ .. code-block:: python\n+\n+ import ignite.distribted as idist\n+\n+ model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)\n+ model = idist.auto_model(model)\n+\n Args:\n model (torch.nn.Module): model to adapt.\n \n@@ -150,7 +159,10 @@\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n \n- model.to(idist.device())\n+ # Put model's parameters to device if its parameters are not on the device\n+ device = idist.device()\n+ if not all([p.device == device for p in model.parameters()]):\n+ model.to(device)\n \n # distributed data parallel model\n if idist.get_world_size() > 1:\n", "issue": "Distributed model initialisation - CIFAR 10 example\n## \u2753 Questions/Help/Support\r\n\r\nIn the cifar10 example when a [model](https://github.com/pytorch/ignite/blob/master/examples/contrib/cifar10/main.py#L51) is defined inside the process, is the model updated and shared between the `nproc_per_node`?\r\n\r\nIn most of the TPU examples I have seen they define the model before the `Parallel` process.\r\n- [tpu-training-super-fast-xlmroberta](https://www.kaggle.com/shonenkov/tpu-training-super-fast-xlmroberta)\r\n- [i-like-clean-tpu-training-kernels-i-can-not-lie](https://www.kaggle.com/abhishek/i-like-clean-tpu-training-kernels-i-can-not-lie)\r\n\r\nDoes the way ignite handles the model ensure the same model is used throughout the training?\n", "before_files": [{"content": "import warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import Sampler\n\nfrom ignite.distributed import utils as idist\nfrom ignite.distributed.comp_models import native as idist_native\nfrom ignite.distributed.comp_models import xla as idist_xla\nfrom ignite.utils import setup_logger\n\n__all__ = [\"auto_dataloader\", \"auto_model\", \"auto_optim\", \"DistributedProxySampler\"]\n\n\ndef auto_dataloader(dataset, **kwargs):\n \"\"\"Helper method to create a dataloader adapted for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we create a dataloader with provided kwargs while applying the following updates:\n\n - batch size is scaled by world size: ``batch_size / world_size`` if larger or equal world size.\n - number of workers is scaled by number of local processes: ``num_workers / nprocs`` if larger or equal world size.\n - if no sampler provided by user, `torch DistributedSampler` is setup.\n - if a sampler is 
provided by user, it is wrapped by :class:`~ignite.distributed.auto.DistributedProxySampler`.\n - if the default device is 'cuda', `pin_memory` is automatically set to `True`.\n\n .. warning::\n\n Custom batch sampler is not adapted for distributed configuration. Please, make sure that provided batch\n sampler is compatible with distributed configuration.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n train_loader = idist.auto_dataloader(\n train_dataset,\n batch_size=32,\n num_workers=4,\n shuffle=True,\n pin_memory=\"cuda\" in idist.device().type,\n drop_last=True,\n )\n\n Args:\n dataset (Dataset): input torch dataset\n **kwargs: keyword arguments for `torch DataLoader`_.\n\n Returns:\n `torch DataLoader`_ or `XLA MpDeviceLoader`_ for XLA devices\n\n .. _torch DataLoader: https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader\n .. _XLA MpDeviceLoader: https://github.com/pytorch/xla/blob/master/torch_xla/distributed/parallel_loader.py#L178\n .. _torch DistributedSampler:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler\n \"\"\"\n rank = idist.get_rank()\n world_size = idist.get_world_size()\n\n logger = setup_logger(__name__ + \".auto_dataloader\")\n if world_size > 1:\n if \"batch_size\" in kwargs and kwargs[\"batch_size\"] >= world_size:\n kwargs[\"batch_size\"] //= world_size\n\n nproc = idist.get_nproc_per_node()\n if \"num_workers\" in kwargs and kwargs[\"num_workers\"] >= nproc:\n kwargs[\"num_workers\"] = (kwargs[\"num_workers\"] + nproc - 1) // nproc\n\n if \"batch_sampler\" not in kwargs:\n if kwargs.get(\"sampler\", None) is not None:\n sampler = DistributedProxySampler(kwargs[\"sampler\"], num_replicas=world_size, rank=rank)\n else:\n sampler = DistributedSampler(\n dataset, num_replicas=world_size, rank=rank, shuffle=kwargs.get(\"shuffle\", True)\n )\n # we need to remove \"shuffle\" from kwargs if sampler is used\n if \"shuffle\" in kwargs:\n del kwargs[\"shuffle\"]\n\n kwargs[\"sampler\"] = sampler\n else:\n warnings.warn(\n \"Found batch_sampler in provided kwargs. Please, make sure that it is compatible \"\n \"with distributed configuration\"\n )\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and kwargs.get(\"pin_memory\", False):\n # TODO: How about XLA GPU ?\n warnings.warn(\n \"Found incompatible options: xla support and pin_memory args equal True. 
\"\n \"Argument `pin_memory=False` will be used to construct data loader.\"\n )\n kwargs[\"pin_memory\"] = False\n else:\n kwargs[\"pin_memory\"] = kwargs.get(\"pin_memory\", \"cuda\" in idist.device().type)\n\n logger.info(\"Use data loader kwargs for dataset '{}': \\n\\t{}\".format(repr(dataset)[:20].strip(), kwargs))\n dataloader = DataLoader(dataset, **kwargs)\n\n if idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU and world_size > 1:\n\n logger.info(\"DataLoader is wrapped by `MpDeviceLoader` on XLA\")\n\n mp_device_loader_cls = _MpDeviceLoader\n try:\n from torch_xla.distributed.parallel_loader import MpDeviceLoader\n\n mp_device_loader_cls = MpDeviceLoader\n except ImportError:\n pass\n\n sampler = dataloader.sampler\n dataloader = mp_device_loader_cls(dataloader, idist.device())\n dataloader.sampler = sampler\n\n return dataloader\n\n\ndef auto_model(model: nn.Module) -> nn.Module:\n \"\"\"Helper method to adapt provided model for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, we perform to following:\n\n - send model to current :meth:`~ignite.distributed.utils.device()`.\n - wrap the model to `torch DistributedDataParallel`_ for native torch distributed if world size is larger than 1\n - wrap the model to `torch DataParallel`_ if no distributed context found and more than one CUDA devices available.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n model = idist.auto_model(model)\n\n Args:\n model (torch.nn.Module): model to adapt.\n\n Returns:\n torch.nn.Module\n\n .. _torch DistributedDataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel\n .. _torch DataParallel: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n \"\"\"\n logger = setup_logger(__name__ + \".auto_model\")\n\n model.to(idist.device())\n\n # distributed data parallel model\n if idist.get_world_size() > 1:\n if idist.backend() == idist_native.NCCL:\n lrank = idist.get_local_rank()\n logger.info(\"Apply torch DistributedDataParallel on model, device id: {}\".format(lrank))\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[lrank,])\n elif idist.backend() == idist_native.GLOO:\n logger.info(\"Apply torch DistributedDataParallel on model\")\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n # not distributed but multiple GPUs reachable so data parallel model\n elif torch.cuda.device_count() > 1 and \"cuda\" in idist.device().type:\n logger.info(\"Apply torch DataParallel on model\")\n model = torch.nn.parallel.DataParallel(model)\n\n return model\n\n\ndef auto_optim(optimizer: Optimizer) -> Optimizer:\n \"\"\"Helper method to adapt optimizer for non-distributed and distributed configurations (supporting\n all available backends from :meth:`~ignite.distributed.utils.available_backends()`).\n\n Internally, this method is no-op for non-distributed and torch native distributed configuration.\n For XLA distributed configuration, we create a new class that inherits from provided optimizer.\n The goal is to override the `step()` method with specific `xm.optimizer_step`_ implementation.\n\n Examples:\n\n .. code-block:: python\n\n import ignite.distribted as idist\n\n optimizer = idist.auto_optim(optimizer)\n\n\n Args:\n optimizer (Optimizer): input torch optimizer\n\n Returns:\n Optimizer\n\n .. 
_xm.optimizer_step: http://pytorch.org/xla/release/1.5/index.html#torch_xla.core.xla_model.optimizer_step\n\n \"\"\"\n if not (idist.has_xla_support and idist.backend() == idist_xla.XLA_TPU):\n return optimizer\n\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,), dict(_XLADistributedOptimizer.__dict__))\n return cls(optimizer)\n\n\nclass DistributedProxySampler(DistributedSampler):\n \"\"\"Distributed sampler proxy to adapt user's sampler for distributed data parallelism configuration.\n\n Code is based on https://github.com/pytorch/pytorch/issues/23430#issuecomment-562350407\n\n\n .. note::\n Input sampler is assumed to have a constant size.\n\n Args:\n sampler (Sampler): Input torch data sampler.\n num_replicas (int, optional): Number of processes participating in distributed training.\n rank (int, optional): Rank of the current process within ``num_replicas``.\n\n \"\"\"\n\n def __init__(self, sampler: Sampler, num_replicas=None, rank=None):\n\n if not isinstance(sampler, Sampler):\n raise TypeError(\"Argument sampler should be instance of torch Sampler, but given: {}\".format(type(sampler)))\n\n if not hasattr(sampler, \"__len__\"):\n raise TypeError(\"Argument sampler should have length\")\n\n super(DistributedProxySampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=False)\n self.sampler = sampler\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n torch.manual_seed(self.epoch)\n\n indices = []\n while len(indices) < self.total_size:\n indices += list(self.sampler)\n\n if len(indices) > self.total_size:\n indices = indices[: self.total_size]\n\n # subsample\n indices = indices[self.rank : self.total_size : self.num_replicas]\n if len(indices) != self.num_samples:\n raise RuntimeError(\"{} vs {}\".format(len(indices), self.num_samples))\n\n return iter(indices)\n\n\nif idist.has_xla_support:\n\n import torch_xla.core.xla_model as xm\n from torch_xla.distributed.parallel_loader import ParallelLoader\n\n class _MpDeviceLoader:\n # https://github.com/pytorch/xla/pull/2117\n # From pytorch/xla if `torch_xla.distributed.parallel_loader.MpDeviceLoader` is not available\n def __init__(self, loader, device, **kwargs):\n self._loader = loader\n self._device = device\n self._parallel_loader_kwargs = kwargs\n\n def __iter__(self):\n parallel_loader = ParallelLoader(self._loader, [self._device], **self._parallel_loader_kwargs)\n return parallel_loader.per_device_loader(self._device)\n\n def __len__(self):\n return len(self._loader)\n\n class _XLADistributedOptimizer(Optimizer):\n def __init__(self, optimizer):\n super(self.__class__, self).__init__(optimizer.param_groups)\n self.wrapped_optimizer = optimizer\n\n def step(self, closure=None):\n xm.optimizer_step(self.wrapped_optimizer, barrier=True)\n", "path": "ignite/distributed/auto.py"}]}
| 3,900 | 407 |
| gh_patches_debug_6625 | rasdani/github-patches | git_diff | ray-project__ray-2784 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[xray] Users get no warning for infeasible tasks.
Start Ray with
```
RAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0
```
Then start `RAY_USE_XRAY=1 ipython` and run
```python
import ray
ray.init(redis_address='localhost:6379')
@ray.remote(num_gpus=1)
def f():
return 1
f.remote()
```
`f` will never execute because it is infeasible, and yet the user will get no warning.
</issue>
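To see why the task above can never be scheduled, compare its resource request with what the cluster actually has; a rough sketch (API names as in newer Ray releases, shown only for illustration):

```python
import ray

ray.init(address="auto")
available = ray.cluster_resources()   # e.g. {'CPU': 8.0} and no 'GPU' entry
required = {"GPU": 1}
if any(available.get(k, 0) < v for k, v in required.items()):
    print("infeasible: the cluster can never satisfy this resource request")
```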
<code>
[start of python/ray/ray_constants.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 """Ray constants used in the Python code."""
5
6 import os
7
8 import ray
9
10
11 def env_integer(key, default):
12 if key in os.environ:
13 return int(os.environ[key])
14 return default
15
16
17 ID_SIZE = 20
18 NIL_JOB_ID = ray.ObjectID(ID_SIZE * b"\x00")
19
20 # If a remote function or actor (or some other export) has serialized size
21 # greater than this quantity, print an warning.
22 PICKLE_OBJECT_WARNING_SIZE = 10**7
23
24 # The maximum resource quantity that is allowed. TODO(rkn): This could be
25 # relaxed, but the current implementation of the node manager will be slower
26 # for large resource quantities due to bookkeeping of specific resource IDs.
27 MAX_RESOURCE_QUANTITY = 512
28
29 # Different types of Ray errors that can be pushed to the driver.
30 # TODO(rkn): These should be defined in flatbuffers and must be synced with
31 # the existing C++ definitions.
32 WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
33 PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
34 WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
35 TASK_PUSH_ERROR = "task"
36 REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
37 FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
38 VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
39 CHECKPOINT_PUSH_ERROR = "checkpoint"
40 REGISTER_ACTOR_PUSH_ERROR = "register_actor"
41 WORKER_CRASH_PUSH_ERROR = "worker_crash"
42 WORKER_DIED_PUSH_ERROR = "worker_died"
43 PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
44 HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
45
46 # Abort autoscaling if more than this number of errors are encountered. This
47 # is a safety feature to prevent e.g. runaway node launches.
48 AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
49
50 # The maximum number of nodes to launch in a single request.
51 # Multiple requests may be made for this batch size, up to
52 # the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
53 AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
54
55 # Max number of nodes to launch at a time.
56 AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
57 "AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
58
59 # Interval at which to perform autoscaling updates.
60 AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
61
62 # The autoscaler will attempt to restart Ray on nodes it hasn't heard from
63 # in more than this interval.
64 AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
65 30)
66
67 # Max number of retries to AWS (default is 5, time increases exponentially)
68 BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
69
70 # Default logger format: only contains the message.
71 LOGGER_FORMAT = "%(message)s"
72 LOGGER_FORMAT_HELP = "The logging format. default='%(message)s'"
73 LOGGER_LEVEL = "info"
74 LOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']
75 LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
76 " 'warning', 'error', 'critical'], default='info'")
77
[end of python/ray/ray_constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py
--- a/python/ray/ray_constants.py
+++ b/python/ray/ray_constants.py
@@ -42,6 +42,7 @@
WORKER_DIED_PUSH_ERROR = "worker_died"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
HASH_MISMATCH_PUSH_ERROR = "object_hash_mismatch"
+INFEASIBLE_TASK_ERROR = "infeasible_task"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
|
{"golden_diff": "diff --git a/python/ray/ray_constants.py b/python/ray/ray_constants.py\n--- a/python/ray/ray_constants.py\n+++ b/python/ray/ray_constants.py\n@@ -42,6 +42,7 @@\n WORKER_DIED_PUSH_ERROR = \"worker_died\"\n PUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\n HASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\n+INFEASIBLE_TASK_ERROR = \"infeasible_task\"\n \n # Abort autoscaling if more than this number of errors are encountered. This\n # is a safety feature to prevent e.g. runaway node launches.\n", "issue": "[xray] Users get no warning for infeasible tasks.\nStart Ray with \r\n\r\n```\r\nRAY_USE_XRAY=1 ray start --head --redis-port=6379 --num-gpus=0\r\n```\r\n\r\nThen start `RAY_USE_XRAY=1 ipython` and run\r\n\r\n```python\r\nimport ray\r\n\r\nray.init(redis_address='localhost:6379')\r\n\r\[email protected](num_gpus=1)\r\ndef f():\r\n return 1\r\n\r\nf.remote()\r\n```\r\n\r\n`f` will never execute because it is infeasible, and yet the user will get no warning.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\"\"\"Ray constants used in the Python code.\"\"\"\n\nimport os\n\nimport ray\n\n\ndef env_integer(key, default):\n if key in os.environ:\n return int(os.environ[key])\n return default\n\n\nID_SIZE = 20\nNIL_JOB_ID = ray.ObjectID(ID_SIZE * b\"\\x00\")\n\n# If a remote function or actor (or some other export) has serialized size\n# greater than this quantity, print an warning.\nPICKLE_OBJECT_WARNING_SIZE = 10**7\n\n# The maximum resource quantity that is allowed. TODO(rkn): This could be\n# relaxed, but the current implementation of the node manager will be slower\n# for large resource quantities due to bookkeeping of specific resource IDs.\nMAX_RESOURCE_QUANTITY = 512\n\n# Different types of Ray errors that can be pushed to the driver.\n# TODO(rkn): These should be defined in flatbuffers and must be synced with\n# the existing C++ definitions.\nWAIT_FOR_CLASS_PUSH_ERROR = \"wait_for_class\"\nPICKLING_LARGE_OBJECT_PUSH_ERROR = \"pickling_large_object\"\nWAIT_FOR_FUNCTION_PUSH_ERROR = \"wait_for_function\"\nTASK_PUSH_ERROR = \"task\"\nREGISTER_REMOTE_FUNCTION_PUSH_ERROR = \"register_remote_function\"\nFUNCTION_TO_RUN_PUSH_ERROR = \"function_to_run\"\nVERSION_MISMATCH_PUSH_ERROR = \"version_mismatch\"\nCHECKPOINT_PUSH_ERROR = \"checkpoint\"\nREGISTER_ACTOR_PUSH_ERROR = \"register_actor\"\nWORKER_CRASH_PUSH_ERROR = \"worker_crash\"\nWORKER_DIED_PUSH_ERROR = \"worker_died\"\nPUT_RECONSTRUCTION_PUSH_ERROR = \"put_reconstruction\"\nHASH_MISMATCH_PUSH_ERROR = \"object_hash_mismatch\"\n\n# Abort autoscaling if more than this number of errors are encountered. This\n# is a safety feature to prevent e.g. 
runaway node launches.\nAUTOSCALER_MAX_NUM_FAILURES = env_integer(\"AUTOSCALER_MAX_NUM_FAILURES\", 5)\n\n# The maximum number of nodes to launch in a single request.\n# Multiple requests may be made for this batch size, up to\n# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.\nAUTOSCALER_MAX_LAUNCH_BATCH = env_integer(\"AUTOSCALER_MAX_LAUNCH_BATCH\", 5)\n\n# Max number of nodes to launch at a time.\nAUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(\n \"AUTOSCALER_MAX_CONCURRENT_LAUNCHES\", 10)\n\n# Interval at which to perform autoscaling updates.\nAUTOSCALER_UPDATE_INTERVAL_S = env_integer(\"AUTOSCALER_UPDATE_INTERVAL_S\", 5)\n\n# The autoscaler will attempt to restart Ray on nodes it hasn't heard from\n# in more than this interval.\nAUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer(\"AUTOSCALER_HEARTBEAT_TIMEOUT_S\",\n 30)\n\n# Max number of retries to AWS (default is 5, time increases exponentially)\nBOTO_MAX_RETRIES = env_integer(\"BOTO_MAX_RETRIES\", 12)\n\n# Default logger format: only contains the message.\nLOGGER_FORMAT = \"%(message)s\"\nLOGGER_FORMAT_HELP = \"The logging format. default='%(message)s'\"\nLOGGER_LEVEL = \"info\"\nLOGGER_LEVEL_CHOICES = ['debug', 'info', 'warning', 'error', 'critical']\nLOGGER_LEVEL_HELP = (\"The logging level threshold, choices=['debug', 'info',\"\n \" 'warning', 'error', 'critical'], default='info'\")\n", "path": "python/ray/ray_constants.py"}]}
| 1,559 | 133 |
| gh_patches_debug_29014 | rasdani/github-patches | git_diff | inventree__InvenTree-3829 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Confirmation Email uses local ip for link URL
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find similar issue
### Describe the bug*
When sending a verification email, it shows the local IP not the set base URL
Eg. mywebsite.com
Hello from InvenTree!
You're receiving this e-mail because user kyle has given your e-mail address to register an account on https://mywebsite.com
To confirm this is correct, go to http://192.168.1.111:83/accounts/confirm-email/MQ:1olnC0:UXQyFESkK7GxGnQOOEJcP8AXY1gkd-bbeIK02tDoxoo/
Thank you for using InvenTree!
https://mywebsite.com
### Steps to Reproduce
1. Set base URL
2. Add an email
3. Send a verification email
### Expected behavior
URL should respect the base URL setting
### Deployment Method
- [X] Docker
- [ ] Bare metal
### Version Information
# Version Information:
InvenTree-Version: 0.8.3
Django Version: 3.2.15
Commit Hash: f9d2b14
Commit Date: 2022-09-23
Database: postgresql
Debug-Mode: False
Deployed using Docker: True
### Relevant log output
_No response_
</issue>
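One relevant detail, sketched here as an assumption about the setup: link-building code that goes through `django.contrib.sites` uses the domain stored on the current `Site`, so that record must hold the public base URL rather than a local address. A minimal Django-shell sketch (the domain value is the one from the report above):

```python
from django.contrib.sites.models import Site

site = Site.objects.get_current()
site.domain = "mywebsite.com"  # public base URL, not the local IP
site.name = "InvenTree"
site.save()
```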
<code>
[start of InvenTree/InvenTree/forms.py]
1 """Helper forms which subclass Django forms to provide additional functionality."""
2
3 import logging
4 from urllib.parse import urlencode
5
6 from django import forms
7 from django.conf import settings
8 from django.contrib.auth.models import Group, User
9 from django.http import HttpResponseRedirect
10 from django.urls import reverse
11 from django.utils.translation import gettext_lazy as _
12
13 from allauth.account.adapter import DefaultAccountAdapter
14 from allauth.account.forms import SignupForm, set_form_field_order
15 from allauth.exceptions import ImmediateHttpResponse
16 from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
17 from allauth_2fa.adapter import OTPAdapter
18 from allauth_2fa.utils import user_has_valid_totp_device
19 from crispy_forms.bootstrap import (AppendedText, PrependedAppendedText,
20 PrependedText)
21 from crispy_forms.helper import FormHelper
22 from crispy_forms.layout import Field, Layout
23
24 from common.models import InvenTreeSetting
25
26 logger = logging.getLogger('inventree')
27
28
29 class HelperForm(forms.ModelForm):
30 """Provides simple integration of crispy_forms extension."""
31
32 # Custom field decorations can be specified here, per form class
33 field_prefix = {}
34 field_suffix = {}
35 field_placeholder = {}
36
37 def __init__(self, *args, **kwargs):
38 """Setup layout."""
39 super(forms.ModelForm, self).__init__(*args, **kwargs)
40 self.helper = FormHelper()
41
42 self.helper.form_tag = False
43 self.helper.form_show_errors = True
44
45 """
46 Create a default 'layout' for this form.
47 Ref: https://django-crispy-forms.readthedocs.io/en/latest/layouts.html
48 This is required to do fancy things later (like adding PrependedText, etc).
49
50 Simply create a 'blank' layout for each available field.
51 """
52
53 self.rebuild_layout()
54
55 def rebuild_layout(self):
56 """Build crispy layout out of current fields."""
57 layouts = []
58
59 for field in self.fields:
60 prefix = self.field_prefix.get(field, None)
61 suffix = self.field_suffix.get(field, None)
62 placeholder = self.field_placeholder.get(field, '')
63
64 # Look for font-awesome icons
65 if prefix and prefix.startswith('fa-'):
66 prefix = r"<i class='fas {fa}'/>".format(fa=prefix)
67
68 if suffix and suffix.startswith('fa-'):
69 suffix = r"<i class='fas {fa}'/>".format(fa=suffix)
70
71 if prefix and suffix:
72 layouts.append(
73 Field(
74 PrependedAppendedText(
75 field,
76 prepended_text=prefix,
77 appended_text=suffix,
78 placeholder=placeholder
79 )
80 )
81 )
82
83 elif prefix:
84 layouts.append(
85 Field(
86 PrependedText(
87 field,
88 prefix,
89 placeholder=placeholder
90 )
91 )
92 )
93
94 elif suffix:
95 layouts.append(
96 Field(
97 AppendedText(
98 field,
99 suffix,
100 placeholder=placeholder
101 )
102 )
103 )
104
105 else:
106 layouts.append(Field(field, placeholder=placeholder))
107
108 self.helper.layout = Layout(*layouts)
109
110
111 class EditUserForm(HelperForm):
112 """Form for editing user information."""
113
114 class Meta:
115 """Metaclass options."""
116
117 model = User
118 fields = [
119 'first_name',
120 'last_name',
121 ]
122
123
124 class SetPasswordForm(HelperForm):
125 """Form for setting user password."""
126
127 enter_password = forms.CharField(
128 max_length=100,
129 min_length=8,
130 required=True,
131 initial='',
132 widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),
133 label=_('Enter password'),
134 help_text=_('Enter new password')
135 )
136
137 confirm_password = forms.CharField(
138 max_length=100,
139 min_length=8,
140 required=True,
141 initial='',
142 widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),
143 label=_('Confirm password'),
144 help_text=_('Confirm new password')
145 )
146
147 old_password = forms.CharField(
148 label=_("Old password"),
149 strip=False,
150 widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True}),
151 )
152
153 class Meta:
154 """Metaclass options."""
155
156 model = User
157 fields = [
158 'enter_password',
159 'confirm_password',
160 'old_password',
161 ]
162
163
164 # override allauth
165 class CustomSignupForm(SignupForm):
166 """Override to use dynamic settings."""
167
168 def __init__(self, *args, **kwargs):
169 """Check settings to influence which fields are needed."""
170 kwargs['email_required'] = InvenTreeSetting.get_setting('LOGIN_MAIL_REQUIRED')
171
172 super().__init__(*args, **kwargs)
173
174 # check for two mail fields
175 if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):
176 self.fields["email2"] = forms.EmailField(
177 label=_("Email (again)"),
178 widget=forms.TextInput(
179 attrs={
180 "type": "email",
181 "placeholder": _("Email address confirmation"),
182 }
183 ),
184 )
185
186 # check for two password fields
187 if not InvenTreeSetting.get_setting('LOGIN_SIGNUP_PWD_TWICE'):
188 self.fields.pop("password2")
189
190 # reorder fields
191 set_form_field_order(self, ["username", "email", "email2", "password1", "password2", ])
192
193 def clean(self):
194 """Make sure the supllied emails match if enabled in settings."""
195 cleaned_data = super().clean()
196
197 # check for two mail fields
198 if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):
199 email = cleaned_data.get("email")
200 email2 = cleaned_data.get("email2")
201 if (email and email2) and email != email2:
202 self.add_error("email2", _("You must type the same email each time."))
203
204 return cleaned_data
205
206
207 class RegistratonMixin:
208 """Mixin to check if registration should be enabled."""
209
210 def is_open_for_signup(self, request, *args, **kwargs):
211 """Check if signup is enabled in settings."""
212 if settings.EMAIL_HOST and InvenTreeSetting.get_setting('LOGIN_ENABLE_REG', True):
213 return super().is_open_for_signup(request, *args, **kwargs)
214 return False
215
216 def save_user(self, request, user, form, commit=True):
217 """Check if a default group is set in settings."""
218 user = super().save_user(request, user, form)
219 start_group = InvenTreeSetting.get_setting('SIGNUP_GROUP')
220 if start_group:
221 try:
222 group = Group.objects.get(id=start_group)
223 user.groups.add(group)
224 except Group.DoesNotExist:
225 logger.error('The setting `SIGNUP_GROUP` contains an non existant group', start_group)
226 user.save()
227 return user
228
229
230 class CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):
231 """Override of adapter to use dynamic settings."""
232 def send_mail(self, template_prefix, email, context):
233 """Only send mail if backend configured."""
234 if settings.EMAIL_HOST:
235 return super().send_mail(template_prefix, email, context)
236 return False
237
238
239 class CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):
240 """Override of adapter to use dynamic settings."""
241
242 def is_auto_signup_allowed(self, request, sociallogin):
243 """Check if auto signup is enabled in settings."""
244 if InvenTreeSetting.get_setting('LOGIN_SIGNUP_SSO_AUTO', True):
245 return super().is_auto_signup_allowed(request, sociallogin)
246 return False
247
248 # from OTPAdapter
249 def has_2fa_enabled(self, user):
250 """Returns True if the user has 2FA configured."""
251 return user_has_valid_totp_device(user)
252
253 def login(self, request, user):
254 """Ensure user is send to 2FA before login if enabled."""
255 # Require two-factor authentication if it has been configured.
256 if self.has_2fa_enabled(user):
257 # Cast to string for the case when this is not a JSON serializable
258 # object, e.g. a UUID.
259 request.session['allauth_2fa_user_id'] = str(user.id)
260
261 redirect_url = reverse('two-factor-authenticate')
262 # Add GET parameters to the URL if they exist.
263 if request.GET:
264 redirect_url += '?' + urlencode(request.GET)
265
266 raise ImmediateHttpResponse(
267 response=HttpResponseRedirect(redirect_url)
268 )
269
270 # Otherwise defer to the original allauth adapter.
271 return super().login(request, user)
272
[end of InvenTree/InvenTree/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/InvenTree/InvenTree/forms.py b/InvenTree/InvenTree/forms.py
--- a/InvenTree/InvenTree/forms.py
+++ b/InvenTree/InvenTree/forms.py
@@ -6,6 +6,7 @@
from django import forms
from django.conf import settings
from django.contrib.auth.models import Group, User
+from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
@@ -227,7 +228,16 @@
return user
-class CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):
+class CustomUrlMixin:
+ """Mixin to set urls."""
+
+ def get_email_confirmation_url(self, request, emailconfirmation):
+ """Custom email confirmation (activation) url."""
+ url = reverse("account_confirm_email", args=[emailconfirmation.key])
+ return Site.objects.get_current().domain + url
+
+
+class CustomAccountAdapter(CustomUrlMixin, RegistratonMixin, OTPAdapter, DefaultAccountAdapter):
"""Override of adapter to use dynamic settings."""
def send_mail(self, template_prefix, email, context):
"""Only send mail if backend configured."""
@@ -236,7 +246,7 @@
return False
-class CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):
+class CustomSocialAccountAdapter(CustomUrlMixin, RegistratonMixin, DefaultSocialAccountAdapter):
"""Override of adapter to use dynamic settings."""
def is_auto_signup_allowed(self, request, sociallogin):
|
{"golden_diff": "diff --git a/InvenTree/InvenTree/forms.py b/InvenTree/InvenTree/forms.py\n--- a/InvenTree/InvenTree/forms.py\n+++ b/InvenTree/InvenTree/forms.py\n@@ -6,6 +6,7 @@\n from django import forms\n from django.conf import settings\n from django.contrib.auth.models import Group, User\n+from django.contrib.sites.models import Site\n from django.http import HttpResponseRedirect\n from django.urls import reverse\n from django.utils.translation import gettext_lazy as _\n@@ -227,7 +228,16 @@\n return user\n \n \n-class CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n+class CustomUrlMixin:\n+ \"\"\"Mixin to set urls.\"\"\"\n+\n+ def get_email_confirmation_url(self, request, emailconfirmation):\n+ \"\"\"Custom email confirmation (activation) url.\"\"\"\n+ url = reverse(\"account_confirm_email\", args=[emailconfirmation.key])\n+ return Site.objects.get_current().domain + url\n+\n+\n+class CustomAccountAdapter(CustomUrlMixin, RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n def send_mail(self, template_prefix, email, context):\n \"\"\"Only send mail if backend configured.\"\"\"\n@@ -236,7 +246,7 @@\n return False\n \n \n-class CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):\n+class CustomSocialAccountAdapter(CustomUrlMixin, RegistratonMixin, DefaultSocialAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n \n def is_auto_signup_allowed(self, request, sociallogin):\n", "issue": "[BUG] Confirmation Email uses local ip for link URL\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find similar issue\n\n### Describe the bug*\n\nWhen sending a verification email, it shows the local IP not the set base URL\r\n\r\nEg. mywebsite.com\r\n\r\nHello from InvenTree!\r\n\r\nYou're receiving this e-mail because user kyle has given your e-mail address to register an account on https://mywebsite.com\r\n\r\nTo confirm this is correct, go to http://192.168.1.111:83/accounts/confirm-email/MQ:1olnC0:UXQyFESkK7GxGnQOOEJcP8AXY1gkd-bbeIK02tDoxoo/\r\n\r\nThank you for using InvenTree!\r\nhttps://mywebsite.com\n\n### Steps to Reproduce\n\n1. Set base URL\r\n2. Add an email\r\n3. 
Send a verification email\n\n### Expected behavior\n\nURL should respect the base URL setting\n\n### Deployment Method\n\n- [X] Docker\n- [ ] Bare metal\n\n### Version Information\n\n# Version Information:\r\nInvenTree-Version: 0.8.3\r\nDjango Version: 3.2.15\r\nCommit Hash: f9d2b14\r\nCommit Date: 2022-09-23\r\nDatabase: postgresql\r\nDebug-Mode: False\r\nDeployed using Docker: True\r\n\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "\"\"\"Helper forms which subclass Django forms to provide additional functionality.\"\"\"\n\nimport logging\nfrom urllib.parse import urlencode\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom allauth.account.forms import SignupForm, set_form_field_order\nfrom allauth.exceptions import ImmediateHttpResponse\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\nfrom allauth_2fa.adapter import OTPAdapter\nfrom allauth_2fa.utils import user_has_valid_totp_device\nfrom crispy_forms.bootstrap import (AppendedText, PrependedAppendedText,\n PrependedText)\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Field, Layout\n\nfrom common.models import InvenTreeSetting\n\nlogger = logging.getLogger('inventree')\n\n\nclass HelperForm(forms.ModelForm):\n \"\"\"Provides simple integration of crispy_forms extension.\"\"\"\n\n # Custom field decorations can be specified here, per form class\n field_prefix = {}\n field_suffix = {}\n field_placeholder = {}\n\n def __init__(self, *args, **kwargs):\n \"\"\"Setup layout.\"\"\"\n super(forms.ModelForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n\n self.helper.form_tag = False\n self.helper.form_show_errors = True\n\n \"\"\"\n Create a default 'layout' for this form.\n Ref: https://django-crispy-forms.readthedocs.io/en/latest/layouts.html\n This is required to do fancy things later (like adding PrependedText, etc).\n\n Simply create a 'blank' layout for each available field.\n \"\"\"\n\n self.rebuild_layout()\n\n def rebuild_layout(self):\n \"\"\"Build crispy layout out of current fields.\"\"\"\n layouts = []\n\n for field in self.fields:\n prefix = self.field_prefix.get(field, None)\n suffix = self.field_suffix.get(field, None)\n placeholder = self.field_placeholder.get(field, '')\n\n # Look for font-awesome icons\n if prefix and prefix.startswith('fa-'):\n prefix = r\"<i class='fas {fa}'/>\".format(fa=prefix)\n\n if suffix and suffix.startswith('fa-'):\n suffix = r\"<i class='fas {fa}'/>\".format(fa=suffix)\n\n if prefix and suffix:\n layouts.append(\n Field(\n PrependedAppendedText(\n field,\n prepended_text=prefix,\n appended_text=suffix,\n placeholder=placeholder\n )\n )\n )\n\n elif prefix:\n layouts.append(\n Field(\n PrependedText(\n field,\n prefix,\n placeholder=placeholder\n )\n )\n )\n\n elif suffix:\n layouts.append(\n Field(\n AppendedText(\n field,\n suffix,\n placeholder=placeholder\n )\n )\n )\n\n else:\n layouts.append(Field(field, placeholder=placeholder))\n\n self.helper.layout = Layout(*layouts)\n\n\nclass EditUserForm(HelperForm):\n \"\"\"Form for editing user information.\"\"\"\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n model = User\n fields = [\n 'first_name',\n 'last_name',\n ]\n\n\nclass SetPasswordForm(HelperForm):\n \"\"\"Form for setting 
user password.\"\"\"\n\n enter_password = forms.CharField(\n max_length=100,\n min_length=8,\n required=True,\n initial='',\n widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),\n label=_('Enter password'),\n help_text=_('Enter new password')\n )\n\n confirm_password = forms.CharField(\n max_length=100,\n min_length=8,\n required=True,\n initial='',\n widget=forms.PasswordInput(attrs={'autocomplete': 'off'}),\n label=_('Confirm password'),\n help_text=_('Confirm new password')\n )\n\n old_password = forms.CharField(\n label=_(\"Old password\"),\n strip=False,\n widget=forms.PasswordInput(attrs={'autocomplete': 'current-password', 'autofocus': True}),\n )\n\n class Meta:\n \"\"\"Metaclass options.\"\"\"\n\n model = User\n fields = [\n 'enter_password',\n 'confirm_password',\n 'old_password',\n ]\n\n\n# override allauth\nclass CustomSignupForm(SignupForm):\n \"\"\"Override to use dynamic settings.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Check settings to influence which fields are needed.\"\"\"\n kwargs['email_required'] = InvenTreeSetting.get_setting('LOGIN_MAIL_REQUIRED')\n\n super().__init__(*args, **kwargs)\n\n # check for two mail fields\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):\n self.fields[\"email2\"] = forms.EmailField(\n label=_(\"Email (again)\"),\n widget=forms.TextInput(\n attrs={\n \"type\": \"email\",\n \"placeholder\": _(\"Email address confirmation\"),\n }\n ),\n )\n\n # check for two password fields\n if not InvenTreeSetting.get_setting('LOGIN_SIGNUP_PWD_TWICE'):\n self.fields.pop(\"password2\")\n\n # reorder fields\n set_form_field_order(self, [\"username\", \"email\", \"email2\", \"password1\", \"password2\", ])\n\n def clean(self):\n \"\"\"Make sure the supllied emails match if enabled in settings.\"\"\"\n cleaned_data = super().clean()\n\n # check for two mail fields\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_MAIL_TWICE'):\n email = cleaned_data.get(\"email\")\n email2 = cleaned_data.get(\"email2\")\n if (email and email2) and email != email2:\n self.add_error(\"email2\", _(\"You must type the same email each time.\"))\n\n return cleaned_data\n\n\nclass RegistratonMixin:\n \"\"\"Mixin to check if registration should be enabled.\"\"\"\n\n def is_open_for_signup(self, request, *args, **kwargs):\n \"\"\"Check if signup is enabled in settings.\"\"\"\n if settings.EMAIL_HOST and InvenTreeSetting.get_setting('LOGIN_ENABLE_REG', True):\n return super().is_open_for_signup(request, *args, **kwargs)\n return False\n\n def save_user(self, request, user, form, commit=True):\n \"\"\"Check if a default group is set in settings.\"\"\"\n user = super().save_user(request, user, form)\n start_group = InvenTreeSetting.get_setting('SIGNUP_GROUP')\n if start_group:\n try:\n group = Group.objects.get(id=start_group)\n user.groups.add(group)\n except Group.DoesNotExist:\n logger.error('The setting `SIGNUP_GROUP` contains an non existant group', start_group)\n user.save()\n return user\n\n\nclass CustomAccountAdapter(RegistratonMixin, OTPAdapter, DefaultAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n def send_mail(self, template_prefix, email, context):\n \"\"\"Only send mail if backend configured.\"\"\"\n if settings.EMAIL_HOST:\n return super().send_mail(template_prefix, email, context)\n return False\n\n\nclass CustomSocialAccountAdapter(RegistratonMixin, DefaultSocialAccountAdapter):\n \"\"\"Override of adapter to use dynamic settings.\"\"\"\n\n def is_auto_signup_allowed(self, request, sociallogin):\n \"\"\"Check 
if auto signup is enabled in settings.\"\"\"\n if InvenTreeSetting.get_setting('LOGIN_SIGNUP_SSO_AUTO', True):\n return super().is_auto_signup_allowed(request, sociallogin)\n return False\n\n # from OTPAdapter\n def has_2fa_enabled(self, user):\n \"\"\"Returns True if the user has 2FA configured.\"\"\"\n return user_has_valid_totp_device(user)\n\n def login(self, request, user):\n \"\"\"Ensure user is send to 2FA before login if enabled.\"\"\"\n # Require two-factor authentication if it has been configured.\n if self.has_2fa_enabled(user):\n # Cast to string for the case when this is not a JSON serializable\n # object, e.g. a UUID.\n request.session['allauth_2fa_user_id'] = str(user.id)\n\n redirect_url = reverse('two-factor-authenticate')\n # Add GET parameters to the URL if they exist.\n if request.GET:\n redirect_url += '?' + urlencode(request.GET)\n\n raise ImmediateHttpResponse(\n response=HttpResponseRedirect(redirect_url)\n )\n\n # Otherwise defer to the original allauth adapter.\n return super().login(request, user)\n", "path": "InvenTree/InvenTree/forms.py"}]}
| 3,383 | 351 |
gh_patches_debug_11862
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-18894
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: `adhoc_tool` example field values swapped
When looking at the rendered docs, it was much easier to spot that the example field values here are swapped (files vs directories). 👀
_Originally posted by @kaos in https://github.com/pantsbuild/pants/pull/18237#discussion_r1184219518_
</issue>
<code>
[start of src/python/pants/backend/adhoc/target_types.py]
1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from typing import ClassVar
7
8 from pants.core.util_rules.environments import EnvironmentField
9 from pants.engine.target import (
10 COMMON_TARGET_FIELDS,
11 BoolField,
12 Dependencies,
13 IntField,
14 MultipleSourcesField,
15 SpecialCasedDependencies,
16 StringField,
17 StringSequenceField,
18 Target,
19 ValidNumbers,
20 )
21 from pants.util.strutil import help_text
22
23
24 class AdhocToolDependenciesField(Dependencies):
25 pass
26
27
28 class AdhocToolRunnableField(StringField):
29 alias: ClassVar[str] = "runnable"
30 required = True
31 help = help_text(
32 lambda: f"""
33 Address to a target that can be invoked by the `run` goal (and does not set
34 `run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments
35 specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive
36 dependencies, along with the transitive dependencies specified by
37 `{AdhocToolExecutionDependenciesField.alias}`.
38 """
39 )
40
41
42 class AdhocToolOutputFilesField(StringSequenceField):
43 alias: ClassVar[str] = "output_files"
44 required = False
45 default = ()
46 help = help_text(
47 lambda: f"""
48 Specify the output files to capture, relative to the value of
49 `{AdhocToolWorkdirField.alias}`.
50
51 For directories, use `{AdhocToolOutputDirectoriesField.alias}`. At least one of
52 `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be
53 specified.
54
55 Relative paths (including `..`) may be used, as long as the path does not ascend further
56 than the build root.
57 """
58 )
59
60
61 class AdhocToolOutputDirectoriesField(StringSequenceField):
62 alias: ClassVar[str] = "output_directories"
63 required = False
64 default = ()
65 help = help_text(
66 lambda: f"""
67 Specify full directories (including recursive descendants) of output to capture, relative
68 to the value of `{AdhocToolWorkdirField.alias}`.
69
70 For individual files, use `{AdhocToolOutputFilesField.alias}`. At least one of
71 `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be
72 specified.
73
74 Relative paths (including `..`) may be used, as long as the path does not ascend further
75 than the build root.
76 """
77 )
78
79
80 class AdhocToolOutputDependenciesField(AdhocToolDependenciesField):
81 supports_transitive_excludes = True
82 alias: ClassVar[str] = "output_dependencies"
83
84 help = help_text(
85 lambda: """
86 Any dependencies that need to be present (as transitive dependencies) whenever the outputs
87 of this target are consumed (including as dependencies).
88 """
89 )
90
91
92 class AdhocToolExecutionDependenciesField(SpecialCasedDependencies):
93 alias: ClassVar[str] = "execution_dependencies"
94 required = False
95 default = None
96
97 help = help_text(
98 lambda: f"""
99 The execution dependencies for this command.
100
101 Dependencies specified here are those required to make the command complete successfully
102 (e.g. file inputs, packages compiled from other targets, etc), but NOT required to make
103 the outputs of the command useful. Dependencies that are required to use the outputs
104 produced by this command should be specified using the
105 `{AdhocToolOutputDependenciesField.alias}` field.
106
107 If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`
108 will not be added to the execution sandbox.
109 """
110 )
111
112
113 class AdhocToolRunnableDependenciesField(SpecialCasedDependencies):
114 alias: ClassVar[str] = "runnable_dependencies"
115 required = False
116 default = None
117
118 help = help_text(
119 lambda: f"""
120 The execution dependencies for this command.
121
122 Dependencies specified here are those required to exist on the `PATH` to make the command
123 complete successfully (interpreters specified in a `#!` command, etc). Note that these
124 dependencies will be made available on the `PATH` with the name of the target.
125
126 See also `{AdhocToolOutputDependenciesField.alias}` and
127 `{AdhocToolExecutionDependenciesField.alias}.
128 """
129 )
130
131
132 class AdhocToolSourcesField(MultipleSourcesField):
133 # We solely register this field for codegen to work.
134 alias: ClassVar[str] = "_sources"
135 uses_source_roots = False
136 expected_num_files = 0
137
138
139 class AdhocToolArgumentsField(StringSequenceField):
140 alias: ClassVar[str] = "args"
141 default = ()
142 help = help_text(
143 lambda: f"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field."
144 )
145
146
147 class AdhocToolStdoutFilenameField(StringField):
148 alias: ClassVar[str] = "stdout"
149 default = None
150 help = help_text(
151 lambda: f"""
152 A filename to capture the contents of `stdout` to. Relative paths are
153 relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
154 start at the build root.
155 """
156 )
157
158
159 class AdhocToolStderrFilenameField(StringField):
160 alias: ClassVar[str] = "stderr"
161 default = None
162 help = help_text(
163 lambda: f"""
164 A filename to capture the contents of `stderr` to. Relative paths are
165 relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths
166 start at the build root.
167 """
168 )
169
170
171 class AdhocToolTimeoutField(IntField):
172 alias: ClassVar[str] = "timeout"
173 default = 30
174 help = "Command execution timeout (in seconds)."
175 valid_numbers = ValidNumbers.positive_only
176
177
178 class AdhocToolExtraEnvVarsField(StringSequenceField):
179 alias: ClassVar[str] = "extra_env_vars"
180 help = help_text(
181 """
182 Additional environment variables to provide to the process.
183
184 Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
185 `ENV_VAR` to copy the value of a variable in Pants's own environment.
186 """
187 )
188
189
190 class AdhocToolLogOutputField(BoolField):
191 alias: ClassVar[str] = "log_output"
192 default = False
193 help = "Set to true if you want the output logged to the console."
194
195
196 class AdhocToolWorkdirField(StringField):
197 alias: ClassVar[str] = "workdir"
198 default = "."
199 help = help_text(
200 """
201 Sets the working directory for the process.
202
203 Values are relative to the build root, except in the following cases:
204
205 * `.` specifies the location of the `BUILD` file.
206 * Values beginning with `./` are relative to the location of the `BUILD` file.
207 * `/` or the empty string specifies the build root.
208 * Values beginning with `/` are also relative to the build root.
209 """
210 )
211
212
213 class AdhocToolOutputRootDirField(StringField):
214 alias: ClassVar[str] = "root_output_directory"
215 default = "/"
216 help = help_text(
217 """Adjusts the location of files output by this target, when consumed as a dependency.
218
219 Values are relative to the build root, except in the following cases:
220
221 * `.` specifies the location of the `BUILD` file.
222 * Values beginning with `./` are relative to the location of the `BUILD` file.
223 * `/` or the empty string specifies the build root.
224 * Values beginning with `/` are also relative to the build root.
225 """
226 )
227
228
229 class AdhocToolTarget(Target):
230 alias: ClassVar[str] = "adhoc_tool"
231 core_fields = (
232 *COMMON_TARGET_FIELDS,
233 AdhocToolRunnableField,
234 AdhocToolArgumentsField,
235 AdhocToolExecutionDependenciesField,
236 AdhocToolOutputDependenciesField,
237 AdhocToolRunnableDependenciesField,
238 AdhocToolLogOutputField,
239 AdhocToolOutputFilesField,
240 AdhocToolOutputDirectoriesField,
241 AdhocToolSourcesField,
242 AdhocToolTimeoutField,
243 AdhocToolExtraEnvVarsField,
244 AdhocToolWorkdirField,
245 AdhocToolOutputRootDirField,
246 AdhocToolStdoutFilenameField,
247 AdhocToolStderrFilenameField,
248 EnvironmentField,
249 )
250 help = help_text(
251 lambda: f"""
252 Execute any runnable target for its side effects.
253
254 Example BUILD file:
255
256 {AdhocToolTarget.alias}(
257 {AdhocToolRunnableField.alias}=":python_source",
258 {AdhocToolArgumentsField.alias}=[""],
259 {AdhocToolExecutionDependenciesField.alias}=[":scripts"],
260 {AdhocToolOutputDirectoriesField.alias}=["logs/my-script.log"],
261 {AdhocToolOutputFilesField.alias}=["results/"],
262 )
263
264 shell_sources(name="scripts")
265 """
266 )
267
268
269 # ---
270 # `system_binary` target
271 # ---
272
273
274 class SystemBinaryNameField(StringField):
275 alias: ClassVar[str] = "binary_name"
276 required = True
277 help = "The name of the binary to find."
278
279
280 class SystemBinaryExtraSearchPathsField(StringSequenceField):
281 alias: ClassVar[str] = "extra_search_paths"
282 default = ()
283 help = help_text(
284 """
285 Extra search paths to look for the binary. These take priority over Pants' default
286 search paths.
287 """
288 )
289
290
291 class SystemBinaryFingerprintPattern(StringField):
292 alias: ClassVar[str] = "fingerprint"
293 required = False
294 default = None
295 help = help_text(
296 """
297 A regular expression which will be used to match the fingerprint outputs from
298 candidate binaries found during the search process.
299 """
300 )
301
302
303 class SystemBinaryFingerprintArgsField(StringSequenceField):
304 alias: ClassVar[str] = "fingerprint_args"
305 default = ()
306 help = help_text(
307 "Specifies arguments that will be used to run the binary during the search process."
308 )
309
310
311 class SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):
312 alias: ClassVar[str] = "fingerprint_dependencies"
313 help = help_text(
314 """
315 Specifies any runnable dependencies that need to be available on the `PATH` when the binary
316 is run, so that the search process may complete successfully. The name of the target must
317 be the name of the runnable dependency that is called by this binary.
318 """
319 )
320
321
322 class SystemBinaryTarget(Target):
323 alias: ClassVar[str] = "system_binary"
324 core_fields = (
325 *COMMON_TARGET_FIELDS,
326 SystemBinaryNameField,
327 SystemBinaryExtraSearchPathsField,
328 SystemBinaryFingerprintPattern,
329 SystemBinaryFingerprintArgsField,
330 SystemBinaryFingerprintDependenciesField,
331 )
332 help = help_text(
333 lambda: f"""
334 A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.
335
336 Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search
337 paths provided, as well as default search paths. If
338 `{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be
339 executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries
340 whose output does not match the pattern will be excluded.
341
342 The first non-excluded binary will be the one that is resolved.
343 """
344 )
345
[end of src/python/pants/backend/adhoc/target_types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/backend/adhoc/target_types.py b/src/python/pants/backend/adhoc/target_types.py
--- a/src/python/pants/backend/adhoc/target_types.py
+++ b/src/python/pants/backend/adhoc/target_types.py
@@ -257,8 +257,8 @@
{AdhocToolRunnableField.alias}=":python_source",
{AdhocToolArgumentsField.alias}=[""],
{AdhocToolExecutionDependenciesField.alias}=[":scripts"],
- {AdhocToolOutputDirectoriesField.alias}=["logs/my-script.log"],
- {AdhocToolOutputFilesField.alias}=["results/"],
+ {AdhocToolOutputDirectoriesField.alias}=["results/"],
+ {AdhocToolOutputFilesField.alias}=["logs/my-script.log"],
)
shell_sources(name="scripts")
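Rendered with the field aliases resolved, the corrected example from the help text would read roughly as follows (a sketch of the post-fix BUILD file, with the log file captured via `output_files` and `results/` captured via `output_directories`):

```
adhoc_tool(
    runnable=":python_source",
    args=[""],
    execution_dependencies=[":scripts"],
    output_directories=["results/"],
    output_files=["logs/my-script.log"],
)

shell_sources(name="scripts")
```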
|
{"golden_diff": "diff --git a/src/python/pants/backend/adhoc/target_types.py b/src/python/pants/backend/adhoc/target_types.py\n--- a/src/python/pants/backend/adhoc/target_types.py\n+++ b/src/python/pants/backend/adhoc/target_types.py\n@@ -257,8 +257,8 @@\n {AdhocToolRunnableField.alias}=\":python_source\",\n {AdhocToolArgumentsField.alias}=[\"\"],\n {AdhocToolExecutionDependenciesField.alias}=[\":scripts\"],\n- {AdhocToolOutputDirectoriesField.alias}=[\"logs/my-script.log\"],\n- {AdhocToolOutputFilesField.alias}=[\"results/\"],\n+ {AdhocToolOutputDirectoriesField.alias}=[\"results/\"],\n+ {AdhocToolOutputFilesField.alias}=[\"logs/my-script.log\"],\n )\n \n shell_sources(name=\"scripts\")\n", "issue": "docs: `adhoc_tool` example field values swapped\n When looking at the rendered docs it was much easier to spot that the example field values here are swapped (files vs directories). \ud83d\udc40\r\n\r\n_Originally posted by @kaos in https://github.com/pantsbuild/pants/pull/18237#discussion_r1184219518_\r\n \n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar\n\nfrom pants.core.util_rules.environments import EnvironmentField\nfrom pants.engine.target import (\n COMMON_TARGET_FIELDS,\n BoolField,\n Dependencies,\n IntField,\n MultipleSourcesField,\n SpecialCasedDependencies,\n StringField,\n StringSequenceField,\n Target,\n ValidNumbers,\n)\nfrom pants.util.strutil import help_text\n\n\nclass AdhocToolDependenciesField(Dependencies):\n pass\n\n\nclass AdhocToolRunnableField(StringField):\n alias: ClassVar[str] = \"runnable\"\n required = True\n help = help_text(\n lambda: f\"\"\"\n Address to a target that can be invoked by the `run` goal (and does not set\n `run_in_sandbox_behavior=NOT_SUPPORTED`). This will be executed along with any arguments\n specified by `{AdhocToolArgumentsField.alias}`, in a sandbox with that target's transitive\n dependencies, along with the transitive dependencies specified by\n `{AdhocToolExecutionDependenciesField.alias}`.\n \"\"\"\n )\n\n\nclass AdhocToolOutputFilesField(StringSequenceField):\n alias: ClassVar[str] = \"output_files\"\n required = False\n default = ()\n help = help_text(\n lambda: f\"\"\"\n Specify the output files to capture, relative to the value of\n `{AdhocToolWorkdirField.alias}`.\n\n For directories, use `{AdhocToolOutputDirectoriesField.alias}`. At least one of\n `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be\n specified.\n\n Relative paths (including `..`) may be used, as long as the path does not ascend further\n than the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputDirectoriesField(StringSequenceField):\n alias: ClassVar[str] = \"output_directories\"\n required = False\n default = ()\n help = help_text(\n lambda: f\"\"\"\n Specify full directories (including recursive descendants) of output to capture, relative\n to the value of `{AdhocToolWorkdirField.alias}`.\n\n For individual files, use `{AdhocToolOutputFilesField.alias}`. 
At least one of\n `{AdhocToolOutputFilesField.alias}` and`{AdhocToolOutputDirectoriesField.alias}` must be\n specified.\n\n Relative paths (including `..`) may be used, as long as the path does not ascend further\n than the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputDependenciesField(AdhocToolDependenciesField):\n supports_transitive_excludes = True\n alias: ClassVar[str] = \"output_dependencies\"\n\n help = help_text(\n lambda: \"\"\"\n Any dependencies that need to be present (as transitive dependencies) whenever the outputs\n of this target are consumed (including as dependencies).\n \"\"\"\n )\n\n\nclass AdhocToolExecutionDependenciesField(SpecialCasedDependencies):\n alias: ClassVar[str] = \"execution_dependencies\"\n required = False\n default = None\n\n help = help_text(\n lambda: f\"\"\"\n The execution dependencies for this command.\n\n Dependencies specified here are those required to make the command complete successfully\n (e.g. file inputs, packages compiled from other targets, etc), but NOT required to make\n the outputs of the command useful. Dependencies that are required to use the outputs\n produced by this command should be specified using the\n `{AdhocToolOutputDependenciesField.alias}` field.\n\n If this field is specified, dependencies from `{AdhocToolOutputDependenciesField.alias}`\n will not be added to the execution sandbox.\n \"\"\"\n )\n\n\nclass AdhocToolRunnableDependenciesField(SpecialCasedDependencies):\n alias: ClassVar[str] = \"runnable_dependencies\"\n required = False\n default = None\n\n help = help_text(\n lambda: f\"\"\"\n The execution dependencies for this command.\n\n Dependencies specified here are those required to exist on the `PATH` to make the command\n complete successfully (interpreters specified in a `#!` command, etc). Note that these\n dependencies will be made available on the `PATH` with the name of the target.\n\n See also `{AdhocToolOutputDependenciesField.alias}` and\n `{AdhocToolExecutionDependenciesField.alias}.\n \"\"\"\n )\n\n\nclass AdhocToolSourcesField(MultipleSourcesField):\n # We solely register this field for codegen to work.\n alias: ClassVar[str] = \"_sources\"\n uses_source_roots = False\n expected_num_files = 0\n\n\nclass AdhocToolArgumentsField(StringSequenceField):\n alias: ClassVar[str] = \"args\"\n default = ()\n help = help_text(\n lambda: f\"Extra arguments to pass into the `{AdhocToolRunnableField.alias}` field.\"\n )\n\n\nclass AdhocToolStdoutFilenameField(StringField):\n alias: ClassVar[str] = \"stdout\"\n default = None\n help = help_text(\n lambda: f\"\"\"\n A filename to capture the contents of `stdout` to. Relative paths are\n relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths\n start at the build root.\n \"\"\"\n )\n\n\nclass AdhocToolStderrFilenameField(StringField):\n alias: ClassVar[str] = \"stderr\"\n default = None\n help = help_text(\n lambda: f\"\"\"\n A filename to capture the contents of `stderr` to. 
Relative paths are\n relative to the value of `{AdhocToolWorkdirField.alias}`, absolute paths\n start at the build root.\n \"\"\"\n )\n\n\nclass AdhocToolTimeoutField(IntField):\n alias: ClassVar[str] = \"timeout\"\n default = 30\n help = \"Command execution timeout (in seconds).\"\n valid_numbers = ValidNumbers.positive_only\n\n\nclass AdhocToolExtraEnvVarsField(StringSequenceField):\n alias: ClassVar[str] = \"extra_env_vars\"\n help = help_text(\n \"\"\"\n Additional environment variables to provide to the process.\n\n Entries are strings in the form `ENV_VAR=value` to use explicitly; or just\n `ENV_VAR` to copy the value of a variable in Pants's own environment.\n \"\"\"\n )\n\n\nclass AdhocToolLogOutputField(BoolField):\n alias: ClassVar[str] = \"log_output\"\n default = False\n help = \"Set to true if you want the output logged to the console.\"\n\n\nclass AdhocToolWorkdirField(StringField):\n alias: ClassVar[str] = \"workdir\"\n default = \".\"\n help = help_text(\n \"\"\"\n Sets the working directory for the process.\n\n Values are relative to the build root, except in the following cases:\n\n * `.` specifies the location of the `BUILD` file.\n * Values beginning with `./` are relative to the location of the `BUILD` file.\n * `/` or the empty string specifies the build root.\n * Values beginning with `/` are also relative to the build root.\n \"\"\"\n )\n\n\nclass AdhocToolOutputRootDirField(StringField):\n alias: ClassVar[str] = \"root_output_directory\"\n default = \"/\"\n help = help_text(\n \"\"\"Adjusts the location of files output by this target, when consumed as a dependency.\n\n Values are relative to the build root, except in the following cases:\n\n * `.` specifies the location of the `BUILD` file.\n * Values beginning with `./` are relative to the location of the `BUILD` file.\n * `/` or the empty string specifies the build root.\n * Values beginning with `/` are also relative to the build root.\n \"\"\"\n )\n\n\nclass AdhocToolTarget(Target):\n alias: ClassVar[str] = \"adhoc_tool\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n AdhocToolRunnableField,\n AdhocToolArgumentsField,\n AdhocToolExecutionDependenciesField,\n AdhocToolOutputDependenciesField,\n AdhocToolRunnableDependenciesField,\n AdhocToolLogOutputField,\n AdhocToolOutputFilesField,\n AdhocToolOutputDirectoriesField,\n AdhocToolSourcesField,\n AdhocToolTimeoutField,\n AdhocToolExtraEnvVarsField,\n AdhocToolWorkdirField,\n AdhocToolOutputRootDirField,\n AdhocToolStdoutFilenameField,\n AdhocToolStderrFilenameField,\n EnvironmentField,\n )\n help = help_text(\n lambda: f\"\"\"\n Execute any runnable target for its side effects.\n\n Example BUILD file:\n\n {AdhocToolTarget.alias}(\n {AdhocToolRunnableField.alias}=\":python_source\",\n {AdhocToolArgumentsField.alias}=[\"\"],\n {AdhocToolExecutionDependenciesField.alias}=[\":scripts\"],\n {AdhocToolOutputDirectoriesField.alias}=[\"logs/my-script.log\"],\n {AdhocToolOutputFilesField.alias}=[\"results/\"],\n )\n\n shell_sources(name=\"scripts\")\n \"\"\"\n )\n\n\n# ---\n# `system_binary` target\n# ---\n\n\nclass SystemBinaryNameField(StringField):\n alias: ClassVar[str] = \"binary_name\"\n required = True\n help = \"The name of the binary to find.\"\n\n\nclass SystemBinaryExtraSearchPathsField(StringSequenceField):\n alias: ClassVar[str] = \"extra_search_paths\"\n default = ()\n help = help_text(\n \"\"\"\n Extra search paths to look for the binary. 
These take priority over Pants' default\n search paths.\n \"\"\"\n )\n\n\nclass SystemBinaryFingerprintPattern(StringField):\n alias: ClassVar[str] = \"fingerprint\"\n required = False\n default = None\n help = help_text(\n \"\"\"\n A regular expression which will be used to match the fingerprint outputs from\n candidate binaries found during the search process.\n \"\"\"\n )\n\n\nclass SystemBinaryFingerprintArgsField(StringSequenceField):\n alias: ClassVar[str] = \"fingerprint_args\"\n default = ()\n help = help_text(\n \"Specifies arguments that will be used to run the binary during the search process.\"\n )\n\n\nclass SystemBinaryFingerprintDependenciesField(AdhocToolRunnableDependenciesField):\n alias: ClassVar[str] = \"fingerprint_dependencies\"\n help = help_text(\n \"\"\"\n Specifies any runnable dependencies that need to be available on the `PATH` when the binary\n is run, so that the search process may complete successfully. The name of the target must\n be the name of the runnable dependency that is called by this binary.\n \"\"\"\n )\n\n\nclass SystemBinaryTarget(Target):\n alias: ClassVar[str] = \"system_binary\"\n core_fields = (\n *COMMON_TARGET_FIELDS,\n SystemBinaryNameField,\n SystemBinaryExtraSearchPathsField,\n SystemBinaryFingerprintPattern,\n SystemBinaryFingerprintArgsField,\n SystemBinaryFingerprintDependenciesField,\n )\n help = help_text(\n lambda: f\"\"\"\n A system binary that can be run with `pants run` or consumed by `{AdhocToolTarget.alias}`.\n\n Pants will search for binaries with name `{SystemBinaryNameField.alias}` in the search\n paths provided, as well as default search paths. If\n `{SystemBinaryFingerprintPattern.alias}` is specified, each binary that is located will be\n executed with the arguments from `{SystemBinaryFingerprintArgsField.alias}`. Any binaries\n whose output does not match the pattern will be excluded.\n\n The first non-excluded binary will be the one that is resolved.\n \"\"\"\n )\n", "path": "src/python/pants/backend/adhoc/target_types.py"}]}
| 4,072 | 179 |
gh_patches_debug_5754
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-6745
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
revert: project-name in project_id
In https://github.com/meltano/meltano/pull/6621 we added the `project name` to the project_id on `init`. Even though we hash the id prior to sending, its addition was unnecessary and creates a few problems:
* having the project name in the id confuses the purpose of the id. We don't need to know or include the directory name in the project id and its inclusion sends the wrong signal that we do care about it somehow
* while it's true that `project_id` can be any string, we generally don't want to encourage folks to change it unless they have a strong reason to
* when trying to correlate local testing to events we're seeing through our production snowplow pipeline, having to hash the id is unnecessarily burdensome when we'd rather just look for the GUID itself
I would support a documentation update to make it clear that `project_id` can be any string, but that it's generally not recommended to change it unless you need to.
Also, I did notice that as part of this update it seems like `project_id` gets put at the end of the file - is there any way to bring that back up above the environments?
@WillDaSilva @aaronsteers
</issue>
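For context, a minimal sketch of the correlation burden described above, assuming the pipeline applies a SHA-256-style hash before sending (the actual hash used by the production Snowplow pipeline is not specified here, and the project name is illustrative):

```
import hashlib
import uuid

# Old, name-prefixed form of the id produced by `meltano init` after PR #6621.
local_project_id = f"my-project-{uuid.uuid4()}"

# Hypothetical hashing step; SHA-256 is only an assumption for illustration.
hashed_for_telemetry = hashlib.sha256(local_project_id.encode()).hexdigest()

# With a bare GUID, the id seen locally can be matched against events directly
# instead of re-computing a hash for every lookup.
bare_guid = str(uuid.uuid4())
```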
<code>
[start of src/meltano/core/project_init_service.py]
1 """New Project Initialization Service."""
2 from __future__ import annotations
3
4 import os
5 import uuid
6
7 import click
8
9 from .cli_messages import GREETING
10 from .db import project_engine
11 from .plugin.meltano_file import MeltanoFilePlugin
12 from .project import Project
13 from .project_settings_service import ProjectSettingsService, SettingValueStore
14
15
16 class ProjectInitServiceError(Exception):
17 """Project Initialization Service Exception."""
18
19
20 class ProjectInitService:
21 """New Project Initialization Service."""
22
23 def __init__(self, project_name):
24 """Create a new ProjectInitService instance.
25
26 Args:
27 project_name: The name of the project to create
28 """
29 self.project_name = project_name.lower()
30
31 def init(self, activate: bool = True, add_discovery: bool = False) -> Project:
32 """Initialise Meltano Project.
33
34 Args:
35 activate: Activate newly created project
36 add_discovery: Add discovery.yml file to created project
37
38 Returns:
39 A new Project instance
40
41 Raises:
42 ProjectInitServiceError: Directory already exists
43 """
44 try:
45 os.mkdir(self.project_name)
46 except FileExistsError as ex:
47 raise ProjectInitServiceError(
48 f"Directory {self.project_name!r} already exists."
49 ) from ex
50 except PermissionError as ex:
51 raise ProjectInitServiceError(
52 f"Permission denied to create {self.project_name!r}."
53 ) from ex
54 except Exception as ex:
55 raise ProjectInitServiceError(
56 f"Could not create directory {self.project_name!r}. {ex}"
57 ) from ex
58
59 click.secho("Created", fg="blue", nl=False)
60 click.echo(f" {self.project_name}")
61
62 self.project = Project(self.project_name)
63
64 self.create_files(add_discovery=add_discovery)
65
66 self.settings_service = ProjectSettingsService(self.project)
67 self.settings_service.set(
68 "project_id",
69 f"{self.project_name}-{uuid.uuid4()}",
70 store=SettingValueStore.MELTANO_YML,
71 )
72 self.set_send_anonymous_usage_stats()
73 if activate:
74 Project.activate(self.project)
75
76 self.create_system_database()
77
78 return self.project
79
80 def create_dot_meltano_dir(self):
81 """Create .meltano directory."""
82 # explicitly create the .meltano directory if it doesn't exist
83 os.makedirs(self.project.meltano_dir(), exist_ok=True)
84 click.secho(" |--", fg="blue", nl=False)
85 click.echo(f" {self.project.meltano_dir().name}")
86
87 def create_files(self, add_discovery=False):
88 """Create project files.
89
90 Args:
91 add_discovery: Add discovery.yml file to created project
92 """
93 click.secho("Creating project files...", fg="blue")
94 click.echo(f" {self.project_name}/")
95
96 self.create_dot_meltano_dir()
97
98 plugin = MeltanoFilePlugin(discovery=add_discovery)
99 for path in plugin.create_files(self.project):
100 click.secho(" |--", fg="blue", nl=False)
101 click.echo(f" {path}")
102
103 def set_send_anonymous_usage_stats(self):
104 """Set Anonymous Usage Stats flag."""
105 # If set to false store explicitly in `meltano.yml`
106 if not self.settings_service.get("send_anonymous_usage_stats"):
107 self.settings_service.set(
108 "send_anonymous_usage_stats",
109 self.settings_service.get("send_anonymous_usage_stats"),
110 store=SettingValueStore.MELTANO_YML,
111 )
112
113 def create_system_database(self):
114 """Create Meltano System DB.
115
116 Raises:
117 ProjectInitServiceError: Database initialization failed
118 """
119 click.secho("Creating system database...", fg="blue", nl=False)
120
121 # register the system database connection
122 engine, _ = project_engine(self.project, default=True)
123
124 from meltano.core.migration_service import MigrationError, MigrationService
125
126 try:
127 migration_service = MigrationService(engine)
128 migration_service.upgrade(silent=True)
129 migration_service.seed(self.project)
130 click.secho(" Done!", fg="blue")
131 except MigrationError as err:
132 raise ProjectInitServiceError(str(err)) from err
133
134 def echo_instructions(self):
135 """Echo Next Steps to Click CLI."""
136 click.secho(GREETING, nl=False)
137 click.secho("\nProject ", nl=False)
138 click.secho(self.project_name, fg="magenta", nl=False)
139 click.echo(" has been created!\n")
140
141 click.echo("Meltano Environments initialized with ", nl=False)
142 click.secho("dev", fg="bright_green", nl=False)
143 click.echo(", ", nl=False)
144 click.secho("staging", fg="bright_yellow", nl=False)
145 click.echo(", and ", nl=False)
146 click.secho("prod", fg="bright_red", nl=False)
147 click.echo(".")
148 click.echo("To learn more about Environments visit: ", nl=False)
149 click.secho(
150 "https://docs.meltano.com/concepts/environments",
151 fg="cyan",
152 )
153
154 click.echo("\nNext steps:")
155 click.secho(" cd ", nl=False)
156 click.secho(self.project_name, fg="magenta")
157 click.echo(" Visit ", nl=False)
158 click.secho(
159 "https://docs.meltano.com/getting-started#create-your-meltano-project",
160 fg="cyan",
161 nl=False,
162 )
163 click.echo(" to learn where to go from here")
164
165 def join_with_project_base(self, filename):
166 """Join Path to Project base.
167
168 Args:
169 filename: File name to join with project base
170
171 Returns:
172 Joined base path and passed filename
173 """
174 return os.path.join(".", self.project_name, filename)
175
[end of src/meltano/core/project_init_service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/meltano/core/project_init_service.py b/src/meltano/core/project_init_service.py
--- a/src/meltano/core/project_init_service.py
+++ b/src/meltano/core/project_init_service.py
@@ -66,7 +66,7 @@
self.settings_service = ProjectSettingsService(self.project)
self.settings_service.set(
"project_id",
- f"{self.project_name}-{uuid.uuid4()}",
+ str(uuid.uuid4()),
store=SettingValueStore.MELTANO_YML,
)
self.set_send_anonymous_usage_stats()
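The reverted behaviour can be sketched as follows (values are illustrative): the new id round-trips through `uuid.UUID`, while the old name-prefixed form does not:

```
import uuid

old_id = f"my-project-{uuid.uuid4()}"   # pre-revert: directory name prefixed
new_id = str(uuid.uuid4())              # post-revert: bare GUID

uuid.UUID(new_id)                       # parses cleanly
try:
    uuid.UUID(old_id)
except ValueError:
    print("name-prefixed id is not a parseable GUID")
```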
|
{"golden_diff": "diff --git a/src/meltano/core/project_init_service.py b/src/meltano/core/project_init_service.py\n--- a/src/meltano/core/project_init_service.py\n+++ b/src/meltano/core/project_init_service.py\n@@ -66,7 +66,7 @@\n self.settings_service = ProjectSettingsService(self.project)\n self.settings_service.set(\n \"project_id\",\n- f\"{self.project_name}-{uuid.uuid4()}\",\n+ str(uuid.uuid4()),\n store=SettingValueStore.MELTANO_YML,\n )\n self.set_send_anonymous_usage_stats()\n", "issue": "revert: project-name in project_id\nIn https://github.com/meltano/meltano/pull/6621 we added the `project name` to the project_id on `init`. Even though we hash the id prior to sending, its addition was unnecessary and creates a few problems:\r\n\r\n* having the project name in the id confuses the purpose of the id. We don't need to know or include the directory name in the project id and its inclusion sends the wrong signal that we do care about it somehow\r\n* while it's true that `project_id` can be any string, we generally don't want to encourage folks to change it unless they have a strong reason to\r\n* when trying to correlate local testing to events we're seeing through our production snowplow pipeline, having to hash the id is unnecessarily burdensome when we'd rather just look for the GUID itself\r\n\r\nI would support a documentation update to make it clear that `project_id` can be any string but that it's generally not recommended to update if you don't need to.\r\n\r\nAlso, I did notice that as part of this update it seems like `project_id` gets put at the end of the file - is there anyway to bring that back up above the environments?\r\n\r\n@WillDaSilva @aaronsteers \n", "before_files": [{"content": "\"\"\"New Project Initialization Service.\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport uuid\n\nimport click\n\nfrom .cli_messages import GREETING\nfrom .db import project_engine\nfrom .plugin.meltano_file import MeltanoFilePlugin\nfrom .project import Project\nfrom .project_settings_service import ProjectSettingsService, SettingValueStore\n\n\nclass ProjectInitServiceError(Exception):\n \"\"\"Project Initialization Service Exception.\"\"\"\n\n\nclass ProjectInitService:\n \"\"\"New Project Initialization Service.\"\"\"\n\n def __init__(self, project_name):\n \"\"\"Create a new ProjectInitService instance.\n\n Args:\n project_name: The name of the project to create\n \"\"\"\n self.project_name = project_name.lower()\n\n def init(self, activate: bool = True, add_discovery: bool = False) -> Project:\n \"\"\"Initialise Meltano Project.\n\n Args:\n activate: Activate newly created project\n add_discovery: Add discovery.yml file to created project\n\n Returns:\n A new Project instance\n\n Raises:\n ProjectInitServiceError: Directory already exists\n \"\"\"\n try:\n os.mkdir(self.project_name)\n except FileExistsError as ex:\n raise ProjectInitServiceError(\n f\"Directory {self.project_name!r} already exists.\"\n ) from ex\n except PermissionError as ex:\n raise ProjectInitServiceError(\n f\"Permission denied to create {self.project_name!r}.\"\n ) from ex\n except Exception as ex:\n raise ProjectInitServiceError(\n f\"Could not create directory {self.project_name!r}. 
{ex}\"\n ) from ex\n\n click.secho(\"Created\", fg=\"blue\", nl=False)\n click.echo(f\" {self.project_name}\")\n\n self.project = Project(self.project_name)\n\n self.create_files(add_discovery=add_discovery)\n\n self.settings_service = ProjectSettingsService(self.project)\n self.settings_service.set(\n \"project_id\",\n f\"{self.project_name}-{uuid.uuid4()}\",\n store=SettingValueStore.MELTANO_YML,\n )\n self.set_send_anonymous_usage_stats()\n if activate:\n Project.activate(self.project)\n\n self.create_system_database()\n\n return self.project\n\n def create_dot_meltano_dir(self):\n \"\"\"Create .meltano directory.\"\"\"\n # explicitly create the .meltano directory if it doesn't exist\n os.makedirs(self.project.meltano_dir(), exist_ok=True)\n click.secho(\" |--\", fg=\"blue\", nl=False)\n click.echo(f\" {self.project.meltano_dir().name}\")\n\n def create_files(self, add_discovery=False):\n \"\"\"Create project files.\n\n Args:\n add_discovery: Add discovery.yml file to created project\n \"\"\"\n click.secho(\"Creating project files...\", fg=\"blue\")\n click.echo(f\" {self.project_name}/\")\n\n self.create_dot_meltano_dir()\n\n plugin = MeltanoFilePlugin(discovery=add_discovery)\n for path in plugin.create_files(self.project):\n click.secho(\" |--\", fg=\"blue\", nl=False)\n click.echo(f\" {path}\")\n\n def set_send_anonymous_usage_stats(self):\n \"\"\"Set Anonymous Usage Stats flag.\"\"\"\n # If set to false store explicitly in `meltano.yml`\n if not self.settings_service.get(\"send_anonymous_usage_stats\"):\n self.settings_service.set(\n \"send_anonymous_usage_stats\",\n self.settings_service.get(\"send_anonymous_usage_stats\"),\n store=SettingValueStore.MELTANO_YML,\n )\n\n def create_system_database(self):\n \"\"\"Create Meltano System DB.\n\n Raises:\n ProjectInitServiceError: Database initialization failed\n \"\"\"\n click.secho(\"Creating system database...\", fg=\"blue\", nl=False)\n\n # register the system database connection\n engine, _ = project_engine(self.project, default=True)\n\n from meltano.core.migration_service import MigrationError, MigrationService\n\n try:\n migration_service = MigrationService(engine)\n migration_service.upgrade(silent=True)\n migration_service.seed(self.project)\n click.secho(\" Done!\", fg=\"blue\")\n except MigrationError as err:\n raise ProjectInitServiceError(str(err)) from err\n\n def echo_instructions(self):\n \"\"\"Echo Next Steps to Click CLI.\"\"\"\n click.secho(GREETING, nl=False)\n click.secho(\"\\nProject \", nl=False)\n click.secho(self.project_name, fg=\"magenta\", nl=False)\n click.echo(\" has been created!\\n\")\n\n click.echo(\"Meltano Environments initialized with \", nl=False)\n click.secho(\"dev\", fg=\"bright_green\", nl=False)\n click.echo(\", \", nl=False)\n click.secho(\"staging\", fg=\"bright_yellow\", nl=False)\n click.echo(\", and \", nl=False)\n click.secho(\"prod\", fg=\"bright_red\", nl=False)\n click.echo(\".\")\n click.echo(\"To learn more about Environments visit: \", nl=False)\n click.secho(\n \"https://docs.meltano.com/concepts/environments\",\n fg=\"cyan\",\n )\n\n click.echo(\"\\nNext steps:\")\n click.secho(\" cd \", nl=False)\n click.secho(self.project_name, fg=\"magenta\")\n click.echo(\" Visit \", nl=False)\n click.secho(\n \"https://docs.meltano.com/getting-started#create-your-meltano-project\",\n fg=\"cyan\",\n nl=False,\n )\n click.echo(\" to learn where to go from here\")\n\n def join_with_project_base(self, filename):\n \"\"\"Join Path to Project base.\n\n Args:\n filename: File name to join with 
project base\n\n Returns:\n Joined base path and passed filename\n \"\"\"\n return os.path.join(\".\", self.project_name, filename)\n", "path": "src/meltano/core/project_init_service.py"}]}
| 2,482 | 126 |
gh_patches_debug_16663
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-2989
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
raise_contract_logic_error_on_revert fails with UnicodeDecodeError
* Version: 6.4.0
* Python: 3.10
* OS: linux
* `pip freeze` output
```
aiohttp==3.8.4
aiosignal==1.3.1
async-timeout==4.0.2
attrs==23.1.0
bitarray==2.7.3
cached-property==1.5.2
certifi==2023.5.7
charset-normalizer==3.1.0
coverage==7.2.6
cytoolz==0.12.1
eth-abi==4.0.0
eth-account==0.8.0
eth-bloom==2.0.0
eth-hash==0.5.1
eth-keyfile==0.6.1
eth-keys==0.4.0
eth-rlp==0.3.0
eth-typing==3.3.0
eth-utils==2.1.0
frozenlist==1.3.3
hexbytes==0.3.0
idna==3.4
iniconfig==2.0.0
jsonschema==4.17.3
lru-dict==1.2.0
multidict==6.0.4
mypy-extensions==0.4.4
packaging==23.1
parsimonious==0.9.0
pluggy==1.0.0
protobuf==4.23.2
py-ecc==6.0.0
py-evm==0.7.0a2
pycryptodome==3.18.0
pyethash==0.1.27
pyrsistent==0.19.3
pytest==7.3.1
pytest-cov==4.1.0
regex==2023.5.5
requests==2.31.0
rlp==3.0.0
safe-eth-py==5.4.3
safe-pysha3==1.0.4
sortedcontainers==2.4.0
toolz==0.12.0
trie==2.1.0
urllib3==2.0.2
web3==6.4.0
websockets==11.0.3
yarl==1.9.2
```
### What was wrong?
Function `raise_contract_logic_error_on_revert` raises `UnicodeDecodeError` when parsing receipt for the following response
`response = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}`
* Node: GnosisChain (issue happens for most nodes, https://rpc.gnosischain.com/, https://rpc.ankr.com/gnosis, etc)
```
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/contract.py:461: in call
return call_contract_function(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/utils.py:96: in call_contract_function
return_data = w3.eth.call(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:255: in call
return self._durin_call(transaction, block_identifier, state_override)
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:274: in _durin_call
return self._call(transaction, block_identifier, state_override)
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/module.py:68: in caller
result = w3.manager.request_blocking(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:232: in request_blocking
return self.formatted_response(
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:197: in formatted_response
apply_error_formatters(error_formatters, response)
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:73: in apply_error_formatters
formatted_resp = pipe(response, error_formatters)
cytoolz/functoolz.pyx:666: in cytoolz.functoolz.pipe
???
cytoolz/functoolz.pyx:641: in cytoolz.functoolz.c_pipe
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
response = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}
def raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:
"""
Reverts contain a `data` attribute with the following layout:
"Reverted "
Function selector for Error(string): 08c379a (4 bytes)
Data offset: 32 (32 bytes)
String length (32 bytes)
Reason string (padded, use string length from above to get meaningful part)
See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert
"""
if not isinstance(response["error"], dict):
raise ValueError("Error expected to be a dict")
data = response["error"].get("data", "")
# Ganache case:
if isinstance(data, dict) and response["error"].get("message"):
raise ContractLogicError(
f'execution reverted: {response["error"]["message"]}', data=data
)
# Parity/OpenEthereum case:
if data.startswith("Reverted "):
# "Reverted", function selector and offset are always the same for revert errors
prefix = "Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020" # noqa: 501
if not data.startswith(prefix):
if data.startswith("Reverted 0x"):
# Special case for this form: 'Reverted 0x...'
receipt = data.split(" ")[1][2:]
> revert_reason = bytes.fromhex(receipt).decode("utf-8")
E UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd2 in position 0: invalid continuation byte
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/_utils/method_formatters.py:743: UnicodeDecodeError
```
</issue>
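For reference, a minimal sketch of decoding the `Error(string)` revert layout described in the docstring above; the reason string is an illustrative value, not taken from the failing response:

```
# selector (4 bytes) + data offset (32) + string length (16) + reason padded to 32 bytes.
data = (
    "08c379a0"                                  # func selector for Error(string)
    + "00" * 31 + "20"                          # data offset: 32
    + "00" * 31 + "10"                          # string length: 16
    + "4e6f7420656e6f756768204574686572"        # "Not enough Ether"
    + "00" * 16                                 # padding to 32 bytes
)

offset = int(data[8:72], 16)       # 32
length = int(data[72:136], 16)     # 16
reason = bytes.fromhex(data[136:136 + length * 2]).decode("utf8")
print(reason)                      # Not enough Ether
```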
<code>
[start of web3/_utils/contract_error_handling.py]
1 from eth_abi import (
2 abi,
3 )
4 from eth_utils import (
5 to_bytes,
6 )
7
8 from web3.exceptions import (
9 ContractCustomError,
10 ContractLogicError,
11 ContractPanicError,
12 OffchainLookup,
13 )
14 from web3.types import (
15 RPCResponse,
16 )
17
18 # func selector for "Error(string)"
19 SOLIDITY_ERROR_FUNC_SELECTOR = "0x08c379a0"
20
21 # --- CCIP Read - EIP-3668 --- #
22 # the first 4 bytes of keccak hash (func selector) for:
23 # "OffchainLookup(address,string[],bytes,bytes4,bytes)"
24 OFFCHAIN_LOOKUP_FUNC_SELECTOR = "0x556f1830"
25 OFFCHAIN_LOOKUP_FIELDS = {
26 "sender": "address",
27 "urls": "string[]",
28 "callData": "bytes",
29 "callbackFunction": "bytes4",
30 "extraData": "bytes",
31 }
32
33
34 # --- Solidity Panic Error, as of Solidity 0.8.0 --- #
35 PANIC_ERROR_FUNC_SELECTOR = "0x4e487b71"
36 PANIC_ERROR_CODES = {
37 "00": "Panic error 0x00: Generic compiler inserted panics.",
38 "01": "Panic error 0x01: Assert evaluates to false.",
39 "11": "Panic error 0x11: Arithmetic operation results in underflow or overflow.",
40 "12": "Panic error 0x12: Division by zero.",
41 "21": "Panic error 0x21: Cannot convert value into an enum type.",
42 "22": "Panic error 0x12: Storage byte array is incorrectly encoded.",
43 "31": "Panic error 0x31: Call to 'pop()' on an empty array.",
44 "32": "Panic error 0x32: Array index is out of bounds.",
45 "41": "Panic error 0x41: Allocation of too much memory or array too large.",
46 "51": "Panic error 0x51: Call to a zero-initialized variable of internal "
47 "function type.",
48 }
49
50
51 def raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:
52 """
53 Reverts contain a `data` attribute with the following layout:
54 "Reverted "
55 Function selector for Error(string): 08c379a (4 bytes)
56 Data offset: 32 (32 bytes)
57 String length (32 bytes)
58 Reason string (padded, use string length from above to get meaningful part)
59
60 See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert
61 """
62 if not isinstance(response["error"], dict):
63 raise ValueError("Error expected to be a dict")
64
65 data = response["error"].get("data", "")
66
67 # Ganache case:
68 if isinstance(data, dict) and response["error"].get("message"):
69 raise ContractLogicError(
70 f'execution reverted: {response["error"]["message"]}', data=data
71 )
72
73 # Parity/OpenEthereum case:
74 if data.startswith("Reverted "):
75 # "Reverted", function selector and offset are always the same for revert errors
76 prefix = f"Reverted {SOLIDITY_ERROR_FUNC_SELECTOR}"
77 data_offset = ("00" * 31) + "20" # 0x0000...0020 (32 bytes)
78 revert_pattern = prefix + data_offset
79
80 if data.startswith(revert_pattern):
81 # if common revert pattern
82 string_length = int(
83 data[len(revert_pattern) : len(revert_pattern) + 64], 16
84 )
85 reason_as_hex = data[
86 len(revert_pattern) + 64 : len(revert_pattern) + 64 + string_length * 2
87 ]
88 elif data.startswith("Reverted 0x"):
89 # Special case for this form: 'Reverted 0x...'
90 reason_as_hex = data.split(" ")[1][2:]
91 else:
92 raise ContractLogicError("execution reverted", data=data)
93
94 reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
95 raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
96
97 # --- EIP-3668 | CCIP Read --- #
98 if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:
99 parsed_data_as_bytes = to_bytes(hexstr=data[10:])
100 abi_decoded_data = abi.decode(
101 list(OFFCHAIN_LOOKUP_FIELDS.values()), parsed_data_as_bytes
102 )
103 offchain_lookup_payload = dict(
104 zip(OFFCHAIN_LOOKUP_FIELDS.keys(), abi_decoded_data)
105 )
106 raise OffchainLookup(offchain_lookup_payload, data=data)
107
108 # --- Solidity Panic Error --- #
109 if data[:10] == PANIC_ERROR_FUNC_SELECTOR:
110 panic_error_code = data[-2:]
111 raise ContractPanicError(PANIC_ERROR_CODES[panic_error_code], data=data)
112
113 # Solidity 0.8.4 introduced custom error messages that allow args to
114 # be passed in (or not). See:
115 # https://blog.soliditylang.org/2021/04/21/custom-errors/
116 if len(data) >= 10 and not data[:10] == SOLIDITY_ERROR_FUNC_SELECTOR:
117 # Raise with data as both the message and the data for backwards
118 # compatibility and so that data can be accessed via 'data' attribute
119 # on the ContractCustomError exception
120 raise ContractCustomError(data, data=data)
121
122 # Geth case:
123 if "message" in response["error"] and response["error"].get("code", "") == 3:
124 message = response["error"]["message"]
125 raise ContractLogicError(message, data=data)
126
127 # Geth Revert without error message case:
128 if "execution reverted" in response["error"].get("message"):
129 raise ContractLogicError("execution reverted", data=data)
130
131 return response
132
[end of web3/_utils/contract_error_handling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/web3/_utils/contract_error_handling.py b/web3/_utils/contract_error_handling.py
--- a/web3/_utils/contract_error_handling.py
+++ b/web3/_utils/contract_error_handling.py
@@ -1,3 +1,5 @@
+import warnings
+
from eth_abi import (
abi,
)
@@ -91,8 +93,12 @@
else:
raise ContractLogicError("execution reverted", data=data)
- reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
- raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
+ try:
+ reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
+ raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
+ except UnicodeDecodeError:
+ warnings.warn("Could not decode revert reason as UTF-8", RuntimeWarning)
+ raise ContractLogicError("execution reverted", data=data)
# --- EIP-3668 | CCIP Read --- #
if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:
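A standalone sketch of the failure mode and the defensive pattern applied in the patch: the 4-byte payload from the failing response is not valid UTF-8, so the decode has to be guarded:

```
payload = "d27b44a9"                 # from 'Reverted 0xd27b44a9'
raw = bytes.fromhex(payload)

try:
    reason = raw.decode("utf8")
except UnicodeDecodeError:
    reason = None                    # fall back to a generic revert message

print(reason)                        # None -> "execution reverted" with data attached
```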
|
{"golden_diff": "diff --git a/web3/_utils/contract_error_handling.py b/web3/_utils/contract_error_handling.py\n--- a/web3/_utils/contract_error_handling.py\n+++ b/web3/_utils/contract_error_handling.py\n@@ -1,3 +1,5 @@\n+import warnings\n+\n from eth_abi import (\n abi,\n )\n@@ -91,8 +93,12 @@\n else:\n raise ContractLogicError(\"execution reverted\", data=data)\n \n- reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n- raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n+ try:\n+ reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n+ raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n+ except UnicodeDecodeError:\n+ warnings.warn(\"Could not decode revert reason as UTF-8\", RuntimeWarning)\n+ raise ContractLogicError(\"execution reverted\", data=data)\n \n # --- EIP-3668 | CCIP Read --- #\n if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:\n", "issue": "raise_contract_logic_error_on_revert fails with UnicodeDecodeError\n* Version: 6.4.0\r\n* Python: 3.10\r\n* OS: linux\r\n* `pip freeze` output\r\n\r\n```\r\naiohttp==3.8.4\r\naiosignal==1.3.1\r\nasync-timeout==4.0.2\r\nattrs==23.1.0\r\nbitarray==2.7.3\r\ncached-property==1.5.2\r\ncertifi==2023.5.7\r\ncharset-normalizer==3.1.0\r\ncoverage==7.2.6\r\ncytoolz==0.12.1\r\neth-abi==4.0.0\r\neth-account==0.8.0\r\neth-bloom==2.0.0\r\neth-hash==0.5.1\r\neth-keyfile==0.6.1\r\neth-keys==0.4.0\r\neth-rlp==0.3.0\r\neth-typing==3.3.0\r\neth-utils==2.1.0\r\nfrozenlist==1.3.3\r\nhexbytes==0.3.0\r\nidna==3.4\r\niniconfig==2.0.0\r\njsonschema==4.17.3\r\nlru-dict==1.2.0\r\nmultidict==6.0.4\r\nmypy-extensions==0.4.4\r\npackaging==23.1\r\nparsimonious==0.9.0\r\npluggy==1.0.0\r\nprotobuf==4.23.2\r\npy-ecc==6.0.0\r\npy-evm==0.7.0a2\r\npycryptodome==3.18.0\r\npyethash==0.1.27\r\npyrsistent==0.19.3\r\npytest==7.3.1\r\npytest-cov==4.1.0\r\nregex==2023.5.5\r\nrequests==2.31.0\r\nrlp==3.0.0\r\nsafe-eth-py==5.4.3\r\nsafe-pysha3==1.0.4\r\nsortedcontainers==2.4.0\r\ntoolz==0.12.0\r\ntrie==2.1.0\r\nurllib3==2.0.2\r\nweb3==6.4.0\r\nwebsockets==11.0.3\r\nyarl==1.9.2\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\nFunction `raise_contract_logic_error_on_revert` raises `UnicodeDecodeError` when parsing receipt for the following response\r\n`response = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}`\r\n\r\n\r\n* Node: GnosisChain (issue happens for most nodes, https://rpc.gnosischain.com/, https://rpc.ankr.com/gnosis, etc)\r\n\r\n```\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/contract.py:461: in call\r\n return call_contract_function(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/contract/utils.py:96: in call_contract_function\r\n return_data = w3.eth.call(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:255: in call\r\n return self._durin_call(transaction, block_identifier, state_override)\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/eth/eth.py:274: in _durin_call\r\n return self._call(transaction, block_identifier, state_override)\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/module.py:68: in caller\r\n result = w3.manager.request_blocking(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:232: in request_blocking\r\n return 
self.formatted_response(\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:197: in formatted_response\r\n apply_error_formatters(error_formatters, response)\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/manager.py:73: in apply_error_formatters\r\n formatted_resp = pipe(response, error_formatters)\r\ncytoolz/functoolz.pyx:666: in cytoolz.functoolz.pipe\r\n ???\r\ncytoolz/functoolz.pyx:641: in cytoolz.functoolz.c_pipe\r\n ???\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nresponse = {'error': {'code': -32015, 'data': 'Reverted 0xd27b44a9', 'message': 'VM execution error.'}, 'id': 3, 'jsonrpc': '2.0'}\r\n\r\n def raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:\r\n \"\"\"\r\n Reverts contain a `data` attribute with the following layout:\r\n \"Reverted \"\r\n Function selector for Error(string): 08c379a (4 bytes)\r\n Data offset: 32 (32 bytes)\r\n String length (32 bytes)\r\n Reason string (padded, use string length from above to get meaningful part)\r\n \r\n See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert\r\n \"\"\"\r\n if not isinstance(response[\"error\"], dict):\r\n raise ValueError(\"Error expected to be a dict\")\r\n \r\n data = response[\"error\"].get(\"data\", \"\")\r\n \r\n # Ganache case:\r\n if isinstance(data, dict) and response[\"error\"].get(\"message\"):\r\n raise ContractLogicError(\r\n f'execution reverted: {response[\"error\"][\"message\"]}', data=data\r\n )\r\n \r\n # Parity/OpenEthereum case:\r\n if data.startswith(\"Reverted \"):\r\n # \"Reverted\", function selector and offset are always the same for revert errors\r\n prefix = \"Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020\" # noqa: 501\r\n if not data.startswith(prefix):\r\n if data.startswith(\"Reverted 0x\"):\r\n # Special case for this form: 'Reverted 0x...'\r\n receipt = data.split(\" \")[1][2:]\r\n> revert_reason = bytes.fromhex(receipt).decode(\"utf-8\")\r\nE UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd2 in position 0: invalid continuation byte\r\n\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/web3/_utils/method_formatters.py:743: UnicodeDecodeError\r\n```\r\n\n", "before_files": [{"content": "from eth_abi import (\n abi,\n)\nfrom eth_utils import (\n to_bytes,\n)\n\nfrom web3.exceptions import (\n ContractCustomError,\n ContractLogicError,\n ContractPanicError,\n OffchainLookup,\n)\nfrom web3.types import (\n RPCResponse,\n)\n\n# func selector for \"Error(string)\"\nSOLIDITY_ERROR_FUNC_SELECTOR = \"0x08c379a0\"\n\n# --- CCIP Read - EIP-3668 --- #\n# the first 4 bytes of keccak hash (func selector) for:\n# \"OffchainLookup(address,string[],bytes,bytes4,bytes)\"\nOFFCHAIN_LOOKUP_FUNC_SELECTOR = \"0x556f1830\"\nOFFCHAIN_LOOKUP_FIELDS = {\n \"sender\": \"address\",\n \"urls\": \"string[]\",\n \"callData\": \"bytes\",\n \"callbackFunction\": \"bytes4\",\n \"extraData\": \"bytes\",\n}\n\n\n# --- Solidity Panic Error, as of Solidity 0.8.0 --- #\nPANIC_ERROR_FUNC_SELECTOR = \"0x4e487b71\"\nPANIC_ERROR_CODES = {\n \"00\": \"Panic error 0x00: Generic compiler inserted panics.\",\n \"01\": \"Panic error 0x01: Assert evaluates to false.\",\n \"11\": \"Panic error 0x11: Arithmetic operation results in underflow or overflow.\",\n \"12\": \"Panic error 0x12: Division by zero.\",\n \"21\": \"Panic error 0x21: Cannot convert value into an enum type.\",\n \"22\": \"Panic error 0x12: 
Storage byte array is incorrectly encoded.\",\n \"31\": \"Panic error 0x31: Call to 'pop()' on an empty array.\",\n \"32\": \"Panic error 0x32: Array index is out of bounds.\",\n \"41\": \"Panic error 0x41: Allocation of too much memory or array too large.\",\n \"51\": \"Panic error 0x51: Call to a zero-initialized variable of internal \"\n \"function type.\",\n}\n\n\ndef raise_contract_logic_error_on_revert(response: RPCResponse) -> RPCResponse:\n \"\"\"\n Reverts contain a `data` attribute with the following layout:\n \"Reverted \"\n Function selector for Error(string): 08c379a (4 bytes)\n Data offset: 32 (32 bytes)\n String length (32 bytes)\n Reason string (padded, use string length from above to get meaningful part)\n\n See also https://solidity.readthedocs.io/en/v0.6.3/control-structures.html#revert\n \"\"\"\n if not isinstance(response[\"error\"], dict):\n raise ValueError(\"Error expected to be a dict\")\n\n data = response[\"error\"].get(\"data\", \"\")\n\n # Ganache case:\n if isinstance(data, dict) and response[\"error\"].get(\"message\"):\n raise ContractLogicError(\n f'execution reverted: {response[\"error\"][\"message\"]}', data=data\n )\n\n # Parity/OpenEthereum case:\n if data.startswith(\"Reverted \"):\n # \"Reverted\", function selector and offset are always the same for revert errors\n prefix = f\"Reverted {SOLIDITY_ERROR_FUNC_SELECTOR}\"\n data_offset = (\"00\" * 31) + \"20\" # 0x0000...0020 (32 bytes)\n revert_pattern = prefix + data_offset\n\n if data.startswith(revert_pattern):\n # if common revert pattern\n string_length = int(\n data[len(revert_pattern) : len(revert_pattern) + 64], 16\n )\n reason_as_hex = data[\n len(revert_pattern) + 64 : len(revert_pattern) + 64 + string_length * 2\n ]\n elif data.startswith(\"Reverted 0x\"):\n # Special case for this form: 'Reverted 0x...'\n reason_as_hex = data.split(\" \")[1][2:]\n else:\n raise ContractLogicError(\"execution reverted\", data=data)\n\n reason_string = bytes.fromhex(reason_as_hex).decode(\"utf8\")\n raise ContractLogicError(f\"execution reverted: {reason_string}\", data=data)\n\n # --- EIP-3668 | CCIP Read --- #\n if data[:10] == OFFCHAIN_LOOKUP_FUNC_SELECTOR:\n parsed_data_as_bytes = to_bytes(hexstr=data[10:])\n abi_decoded_data = abi.decode(\n list(OFFCHAIN_LOOKUP_FIELDS.values()), parsed_data_as_bytes\n )\n offchain_lookup_payload = dict(\n zip(OFFCHAIN_LOOKUP_FIELDS.keys(), abi_decoded_data)\n )\n raise OffchainLookup(offchain_lookup_payload, data=data)\n\n # --- Solidity Panic Error --- #\n if data[:10] == PANIC_ERROR_FUNC_SELECTOR:\n panic_error_code = data[-2:]\n raise ContractPanicError(PANIC_ERROR_CODES[panic_error_code], data=data)\n\n # Solidity 0.8.4 introduced custom error messages that allow args to\n # be passed in (or not). 
See:\n # https://blog.soliditylang.org/2021/04/21/custom-errors/\n if len(data) >= 10 and not data[:10] == SOLIDITY_ERROR_FUNC_SELECTOR:\n # Raise with data as both the message and the data for backwards\n # compatibility and so that data can be accessed via 'data' attribute\n # on the ContractCustomError exception\n raise ContractCustomError(data, data=data)\n\n # Geth case:\n if \"message\" in response[\"error\"] and response[\"error\"].get(\"code\", \"\") == 3:\n message = response[\"error\"][\"message\"]\n raise ContractLogicError(message, data=data)\n\n # Geth Revert without error message case:\n if \"execution reverted\" in response[\"error\"].get(\"message\"):\n raise ContractLogicError(\"execution reverted\", data=data)\n\n return response\n", "path": "web3/_utils/contract_error_handling.py"}]}
| 3,862 | 245 |
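
For reference, below is a minimal standalone sketch of the guarded decode that the golden_diff in the record above introduces, exercised against the failing payload from the issue (`Reverted 0xd27b44a9`). It does not require web3 to be installed, so the `ContractLogicError` class defined here is a simplified stand-in for `web3.exceptions.ContractLogicError`, not the real implementation.

```python
import warnings


# Hypothetical stand-in for web3.exceptions.ContractLogicError so the sketch
# runs without web3 installed; the real class also accepts a `data` kwarg.
class ContractLogicError(Exception):
    def __init__(self, message, data=None):
        super().__init__(message)
        self.data = data


def decode_revert_reason(data: str) -> None:
    """Mimic the guarded decode for the 'Reverted 0x...' (Parity/OpenEthereum)
    case, following the structure of the diff above."""
    reason_as_hex = data.split(" ")[1][2:]  # strip 'Reverted ' and the '0x' prefix
    try:
        reason_string = bytes.fromhex(reason_as_hex).decode("utf8")
        raise ContractLogicError(f"execution reverted: {reason_string}", data=data)
    except UnicodeDecodeError:
        # Non-UTF-8 payload (e.g. a 4-byte custom error selector): fall back
        # to a generic message instead of crashing with UnicodeDecodeError.
        warnings.warn("Could not decode revert reason as UTF-8", RuntimeWarning)
        raise ContractLogicError("execution reverted", data=data)


# The payload from the issue now produces the generic ContractLogicError
# rather than an unhandled UnicodeDecodeError.
try:
    decode_revert_reason("Reverted 0xd27b44a9")
except ContractLogicError as err:
    print(err, err.data)  # execution reverted Reverted 0xd27b44a9
```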