problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-18.9k | golden_diff stringlengths 145-5.13k | verification_info stringlengths 465-23.6k | num_tokens_prompt int64 556-4.1k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_3600 | rasdani/github-patches | git_diff | interlegis__sapl-1349 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Falta label de processo nos detalhes da matéria
O número do processo está perdido em meio aos detalhes da matéria. Falta o label processo



</issue>
<code>
[start of sapl/crispy_layout_mixin.py]
1 from math import ceil
2
3 from crispy_forms.bootstrap import FormActions
4 from crispy_forms.helper import FormHelper
5 from crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit
6 from django import template
7 from django.core.urlresolvers import reverse
8 from django.utils import formats
9 from django.utils.translation import ugettext as _
10 import rtyaml
11
12
13 def heads_and_tails(list_of_lists):
14 for alist in list_of_lists:
15 yield alist[0], alist[1:]
16
17
18 def to_column(name_span):
19 fieldname, span = name_span
20 return Div(fieldname, css_class='col-md-%d' % span)
21
22
23 def to_row(names_spans):
24 return Div(*map(to_column, names_spans), css_class='row-fluid')
25
26
27 def to_fieldsets(fields):
28 for field in fields:
29 if isinstance(field, list):
30 legend, row_specs = field[0], field[1:]
31 rows = [to_row(name_span_list) for name_span_list in row_specs]
32 yield Fieldset(legend, *rows)
33 else:
34 yield field
35
36
37 def form_actions(more=[], save_label=_('Salvar')):
38 return FormActions(
39 Submit('salvar', save_label, css_class='pull-right'), *more)
40
41
42 class SaplFormLayout(Layout):
43
44 def __init__(self, *fields, cancel_label=_('Cancelar'),
45 save_label=_('Salvar'), actions=None):
46
47 buttons = actions
48 if not buttons:
49 buttons = form_actions(save_label=save_label, more=[
50 HTML('<a href="{{ view.cancel_url }}"'
51 ' class="btn btn-inverse">%s</a>' % cancel_label)
52 if cancel_label else None])
53
54 _fields = list(to_fieldsets(fields))
55 if buttons:
56 _fields += [to_row([(buttons, 12)])]
57 super(SaplFormLayout, self).__init__(*_fields)
58
59
60 def get_field_display(obj, fieldname):
61 field = ''
62 try:
63 field = obj._meta.get_field(fieldname)
64 except Exception as e:
65 """ nos casos que o fieldname não é um field_model,
66 ele pode ser um aggregate, annotate, um property, um manager,
67 ou mesmo uma método no model.
68 """
69 value = getattr(obj, fieldname)
70 verbose_name = ''
71
72 else:
73 verbose_name = str(field.verbose_name)\
74 if hasattr(field, 'verbose_name') else ''
75
76 if hasattr(field, 'choices') and field.choices:
77 value = getattr(obj, 'get_%s_display' % fieldname)()
78 else:
79 value = getattr(obj, fieldname)
80
81 str_type_from_value = str(type(value))
82 str_type_from_field = str(type(field))
83
84 if value is None:
85 display = ''
86 elif 'date' in str_type_from_value:
87 display = formats.date_format(value, "SHORT_DATE_FORMAT")
88 elif 'bool' in str_type_from_value:
89 display = _('Sim') if value else _('Não')
90 elif 'ImageFieldFile' in str(type(value)):
91 if value:
92 display = '<img src="{}" />'.format(value.url)
93 else:
94 display = ''
95 elif 'FieldFile' in str_type_from_value:
96 if value:
97 display = '<a href="{}">{}</a>'.format(
98 value.url,
99 value.name.split('/')[-1:][0])
100 else:
101 display = ''
102 elif 'ManyRelatedManager' in str_type_from_value\
103 or 'RelatedManager' in str_type_from_value\
104 or 'GenericRelatedObjectManager' in str_type_from_value:
105 display = '<ul>'
106 for v in value.all():
107 display += '<li>%s</li>' % str(v)
108 display += '</ul>'
109 if not verbose_name:
110 if hasattr(field, 'related_model'):
111 verbose_name = str(
112 field.related_model._meta.verbose_name_plural)
113 elif hasattr(field, 'model'):
114 verbose_name = str(field.model._meta.verbose_name_plural)
115 elif 'GenericForeignKey' in str_type_from_field:
116 display = '<a href="{}">{}</a>'.format(
117 reverse(
118 '%s:%s_detail' % (
119 value._meta.app_config.name, obj.content_type.model),
120 args=(value.id,)),
121 value)
122 else:
123 display = str(value)
124 return verbose_name, display
125
126
127 class CrispyLayoutFormMixin:
128
129 @property
130 def layout_key(self):
131 if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):
132 return super(CrispyLayoutFormMixin, self).layout_key
133 else:
134 return self.model.__name__
135
136 @property
137 def layout_key_set(self):
138 if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):
139 return super(CrispyLayoutFormMixin, self).layout_key_set
140 else:
141 obj = self.crud if hasattr(self, 'crud') else self
142 return getattr(obj.model,
143 obj.model_set).field.model.__name__
144
145 def get_layout(self):
146 yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label
147 return read_layout_from_yaml(yaml_layout, self.layout_key)
148
149 def get_layout_set(self):
150 obj = self.crud if hasattr(self, 'crud') else self
151 yaml_layout = '%s/layouts.yaml' % getattr(
152 obj.model, obj.model_set).field.model._meta.app_config.label
153 return read_layout_from_yaml(yaml_layout, self.layout_key_set)
154
155 @property
156 def fields(self):
157 if hasattr(self, 'form_class') and self.form_class:
158 return None
159 else:
160 '''Returns all fields in the layout'''
161 return [fieldname for legend_rows in self.get_layout()
162 for row in legend_rows[1:]
163 for fieldname, span in row]
164
165 def get_form(self, form_class=None):
166 try:
167 form = super(CrispyLayoutFormMixin, self).get_form(form_class)
168 except AttributeError:
169 # simply return None if there is no get_form on super
170 pass
171 else:
172 if self.layout_key:
173 form.helper = FormHelper()
174 form.helper.layout = SaplFormLayout(*self.get_layout())
175 return form
176
177 @property
178 def list_field_names(self):
179 '''The list of field names to display on table
180
181 This base implementation returns the field names
182 in the first fieldset of the layout.
183 '''
184 obj = self.crud if hasattr(self, 'crud') else self
185 if hasattr(obj, 'list_field_names') and obj.list_field_names:
186 return obj.list_field_names
187 rows = self.get_layout()[0][1:]
188 return [fieldname for row in rows for fieldname, __ in row]
189
190 @property
191 def list_field_names_set(self):
192 '''The list of field names to display on table
193
194 This base implementation returns the field names
195 in the first fieldset of the layout.
196 '''
197 rows = self.get_layout_set()[0][1:]
198 return [fieldname for row in rows for fieldname, __ in row]
199
200 def get_column(self, fieldname, span):
201 obj = self.get_object()
202 verbose_name, text = get_field_display(obj, fieldname)
203 return {
204 'id': fieldname,
205 'span': span,
206 'verbose_name': verbose_name,
207 'text': text,
208 }
209
210 @property
211 def layout_display(self):
212
213 return [
214 {'legend': legend,
215 'rows': [[self.get_column(fieldname, span)
216 for fieldname, span in row]
217 for row in rows]
218 } for legend, rows in heads_and_tails(self.get_layout())]
219
220
221 def read_yaml_from_file(yaml_layout):
222 # TODO cache this at application level
223 t = template.loader.get_template(yaml_layout)
224 # aqui é importante converter para str pois, dependendo do ambiente,
225 # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream
226 rendered = str(t.render())
227 return rtyaml.load(rendered)
228
229
230 def read_layout_from_yaml(yaml_layout, key):
231 # TODO cache this at application level
232 yaml = read_yaml_from_file(yaml_layout)
233 base = yaml[key]
234
235 def line_to_namespans(line):
236 split = [cell.split(':') for cell in line.split()]
237 namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]
238 remaining = 12 - sum(s for n, s in namespans)
239 nondefined = [ns for ns in namespans if not ns[1]]
240 while nondefined:
241 span = ceil(remaining / len(nondefined))
242 namespan = nondefined.pop(0)
243 namespan[1] = span
244 remaining = remaining - span
245 return list(map(tuple, namespans))
246
247 return [[legend] + [line_to_namespans(l) for l in lines]
248 for legend, lines in base.items()]
249
[end of sapl/crispy_layout_mixin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sapl/crispy_layout_mixin.py b/sapl/crispy_layout_mixin.py
--- a/sapl/crispy_layout_mixin.py
+++ b/sapl/crispy_layout_mixin.py
@@ -67,7 +67,10 @@
ou mesmo uma método no model.
"""
value = getattr(obj, fieldname)
- verbose_name = ''
+ try:
+ verbose_name = value.model._meta.verbose_name
+ except AttributeError:
+ verbose_name = ''
else:
verbose_name = str(field.verbose_name)\
|
{"golden_diff": "diff --git a/sapl/crispy_layout_mixin.py b/sapl/crispy_layout_mixin.py\n--- a/sapl/crispy_layout_mixin.py\n+++ b/sapl/crispy_layout_mixin.py\n@@ -67,7 +67,10 @@\n ou mesmo uma m\u00e9todo no model.\n \"\"\"\n value = getattr(obj, fieldname)\n- verbose_name = ''\n+ try:\n+ verbose_name = value.model._meta.verbose_name\n+ except AttributeError:\n+ verbose_name = ''\n \n else:\n verbose_name = str(field.verbose_name)\\\n", "issue": "Falta label de processo nos detalhes da mat\u00e9ria\nO n\u00famero do processo est\u00e1 perdido em meio aos detalhes da mat\u00e9ria. Falta o label processo \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from math import ceil\n\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.utils import formats\nfrom django.utils.translation import ugettext as _\nimport rtyaml\n\n\ndef heads_and_tails(list_of_lists):\n for alist in list_of_lists:\n yield alist[0], alist[1:]\n\n\ndef to_column(name_span):\n fieldname, span = name_span\n return Div(fieldname, css_class='col-md-%d' % span)\n\n\ndef to_row(names_spans):\n return Div(*map(to_column, names_spans), css_class='row-fluid')\n\n\ndef to_fieldsets(fields):\n for field in fields:\n if isinstance(field, list):\n legend, row_specs = field[0], field[1:]\n rows = [to_row(name_span_list) for name_span_list in row_specs]\n yield Fieldset(legend, *rows)\n else:\n yield field\n\n\ndef form_actions(more=[], save_label=_('Salvar')):\n return FormActions(\n Submit('salvar', save_label, css_class='pull-right'), *more)\n\n\nclass SaplFormLayout(Layout):\n\n def __init__(self, *fields, cancel_label=_('Cancelar'),\n save_label=_('Salvar'), actions=None):\n\n buttons = actions\n if not buttons:\n buttons = form_actions(save_label=save_label, more=[\n HTML('<a href=\"{{ view.cancel_url }}\"'\n ' class=\"btn btn-inverse\">%s</a>' % cancel_label)\n if cancel_label else None])\n\n _fields = list(to_fieldsets(fields))\n if buttons:\n _fields += [to_row([(buttons, 12)])]\n super(SaplFormLayout, self).__init__(*_fields)\n\n\ndef get_field_display(obj, fieldname):\n field = ''\n try:\n field = obj._meta.get_field(fieldname)\n except Exception as e:\n \"\"\" nos casos que o fieldname n\u00e3o \u00e9 um field_model,\n ele pode ser um aggregate, annotate, um property, um manager,\n ou mesmo uma m\u00e9todo no model.\n \"\"\"\n value = getattr(obj, fieldname)\n verbose_name = ''\n\n else:\n verbose_name = str(field.verbose_name)\\\n if hasattr(field, 'verbose_name') else ''\n\n if hasattr(field, 'choices') and field.choices:\n value = getattr(obj, 'get_%s_display' % fieldname)()\n else:\n value = getattr(obj, fieldname)\n\n str_type_from_value = str(type(value))\n str_type_from_field = str(type(field))\n\n if value is None:\n display = ''\n elif 'date' in str_type_from_value:\n display = formats.date_format(value, \"SHORT_DATE_FORMAT\")\n elif 'bool' in str_type_from_value:\n display = _('Sim') if value else _('N\u00e3o')\n elif 'ImageFieldFile' in str(type(value)):\n if value:\n display = '<img src=\"{}\" />'.format(value.url)\n else:\n display = ''\n elif 'FieldFile' in str_type_from_value:\n if value:\n display = '<a href=\"{}\">{}</a>'.format(\n value.url,\n value.name.split('/')[-1:][0])\n else:\n display = ''\n elif 'ManyRelatedManager' in str_type_from_value\\\n or 'RelatedManager' in 
str_type_from_value\\\n or 'GenericRelatedObjectManager' in str_type_from_value:\n display = '<ul>'\n for v in value.all():\n display += '<li>%s</li>' % str(v)\n display += '</ul>'\n if not verbose_name:\n if hasattr(field, 'related_model'):\n verbose_name = str(\n field.related_model._meta.verbose_name_plural)\n elif hasattr(field, 'model'):\n verbose_name = str(field.model._meta.verbose_name_plural)\n elif 'GenericForeignKey' in str_type_from_field:\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, obj.content_type.model),\n args=(value.id,)),\n value)\n else:\n display = str(value)\n return verbose_name, display\n\n\nclass CrispyLayoutFormMixin:\n\n @property\n def layout_key(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):\n return super(CrispyLayoutFormMixin, self).layout_key\n else:\n return self.model.__name__\n\n @property\n def layout_key_set(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):\n return super(CrispyLayoutFormMixin, self).layout_key_set\n else:\n obj = self.crud if hasattr(self, 'crud') else self\n return getattr(obj.model,\n obj.model_set).field.model.__name__\n\n def get_layout(self):\n yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key)\n\n def get_layout_set(self):\n obj = self.crud if hasattr(self, 'crud') else self\n yaml_layout = '%s/layouts.yaml' % getattr(\n obj.model, obj.model_set).field.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key_set)\n\n @property\n def fields(self):\n if hasattr(self, 'form_class') and self.form_class:\n return None\n else:\n '''Returns all fields in the layout'''\n return [fieldname for legend_rows in self.get_layout()\n for row in legend_rows[1:]\n for fieldname, span in row]\n\n def get_form(self, form_class=None):\n try:\n form = super(CrispyLayoutFormMixin, self).get_form(form_class)\n except AttributeError:\n # simply return None if there is no get_form on super\n pass\n else:\n if self.layout_key:\n form.helper = FormHelper()\n form.helper.layout = SaplFormLayout(*self.get_layout())\n return form\n\n @property\n def list_field_names(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n obj = self.crud if hasattr(self, 'crud') else self\n if hasattr(obj, 'list_field_names') and obj.list_field_names:\n return obj.list_field_names\n rows = self.get_layout()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n @property\n def list_field_names_set(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n rows = self.get_layout_set()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n def get_column(self, fieldname, span):\n obj = self.get_object()\n verbose_name, text = get_field_display(obj, fieldname)\n return {\n 'id': fieldname,\n 'span': span,\n 'verbose_name': verbose_name,\n 'text': text,\n }\n\n @property\n def layout_display(self):\n\n return [\n {'legend': legend,\n 'rows': [[self.get_column(fieldname, span)\n for fieldname, span in row]\n for row in rows]\n } for legend, rows in heads_and_tails(self.get_layout())]\n\n\ndef read_yaml_from_file(yaml_layout):\n # TODO cache this at application level\n t = template.loader.get_template(yaml_layout)\n # aqui \u00e9 
importante converter para str pois, dependendo do ambiente,\n # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream\n rendered = str(t.render())\n return rtyaml.load(rendered)\n\n\ndef read_layout_from_yaml(yaml_layout, key):\n # TODO cache this at application level\n yaml = read_yaml_from_file(yaml_layout)\n base = yaml[key]\n\n def line_to_namespans(line):\n split = [cell.split(':') for cell in line.split()]\n namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]\n remaining = 12 - sum(s for n, s in namespans)\n nondefined = [ns for ns in namespans if not ns[1]]\n while nondefined:\n span = ceil(remaining / len(nondefined))\n namespan = nondefined.pop(0)\n namespan[1] = span\n remaining = remaining - span\n return list(map(tuple, namespans))\n\n return [[legend] + [line_to_namespans(l) for l in lines]\n for legend, lines in base.items()]\n", "path": "sapl/crispy_layout_mixin.py"}]}
| 3,346 | 126 |
gh_patches_debug_14404 | rasdani/github-patches | git_diff | napari__napari-1147 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Toggle current layer visibility w/keyboard shortcut
## 🚀 Feature
<!-- A clear and concise description of the feature proposal -->
I'd like to introduce a new keyboard shortcut to toggle visibility of current layer
## Motivation
<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
When working with a few layers, I often find myself wanting to toggle the visibility of a particular layer to look clearly at one beneath. This has been coming up most recently when painting labels.
## Pitch
<!-- A clear and concise description of what you want to happen. -->
I'd like to propose a new keyboard shortcut to toggle the visibility of the current layer.
This part is up for debate, but if this is very useful for people, it could be a single key, like `H`, or a combo like `Ctrl+H`, or some other key(s) entirely.
From looking at the other key bindings, I assume this would be straightforward to do, unless there is some `Qt` issue I'm not familiar with.
## Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->
At the moment, I use the mouse to manually toggle the visibility.
</issue>
<code>
[start of napari/components/layerlist.py]
1 from ..layers import Layer
2 from ..utils.naming import inc_name_count
3 from ..utils.list import ListModel
4
5
6 def _add(event):
7 """When a layer is added, set its name."""
8 layers = event.source
9 layer = event.item
10 layer.name = layers._coerce_name(layer.name, layer)
11 layer.events.name.connect(lambda e: layers._update_name(e))
12 layers.unselect_all(ignore=layer)
13
14
15 class LayerList(ListModel):
16 """List-like layer collection with built-in reordering and callback hooks.
17
18 Parameters
19 ----------
20 iterable : iterable
21 Iterable of napari.layer.Layer
22
23 Attributes
24 ----------
25 events : vispy.util.event.EmitterGroup
26 Event hooks:
27 * added(item, index): whenever an item is added
28 * removed(item): whenever an item is removed
29 * reordered(): whenever the list is reordered
30 """
31
32 def __init__(self, iterable=()):
33 super().__init__(
34 basetype=Layer,
35 iterable=iterable,
36 lookup={str: lambda q, e: q == e.name},
37 )
38
39 self.events.added.connect(_add)
40
41 def __newlike__(self, iterable):
42 return ListModel(self._basetype, iterable, self._lookup)
43
44 def _coerce_name(self, name, layer=None):
45 """Coerce a name into a unique equivalent.
46
47 Parameters
48 ----------
49 name : str
50 Original name.
51 layer : napari.layers.Layer, optional
52 Layer for which name is generated.
53
54 Returns
55 -------
56 new_name : str
57 Coerced, unique name.
58 """
59 for l in self:
60 if l is layer:
61 continue
62 if l.name == name:
63 name = inc_name_count(name)
64
65 return name
66
67 def _update_name(self, event):
68 """Coerce name of the layer in `event.layer`."""
69 layer = event.source
70 layer.name = self._coerce_name(layer.name, layer)
71
72 def move_selected(self, index, insert):
73 """Reorder list by moving the item at index and inserting it
74 at the insert index. If additional items are selected these will
75 get inserted at the insert index too. This allows for rearranging
76 the list based on dragging and dropping a selection of items, where
77 index is the index of the primary item being dragged, and insert is
78 the index of the drop location, and the selection indicates if
79 multiple items are being dragged. If the moved layer is not selected
80 select it.
81
82 Parameters
83 ----------
84 index : int
85 Index of primary item to be moved
86 insert : int
87 Index that item(s) will be inserted at
88 """
89 total = len(self)
90 indices = list(range(total))
91 if not self[index].selected:
92 self.unselect_all()
93 self[index].selected = True
94 selected = [i for i in range(total) if self[i].selected]
95
96 # remove all indices to be moved
97 for i in selected:
98 indices.remove(i)
99 # adjust offset based on selected indices to move
100 offset = sum([i < insert and i != index for i in selected])
101 # insert indices to be moved at correct start
102 for insert_idx, elem_idx in enumerate(selected, start=insert - offset):
103 indices.insert(insert_idx, elem_idx)
104 # reorder list
105 self[:] = self[tuple(indices)]
106
107 def unselect_all(self, ignore=None):
108 """Unselects all layers expect any specified in ignore.
109
110 Parameters
111 ----------
112 ignore : Layer | None
113 Layer that should not be unselected if specified.
114 """
115 for layer in self:
116 if layer.selected and layer != ignore:
117 layer.selected = False
118
119 def select_all(self):
120 """Selects all layers."""
121 for layer in self:
122 if not layer.selected:
123 layer.selected = True
124
125 def remove_selected(self):
126 """Removes selected items from list."""
127 to_delete = []
128 for i in range(len(self)):
129 if self[i].selected:
130 to_delete.append(i)
131 to_delete.reverse()
132 for i in to_delete:
133 self.pop(i)
134 if len(to_delete) > 0:
135 first_to_delete = to_delete[-1]
136 if first_to_delete == 0 and len(self) > 0:
137 self[0].selected = True
138 elif first_to_delete > 0:
139 self[first_to_delete - 1].selected = True
140
141 def select_next(self, shift=False):
142 """Selects next item from list.
143 """
144 selected = []
145 for i in range(len(self)):
146 if self[i].selected:
147 selected.append(i)
148 if len(selected) > 0:
149 if selected[-1] == len(self) - 1:
150 if shift is False:
151 self.unselect_all(ignore=self[selected[-1]])
152 elif selected[-1] < len(self) - 1:
153 if shift is False:
154 self.unselect_all(ignore=self[selected[-1] + 1])
155 self[selected[-1] + 1].selected = True
156 elif len(self) > 0:
157 self[-1].selected = True
158
159 def select_previous(self, shift=False):
160 """Selects previous item from list.
161 """
162 selected = []
163 for i in range(len(self)):
164 if self[i].selected:
165 selected.append(i)
166 if len(selected) > 0:
167 if selected[0] == 0:
168 if shift is False:
169 self.unselect_all(ignore=self[0])
170 elif selected[0] > 0:
171 if shift is False:
172 self.unselect_all(ignore=self[selected[0] - 1])
173 self[selected[0] - 1].selected = True
174 elif len(self) > 0:
175 self[0].selected = True
176
[end of napari/components/layerlist.py]
[start of napari/_viewer_key_bindings.py]
1 import numpy as np
2 from .viewer import Viewer
3
4
5 @Viewer.bind_key('Control-F')
6 def toggle_fullscreen(viewer):
7 """Toggle fullscreen mode."""
8 if viewer.window._qt_window.isFullScreen():
9 viewer.window._qt_window.showNormal()
10 else:
11 viewer.window._qt_window.showFullScreen()
12
13
14 @Viewer.bind_key('Control-Y')
15 def toggle_ndisplay(viewer):
16 """Toggle ndisplay."""
17 if viewer.dims.ndisplay == 3:
18 viewer.dims.ndisplay = 2
19 else:
20 viewer.dims.ndisplay = 3
21
22
23 @Viewer.bind_key('Left')
24 def increment_dims_left(viewer):
25 """Increment dimensions slider to the left."""
26 axis = viewer.window.qt_viewer.dims.last_used
27 if axis is not None:
28 cur_point = viewer.dims.point[axis]
29 axis_range = viewer.dims.range[axis]
30 new_point = np.clip(
31 cur_point - axis_range[2],
32 axis_range[0],
33 axis_range[1] - axis_range[2],
34 )
35 viewer.dims.set_point(axis, new_point)
36
37
38 @Viewer.bind_key('Right')
39 def increment_dims_right(viewer):
40 """Increment dimensions slider to the right."""
41 axis = viewer.window.qt_viewer.dims.last_used
42 if axis is not None:
43 cur_point = viewer.dims.point[axis]
44 axis_range = viewer.dims.range[axis]
45 new_point = np.clip(
46 cur_point + axis_range[2],
47 axis_range[0],
48 axis_range[1] - axis_range[2],
49 )
50 viewer.dims.set_point(axis, new_point)
51
52
53 @Viewer.bind_key('Control-E')
54 def roll_axes(viewer):
55 """Change order of the visible axes, e.g. [0, 1, 2] -> [2, 0, 1]."""
56 viewer.dims._roll()
57
58
59 @Viewer.bind_key('Control-T')
60 def transpose_axes(viewer):
61 """Transpose order of the last two visible axes, e.g. [0, 1] -> [1, 0]."""
62 viewer.dims._transpose()
63
64
65 @Viewer.bind_key('Alt-Up')
66 def focus_axes_up(viewer):
67 """Move focus of dimensions slider up."""
68 viewer.window.qt_viewer.dims.focus_up()
69
70
71 @Viewer.bind_key('Alt-Down')
72 def focus_axes_down(viewer):
73 """Move focus of dimensions slider down."""
74 viewer.window.qt_viewer.dims.focus_down()
75
76
77 @Viewer.bind_key('Control-Backspace')
78 def remove_selected(viewer):
79 """Remove selected layers."""
80 viewer.layers.remove_selected()
81
82
83 @Viewer.bind_key('Control-A')
84 def select_all(viewer):
85 """Selected all layers."""
86 viewer.layers.select_all()
87
88
89 @Viewer.bind_key('Control-Shift-Backspace')
90 def remove_all_layers(viewer):
91 """Remove all layers."""
92 viewer.layers.select_all()
93 viewer.layers.remove_selected()
94
95
96 @Viewer.bind_key('Up')
97 def select_layer_above(viewer):
98 """Select layer above."""
99 viewer.layers.select_next()
100
101
102 @Viewer.bind_key('Down')
103 def select_layer_below(viewer):
104 """Select layer below."""
105 viewer.layers.select_previous()
106
107
108 @Viewer.bind_key('Shift-Up')
109 def also_select_layer_above(viewer):
110 """Also select layer above."""
111 viewer.layers.select_next(shift=True)
112
113
114 @Viewer.bind_key('Shift-Down')
115 def also_select_layer_below(viewer):
116 """Also select layer below."""
117 viewer.layers.select_previous(shift=True)
118
119
120 @Viewer.bind_key('Control-R')
121 def reset_view(viewer):
122 """Reset view to original state."""
123 viewer.reset_view()
124
125
126 @Viewer.bind_key('Control-G')
127 def toggle_grid(viewer):
128 """Toggle grid mode."""
129 if np.all(viewer.grid_size == (1, 1)):
130 viewer.grid_view()
131 else:
132 viewer.stack_view()
133
134
135 @Viewer.bind_key('Control-Alt-P')
136 def play(viewer):
137 """Toggle animation on the first axis"""
138 if viewer.window.qt_viewer.dims.is_playing:
139 viewer.window.qt_viewer.dims.stop()
140 else:
141 axis = viewer.window.qt_viewer.dims.last_used or 0
142 viewer.window.qt_viewer.dims.play(axis)
143
[end of napari/_viewer_key_bindings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napari/_viewer_key_bindings.py b/napari/_viewer_key_bindings.py
--- a/napari/_viewer_key_bindings.py
+++ b/napari/_viewer_key_bindings.py
@@ -140,3 +140,9 @@
else:
axis = viewer.window.qt_viewer.dims.last_used or 0
viewer.window.qt_viewer.dims.play(axis)
+
+
[email protected]_key('V')
+def toggle_selected_visibility(viewer):
+ """Toggle visibility of selected layers"""
+ viewer.layers.toggle_selected_visibility()
diff --git a/napari/components/layerlist.py b/napari/components/layerlist.py
--- a/napari/components/layerlist.py
+++ b/napari/components/layerlist.py
@@ -173,3 +173,9 @@
self[selected[0] - 1].selected = True
elif len(self) > 0:
self[0].selected = True
+
+ def toggle_selected_visibility(self):
+ """Toggle visibility of selected layers"""
+ for layer in self:
+ if layer.selected:
+ layer.visible = not layer.visible
|
{"golden_diff": "diff --git a/napari/_viewer_key_bindings.py b/napari/_viewer_key_bindings.py\n--- a/napari/_viewer_key_bindings.py\n+++ b/napari/_viewer_key_bindings.py\n@@ -140,3 +140,9 @@\n else:\n axis = viewer.window.qt_viewer.dims.last_used or 0\n viewer.window.qt_viewer.dims.play(axis)\n+\n+\[email protected]_key('V')\n+def toggle_selected_visibility(viewer):\n+ \"\"\"Toggle visibility of selected layers\"\"\"\n+ viewer.layers.toggle_selected_visibility()\ndiff --git a/napari/components/layerlist.py b/napari/components/layerlist.py\n--- a/napari/components/layerlist.py\n+++ b/napari/components/layerlist.py\n@@ -173,3 +173,9 @@\n self[selected[0] - 1].selected = True\n elif len(self) > 0:\n self[0].selected = True\n+\n+ def toggle_selected_visibility(self):\n+ \"\"\"Toggle visibility of selected layers\"\"\"\n+ for layer in self:\n+ if layer.selected:\n+ layer.visible = not layer.visible\n", "issue": "Toggle current layer visibility w/keyboard shortcut\n## \ud83d\ude80 Feature\r\n<!-- A clear and concise description of the feature proposal -->\r\nI'd like to introduce a new keyboard shortcut to toggle visibility of current layer\r\n\r\n## Motivation\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\nWhen working with a few layers, I often find myself wanting to toggle the visibility of a particular layer to look clearly at one beneath. This has been coming up most recently when painting labels.\r\n\r\n## Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nI'd like to propose a new keyboard shortcut to toggle the visibility of the current layer. \r\n\r\nThis part is up for debate, but if this is very useful for people, it could be a single key, like `H`, or a combo like `Ctrl+H`, or some other key(s) entirely. \r\n\r\nFrom looking at the other key bindings, I assume this would be straightforward to do, unless there is some `Qt` issue I'm not familiar with.\r\n\r\n## Alternatives\r\n\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, if any. 
-->\r\nAt the moment, I use the mouse to manually toggle the visibility.\n", "before_files": [{"content": "from ..layers import Layer\nfrom ..utils.naming import inc_name_count\nfrom ..utils.list import ListModel\n\n\ndef _add(event):\n \"\"\"When a layer is added, set its name.\"\"\"\n layers = event.source\n layer = event.item\n layer.name = layers._coerce_name(layer.name, layer)\n layer.events.name.connect(lambda e: layers._update_name(e))\n layers.unselect_all(ignore=layer)\n\n\nclass LayerList(ListModel):\n \"\"\"List-like layer collection with built-in reordering and callback hooks.\n\n Parameters\n ----------\n iterable : iterable\n Iterable of napari.layer.Layer\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Event hooks:\n * added(item, index): whenever an item is added\n * removed(item): whenever an item is removed\n * reordered(): whenever the list is reordered\n \"\"\"\n\n def __init__(self, iterable=()):\n super().__init__(\n basetype=Layer,\n iterable=iterable,\n lookup={str: lambda q, e: q == e.name},\n )\n\n self.events.added.connect(_add)\n\n def __newlike__(self, iterable):\n return ListModel(self._basetype, iterable, self._lookup)\n\n def _coerce_name(self, name, layer=None):\n \"\"\"Coerce a name into a unique equivalent.\n\n Parameters\n ----------\n name : str\n Original name.\n layer : napari.layers.Layer, optional\n Layer for which name is generated.\n\n Returns\n -------\n new_name : str\n Coerced, unique name.\n \"\"\"\n for l in self:\n if l is layer:\n continue\n if l.name == name:\n name = inc_name_count(name)\n\n return name\n\n def _update_name(self, event):\n \"\"\"Coerce name of the layer in `event.layer`.\"\"\"\n layer = event.source\n layer.name = self._coerce_name(layer.name, layer)\n\n def move_selected(self, index, insert):\n \"\"\"Reorder list by moving the item at index and inserting it\n at the insert index. If additional items are selected these will\n get inserted at the insert index too. This allows for rearranging\n the list based on dragging and dropping a selection of items, where\n index is the index of the primary item being dragged, and insert is\n the index of the drop location, and the selection indicates if\n multiple items are being dragged. 
If the moved layer is not selected\n select it.\n\n Parameters\n ----------\n index : int\n Index of primary item to be moved\n insert : int\n Index that item(s) will be inserted at\n \"\"\"\n total = len(self)\n indices = list(range(total))\n if not self[index].selected:\n self.unselect_all()\n self[index].selected = True\n selected = [i for i in range(total) if self[i].selected]\n\n # remove all indices to be moved\n for i in selected:\n indices.remove(i)\n # adjust offset based on selected indices to move\n offset = sum([i < insert and i != index for i in selected])\n # insert indices to be moved at correct start\n for insert_idx, elem_idx in enumerate(selected, start=insert - offset):\n indices.insert(insert_idx, elem_idx)\n # reorder list\n self[:] = self[tuple(indices)]\n\n def unselect_all(self, ignore=None):\n \"\"\"Unselects all layers expect any specified in ignore.\n\n Parameters\n ----------\n ignore : Layer | None\n Layer that should not be unselected if specified.\n \"\"\"\n for layer in self:\n if layer.selected and layer != ignore:\n layer.selected = False\n\n def select_all(self):\n \"\"\"Selects all layers.\"\"\"\n for layer in self:\n if not layer.selected:\n layer.selected = True\n\n def remove_selected(self):\n \"\"\"Removes selected items from list.\"\"\"\n to_delete = []\n for i in range(len(self)):\n if self[i].selected:\n to_delete.append(i)\n to_delete.reverse()\n for i in to_delete:\n self.pop(i)\n if len(to_delete) > 0:\n first_to_delete = to_delete[-1]\n if first_to_delete == 0 and len(self) > 0:\n self[0].selected = True\n elif first_to_delete > 0:\n self[first_to_delete - 1].selected = True\n\n def select_next(self, shift=False):\n \"\"\"Selects next item from list.\n \"\"\"\n selected = []\n for i in range(len(self)):\n if self[i].selected:\n selected.append(i)\n if len(selected) > 0:\n if selected[-1] == len(self) - 1:\n if shift is False:\n self.unselect_all(ignore=self[selected[-1]])\n elif selected[-1] < len(self) - 1:\n if shift is False:\n self.unselect_all(ignore=self[selected[-1] + 1])\n self[selected[-1] + 1].selected = True\n elif len(self) > 0:\n self[-1].selected = True\n\n def select_previous(self, shift=False):\n \"\"\"Selects previous item from list.\n \"\"\"\n selected = []\n for i in range(len(self)):\n if self[i].selected:\n selected.append(i)\n if len(selected) > 0:\n if selected[0] == 0:\n if shift is False:\n self.unselect_all(ignore=self[0])\n elif selected[0] > 0:\n if shift is False:\n self.unselect_all(ignore=self[selected[0] - 1])\n self[selected[0] - 1].selected = True\n elif len(self) > 0:\n self[0].selected = True\n", "path": "napari/components/layerlist.py"}, {"content": "import numpy as np\nfrom .viewer import Viewer\n\n\[email protected]_key('Control-F')\ndef toggle_fullscreen(viewer):\n \"\"\"Toggle fullscreen mode.\"\"\"\n if viewer.window._qt_window.isFullScreen():\n viewer.window._qt_window.showNormal()\n else:\n viewer.window._qt_window.showFullScreen()\n\n\[email protected]_key('Control-Y')\ndef toggle_ndisplay(viewer):\n \"\"\"Toggle ndisplay.\"\"\"\n if viewer.dims.ndisplay == 3:\n viewer.dims.ndisplay = 2\n else:\n viewer.dims.ndisplay = 3\n\n\[email protected]_key('Left')\ndef increment_dims_left(viewer):\n \"\"\"Increment dimensions slider to the left.\"\"\"\n axis = viewer.window.qt_viewer.dims.last_used\n if axis is not None:\n cur_point = viewer.dims.point[axis]\n axis_range = viewer.dims.range[axis]\n new_point = np.clip(\n cur_point - axis_range[2],\n axis_range[0],\n axis_range[1] - axis_range[2],\n )\n 
viewer.dims.set_point(axis, new_point)\n\n\[email protected]_key('Right')\ndef increment_dims_right(viewer):\n \"\"\"Increment dimensions slider to the right.\"\"\"\n axis = viewer.window.qt_viewer.dims.last_used\n if axis is not None:\n cur_point = viewer.dims.point[axis]\n axis_range = viewer.dims.range[axis]\n new_point = np.clip(\n cur_point + axis_range[2],\n axis_range[0],\n axis_range[1] - axis_range[2],\n )\n viewer.dims.set_point(axis, new_point)\n\n\[email protected]_key('Control-E')\ndef roll_axes(viewer):\n \"\"\"Change order of the visible axes, e.g. [0, 1, 2] -> [2, 0, 1].\"\"\"\n viewer.dims._roll()\n\n\[email protected]_key('Control-T')\ndef transpose_axes(viewer):\n \"\"\"Transpose order of the last two visible axes, e.g. [0, 1] -> [1, 0].\"\"\"\n viewer.dims._transpose()\n\n\[email protected]_key('Alt-Up')\ndef focus_axes_up(viewer):\n \"\"\"Move focus of dimensions slider up.\"\"\"\n viewer.window.qt_viewer.dims.focus_up()\n\n\[email protected]_key('Alt-Down')\ndef focus_axes_down(viewer):\n \"\"\"Move focus of dimensions slider down.\"\"\"\n viewer.window.qt_viewer.dims.focus_down()\n\n\[email protected]_key('Control-Backspace')\ndef remove_selected(viewer):\n \"\"\"Remove selected layers.\"\"\"\n viewer.layers.remove_selected()\n\n\[email protected]_key('Control-A')\ndef select_all(viewer):\n \"\"\"Selected all layers.\"\"\"\n viewer.layers.select_all()\n\n\[email protected]_key('Control-Shift-Backspace')\ndef remove_all_layers(viewer):\n \"\"\"Remove all layers.\"\"\"\n viewer.layers.select_all()\n viewer.layers.remove_selected()\n\n\[email protected]_key('Up')\ndef select_layer_above(viewer):\n \"\"\"Select layer above.\"\"\"\n viewer.layers.select_next()\n\n\[email protected]_key('Down')\ndef select_layer_below(viewer):\n \"\"\"Select layer below.\"\"\"\n viewer.layers.select_previous()\n\n\[email protected]_key('Shift-Up')\ndef also_select_layer_above(viewer):\n \"\"\"Also select layer above.\"\"\"\n viewer.layers.select_next(shift=True)\n\n\[email protected]_key('Shift-Down')\ndef also_select_layer_below(viewer):\n \"\"\"Also select layer below.\"\"\"\n viewer.layers.select_previous(shift=True)\n\n\[email protected]_key('Control-R')\ndef reset_view(viewer):\n \"\"\"Reset view to original state.\"\"\"\n viewer.reset_view()\n\n\[email protected]_key('Control-G')\ndef toggle_grid(viewer):\n \"\"\"Toggle grid mode.\"\"\"\n if np.all(viewer.grid_size == (1, 1)):\n viewer.grid_view()\n else:\n viewer.stack_view()\n\n\[email protected]_key('Control-Alt-P')\ndef play(viewer):\n \"\"\"Toggle animation on the first axis\"\"\"\n if viewer.window.qt_viewer.dims.is_playing:\n viewer.window.qt_viewer.dims.stop()\n else:\n axis = viewer.window.qt_viewer.dims.last_used or 0\n viewer.window.qt_viewer.dims.play(axis)\n", "path": "napari/_viewer_key_bindings.py"}]}
| 3,738 | 251 |
gh_patches_debug_100 | rasdani/github-patches | git_diff | jazzband__pip-tools-555 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip-sync uninstalls pkg-resources, breaks psycopg2
`pip-sync` uninstalls `pkg-resources`, which in turn breaks installation of many other packages. `pkg-resources` is a new "system" package that was recently extracted from `setuptools` (since version 31, I believe). I think it must be handled similarly to `setuptools`.
##### Steps to replicate
On a fully updated Ubuntu 16.04 LTS:
```console
semenov@dev2:~/tmp$ rm -rf ~/.cache/pip
semenov@dev2:~/tmp$ virtualenv --python=$(which python3) test
Already using interpreter /usr/bin/python3
Using base prefix '/usr'
New python executable in /home/semenov/tmp/test/bin/python3
Also creating executable in /home/semenov/tmp/test/bin/python
Installing setuptools, pkg_resources, pip, wheel...done.
semenov@dev2:~/tmp$ cd test
semenov@dev2:~/tmp/test$ . bin/activate
(test) semenov@dev2:~/tmp/test$ pip install pip-tools
Collecting pip-tools
Downloading pip_tools-1.8.0-py2.py3-none-any.whl
Collecting six (from pip-tools)
Downloading six-1.10.0-py2.py3-none-any.whl
Collecting first (from pip-tools)
Downloading first-2.0.1-py2.py3-none-any.whl
Collecting click>=6 (from pip-tools)
Downloading click-6.6-py2.py3-none-any.whl (71kB)
100% |████████████████████████████████| 71kB 559kB/s
Installing collected packages: six, first, click, pip-tools
Successfully installed click-6.6 first-2.0.1 pip-tools-1.8.0 six-1.10.0
(test) semenov@dev2:~/tmp/test$ echo psycopg2 > requirements.in
(test) semenov@dev2:~/tmp/test$ pip-compile
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --output-file requirements.txt requirements.in
#
psycopg2==2.6.2
(test) semenov@dev2:~/tmp/test$ pip-sync
Uninstalling pkg-resources-0.0.0:
Successfully uninstalled pkg-resources-0.0.0
Collecting psycopg2==2.6.2
Downloading psycopg2-2.6.2.tar.gz (376kB)
100% |████████████████████████████████| 378kB 2.4MB/s
Could not import setuptools which is required to install from a source distribution.
Traceback (most recent call last):
File "/home/semenov/tmp/test/lib/python3.5/site-packages/pip/req/req_install.py", line 387, in setup_py
import setuptools # noqa
File "/home/semenov/tmp/test/lib/python3.5/site-packages/setuptools/__init__.py", line 10, in <module>
from setuptools.extern.six.moves import filter, filterfalse, map
File "/home/semenov/tmp/test/lib/python3.5/site-packages/setuptools/extern/__init__.py", line 1, in <module>
from pkg_resources.extern import VendorImporter
ImportError: No module named 'pkg_resources.extern'
Traceback (most recent call last):
File "/home/semenov/tmp/test/bin/pip-sync", line 11, in <module>
sys.exit(cli())
File "/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py", line 696, in main
rv = self.invoke(ctx)
File "/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py", line 534, in invoke
return callback(*args, **kwargs)
File "/home/semenov/tmp/test/lib/python3.5/site-packages/piptools/scripts/sync.py", line 72, in cli
install_flags=install_flags))
File "/home/semenov/tmp/test/lib/python3.5/site-packages/piptools/sync.py", line 157, in sync
check_call([pip, 'install'] + pip_flags + install_flags + sorted(to_install))
File "/usr/lib/python3.5/subprocess.py", line 581, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['pip', 'install', 'psycopg2==2.6.2']' returned non-zero exit status 1
```
##### Expected result
`pip-sync` keeps `pkg-resources` in place, `psycopg2` installs normally.
##### Actual result
`pip-sync` uninstalls `pkg-resources`, then `psycopg2` installation fails with: `ImportError: No module named 'pkg_resources.extern'`
</issue>
<code>
[start of piptools/sync.py]
1 import collections
2 import os
3 import sys
4 from subprocess import check_call
5
6 from . import click
7 from .exceptions import IncompatibleRequirements, UnsupportedConstraint
8 from .utils import flat_map, format_requirement, key_from_req
9
10 PACKAGES_TO_IGNORE = [
11 'pip',
12 'pip-tools',
13 'pip-review',
14 'setuptools',
15 'wheel',
16 ]
17
18
19 def dependency_tree(installed_keys, root_key):
20 """
21 Calculate the dependency tree for the package `root_key` and return
22 a collection of all its dependencies. Uses a DFS traversal algorithm.
23
24 `installed_keys` should be a {key: requirement} mapping, e.g.
25 {'django': from_line('django==1.8')}
26 `root_key` should be the key to return the dependency tree for.
27 """
28 dependencies = set()
29 queue = collections.deque()
30
31 if root_key in installed_keys:
32 dep = installed_keys[root_key]
33 queue.append(dep)
34
35 while queue:
36 v = queue.popleft()
37 key = key_from_req(v)
38 if key in dependencies:
39 continue
40
41 dependencies.add(key)
42
43 for dep_specifier in v.requires():
44 dep_name = key_from_req(dep_specifier)
45 if dep_name in installed_keys:
46 dep = installed_keys[dep_name]
47
48 if dep_specifier.specifier.contains(dep.version):
49 queue.append(dep)
50
51 return dependencies
52
53
54 def get_dists_to_ignore(installed):
55 """
56 Returns a collection of package names to ignore when performing pip-sync,
57 based on the currently installed environment. For example, when pip-tools
58 is installed in the local environment, it should be ignored, including all
59 of its dependencies (e.g. click). When pip-tools is not installed
60 locally, click should also be installed/uninstalled depending on the given
61 requirements.
62 """
63 installed_keys = {key_from_req(r): r for r in installed}
64 return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))
65
66
67 def merge(requirements, ignore_conflicts):
68 by_key = {}
69
70 for ireq in requirements:
71 if ireq.link is not None and not ireq.editable:
72 msg = ('pip-compile does not support URLs as packages, unless they are editable. '
73 'Perhaps add -e option?')
74 raise UnsupportedConstraint(msg, ireq)
75
76 key = ireq.link or key_from_req(ireq.req)
77
78 if not ignore_conflicts:
79 existing_ireq = by_key.get(key)
80 if existing_ireq:
81 # NOTE: We check equality here since we can assume that the
82 # requirements are all pinned
83 if ireq.specifier != existing_ireq.specifier:
84 raise IncompatibleRequirements(ireq, existing_ireq)
85
86 # TODO: Always pick the largest specifier in case of a conflict
87 by_key[key] = ireq
88
89 return by_key.values()
90
91
92 def diff(compiled_requirements, installed_dists):
93 """
94 Calculate which packages should be installed or uninstalled, given a set
95 of compiled requirements and a list of currently installed modules.
96 """
97 requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}
98
99 satisfied = set() # holds keys
100 to_install = set() # holds InstallRequirement objects
101 to_uninstall = set() # holds keys
102
103 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
104 for dist in installed_dists:
105 key = key_from_req(dist)
106 if key not in requirements_lut:
107 to_uninstall.add(key)
108 elif requirements_lut[key].specifier.contains(dist.version):
109 satisfied.add(key)
110
111 for key, requirement in requirements_lut.items():
112 if key not in satisfied:
113 to_install.add(requirement)
114
115 # Make sure to not uninstall any packages that should be ignored
116 to_uninstall -= set(pkgs_to_ignore)
117
118 return (to_install, to_uninstall)
119
120
121 def sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):
122 """
123 Install and uninstalls the given sets of modules.
124 """
125 if not to_uninstall and not to_install:
126 click.echo("Everything up-to-date")
127
128 if pip_flags is None:
129 pip_flags = []
130
131 if not verbose:
132 pip_flags += ['-q']
133
134 if os.environ.get('VIRTUAL_ENV'):
135 # find pip via PATH
136 pip = 'pip'
137 else:
138 # find pip in same directory as pip-sync entry-point script
139 pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')
140
141 if to_uninstall:
142 if dry_run:
143 click.echo("Would uninstall:")
144 for pkg in to_uninstall:
145 click.echo(" {}".format(pkg))
146 else:
147 check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
148
149 if to_install:
150 if install_flags is None:
151 install_flags = []
152 if dry_run:
153 click.echo("Would install:")
154 for ireq in to_install:
155 click.echo(" {}".format(format_requirement(ireq)))
156 else:
157 package_args = []
158 for ireq in sorted(to_install):
159 if ireq.editable:
160 package_args.extend(['-e', str(ireq.link or ireq.req)])
161 else:
162 package_args.append(str(ireq.req))
163 check_call([pip, 'install'] + pip_flags + install_flags + package_args)
164 return 0
165
[end of piptools/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -11,6 +11,7 @@
'pip',
'pip-tools',
'pip-review',
+ 'pkg-resources',
'setuptools',
'wheel',
]
|
{"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -11,6 +11,7 @@\n 'pip',\n 'pip-tools',\n 'pip-review',\n+ 'pkg-resources',\n 'setuptools',\n 'wheel',\n ]\n", "issue": "pip-sync uninstalls pkg-resources, breaks psycopg2\n`pip-sync` uninstalls `pkg-resources`, which in turn breaks installation of many other packages. `pkg-resources` is a new \"system\" package that was recently extracted from `setuptools` (since version 31, I believe). I think it must be handled similarly to `setuptools`.\r\n\r\n##### Steps to replicate\r\n\r\nOn a fully updated Ubuntu 16.04 LTS:\r\n\r\n```console\r\nsemenov@dev2:~/tmp$ rm -rf ~/.cache/pip\r\nsemenov@dev2:~/tmp$ virtualenv --python=$(which python3) test\r\nAlready using interpreter /usr/bin/python3\r\nUsing base prefix '/usr'\r\nNew python executable in /home/semenov/tmp/test/bin/python3\r\nAlso creating executable in /home/semenov/tmp/test/bin/python\r\nInstalling setuptools, pkg_resources, pip, wheel...done.\r\nsemenov@dev2:~/tmp$ cd test\r\nsemenov@dev2:~/tmp/test$ . bin/activate\r\n(test) semenov@dev2:~/tmp/test$ pip install pip-tools\r\nCollecting pip-tools\r\n Downloading pip_tools-1.8.0-py2.py3-none-any.whl\r\nCollecting six (from pip-tools)\r\n Downloading six-1.10.0-py2.py3-none-any.whl\r\nCollecting first (from pip-tools)\r\n Downloading first-2.0.1-py2.py3-none-any.whl\r\nCollecting click>=6 (from pip-tools)\r\n Downloading click-6.6-py2.py3-none-any.whl (71kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 71kB 559kB/s\r\nInstalling collected packages: six, first, click, pip-tools\r\nSuccessfully installed click-6.6 first-2.0.1 pip-tools-1.8.0 six-1.10.0\r\n(test) semenov@dev2:~/tmp/test$ echo psycopg2 > requirements.in\r\n(test) semenov@dev2:~/tmp/test$ pip-compile\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file requirements.txt requirements.in\r\n#\r\npsycopg2==2.6.2\r\n(test) semenov@dev2:~/tmp/test$ pip-sync\r\nUninstalling pkg-resources-0.0.0:\r\n Successfully uninstalled pkg-resources-0.0.0\r\nCollecting psycopg2==2.6.2\r\n Downloading psycopg2-2.6.2.tar.gz (376kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 378kB 2.4MB/s\r\nCould not import setuptools which is required to install from a source distribution.\r\nTraceback (most recent call last):\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/pip/req/req_install.py\", line 387, in setup_py\r\n import setuptools # noqa\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/setuptools/__init__.py\", line 10, in <module>\r\n from setuptools.extern.six.moves import filter, filterfalse, map\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/setuptools/extern/__init__.py\", line 1, in <module>\r\n from pkg_resources.extern import VendorImporter\r\nImportError: No module named 'pkg_resources.extern'\r\n\r\nTraceback (most recent call last):\r\n File \"/home/semenov/tmp/test/bin/pip-sync\", line 11, in <module>\r\n sys.exit(cli())\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py\", line 716, in __call__\r\n return self.main(*args, **kwargs)\r\n File 
\"/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py\", line 696, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py\", line 889, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/click/core.py\", line 534, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/piptools/scripts/sync.py\", line 72, in cli\r\n install_flags=install_flags))\r\n File \"/home/semenov/tmp/test/lib/python3.5/site-packages/piptools/sync.py\", line 157, in sync\r\n check_call([pip, 'install'] + pip_flags + install_flags + sorted(to_install))\r\n File \"/usr/lib/python3.5/subprocess.py\", line 581, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nsubprocess.CalledProcessError: Command '['pip', 'install', 'psycopg2==2.6.2']' returned non-zero exit status 1\r\n```\r\n\r\n##### Expected result\r\n\r\n`pip-sync` keeps `pkg-resources` in place, `psycopg2` installs normally.\r\n\r\n##### Actual result\r\n\r\n`pip-sync` uninstalls `pkg-resources`, then `psycopg2` installation fails with: `ImportError: No module named 'pkg_resources.extern'`\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nfrom subprocess import check_call\n\nfrom . import click\nfrom .exceptions import IncompatibleRequirements, UnsupportedConstraint\nfrom .utils import flat_map, format_requirement, key_from_req\n\nPACKAGES_TO_IGNORE = [\n 'pip',\n 'pip-tools',\n 'pip-review',\n 'setuptools',\n 'wheel',\n]\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n if ireq.link is not None and not ireq.editable:\n msg = ('pip-compile does not support URLs as packages, unless they are editable. 
'\n 'Perhaps add -e option?')\n raise UnsupportedConstraint(msg, ireq)\n\n key = ireq.link or key_from_req(ireq.req)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n\n return by_key.values()\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut:\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied:\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n click.echo(\"Everything up-to-date\")\n\n if pip_flags is None:\n pip_flags = []\n\n if not verbose:\n pip_flags += ['-q']\n\n if os.environ.get('VIRTUAL_ENV'):\n # find pip via PATH\n pip = 'pip'\n else:\n # find pip in same directory as pip-sync entry-point script\n pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')\n\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n\n if to_install:\n if install_flags is None:\n install_flags = []\n if dry_run:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n else:\n package_args = []\n for ireq in sorted(to_install):\n if ireq.editable:\n package_args.extend(['-e', str(ireq.link or ireq.req)])\n else:\n package_args.append(str(ireq.req))\n check_call([pip, 'install'] + pip_flags + install_flags + package_args)\n return 0\n", "path": "piptools/sync.py"}]}
| 3,310 | 78 |
gh_patches_debug_3781
|
rasdani/github-patches
|
git_diff
|
beeware__toga-2356
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On GTK, `App.current_window` returns the last active window even when all windows are hidden
### Describe the bug
On Cocoa and WinForms, it returns `None` in this situation, which I think makes more sense.
### Environment
- Software versions:
- Toga: main (536a60f)
</issue>
<code>
[start of gtk/src/toga_gtk/app.py]
1 import asyncio
2 import signal
3 import sys
4 from pathlib import Path
5
6 import gbulb
7
8 import toga
9 from toga import App as toga_App
10 from toga.command import Command, Separator
11
12 from .keys import gtk_accel
13 from .libs import TOGA_DEFAULT_STYLES, Gdk, Gio, GLib, Gtk
14 from .window import Window
15
16
17 class MainWindow(Window):
18 def create(self):
19 self.native = Gtk.ApplicationWindow()
20 self.native.set_role("MainWindow")
21 icon_impl = toga_App.app.icon._impl
22 self.native.set_icon(icon_impl.native_72)
23
24 def gtk_delete_event(self, *args):
25 # Return value of the GTK on_close handler indicates
26 # whether the event has been fully handled. Returning
27 # False indicates the event handling is *not* complete,
28 # so further event processing (including actually
29 # closing the window) should be performed; so
30 # "should_exit == True" must be converted to a return
31 # value of False.
32 self.interface.app.on_exit()
33 return True
34
35
36 class App:
37 """
38 Todo:
39 * Creation of Menus is not working.
40 * Disabling of menu items is not working.
41 * App Icon is not showing up
42 """
43
44 def __init__(self, interface):
45 self.interface = interface
46 self.interface._impl = self
47
48 gbulb.install(gtk=True)
49 self.loop = asyncio.new_event_loop()
50
51 self.create()
52
53 def create(self):
54 # Stimulate the build of the app
55 self.native = Gtk.Application(
56 application_id=self.interface.app_id,
57 flags=Gio.ApplicationFlags.FLAGS_NONE,
58 )
59 self.native_about_dialog = None
60
61 # Connect the GTK signal that will cause app startup to occur
62 self.native.connect("startup", self.gtk_startup)
63 self.native.connect("activate", self.gtk_activate)
64
65 self.actions = None
66
67 def gtk_startup(self, data=None):
68 # Set up the default commands for the interface.
69 self.interface.commands.add(
70 Command(
71 self._menu_about,
72 "About " + self.interface.formal_name,
73 group=toga.Group.HELP,
74 ),
75 Command(None, "Preferences", group=toga.Group.APP),
76 # Quit should always be the last item, in a section on its own
77 Command(
78 self._menu_quit,
79 "Quit " + self.interface.formal_name,
80 shortcut=toga.Key.MOD_1 + "q",
81 group=toga.Group.APP,
82 section=sys.maxsize,
83 ),
84 )
85 self._create_app_commands()
86
87 self.interface._startup()
88
89 # Create the lookup table of menu items,
90 # then force the creation of the menus.
91 self.create_menus()
92
93 # Now that we have menus, make the app take responsibility for
94 # showing the menubar.
95 # This is required because of inconsistencies in how the Gnome
96 # shell operates on different windowing environments;
97 # see #872 for details.
98 settings = Gtk.Settings.get_default()
99 settings.set_property("gtk-shell-shows-menubar", False)
100
101 # Set any custom styles
102 css_provider = Gtk.CssProvider()
103 css_provider.load_from_data(TOGA_DEFAULT_STYLES)
104
105 context = Gtk.StyleContext()
106 context.add_provider_for_screen(
107 Gdk.Screen.get_default(), css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER
108 )
109
110 def _create_app_commands(self):
111 # No extra menus
112 pass
113
114 def gtk_activate(self, data=None):
115 pass
116
117 def _menu_about(self, command, **kwargs):
118 self.interface.about()
119
120 def _menu_quit(self, command, **kwargs):
121 self.interface.on_exit()
122
123 def create_menus(self):
124 # Only create the menu if the menu item index has been created.
125 self._menu_items = {}
126 self._menu_groups = {}
127
128 # Create the menu for the top level menubar.
129 menubar = Gio.Menu()
130 section = None
131 for cmd in self.interface.commands:
132 if isinstance(cmd, Separator):
133 section = None
134 else:
135 submenu, created = self._submenu(cmd.group, menubar)
136 if created:
137 section = None
138
139 if section is None:
140 section = Gio.Menu()
141 submenu.append_section(None, section)
142
143 cmd_id = "command-%s" % id(cmd)
144 action = Gio.SimpleAction.new(cmd_id, None)
145 action.connect("activate", cmd._impl.gtk_activate)
146
147 cmd._impl.native.append(action)
148 cmd._impl.set_enabled(cmd.enabled)
149 self._menu_items[action] = cmd
150 self.native.add_action(action)
151
152 item = Gio.MenuItem.new(cmd.text, "app." + cmd_id)
153 if cmd.shortcut:
154 item.set_attribute_value(
155 "accel", GLib.Variant("s", gtk_accel(cmd.shortcut))
156 )
157
158 section.append_item(item)
159
160 # Set the menu for the app.
161 self.native.set_menubar(menubar)
162
163 def _submenu(self, group, menubar):
164 try:
165 return self._menu_groups[group], False
166 except KeyError:
167 if group is None:
168 submenu = menubar
169 else:
170 parent_menu, _ = self._submenu(group.parent, menubar)
171 submenu = Gio.Menu()
172 self._menu_groups[group] = submenu
173
174 text = group.text
175 if text == "*":
176 text = self.interface.formal_name
177 parent_menu.append_submenu(text, submenu)
178
179 # Install the item in the group cache.
180 self._menu_groups[group] = submenu
181
182 return submenu, True
183
184 def main_loop(self):
185 # Modify signal handlers to make sure Ctrl-C is caught and handled.
186 signal.signal(signal.SIGINT, signal.SIG_DFL)
187
188 self.loop.run_forever(application=self.native)
189
190 def set_main_window(self, window):
191 pass
192
193 def show_about_dialog(self):
194 self.native_about_dialog = Gtk.AboutDialog()
195 self.native_about_dialog.set_modal(True)
196
197 icon_impl = toga_App.app.icon._impl
198 self.native_about_dialog.set_logo(icon_impl.native_72)
199
200 self.native_about_dialog.set_program_name(self.interface.formal_name)
201 if self.interface.version is not None:
202 self.native_about_dialog.set_version(self.interface.version)
203 if self.interface.author is not None:
204 self.native_about_dialog.set_authors([self.interface.author])
205 if self.interface.description is not None:
206 self.native_about_dialog.set_comments(self.interface.description)
207 if self.interface.home_page is not None:
208 self.native_about_dialog.set_website(self.interface.home_page)
209
210 self.native_about_dialog.show()
211 self.native_about_dialog.connect("close", self._close_about)
212
213 def _close_about(self, dialog):
214 self.native_about_dialog.destroy()
215 self.native_about_dialog = None
216
217 def beep(self):
218 Gdk.beep()
219
220 # We can't call this under test conditions, because it would kill the test harness
221 def exit(self): # pragma: no cover
222 self.native.quit()
223
224 def get_current_window(self):
225 return self.native.get_active_window()._impl
226
227 def set_current_window(self, window):
228 window._impl.native.present()
229
230 def enter_full_screen(self, windows):
231 for window in windows:
232 window._impl.set_full_screen(True)
233
234 def exit_full_screen(self, windows):
235 for window in windows:
236 window._impl.set_full_screen(False)
237
238 def show_cursor(self):
239 self.interface.factory.not_implemented("App.show_cursor()")
240
241 def hide_cursor(self):
242 self.interface.factory.not_implemented("App.hide_cursor()")
243
244
245 class DocumentApp(App): # pragma: no cover
246 def _create_app_commands(self):
247 self.interface.commands.add(
248 toga.Command(
249 self.open_file,
250 text="Open...",
251 shortcut=toga.Key.MOD_1 + "o",
252 group=toga.Group.FILE,
253 section=0,
254 ),
255 )
256
257 def gtk_startup(self, data=None):
258 super().gtk_startup(data=data)
259
260 try:
261 # Look for a filename specified on the command line
262 self.interface._open(Path(sys.argv[1]))
263 except IndexError:
264 # Nothing on the command line; open a file dialog instead.
265 # Create a temporary window so we have context for the dialog
266 m = toga.Window()
267 m.open_file_dialog(
268 self.interface.formal_name,
269 file_types=self.interface.document_types.keys(),
270 on_result=lambda dialog, path: self.interface._open(path)
271 if path
272 else self.exit(),
273 )
274
275 def open_file(self, widget, **kwargs):
276 # Create a temporary window so we have context for the dialog
277 m = toga.Window()
278 m.open_file_dialog(
279 self.interface.formal_name,
280 file_types=self.interface.document_types.keys(),
281 on_result=lambda dialog, path: self.interface._open(path) if path else None,
282 )
283
[end of gtk/src/toga_gtk/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gtk/src/toga_gtk/app.py b/gtk/src/toga_gtk/app.py
--- a/gtk/src/toga_gtk/app.py
+++ b/gtk/src/toga_gtk/app.py
@@ -222,7 +222,8 @@
self.native.quit()
def get_current_window(self):
- return self.native.get_active_window()._impl
+ current_window = self.native.get_active_window()._impl
+ return current_window if current_window.interface.visible else None
def set_current_window(self, window):
window._impl.native.present()
|
{"golden_diff": "diff --git a/gtk/src/toga_gtk/app.py b/gtk/src/toga_gtk/app.py\n--- a/gtk/src/toga_gtk/app.py\n+++ b/gtk/src/toga_gtk/app.py\n@@ -222,7 +222,8 @@\n self.native.quit()\n \n def get_current_window(self):\n- return self.native.get_active_window()._impl\n+ current_window = self.native.get_active_window()._impl\n+ return current_window if current_window.interface.visible else None\n \n def set_current_window(self, window):\n window._impl.native.present()\n", "issue": "On GTK, `App.current_window` returns the last active window even when all windows are hidden\n### Describe the bug\r\n\r\nOn Cocoa and WinForms, it returns `None` in this situation, which I think makes more sense.\r\n\r\n### Environment\r\n\r\n- Software versions:\r\n - Toga: main (536a60f)\n", "before_files": [{"content": "import asyncio\nimport signal\nimport sys\nfrom pathlib import Path\n\nimport gbulb\n\nimport toga\nfrom toga import App as toga_App\nfrom toga.command import Command, Separator\n\nfrom .keys import gtk_accel\nfrom .libs import TOGA_DEFAULT_STYLES, Gdk, Gio, GLib, Gtk\nfrom .window import Window\n\n\nclass MainWindow(Window):\n def create(self):\n self.native = Gtk.ApplicationWindow()\n self.native.set_role(\"MainWindow\")\n icon_impl = toga_App.app.icon._impl\n self.native.set_icon(icon_impl.native_72)\n\n def gtk_delete_event(self, *args):\n # Return value of the GTK on_close handler indicates\n # whether the event has been fully handled. Returning\n # False indicates the event handling is *not* complete,\n # so further event processing (including actually\n # closing the window) should be performed; so\n # \"should_exit == True\" must be converted to a return\n # value of False.\n self.interface.app.on_exit()\n return True\n\n\nclass App:\n \"\"\"\n Todo:\n * Creation of Menus is not working.\n * Disabling of menu items is not working.\n * App Icon is not showing up\n \"\"\"\n\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n gbulb.install(gtk=True)\n self.loop = asyncio.new_event_loop()\n\n self.create()\n\n def create(self):\n # Stimulate the build of the app\n self.native = Gtk.Application(\n application_id=self.interface.app_id,\n flags=Gio.ApplicationFlags.FLAGS_NONE,\n )\n self.native_about_dialog = None\n\n # Connect the GTK signal that will cause app startup to occur\n self.native.connect(\"startup\", self.gtk_startup)\n self.native.connect(\"activate\", self.gtk_activate)\n\n self.actions = None\n\n def gtk_startup(self, data=None):\n # Set up the default commands for the interface.\n self.interface.commands.add(\n Command(\n self._menu_about,\n \"About \" + self.interface.formal_name,\n group=toga.Group.HELP,\n ),\n Command(None, \"Preferences\", group=toga.Group.APP),\n # Quit should always be the last item, in a section on its own\n Command(\n self._menu_quit,\n \"Quit \" + self.interface.formal_name,\n shortcut=toga.Key.MOD_1 + \"q\",\n group=toga.Group.APP,\n section=sys.maxsize,\n ),\n )\n self._create_app_commands()\n\n self.interface._startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.create_menus()\n\n # Now that we have menus, make the app take responsibility for\n # showing the menubar.\n # This is required because of inconsistencies in how the Gnome\n # shell operates on different windowing environments;\n # see #872 for details.\n settings = Gtk.Settings.get_default()\n settings.set_property(\"gtk-shell-shows-menubar\", False)\n\n # Set any custom styles\n css_provider = 
Gtk.CssProvider()\n css_provider.load_from_data(TOGA_DEFAULT_STYLES)\n\n context = Gtk.StyleContext()\n context.add_provider_for_screen(\n Gdk.Screen.get_default(), css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER\n )\n\n def _create_app_commands(self):\n # No extra menus\n pass\n\n def gtk_activate(self, data=None):\n pass\n\n def _menu_about(self, command, **kwargs):\n self.interface.about()\n\n def _menu_quit(self, command, **kwargs):\n self.interface.on_exit()\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n self._menu_items = {}\n self._menu_groups = {}\n\n # Create the menu for the top level menubar.\n menubar = Gio.Menu()\n section = None\n for cmd in self.interface.commands:\n if isinstance(cmd, Separator):\n section = None\n else:\n submenu, created = self._submenu(cmd.group, menubar)\n if created:\n section = None\n\n if section is None:\n section = Gio.Menu()\n submenu.append_section(None, section)\n\n cmd_id = \"command-%s\" % id(cmd)\n action = Gio.SimpleAction.new(cmd_id, None)\n action.connect(\"activate\", cmd._impl.gtk_activate)\n\n cmd._impl.native.append(action)\n cmd._impl.set_enabled(cmd.enabled)\n self._menu_items[action] = cmd\n self.native.add_action(action)\n\n item = Gio.MenuItem.new(cmd.text, \"app.\" + cmd_id)\n if cmd.shortcut:\n item.set_attribute_value(\n \"accel\", GLib.Variant(\"s\", gtk_accel(cmd.shortcut))\n )\n\n section.append_item(item)\n\n # Set the menu for the app.\n self.native.set_menubar(menubar)\n\n def _submenu(self, group, menubar):\n try:\n return self._menu_groups[group], False\n except KeyError:\n if group is None:\n submenu = menubar\n else:\n parent_menu, _ = self._submenu(group.parent, menubar)\n submenu = Gio.Menu()\n self._menu_groups[group] = submenu\n\n text = group.text\n if text == \"*\":\n text = self.interface.formal_name\n parent_menu.append_submenu(text, submenu)\n\n # Install the item in the group cache.\n self._menu_groups[group] = submenu\n\n return submenu, True\n\n def main_loop(self):\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self.loop.run_forever(application=self.native)\n\n def set_main_window(self, window):\n pass\n\n def show_about_dialog(self):\n self.native_about_dialog = Gtk.AboutDialog()\n self.native_about_dialog.set_modal(True)\n\n icon_impl = toga_App.app.icon._impl\n self.native_about_dialog.set_logo(icon_impl.native_72)\n\n self.native_about_dialog.set_program_name(self.interface.formal_name)\n if self.interface.version is not None:\n self.native_about_dialog.set_version(self.interface.version)\n if self.interface.author is not None:\n self.native_about_dialog.set_authors([self.interface.author])\n if self.interface.description is not None:\n self.native_about_dialog.set_comments(self.interface.description)\n if self.interface.home_page is not None:\n self.native_about_dialog.set_website(self.interface.home_page)\n\n self.native_about_dialog.show()\n self.native_about_dialog.connect(\"close\", self._close_about)\n\n def _close_about(self, dialog):\n self.native_about_dialog.destroy()\n self.native_about_dialog = None\n\n def beep(self):\n Gdk.beep()\n\n # We can't call this under test conditions, because it would kill the test harness\n def exit(self): # pragma: no cover\n self.native.quit()\n\n def get_current_window(self):\n return self.native.get_active_window()._impl\n\n def set_current_window(self, window):\n window._impl.native.present()\n\n def enter_full_screen(self, windows):\n 
for window in windows:\n window._impl.set_full_screen(True)\n\n def exit_full_screen(self, windows):\n for window in windows:\n window._impl.set_full_screen(False)\n\n def show_cursor(self):\n self.interface.factory.not_implemented(\"App.show_cursor()\")\n\n def hide_cursor(self):\n self.interface.factory.not_implemented(\"App.hide_cursor()\")\n\n\nclass DocumentApp(App): # pragma: no cover\n def _create_app_commands(self):\n self.interface.commands.add(\n toga.Command(\n self.open_file,\n text=\"Open...\",\n shortcut=toga.Key.MOD_1 + \"o\",\n group=toga.Group.FILE,\n section=0,\n ),\n )\n\n def gtk_startup(self, data=None):\n super().gtk_startup(data=data)\n\n try:\n # Look for a filename specified on the command line\n self.interface._open(Path(sys.argv[1]))\n except IndexError:\n # Nothing on the command line; open a file dialog instead.\n # Create a temporary window so we have context for the dialog\n m = toga.Window()\n m.open_file_dialog(\n self.interface.formal_name,\n file_types=self.interface.document_types.keys(),\n on_result=lambda dialog, path: self.interface._open(path)\n if path\n else self.exit(),\n )\n\n def open_file(self, widget, **kwargs):\n # Create a temporary window so we have context for the dialog\n m = toga.Window()\n m.open_file_dialog(\n self.interface.formal_name,\n file_types=self.interface.document_types.keys(),\n on_result=lambda dialog, path: self.interface._open(path) if path else None,\n )\n", "path": "gtk/src/toga_gtk/app.py"}]}
| 3,273 | 128 |
gh_patches_debug_18418
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-5792
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
user warnings about plugins are shown every time when kolibri starts
<!--
Instructions:
* Fill out the sections below, replace …'s with information about your issue
* Use the 'preview' function above this text box to verify formatting before submitting
-->
### Observed behavior
<!--
Description of the behavior that was observed, including screenshots or other references when applicable
-->
When I started kolibri from a fresh install, I got this warning:
```
/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.setup_wizard exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly
module=self._module_path()
```
After I created the facility and got navigated to the device page, I got another warning:
```
/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.device_management exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly
module=self._module_path()
```
### Expected behavior
<!--
Description of what behavior was expected but did not occur
-->
we should try to eliminate the warnings if possible
### User-facing consequences
<!--
Implications and real-world consequences for learners, coaches, admins, and other users of the application
-->
users/developers who check the logs might be confused by these warnings
### Errors and logs
<!--
Relevant logs from:
* the command line
* ~/.kolibri/logs/kolibri.txt
* the browser console
Please wrap errors in triple backticks for clean formatting like this:
```
01:10 info: something happened
01:12 error: something bad happened
```
-->
```
INFO ENGINE Serving on http://0.0.0.0:8080
INFO ENGINE Bus STARTED
/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.setup_wizard exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly
module=self._module_path()
/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.device_management exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly
module=self._module_path()
INFO Downloading data for channel id a9b25ac9814742c883ce1b0579448337
```
### Steps to reproduce
<!--
Precise steps that someone else can follow in order to see this behavior
-->
1. get the 0.12.6b1 pex
2. run `python kolibri-0.12.6b1.pex start`
3. open localhost:8080
4. check the log (on Ubuntu, the error appeared in daemon.txt)
5. create the facility
6. check the log again
### Context
<!--
Tell us about your environment, including:
* Kolibri version
* Operating system
* Browser
-->
Kolibri version: 0.12.6b1
Operating System: OSX, Ubuntu Xenial
</issue>
<code>
[start of kolibri/plugins/base.py]
1 """The base of a Kolibri plugin is the inheritence from
2 :class:`.KolibriPluginBase`.
3 """
4 from __future__ import absolute_import
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import logging
9 import sys
10 import warnings
11 from importlib import import_module
12
13 from django.conf import settings
14 from django.utils.module_loading import module_has_submodule
15
16 from kolibri.utils.conf import config
17
18 logger = logging.getLogger(__name__)
19
20
21 class MandatoryPluginMethodNotImplemented(NotImplementedError):
22 def __init__(self):
23 super(MandatoryPluginMethodNotImplemented, self).__init__(
24 "Plugin needs to define this method"
25 ) # pragma: no cover
26
27
28 class MandatoryPluginAttributeNotImplemented(NotImplementedError):
29 def __init__(self):
30 super(MandatoryPluginAttributeNotImplemented, self).__init__(
31 "Plugin needs to define this attribute"
32 ) # pragma: no cover
33
34
35 class KolibriPluginBase(object):
36 """
37 This is the base class that all Kolibri plugins need to implement.
38 """
39
40 #: Comment
41 # Name of a local module that contains url_patterns that define
42 # URLs for views that do not contain any
43 # translated content, and hence will not be prefixed
44 # with a language prefix
45 untranslated_view_urls = None
46
47 #: Comment
48 # Name of a local module that contains url_patterns that define
49 # URLs for views that contain
50 # translated content, and hence will be prefixed
51 # with a language prefixs
52 translated_view_urls = None
53
54 #: Comment
55 # Name of a local module that contains url_patterns that define
56 # URLs for views that should be attached to the domain root.
57 # Use with caution! The lack of namespacing is dangerous.
58 root_view_urls = None
59
60 #: Comment
61 # Name of a local module that contains additional settings to augment
62 # Django settings.
63 # For settings that take a tuple or list, these will be appended to the value from
64 # the base settings module set through conventional Django means.
65 django_settings = None
66
67 #: Comment
68 # Name of a local module, containing a config spec as the 'option_spec' value.
69 # These options should not override the core config spec, but may specify a new
70 # default value for a core config spec option.
71 kolibri_options = None
72
73 # : Suggested property, not yet in use
74 migrate_on_enable = False
75
76 # : Suggested property, not yet in use
77 collect_static_on_enable = False
78
79 # : Suggested property, not yet in use
80 collect_static_on_enable = False
81
82 def __init__(self):
83 if settings.configured:
84 # Check to see if a plugin is being initialized after Django
85 warnings.warn(
86 "{module} exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly".format(
87 module=self._module_path()
88 )
89 )
90
91 @classmethod
92 def _module_path(cls):
93 """
94 Returns the path of the class inheriting this classmethod.
95 There is no such thing as Class properties, that's why it's implemented
96 as such.
97
98 Used in KolibriPluginBase._installed_apps_add
99 """
100 return ".".join(cls.__module__.split(".")[:-1])
101
102 @classmethod
103 def _installed_apps_add(cls):
104 """Call this from your enable() method to have the plugin automatically
105 added to Kolibri configuration"""
106 module_path = cls._module_path()
107 if module_path not in config.ACTIVE_PLUGINS:
108 config.enable_plugin(module_path)
109 else:
110 logger.warning("{} already enabled".format(module_path))
111
112 @classmethod
113 def _installed_apps_remove(cls):
114 """Call this from your enable() method to have the plugin automatically
115 added to Kolibri configuration"""
116 module_path = cls._module_path()
117 if module_path in config.ACTIVE_PLUGINS:
118 config.disable_plugin(module_path)
119 else:
120 logger.warning("{} already disabled".format(module_path))
121
122 @classmethod
123 def enable(cls):
124 """Modify the kolibri config dict to your plugin's needs"""
125 cls._installed_apps_add()
126
127 @classmethod
128 def disable(cls):
129 """Modify the kolibri config dict to your plugin's needs"""
130 cls._installed_apps_remove()
131
132 def _return_module(self, module_name):
133 if module_has_submodule(sys.modules[self._module_path()], module_name):
134 models_module_name = "%s.%s" % (self._module_path(), module_name)
135 return import_module(models_module_name)
136
137 return None
138
139 def url_module(self):
140 """
141 Return a url module, containing ``urlpatterns = [...]``, a conventional
142 Django application url module.
143
144 URLs are by default accessed through Django's reverse lookups like
145 this::
146
147 reverse('kolibri:mypluginclass:url_name')
148
149 To customize "mypluginclass" (which is automatically derived from the
150 plugin's class name), override ``url_namespace``.
151
152 By default this will be discovered based on the translated_view_urls
153 property.
154 """
155 if self.translated_view_urls:
156 module = self._return_module(self.translated_view_urls)
157 if module is None:
158 logging.warn(
159 "{plugin} defined {urls} translated view urls but the module was not found".format(
160 plugin=self._module_path(), urls=self.translated_view_urls
161 )
162 )
163 return module
164
165 def api_url_module(self):
166 """
167 Return a url module, containing ``urlpatterns = [...]``, a conventional
168 Django application url module.
169
170 Do this separately for API endpoints so that they do not need
171 to be prefixed by the language code.
172
173 URLs are by default accessed through Django's reverse lookups like
174 this::
175
176 reverse('kolibri:mypluginclass:url_name')
177
178 To customize "mypluginclass" (which is automatically derived from the
179 plugin's class name), override ``url_namespace``.
180
181 By default this will be discovered based on the untranslated_view_urls
182 property.
183 """
184 if self.untranslated_view_urls:
185 module = self._return_module(self.untranslated_view_urls)
186 if module is None:
187 logging.warn(
188 "{plugin} defined {urls} untranslated view urls but the module was not found".format(
189 plugin=self._module_path(), urls=self.untranslated_view_urls
190 )
191 )
192 return module
193
194 def root_url_module(self):
195 """
196 Return a url module, containing ``urlpatterns = [...]``, a conventional
197 Django application url module.
198
199 Do this separately for endpoints that need to be attached at the root.
200
201 URLs are by default accessed through Django's reverse lookups like
202 this::
203
204 reverse('kolibri:url_name')
205
206 By default this will be discovered based on the root_view_urls
207 property.
208 """
209 if self.root_view_urls:
210 module = self._return_module(self.root_view_urls)
211 logger.warning(
212 "Setting up root URLs which is not recommended!\n plugin module: {}".format(
213 self
214 )
215 )
216 if module is None:
217 logging.warn(
218 "{plugin} defined {urls} root view urls but the module was not found".format(
219 plugin=self._module_path(), urls=self.root_view_urls
220 )
221 )
222 return module
223
224 def settings_module(self):
225 """
226 Return a settings module, containing Django settings that this
227 module wants to apply.
228
229 For settings that take a tuple or list, these will be appended to the value from
230 the base settings module set through conventional Django means.
231
232 By default this will be discovered based on the django_settings
233 property.
234 """
235 if self.django_settings:
236 module = self._return_module(self.django_settings)
237 if module is None:
238 logging.warn(
239 "{plugin} defined {module} django settings but the module was not found".format(
240 plugin=self._module_path(), module=self.django_settings
241 )
242 )
243 return module
244
245 def options_module(self):
246 """
247 Return an options module, containing a config spec as the 'option_spec' value.
248
249 These options should not override the core config spec, but may specify only a new
250 default value for a core config spec option.
251
252 By default this will be discovered based on the kolibri_options
253 property.
254 """
255 if self.kolibri_options:
256 module = self._return_module(self.kolibri_options)
257 if module is None:
258 logging.warn(
259 "{plugin} defined {module} kolibri options but the module was not found".format(
260 plugin=self._module_path(), module=self.kolibri_options
261 )
262 )
263 return module
264
265 def url_namespace(self):
266 """
267 Used for the ``namespace`` argument when including the plugin's
268 urlpatterns. By default, returns a lowercase of the class name.
269 """
270 return self.__class__.__name__.lower()
271
272 def url_slug(self):
273 """
274 Where should urls be included? By default, this is a lower-case version
275 of the class name.
276
277 Example::
278
279 return r"my-plugin/"
280
281 .. warning:: Avoid the empty string, as you might get conflicts.
282 """
283 return self.__class__.__name__.lower() + "/"
284
[end of kolibri/plugins/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/plugins/base.py b/kolibri/plugins/base.py
--- a/kolibri/plugins/base.py
+++ b/kolibri/plugins/base.py
@@ -7,10 +7,8 @@
import logging
import sys
-import warnings
from importlib import import_module
-from django.conf import settings
from django.utils.module_loading import module_has_submodule
from kolibri.utils.conf import config
@@ -79,15 +77,6 @@
# : Suggested property, not yet in use
collect_static_on_enable = False
- def __init__(self):
- if settings.configured:
- # Check to see if a plugin is being initialized after Django
- warnings.warn(
- "{module} exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly".format(
- module=self._module_path()
- )
- )
-
@classmethod
def _module_path(cls):
"""
|
{"golden_diff": "diff --git a/kolibri/plugins/base.py b/kolibri/plugins/base.py\n--- a/kolibri/plugins/base.py\n+++ b/kolibri/plugins/base.py\n@@ -7,10 +7,8 @@\n \n import logging\n import sys\n-import warnings\n from importlib import import_module\n \n-from django.conf import settings\n from django.utils.module_loading import module_has_submodule\n \n from kolibri.utils.conf import config\n@@ -79,15 +77,6 @@\n # : Suggested property, not yet in use\n collect_static_on_enable = False\n \n- def __init__(self):\n- if settings.configured:\n- # Check to see if a plugin is being initialized after Django\n- warnings.warn(\n- \"{module} exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly\".format(\n- module=self._module_path()\n- )\n- )\n-\n @classmethod\n def _module_path(cls):\n \"\"\"\n", "issue": "user warnings about plugins are shown every time when kolibri starts\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n### Observed behavior\r\n<!--\r\nDescription of the behavior that was observed, including screenshots or other references when applicable\r\n-->\r\n\r\nWhen I started kolibri from a fresh install, I got this warning:\r\n```\r\n/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.setup_wizard exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly\r\n module=self._module_path()\r\n```\r\nAfter I created the facility and got navigated to the device page, I got another warning:\r\n```\r\n/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.device_management exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly\r\n module=self._module_path()\r\n```\r\n\r\n### Expected behavior\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\nwe should try to eliminate the warnings if possible\r\n\r\n### User-facing consequences\r\n<!--\r\nImplications and real-world consequences for learners, coaches, admins, and other users of the application\r\n-->\r\n\r\nusers/developers who check the logs might be confused by these warnings\r\n\r\n### Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/logs/kolibri.txt\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n```\r\nINFO ENGINE Serving on http://0.0.0.0:8080\r\nINFO ENGINE Bus STARTED\r\n/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.setup_wizard exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly\r\n module=self._module_path()\r\n/Users/lingyiwang/.venvs/kolibri0.12.6b1/lib/python2.7/site-packages/kolibri/plugins/base.py:87: UserWarning: kolibri.plugins.device_management exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly\r\n module=self._module_path()\r\nINFO Downloading data for channel id 
a9b25ac9814742c883ce1b0579448337\r\n```\r\n\r\n### Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\n1. get the 0.12.6b1 pex\r\n2. run `python kolibri-0.12.6b1.pex start`\r\n3. open localhost:8080\r\n4. check the log (on Ubuntu, the error appeared in daemon.txt)\r\n5. create the facility\r\n6. check the log again\r\n\r\n### Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\nKolibri version: 0.12.6b1\r\nOperating System: OSX, Ubuntu Xenial\r\n\n", "before_files": [{"content": "\"\"\"The base of a Kolibri plugin is the inheritence from\n:class:`.KolibriPluginBase`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport sys\nimport warnings\nfrom importlib import import_module\n\nfrom django.conf import settings\nfrom django.utils.module_loading import module_has_submodule\n\nfrom kolibri.utils.conf import config\n\nlogger = logging.getLogger(__name__)\n\n\nclass MandatoryPluginMethodNotImplemented(NotImplementedError):\n def __init__(self):\n super(MandatoryPluginMethodNotImplemented, self).__init__(\n \"Plugin needs to define this method\"\n ) # pragma: no cover\n\n\nclass MandatoryPluginAttributeNotImplemented(NotImplementedError):\n def __init__(self):\n super(MandatoryPluginAttributeNotImplemented, self).__init__(\n \"Plugin needs to define this attribute\"\n ) # pragma: no cover\n\n\nclass KolibriPluginBase(object):\n \"\"\"\n This is the base class that all Kolibri plugins need to implement.\n \"\"\"\n\n #: Comment\n # Name of a local module that contains url_patterns that define\n # URLs for views that do not contain any\n # translated content, and hence will not be prefixed\n # with a language prefix\n untranslated_view_urls = None\n\n #: Comment\n # Name of a local module that contains url_patterns that define\n # URLs for views that contain\n # translated content, and hence will be prefixed\n # with a language prefixs\n translated_view_urls = None\n\n #: Comment\n # Name of a local module that contains url_patterns that define\n # URLs for views that should be attached to the domain root.\n # Use with caution! 
The lack of namespacing is dangerous.\n root_view_urls = None\n\n #: Comment\n # Name of a local module that contains additional settings to augment\n # Django settings.\n # For settings that take a tuple or list, these will be appended to the value from\n # the base settings module set through conventional Django means.\n django_settings = None\n\n #: Comment\n # Name of a local module, containing a config spec as the 'option_spec' value.\n # These options should not override the core config spec, but may specify a new\n # default value for a core config spec option.\n kolibri_options = None\n\n # : Suggested property, not yet in use\n migrate_on_enable = False\n\n # : Suggested property, not yet in use\n collect_static_on_enable = False\n\n # : Suggested property, not yet in use\n collect_static_on_enable = False\n\n def __init__(self):\n if settings.configured:\n # Check to see if a plugin is being initialized after Django\n warnings.warn(\n \"{module} exposes a KolibriPluginBase derived object but is initialized after Django - enable as a plugin to use this properly\".format(\n module=self._module_path()\n )\n )\n\n @classmethod\n def _module_path(cls):\n \"\"\"\n Returns the path of the class inheriting this classmethod.\n There is no such thing as Class properties, that's why it's implemented\n as such.\n\n Used in KolibriPluginBase._installed_apps_add\n \"\"\"\n return \".\".join(cls.__module__.split(\".\")[:-1])\n\n @classmethod\n def _installed_apps_add(cls):\n \"\"\"Call this from your enable() method to have the plugin automatically\n added to Kolibri configuration\"\"\"\n module_path = cls._module_path()\n if module_path not in config.ACTIVE_PLUGINS:\n config.enable_plugin(module_path)\n else:\n logger.warning(\"{} already enabled\".format(module_path))\n\n @classmethod\n def _installed_apps_remove(cls):\n \"\"\"Call this from your enable() method to have the plugin automatically\n added to Kolibri configuration\"\"\"\n module_path = cls._module_path()\n if module_path in config.ACTIVE_PLUGINS:\n config.disable_plugin(module_path)\n else:\n logger.warning(\"{} already disabled\".format(module_path))\n\n @classmethod\n def enable(cls):\n \"\"\"Modify the kolibri config dict to your plugin's needs\"\"\"\n cls._installed_apps_add()\n\n @classmethod\n def disable(cls):\n \"\"\"Modify the kolibri config dict to your plugin's needs\"\"\"\n cls._installed_apps_remove()\n\n def _return_module(self, module_name):\n if module_has_submodule(sys.modules[self._module_path()], module_name):\n models_module_name = \"%s.%s\" % (self._module_path(), module_name)\n return import_module(models_module_name)\n\n return None\n\n def url_module(self):\n \"\"\"\n Return a url module, containing ``urlpatterns = [...]``, a conventional\n Django application url module.\n\n URLs are by default accessed through Django's reverse lookups like\n this::\n\n reverse('kolibri:mypluginclass:url_name')\n\n To customize \"mypluginclass\" (which is automatically derived from the\n plugin's class name), override ``url_namespace``.\n\n By default this will be discovered based on the translated_view_urls\n property.\n \"\"\"\n if self.translated_view_urls:\n module = self._return_module(self.translated_view_urls)\n if module is None:\n logging.warn(\n \"{plugin} defined {urls} translated view urls but the module was not found\".format(\n plugin=self._module_path(), urls=self.translated_view_urls\n )\n )\n return module\n\n def api_url_module(self):\n \"\"\"\n Return a url module, containing ``urlpatterns = [...]``, a 
conventional\n Django application url module.\n\n Do this separately for API endpoints so that they do not need\n to be prefixed by the language code.\n\n URLs are by default accessed through Django's reverse lookups like\n this::\n\n reverse('kolibri:mypluginclass:url_name')\n\n To customize \"mypluginclass\" (which is automatically derived from the\n plugin's class name), override ``url_namespace``.\n\n By default this will be discovered based on the untranslated_view_urls\n property.\n \"\"\"\n if self.untranslated_view_urls:\n module = self._return_module(self.untranslated_view_urls)\n if module is None:\n logging.warn(\n \"{plugin} defined {urls} untranslated view urls but the module was not found\".format(\n plugin=self._module_path(), urls=self.untranslated_view_urls\n )\n )\n return module\n\n def root_url_module(self):\n \"\"\"\n Return a url module, containing ``urlpatterns = [...]``, a conventional\n Django application url module.\n\n Do this separately for endpoints that need to be attached at the root.\n\n URLs are by default accessed through Django's reverse lookups like\n this::\n\n reverse('kolibri:url_name')\n\n By default this will be discovered based on the root_view_urls\n property.\n \"\"\"\n if self.root_view_urls:\n module = self._return_module(self.root_view_urls)\n logger.warning(\n \"Setting up root URLs which is not recommended!\\n plugin module: {}\".format(\n self\n )\n )\n if module is None:\n logging.warn(\n \"{plugin} defined {urls} root view urls but the module was not found\".format(\n plugin=self._module_path(), urls=self.root_view_urls\n )\n )\n return module\n\n def settings_module(self):\n \"\"\"\n Return a settings module, containing Django settings that this\n module wants to apply.\n\n For settings that take a tuple or list, these will be appended to the value from\n the base settings module set through conventional Django means.\n\n By default this will be discovered based on the django_settings\n property.\n \"\"\"\n if self.django_settings:\n module = self._return_module(self.django_settings)\n if module is None:\n logging.warn(\n \"{plugin} defined {module} django settings but the module was not found\".format(\n plugin=self._module_path(), module=self.django_settings\n )\n )\n return module\n\n def options_module(self):\n \"\"\"\n Return an options module, containing a config spec as the 'option_spec' value.\n\n These options should not override the core config spec, but may specify only a new\n default value for a core config spec option.\n\n By default this will be discovered based on the kolibri_options\n property.\n \"\"\"\n if self.kolibri_options:\n module = self._return_module(self.kolibri_options)\n if module is None:\n logging.warn(\n \"{plugin} defined {module} kolibri options but the module was not found\".format(\n plugin=self._module_path(), module=self.kolibri_options\n )\n )\n return module\n\n def url_namespace(self):\n \"\"\"\n Used for the ``namespace`` argument when including the plugin's\n urlpatterns. By default, returns a lowercase of the class name.\n \"\"\"\n return self.__class__.__name__.lower()\n\n def url_slug(self):\n \"\"\"\n Where should urls be included? By default, this is a lower-case version\n of the class name.\n\n Example::\n\n return r\"my-plugin/\"\n\n .. warning:: Avoid the empty string, as you might get conflicts.\n \"\"\"\n return self.__class__.__name__.lower() + \"/\"\n", "path": "kolibri/plugins/base.py"}]}
| 4,072 | 215 |
gh_patches_debug_13374
|
rasdani/github-patches
|
git_diff
|
autogluon__autogluon-126
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing best config after fit for object detection fit example
After executing object detection example, it only produces:
```
INFO:autogluon.task.object_detection.object_detection:>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting
INFO:autogluon.task.object_detection.object_detection:The best config:
```
while no best config is reported.
Might relate to https://github.com/awslabs/autogluon/issues/29
</issue>
<code>
[start of autogluon/task/object_detection/object_detection.py]
1 import logging
2
3 import mxnet as mx
4 from mxnet import gluon, nd
5
6 from ...core.optimizer import *
7 from ...core.optimizer import *
8 from ...core import *
9 from ...searcher import *
10 from ...scheduler import *
11 from ...scheduler.resource import get_cpu_count, get_gpu_count
12 from ..base import BaseTask
13
14 from .dataset import *
15 from .pipeline import train_object_detection
16 from .utils import *
17 from ...utils import update_params
18
19 from .detector import Detector
20
21 __all__ = ['ObjectDetection']
22
23 logger = logging.getLogger(__name__)
24
25 class ObjectDetection(BaseTask):
26 """AutoGluon ImageClassification Task
27 """
28 @staticmethod
29 def Dataset(*args, **kwargs):
30 return get_dataset(*args, **kwargs)
31
32 @staticmethod
33 def fit(dataset='voc',
34 net=Categorical('mobilenet1.0'),
35 lr=Categorical(5e-4, 1e-4),
36 loss=gluon.loss.SoftmaxCrossEntropyLoss(),
37 batch_size=16,
38 epochs=200,
39 num_trials=2,
40 nthreads_per_trial=12,
41 num_workers=32,
42 ngpus_per_trial=1,
43 hybridize=True,
44 search_strategy='random',
45 search_options={},
46 time_limits=None,
47 resume=False,
48 checkpoint='checkpoint/exp1.ag',
49 visualizer='none',
50 dist_ip_addrs=[],
51 grace_period=None,
52 auto_search=True,
53 seed=223,
54 data_shape=416,
55 start_epoch=0,
56 lr_mode='step',
57 lr_decay=0.1,
58 lr_decay_period=0,
59 lr_decay_epoch='160,180',
60 warmup_lr=0.0,
61 warmup_epochs=2,
62 momentum=0.9,
63 wd=0.0005,
64 log_interval=100,
65 save_prefix='',
66 save_interval=10,
67 val_interval=1,
68 num_samples=-1,
69 no_random_shape=False,
70 no_wd=False,
71 mixup=False,
72 no_mixup_epochs=20,
73 label_smooth=False,
74 syncbn=False,
75 ):
76
77 """
78 Auto fit on object detection dataset
79
80 Parameters
81 ----------
82 dataset : str or :meth:`autogluon.task.ObjectDectection.Dataset`
83 Training dataset.
84 net : str, :class:`autogluon.AutoGluonObject`
85 Network candidates.
86 optimizer : str, :class:`autogluon.AutoGluonObject`
87 optimizer candidates.
88 metric : str or object
89 observation metric.
90 loss : mxnet.gluon.loss
91 training loss function.
92 num_trials : int
93 number of trials in the experiment.
94 time_limits : int
95 training time limits in seconds.
96 resources_per_trial : dict
97 Machine resources to allocate per trial.
98 savedir : str
99 Local dir to save training results to.
100 search_strategy : str or callable
101 Search Algorithms ('random', 'bayesopt' and 'hyperband')
102 resume : bool, default False
103 If checkpoint exists, the experiment will resume from there.
104
105 Examples
106 --------
107 >>> dataset = task.Dataset(train_path='~/data/train',
108 >>> test_path='data/test')
109 >>> results = task.fit(dataset,
110 >>> nets=ag.space.Categorical['resnet18_v1', 'resnet34_v1'],
111 >>> time_limits=time_limits,
112 >>> ngpus_per_trial=1,
113 >>> num_trials = 4)
114 """
115 if auto_search:
116 # The strategies can be injected here, for example: automatic suggest some hps
117 # based on the dataset statistics
118 pass
119
120 nthreads_per_trial = get_cpu_count() if nthreads_per_trial > get_cpu_count() else nthreads_per_trial
121 ngpus_per_trial = get_gpu_count() if ngpus_per_trial > get_gpu_count() else ngpus_per_trial
122
123 train_object_detection.register_args(
124 dataset=dataset,
125 net=net,
126 lr = lr,
127 loss=loss,
128 num_gpus=ngpus_per_trial,
129 batch_size=batch_size,
130 epochs=epochs,
131 num_workers=nthreads_per_trial,
132 hybridize=hybridize,
133 final_fit=False,
134 seed=seed,
135 data_shape=data_shape,
136 start_epoch=0,
137 lr_mode=lr_mode,
138 lr_decay=lr_decay,
139 lr_decay_period=lr_decay_period,
140 lr_decay_epoch=lr_decay_epoch,
141 warmup_lr=warmup_lr,
142 warmup_epochs=warmup_epochs,
143 momentum=momentum,
144 wd=wd,
145 log_interval=log_interval,
146 save_prefix=save_prefix,
147 save_interval=save_interval,
148 val_interval=val_interval,
149 num_samples=num_samples,
150 no_random_shape=no_random_shape,
151 no_wd=no_wd,
152 mixup=mixup,
153 no_mixup_epochs=no_mixup_epochs,
154 label_smooth=label_smooth,
155 resume=resume,
156 syncbn=syncbn)
157
158 scheduler_options = {
159 'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
160 'checkpoint': checkpoint,
161 'num_trials': num_trials,
162 'time_out': time_limits,
163 'resume': resume,
164 'visualizer': visualizer,
165 'time_attr': 'epoch',
166 'reward_attr': 'map_reward',
167 'dist_ip_addrs': dist_ip_addrs,
168 'searcher': search_strategy,
169 'search_options': search_options,
170 }
171 if search_strategy == 'hyperband':
172 scheduler_options.update({
173 'searcher': 'random',
174 'max_t': epochs,
175 'grace_period': grace_period if grace_period else epochs//4})
176
177 results = BaseTask.run_fit(train_object_detection, search_strategy,
178 scheduler_options)
179 logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting")
180 args = sample_config(train_object_detection.args, results['best_config'])
181 logger.info('The best config:\n', results['best_config'])
182
183 model = get_network(args.net, dataset.init().get_classes(), mx.cpu(0))
184 update_params(model, results.pop('model_params'))
185 return Detector(model, results, checkpoint, args)
186
[end of autogluon/task/object_detection/object_detection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/autogluon/task/object_detection/object_detection.py b/autogluon/task/object_detection/object_detection.py
--- a/autogluon/task/object_detection/object_detection.py
+++ b/autogluon/task/object_detection/object_detection.py
@@ -178,7 +178,7 @@
scheduler_options)
logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting")
args = sample_config(train_object_detection.args, results['best_config'])
- logger.info('The best config:\n', results['best_config'])
+ logger.info('The best config: {}'.format(results['best_config']))
model = get_network(args.net, dataset.init().get_classes(), mx.cpu(0))
update_params(model, results.pop('model_params'))
|
{"golden_diff": "diff --git a/autogluon/task/object_detection/object_detection.py b/autogluon/task/object_detection/object_detection.py\n--- a/autogluon/task/object_detection/object_detection.py\n+++ b/autogluon/task/object_detection/object_detection.py\n@@ -178,7 +178,7 @@\n scheduler_options)\n logger.info(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting\")\n args = sample_config(train_object_detection.args, results['best_config'])\n- logger.info('The best config:\\n', results['best_config'])\n+ logger.info('The best config: {}'.format(results['best_config']))\n \n model = get_network(args.net, dataset.init().get_classes(), mx.cpu(0))\n update_params(model, results.pop('model_params'))\n", "issue": "Missing best config after fit for object detection fit example\nAfter executing object detection example, it only produces:\r\n\r\n```\r\nINFO:autogluon.task.object_detection.object_detection:>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting\r\nINFO:autogluon.task.object_detection.object_detection:The best config:\r\n```\r\nwhile no best config is reported.\r\n\r\nMight relate to https://github.com/awslabs/autogluon/issues/29\n", "before_files": [{"content": "import logging\n\nimport mxnet as mx\nfrom mxnet import gluon, nd\n\nfrom ...core.optimizer import *\nfrom ...core.optimizer import *\nfrom ...core import *\nfrom ...searcher import *\nfrom ...scheduler import *\nfrom ...scheduler.resource import get_cpu_count, get_gpu_count\nfrom ..base import BaseTask\n\nfrom .dataset import *\nfrom .pipeline import train_object_detection\nfrom .utils import *\nfrom ...utils import update_params\n\nfrom .detector import Detector\n\n__all__ = ['ObjectDetection']\n\nlogger = logging.getLogger(__name__)\n\nclass ObjectDetection(BaseTask):\n \"\"\"AutoGluon ImageClassification Task\n \"\"\"\n @staticmethod\n def Dataset(*args, **kwargs):\n return get_dataset(*args, **kwargs)\n\n @staticmethod\n def fit(dataset='voc',\n net=Categorical('mobilenet1.0'),\n lr=Categorical(5e-4, 1e-4),\n loss=gluon.loss.SoftmaxCrossEntropyLoss(),\n batch_size=16,\n epochs=200,\n num_trials=2,\n nthreads_per_trial=12,\n num_workers=32,\n ngpus_per_trial=1,\n hybridize=True,\n search_strategy='random',\n search_options={},\n time_limits=None,\n resume=False,\n checkpoint='checkpoint/exp1.ag',\n visualizer='none',\n dist_ip_addrs=[],\n grace_period=None,\n auto_search=True,\n seed=223,\n data_shape=416,\n start_epoch=0,\n lr_mode='step',\n lr_decay=0.1,\n lr_decay_period=0,\n lr_decay_epoch='160,180',\n warmup_lr=0.0,\n warmup_epochs=2,\n momentum=0.9,\n wd=0.0005,\n log_interval=100,\n save_prefix='',\n save_interval=10,\n val_interval=1,\n num_samples=-1,\n no_random_shape=False,\n no_wd=False,\n mixup=False,\n no_mixup_epochs=20,\n label_smooth=False,\n syncbn=False,\n ):\n\n \"\"\"\n Auto fit on object detection dataset\n\n Parameters\n ----------\n dataset : str or :meth:`autogluon.task.ObjectDectection.Dataset`\n Training dataset.\n net : str, :class:`autogluon.AutoGluonObject`\n Network candidates.\n optimizer : str, :class:`autogluon.AutoGluonObject`\n optimizer candidates.\n metric : str or object\n observation metric.\n loss : mxnet.gluon.loss\n training loss function.\n num_trials : int\n number of trials in the experiment.\n time_limits : int\n training time limits in seconds.\n resources_per_trial : dict\n Machine resources to allocate per trial.\n savedir : str\n Local dir to save training results to.\n search_strategy : str or callable\n Search Algorithms ('random', 'bayesopt' and 'hyperband')\n 
resume : bool, default False\n If checkpoint exists, the experiment will resume from there.\n\n Examples\n --------\n >>> dataset = task.Dataset(train_path='~/data/train',\n >>> test_path='data/test')\n >>> results = task.fit(dataset,\n >>> nets=ag.space.Categorical['resnet18_v1', 'resnet34_v1'],\n >>> time_limits=time_limits,\n >>> ngpus_per_trial=1,\n >>> num_trials = 4)\n \"\"\"\n if auto_search:\n # The strategies can be injected here, for example: automatic suggest some hps\n # based on the dataset statistics\n pass\n\n nthreads_per_trial = get_cpu_count() if nthreads_per_trial > get_cpu_count() else nthreads_per_trial\n ngpus_per_trial = get_gpu_count() if ngpus_per_trial > get_gpu_count() else ngpus_per_trial\n\n train_object_detection.register_args(\n dataset=dataset,\n net=net,\n lr = lr,\n loss=loss,\n num_gpus=ngpus_per_trial,\n batch_size=batch_size,\n epochs=epochs,\n num_workers=nthreads_per_trial,\n hybridize=hybridize,\n final_fit=False,\n seed=seed,\n data_shape=data_shape,\n start_epoch=0,\n lr_mode=lr_mode,\n lr_decay=lr_decay,\n lr_decay_period=lr_decay_period,\n lr_decay_epoch=lr_decay_epoch,\n warmup_lr=warmup_lr,\n warmup_epochs=warmup_epochs,\n momentum=momentum,\n wd=wd,\n log_interval=log_interval,\n save_prefix=save_prefix,\n save_interval=save_interval,\n val_interval=val_interval,\n num_samples=num_samples,\n no_random_shape=no_random_shape,\n no_wd=no_wd,\n mixup=mixup,\n no_mixup_epochs=no_mixup_epochs,\n label_smooth=label_smooth,\n resume=resume,\n syncbn=syncbn)\n\n scheduler_options = {\n 'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},\n 'checkpoint': checkpoint,\n 'num_trials': num_trials,\n 'time_out': time_limits,\n 'resume': resume,\n 'visualizer': visualizer,\n 'time_attr': 'epoch',\n 'reward_attr': 'map_reward',\n 'dist_ip_addrs': dist_ip_addrs,\n 'searcher': search_strategy,\n 'search_options': search_options,\n }\n if search_strategy == 'hyperband':\n scheduler_options.update({\n 'searcher': 'random',\n 'max_t': epochs,\n 'grace_period': grace_period if grace_period else epochs//4})\n \n results = BaseTask.run_fit(train_object_detection, search_strategy,\n scheduler_options)\n logger.info(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting\")\n args = sample_config(train_object_detection.args, results['best_config'])\n logger.info('The best config:\\n', results['best_config'])\n\n model = get_network(args.net, dataset.init().get_classes(), mx.cpu(0))\n update_params(model, results.pop('model_params'))\n return Detector(model, results, checkpoint, args)\n", "path": "autogluon/task/object_detection/object_detection.py"}]}
| 2,436 | 165 |
gh_patches_debug_27894
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-1864
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"pwn template" emits a Python2 shebang
We should probably update this to use Python3 explicitly, since that's what we recommend.
</issue>
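For context, a quick way to see the emitted shebang is to capture the template output — a minimal sketch, assuming pwntools is installed and `pwn` is on `PATH`:

```python
# Print the first line of a generated template; before the fix this is the
# Python 2 shebang that the issue describes.
import subprocess

template = subprocess.check_output(["pwn", "template"], text=True)
print(template.splitlines()[0])
```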
<code>
[start of pwnlib/commandline/template.py]
1 #!/usr/bin/env python2
2 from __future__ import absolute_import
3 from __future__ import division
4
5 import re
6
7 from pwn import *
8 from pwnlib.commandline import common
9
10 from mako.lookup import TemplateLookup
11
12 parser = common.parser_commands.add_parser(
13 'template',
14 help = 'Generate an exploit template',
15 description = 'Generate an exploit template'
16 )
17
18 parser.add_argument('exe', nargs='?', help='Target binary')
19 parser.add_argument('--host', help='Remote host / SSH server')
20 parser.add_argument('--port', help='Remote port / SSH port', type=int)
21 parser.add_argument('--user', help='SSH Username')
22 parser.add_argument('--pass', '--password', help='SSH Password', dest='password')
23 parser.add_argument('--path', help='Remote path of file on SSH server')
24 parser.add_argument('--quiet', help='Less verbose template comments', action='store_true')
25 parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')
26
27 def main(args):
28 cache = None
29
30 if cache:
31 cache = os.path.join(context.cache_dir, 'mako')
32
33 lookup = TemplateLookup(
34 directories = [os.path.join(pwnlib.data.path, 'templates')],
35 module_directory = cache
36 )
37
38 # For the SSH scenario, check that the binary is at the
39 # same path on the remote host.
40 if args.user:
41 if not (args.path or args.exe):
42 log.error("Must specify --path or a exe")
43
44 s = ssh(args.user, args.host, args.port or 22, args.password or None)
45
46 try:
47 remote = args.path or args.exe
48 s.download(remote)
49 except Exception:
50 log.warning("Could not download file %r, opening a shell", remote)
51 s.interactive()
52 return
53
54 if not args.exe:
55 args.exe = os.path.basename(args.path)
56
57 template = lookup.get_template('pwnup.mako')
58 output = template.render(args.exe,
59 args.host,
60 args.port,
61 args.user,
62 args.password,
63 args.path,
64 args.quiet)
65
66 # Fix Mako formatting bs
67 output = re.sub('\n\n\n', '\n\n', output)
68
69 # Colorize the output if it's a TTY
70 if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()):
71 from pygments import highlight
72 from pygments.formatters import TerminalFormatter
73 from pygments.lexers.python import PythonLexer
74 output = highlight(output, PythonLexer(), TerminalFormatter())
75
76 print(output)
77
78 # If redirected to a file, make the resulting script executable
79 if not sys.stdout.isatty():
80 try: os.fchmod(sys.stdout.fileno(), 0o700)
81 except OSError: pass
82
83 if __name__ == '__main__':
84 pwnlib.commandline.common.main(__file__)
85
[end of pwnlib/commandline/template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwnlib/commandline/template.py b/pwnlib/commandline/template.py
old mode 100644
new mode 100755
--- a/pwnlib/commandline/template.py
+++ b/pwnlib/commandline/template.py
@@ -2,8 +2,6 @@
from __future__ import absolute_import
from __future__ import division
-import re
-
from pwn import *
from pwnlib.commandline import common
@@ -25,14 +23,9 @@
parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')
def main(args):
- cache = None
-
- if cache:
- cache = os.path.join(context.cache_dir, 'mako')
-
lookup = TemplateLookup(
directories = [os.path.join(pwnlib.data.path, 'templates')],
- module_directory = cache
+ module_directory = None
)
# For the SSH scenario, check that the binary is at the
@@ -44,10 +37,10 @@
s = ssh(args.user, args.host, args.port or 22, args.password or None)
try:
- remote = args.path or args.exe
- s.download(remote)
+ remote_file = args.path or args.exe
+ s.download(remote_file)
except Exception:
- log.warning("Could not download file %r, opening a shell", remote)
+ log.warning("Could not download file %r, opening a shell", remote_file)
s.interactive()
return
|
{"golden_diff": "diff --git a/pwnlib/commandline/template.py b/pwnlib/commandline/template.py\nold mode 100644\nnew mode 100755\n--- a/pwnlib/commandline/template.py\n+++ b/pwnlib/commandline/template.py\n@@ -2,8 +2,6 @@\n from __future__ import absolute_import\n from __future__ import division\n \n-import re\n-\n from pwn import *\n from pwnlib.commandline import common\n \n@@ -25,14 +23,9 @@\n parser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n \n def main(args):\n- cache = None\n-\n- if cache:\n- cache = os.path.join(context.cache_dir, 'mako')\n-\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n- module_directory = cache\n+ module_directory = None\n )\n \n # For the SSH scenario, check that the binary is at the\n@@ -44,10 +37,10 @@\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n \n try:\n- remote = args.path or args.exe\n- s.download(remote)\n+ remote_file = args.path or args.exe\n+ s.download(remote_file)\n except Exception:\n- log.warning(\"Could not download file %r, opening a shell\", remote)\n+ log.warning(\"Could not download file %r, opening a shell\", remote_file)\n s.interactive()\n return\n", "issue": "\"pwn template\" emits a Python2 shebang\nWe should probably update this to use Python3 explicitly, since that's what we recommend.\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport re\n\nfrom pwn import *\nfrom pwnlib.commandline import common\n\nfrom mako.lookup import TemplateLookup\n\nparser = common.parser_commands.add_parser(\n 'template',\n help = 'Generate an exploit template',\n description = 'Generate an exploit template'\n)\n\nparser.add_argument('exe', nargs='?', help='Target binary')\nparser.add_argument('--host', help='Remote host / SSH server')\nparser.add_argument('--port', help='Remote port / SSH port', type=int)\nparser.add_argument('--user', help='SSH Username')\nparser.add_argument('--pass', '--password', help='SSH Password', dest='password')\nparser.add_argument('--path', help='Remote path of file on SSH server')\nparser.add_argument('--quiet', help='Less verbose template comments', action='store_true')\nparser.add_argument('--color', help='Print the output in color', choices=['never', 'always', 'auto'], default='auto')\n\ndef main(args):\n cache = None\n\n if cache:\n cache = os.path.join(context.cache_dir, 'mako')\n\n lookup = TemplateLookup(\n directories = [os.path.join(pwnlib.data.path, 'templates')],\n module_directory = cache\n )\n\n # For the SSH scenario, check that the binary is at the\n # same path on the remote host.\n if args.user:\n if not (args.path or args.exe):\n log.error(\"Must specify --path or a exe\")\n\n s = ssh(args.user, args.host, args.port or 22, args.password or None)\n\n try:\n remote = args.path or args.exe\n s.download(remote)\n except Exception:\n log.warning(\"Could not download file %r, opening a shell\", remote)\n s.interactive()\n return\n\n if not args.exe:\n args.exe = os.path.basename(args.path)\n\n template = lookup.get_template('pwnup.mako')\n output = template.render(args.exe,\n args.host,\n args.port,\n args.user,\n args.password,\n args.path,\n args.quiet)\n\n # Fix Mako formatting bs\n output = re.sub('\\n\\n\\n', '\\n\\n', output)\n\n # Colorize the output if it's a TTY\n if args.color == 'always' or (args.color == 'auto' and sys.stdout.isatty()):\n from pygments import highlight\n from 
pygments.formatters import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n output = highlight(output, PythonLexer(), TerminalFormatter())\n\n print(output)\n\n # If redirected to a file, make the resulting script executable\n if not sys.stdout.isatty():\n try: os.fchmod(sys.stdout.fileno(), 0o700)\n except OSError: pass\n\nif __name__ == '__main__':\n pwnlib.commandline.common.main(__file__)\n", "path": "pwnlib/commandline/template.py"}]}
| 1,370 | 355 |
gh_patches_debug_16887
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-7612
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--query does not work with aws eks get-token
It looks like `aws eks get-token` returns JSON-like output, but it is not pretty-printed and does not work with `--query`; for example,
```
aws eks get-token --cluster-name myclustername --query status.token
```
still returns the complete output. The output format cannot be changed either.
Tested with
```
aws --version
aws-cli/1.16.218 Python/3.6.8 Linux/4.15.0-1047-aws botocore/1.12.208
```
but others reported the same for `1.16.230`: https://stackoverflow.com/a/57878048/1545325
Thank you!
</issue>
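Until `--query` is honored, the response can be parsed client-side to confirm its shape and pull out the token — a workaround sketch only, assuming the `aws` CLI is installed and configured (cluster name taken from the issue above):

```python
# Workaround sketch: run the command and extract status.token ourselves,
# since the JSON is printed directly rather than going through the CLI formatter.
import json
import subprocess

raw = subprocess.check_output(
    ["aws", "eks", "get-token", "--cluster-name", "myclustername"]
)
token = json.loads(raw)["status"]["token"]
print(token)
```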
<code>
[start of awscli/customizations/eks/get_token.py]
1 # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import base64
14 import botocore
15 import json
16 import os
17 import sys
18
19 from datetime import datetime, timedelta
20 from botocore.signers import RequestSigner
21 from botocore.model import ServiceId
22
23 from awscli.customizations.commands import BasicCommand
24 from awscli.customizations.utils import uni_print
25 from awscli.customizations.utils import validate_mutually_exclusive
26
27 AUTH_SERVICE = "sts"
28 AUTH_COMMAND = "GetCallerIdentity"
29 AUTH_API_VERSION = "2011-06-15"
30 AUTH_SIGNING_VERSION = "v4"
31
32 ALPHA_API = "client.authentication.k8s.io/v1alpha1"
33 BETA_API = "client.authentication.k8s.io/v1beta1"
34 V1_API = "client.authentication.k8s.io/v1"
35
36 FULLY_SUPPORTED_API_VERSIONS = [
37 V1_API,
38 BETA_API,
39 ]
40 DEPRECATED_API_VERSIONS = [
41 ALPHA_API,
42 ]
43
44 ERROR_MSG_TPL = (
45 "{0} KUBERNETES_EXEC_INFO, defaulting to {1}. This is likely a "
46 "bug in your Kubernetes client. Please update your Kubernetes "
47 "client."
48 )
49 UNRECOGNIZED_MSG_TPL = (
50 "Unrecognized API version in KUBERNETES_EXEC_INFO, defaulting to "
51 "{0}. This is likely due to an outdated AWS "
52 "CLI. Please update your AWS CLI."
53 )
54 DEPRECATION_MSG_TPL = (
55 "Kubeconfig user entry is using deprecated API version {0}. Run "
56 "'aws eks update-kubeconfig' to update."
57 )
58
59 # Presigned url timeout in seconds
60 URL_TIMEOUT = 60
61
62 TOKEN_EXPIRATION_MINS = 14
63
64 TOKEN_PREFIX = 'k8s-aws-v1.'
65
66 K8S_AWS_ID_HEADER = 'x-k8s-aws-id'
67
68
69 class GetTokenCommand(BasicCommand):
70 NAME = 'get-token'
71
72 DESCRIPTION = (
73 "Get a token for authentication with an Amazon EKS cluster. "
74 "This can be used as an alternative to the "
75 "aws-iam-authenticator."
76 )
77
78 ARG_TABLE = [
79 {
80 'name': 'cluster-name',
81 'help_text': (
82 "Specify the name of the Amazon EKS cluster to create a token for. (Note: for local clusters on AWS Outposts, please use --cluster-id parameter)"
83 ),
84 'required': False,
85 },
86 {
87 'name': 'role-arn',
88 'help_text': (
89 "Assume this role for credentials when signing the token. "
90 "Use this optional parameter when the credentials for signing "
91 "the token differ from that of the current role session. "
92 "Using this parameter results in new role session credentials "
93 "that are used to sign the token."
94 ),
95 'required': False,
96 },
97 {
98 'name': 'cluster-id',
99 # When EKS in-region cluster supports cluster-id, we will need to update this help text
100 'help_text': (
101 "Specify the id of the Amazon EKS cluster to create a token for. (Note: for local clusters on AWS Outposts only)"
102 ),
103 'required': False,
104 },
105 ]
106
107 def get_expiration_time(self):
108 token_expiration = datetime.utcnow() + timedelta(
109 minutes=TOKEN_EXPIRATION_MINS
110 )
111 return token_expiration.strftime('%Y-%m-%dT%H:%M:%SZ')
112
113 def _run_main(self, parsed_args, parsed_globals):
114 client_factory = STSClientFactory(self._session)
115 sts_client = client_factory.get_sts_client(
116 region_name=parsed_globals.region, role_arn=parsed_args.role_arn
117 )
118
119 validate_mutually_exclusive(parsed_args, ['cluster_name'], ['cluster_id'])
120
121 if parsed_args.cluster_id:
122 identifier = parsed_args.cluster_id
123 elif parsed_args.cluster_name:
124 identifier = parsed_args.cluster_name
125 else:
126 return ValueError("Either parameter --cluster-name or --cluster-id must be specified.")
127
128 token = TokenGenerator(sts_client).get_token(identifier)
129
130 # By default STS signs the url for 15 minutes so we are creating a
131 # rfc3339 timestamp with expiration in 14 minutes as part of the token, which
132 # is used by some clients (client-go) who will refresh the token after 14 mins
133 token_expiration = self.get_expiration_time()
134
135 full_object = {
136 "kind": "ExecCredential",
137 "apiVersion": self.discover_api_version(),
138 "spec": {},
139 "status": {
140 "expirationTimestamp": token_expiration,
141 "token": token,
142 },
143 }
144
145 uni_print(json.dumps(full_object))
146 uni_print('\n')
147 return 0
148
149 def discover_api_version(self):
150 """
151 Parses the KUBERNETES_EXEC_INFO environment variable and returns the
152 API version. If the environment variable is malformed or invalid,
153 return the v1beta1 response and print a message to stderr.
154
155 If the v1alpha1 API is specified explicitly, a message is printed to
156 stderr with instructions to update.
157
158 :return: The client authentication API version
159 :rtype: string
160 """
161 # At the time Kubernetes v1.29 is released upstream (approx Dec 2023),
162 # "v1beta1" will be removed. At or around that time, EKS will likely
163 # support v1.22 through v1.28, in which client API version "v1beta1"
164 # will be supported by all EKS versions.
165 fallback_api_version = BETA_API
166
167 error_prefixes = {
168 "error": "Error parsing",
169 "empty": "Empty",
170 }
171
172 exec_info_raw = os.environ.get("KUBERNETES_EXEC_INFO", "")
173 if not exec_info_raw:
174 # All kube clients should be setting this, but client-go clients
175 # (kubectl, kubelet, etc) < 1.20 were not setting this if the API
176 # version defined in the kubeconfig was not v1alpha1.
177 #
178 # This was changed in kubernetes/kubernetes#95489 so that
179 # KUBERNETES_EXEC_INFO is always provided
180 return fallback_api_version
181 try:
182 exec_info = json.loads(exec_info_raw)
183 except json.JSONDecodeError:
184 # The environment variable was malformed
185 uni_print(
186 ERROR_MSG_TPL.format(
187 error_prefixes["error"],
188 fallback_api_version,
189 ),
190 sys.stderr,
191 )
192 uni_print("\n", sys.stderr)
193 return fallback_api_version
194
195 api_version_raw = exec_info.get("apiVersion")
196 if api_version_raw in FULLY_SUPPORTED_API_VERSIONS:
197 return api_version_raw
198 elif api_version_raw in DEPRECATED_API_VERSIONS:
199 uni_print(DEPRECATION_MSG_TPL.format(api_version_raw), sys.stderr)
200 uni_print("\n", sys.stderr)
201 return api_version_raw
202 else:
203 uni_print(
204 UNRECOGNIZED_MSG_TPL.format(fallback_api_version),
205 sys.stderr,
206 )
207 uni_print("\n", sys.stderr)
208 return fallback_api_version
209
210
211 class TokenGenerator(object):
212 def __init__(self, sts_client):
213 self._sts_client = sts_client
214
215 def get_token(self, k8s_aws_id):
216 """Generate a presigned url token to pass to kubectl."""
217 url = self._get_presigned_url(k8s_aws_id)
218 token = TOKEN_PREFIX + base64.urlsafe_b64encode(
219 url.encode('utf-8')
220 ).decode('utf-8').rstrip('=')
221 return token
222
223 def _get_presigned_url(self, k8s_aws_id):
224 return self._sts_client.generate_presigned_url(
225 'get_caller_identity',
226 Params={K8S_AWS_ID_HEADER: k8s_aws_id},
227 ExpiresIn=URL_TIMEOUT,
228 HttpMethod='GET',
229 )
230
231
232 class STSClientFactory(object):
233 def __init__(self, session):
234 self._session = session
235
236 def get_sts_client(self, region_name=None, role_arn=None):
237 client_kwargs = {'region_name': region_name}
238 if role_arn is not None:
239 creds = self._get_role_credentials(region_name, role_arn)
240 client_kwargs['aws_access_key_id'] = creds['AccessKeyId']
241 client_kwargs['aws_secret_access_key'] = creds['SecretAccessKey']
242 client_kwargs['aws_session_token'] = creds['SessionToken']
243 sts = self._session.create_client('sts', **client_kwargs)
244 self._register_k8s_aws_id_handlers(sts)
245 return sts
246
247 def _get_role_credentials(self, region_name, role_arn):
248 sts = self._session.create_client('sts', region_name)
249 return sts.assume_role(
250 RoleArn=role_arn, RoleSessionName='EKSGetTokenAuth'
251 )['Credentials']
252
253 def _register_k8s_aws_id_handlers(self, sts_client):
254 sts_client.meta.events.register(
255 'provide-client-params.sts.GetCallerIdentity',
256 self._retrieve_k8s_aws_id,
257 )
258 sts_client.meta.events.register(
259 'before-sign.sts.GetCallerIdentity',
260 self._inject_k8s_aws_id_header,
261 )
262
263 def _retrieve_k8s_aws_id(self, params, context, **kwargs):
264 if K8S_AWS_ID_HEADER in params:
265 context[K8S_AWS_ID_HEADER] = params.pop(K8S_AWS_ID_HEADER)
266
267 def _inject_k8s_aws_id_header(self, request, **kwargs):
268 if K8S_AWS_ID_HEADER in request.context:
269 request.headers[K8S_AWS_ID_HEADER] = request.context[K8S_AWS_ID_HEADER]
270
[end of awscli/customizations/eks/get_token.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awscli/customizations/eks/get_token.py b/awscli/customizations/eks/get_token.py
--- a/awscli/customizations/eks/get_token.py
+++ b/awscli/customizations/eks/get_token.py
@@ -20,6 +20,7 @@
from botocore.signers import RequestSigner
from botocore.model import ServiceId
+from awscli.formatter import get_formatter
from awscli.customizations.commands import BasicCommand
from awscli.customizations.utils import uni_print
from awscli.customizations.utils import validate_mutually_exclusive
@@ -142,7 +143,11 @@
},
}
- uni_print(json.dumps(full_object))
+ output = self._session.get_config_variable('output')
+ formatter = get_formatter(output, parsed_globals)
+ formatter.query = parsed_globals.query
+
+ formatter(self.NAME, full_object)
uni_print('\n')
return 0
|
{"golden_diff": "diff --git a/awscli/customizations/eks/get_token.py b/awscli/customizations/eks/get_token.py\n--- a/awscli/customizations/eks/get_token.py\n+++ b/awscli/customizations/eks/get_token.py\n@@ -20,6 +20,7 @@\n from botocore.signers import RequestSigner\n from botocore.model import ServiceId\n \n+from awscli.formatter import get_formatter\n from awscli.customizations.commands import BasicCommand\n from awscli.customizations.utils import uni_print\n from awscli.customizations.utils import validate_mutually_exclusive\n@@ -142,7 +143,11 @@\n },\n }\n \n- uni_print(json.dumps(full_object))\n+ output = self._session.get_config_variable('output')\n+ formatter = get_formatter(output, parsed_globals)\n+ formatter.query = parsed_globals.query\n+\n+ formatter(self.NAME, full_object)\n uni_print('\\n')\n return 0\n", "issue": "--query does not work with aws eks get-token\nIt looks like `aws eks get-token` returns JSON-like output, but not pretty printed, and not working with `--query`, like\r\n```\r\naws eks get-token --cluster-name myclustername --query status.token\r\n```\r\nstill returns the complete output. As well format output cannot be changed.\r\n\r\nTested with\r\n```\r\naws --version\r\naws-cli/1.16.218 Python/3.6.8 Linux/4.15.0-1047-aws botocore/1.12.208\r\n```\r\nbut others reported the same for `1.16.230`: https://stackoverflow.com/a/57878048/1545325\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport base64\nimport botocore\nimport json\nimport os\nimport sys\n\nfrom datetime import datetime, timedelta\nfrom botocore.signers import RequestSigner\nfrom botocore.model import ServiceId\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.utils import validate_mutually_exclusive\n\nAUTH_SERVICE = \"sts\"\nAUTH_COMMAND = \"GetCallerIdentity\"\nAUTH_API_VERSION = \"2011-06-15\"\nAUTH_SIGNING_VERSION = \"v4\"\n\nALPHA_API = \"client.authentication.k8s.io/v1alpha1\"\nBETA_API = \"client.authentication.k8s.io/v1beta1\"\nV1_API = \"client.authentication.k8s.io/v1\"\n\nFULLY_SUPPORTED_API_VERSIONS = [\n V1_API,\n BETA_API,\n]\nDEPRECATED_API_VERSIONS = [\n ALPHA_API,\n]\n\nERROR_MSG_TPL = (\n \"{0} KUBERNETES_EXEC_INFO, defaulting to {1}. This is likely a \"\n \"bug in your Kubernetes client. Please update your Kubernetes \"\n \"client.\"\n)\nUNRECOGNIZED_MSG_TPL = (\n \"Unrecognized API version in KUBERNETES_EXEC_INFO, defaulting to \"\n \"{0}. This is likely due to an outdated AWS \"\n \"CLI. Please update your AWS CLI.\"\n)\nDEPRECATION_MSG_TPL = (\n \"Kubeconfig user entry is using deprecated API version {0}. 
Run \"\n \"'aws eks update-kubeconfig' to update.\"\n)\n\n# Presigned url timeout in seconds\nURL_TIMEOUT = 60\n\nTOKEN_EXPIRATION_MINS = 14\n\nTOKEN_PREFIX = 'k8s-aws-v1.'\n\nK8S_AWS_ID_HEADER = 'x-k8s-aws-id'\n\n\nclass GetTokenCommand(BasicCommand):\n NAME = 'get-token'\n\n DESCRIPTION = (\n \"Get a token for authentication with an Amazon EKS cluster. \"\n \"This can be used as an alternative to the \"\n \"aws-iam-authenticator.\"\n )\n\n ARG_TABLE = [\n {\n 'name': 'cluster-name',\n 'help_text': (\n \"Specify the name of the Amazon EKS cluster to create a token for. (Note: for local clusters on AWS Outposts, please use --cluster-id parameter)\"\n ),\n 'required': False,\n },\n {\n 'name': 'role-arn',\n 'help_text': (\n \"Assume this role for credentials when signing the token. \"\n \"Use this optional parameter when the credentials for signing \"\n \"the token differ from that of the current role session. \"\n \"Using this parameter results in new role session credentials \"\n \"that are used to sign the token.\"\n ),\n 'required': False,\n },\n {\n 'name': 'cluster-id',\n # When EKS in-region cluster supports cluster-id, we will need to update this help text\n 'help_text': (\n \"Specify the id of the Amazon EKS cluster to create a token for. (Note: for local clusters on AWS Outposts only)\"\n ),\n 'required': False,\n },\n ]\n\n def get_expiration_time(self):\n token_expiration = datetime.utcnow() + timedelta(\n minutes=TOKEN_EXPIRATION_MINS\n )\n return token_expiration.strftime('%Y-%m-%dT%H:%M:%SZ')\n\n def _run_main(self, parsed_args, parsed_globals):\n client_factory = STSClientFactory(self._session)\n sts_client = client_factory.get_sts_client(\n region_name=parsed_globals.region, role_arn=parsed_args.role_arn\n )\n \n validate_mutually_exclusive(parsed_args, ['cluster_name'], ['cluster_id'])\n\n if parsed_args.cluster_id:\n identifier = parsed_args.cluster_id\n elif parsed_args.cluster_name:\n identifier = parsed_args.cluster_name\n else:\n return ValueError(\"Either parameter --cluster-name or --cluster-id must be specified.\")\n\n token = TokenGenerator(sts_client).get_token(identifier)\n\n # By default STS signs the url for 15 minutes so we are creating a\n # rfc3339 timestamp with expiration in 14 minutes as part of the token, which\n # is used by some clients (client-go) who will refresh the token after 14 mins\n token_expiration = self.get_expiration_time()\n\n full_object = {\n \"kind\": \"ExecCredential\",\n \"apiVersion\": self.discover_api_version(),\n \"spec\": {},\n \"status\": {\n \"expirationTimestamp\": token_expiration,\n \"token\": token,\n },\n }\n\n uni_print(json.dumps(full_object))\n uni_print('\\n')\n return 0\n\n def discover_api_version(self):\n \"\"\"\n Parses the KUBERNETES_EXEC_INFO environment variable and returns the\n API version. If the environment variable is malformed or invalid,\n return the v1beta1 response and print a message to stderr.\n\n If the v1alpha1 API is specified explicitly, a message is printed to\n stderr with instructions to update.\n\n :return: The client authentication API version\n :rtype: string\n \"\"\"\n # At the time Kubernetes v1.29 is released upstream (approx Dec 2023),\n # \"v1beta1\" will be removed. 
At or around that time, EKS will likely\n # support v1.22 through v1.28, in which client API version \"v1beta1\"\n # will be supported by all EKS versions.\n fallback_api_version = BETA_API\n\n error_prefixes = {\n \"error\": \"Error parsing\",\n \"empty\": \"Empty\",\n }\n\n exec_info_raw = os.environ.get(\"KUBERNETES_EXEC_INFO\", \"\")\n if not exec_info_raw:\n # All kube clients should be setting this, but client-go clients\n # (kubectl, kubelet, etc) < 1.20 were not setting this if the API\n # version defined in the kubeconfig was not v1alpha1.\n #\n # This was changed in kubernetes/kubernetes#95489 so that\n # KUBERNETES_EXEC_INFO is always provided\n return fallback_api_version\n try:\n exec_info = json.loads(exec_info_raw)\n except json.JSONDecodeError:\n # The environment variable was malformed\n uni_print(\n ERROR_MSG_TPL.format(\n error_prefixes[\"error\"],\n fallback_api_version,\n ),\n sys.stderr,\n )\n uni_print(\"\\n\", sys.stderr)\n return fallback_api_version\n\n api_version_raw = exec_info.get(\"apiVersion\")\n if api_version_raw in FULLY_SUPPORTED_API_VERSIONS:\n return api_version_raw\n elif api_version_raw in DEPRECATED_API_VERSIONS:\n uni_print(DEPRECATION_MSG_TPL.format(api_version_raw), sys.stderr)\n uni_print(\"\\n\", sys.stderr)\n return api_version_raw\n else:\n uni_print(\n UNRECOGNIZED_MSG_TPL.format(fallback_api_version),\n sys.stderr,\n )\n uni_print(\"\\n\", sys.stderr)\n return fallback_api_version\n\n\nclass TokenGenerator(object):\n def __init__(self, sts_client):\n self._sts_client = sts_client\n\n def get_token(self, k8s_aws_id):\n \"\"\"Generate a presigned url token to pass to kubectl.\"\"\"\n url = self._get_presigned_url(k8s_aws_id)\n token = TOKEN_PREFIX + base64.urlsafe_b64encode(\n url.encode('utf-8')\n ).decode('utf-8').rstrip('=')\n return token\n\n def _get_presigned_url(self, k8s_aws_id):\n return self._sts_client.generate_presigned_url(\n 'get_caller_identity',\n Params={K8S_AWS_ID_HEADER: k8s_aws_id},\n ExpiresIn=URL_TIMEOUT,\n HttpMethod='GET',\n )\n\n\nclass STSClientFactory(object):\n def __init__(self, session):\n self._session = session\n\n def get_sts_client(self, region_name=None, role_arn=None):\n client_kwargs = {'region_name': region_name}\n if role_arn is not None:\n creds = self._get_role_credentials(region_name, role_arn)\n client_kwargs['aws_access_key_id'] = creds['AccessKeyId']\n client_kwargs['aws_secret_access_key'] = creds['SecretAccessKey']\n client_kwargs['aws_session_token'] = creds['SessionToken']\n sts = self._session.create_client('sts', **client_kwargs)\n self._register_k8s_aws_id_handlers(sts)\n return sts\n\n def _get_role_credentials(self, region_name, role_arn):\n sts = self._session.create_client('sts', region_name)\n return sts.assume_role(\n RoleArn=role_arn, RoleSessionName='EKSGetTokenAuth'\n )['Credentials']\n\n def _register_k8s_aws_id_handlers(self, sts_client):\n sts_client.meta.events.register(\n 'provide-client-params.sts.GetCallerIdentity',\n self._retrieve_k8s_aws_id,\n )\n sts_client.meta.events.register(\n 'before-sign.sts.GetCallerIdentity',\n self._inject_k8s_aws_id_header,\n )\n\n def _retrieve_k8s_aws_id(self, params, context, **kwargs):\n if K8S_AWS_ID_HEADER in params:\n context[K8S_AWS_ID_HEADER] = params.pop(K8S_AWS_ID_HEADER)\n\n def _inject_k8s_aws_id_header(self, request, **kwargs):\n if K8S_AWS_ID_HEADER in request.context:\n request.headers[K8S_AWS_ID_HEADER] = request.context[K8S_AWS_ID_HEADER]\n", "path": "awscli/customizations/eks/get_token.py"}]}
| 3,689 | 206 |
gh_patches_debug_20961
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-16094
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add current NetBox version in the `PluginConfig.validate()` error output
### NetBox version
v4.0.1
### Feature type
New functionality
### Proposed functionality
When [`PluginConfig.validate()`](https://github.com/netbox-community/netbox/blob/4a64a3f6e0d0edf27996422eb2dbe0e197a6bea5/netbox/netbox/plugins/__init__.py#L133) determines that the current NetBox version does not meet the plugin requirements, also print out the current NetBox version in the exception message.
### Use case
Currently the error message only prints the version that the plugin mandates, but due to possible installation directory confusion it would be good to also print the current NetBox version. Example case: https://github.com/netbox-community/netbox/issues/16088
### Database changes
None
### External dependencies
None
</issue>
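A minimal sketch of the proposed error text, written as a standalone check (names mirror `PluginConfig.validate()`; the real logic lives on the class, and Django plus `packaging` are assumed importable as they are in a NetBox environment):

```python
# Hypothetical standalone version of the minimum-version check, with the
# current NetBox version included in the exception message as proposed.
from django.core.exceptions import ImproperlyConfigured
from packaging import version

def check_min_version(plugin_module: str, min_version: str, netbox_version: str) -> None:
    if version.parse(netbox_version) < version.parse(min_version):
        raise ImproperlyConfigured(
            f"Plugin {plugin_module} requires NetBox minimum version {min_version} "
            f"(current: {netbox_version})."
        )
```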
<code>
[start of netbox/netbox/plugins/__init__.py]
1 import collections
2 from importlib import import_module
3
4 from django.apps import AppConfig
5 from django.core.exceptions import ImproperlyConfigured
6 from django.utils.module_loading import import_string
7 from packaging import version
8
9 from netbox.registry import registry
10 from netbox.search import register_search
11 from netbox.utils import register_data_backend
12 from .navigation import *
13 from .registration import *
14 from .templates import *
15 from .utils import *
16
17 # Initialize plugin registry
18 registry['plugins'].update({
19 'graphql_schemas': [],
20 'menus': [],
21 'menu_items': {},
22 'preferences': {},
23 'template_extensions': collections.defaultdict(list),
24 })
25
26 DEFAULT_RESOURCE_PATHS = {
27 'search_indexes': 'search.indexes',
28 'data_backends': 'data_backends.backends',
29 'graphql_schema': 'graphql.schema',
30 'menu': 'navigation.menu',
31 'menu_items': 'navigation.menu_items',
32 'template_extensions': 'template_content.template_extensions',
33 'user_preferences': 'preferences.preferences',
34 }
35
36
37 #
38 # Plugin AppConfig class
39 #
40
41 class PluginConfig(AppConfig):
42 """
43 Subclass of Django's built-in AppConfig class, to be used for NetBox plugins.
44 """
45 # Plugin metadata
46 author = ''
47 author_email = ''
48 description = ''
49 version = ''
50
51 # Root URL path under /plugins. If not set, the plugin's label will be used.
52 base_url = None
53
54 # Minimum/maximum compatible versions of NetBox
55 min_version = None
56 max_version = None
57
58 # Default configuration parameters
59 default_settings = {}
60
61 # Mandatory configuration parameters
62 required_settings = []
63
64 # Middleware classes provided by the plugin
65 middleware = []
66
67 # Django-rq queues dedicated to the plugin
68 queues = []
69
70 # Django apps to append to INSTALLED_APPS when plugin requires them.
71 django_apps = []
72
73 # Optional plugin resources
74 search_indexes = None
75 data_backends = None
76 graphql_schema = None
77 menu = None
78 menu_items = None
79 template_extensions = None
80 user_preferences = None
81
82 def _load_resource(self, name):
83 # Import from the configured path, if defined.
84 if path := getattr(self, name, None):
85 return import_string(f"{self.__module__}.{path}")
86
87 # Fall back to the resource's default path. Return None if the module has not been provided.
88 default_path = f'{self.__module__}.{DEFAULT_RESOURCE_PATHS[name]}'
89 default_module, resource_name = default_path.rsplit('.', 1)
90 try:
91 module = import_module(default_module)
92 return getattr(module, resource_name, None)
93 except ModuleNotFoundError:
94 pass
95
96 def ready(self):
97 from netbox.models.features import register_models
98
99 # Register models
100 register_models(*self.get_models())
101
102 plugin_name = self.name.rsplit('.', 1)[-1]
103
104 # Register search extensions (if defined)
105 search_indexes = self._load_resource('search_indexes') or []
106 for idx in search_indexes:
107 register_search(idx)
108
109 # Register data backends (if defined)
110 data_backends = self._load_resource('data_backends') or []
111 for backend in data_backends:
112 register_data_backend()(backend)
113
114 # Register template content (if defined)
115 if template_extensions := self._load_resource('template_extensions'):
116 register_template_extensions(template_extensions)
117
118 # Register navigation menu and/or menu items (if defined)
119 if menu := self._load_resource('menu'):
120 register_menu(menu)
121 if menu_items := self._load_resource('menu_items'):
122 register_menu_items(self.verbose_name, menu_items)
123
124 # Register GraphQL schema (if defined)
125 if graphql_schema := self._load_resource('graphql_schema'):
126 register_graphql_schema(graphql_schema)
127
128 # Register user preferences (if defined)
129 if user_preferences := self._load_resource('user_preferences'):
130 register_user_preferences(plugin_name, user_preferences)
131
132 @classmethod
133 def validate(cls, user_config, netbox_version):
134
135 # Enforce version constraints
136 current_version = version.parse(netbox_version)
137 if cls.min_version is not None:
138 min_version = version.parse(cls.min_version)
139 if current_version < min_version:
140 raise ImproperlyConfigured(
141 f"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}."
142 )
143 if cls.max_version is not None:
144 max_version = version.parse(cls.max_version)
145 if current_version > max_version:
146 raise ImproperlyConfigured(
147 f"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}."
148 )
149
150 # Verify required configuration settings
151 for setting in cls.required_settings:
152 if setting not in user_config:
153 raise ImproperlyConfigured(
154 f"Plugin {cls.__module__} requires '{setting}' to be present in the PLUGINS_CONFIG section of "
155 f"configuration.py."
156 )
157
158 # Apply default configuration values
159 for setting, value in cls.default_settings.items():
160 if setting not in user_config:
161 user_config[setting] = value
162
[end of netbox/netbox/plugins/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/netbox/plugins/__init__.py b/netbox/netbox/plugins/__init__.py
--- a/netbox/netbox/plugins/__init__.py
+++ b/netbox/netbox/plugins/__init__.py
@@ -138,13 +138,15 @@
min_version = version.parse(cls.min_version)
if current_version < min_version:
raise ImproperlyConfigured(
- f"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}."
+ f"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version} (current: "
+ f"{netbox_version})."
)
if cls.max_version is not None:
max_version = version.parse(cls.max_version)
if current_version > max_version:
raise ImproperlyConfigured(
- f"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}."
+ f"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version} (current: "
+ f"{netbox_version})."
)
# Verify required configuration settings
|
{"golden_diff": "diff --git a/netbox/netbox/plugins/__init__.py b/netbox/netbox/plugins/__init__.py\n--- a/netbox/netbox/plugins/__init__.py\n+++ b/netbox/netbox/plugins/__init__.py\n@@ -138,13 +138,15 @@\n min_version = version.parse(cls.min_version)\n if current_version < min_version:\n raise ImproperlyConfigured(\n- f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}.\"\n+ f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version} (current: \"\n+ f\"{netbox_version}).\"\n )\n if cls.max_version is not None:\n max_version = version.parse(cls.max_version)\n if current_version > max_version:\n raise ImproperlyConfigured(\n- f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}.\"\n+ f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version} (current: \"\n+ f\"{netbox_version}).\"\n )\n \n # Verify required configuration settings\n", "issue": "Add current NetBox version in the `PluginConfig.validate()` error output\n### NetBox version\n\nv4.0.1\n\n### Feature type\n\nNew functionality\n\n### Proposed functionality\n\nWhen [`PluginConfig.validate()`](https://github.com/netbox-community/netbox/blob/4a64a3f6e0d0edf27996422eb2dbe0e197a6bea5/netbox/netbox/plugins/__init__.py#L133) determines that the current NetBox version does not meet the plugin requirements, also print out the current NetBox version in the exception message.\n\n### Use case\n\nCurrently the error message only prints the version that the plugin mandates, but due to possible installation directory confusion it would be good to also print the current NetBox version. Example case: https://github.com/netbox-community/netbox/issues/16088\r\n\n\n### Database changes\n\nNone\n\n### External dependencies\n\nNone\n", "before_files": [{"content": "import collections\nfrom importlib import import_module\n\nfrom django.apps import AppConfig\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.utils.module_loading import import_string\nfrom packaging import version\n\nfrom netbox.registry import registry\nfrom netbox.search import register_search\nfrom netbox.utils import register_data_backend\nfrom .navigation import *\nfrom .registration import *\nfrom .templates import *\nfrom .utils import *\n\n# Initialize plugin registry\nregistry['plugins'].update({\n 'graphql_schemas': [],\n 'menus': [],\n 'menu_items': {},\n 'preferences': {},\n 'template_extensions': collections.defaultdict(list),\n})\n\nDEFAULT_RESOURCE_PATHS = {\n 'search_indexes': 'search.indexes',\n 'data_backends': 'data_backends.backends',\n 'graphql_schema': 'graphql.schema',\n 'menu': 'navigation.menu',\n 'menu_items': 'navigation.menu_items',\n 'template_extensions': 'template_content.template_extensions',\n 'user_preferences': 'preferences.preferences',\n}\n\n\n#\n# Plugin AppConfig class\n#\n\nclass PluginConfig(AppConfig):\n \"\"\"\n Subclass of Django's built-in AppConfig class, to be used for NetBox plugins.\n \"\"\"\n # Plugin metadata\n author = ''\n author_email = ''\n description = ''\n version = ''\n\n # Root URL path under /plugins. 
If not set, the plugin's label will be used.\n base_url = None\n\n # Minimum/maximum compatible versions of NetBox\n min_version = None\n max_version = None\n\n # Default configuration parameters\n default_settings = {}\n\n # Mandatory configuration parameters\n required_settings = []\n\n # Middleware classes provided by the plugin\n middleware = []\n\n # Django-rq queues dedicated to the plugin\n queues = []\n\n # Django apps to append to INSTALLED_APPS when plugin requires them.\n django_apps = []\n\n # Optional plugin resources\n search_indexes = None\n data_backends = None\n graphql_schema = None\n menu = None\n menu_items = None\n template_extensions = None\n user_preferences = None\n\n def _load_resource(self, name):\n # Import from the configured path, if defined.\n if path := getattr(self, name, None):\n return import_string(f\"{self.__module__}.{path}\")\n\n # Fall back to the resource's default path. Return None if the module has not been provided.\n default_path = f'{self.__module__}.{DEFAULT_RESOURCE_PATHS[name]}'\n default_module, resource_name = default_path.rsplit('.', 1)\n try:\n module = import_module(default_module)\n return getattr(module, resource_name, None)\n except ModuleNotFoundError:\n pass\n\n def ready(self):\n from netbox.models.features import register_models\n\n # Register models\n register_models(*self.get_models())\n\n plugin_name = self.name.rsplit('.', 1)[-1]\n\n # Register search extensions (if defined)\n search_indexes = self._load_resource('search_indexes') or []\n for idx in search_indexes:\n register_search(idx)\n\n # Register data backends (if defined)\n data_backends = self._load_resource('data_backends') or []\n for backend in data_backends:\n register_data_backend()(backend)\n\n # Register template content (if defined)\n if template_extensions := self._load_resource('template_extensions'):\n register_template_extensions(template_extensions)\n\n # Register navigation menu and/or menu items (if defined)\n if menu := self._load_resource('menu'):\n register_menu(menu)\n if menu_items := self._load_resource('menu_items'):\n register_menu_items(self.verbose_name, menu_items)\n\n # Register GraphQL schema (if defined)\n if graphql_schema := self._load_resource('graphql_schema'):\n register_graphql_schema(graphql_schema)\n\n # Register user preferences (if defined)\n if user_preferences := self._load_resource('user_preferences'):\n register_user_preferences(plugin_name, user_preferences)\n\n @classmethod\n def validate(cls, user_config, netbox_version):\n\n # Enforce version constraints\n current_version = version.parse(netbox_version)\n if cls.min_version is not None:\n min_version = version.parse(cls.min_version)\n if current_version < min_version:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires NetBox minimum version {cls.min_version}.\"\n )\n if cls.max_version is not None:\n max_version = version.parse(cls.max_version)\n if current_version > max_version:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires NetBox maximum version {cls.max_version}.\"\n )\n\n # Verify required configuration settings\n for setting in cls.required_settings:\n if setting not in user_config:\n raise ImproperlyConfigured(\n f\"Plugin {cls.__module__} requires '{setting}' to be present in the PLUGINS_CONFIG section of \"\n f\"configuration.py.\"\n )\n\n # Apply default configuration values\n for setting, value in cls.default_settings.items():\n if setting not in user_config:\n user_config[setting] = value\n", "path": 
"netbox/netbox/plugins/__init__.py"}]}
| 2,224 | 249 |
gh_patches_debug_57172
|
rasdani/github-patches
|
git_diff
|
jupyterhub__zero-to-jupyterhub-k8s-373
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
build.py corrupts yaml
## Description
I ran into an issue with entries in values.yaml file after using the build.py script. My `extraVolume` entries were corrupted.
## Reproduction
```yaml
# create this file and name it values.yaml
root-1:
# offset=2 will indent the dash 2 spaces (default: 2)
# sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)
- indent-status-1: ok
indent-status-2: bad after ./test.py runs
root-2:
# mapping=2 will indent the following entry 2 spaces (default: 2)
name: old-name
```
```python
# create this runnable.py file, and run it
import os
from ruamel.yaml import YAML
def main():
yaml = YAML()
yaml.indent(mapping=2, offset=2, sequence=4)
with open('values.yaml') as f:
values = yaml.load(f)
values['root-2'].update({'name': 'new-name'})
with open('values.yaml', 'w') as f:
yaml.dump(values, f)
main()
```
## Corrupt output
#### Look at `indent-status-2`: it has extra indentation, causing the YAML to become corrupt.
```yaml
# create this file and name it values.yaml
root-1:
# offset=2 will indent the dash 2 spaces (default: 2)
# sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)
- indent-status-1: ok
indent-status-2: bad after ./test.py runs
root-2:
# mapping=2 will indent the following entry 2 spaces (default: 2)
name: new-name
```
build.py corrupts yaml
## Description
I ran into an issue with entries in values.yaml file after using the build.py script. My `extraVolume` entries were corrupted.
## Reproduction
```yaml
# create this file and name it values.yaml
root-1:
# offset=2 will indent the dash 2 spaces (default: 2)
# sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)
- indent-status-1: ok
indent-status-2: bad after ./test.py runs
root-2:
# mapping=2 will indent the following entry 2 spaces (default: 2)
name: old-name
```
```python
# create this runnable.py file, and run it
import os
from ruamel.yaml import YAML
def main():
yaml = YAML()
yaml.indent(mapping=2, offset=2, sequence=4)
with open('values.yaml') as f:
values = yaml.load(f)
values['root-2'].update({'name': 'new-name'})
with open('values.yaml', 'w') as f:
yaml.dump(values, f)
main()
```
## Corrupt output
#### Look at `indent-status-2`: it has extra indentation, causing the YAML to become corrupt.
```yaml
# create this file and name it values.yaml
root-1:
# offset=2 will indent the dash 2 spaces (default: 2)
# sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)
- indent-status-1: ok
indent-status-2: bad after ./test.py runs
root-2:
# mapping=2 will indent the following entry 2 spaces (default: 2)
name: new-name
```
</issue>
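For reference, the loader configuration that matches the 4-space sequence layout shown in the reproduction above — a minimal sketch, assuming ruamel.yaml is installed:

```python
# Configure the round-trip loader so block-sequence items are indented 4 spaces
# with the dash offset by 2, matching the values.yaml layout in the issue.
from ruamel.yaml import YAML

yaml = YAML(typ="rt")
yaml.indent(mapping=2, offset=2, sequence=4)
```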
<code>
[start of build.py]
1 #!/usr/bin/env python3
2 import argparse
3 import os
4 import subprocess
5 import shutil
6 from tempfile import TemporaryDirectory
7
8 from ruamel.yaml import YAML
9
10 # use safe roundtrip yaml loader
11 yaml = YAML(typ='rt')
12 yaml.indent(offset=2)
13
14 def last_modified_commit(*paths, **kwargs):
15 return subprocess.check_output([
16 'git',
17 'log',
18 '-n', '1',
19 '--pretty=format:%h',
20 *paths
21 ], **kwargs).decode('utf-8')
22
23 def last_modified_date(*paths, **kwargs):
24 return subprocess.check_output([
25 'git',
26 'log',
27 '-n', '1',
28 '--pretty=format:%cd',
29 '--date=iso',
30 *paths
31 ], **kwargs).decode('utf-8')
32
33 def path_touched(*paths, commit_range):
34 return subprocess.check_output([
35 'git', 'diff', '--name-only', commit_range, *paths
36 ]).decode('utf-8').strip() != ''
37
38
39 def render_build_args(options, ns):
40 """Get docker build args dict, rendering any templated args."""
41 build_args = options.get('buildArgs', {})
42 for key, value in build_args.items():
43 build_args[key] = value.format(**ns)
44 return build_args
45
46 def build_image(image_path, image_spec, build_args):
47 cmd = ['docker', 'build', '-t', image_spec, image_path]
48
49 for k, v in build_args.items():
50 cmd += ['--build-arg', '{}={}'.format(k, v)]
51 subprocess.check_call(cmd)
52
53 def build_images(prefix, images, tag=None, commit_range=None, push=False):
54 value_modifications = {}
55 for name, options in images.items():
56 image_path = os.path.join('images', name)
57 paths = options.get('paths', []) + [image_path]
58 last_commit = last_modified_commit(*paths)
59 if tag is None:
60 tag = last_commit
61 image_name = prefix + name
62 image_spec = '{}:{}'.format(image_name, tag)
63 value_modifications[options['valuesPath']] = {
64 'name': image_name,
65 'tag': tag
66 }
67
68 if commit_range and not path_touched(*paths, commit_range=commit_range):
69 print(f"Skipping {name}, not touched in {commit_range}")
70 continue
71
72 template_namespace = {
73 'LAST_COMMIT': last_commit,
74 'TAG': tag,
75 }
76
77 build_args = render_build_args(options, template_namespace)
78 build_image(image_path, image_spec, build_args)
79
80 if push:
81 subprocess.check_call([
82 'docker', 'push', image_spec
83 ])
84 return value_modifications
85
86 def build_values(name, values_mods):
87 """Update name/values.yaml with modifications"""
88
89 values_file = os.path.join(name, 'values.yaml')
90
91 with open(values_file) as f:
92 values = yaml.load(f)
93
94 for key, value in values_mods.items():
95 parts = key.split('.')
96 mod_obj = values
97 for p in parts:
98 mod_obj = mod_obj[p]
99 mod_obj.update(value)
100
101
102 with open(values_file, 'w') as f:
103 yaml.dump(values, f)
104
105
106 def build_chart(name, version=None, paths=None):
107 """Update chart with specified version or last-modified commit in path(s)"""
108 chart_file = os.path.join(name, 'Chart.yaml')
109 with open(chart_file) as f:
110 chart = yaml.load(f)
111
112 if version is None:
113 if paths is None:
114 paths = ['.']
115 commit = last_modified_commit(*paths)
116 version = chart['version'].split('-')[0] + '-' + commit
117
118 chart['version'] = version
119
120 with open(chart_file, 'w') as f:
121 yaml.dump(chart, f)
122
123
124 def publish_pages(name, paths, git_repo, published_repo):
125 """publish helm chart index to github pages"""
126 version = last_modified_commit(*paths)
127 checkout_dir = '{}-{}'.format(name, version)
128 subprocess.check_call([
129 'git', 'clone', '--no-checkout',
130 '[email protected]:{}'.format(git_repo), checkout_dir],
131 )
132 subprocess.check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
133
134 # package the latest version into a temporary directory
135 # and run helm repo index with --merge to update index.yaml
136 # without refreshing all of the timestamps
137 with TemporaryDirectory() as td:
138 subprocess.check_call([
139 'helm', 'package', name,
140 '--destination', td + '/',
141 ])
142
143 subprocess.check_call([
144 'helm', 'repo', 'index', td,
145 '--url', published_repo,
146 '--merge', os.path.join(checkout_dir, 'index.yaml'),
147 ])
148
149 # equivalent to `cp td/* checkout/`
150 # copies new helm chart and updated index.yaml
151 for f in os.listdir(td):
152 shutil.copy2(
153 os.path.join(td, f),
154 os.path.join(checkout_dir, f)
155 )
156 subprocess.check_call(['git', 'add', '.'], cwd=checkout_dir)
157 subprocess.check_call([
158 'git',
159 'commit',
160 '-m', '[{}] Automatic update for commit {}'.format(name, version)
161 ], cwd=checkout_dir)
162 subprocess.check_call(
163 ['git', 'push', 'origin', 'gh-pages'],
164 cwd=checkout_dir,
165 )
166
167
168 def main():
169 with open('chartpress.yaml') as f:
170 config = yaml.load(f)
171
172 argparser = argparse.ArgumentParser()
173
174 argparser.add_argument('--commit-range', help='Range of commits to consider when building images')
175 argparser.add_argument('--push', action='store_true')
176 argparser.add_argument('--publish-chart', action='store_true')
177 argparser.add_argument('--tag', default=None, help='Use this tag for images & charts')
178
179 args = argparser.parse_args()
180
181 for chart in config['charts']:
182 value_mods = build_images(chart['imagePrefix'], chart['images'], args.tag, args.commit_range, args.push)
183 build_values(chart['name'], value_mods)
184 chart_paths = ['.'] + chart.get('paths', [])
185 build_chart(chart['name'], paths=chart_paths, version=args.tag)
186 if args.publish_chart:
187 publish_pages(chart['name'],
188 paths=chart_paths,
189 git_repo=chart['repo']['git'],
190 published_repo=chart['repo']['published'],
191 )
192
193 main()
194
[end of build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/build.py b/build.py
--- a/build.py
+++ b/build.py
@@ -9,7 +9,7 @@
# use safe roundtrip yaml loader
yaml = YAML(typ='rt')
-yaml.indent(offset=2)
+yaml.indent(mapping=2, offset=2, sequence=4)
def last_modified_commit(*paths, **kwargs):
return subprocess.check_output([
|
{"golden_diff": "diff --git a/build.py b/build.py\n--- a/build.py\n+++ b/build.py\n@@ -9,7 +9,7 @@\n \n # use safe roundtrip yaml loader\n yaml = YAML(typ='rt')\n-yaml.indent(offset=2)\n+yaml.indent(mapping=2, offset=2, sequence=4)\n \n def last_modified_commit(*paths, **kwargs):\n return subprocess.check_output([\n", "issue": "build.py corrupts yaml\n## Description\r\nI ran into an issue with entries in values.yaml file after using the build.py script. My `extraVolume` entries were corrupted.\r\n\r\n## Reproduction\r\n\r\n```yaml\r\n# create this file and name it values.yaml\r\nroot-1:\r\n # offset=2 will indent the dash 2 spaces (default: 2)\r\n # sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)\r\n - indent-status-1: ok\r\n indent-status-2: bad after ./test.py runs\r\nroot-2:\r\n # mapping=2 will indent the following entry 2 spaces (default: 2)\r\n name: old-name\r\n```\r\n\r\n```python\r\n# create this runnable.py file, and run it\r\nimport os\r\nfrom ruamel.yaml import YAML\r\n\r\ndef main():\r\n yaml = YAML()\r\n yaml.indent(mapping=2, offset=2, sequence=4)\r\n\r\n with open('values.yaml') as f:\r\n values = yaml.load(f)\r\n\r\n values['root-2'].update({'name': 'new-name'})\r\n\r\n with open('values.yaml', 'w') as f:\r\n yaml.dump(values, f)\r\n\r\n\r\nmain()\r\n```\r\n\r\n## Corrupt output\r\n\r\n#### Look at `indent-status-2`, it has a indentation causing the yaml to become corrupt.\r\n```yaml\r\n# create this file and name it values.yaml\r\nroot-1:\r\n # offset=2 will indent the dash 2 spaces (default: 2)\r\n # sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)\r\n - indent-status-1: ok\r\n indent-status-2: bad after ./test.py runs\r\nroot-2:\r\n # mapping=2 will indent the following entry 2 spaces (default: 2)\r\n name: new-name\r\n```\nbuild.py corrupts yaml\n## Description\r\nI ran into an issue with entries in values.yaml file after using the build.py script. 
My `extraVolume` entries were corrupted.\r\n\r\n## Reproduction\r\n\r\n```yaml\r\n# create this file and name it values.yaml\r\nroot-1:\r\n # offset=2 will indent the dash 2 spaces (default: 2)\r\n # sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)\r\n - indent-status-1: ok\r\n indent-status-2: bad after ./test.py runs\r\nroot-2:\r\n # mapping=2 will indent the following entry 2 spaces (default: 2)\r\n name: old-name\r\n```\r\n\r\n```python\r\n# create this runnable.py file, and run it\r\nimport os\r\nfrom ruamel.yaml import YAML\r\n\r\ndef main():\r\n yaml = YAML()\r\n yaml.indent(mapping=2, offset=2, sequence=4)\r\n\r\n with open('values.yaml') as f:\r\n values = yaml.load(f)\r\n\r\n values['root-2'].update({'name': 'new-name'})\r\n\r\n with open('values.yaml', 'w') as f:\r\n yaml.dump(values, f)\r\n\r\n\r\nmain()\r\n```\r\n\r\n## Corrupt output\r\n\r\n#### Look at `indent-status-2`, it has a indentation causing the yaml to become corrupt.\r\n```yaml\r\n# create this file and name it values.yaml\r\nroot-1:\r\n # offset=2 will indent the dash 2 spaces (default: 2)\r\n # sequence=4 will indent indent-status-2 4 spaces and ignore what offset is used (default: 2)\r\n - indent-status-1: ok\r\n indent-status-2: bad after ./test.py runs\r\nroot-2:\r\n # mapping=2 will indent the following entry 2 spaces (default: 2)\r\n name: new-name\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport argparse\nimport os\nimport subprocess\nimport shutil\nfrom tempfile import TemporaryDirectory\n\nfrom ruamel.yaml import YAML\n\n# use safe roundtrip yaml loader\nyaml = YAML(typ='rt')\nyaml.indent(offset=2)\n\ndef last_modified_commit(*paths, **kwargs):\n return subprocess.check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n *paths\n ], **kwargs).decode('utf-8')\n\ndef last_modified_date(*paths, **kwargs):\n return subprocess.check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%cd',\n '--date=iso',\n *paths\n ], **kwargs).decode('utf-8')\n\ndef path_touched(*paths, commit_range):\n return subprocess.check_output([\n 'git', 'diff', '--name-only', commit_range, *paths\n ]).decode('utf-8').strip() != ''\n\n\ndef render_build_args(options, ns):\n \"\"\"Get docker build args dict, rendering any templated args.\"\"\"\n build_args = options.get('buildArgs', {})\n for key, value in build_args.items():\n build_args[key] = value.format(**ns)\n return build_args\n\ndef build_image(image_path, image_spec, build_args):\n cmd = ['docker', 'build', '-t', image_spec, image_path]\n\n for k, v in build_args.items():\n cmd += ['--build-arg', '{}={}'.format(k, v)]\n subprocess.check_call(cmd)\n\ndef build_images(prefix, images, tag=None, commit_range=None, push=False):\n value_modifications = {}\n for name, options in images.items():\n image_path = os.path.join('images', name)\n paths = options.get('paths', []) + [image_path]\n last_commit = last_modified_commit(*paths)\n if tag is None:\n tag = last_commit\n image_name = prefix + name\n image_spec = '{}:{}'.format(image_name, tag)\n value_modifications[options['valuesPath']] = {\n 'name': image_name,\n 'tag': tag\n }\n\n if commit_range and not path_touched(*paths, commit_range=commit_range):\n print(f\"Skipping {name}, not touched in {commit_range}\")\n continue\n\n template_namespace = {\n 'LAST_COMMIT': last_commit,\n 'TAG': tag,\n }\n\n build_args = render_build_args(options, template_namespace)\n build_image(image_path, image_spec, build_args)\n\n if push:\n 
subprocess.check_call([\n 'docker', 'push', image_spec\n ])\n return value_modifications\n\ndef build_values(name, values_mods):\n \"\"\"Update name/values.yaml with modifications\"\"\"\n\n values_file = os.path.join(name, 'values.yaml')\n\n with open(values_file) as f:\n values = yaml.load(f)\n\n for key, value in values_mods.items():\n parts = key.split('.')\n mod_obj = values\n for p in parts:\n mod_obj = mod_obj[p]\n mod_obj.update(value)\n\n\n with open(values_file, 'w') as f:\n yaml.dump(values, f)\n\n\ndef build_chart(name, version=None, paths=None):\n \"\"\"Update chart with specified version or last-modified commit in path(s)\"\"\"\n chart_file = os.path.join(name, 'Chart.yaml')\n with open(chart_file) as f:\n chart = yaml.load(f)\n\n if version is None:\n if paths is None:\n paths = ['.']\n commit = last_modified_commit(*paths)\n version = chart['version'].split('-')[0] + '-' + commit\n\n chart['version'] = version\n\n with open(chart_file, 'w') as f:\n yaml.dump(chart, f)\n\n\ndef publish_pages(name, paths, git_repo, published_repo):\n \"\"\"publish helm chart index to github pages\"\"\"\n version = last_modified_commit(*paths)\n checkout_dir = '{}-{}'.format(name, version)\n subprocess.check_call([\n 'git', 'clone', '--no-checkout',\n '[email protected]:{}'.format(git_repo), checkout_dir],\n )\n subprocess.check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)\n\n # package the latest version into a temporary directory\n # and run helm repo index with --merge to update index.yaml\n # without refreshing all of the timestamps\n with TemporaryDirectory() as td:\n subprocess.check_call([\n 'helm', 'package', name,\n '--destination', td + '/',\n ])\n\n subprocess.check_call([\n 'helm', 'repo', 'index', td,\n '--url', published_repo,\n '--merge', os.path.join(checkout_dir, 'index.yaml'),\n ])\n\n # equivalent to `cp td/* checkout/`\n # copies new helm chart and updated index.yaml\n for f in os.listdir(td):\n shutil.copy2(\n os.path.join(td, f),\n os.path.join(checkout_dir, f)\n )\n subprocess.check_call(['git', 'add', '.'], cwd=checkout_dir)\n subprocess.check_call([\n 'git',\n 'commit',\n '-m', '[{}] Automatic update for commit {}'.format(name, version)\n ], cwd=checkout_dir)\n subprocess.check_call(\n ['git', 'push', 'origin', 'gh-pages'],\n cwd=checkout_dir,\n )\n\n\ndef main():\n with open('chartpress.yaml') as f:\n config = yaml.load(f)\n\n argparser = argparse.ArgumentParser()\n\n argparser.add_argument('--commit-range', help='Range of commits to consider when building images')\n argparser.add_argument('--push', action='store_true')\n argparser.add_argument('--publish-chart', action='store_true')\n argparser.add_argument('--tag', default=None, help='Use this tag for images & charts')\n\n args = argparser.parse_args()\n\n for chart in config['charts']:\n value_mods = build_images(chart['imagePrefix'], chart['images'], args.tag, args.commit_range, args.push)\n build_values(chart['name'], value_mods)\n chart_paths = ['.'] + chart.get('paths', [])\n build_chart(chart['name'], paths=chart_paths, version=args.tag)\n if args.publish_chart:\n publish_pages(chart['name'],\n paths=chart_paths,\n git_repo=chart['repo']['git'],\n published_repo=chart['repo']['published'],\n )\n\nmain()\n", "path": "build.py"}]}
| 3,200 | 85 |
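A minimal sketch of the ruamel.yaml indentation settings involved in the fix above, using the same kind of round-trip edit the issue reproduces; the `values.yaml` path is illustrative:

```python
# Round-trip edit with explicit indent settings. With only offset=2, dumped block
# sequences can lose the extra indent under each "-" entry, which is the corruption
# reported in the issue; mapping=2, offset=2, sequence=4 keeps the original layout.
from ruamel.yaml import YAML

yaml = YAML(typ="rt")                          # round-trip loader keeps comments
yaml.indent(mapping=2, offset=2, sequence=4)   # same call as in the golden diff

with open("values.yaml") as f:                 # illustrative path
    values = yaml.load(f)

values["root-2"]["name"] = "new-name"          # the edit from the issue's repro script

with open("values.yaml", "w") as f:
    yaml.dump(values, f)
```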
gh_patches_debug_15484
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-3239
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Legacy and Warehouse RSS feeds differ
@andrew asked in https://github.com/librariesio/libraries.io/issues/2024#issuecomment-372638824 about Warehouse's RSS feeds:
> * https://pypi.org/rss/updates.xml
> * https://pypi.org/rss/packages.xml
> Which I expected the contents to match the old ones but currently don't:
> * https://pypi.python.org/pypi?%3Aaction=rss
> * https://pypi.python.org/pypi?%3Aaction=packages_rss
I've verified through visual inspection that the data in the legacy RSS feed and the data in the Warehouse RSS feeds differ.
This is a bug in the feeds or a bug in the docs. Currently our [feeds documentation](https://warehouse.readthedocs.io/api-reference/feeds/) and [Warehouse migration guide](https://warehouse.readthedocs.io/api-reference/integration-guide/) don't say anything about deliberate differences between the legacy and Warehouse RSS feeds. We can update the docs if there's a deliberate reason for the difference.
</issue>
<code>
[start of warehouse/rss/views.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.view import view_config
14 from sqlalchemy.orm import joinedload
15
16 from warehouse.cache.origin import origin_cache
17 from warehouse.packaging.models import Project, Release
18 from warehouse.xml import XML_CSP
19
20
21 @view_config(
22 route_name="rss.updates",
23 renderer="rss/updates.xml",
24 decorator=[
25 origin_cache(
26 1 * 24 * 60 * 60, # 1 day
27 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
28 stale_if_error=5 * 24 * 60 * 60, # 5 days
29 ),
30 ],
31 )
32 def rss_updates(request):
33 request.response.content_type = "text/xml"
34
35 request.find_service(name="csp").merge(XML_CSP)
36
37 latest_releases = (
38 request.db.query(Release)
39 .options(joinedload(Release.project))
40 .order_by(Release.created.desc())
41 .limit(40)
42 .all()
43 )
44
45 return {"latest_releases": latest_releases}
46
47
48 @view_config(
49 route_name="rss.packages",
50 renderer="rss/packages.xml",
51 decorator=[
52 origin_cache(
53 1 * 24 * 60 * 60, # 1 day
54 stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
55 stale_if_error=5 * 24 * 60 * 60, # 5 days
56 ),
57 ],
58 )
59 def rss_packages(request):
60 request.response.content_type = "text/xml"
61
62 request.find_service(name="csp").merge(XML_CSP)
63
64 newest_projects = (
65 request.db.query(Project)
66 .options(joinedload(Project.releases, innerjoin=True))
67 .order_by(Project.created.desc())
68 .limit(40)
69 .all()
70 )
71
72 return {"newest_projects": newest_projects}
73
[end of warehouse/rss/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/warehouse/rss/views.py b/warehouse/rss/views.py
--- a/warehouse/rss/views.py
+++ b/warehouse/rss/views.py
@@ -26,6 +26,7 @@
1 * 24 * 60 * 60, # 1 day
stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
stale_if_error=5 * 24 * 60 * 60, # 5 days
+ keys=["all-projects"],
),
],
)
@@ -53,6 +54,7 @@
1 * 24 * 60 * 60, # 1 day
stale_while_revalidate=1 * 24 * 60 * 60, # 1 day
stale_if_error=5 * 24 * 60 * 60, # 5 days
+ keys=["all-projects"],
),
],
)
|
{"golden_diff": "diff --git a/warehouse/rss/views.py b/warehouse/rss/views.py\n--- a/warehouse/rss/views.py\n+++ b/warehouse/rss/views.py\n@@ -26,6 +26,7 @@\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n+ keys=[\"all-projects\"],\n ),\n ],\n )\n@@ -53,6 +54,7 @@\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n+ keys=[\"all-projects\"],\n ),\n ],\n )\n", "issue": "Legacy and Warehouse RSS feeds differ\n@andrew asked in https://github.com/librariesio/libraries.io/issues/2024#issuecomment-372638824 about Warehouse's RSS feeds:\r\n\r\n> * https://pypi.org/rss/updates.xml\r\n> * https://pypi.org/rss/packages.xml\r\n\r\n> Which I expected the contents to match the old ones but currently don't:\r\n\r\n> * https://pypi.python.org/pypi?%3Aaction=rss\r\n> * https://pypi.python.org/pypi?%3Aaction=packages_rss\r\n\r\nI've verified through visual inspection that the data in the legacy RSS feed and the data in the Warehouse RSS feeds differ.\r\n\r\nThis is a bug in the feeds or a bug in the docs. Currently our [feeds documentation](https://warehouse.readthedocs.io/api-reference/feeds/) and [Warehouse migration guide](https://warehouse.readthedocs.io/api-reference/integration-guide/) don't say anything about deliberate differences between the legacy and Warehouse RSS feeds. We can update the docs if there's a deliberate reason for the difference.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.xml import XML_CSP\n\n\n@view_config(\n route_name=\"rss.updates\",\n renderer=\"rss/updates.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_updates(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n latest_releases = (\n request.db.query(Release)\n .options(joinedload(Release.project))\n .order_by(Release.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"latest_releases\": latest_releases}\n\n\n@view_config(\n route_name=\"rss.packages\",\n renderer=\"rss/packages.xml\",\n decorator=[\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=1 * 24 * 60 * 60, # 1 day\n stale_if_error=5 * 24 * 60 * 60, # 5 days\n ),\n ],\n)\ndef rss_packages(request):\n request.response.content_type = \"text/xml\"\n\n request.find_service(name=\"csp\").merge(XML_CSP)\n\n newest_projects = (\n request.db.query(Project)\n .options(joinedload(Project.releases, innerjoin=True))\n .order_by(Project.created.desc())\n .limit(40)\n .all()\n )\n\n return {\"newest_projects\": newest_projects}\n", "path": "warehouse/rss/views.py"}]}
| 1,448 | 228 |
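A quick way to see how far the two feeds diverge is to pull both and compare item titles; the URLs come from the issue above, and the standard RSS `channel/item/title` layout is assumed:

```python
# Fetch the legacy and Warehouse RSS feeds and print the titles unique to each.
# Requires network access; both endpoints are taken from the issue text.
import urllib.request
import xml.etree.ElementTree as ET

LEGACY = "https://pypi.python.org/pypi?%3Aaction=rss"
WAREHOUSE = "https://pypi.org/rss/updates.xml"

def titles(url):
    with urllib.request.urlopen(url) as resp:
        root = ET.fromstring(resp.read())
    return {item.findtext("title") for item in root.iter("item")}

legacy, warehouse = titles(LEGACY), titles(WAREHOUSE)
print("only in legacy:", sorted(legacy - warehouse))
print("only in warehouse:", sorted(warehouse - legacy))
```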
gh_patches_debug_34793
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-2571
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
azure - lower az log verbosity when running custodian commands without -v
Currently, c7n_azure logs info about the session it is using during normal c7n commands, which is much more verbose than it used to be. Moving the log level from info to debug keeps that output accessible with the -v flag.
```
$ custodian schema network-addr.filters.shield-enabled -v
2018-06-19 09:42:36,028: cli.azure.cli.core:DEBUG Current cloud config:
AzureCloud
2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Creating session with Azure CLI Authentication
2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Session using Subscription ID: xxxxxxxxxxxxxxxxxxxxxxxxxxx
Help
----
The most base type
Schema
------
{
"additionalProperties": false,
"required": [
"type"
],
"type": "object",
"properties": {
"state": {
"type": "boolean"
},
"type": {
"enum": [
"shield-enabled"
]
}
}
}
```
</issue>
<code>
[start of tools/c7n_azure/c7n_azure/session.py]
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import importlib
16 import os
17 import logging
18 from azure.cli.core.cloud import AZURE_PUBLIC_CLOUD
19 from azure.cli.core._profile import Profile
20 from azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication
21 from c7n_azure.utils import ResourceIdParser
22
23
24 class Session(object):
25
26 def __init__(self, subscription_id=None):
27 """
28 Creates a session using available authentication type.
29
30 Auth priority:
31 1. Token Auth
32 2. Tenant Auth
33 3. Azure CLI Auth
34
35 :param subscription_id: If provided, overrides environment variables.
36 """
37
38 self.log = logging.getLogger('custodian.azure.session')
39 self._provider_cache = {}
40
41 tenant_auth_variables = [
42 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',
43 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'
44 ]
45 token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']
46
47 if all(k in os.environ for k in token_auth_variables):
48 # Token authentication
49 self.credentials = BasicTokenAuthentication(
50 token={
51 'access_token': os.environ['AZURE_ACCESS_TOKEN']
52 })
53 self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
54 self.log.info("Creating session with Token Authentication")
55
56 elif all(k in os.environ for k in tenant_auth_variables):
57 # Tenant (service principal) authentication
58 self.credentials = ServicePrincipalCredentials(
59 client_id=os.environ['AZURE_CLIENT_ID'],
60 secret=os.environ['AZURE_CLIENT_SECRET'],
61 tenant=os.environ['AZURE_TENANT_ID']
62 )
63 self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
64 self.tenant_id = os.environ['AZURE_TENANT_ID']
65 self.log.info("Creating session with Service Principal Authentication")
66
67 else:
68 # Azure CLI authentication
69 (self.credentials,
70 self.subscription_id,
71 self.tenant_id) = Profile().get_login_credentials(
72 resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id)
73 self.log.info("Creating session with Azure CLI Authentication")
74
75 # Let provided id parameter override everything else
76 if subscription_id is not None:
77 self.subscription_id = subscription_id
78
79 self.log.info("Session using Subscription ID: %s" % self.subscription_id)
80
81 if self.credentials is None:
82 self.log.error('Unable to locate credentials for Azure session.')
83
84 def client(self, client):
85 service_name, client_name = client.rsplit('.', 1)
86 svc_module = importlib.import_module(service_name)
87 klass = getattr(svc_module, client_name)
88 return klass(self.credentials, self.subscription_id)
89
90 def resource_api_version(self, resource_id):
91 """ latest non-preview api version for resource """
92
93 namespace = ResourceIdParser.get_namespace(resource_id)
94 resource_type = ResourceIdParser.get_resource_type(resource_id)
95
96 if resource_type in self._provider_cache:
97 return self._provider_cache[resource_type]
98
99 resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')
100 provider = resource_client.providers.get(namespace)
101
102 rt = next((t for t in provider.resource_types
103 if t.resource_type == str(resource_type).split('/')[-1]), None)
104 if rt and rt.api_versions:
105 versions = [v for v in rt.api_versions if 'preview' not in v.lower()]
106 api_version = versions[0] if versions else rt.api_versions[0]
107 self._provider_cache[resource_type] = api_version
108 return api_version
109
[end of tools/c7n_azure/c7n_azure/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/c7n_azure/c7n_azure/session.py b/tools/c7n_azure/c7n_azure/session.py
--- a/tools/c7n_azure/c7n_azure/session.py
+++ b/tools/c7n_azure/c7n_azure/session.py
@@ -24,6 +24,19 @@
class Session(object):
def __init__(self, subscription_id=None):
+ """
+ :param subscription_id: If provided overrides environment variables.
+
+ """
+
+ self.log = logging.getLogger('custodian.azure.session')
+ self._provider_cache = {}
+ self.subscription_id_override = subscription_id
+ self.credentials = None
+ self.subscription_id = None
+ self.tenant_id = None
+
+ def _initialize_session(self):
"""
Creates a session using available authentication type.
@@ -32,11 +45,11 @@
2. Tenant Auth
3. Azure CLI Auth
- :param subscription_id: If provided, overrides environment variables.
"""
- self.log = logging.getLogger('custodian.azure.session')
- self._provider_cache = {}
+ # Only run once
+ if self.credentials is not None:
+ return
tenant_auth_variables = [
'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',
@@ -73,8 +86,8 @@
self.log.info("Creating session with Azure CLI Authentication")
# Let provided id parameter override everything else
- if subscription_id is not None:
- self.subscription_id = subscription_id
+ if self.subscription_id_override is not None:
+ self.subscription_id = self.subscription_id_override
self.log.info("Session using Subscription ID: %s" % self.subscription_id)
@@ -82,6 +95,7 @@
self.log.error('Unable to locate credentials for Azure session.')
def client(self, client):
+ self._initialize_session()
service_name, client_name = client.rsplit('.', 1)
svc_module = importlib.import_module(service_name)
klass = getattr(svc_module, client_name)
|
{"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/session.py b/tools/c7n_azure/c7n_azure/session.py\n--- a/tools/c7n_azure/c7n_azure/session.py\n+++ b/tools/c7n_azure/c7n_azure/session.py\n@@ -24,6 +24,19 @@\n class Session(object):\n \n def __init__(self, subscription_id=None):\n+ \"\"\"\n+ :param subscription_id: If provided overrides environment variables.\n+\n+ \"\"\"\n+\n+ self.log = logging.getLogger('custodian.azure.session')\n+ self._provider_cache = {}\n+ self.subscription_id_override = subscription_id\n+ self.credentials = None\n+ self.subscription_id = None\n+ self.tenant_id = None\n+\n+ def _initialize_session(self):\n \"\"\"\n Creates a session using available authentication type.\n \n@@ -32,11 +45,11 @@\n 2. Tenant Auth\n 3. Azure CLI Auth\n \n- :param subscription_id: If provided, overrides environment variables.\n \"\"\"\n \n- self.log = logging.getLogger('custodian.azure.session')\n- self._provider_cache = {}\n+ # Only run once\n+ if self.credentials is not None:\n+ return\n \n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n@@ -73,8 +86,8 @@\n self.log.info(\"Creating session with Azure CLI Authentication\")\n \n # Let provided id parameter override everything else\n- if subscription_id is not None:\n- self.subscription_id = subscription_id\n+ if self.subscription_id_override is not None:\n+ self.subscription_id = self.subscription_id_override\n \n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n \n@@ -82,6 +95,7 @@\n self.log.error('Unable to locate credentials for Azure session.')\n \n def client(self, client):\n+ self._initialize_session()\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n", "issue": "azure - lower az log verbosity when running custodian commands without -v\ncurrently, c7n_azure will log info about the session it's using with normal c7n commands and is much more verbose than what it used to be, moved the log level from info to debug so it's still accessible with the -v flag\r\n\r\n```\r\n$ custodian schema network-addr.filters.shield-enabled -v\r\n2018-06-19 09:42:36,028: cli.azure.cli.core:DEBUG Current cloud config:\r\nAzureCloud\r\n2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Creating session with Azure CLI Authentication\r\n2018-06-19 09:42:36,029: custodian.azure.session:DEBUG Session using Subscription ID: xxxxxxxxxxxxxxxxxxxxxxxxxxx\r\n\r\nHelp\r\n----\r\n\r\nThe most base type\r\n\r\nSchema\r\n------\r\n\r\n{\r\n \"additionalProperties\": false,\r\n \"required\": [\r\n \"type\"\r\n ],\r\n \"type\": \"object\",\r\n \"properties\": {\r\n \"state\": {\r\n \"type\": \"boolean\"\r\n },\r\n \"type\": {\r\n \"enum\": [\r\n \"shield-enabled\"\r\n ]\r\n }\r\n }\r\n}\r\n```\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport os\nimport logging\nfrom azure.cli.core.cloud import 
AZURE_PUBLIC_CLOUD\nfrom azure.cli.core._profile import Profile\nfrom azure.common.credentials import ServicePrincipalCredentials, BasicTokenAuthentication\nfrom c7n_azure.utils import ResourceIdParser\n\n\nclass Session(object):\n\n def __init__(self, subscription_id=None):\n \"\"\"\n Creates a session using available authentication type.\n\n Auth priority:\n 1. Token Auth\n 2. Tenant Auth\n 3. Azure CLI Auth\n\n :param subscription_id: If provided, overrides environment variables.\n \"\"\"\n\n self.log = logging.getLogger('custodian.azure.session')\n self._provider_cache = {}\n\n tenant_auth_variables = [\n 'AZURE_TENANT_ID', 'AZURE_SUBSCRIPTION_ID',\n 'AZURE_CLIENT_ID', 'AZURE_CLIENT_SECRET'\n ]\n token_auth_variables = ['AZURE_ACCESS_TOKEN', 'AZURE_SUBSCRIPTION_ID']\n\n if all(k in os.environ for k in token_auth_variables):\n # Token authentication\n self.credentials = BasicTokenAuthentication(\n token={\n 'access_token': os.environ['AZURE_ACCESS_TOKEN']\n })\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.log.info(\"Creating session with Token Authentication\")\n\n elif all(k in os.environ for k in tenant_auth_variables):\n # Tenant (service principal) authentication\n self.credentials = ServicePrincipalCredentials(\n client_id=os.environ['AZURE_CLIENT_ID'],\n secret=os.environ['AZURE_CLIENT_SECRET'],\n tenant=os.environ['AZURE_TENANT_ID']\n )\n self.subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']\n self.tenant_id = os.environ['AZURE_TENANT_ID']\n self.log.info(\"Creating session with Service Principal Authentication\")\n\n else:\n # Azure CLI authentication\n (self.credentials,\n self.subscription_id,\n self.tenant_id) = Profile().get_login_credentials(\n resource=AZURE_PUBLIC_CLOUD.endpoints.active_directory_resource_id)\n self.log.info(\"Creating session with Azure CLI Authentication\")\n\n # Let provided id parameter override everything else\n if subscription_id is not None:\n self.subscription_id = subscription_id\n\n self.log.info(\"Session using Subscription ID: %s\" % self.subscription_id)\n\n if self.credentials is None:\n self.log.error('Unable to locate credentials for Azure session.')\n\n def client(self, client):\n service_name, client_name = client.rsplit('.', 1)\n svc_module = importlib.import_module(service_name)\n klass = getattr(svc_module, client_name)\n return klass(self.credentials, self.subscription_id)\n\n def resource_api_version(self, resource_id):\n \"\"\" latest non-preview api version for resource \"\"\"\n\n namespace = ResourceIdParser.get_namespace(resource_id)\n resource_type = ResourceIdParser.get_resource_type(resource_id)\n\n if resource_type in self._provider_cache:\n return self._provider_cache[resource_type]\n\n resource_client = self.client('azure.mgmt.resource.ResourceManagementClient')\n provider = resource_client.providers.get(namespace)\n\n rt = next((t for t in provider.resource_types\n if t.resource_type == str(resource_type).split('/')[-1]), None)\n if rt and rt.api_versions:\n versions = [v for v in rt.api_versions if 'preview' not in v.lower()]\n api_version = versions[0] if versions else rt.api_versions[0]\n self._provider_cache[resource_type] = api_version\n return api_version\n", "path": "tools/c7n_azure/c7n_azure/session.py"}]}
| 1,933 | 469 |
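The pattern behind the fix above, deferring credential resolution until a client is actually requested, can be sketched on its own; the class below is a simplified stand-in rather than c7n_azure's real Session:

```python
# Lazy session sketch: the constructor records configuration but stays silent, so
# commands that never call client() (e.g. `custodian schema ...`) produce no session
# log lines; credentials are resolved once, on first use.
import logging

class LazySession:
    def __init__(self, subscription_id=None):
        self.log = logging.getLogger("custodian.azure.session")
        self.subscription_id_override = subscription_id
        self.credentials = None
        self.subscription_id = None

    def _initialize_session(self):
        if self.credentials is not None:       # already initialized, nothing to do
            return
        self.credentials = {"token": "..."}    # placeholder for the real credential lookup
        self.subscription_id = self.subscription_id_override or "from-environment"
        self.log.info("Session using Subscription ID: %s", self.subscription_id)

    def client(self, name):
        self._initialize_session()             # first use triggers initialization
        return (name, self.credentials, self.subscription_id)
```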
gh_patches_debug_24562
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-1158
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SQLite databases with invalid unicode not supported
**Small description**
Opening a SQLite database will fail when a cell in a TEXT column has invalid unicode characters
**Expected result**
Either the specific cell that has invalid unicode is not imported, or it is imported but the invalid characters are stripped or slash-escaped.
**Actual result with screenshot**


(those are errors from two different DBs that I've seen this problem on)
**Steps to reproduce with sample data and a .vd**
Here's a SQLite file that has this problem: https://hack.wesleyac.com/test.sqlite
**Additional context**
`vd --version`: `saul.pw/VisiData v2.4`
dbcli/litecli#89 is a similar issue in a different python project, which may have useful context for fixing this
</issue>
<code>
[start of visidata/loaders/sqlite.py]
1 import re
2
3 from visidata import *
4
5 @VisiData.api
6 def open_sqlite(vd, p):
7 return SqliteIndexSheet(p.name, source=p)
8
9 VisiData.open_sqlite3 = VisiData.open_sqlite
10 VisiData.open_db = VisiData.open_sqlite
11
12 # rowdef: list of values
13 class SqliteSheet(Sheet):
14 'Provide functionality for importing SQLite databases.'
15 savesToSource = True
16 defer = True
17
18 def resolve(self):
19 'Resolve all the way back to the original source Path.'
20 return self.source.resolve()
21
22 def conn(self):
23 import sqlite3
24 return sqlite3.connect(str(self.resolve()))
25
26 def execute(self, conn, sql, parms=None):
27 parms = parms or []
28 vd.debug(sql)
29 return conn.execute(sql, parms)
30
31 def iterload(self):
32 import sqlite3
33
34 def parse_sqlite_type(t):
35 m = re.match(r'(\w+)(\((\d+)(,(\d+))?\))?', t.upper())
36 if not m: return anytype
37 typename, _, i, _, f = m.groups()
38 if typename == 'DATE': return date
39 if typename == 'INTEGER': return int
40 if typename == 'REAL': return float
41 if typename == 'NUMBER':
42 return int if f == '0' else float
43 return anytype
44
45 with self.conn() as conn:
46 tblname = self.tableName
47 if not isinstance(self, SqliteIndexSheet):
48 self.columns = []
49 self.addColumn(ColumnItem('rowid', 0, type=int, width=0))
50 for r in self.execute(conn, 'PRAGMA TABLE_XINFO("%s")' % tblname):
51 colnum, colname, coltype, nullable, defvalue, colkey, *_ = r
52 c = ColumnItem(colname, colnum+1, type=parse_sqlite_type(coltype))
53 self.addColumn(c)
54
55 if colkey:
56 self.setKeys([c])
57
58 try:
59 r = self.execute(conn, 'SELECT rowid, * FROM "%s"' % tblname)
60 except sqlite3.OperationalError:
61 vd.error('tables WITHOUT ROWID not supported')
62 yield from Progress(r, total=r.rowcount-1)
63
64 @asyncthread
65 def putChanges(self):
66 adds, mods, dels = self.getDeferredChanges()
67 options_safe_error = options.safe_error
68 def value(row, col):
69 v = col.getTypedValue(row)
70 if isinstance(v, TypedWrapper):
71 if isinstance(v, TypedExceptionWrapper):
72 return options_safe_error
73 else:
74 return None
75 elif not isinstance(v, (int, float, str)):
76 v = col.getDisplayValue(r)
77 return v
78
79 def values(row, cols):
80 vals = []
81 for c in cols:
82 vals.append(value(row, c))
83 return vals
84
85 with self.conn() as conn:
86 wherecols = [self.columns[0]] # self.column("rowid")
87 for r in adds.values():
88 cols = self.visibleCols
89 sql = 'INSERT INTO "%s" ' % self.tableName
90 sql += '(%s)' % ','.join(c.name for c in cols)
91 sql += ' VALUES (%s)' % ','.join('?' for c in cols)
92 res = self.execute(conn, sql, parms=values(r, cols))
93 if res.rowcount != res.arraysize:
94 vd.warning('not all rows inserted') # f'{res.rowcount}/{res.arraysize} rows inserted'
95
96 for row, rowmods in mods.values():
97 sql = 'UPDATE "%s" SET ' % self.tableName
98 sql += ', '.join('%s=?' % c.name for c, _ in rowmods.items())
99 sql += ' WHERE %s' % ' AND '.join('"%s"=?' % c.name for c in wherecols)
100 newvals=values(row, [c for c, _ in rowmods.items()])
101 # calcValue gets the 'previous' value (before update)
102 wherevals=list(Column.calcValue(c, row) or '' for c in wherecols)
103 res = self.execute(conn, sql, parms=newvals+wherevals)
104 if res.rowcount != res.arraysize:
105 vd.warning('not all rows updated') # f'{res.rowcount}/{res.arraysize} rows updated'
106
107 for row in dels.values():
108 sql = 'DELETE FROM "%s" ' % self.tableName
109 sql += ' WHERE %s' % ' AND '.join('"%s"=?' % c.name for c in wherecols)
110 wherevals=list(Column.calcValue(c, row) for c in wherecols)
111 res = self.execute(conn, sql, parms=wherevals)
112 if res.rowcount != res.arraysize:
113 vd.warning('not all rows deleted') # f'{res.rowcount}/{res.arraysize} rows deleted'
114
115 conn.commit()
116
117 self.preloadHook()
118 self.reload()
119
120
121 class SqliteIndexSheet(SqliteSheet, IndexSheet):
122 rowtype = 'tables'
123 tableName = 'sqlite_master'
124 savesToSource = True
125 defer = True
126 def iterload(self):
127 for row in SqliteSheet.iterload(self):
128 if row[1] != 'index':
129 tblname = row[2]
130 yield SqliteSheet(tblname, source=self, tableName=tblname, row=row)
131
132 def putChanges(self):
133 adds, mods, dels = self.getDeferredChanges()
134 with self.conn() as conn:
135 for r in adds.values():
136 vd.warning('create a new table by saving a new sheet to this database file')
137
138 for row, rowmods in mods.values():
139 cname = self.column('name')
140 if len(rowmods) == 1 and cname in rowmods:
141 sql='ALTER TABLE "%s" RENAME TO "%s"' % (cname.calcValue(row), rowmods[cname])
142 self.execute(conn, sql)
143 else:
144 vd.warning('can only modify table name')
145
146 for row in dels.values():
147 sql = 'DROP TABLE "%s"' % row.tableName
148 self.execute(conn, sql)
149
150 conn.commit()
151
152 self.preloadHook()
153 self.reload()
154
155 class SqliteQuerySheet(SqliteSheet):
156 def iterload(self):
157 with self.conn() as conn:
158 self.columns = []
159 for c in type(self).columns:
160 self.addColumn(copy(c))
161 self.result = self.execute(conn, self.query, parms=getattr(self, 'parms', []))
162 for i, desc in enumerate(self.result.description):
163 self.addColumn(ColumnItem(desc[0], i))
164
165 for row in self.result:
166 yield row
167
168
169
170 @VisiData.api
171 def save_sqlite(vd, p, *vsheets):
172 import sqlite3
173 conn = sqlite3.connect(str(p))
174 conn.row_factory = sqlite3.Row
175 c = conn.cursor()
176
177 sqltypes = {
178 int: 'INTEGER',
179 float: 'REAL',
180 currency: 'REAL'
181 }
182
183 for vs in vsheets:
184 vs.ensureLoaded()
185 vd.sync()
186
187 for vs in vsheets:
188 tblname = clean_to_id(vs.name)
189 sqlcols = []
190 for col in vs.visibleCols:
191 sqlcols.append('"%s" %s' % (col.name, sqltypes.get(col.type, 'TEXT')))
192 sql = 'CREATE TABLE IF NOT EXISTS "%s" (%s)' % (tblname, ', '.join(sqlcols))
193 c.execute(sql)
194
195 for r in Progress(vs.rows, 'saving'):
196 sqlvals = []
197 for col in vs.visibleCols:
198 v = col.getTypedValue(r)
199 if isinstance(v, TypedWrapper):
200 if isinstance(v, TypedExceptionWrapper):
201 v = options.safe_error
202 else:
203 v = None
204 elif not isinstance(v, (int, float, str)):
205 v = col.getDisplayValue(r)
206 sqlvals.append(v)
207 sql = 'INSERT INTO "%s" (%s) VALUES (%s)' % (tblname, ','.join(f'"{c.name}"' for c in vs.visibleCols), ','.join('?' for v in sqlvals))
208 c.execute(sql, sqlvals)
209
210 conn.commit()
211
212 vd.status("%s save finished" % p)
213
214
215 SqliteIndexSheet.addCommand('a', 'add-table', 'fail("create a new table by saving a sheet to this database file")', 'stub; add table by saving a sheet to the db file instead')
216 SqliteIndexSheet.bindkey('ga', 'add-table')
217 SqliteSheet.class_options.header = 0
218 VisiData.save_db = VisiData.save_sqlite
219
[end of visidata/loaders/sqlite.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/visidata/loaders/sqlite.py b/visidata/loaders/sqlite.py
--- a/visidata/loaders/sqlite.py
+++ b/visidata/loaders/sqlite.py
@@ -21,7 +21,9 @@
def conn(self):
import sqlite3
- return sqlite3.connect(str(self.resolve()))
+ con = sqlite3.connect(str(self.resolve()))
+ con.text_factory = lambda s, enc=self.options.encoding: s.decode(enc)
+ return con
def execute(self, conn, sql, parms=None):
parms = parms or []
@@ -55,10 +57,7 @@
if colkey:
self.setKeys([c])
- try:
- r = self.execute(conn, 'SELECT rowid, * FROM "%s"' % tblname)
- except sqlite3.OperationalError:
- vd.error('tables WITHOUT ROWID not supported')
+ r = self.execute(conn, 'SELECT rowid, * FROM "%s"' % tblname)
yield from Progress(r, total=r.rowcount-1)
@asyncthread
@@ -171,6 +170,7 @@
def save_sqlite(vd, p, *vsheets):
import sqlite3
conn = sqlite3.connect(str(p))
+ conn.text_factory = lambda s, enc=vsheets[0].options.encoding: s.decode(enc)
conn.row_factory = sqlite3.Row
c = conn.cursor()
|
{"golden_diff": "diff --git a/visidata/loaders/sqlite.py b/visidata/loaders/sqlite.py\n--- a/visidata/loaders/sqlite.py\n+++ b/visidata/loaders/sqlite.py\n@@ -21,7 +21,9 @@\n \n def conn(self):\n import sqlite3\n- return sqlite3.connect(str(self.resolve()))\n+ con = sqlite3.connect(str(self.resolve()))\n+ con.text_factory = lambda s, enc=self.options.encoding: s.decode(enc)\n+ return con\n \n def execute(self, conn, sql, parms=None):\n parms = parms or []\n@@ -55,10 +57,7 @@\n if colkey:\n self.setKeys([c])\n \n- try:\n- r = self.execute(conn, 'SELECT rowid, * FROM \"%s\"' % tblname)\n- except sqlite3.OperationalError:\n- vd.error('tables WITHOUT ROWID not supported')\n+ r = self.execute(conn, 'SELECT rowid, * FROM \"%s\"' % tblname)\n yield from Progress(r, total=r.rowcount-1)\n \n @asyncthread\n@@ -171,6 +170,7 @@\n def save_sqlite(vd, p, *vsheets):\n import sqlite3\n conn = sqlite3.connect(str(p))\n+ conn.text_factory = lambda s, enc=vsheets[0].options.encoding: s.decode(enc)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n", "issue": "SQLite databases with invalid unicode not supported\n**Small description**\r\n\r\nOpening a SQLite database will fail when a cell in a TEXT column has invalid unicode characters\r\n\r\n**Expected result**\r\n\r\nEither the specific cell that has invalid unicode is not imported, or it is imported but the invalid characters are stripped or slash-escaped.\r\n\r\n**Actual result with screenshot**\r\n\r\n\r\n\r\n\r\n\r\n(those are errors from two different DBs that I've seen this problem on)\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\nHere's a SQLite file that has this problem: https://hack.wesleyac.com/test.sqlite\r\n\r\n**Additional context**\r\n\r\n`vd --version`: `saul.pw/VisiData v2.4`\r\n\r\ndbcli/litecli#89 is a similar issue in a different python project, which may have useful context for fixing this\r\n\n", "before_files": [{"content": "import re\n\nfrom visidata import *\n\[email protected]\ndef open_sqlite(vd, p):\n return SqliteIndexSheet(p.name, source=p)\n\nVisiData.open_sqlite3 = VisiData.open_sqlite\nVisiData.open_db = VisiData.open_sqlite\n\n# rowdef: list of values\nclass SqliteSheet(Sheet):\n 'Provide functionality for importing SQLite databases.'\n savesToSource = True\n defer = True\n\n def resolve(self):\n 'Resolve all the way back to the original source Path.'\n return self.source.resolve()\n\n def conn(self):\n import sqlite3\n return sqlite3.connect(str(self.resolve()))\n\n def execute(self, conn, sql, parms=None):\n parms = parms or []\n vd.debug(sql)\n return conn.execute(sql, parms)\n\n def iterload(self):\n import sqlite3\n\n def parse_sqlite_type(t):\n m = re.match(r'(\\w+)(\\((\\d+)(,(\\d+))?\\))?', t.upper())\n if not m: return anytype\n typename, _, i, _, f = m.groups()\n if typename == 'DATE': return date\n if typename == 'INTEGER': return int\n if typename == 'REAL': return float\n if typename == 'NUMBER':\n return int if f == '0' else float\n return anytype\n\n with self.conn() as conn:\n tblname = self.tableName\n if not isinstance(self, SqliteIndexSheet):\n self.columns = []\n self.addColumn(ColumnItem('rowid', 0, type=int, width=0))\n for r in self.execute(conn, 'PRAGMA TABLE_XINFO(\"%s\")' % tblname):\n colnum, colname, coltype, nullable, defvalue, colkey, *_ = r\n c = ColumnItem(colname, colnum+1, type=parse_sqlite_type(coltype))\n self.addColumn(c)\n\n if colkey:\n self.setKeys([c])\n\n try:\n r = self.execute(conn, 'SELECT rowid, * FROM \"%s\"' % tblname)\n except sqlite3.OperationalError:\n 
vd.error('tables WITHOUT ROWID not supported')\n yield from Progress(r, total=r.rowcount-1)\n\n @asyncthread\n def putChanges(self):\n adds, mods, dels = self.getDeferredChanges()\n options_safe_error = options.safe_error\n def value(row, col):\n v = col.getTypedValue(row)\n if isinstance(v, TypedWrapper):\n if isinstance(v, TypedExceptionWrapper):\n return options_safe_error\n else:\n return None\n elif not isinstance(v, (int, float, str)):\n v = col.getDisplayValue(r)\n return v\n\n def values(row, cols):\n vals = []\n for c in cols:\n vals.append(value(row, c))\n return vals\n\n with self.conn() as conn:\n wherecols = [self.columns[0]] # self.column(\"rowid\")\n for r in adds.values():\n cols = self.visibleCols\n sql = 'INSERT INTO \"%s\" ' % self.tableName\n sql += '(%s)' % ','.join(c.name for c in cols)\n sql += ' VALUES (%s)' % ','.join('?' for c in cols)\n res = self.execute(conn, sql, parms=values(r, cols))\n if res.rowcount != res.arraysize:\n vd.warning('not all rows inserted') # f'{res.rowcount}/{res.arraysize} rows inserted'\n\n for row, rowmods in mods.values():\n sql = 'UPDATE \"%s\" SET ' % self.tableName\n sql += ', '.join('%s=?' % c.name for c, _ in rowmods.items())\n sql += ' WHERE %s' % ' AND '.join('\"%s\"=?' % c.name for c in wherecols)\n newvals=values(row, [c for c, _ in rowmods.items()])\n # calcValue gets the 'previous' value (before update)\n wherevals=list(Column.calcValue(c, row) or '' for c in wherecols)\n res = self.execute(conn, sql, parms=newvals+wherevals)\n if res.rowcount != res.arraysize:\n vd.warning('not all rows updated') # f'{res.rowcount}/{res.arraysize} rows updated'\n\n for row in dels.values():\n sql = 'DELETE FROM \"%s\" ' % self.tableName\n sql += ' WHERE %s' % ' AND '.join('\"%s\"=?' % c.name for c in wherecols)\n wherevals=list(Column.calcValue(c, row) for c in wherecols)\n res = self.execute(conn, sql, parms=wherevals)\n if res.rowcount != res.arraysize:\n vd.warning('not all rows deleted') # f'{res.rowcount}/{res.arraysize} rows deleted'\n\n conn.commit()\n\n self.preloadHook()\n self.reload()\n\n\nclass SqliteIndexSheet(SqliteSheet, IndexSheet):\n rowtype = 'tables'\n tableName = 'sqlite_master'\n savesToSource = True\n defer = True\n def iterload(self):\n for row in SqliteSheet.iterload(self):\n if row[1] != 'index':\n tblname = row[2]\n yield SqliteSheet(tblname, source=self, tableName=tblname, row=row)\n\n def putChanges(self):\n adds, mods, dels = self.getDeferredChanges()\n with self.conn() as conn:\n for r in adds.values():\n vd.warning('create a new table by saving a new sheet to this database file')\n\n for row, rowmods in mods.values():\n cname = self.column('name')\n if len(rowmods) == 1 and cname in rowmods:\n sql='ALTER TABLE \"%s\" RENAME TO \"%s\"' % (cname.calcValue(row), rowmods[cname])\n self.execute(conn, sql)\n else:\n vd.warning('can only modify table name')\n\n for row in dels.values():\n sql = 'DROP TABLE \"%s\"' % row.tableName\n self.execute(conn, sql)\n\n conn.commit()\n\n self.preloadHook()\n self.reload()\n\nclass SqliteQuerySheet(SqliteSheet):\n def iterload(self):\n with self.conn() as conn:\n self.columns = []\n for c in type(self).columns:\n self.addColumn(copy(c))\n self.result = self.execute(conn, self.query, parms=getattr(self, 'parms', []))\n for i, desc in enumerate(self.result.description):\n self.addColumn(ColumnItem(desc[0], i))\n\n for row in self.result:\n yield row\n\n\n\[email protected]\ndef save_sqlite(vd, p, *vsheets):\n import sqlite3\n conn = sqlite3.connect(str(p))\n conn.row_factory = 
sqlite3.Row\n c = conn.cursor()\n\n sqltypes = {\n int: 'INTEGER',\n float: 'REAL',\n currency: 'REAL'\n }\n\n for vs in vsheets:\n vs.ensureLoaded()\n vd.sync()\n\n for vs in vsheets:\n tblname = clean_to_id(vs.name)\n sqlcols = []\n for col in vs.visibleCols:\n sqlcols.append('\"%s\" %s' % (col.name, sqltypes.get(col.type, 'TEXT')))\n sql = 'CREATE TABLE IF NOT EXISTS \"%s\" (%s)' % (tblname, ', '.join(sqlcols))\n c.execute(sql)\n\n for r in Progress(vs.rows, 'saving'):\n sqlvals = []\n for col in vs.visibleCols:\n v = col.getTypedValue(r)\n if isinstance(v, TypedWrapper):\n if isinstance(v, TypedExceptionWrapper):\n v = options.safe_error\n else:\n v = None\n elif not isinstance(v, (int, float, str)):\n v = col.getDisplayValue(r)\n sqlvals.append(v)\n sql = 'INSERT INTO \"%s\" (%s) VALUES (%s)' % (tblname, ','.join(f'\"{c.name}\"' for c in vs.visibleCols), ','.join('?' for v in sqlvals))\n c.execute(sql, sqlvals)\n\n conn.commit()\n\n vd.status(\"%s save finished\" % p)\n\n\nSqliteIndexSheet.addCommand('a', 'add-table', 'fail(\"create a new table by saving a sheet to this database file\")', 'stub; add table by saving a sheet to the db file instead')\nSqliteIndexSheet.bindkey('ga', 'add-table')\nSqliteSheet.class_options.header = 0\nVisiData.save_db = VisiData.save_sqlite\n", "path": "visidata/loaders/sqlite.py"}]}
| 3,279 | 325 |
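The mechanism behind the fix above is sqlite3's `text_factory` hook; a standalone sketch follows, with the error handling chosen here (`surrogateescape`) as an illustration rather than VisiData's exact setting:

```python
# By default sqlite3 decodes TEXT columns as strict UTF-8 and raises on invalid
# bytes; installing a text_factory lets the caller pick the encoding and error
# handling, so files like the sample test.sqlite from the issue load cleanly.
import sqlite3

con = sqlite3.connect("test.sqlite")    # sample database from the issue
con.text_factory = lambda b: b.decode("utf-8", errors="surrogateescape")

for row in con.execute('SELECT rowid, * FROM "some_table"'):   # table name is illustrative
    print(row)
```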
gh_patches_debug_18250
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-6213
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: Modin fails to read a feather file if it contains index metadata
### Modin version checks
- [X] I have checked that this issue has not already been reported.
- [X] I have confirmed this bug exists on the latest released version of Modin.
- [X] I have confirmed this bug exists on the main branch of Modin. (In order to do this you can follow [this guide](https://modin.readthedocs.io/en/stable/getting_started/installation.html#installing-from-the-github-master-branch).)
### Reproducible Example
```python
import tempfile
import pandas
import modin.pandas as pd
with tempfile.NamedTemporaryFile() as fl:
pandas.DataFrame({"a": [1, 2, 3]}, index=pandas.Index([0, 1, 2])).to_feather(fl.name)
pandas.read_feather(fl.name) # reads okay
pd.read_feather(fl.name) # ValueError
```
### Issue Description
The Feather format should only allow writing dataframes with default RangeIndexes, as [other indices are considered to be unsupported so far](https://github.com/pandas-dev/pandas/issues/28208#issuecomment-526002530). However, a dataframe can also be written if its index <i>matches</i> the values of a default index; [there is no requirement in the writer's code for it to be an actual RangeIndex](https://github.com/pandas-dev/pandas/blob/37ea63d540fd27274cad6585082c91b1283f963d/pandas/io/feather_format.py#L75-L79). The extra index metadata confuses Modin and causes its `.read_feather()` to fail on such files.
### Expected Behavior
We should filter out the index columns here: https://github.com/modin-project/modin/blob/d14abf463548dff147a12cd59727cbf0dbee7721/modin/core/io/column_stores/feather_dispatcher.py#L65
### Error Logs
<details>
```python-traceback
Traceback (most recent call last):
File "srt.py", line 9, in <module>
pd.read_feather(fl.name) # Error
File "repos/modin/modin/logging/logger_decorator.py", line 128, in run_and_log
return obj(*args, **kwargs)
File "repos/modin/modin/pandas/io.py", line 509, in read_feather
return DataFrame(query_compiler=FactoryDispatcher.read_feather(**kwargs))
File "repos/modin/modin/core/execution/dispatching/factories/dispatcher.py", line 235, in read_feather
return cls.get_factory()._read_feather(**kwargs)
File "repos/modin/modin/core/execution/dispatching/factories/factories.py", line 293, in _read_feather
return cls.io_cls.read_feather(**kwargs)
File "repos/modin/modin/logging/logger_decorator.py", line 128, in run_and_log
return obj(*args, **kwargs)
File "repos/modin/modin/core/io/file_dispatcher.py", line 157, in read
query_compiler = cls._read(*args, **kwargs)
File "repos/modin/modin/logging/logger_decorator.py", line 128, in run_and_log
return obj(*args, **kwargs)
File "repos/modin/modin/core/io/column_stores/feather_dispatcher.py", line 66, in _read
return cls.build_query_compiler(path, columns, use_threads=False)
File "repos/modin/modin/logging/logger_decorator.py", line 128, in run_and_log
return obj(*args, **kwargs)
File "repos/modin/modin/core/io/column_stores/column_store_dispatcher.py", line 224, in build_query_compiler
cls.build_dtypes(partition_ids[-1], columns)
File "repos/modin/modin/logging/logger_decorator.py", line 128, in run_and_log
return obj(*args, **kwargs)
File "repos/modin/modin/core/io/column_stores/column_store_dispatcher.py", line 197, in build_dtypes
dtypes.index = columns
File "miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/generic.py", line 5915, in __setattr__
return object.__setattr__(self, name, value)
File "pandas/_libs/properties.pyx", line 69, in pandas._libs.properties.AxisProperty.__set__
File "miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/series.py", line 593, in _set_axis
self._mgr.set_axis(axis, labels)
File "miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/internals/managers.py", line 230, in set_axis
self._validate_set_axis(axis, new_labels)
File "miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/internals/base.py", line 70, in _validate_set_axis
raise ValueError(
ValueError: Length mismatch: Expected axis has 1 elements, new values have 2 elements
```
</details>
### Installed Versions
<details>
Replace this line with the output of pd.show_versions()
</details>
</issue>
<code>
[start of modin/core/io/column_stores/feather_dispatcher.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Module houses `FeatherDispatcher` class, that is used for reading `.feather` files."""
15
16 from modin.core.io.column_stores.column_store_dispatcher import ColumnStoreDispatcher
17 from modin.utils import import_optional_dependency
18 from modin.core.io.file_dispatcher import OpenFile
19
20
21 class FeatherDispatcher(ColumnStoreDispatcher):
22 """Class handles utils for reading `.feather` files."""
23
24 @classmethod
25 def _read(cls, path, columns=None, **kwargs):
26 """
27 Read data from the file path, returning a query compiler.
28
29 Parameters
30 ----------
31 path : str or file-like object
32 The filepath of the feather file.
33 columns : array-like, optional
34 Columns to read from file. If not provided, all columns are read.
35 **kwargs : dict
36 `read_feather` function kwargs.
37
38 Returns
39 -------
40 BaseQueryCompiler
41 Query compiler with imported data for further processing.
42
43 Notes
44 -----
45 `PyArrow` engine and local files only are supported for now,
46 multi threading is set to False by default.
47 PyArrow feather is used. Please refer to the documentation here
48 https://arrow.apache.org/docs/python/api.html#feather-format
49 """
50 path = cls.get_path(path)
51 if columns is None:
52 import_optional_dependency(
53 "pyarrow", "pyarrow is required to read feather files."
54 )
55 from pyarrow import ipc
56
57 with OpenFile(
58 path,
59 **(kwargs.get("storage_options", None) or {}),
60 ) as file:
61 # Opens the file to extract its metadata
62 reader = ipc.open_file(file)
63 # TODO: pyarrow's schema contains much more metadata than just column names, it also
64 # has dtypes and index information that we could use when building a dataframe
65 columns = reader.schema.names
66 return cls.build_query_compiler(path, columns, use_threads=False)
67
[end of modin/core/io/column_stores/feather_dispatcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modin/core/io/column_stores/feather_dispatcher.py b/modin/core/io/column_stores/feather_dispatcher.py
--- a/modin/core/io/column_stores/feather_dispatcher.py
+++ b/modin/core/io/column_stores/feather_dispatcher.py
@@ -62,5 +62,13 @@
reader = ipc.open_file(file)
# TODO: pyarrow's schema contains much more metadata than just column names, it also
# has dtypes and index information that we could use when building a dataframe
- columns = reader.schema.names
+ index_cols = frozenset(
+ col
+ for col in reader.schema.pandas_metadata["index_columns"]
+ # 'index_columns' field may also contain dictionary fields describing actual
+ # RangeIndices, so we're only filtering here for string column names
+ if isinstance(col, str)
+ )
+ # Filtering out the columns that describe the frame's index
+ columns = [col for col in reader.schema.names if col not in index_cols]
return cls.build_query_compiler(path, columns, use_threads=False)
|
{"golden_diff": "diff --git a/modin/core/io/column_stores/feather_dispatcher.py b/modin/core/io/column_stores/feather_dispatcher.py\n--- a/modin/core/io/column_stores/feather_dispatcher.py\n+++ b/modin/core/io/column_stores/feather_dispatcher.py\n@@ -62,5 +62,13 @@\n reader = ipc.open_file(file)\n # TODO: pyarrow's schema contains much more metadata than just column names, it also\n # has dtypes and index information that we could use when building a dataframe\n- columns = reader.schema.names\n+ index_cols = frozenset(\n+ col\n+ for col in reader.schema.pandas_metadata[\"index_columns\"]\n+ # 'index_columns' field may also contain dictionary fields describing actual\n+ # RangeIndices, so we're only filtering here for string column names\n+ if isinstance(col, str)\n+ )\n+ # Filtering out the columns that describe the frame's index\n+ columns = [col for col in reader.schema.names if col not in index_cols]\n return cls.build_query_compiler(path, columns, use_threads=False)\n", "issue": "BUG: Modin fails to read a feather file if it contains index metadata\n### Modin version checks\n\n- [X] I have checked that this issue has not already been reported.\n\n- [X] I have confirmed this bug exists on the latest released version of Modin.\n\n- [X] I have confirmed this bug exists on the main branch of Modin. (In order to do this you can follow [this guide](https://modin.readthedocs.io/en/stable/getting_started/installation.html#installing-from-the-github-master-branch).)\n\n\n### Reproducible Example\n\n```python\nimport tempfile\r\nimport pandas\r\nimport modin.pandas as pd\r\n\r\nwith tempfile.NamedTemporaryFile() as fl:\r\n pandas.DataFrame({\"a\": [1, 2, 3]}, index=pandas.Index([0, 1, 2])).to_feather(fl.name)\r\n\r\n pandas.read_feather(fl.name) # reads okay\r\n pd.read_feather(fl.name) # ValueError\n```\n\n\n### Issue Description\n\nFeather format actually should only allow writing dataframes with default RangeIndexes as [other indices are considered to be unsupported so far](https://github.com/pandas-dev/pandas/issues/28208#issuecomment-526002530), however, it appears that a dataframe can also be written if its index <i>matches</i> the values of default index, [there are no requirements in the writer's code for this to be an actual RangeIndex.](https://github.com/pandas-dev/pandas/blob/37ea63d540fd27274cad6585082c91b1283f963d/pandas/io/feather_format.py#L75-L79) This extra index metadata confuses modin and makes its `.read_feather()` to fail on such fails.\n\n### Expected Behavior\n\nWe should filter out the index columns here: https://github.com/modin-project/modin/blob/d14abf463548dff147a12cd59727cbf0dbee7721/modin/core/io/column_stores/feather_dispatcher.py#L65\n\n### Error Logs\n\n<details>\r\n\r\n```python-traceback\r\n\r\nTraceback (most recent call last):\r\n File \"srt.py\", line 9, in <module>\r\n pd.read_feather(fl.name) # Error\r\n File \"repos/modin/modin/logging/logger_decorator.py\", line 128, in run_and_log\r\n return obj(*args, **kwargs)\r\n File \"repos/modin/modin/pandas/io.py\", line 509, in read_feather\r\n return DataFrame(query_compiler=FactoryDispatcher.read_feather(**kwargs))\r\n File \"repos/modin/modin/core/execution/dispatching/factories/dispatcher.py\", line 235, in read_feather\r\n return cls.get_factory()._read_feather(**kwargs)\r\n File \"repos/modin/modin/core/execution/dispatching/factories/factories.py\", line 293, in _read_feather\r\n return cls.io_cls.read_feather(**kwargs)\r\n File \"repos/modin/modin/logging/logger_decorator.py\", line 128, in 
run_and_log\r\n return obj(*args, **kwargs)\r\n File \"repos/modin/modin/core/io/file_dispatcher.py\", line 157, in read\r\n query_compiler = cls._read(*args, **kwargs)\r\n File \"repos/modin/modin/logging/logger_decorator.py\", line 128, in run_and_log\r\n return obj(*args, **kwargs)\r\n File \"repos/modin/modin/core/io/column_stores/feather_dispatcher.py\", line 66, in _read\r\n return cls.build_query_compiler(path, columns, use_threads=False)\r\n File \"repos/modin/modin/logging/logger_decorator.py\", line 128, in run_and_log\r\n return obj(*args, **kwargs)\r\n File \"repos/modin/modin/core/io/column_stores/column_store_dispatcher.py\", line 224, in build_query_compiler\r\n cls.build_dtypes(partition_ids[-1], columns)\r\n File \"repos/modin/modin/logging/logger_decorator.py\", line 128, in run_and_log\r\n return obj(*args, **kwargs)\r\n File \"repos/modin/modin/core/io/column_stores/column_store_dispatcher.py\", line 197, in build_dtypes\r\n dtypes.index = columns\r\n File \"miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/generic.py\", line 5915, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 69, in pandas._libs.properties.AxisProperty.__set__\r\n File \"miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/series.py\", line 593, in _set_axis\r\n self._mgr.set_axis(axis, labels)\r\n File \"miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/internals/managers.py\", line 230, in set_axis\r\n self._validate_set_axis(axis, new_labels)\r\n File \"miniconda3/envs/modinc/lib/python3.8/site-packages/pandas/core/internals/base.py\", line 70, in _validate_set_axis\r\n raise ValueError(\r\nValueError: Length mismatch: Expected axis has 1 elements, new values have 2 elements\r\n\r\n\r\n```\r\n\r\n</details>\r\n\n\n### Installed Versions\n\n<details>\r\n\r\nReplace this line with the output of pd.show_versions()\r\n\r\n</details>\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses `FeatherDispatcher` class, that is used for reading `.feather` files.\"\"\"\n\nfrom modin.core.io.column_stores.column_store_dispatcher import ColumnStoreDispatcher\nfrom modin.utils import import_optional_dependency\nfrom modin.core.io.file_dispatcher import OpenFile\n\n\nclass FeatherDispatcher(ColumnStoreDispatcher):\n \"\"\"Class handles utils for reading `.feather` files.\"\"\"\n\n @classmethod\n def _read(cls, path, columns=None, **kwargs):\n \"\"\"\n Read data from the file path, returning a query compiler.\n\n Parameters\n ----------\n path : str or file-like object\n The filepath of the feather file.\n columns : array-like, optional\n Columns to read from file. 
If not provided, all columns are read.\n **kwargs : dict\n `read_feather` function kwargs.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n\n Notes\n -----\n `PyArrow` engine and local files only are supported for now,\n multi threading is set to False by default.\n PyArrow feather is used. Please refer to the documentation here\n https://arrow.apache.org/docs/python/api.html#feather-format\n \"\"\"\n path = cls.get_path(path)\n if columns is None:\n import_optional_dependency(\n \"pyarrow\", \"pyarrow is required to read feather files.\"\n )\n from pyarrow import ipc\n\n with OpenFile(\n path,\n **(kwargs.get(\"storage_options\", None) or {}),\n ) as file:\n # Opens the file to extract its metadata\n reader = ipc.open_file(file)\n # TODO: pyarrow's schema contains much more metadata than just column names, it also\n # has dtypes and index information that we could use when building a dataframe\n columns = reader.schema.names\n return cls.build_query_compiler(path, columns, use_threads=False)\n", "path": "modin/core/io/column_stores/feather_dispatcher.py"}]}
| 2,471 | 244 |
gh_patches_debug_15726
|
rasdani/github-patches
|
git_diff
|
AnalogJ__lexicon-998
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dreamhost integration will break November 2nd, 2021
It looks like Dreamhost is "retiring" some of its API calls on Nov 2nd, including `domain-list_domains`, which Lexicon seems to use for validating the domain during initial auth:
https://help.dreamhost.com/hc/en-us/articles/217555767-Domain-API-commands
https://github.com/AnalogJ/lexicon/blob/db82a948febee04972bb648ac59471b292c7e394/lexicon/providers/dreamhost.py#L103
They have begun sending emails to anyone using the API:
> The following API commands were run on your account in the last thirty days and will no longer function after November 2nd:
> - domain-list_domains
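
One possible direction (an untested sketch, not a drop-in fix): validate the domain via `dns-list_records` — which this provider already calls for listing records and which is not on the retirement list quoted above — inside the provider's `_authenticate` shown in the code below.

```python
# Sketch: authenticate without the retiring domain-list_domains command.
# Assumes dns-list_records keeps returning a "data" list of record dicts
# with "record" and "type" keys, as _list_records already relies on.
def _authenticate(self):
    self.domain_id = None
    payload = self._get("dns-list_records")
    data = payload.get("data") or []
    for record in data:
        if record.get("record", "") == self.domain and record.get("type", "") in ("A", "AAAA"):
            self.domain_id = self.domain
            break
    if self.domain_id is None:
        raise AuthenticationError("Domain not found")
```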
</issue>
<code>
[start of lexicon/providers/dreamhost.py]
1 """Module provider for Dreamhost"""
2 import base64
3 import json
4 import logging
5 import time
6
7 import requests
8
9 from lexicon.exceptions import AuthenticationError
10 from lexicon.providers.base import Provider as BaseProvider
11
12 LOGGER = logging.getLogger(__name__)
13
14 NAMESERVER_DOMAINS = ["dreamhost.com"]
15
16 _DATA_NON_EXIST_ERROR_LIST = [
17 "no_record",
18 "no_type",
19 "no_value",
20 "no_such_record",
21 "no_such_type",
22 "no_such_value",
23 "no_such_zone",
24 ]
25
26 _DATA_ALREADY_EXIST_ERROR_LIST = [
27 "record_already_exists_not_editable",
28 "record_already_exists_remove_first",
29 "CNAME_already_on_record",
30 ]
31
32
33 class NonExistError(Exception):
34 """NonExistError"""
35
36
37 class AlreadyExistError(Exception):
38 """AlreadyExistError"""
39
40
41 def provider_parser(subparser):
42 """Module provider for Dreamhost"""
43 subparser.add_argument("--auth-token", help="specify api key for authentication")
44
45
46 class Provider(BaseProvider):
47 """Provider class for Dreamhost"""
48
49 def __init__(self, config):
50 super(Provider, self).__init__(config)
51 self.domain_id = None
52 self.api_endpoint = "https://api.dreamhost.com/"
53
54 # Dreamhost provides no identifier for a record.
55 # Furthermore, Dreamhost requires type, record, value to delete a record.
56 # The record defined in lexicon is {type, name, content, id}
57 # We use base64(json({'type', 'name', 'content'}))
58 # as the identifier of Dreamhost record.
59 @staticmethod
60 def _identifier(dreamhost_record):
61 id_struct = {
62 "type": dreamhost_record["type"],
63 "name": dreamhost_record["record"],
64 "content": dreamhost_record["value"],
65 }
66 return base64.urlsafe_b64encode(json.dumps(id_struct).encode("utf-8")).decode(
67 "utf-8"
68 )
69
70 # The information in identifier follows the record in lexicon.
71 # Provider._record_to_dreamhost_record transfers to dreamhost-based record.
72 @staticmethod
73 def _id_to_dreamhost_record(identifier):
74 record = json.loads(
75 base64.urlsafe_b64decode(identifier.encode("utf-8")).decode("utf-8")
76 )
77 dreamhost_record = Provider._record_to_dreamhost_record(record)
78 return dreamhost_record
79
80 # The information in identifier follows the record in lexicon.
81 # 'id' is added in the record.
82 @staticmethod
83 def _id_to_record(identifier):
84 record = json.loads(
85 base64.urlsafe_b64decode(identifier.encode("utf-8")).decode("utf-8")
86 )
87 record["id"] = identifier
88
89 return record
90
91 # Transferring lexicon-based record to Dreamhost-based record.
92 @staticmethod
93 def _record_to_dreamhost_record(record):
94 dreamhost_record = {
95 "type": record["type"],
96 "record": record["name"],
97 "value": record["content"],
98 }
99 return dreamhost_record
100
101 def _authenticate(self):
102 self.domain_id = None
103 payload = self._get("domain-list_domains")
104 data = payload.get("data", None)
105 if data is None:
106 raise AuthenticationError("Domain not found")
107
108 for domain in data:
109 if domain.get("domain", "") == self.domain:
110 self.domain_id = self.domain
111 if self.domain_id is None:
112 raise AuthenticationError("Domain not found")
113
114 def _create_record(self, rtype, name, content):
115 name = self._full_name(name)
116
117 try:
118 self._get(
119 "dns-add_record",
120 query_params={"record": name, "type": rtype, "value": content},
121 )
122 except AlreadyExistError:
123 pass
124
125 return True
126
127 # List all records. Return an empty list if no records found
128 # type, name and content are used to filter records.
129 # If possible filter during the query, otherwise filter after response is received.
130 def _list_records(self, rtype=None, name=None, content=None):
131
132 payload = self._get("dns-list_records")
133
134 resource_list = payload.get("data", None)
135 if not isinstance(resource_list, list):
136 raise Exception(f"unable to get records: {payload}")
137
138 resource_list = [
139 resource for resource in resource_list if resource["zone"] == self.domain
140 ]
141 if rtype:
142 resource_list = [
143 resource for resource in resource_list if resource["type"] == rtype
144 ]
145 if name:
146 name = self._full_name(name)
147 resource_list = [
148 resource for resource in resource_list if resource["record"] == name
149 ]
150 if content:
151 resource_list = [
152 resource for resource in resource_list if resource["value"] == content
153 ]
154
155 processed_records = []
156 for dreamhost_record in resource_list:
157 processed_records.append(
158 {
159 "id": Provider._identifier(dreamhost_record),
160 "type": dreamhost_record["type"],
161 "name": dreamhost_record["record"],
162 "content": dreamhost_record["value"],
163 }
164 )
165
166 return processed_records
167
168 # Create or update a record.
169 def _update_record(self, identifier, rtype=None, name=None, content=None):
170 if identifier:
171 try:
172 self._delete_record(identifier)
173 except NonExistError:
174 pass
175
176 return self._create_record(rtype=rtype, name=name, content=content)
177
178 # Delete existing records.
179 # If record does not exist, do nothing.
180 def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
181 to_deletes = []
182 if identifier:
183 record = Provider._id_to_record(identifier)
184 to_deletes.append(record)
185 else:
186 records = self._list_records(rtype=rtype, name=name, content=content)
187 to_deletes = records
188
189 # for-loop to delete deletes.
190 err = None
191 for each in to_deletes:
192 try:
193 dreamhost_record = Provider._record_to_dreamhost_record(each)
194 self._get("dns-remove_record", query_params=dreamhost_record)
195
196 except Exception as exception:
197 err = exception
198
199 # Sleeping for 1-second to avoid trigerring ddos protecting in case of looped requests
200 time.sleep(1)
201
202 if err is not None:
203 raise err
204
205 return True
206
207 # Helpers
208 def _request(self, action="GET", url="", data=None, query_params=None):
209 if data is None:
210 data = {}
211
212 if query_params is None:
213 query_params = {}
214
215 default_headers = {
216 "Accept": "application/json",
217 "Content-Type": "application/json",
218 }
219
220 query_params["key"] = self._get_provider_option("auth_token")
221 query_params["format"] = "json"
222 if "cmd" not in query_params:
223 query_params["cmd"] = url
224
225 response = requests.request(
226 action,
227 self.api_endpoint,
228 params=query_params,
229 data=json.dumps(data),
230 headers=default_headers,
231 )
232
233 # if the request fails for any reason, throw an error.
234 response.raise_for_status()
235 result = response.json()
236 if result.get("result", "") != "success":
237 err_msg = result.get("data", "")
238 if err_msg in _DATA_NON_EXIST_ERROR_LIST:
239 raise NonExistError(f"Dreamhost non-exist error: {result}")
240 if err_msg in _DATA_ALREADY_EXIST_ERROR_LIST:
241 raise AlreadyExistError(f"Dreamhost already-exist error: {result}")
242 raise Exception(f"Dreamhost api error: {result}")
243 return result
244
[end of lexicon/providers/dreamhost.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lexicon/providers/dreamhost.py b/lexicon/providers/dreamhost.py
--- a/lexicon/providers/dreamhost.py
+++ b/lexicon/providers/dreamhost.py
@@ -100,14 +100,15 @@
def _authenticate(self):
self.domain_id = None
- payload = self._get("domain-list_domains")
+ payload = self._get("dns-list_records")
data = payload.get("data", None)
if data is None:
raise AuthenticationError("Domain not found")
- for domain in data:
- if domain.get("domain", "") == self.domain:
+ for record in data:
+ if record.get("record", "") == self.domain and record.get("type", "") in ["A", "AAAA"]:
self.domain_id = self.domain
+ break
if self.domain_id is None:
raise AuthenticationError("Domain not found")
|
{"golden_diff": "diff --git a/lexicon/providers/dreamhost.py b/lexicon/providers/dreamhost.py\n--- a/lexicon/providers/dreamhost.py\n+++ b/lexicon/providers/dreamhost.py\n@@ -100,14 +100,15 @@\n \n def _authenticate(self):\n self.domain_id = None\n- payload = self._get(\"domain-list_domains\")\n+ payload = self._get(\"dns-list_records\")\n data = payload.get(\"data\", None)\n if data is None:\n raise AuthenticationError(\"Domain not found\")\n \n- for domain in data:\n- if domain.get(\"domain\", \"\") == self.domain:\n+ for record in data:\n+ if record.get(\"record\", \"\") == self.domain and record.get(\"type\", \"\") in [\"A\", \"AAAA\"]:\n self.domain_id = self.domain\n+ break\n if self.domain_id is None:\n raise AuthenticationError(\"Domain not found\")\n", "issue": "Dreamhost integration will break November 2nd, 2021\nIt looks like Dreamhost is \"retiring\" some of it's API calls Nov 2nd, including `domain-list_domains` which Lexicon seems to use for validating the domain during initial auth:\r\n\r\nhttps://help.dreamhost.com/hc/en-us/articles/217555767-Domain-API-commands\r\n\r\nhttps://github.com/AnalogJ/lexicon/blob/db82a948febee04972bb648ac59471b292c7e394/lexicon/providers/dreamhost.py#L103\r\n\r\nThey have begin sending email to anyone using the API:\r\n\r\n> The following API commands were run on your account in the last thirty days and will no longer function after November 2nd:\r\n> - domain-list_domains\n", "before_files": [{"content": "\"\"\"Module provider for Dreamhost\"\"\"\nimport base64\nimport json\nimport logging\nimport time\n\nimport requests\n\nfrom lexicon.exceptions import AuthenticationError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = [\"dreamhost.com\"]\n\n_DATA_NON_EXIST_ERROR_LIST = [\n \"no_record\",\n \"no_type\",\n \"no_value\",\n \"no_such_record\",\n \"no_such_type\",\n \"no_such_value\",\n \"no_such_zone\",\n]\n\n_DATA_ALREADY_EXIST_ERROR_LIST = [\n \"record_already_exists_not_editable\",\n \"record_already_exists_remove_first\",\n \"CNAME_already_on_record\",\n]\n\n\nclass NonExistError(Exception):\n \"\"\"NonExistError\"\"\"\n\n\nclass AlreadyExistError(Exception):\n \"\"\"AlreadyExistError\"\"\"\n\n\ndef provider_parser(subparser):\n \"\"\"Module provider for Dreamhost\"\"\"\n subparser.add_argument(\"--auth-token\", help=\"specify api key for authentication\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for Dreamhost\"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = \"https://api.dreamhost.com/\"\n\n # Dreamhost provides no identifier for a record.\n # Furthermore, Dreamhost requires type, record, value to delete a record.\n # The record defined in lexicon is {type, name, content, id}\n # We use base64(json({'type', 'name', 'content'}))\n # as the identifier of Dreamhost record.\n @staticmethod\n def _identifier(dreamhost_record):\n id_struct = {\n \"type\": dreamhost_record[\"type\"],\n \"name\": dreamhost_record[\"record\"],\n \"content\": dreamhost_record[\"value\"],\n }\n return base64.urlsafe_b64encode(json.dumps(id_struct).encode(\"utf-8\")).decode(\n \"utf-8\"\n )\n\n # The information in identifier follows the record in lexicon.\n # Provider._record_to_dreamhost_record transfers to dreamhost-based record.\n @staticmethod\n def _id_to_dreamhost_record(identifier):\n record = json.loads(\n base64.urlsafe_b64decode(identifier.encode(\"utf-8\")).decode(\"utf-8\")\n )\n 
dreamhost_record = Provider._record_to_dreamhost_record(record)\n return dreamhost_record\n\n # The information in identifier follows the record in lexicon.\n # 'id' is added in the record.\n @staticmethod\n def _id_to_record(identifier):\n record = json.loads(\n base64.urlsafe_b64decode(identifier.encode(\"utf-8\")).decode(\"utf-8\")\n )\n record[\"id\"] = identifier\n\n return record\n\n # Transferring lexicon-based record to Dreamhost-based record.\n @staticmethod\n def _record_to_dreamhost_record(record):\n dreamhost_record = {\n \"type\": record[\"type\"],\n \"record\": record[\"name\"],\n \"value\": record[\"content\"],\n }\n return dreamhost_record\n\n def _authenticate(self):\n self.domain_id = None\n payload = self._get(\"domain-list_domains\")\n data = payload.get(\"data\", None)\n if data is None:\n raise AuthenticationError(\"Domain not found\")\n\n for domain in data:\n if domain.get(\"domain\", \"\") == self.domain:\n self.domain_id = self.domain\n if self.domain_id is None:\n raise AuthenticationError(\"Domain not found\")\n\n def _create_record(self, rtype, name, content):\n name = self._full_name(name)\n\n try:\n self._get(\n \"dns-add_record\",\n query_params={\"record\": name, \"type\": rtype, \"value\": content},\n )\n except AlreadyExistError:\n pass\n\n return True\n\n # List all records. Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n\n payload = self._get(\"dns-list_records\")\n\n resource_list = payload.get(\"data\", None)\n if not isinstance(resource_list, list):\n raise Exception(f\"unable to get records: {payload}\")\n\n resource_list = [\n resource for resource in resource_list if resource[\"zone\"] == self.domain\n ]\n if rtype:\n resource_list = [\n resource for resource in resource_list if resource[\"type\"] == rtype\n ]\n if name:\n name = self._full_name(name)\n resource_list = [\n resource for resource in resource_list if resource[\"record\"] == name\n ]\n if content:\n resource_list = [\n resource for resource in resource_list if resource[\"value\"] == content\n ]\n\n processed_records = []\n for dreamhost_record in resource_list:\n processed_records.append(\n {\n \"id\": Provider._identifier(dreamhost_record),\n \"type\": dreamhost_record[\"type\"],\n \"name\": dreamhost_record[\"record\"],\n \"content\": dreamhost_record[\"value\"],\n }\n )\n\n return processed_records\n\n # Create or update a record.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n if identifier:\n try:\n self._delete_record(identifier)\n except NonExistError:\n pass\n\n return self._create_record(rtype=rtype, name=name, content=content)\n\n # Delete existing records.\n # If record does not exist, do nothing.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n to_deletes = []\n if identifier:\n record = Provider._id_to_record(identifier)\n to_deletes.append(record)\n else:\n records = self._list_records(rtype=rtype, name=name, content=content)\n to_deletes = records\n\n # for-loop to delete deletes.\n err = None\n for each in to_deletes:\n try:\n dreamhost_record = Provider._record_to_dreamhost_record(each)\n self._get(\"dns-remove_record\", query_params=dreamhost_record)\n\n except Exception as exception:\n err = exception\n\n # Sleeping for 1-second to avoid trigerring ddos protecting in case of looped requests\n 
time.sleep(1)\n\n if err is not None:\n raise err\n\n return True\n\n # Helpers\n def _request(self, action=\"GET\", url=\"\", data=None, query_params=None):\n if data is None:\n data = {}\n\n if query_params is None:\n query_params = {}\n\n default_headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n\n query_params[\"key\"] = self._get_provider_option(\"auth_token\")\n query_params[\"format\"] = \"json\"\n if \"cmd\" not in query_params:\n query_params[\"cmd\"] = url\n\n response = requests.request(\n action,\n self.api_endpoint,\n params=query_params,\n data=json.dumps(data),\n headers=default_headers,\n )\n\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n result = response.json()\n if result.get(\"result\", \"\") != \"success\":\n err_msg = result.get(\"data\", \"\")\n if err_msg in _DATA_NON_EXIST_ERROR_LIST:\n raise NonExistError(f\"Dreamhost non-exist error: {result}\")\n if err_msg in _DATA_ALREADY_EXIST_ERROR_LIST:\n raise AlreadyExistError(f\"Dreamhost already-exist error: {result}\")\n raise Exception(f\"Dreamhost api error: {result}\")\n return result\n", "path": "lexicon/providers/dreamhost.py"}]}
| 3,042 | 206 |
gh_patches_debug_38514
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-1316
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
verify that all entry points of Buildbot are at least importable
This ticket is a migrated Trac ticket [3470](http://trac.buildbot.net/ticket/3470)
People who contributed to the original ticket: @rutsky
Ticket created on: `Feb 24 2016`
Ticket last modified on: `Feb 24 2016`
---
We have a big bunch of plugins exported in setup.py, and it looks like not all of them are exported correctly: https://github.com/buildbot/buildbot/pull/2006/files#diff-9030ee1b7d992477393062826c5decfeL345
It would be nice to have a test or check for that.
---
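
A rough sketch of such a check (untested; the entry-point group names below are placeholders for illustration, not the real list from setup.py):

```python
# Walk the advertised entry points and report any that cannot be imported.
import pkg_resources

def find_broken_entry_points(groups=("buildbot.changes", "buildbot.schedulers", "buildbot.steps")):
    broken = []
    for group in groups:
        for ep in pkg_resources.iter_entry_points(group):
            try:
                ep.load()  # raises ImportError/AttributeError for a bad export
            except Exception as exc:
                broken.append((group, ep.name, exc))
    return broken
```

Wrapping this in a unit test would catch a plugin that is exported in setup.py but no longer importable.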
</issue>
<code>
[start of master/buildbot/db/schedulers.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import sqlalchemy as sa
17 import sqlalchemy.exc
18
19 from buildbot.db import NULL
20 from buildbot.db import base
21 from twisted.internet import defer
22
23
24 class SchedulerAlreadyClaimedError(Exception):
25 pass
26
27
28 class SchedulersConnectorComponent(base.DBConnectorComponent):
29 # Documentation is in developer/db.rst
30
31 def classifyChanges(self, schedulerid, classifications):
32 def thd(conn):
33 transaction = conn.begin()
34 tbl = self.db.model.scheduler_changes
35 ins_q = tbl.insert()
36 upd_q = tbl.update(
37 ((tbl.c.schedulerid == schedulerid)
38 & (tbl.c.changeid == sa.bindparam('wc_changeid'))))
39 for changeid, important in classifications.items():
40 # convert the 'important' value into an integer, since that
41 # is the column type
42 imp_int = important and 1 or 0
43 try:
44 conn.execute(ins_q,
45 schedulerid=schedulerid,
46 changeid=changeid,
47 important=imp_int)
48 except (sqlalchemy.exc.ProgrammingError,
49 sqlalchemy.exc.IntegrityError):
50 # insert failed, so try an update
51 conn.execute(upd_q,
52 wc_changeid=changeid,
53 important=imp_int)
54
55 transaction.commit()
56 return self.db.pool.do(thd)
57
58 def flushChangeClassifications(self, schedulerid, less_than=None):
59 def thd(conn):
60 sch_ch_tbl = self.db.model.scheduler_changes
61 wc = (sch_ch_tbl.c.schedulerid == schedulerid)
62 if less_than is not None:
63 wc = wc & (sch_ch_tbl.c.changeid < less_than)
64 q = sch_ch_tbl.delete(whereclause=wc)
65 conn.execute(q)
66 return self.db.pool.do(thd)
67
68 def getChangeClassifications(self, schedulerid, branch=-1,
69 repository=-1, project=-1,
70 codebase=-1):
71 # -1 here stands for "argument not given", since None has meaning
72 # as a branch
73 def thd(conn):
74 sch_ch_tbl = self.db.model.scheduler_changes
75 ch_tbl = self.db.model.changes
76
77 wc = (sch_ch_tbl.c.schedulerid == schedulerid)
78
79 # may need to filter further based on branch, etc
80 extra_wheres = []
81 if branch != -1:
82 extra_wheres.append(ch_tbl.c.branch == branch)
83 if repository != -1:
84 extra_wheres.append(ch_tbl.c.repository == repository)
85 if project != -1:
86 extra_wheres.append(ch_tbl.c.project == project)
87 if codebase != -1:
88 extra_wheres.append(ch_tbl.c.codebase == codebase)
89
90 # if we need to filter further append those, as well as a join
91 # on changeid (but just once for that one)
92 if extra_wheres:
93 wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)
94 for w in extra_wheres:
95 wc &= w
96
97 q = sa.select(
98 [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],
99 whereclause=wc)
100 return dict([(r.changeid, [False, True][r.important])
101 for r in conn.execute(q)])
102 return self.db.pool.do(thd)
103
104 def findSchedulerId(self, name):
105 tbl = self.db.model.schedulers
106 name_hash = self.hashColumns(name)
107 return self.findSomethingId(
108 tbl=tbl,
109 whereclause=(tbl.c.name_hash == name_hash),
110 insert_values=dict(
111 name=name,
112 name_hash=name_hash,
113 ))
114
115 def setSchedulerMaster(self, schedulerid, masterid):
116 def thd(conn):
117 sch_mst_tbl = self.db.model.scheduler_masters
118
119 # handle the masterid=None case to get it out of the way
120 if masterid is None:
121 q = sch_mst_tbl.delete(
122 whereclause=(sch_mst_tbl.c.schedulerid == schedulerid))
123 conn.execute(q)
124 return
125
126 # try a blind insert..
127 try:
128 q = sch_mst_tbl.insert()
129 conn.execute(q,
130 dict(schedulerid=schedulerid, masterid=masterid))
131 except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
132 # someone already owns this scheduler.
133 raise SchedulerAlreadyClaimedError
134
135 return self.db.pool.do(thd)
136
137 @defer.inlineCallbacks
138 def getScheduler(self, schedulerid):
139 sch = yield self.getSchedulers(_schedulerid=schedulerid)
140 if sch:
141 defer.returnValue(sch[0])
142
143 def getSchedulers(self, active=None, masterid=None, _schedulerid=None):
144 def thd(conn):
145 sch_tbl = self.db.model.schedulers
146 sch_mst_tbl = self.db.model.scheduler_masters
147
148 # handle the trivial case of masterid=xx and active=False
149 if masterid is not None and active is not None and not active:
150 return []
151
152 join = sch_tbl.outerjoin(sch_mst_tbl,
153 (sch_tbl.c.id == sch_mst_tbl.c.schedulerid))
154
155 # if we're given a _schedulerid, select only that row
156 wc = None
157 if _schedulerid:
158 wc = (sch_tbl.c.id == _schedulerid)
159 else:
160 # otherwise, filter with active, if necessary
161 if masterid is not None:
162 wc = (sch_mst_tbl.c.masterid == masterid)
163 elif active:
164 wc = (sch_mst_tbl.c.masterid != NULL)
165 elif active is not None:
166 wc = (sch_mst_tbl.c.masterid == NULL)
167
168 q = sa.select([sch_tbl.c.id, sch_tbl.c.name,
169 sch_mst_tbl.c.masterid],
170 from_obj=join, whereclause=wc)
171
172 return [dict(id=row.id, name=row.name,
173 masterid=row.masterid)
174 for row in conn.execute(q).fetchall()]
175 return self.db.pool.do(thd)
176
[end of master/buildbot/db/schedulers.py]
[start of master/buildbot/db/migrate/versions/037_buildrequests_builderid.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import hashlib
17 import sqlalchemy as sa
18
19 from migrate import changeset
20
21
22 def add_new_schema_parts(migrate_engine):
23 metadata = sa.MetaData()
24 metadata.bind = migrate_engine
25 sa.Table('builders', metadata, autoload=True)
26 sa.Table('buildsets', metadata, autoload=True)
27
28 buildrequests = sa.Table('buildrequests', metadata, autoload=True)
29
30 builderid = sa.Column('builderid', sa.Integer, sa.ForeignKey('builders.id'))
31 builderid.create(buildrequests)
32
33 # Remove all index
34 for index in buildrequests.indexes:
35 index.drop()
36
37
38 def migrate_data(migrate_engine):
39 metadata = sa.MetaData()
40 metadata.bind = migrate_engine
41
42 # set up the tables we'll need to migrate
43 buildrequests = sa.Table('buildrequests', metadata, autoload=True)
44 builders = sa.Table('builders', metadata, autoload=True)
45
46 bName2bID = dict()
47 q = sa.select([builders.c.id, builders.c.name])
48 for row in migrate_engine.execute(q).fetchall():
49 bName2bID[row.name] = row.id
50
51 def hashColumns(*args):
52 # copy paste from buildbot/db/base.py
53 def encode(x):
54 try:
55 return x.encode('utf8')
56 except AttributeError:
57 if x is None:
58 return '\xf5'
59 return str(x)
60 return hashlib.sha1('\0'.join(map(encode, args))).hexdigest()
61
62 def findbuilderid(buildername):
63 bid = bName2bID.get(buildername)
64 if bid is None:
65 r = migrate_engine.execute(builders.insert(), [{
66 'name': buildername,
67 'name_hash': hashColumns(buildername),
68 }])
69 bid = r.inserted_primary_key[0]
70 bName2bID[buildername] = bid
71 return bid
72
73 c = buildrequests.c
74 q = sa.select([c.id, c.buildername])
75 for row in migrate_engine.execute(q).fetchall():
76 builderid = findbuilderid(row.buildername)
77 migrate_engine.execute(
78 buildrequests.update(whereclause=(c.id == row.id)),
79 builderid=builderid)
80
81
82 def remove_buildername(migrate_engine):
83
84 metadata = sa.MetaData()
85 metadata.bind = migrate_engine
86
87 sa.Table('builders', metadata, autoload=True)
88 sa.Table('buildsets', metadata, autoload=True)
89
90 # Specify what the new table should look like
91 buildrequests = sa.Table('buildrequests', metadata,
92 sa.Column('id', sa.Integer, primary_key=True),
93 sa.Column('buildsetid', sa.Integer, sa.ForeignKey("buildsets.id"),
94 nullable=False),
95 sa.Column('builderid', sa.Integer, sa.ForeignKey('builders.id'),
96 nullable=False),
97 sa.Column('priority', sa.Integer, nullable=False,
98 server_default=sa.DefaultClause("0")),
99 sa.Column('complete', sa.Integer,
100 server_default=sa.DefaultClause("0")),
101 sa.Column('results', sa.SmallInteger),
102 sa.Column('submitted_at', sa.Integer, nullable=False),
103 sa.Column('complete_at', sa.Integer),
104 sa.Column('waited_for', sa.SmallInteger,
105 server_default=sa.DefaultClause("0")),
106 )
107 changeset.drop_column(
108 sa.Column('buildername', sa.String(length=256), nullable=False),
109 table=buildrequests,
110 metadata=metadata,
111 engine=migrate_engine)
112
113 changeset.alter_column(
114 sa.Column('builderid', sa.Integer, sa.ForeignKey("builders.id"), nullable=False),
115 table='buildrequests',
116 metadata=metadata,
117 engine=migrate_engine)
118
119 idx = sa.Index('buildrequests_builderid', buildrequests.c.builderid)
120 idx.create(migrate_engine)
121 idx = sa.Index('buildrequests_buildsetid', buildrequests.c.buildsetid)
122 idx.create(migrate_engine)
123 idx = sa.Index('buildrequests_complete', buildrequests.c.complete)
124 idx.create(migrate_engine)
125
126
127 def upgrade(migrate_engine):
128 # add a 'builderid' column to buildrequests
129 add_new_schema_parts(migrate_engine)
130 # migrate the data to new tables
131 migrate_data(migrate_engine)
132
133 # Finally remove the buildername column
134 remove_buildername(migrate_engine)
135
[end of master/buildbot/db/migrate/versions/037_buildrequests_builderid.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py b/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py
--- a/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py
+++ b/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py
@@ -86,33 +86,14 @@
sa.Table('builders', metadata, autoload=True)
sa.Table('buildsets', metadata, autoload=True)
+ buildrequests = sa.Table('buildrequests', metadata, autoload=True)
# Specify what the new table should look like
- buildrequests = sa.Table('buildrequests', metadata,
- sa.Column('id', sa.Integer, primary_key=True),
- sa.Column('buildsetid', sa.Integer, sa.ForeignKey("buildsets.id"),
- nullable=False),
- sa.Column('builderid', sa.Integer, sa.ForeignKey('builders.id'),
- nullable=False),
- sa.Column('priority', sa.Integer, nullable=False,
- server_default=sa.DefaultClause("0")),
- sa.Column('complete', sa.Integer,
- server_default=sa.DefaultClause("0")),
- sa.Column('results', sa.SmallInteger),
- sa.Column('submitted_at', sa.Integer, nullable=False),
- sa.Column('complete_at', sa.Integer),
- sa.Column('waited_for', sa.SmallInteger,
- server_default=sa.DefaultClause("0")),
- )
- changeset.drop_column(
- sa.Column('buildername', sa.String(length=256), nullable=False),
- table=buildrequests,
- metadata=metadata,
- engine=migrate_engine)
+ buildrequests.c.buildername.drop()
changeset.alter_column(
sa.Column('builderid', sa.Integer, sa.ForeignKey("builders.id"), nullable=False),
- table='buildrequests',
+ table=buildrequests,
metadata=metadata,
engine=migrate_engine)
@@ -129,6 +110,5 @@
add_new_schema_parts(migrate_engine)
# migrate the data to new tables
migrate_data(migrate_engine)
-
# Finally remove the buildername column
remove_buildername(migrate_engine)
diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py
--- a/master/buildbot/db/schedulers.py
+++ b/master/buildbot/db/schedulers.py
@@ -47,6 +47,8 @@
important=imp_int)
except (sqlalchemy.exc.ProgrammingError,
sqlalchemy.exc.IntegrityError):
+ transaction.rollback()
+ transaction = conn.begin()
# insert failed, so try an update
conn.execute(upd_q,
wc_changeid=changeid,
|
{"golden_diff": "diff --git a/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py b/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py\n--- a/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py\n+++ b/master/buildbot/db/migrate/versions/037_buildrequests_builderid.py\n@@ -86,33 +86,14 @@\n \n sa.Table('builders', metadata, autoload=True)\n sa.Table('buildsets', metadata, autoload=True)\n+ buildrequests = sa.Table('buildrequests', metadata, autoload=True)\n \n # Specify what the new table should look like\n- buildrequests = sa.Table('buildrequests', metadata,\n- sa.Column('id', sa.Integer, primary_key=True),\n- sa.Column('buildsetid', sa.Integer, sa.ForeignKey(\"buildsets.id\"),\n- nullable=False),\n- sa.Column('builderid', sa.Integer, sa.ForeignKey('builders.id'),\n- nullable=False),\n- sa.Column('priority', sa.Integer, nullable=False,\n- server_default=sa.DefaultClause(\"0\")),\n- sa.Column('complete', sa.Integer,\n- server_default=sa.DefaultClause(\"0\")),\n- sa.Column('results', sa.SmallInteger),\n- sa.Column('submitted_at', sa.Integer, nullable=False),\n- sa.Column('complete_at', sa.Integer),\n- sa.Column('waited_for', sa.SmallInteger,\n- server_default=sa.DefaultClause(\"0\")),\n- )\n- changeset.drop_column(\n- sa.Column('buildername', sa.String(length=256), nullable=False),\n- table=buildrequests,\n- metadata=metadata,\n- engine=migrate_engine)\n+ buildrequests.c.buildername.drop()\n \n changeset.alter_column(\n sa.Column('builderid', sa.Integer, sa.ForeignKey(\"builders.id\"), nullable=False),\n- table='buildrequests',\n+ table=buildrequests,\n metadata=metadata,\n engine=migrate_engine)\n \n@@ -129,6 +110,5 @@\n add_new_schema_parts(migrate_engine)\n # migrate the data to new tables\n migrate_data(migrate_engine)\n-\n # Finally remove the buildername column\n remove_buildername(migrate_engine)\ndiff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py\n--- a/master/buildbot/db/schedulers.py\n+++ b/master/buildbot/db/schedulers.py\n@@ -47,6 +47,8 @@\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n+ transaction.rollback()\n+ transaction = conn.begin()\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n", "issue": "verify that all entry points of Buildbot are at least importable\nThis ticket is a migrated Trac ticket [3470](http://trac.buildbot.net/ticket/3470)\n\nPeople contributed to the original ticket: @rutsky\nTicket created on: `Feb 24 2016`\nTicket last modified on: `Feb 24 2016`\n\n---\n\nWe have big bunch of plugins exported in setup.py, and looks like not all plugins are exported correctly: https://github.com/buildbot/buildbot/pull/2006/files#diff-9030ee1b7d992477393062826c5decfeL345\n\nIt would be nice to have test or check for that.\n\n\n---\n\n\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\n\nfrom buildbot.db import NULL\nfrom buildbot.db import base\nfrom twisted.internet import defer\n\n\nclass SchedulerAlreadyClaimedError(Exception):\n pass\n\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/db.rst\n\n def classifyChanges(self, schedulerid, classifications):\n def thd(conn):\n transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.schedulerid == schedulerid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n schedulerid=schedulerid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, schedulerid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.schedulerid == schedulerid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n def getChangeClassifications(self, schedulerid, branch=-1,\n repository=-1, project=-1,\n codebase=-1):\n # -1 here stands for \"argument not given\", since None has meaning\n # as a branch\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.schedulerid == schedulerid)\n\n # may need to filter further based on branch, etc\n extra_wheres = []\n if branch != -1:\n extra_wheres.append(ch_tbl.c.branch == branch)\n if repository != -1:\n extra_wheres.append(ch_tbl.c.repository == repository)\n if project != -1:\n extra_wheres.append(ch_tbl.c.project == project)\n if codebase != -1:\n extra_wheres.append(ch_tbl.c.codebase == codebase)\n\n # if we need to filter further append those, as well as a join\n # on changeid (but just once for that one)\n if extra_wheres:\n wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n for w in extra_wheres:\n wc &= w\n\n q = sa.select(\n [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],\n whereclause=wc)\n return dict([(r.changeid, [False, True][r.important])\n for r in conn.execute(q)])\n return self.db.pool.do(thd)\n\n def findSchedulerId(self, name):\n tbl = self.db.model.schedulers\n name_hash = self.hashColumns(name)\n return self.findSomethingId(\n tbl=tbl,\n whereclause=(tbl.c.name_hash == name_hash),\n insert_values=dict(\n name=name,\n name_hash=name_hash,\n ))\n\n def setSchedulerMaster(self, schedulerid, masterid):\n def thd(conn):\n sch_mst_tbl = self.db.model.scheduler_masters\n\n # handle the masterid=None case to get it out of the way\n if masterid is None:\n q = sch_mst_tbl.delete(\n whereclause=(sch_mst_tbl.c.schedulerid == schedulerid))\n conn.execute(q)\n return\n\n # try a blind insert..\n try:\n q = sch_mst_tbl.insert()\n conn.execute(q,\n 
dict(schedulerid=schedulerid, masterid=masterid))\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n # someone already owns this scheduler.\n raise SchedulerAlreadyClaimedError\n\n return self.db.pool.do(thd)\n\n @defer.inlineCallbacks\n def getScheduler(self, schedulerid):\n sch = yield self.getSchedulers(_schedulerid=schedulerid)\n if sch:\n defer.returnValue(sch[0])\n\n def getSchedulers(self, active=None, masterid=None, _schedulerid=None):\n def thd(conn):\n sch_tbl = self.db.model.schedulers\n sch_mst_tbl = self.db.model.scheduler_masters\n\n # handle the trivial case of masterid=xx and active=False\n if masterid is not None and active is not None and not active:\n return []\n\n join = sch_tbl.outerjoin(sch_mst_tbl,\n (sch_tbl.c.id == sch_mst_tbl.c.schedulerid))\n\n # if we're given a _schedulerid, select only that row\n wc = None\n if _schedulerid:\n wc = (sch_tbl.c.id == _schedulerid)\n else:\n # otherwise, filter with active, if necessary\n if masterid is not None:\n wc = (sch_mst_tbl.c.masterid == masterid)\n elif active:\n wc = (sch_mst_tbl.c.masterid != NULL)\n elif active is not None:\n wc = (sch_mst_tbl.c.masterid == NULL)\n\n q = sa.select([sch_tbl.c.id, sch_tbl.c.name,\n sch_mst_tbl.c.masterid],\n from_obj=join, whereclause=wc)\n\n return [dict(id=row.id, name=row.name,\n masterid=row.masterid)\n for row in conn.execute(q).fetchall()]\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport hashlib\nimport sqlalchemy as sa\n\nfrom migrate import changeset\n\n\ndef add_new_schema_parts(migrate_engine):\n metadata = sa.MetaData()\n metadata.bind = migrate_engine\n sa.Table('builders', metadata, autoload=True)\n sa.Table('buildsets', metadata, autoload=True)\n\n buildrequests = sa.Table('buildrequests', metadata, autoload=True)\n\n builderid = sa.Column('builderid', sa.Integer, sa.ForeignKey('builders.id'))\n builderid.create(buildrequests)\n\n # Remove all index\n for index in buildrequests.indexes:\n index.drop()\n\n\ndef migrate_data(migrate_engine):\n metadata = sa.MetaData()\n metadata.bind = migrate_engine\n\n # set up the tables we'll need to migrate\n buildrequests = sa.Table('buildrequests', metadata, autoload=True)\n builders = sa.Table('builders', metadata, autoload=True)\n\n bName2bID = dict()\n q = sa.select([builders.c.id, builders.c.name])\n for row in migrate_engine.execute(q).fetchall():\n bName2bID[row.name] = row.id\n\n def hashColumns(*args):\n # copy paste from buildbot/db/base.py\n def encode(x):\n try:\n return x.encode('utf8')\n except AttributeError:\n if x is None:\n return '\\xf5'\n return str(x)\n return hashlib.sha1('\\0'.join(map(encode, args))).hexdigest()\n\n def findbuilderid(buildername):\n bid = bName2bID.get(buildername)\n if bid is None:\n r = migrate_engine.execute(builders.insert(), [{\n 'name': buildername,\n 'name_hash': hashColumns(buildername),\n }])\n bid = r.inserted_primary_key[0]\n bName2bID[buildername] = bid\n return bid\n\n c = buildrequests.c\n q = sa.select([c.id, c.buildername])\n for row in migrate_engine.execute(q).fetchall():\n builderid = findbuilderid(row.buildername)\n migrate_engine.execute(\n buildrequests.update(whereclause=(c.id == row.id)),\n builderid=builderid)\n\n\ndef remove_buildername(migrate_engine):\n\n metadata = sa.MetaData()\n metadata.bind = migrate_engine\n\n sa.Table('builders', metadata, autoload=True)\n sa.Table('buildsets', metadata, autoload=True)\n\n # Specify what the new table should look like\n buildrequests = sa.Table('buildrequests', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('buildsetid', sa.Integer, sa.ForeignKey(\"buildsets.id\"),\n nullable=False),\n sa.Column('builderid', sa.Integer, sa.ForeignKey('builders.id'),\n nullable=False),\n sa.Column('priority', sa.Integer, nullable=False,\n server_default=sa.DefaultClause(\"0\")),\n sa.Column('complete', sa.Integer,\n server_default=sa.DefaultClause(\"0\")),\n sa.Column('results', sa.SmallInteger),\n sa.Column('submitted_at', sa.Integer, nullable=False),\n sa.Column('complete_at', sa.Integer),\n sa.Column('waited_for', sa.SmallInteger,\n server_default=sa.DefaultClause(\"0\")),\n )\n changeset.drop_column(\n sa.Column('buildername', sa.String(length=256), nullable=False),\n table=buildrequests,\n metadata=metadata,\n engine=migrate_engine)\n\n changeset.alter_column(\n sa.Column('builderid', sa.Integer, sa.ForeignKey(\"builders.id\"), nullable=False),\n table='buildrequests',\n metadata=metadata,\n engine=migrate_engine)\n\n idx = sa.Index('buildrequests_builderid', buildrequests.c.builderid)\n idx.create(migrate_engine)\n idx = sa.Index('buildrequests_buildsetid', buildrequests.c.buildsetid)\n 
idx.create(migrate_engine)\n idx = sa.Index('buildrequests_complete', buildrequests.c.complete)\n idx.create(migrate_engine)\n\n\ndef upgrade(migrate_engine):\n # add a 'builderid' column to buildrequests\n add_new_schema_parts(migrate_engine)\n # migrate the data to new tables\n migrate_data(migrate_engine)\n\n # Finally remove the buildername column\n remove_buildername(migrate_engine)\n", "path": "master/buildbot/db/migrate/versions/037_buildrequests_builderid.py"}]}
| 4,003 | 604 |
gh_patches_debug_19694
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-2875
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update cached logging level in C when Python log level is changed
As an optimization, gpi_log caches the GPI logging level. This cached value is not currently updated when the "cocotb.gpi" logger's level is updated in Python. Fixing this will require creating a `logging.Handler` subclass that calls `simulator.set_level` when its `setLevel` method is called.
</issue>
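One possible shape of the fix, sketched with illustrative names: the issue suggests a `logging.Handler` subclass, but a `Logger` subclass whose `setLevel` refreshes the C-side cache works for the same purpose and is what is sketched here. It reuses `simulator.log_level`, the call `cocotb/log.py` below already makes during initial configuration; which logger name to match on ("gpi" vs. "cocotb.gpi") is an assumption.

```python
import logging

from cocotb import simulator  # assumed importable at module scope


class SimLevelSyncLogger(logging.getLoggerClass()):
    """Logger whose setLevel also refreshes the cached GPI log level."""

    def setLevel(self, level: int) -> None:
        super().setLevel(level)
        # Exact GPI logger name is an assumption; adjust to however cocotb names it.
        if self.name in ("gpi", "cocotb.gpi"):
            simulator.log_level(level)
```

Installed with `logging.setLoggerClass(SimLevelSyncLogger)` before the GPI logger is created, any later `setLevel` call on that logger would keep the cached C-side level in sync.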
<code>
[start of cocotb/log.py]
1 # Copyright (c) 2013, 2018 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Everything related to logging
30 """
31
32 import logging
33 import os
34 import sys
35 import warnings
36
37 import cocotb.ANSI as ANSI
38 from cocotb.utils import get_sim_time, get_time_from_sim_steps, want_color_output
39
40 try:
41 _suppress = int(os.environ.get("COCOTB_REDUCED_LOG_FMT", "1"))
42 except ValueError:
43 _suppress = 1
44
45 # Column alignment
46 _LEVEL_CHARS = len("CRITICAL") # noqa
47 _RECORD_CHARS = 35 # noqa
48 _FILENAME_CHARS = 20 # noqa
49 _LINENO_CHARS = 4 # noqa
50 _FUNCNAME_CHARS = 31 # noqa
51
52 # Custom log level
53 logging.TRACE = 5
54 logging.addLevelName(5, "TRACE")
55
56 # Default log level if not overwritten by the user.
57 _COCOTB_LOG_LEVEL_DEFAULT = "INFO"
58
59
60 def default_config():
61 """Apply the default cocotb log formatting to the root logger.
62
63 This hooks up the logger to write to stdout, using either
64 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
65 on whether colored output is requested. It also adds a
66 :class:`SimTimeContextFilter` filter so that
67 :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.
68
69 The logging level for cocotb logs is set based on the
70 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.
71
72 If desired, this logging configuration can be overwritten by calling
73 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by
74 manually resetting the root logger instance.
75 An example of this can be found in the section on :ref:`rotating-logger`.
76
77 .. versionadded:: 1.4
78 """
79 # construct an appropriate handler
80 hdlr = logging.StreamHandler(sys.stdout)
81 hdlr.addFilter(SimTimeContextFilter())
82 if want_color_output():
83 hdlr.setFormatter(SimColourLogFormatter())
84 else:
85 hdlr.setFormatter(SimLogFormatter())
86
87 logging.setLoggerClass(SimBaseLog) # For backwards compatibility
88 logging.basicConfig()
89 logging.getLogger().handlers = [hdlr] # overwrite default handlers
90
91 # apply level settings for cocotb
92 log = logging.getLogger("cocotb")
93
94 try:
95 # All log levels are upper case, convert the user input for convenience.
96 level = os.environ["COCOTB_LOG_LEVEL"].upper()
97 except KeyError:
98 level = _COCOTB_LOG_LEVEL_DEFAULT
99
100 try:
101 log.setLevel(level)
102 except ValueError:
103 valid_levels = ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "TRACE")
104 raise ValueError(
105 "Invalid log level %r passed through the "
106 "COCOTB_LOG_LEVEL environment variable. Valid log "
107 "levels: %s" % (level, ", ".join(valid_levels))
108 )
109
110 # Notify GPI of log level, which it uses as an optimization to avoid
111 # calling into Python.
112 from cocotb import simulator
113
114 simulator.log_level(log.getEffectiveLevel())
115
116
117 class SimBaseLog(logging.getLoggerClass()):
118 """This class only exists for backwards compatibility"""
119
120 @property
121 def logger(self):
122 warnings.warn(
123 "the .logger attribute should not be used now that `SimLog` "
124 "returns a native logger instance directly.",
125 DeprecationWarning,
126 stacklevel=2,
127 )
128 return self
129
130 @property
131 def colour(self):
132 warnings.warn(
133 "the .colour attribute may be removed in future, use the "
134 "equivalent `cocotb.utils.want_color_output()` instead",
135 DeprecationWarning,
136 stacklevel=2,
137 )
138 return want_color_output()
139
140
141 # this used to be a class, hence the unusual capitalization
142 def SimLog(name, ident=None):
143 """Like logging.getLogger, but append a numeric identifier to the name"""
144 if ident is not None:
145 name = f"{name}.0x{ident:x}"
146 return logging.getLogger(name)
147
148
149 class SimTimeContextFilter(logging.Filter):
150 """
151 A filter to inject simulator times into the log records.
152
153 This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.
154
155 This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.
156
157 .. versionadded:: 1.4
158 """
159
160 # needed to make our docs render well
161 def __init__(self):
162 """"""
163 super().__init__()
164
165 def filter(self, record):
166 try:
167 record.created_sim_time = get_sim_time()
168 except RecursionError:
169 # get_sim_time may try to log - if that happens, we can't
170 # attach a simulator time to this message.
171 record.created_sim_time = None
172 return True
173
174
175 class SimLogFormatter(logging.Formatter):
176 """Log formatter to provide consistent log message handling.
177
178 This will only add simulator timestamps if the handler object this
179 formatter is attached to has a :class:`SimTimeContextFilter` filter
180 attached, which cocotb ensures by default.
181 """
182
183 # Removes the arguments from the base class. Docstring needed to make
184 # sphinx happy.
185 def __init__(self):
186 """Takes no arguments."""
187 super().__init__()
188
189 # Justify and truncate
190 @staticmethod
191 def ljust(string, chars):
192 if len(string) > chars:
193 return ".." + string[(chars - 2) * -1 :]
194 return string.ljust(chars)
195
196 @staticmethod
197 def rjust(string, chars):
198 if len(string) > chars:
199 return ".." + string[(chars - 2) * -1 :]
200 return string.rjust(chars)
201
202 def _format(self, level, record, msg, coloured=False):
203 sim_time = getattr(record, "created_sim_time", None)
204 if sim_time is None:
205 sim_time_str = " -.--ns"
206 else:
207 time_ns = get_time_from_sim_steps(sim_time, "ns")
208 sim_time_str = f"{time_ns:6.2f}ns"
209 prefix = sim_time_str.rjust(11) + " " + level + " "
210 if not _suppress:
211 prefix += (
212 self.ljust(record.name, _RECORD_CHARS)
213 + self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)
214 + ":"
215 + self.ljust(str(record.lineno), _LINENO_CHARS)
216 + " in "
217 + self.ljust(str(record.funcName), _FUNCNAME_CHARS)
218 + " "
219 )
220
221 # these lines are copied from the builtin logger
222 if record.exc_info:
223 # Cache the traceback text to avoid converting it multiple times
224 # (it's constant anyway)
225 if not record.exc_text:
226 record.exc_text = self.formatException(record.exc_info)
227 if record.exc_text:
228 if msg[-1:] != "\n":
229 msg = msg + "\n"
230 msg = msg + record.exc_text
231
232 prefix_len = len(prefix)
233 if coloured:
234 prefix_len -= len(level) - _LEVEL_CHARS
235 pad = "\n" + " " * (prefix_len)
236 return prefix + pad.join(msg.split("\n"))
237
238 def format(self, record):
239 """Prettify the log output, annotate with simulation time"""
240
241 msg = record.getMessage()
242 level = record.levelname.ljust(_LEVEL_CHARS)
243
244 return self._format(level, record, msg)
245
246
247 class SimColourLogFormatter(SimLogFormatter):
248 """Log formatter to provide consistent log message handling."""
249
250 loglevel2colour = {
251 logging.TRACE: "%s",
252 logging.DEBUG: "%s",
253 logging.INFO: "%s",
254 logging.WARNING: ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT,
255 logging.ERROR: ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT,
256 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT,
257 }
258
259 def format(self, record):
260 """Prettify the log output, annotate with simulation time"""
261
262 msg = record.getMessage()
263
264 # Need to colour each line in case coloring is applied in the message
265 msg = "\n".join(
266 [
267 SimColourLogFormatter.loglevel2colour.get(record.levelno, "%s") % line
268 for line in msg.split("\n")
269 ]
270 )
271 level = SimColourLogFormatter.loglevel2colour.get(
272 record.levelno, "%s"
273 ) % record.levelname.ljust(_LEVEL_CHARS)
274
275 return self._format(level, record, msg, coloured=True)
276
277
278 def _filter_from_c(logger_name, level):
279 return logging.getLogger(logger_name).isEnabledFor(level)
280
281
282 def _log_from_c(logger_name, level, filename, lineno, msg, function_name):
283 """
284 This is for use from the C world, and allows us to insert C stack
285 information.
286 """
287 logger = logging.getLogger(logger_name)
288 if logger.isEnabledFor(level):
289 record = logger.makeRecord(
290 logger.name, level, filename, lineno, msg, None, None, function_name
291 )
292 logger.handle(record)
293
[end of cocotb/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cocotb/log.py b/cocotb/log.py
--- a/cocotb/log.py
+++ b/cocotb/log.py
@@ -35,6 +35,7 @@
import warnings
import cocotb.ANSI as ANSI
+from cocotb import simulator
from cocotb.utils import get_sim_time, get_time_from_sim_steps, want_color_output
try:
@@ -109,8 +110,6 @@
# Notify GPI of log level, which it uses as an optimization to avoid
# calling into Python.
- from cocotb import simulator
-
simulator.log_level(log.getEffectiveLevel())
@@ -137,6 +136,11 @@
)
return want_color_output()
+ def setLevel(self, level: int) -> None:
+ super().setLevel(level)
+ if self.name == "gpi":
+ simulator.log_level(level)
+
# this used to be a class, hence the unusual capitalization
def SimLog(name, ident=None):
|
{"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -35,6 +35,7 @@\n import warnings\n \n import cocotb.ANSI as ANSI\n+from cocotb import simulator\n from cocotb.utils import get_sim_time, get_time_from_sim_steps, want_color_output\n \n try:\n@@ -109,8 +110,6 @@\n \n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n- from cocotb import simulator\n-\n simulator.log_level(log.getEffectiveLevel())\n \n \n@@ -137,6 +136,11 @@\n )\n return want_color_output()\n \n+ def setLevel(self, level: int) -> None:\n+ super().setLevel(level)\n+ if self.name == \"gpi\":\n+ simulator.log_level(level)\n+\n \n # this used to be a class, hence the unusual capitalization\n def SimLog(name, ident=None):\n", "issue": "Update cached logging level in C when Python log level is changed\nAs an optimization gpi_log caches the gpi logging level. This cached value is not currently updated when the \"cocotb.gpi\" logger's level is updated in Python. This will require creating a `logging.Handler` subclass that calls `simulator.set_level` when its `setLevel` method is called.\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport logging\nimport os\nimport sys\nimport warnings\n\nimport cocotb.ANSI as ANSI\nfrom cocotb.utils import get_sim_time, get_time_from_sim_steps, want_color_output\n\ntry:\n _suppress = int(os.environ.get(\"COCOTB_REDUCED_LOG_FMT\", \"1\"))\nexcept ValueError:\n _suppress = 1\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n# Custom log level\nlogging.TRACE = 5\nlogging.addLevelName(5, \"TRACE\")\n\n# Default log level if not overwritten by the user.\n_COCOTB_LOG_LEVEL_DEFAULT = \"INFO\"\n\n\ndef default_config():\n \"\"\"Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance.\n An example of this can be found in the section on :ref:`rotating-logger`.\n\n .. versionadded:: 1.4\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger(\"cocotb\")\n\n try:\n # All log levels are upper case, convert the user input for convenience.\n level = os.environ[\"COCOTB_LOG_LEVEL\"].upper()\n except KeyError:\n level = _COCOTB_LOG_LEVEL_DEFAULT\n\n try:\n log.setLevel(level)\n except ValueError:\n valid_levels = (\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"TRACE\")\n raise ValueError(\n \"Invalid log level %r passed through the \"\n \"COCOTB_LOG_LEVEL environment variable. 
Valid log \"\n \"levels: %s\" % (level, \", \".join(valid_levels))\n )\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n from cocotb import simulator\n\n simulator.log_level(log.getEffectiveLevel())\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\"This class only exists for backwards compatibility\"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\"Like logging.getLogger, but append a numeric identifier to the name\"\"\"\n if ident is not None:\n name = f\"{name}.0x{ident:x}\"\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n\n .. versionadded:: 1.4\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the handler object this\n formatter is attached to has a :class:`SimTimeContextFilter` filter\n attached, which cocotb ensures by default.\n \"\"\"\n\n # Removes the arguments from the base class. 
Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\"Takes no arguments.\"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1 :]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1 :]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, \"created_sim_time\", None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, \"ns\")\n sim_time_str = f\"{time_ns:6.2f}ns\"\n prefix = sim_time_str.rjust(11) + \" \" + level + \" \"\n if not _suppress:\n prefix += (\n self.ljust(record.name, _RECORD_CHARS)\n + self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)\n + \":\"\n + self.ljust(str(record.lineno), _LINENO_CHARS)\n + \" in \"\n + self.ljust(str(record.funcName), _FUNCNAME_CHARS)\n + \" \"\n )\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= len(level) - _LEVEL_CHARS\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split(\"\\n\"))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.TRACE: \"%s\",\n logging.DEBUG: \"%s\",\n logging.INFO: \"%s\",\n logging.WARNING: ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR: ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n msg = \"\\n\".join(\n [\n SimColourLogFormatter.loglevel2colour.get(record.levelno, \"%s\") % line\n for line in msg.split(\"\\n\")\n ]\n )\n level = SimColourLogFormatter.loglevel2colour.get(\n record.levelno, \"%s\"\n ) % record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name, level, filename, lineno, msg, None, None, function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}]}
| 3,754 | 236 |
gh_patches_debug_34983
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-3117
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Psycopg patching doesn't properly handle execute_values
The `execute_values` extension in psycopg2 composes and executes the query as a byte string, even if you passed the query in as a `str`. Below is the full function from psycopg2.extras:
```python
def execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):
from psycopg2.sql import Composable
if isinstance(sql, Composable):
sql = sql.as_string(cur)
# we can't just use sql % vals because vals is bytes: if sql is bytes
# there will be some decoding error because of stupid codec used, and Py3
# doesn't implement % on bytes.
if not isinstance(sql, bytes):
sql = sql.encode(_ext.encodings[cur.connection.encoding])
pre, post = _split_sql(sql)
result = [] if fetch else None
for page in _paginate(argslist, page_size=page_size):
if template is None:
template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'
parts = pre[:]
for args in page:
parts.append(cur.mogrify(template, args))
parts.append(b',')
parts[-1:] = post
cur.execute(b''.join(parts))
if fetch:
result.extend(cur.fetchall())
return result
```
The problem is that ddtrace assumes that the "resource" added to a span is a string. As a result, when `span.finish()` is called in the Datadog Lambda handler and it tries to serialize the span to JSON, it blows up with "TypeError: Object of type bytes is not JSON serializable". Upon investigation, I discovered that the JSONEncoder in ddtrace.internal.encoding just does a simple json.dumps() on all the spans, and the `resource` attribute on the span produced via `execute_values` is bytes, not a string.
I think the solution here is simply to update the Psycopg2TracedCursor class to decode the resource from bytes if it is bytes, like this:
```python
class Psycopg2TracedCursor(dbapi.TracedCursor):
"""TracedCursor for psycopg2"""
def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):
# treat psycopg2.sql.Composable resource objects as strings
if isinstance(resource, Composable):
resource = resource.as_string(self.__wrapped__)
# THIS IS THE NEW PART BELOW (next 2 lines)
if isinstance(resource, bytes):
resource = resource.decode('utf-8')
return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)
```
### Which version of dd-trace-py are you using?
Lambda layer, v50.
### Which version of pip are you using?
n/a
### How can we reproduce your problem?
Use `execute_values` while inside a tracing context. It should have a 100% failure rate.
### What is the result that you get?
A type error when span.finish() is called and the metrics are furnished to DD.
### What is the result that you expected?
It should work as normal, with the resource decoded as a string.
</issue>
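Whichever layer the fix ends up in (the cursor wrapper proposed above, or the JSON encoder in `ddtrace/internal/encoding.py` shown below), the core of it is a small normalization step before serialization. A minimal sketch, with the helper name being illustrative rather than an actual ddtrace API:

```python
def ensure_text_resource(resource):
    """Return a JSON-serializable str, decoding a bytes resource if needed."""
    if isinstance(resource, bytes):
        # backslashreplace keeps serialization from raising on odd byte sequences
        return resource.decode("utf-8", errors="backslashreplace")
    return resource
```

Applied to `resource` (and, for safety, `name` and `service`) before `json.dumps`, this avoids the `TypeError` described above.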
<code>
[start of ddtrace/internal/encoding.py]
1 import json
2 from typing import Any
3 from typing import Dict
4 from typing import List
5 from typing import Optional
6 from typing import TYPE_CHECKING
7
8 from ._encoding import ListStringTable
9 from ._encoding import MsgpackEncoderV03
10 from ._encoding import MsgpackEncoderV05
11 from .logger import get_logger
12
13
14 __all__ = ["MsgpackEncoderV03", "MsgpackEncoderV05", "ListStringTable", "MSGPACK_ENCODERS"]
15
16
17 if TYPE_CHECKING:
18 from ..span import Span
19
20
21 log = get_logger(__name__)
22
23
24 class _EncoderBase(object):
25 """
26 Encoder interface that provides the logic to encode traces and service.
27 """
28
29 def encode_traces(self, traces):
30 # type: (List[List[Span]]) -> str
31 """
32 Encodes a list of traces, expecting a list of items where each items
33 is a list of spans. Before dumping the string in a serialized format all
34 traces are normalized according to the encoding format. The trace
35 nesting is not changed.
36
37 :param traces: A list of traces that should be serialized
38 """
39 raise NotImplementedError()
40
41 def encode(self, obj):
42 # type: (List[List[Any]]) -> str
43 """
44 Defines the underlying format used during traces or services encoding.
45 This method must be implemented and should only be used by the internal
46 functions.
47 """
48 raise NotImplementedError()
49
50
51 class JSONEncoder(_EncoderBase):
52 content_type = "application/json"
53
54 def encode_traces(self, traces):
55 normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
56 return self.encode(normalized_traces)
57
58 @staticmethod
59 def encode(obj):
60 # type: (Any) -> str
61 return json.dumps(obj)
62
63
64 class JSONEncoderV2(JSONEncoder):
65 """
66 JSONEncoderV2 encodes traces to the new intake API format.
67 """
68
69 content_type = "application/json"
70
71 def encode_traces(self, traces):
72 # type: (List[List[Span]]) -> str
73 normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]
74 return self.encode({"traces": normalized_traces})
75
76 @staticmethod
77 def _convert_span(span):
78 # type: (Span) -> Dict[str, Any]
79 sp = span.to_dict()
80 sp["trace_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("trace_id"))
81 sp["parent_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("parent_id"))
82 sp["span_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("span_id"))
83 return sp
84
85 @staticmethod
86 def _encode_id_to_hex(dd_id):
87 # type: (Optional[int]) -> str
88 if not dd_id:
89 return "0000000000000000"
90 return "%0.16X" % int(dd_id)
91
92 @staticmethod
93 def _decode_id_to_hex(hex_id):
94 # type: (Optional[str]) -> int
95 if not hex_id:
96 return 0
97 return int(hex_id, 16)
98
99
100 MSGPACK_ENCODERS = {
101 "v0.3": MsgpackEncoderV03,
102 "v0.4": MsgpackEncoderV03,
103 "v0.5": MsgpackEncoderV05,
104 }
105
[end of ddtrace/internal/encoding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/internal/encoding.py b/ddtrace/internal/encoding.py
--- a/ddtrace/internal/encoding.py
+++ b/ddtrace/internal/encoding.py
@@ -8,6 +8,9 @@
from ._encoding import ListStringTable
from ._encoding import MsgpackEncoderV03
from ._encoding import MsgpackEncoderV05
+from .compat import PY3
+from .compat import binary_type
+from .compat import ensure_text
from .logger import get_logger
@@ -48,17 +51,33 @@
raise NotImplementedError()
-class JSONEncoder(_EncoderBase):
+class JSONEncoder(json.JSONEncoder, _EncoderBase):
content_type = "application/json"
def encode_traces(self, traces):
- normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
+ normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]
return self.encode(normalized_traces)
@staticmethod
- def encode(obj):
- # type: (Any) -> str
- return json.dumps(obj)
+ def _normalize_span(span):
+ # Ensure all string attributes are actually strings and not bytes
+ # DEV: We are deferring meta/metrics to reduce any performance issues.
+ # Meta/metrics may still contain `bytes` and have encoding issues.
+ span["resource"] = JSONEncoder._normalize_str(span["resource"])
+ span["name"] = JSONEncoder._normalize_str(span["name"])
+ span["service"] = JSONEncoder._normalize_str(span["service"])
+ return span
+
+ @staticmethod
+ def _normalize_str(obj):
+ if obj is None:
+ return obj
+
+ if PY3:
+ return ensure_text(obj, errors="backslashreplace")
+ elif isinstance(obj, binary_type):
+ return obj.decode("utf-8", errors="replace")
+ return obj
class JSONEncoderV2(JSONEncoder):
@@ -77,6 +96,7 @@
def _convert_span(span):
# type: (Span) -> Dict[str, Any]
sp = span.to_dict()
+ sp = JSONEncoderV2._normalize_span(sp)
sp["trace_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("trace_id"))
sp["parent_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("parent_id"))
sp["span_id"] = JSONEncoderV2._encode_id_to_hex(sp.get("span_id"))
|
{"golden_diff": "diff --git a/ddtrace/internal/encoding.py b/ddtrace/internal/encoding.py\n--- a/ddtrace/internal/encoding.py\n+++ b/ddtrace/internal/encoding.py\n@@ -8,6 +8,9 @@\n from ._encoding import ListStringTable\n from ._encoding import MsgpackEncoderV03\n from ._encoding import MsgpackEncoderV05\n+from .compat import PY3\n+from .compat import binary_type\n+from .compat import ensure_text\n from .logger import get_logger\n \n \n@@ -48,17 +51,33 @@\n raise NotImplementedError()\n \n \n-class JSONEncoder(_EncoderBase):\n+class JSONEncoder(json.JSONEncoder, _EncoderBase):\n content_type = \"application/json\"\n \n def encode_traces(self, traces):\n- normalized_traces = [[span.to_dict() for span in trace] for trace in traces]\n+ normalized_traces = [[JSONEncoder._normalize_span(span.to_dict()) for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n \n @staticmethod\n- def encode(obj):\n- # type: (Any) -> str\n- return json.dumps(obj)\n+ def _normalize_span(span):\n+ # Ensure all string attributes are actually strings and not bytes\n+ # DEV: We are deferring meta/metrics to reduce any performance issues.\n+ # Meta/metrics may still contain `bytes` and have encoding issues.\n+ span[\"resource\"] = JSONEncoder._normalize_str(span[\"resource\"])\n+ span[\"name\"] = JSONEncoder._normalize_str(span[\"name\"])\n+ span[\"service\"] = JSONEncoder._normalize_str(span[\"service\"])\n+ return span\n+\n+ @staticmethod\n+ def _normalize_str(obj):\n+ if obj is None:\n+ return obj\n+\n+ if PY3:\n+ return ensure_text(obj, errors=\"backslashreplace\")\n+ elif isinstance(obj, binary_type):\n+ return obj.decode(\"utf-8\", errors=\"replace\")\n+ return obj\n \n \n class JSONEncoderV2(JSONEncoder):\n@@ -77,6 +96,7 @@\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n+ sp = JSONEncoderV2._normalize_span(sp)\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n", "issue": "Psycopg patching doesn't properly handle execute_values\nThe `execute_values` extension in psycopg2 composes and executes the query with b-string, even if you passed the query as a string. Below is the full function from psycopg2.extras\r\n\r\n```python\r\ndef execute_values(cur, sql, argslist, template=None, page_size=100, fetch=False):\r\n from psycopg2.sql import Composable\r\n if isinstance(sql, Composable):\r\n sql = sql.as_string(cur)\r\n\r\n # we can't just use sql % vals because vals is bytes: if sql is bytes\r\n # there will be some decoding error because of stupid codec used, and Py3\r\n # doesn't implement % on bytes.\r\n if not isinstance(sql, bytes):\r\n sql = sql.encode(_ext.encodings[cur.connection.encoding])\r\n pre, post = _split_sql(sql)\r\n\r\n result = [] if fetch else None\r\n for page in _paginate(argslist, page_size=page_size):\r\n if template is None:\r\n template = b'(' + b','.join([b'%s'] * len(page[0])) + b')'\r\n parts = pre[:]\r\n for args in page:\r\n parts.append(cur.mogrify(template, args))\r\n parts.append(b',')\r\n parts[-1:] = post\r\n cur.execute(b''.join(parts))\r\n if fetch:\r\n result.extend(cur.fetchall())\r\n\r\n return result\r\n```\r\n\r\nThe problem is that ddtrace assumes that the \"resource\" added to a span is a string. 
The result is that when `span.finish()` is called in the datadog lambda handler and it tries to serialize the span to json, it blows up with \"TypeError: Object of type bytes is not JSON serializable\". Upon investigation, I discovered that the ddtrace.internal.encoder.py's JSONEncoder just does a simple json.dumps() on all the spans and the `resource` attribute on the span from the using `execute_values` is bytes, not a string.\r\n\r\nI think the solution here is simply to update the Psycopg2TracedCursor class to decode the resource from bytes if it is bytes, like this:\r\n\r\n```python\r\nclass Psycopg2TracedCursor(dbapi.TracedCursor):\r\n \"\"\"TracedCursor for psycopg2\"\"\"\r\n\r\n def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):\r\n # treat psycopg2.sql.Composable resource objects as strings\r\n if isinstance(resource, Composable):\r\n resource = resource.as_string(self.__wrapped__)\r\n # THIS IS THE NEW PART BELOW (next 2 lines)\r\n if isinstance(resource, bytes):\r\n resource = resource.decode('utf-8')\r\n return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\nLambda layer, v50.\r\n### Which version of pip are you using?\r\nn/a\r\n\r\n### How can we reproduce your problem?\r\nUse `execute_values` while inside a tracing context. It should have a 100% failure rate.\r\n\r\n### What is the result that you get?\r\nA type error when span.finish() is called and the metrics are furnished to DD.\r\n\r\n### What is the result that you expected?\r\nIt should work as normal, with the resource decoded as a string.\r\n\n", "before_files": [{"content": "import json\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom ._encoding import ListStringTable\nfrom ._encoding import MsgpackEncoderV03\nfrom ._encoding import MsgpackEncoderV05\nfrom .logger import get_logger\n\n\n__all__ = [\"MsgpackEncoderV03\", \"MsgpackEncoderV05\", \"ListStringTable\", \"MSGPACK_ENCODERS\"]\n\n\nif TYPE_CHECKING:\n from ..span import Span\n\n\nlog = get_logger(__name__)\n\n\nclass _EncoderBase(object):\n \"\"\"\n Encoder interface that provides the logic to encode traces and service.\n \"\"\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n \"\"\"\n Encodes a list of traces, expecting a list of items where each items\n is a list of spans. Before dumping the string in a serialized format all\n traces are normalized according to the encoding format. 
The trace\n nesting is not changed.\n\n :param traces: A list of traces that should be serialized\n \"\"\"\n raise NotImplementedError()\n\n def encode(self, obj):\n # type: (List[List[Any]]) -> str\n \"\"\"\n Defines the underlying format used during traces or services encoding.\n This method must be implemented and should only be used by the internal\n functions.\n \"\"\"\n raise NotImplementedError()\n\n\nclass JSONEncoder(_EncoderBase):\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n normalized_traces = [[span.to_dict() for span in trace] for trace in traces]\n return self.encode(normalized_traces)\n\n @staticmethod\n def encode(obj):\n # type: (Any) -> str\n return json.dumps(obj)\n\n\nclass JSONEncoderV2(JSONEncoder):\n \"\"\"\n JSONEncoderV2 encodes traces to the new intake API format.\n \"\"\"\n\n content_type = \"application/json\"\n\n def encode_traces(self, traces):\n # type: (List[List[Span]]) -> str\n normalized_traces = [[JSONEncoderV2._convert_span(span) for span in trace] for trace in traces]\n return self.encode({\"traces\": normalized_traces})\n\n @staticmethod\n def _convert_span(span):\n # type: (Span) -> Dict[str, Any]\n sp = span.to_dict()\n sp[\"trace_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"trace_id\"))\n sp[\"parent_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"parent_id\"))\n sp[\"span_id\"] = JSONEncoderV2._encode_id_to_hex(sp.get(\"span_id\"))\n return sp\n\n @staticmethod\n def _encode_id_to_hex(dd_id):\n # type: (Optional[int]) -> str\n if not dd_id:\n return \"0000000000000000\"\n return \"%0.16X\" % int(dd_id)\n\n @staticmethod\n def _decode_id_to_hex(hex_id):\n # type: (Optional[str]) -> int\n if not hex_id:\n return 0\n return int(hex_id, 16)\n\n\nMSGPACK_ENCODERS = {\n \"v0.3\": MsgpackEncoderV03,\n \"v0.4\": MsgpackEncoderV03,\n \"v0.5\": MsgpackEncoderV05,\n}\n", "path": "ddtrace/internal/encoding.py"}]}
| 2,194 | 561 |
gh_patches_debug_27090
|
rasdani/github-patches
|
git_diff
|
data-for-change__anyway-720
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clusters view still very heavy
I uploaded the code to a new server, following #463.
The basic markers view is better and faster, but the clusters view is still heavy and might cause a server failure (I think the query is hogging the DB).
</issue>
<code>
[start of clusters_calculator.py]
1 from models import Marker
2 from static.pymapcluster import calculate_clusters
3 import logging
4 import concurrent.futures
5 import multiprocessing
6
7
8 def retrieve_clusters(**kwargs):
9 marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
10 result_futures = []
11 logging.info('number of cores: ' + str(multiprocessing.cpu_count()))
12 with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
13 for marker_box in marker_boxes:
14
15 kwargs.update(marker_box)
16 markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()
17 result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))
18
19 completed_futures = concurrent.futures.wait(result_futures)
20 result = []
21 for future in completed_futures.done:
22 result.extend(future.result())
23
24 return result
25
26
27 def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
28 cpu_count = multiprocessing.cpu_count()
29 lat_box_size = (ne_lat - sw_lat) / cpu_count
30 # lng_box_size = (sw_lng - ne_lng) / cpu_count
31 boxes = []
32 for i in xrange(cpu_count):
33 # TODO: the below calculation is using sw_lat as first param instead of ne_lat. Plz verify my fix for that:
34 # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng))
35 boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,
36 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})
37
38 return boxes
39
[end of clusters_calculator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/clusters_calculator.py b/clusters_calculator.py
--- a/clusters_calculator.py
+++ b/clusters_calculator.py
@@ -1,27 +1,25 @@
+import itertools
+from celery import Celery, group
from models import Marker
from static.pymapcluster import calculate_clusters
-import logging
-import concurrent.futures
import multiprocessing
-def retrieve_clusters(**kwargs):
- marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
- result_futures = []
- logging.info('number of cores: ' + str(multiprocessing.cpu_count()))
- with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
- for marker_box in marker_boxes:
+celery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//')
- kwargs.update(marker_box)
- markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()
- result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))
+@celery_app.task
+def calculate_marker_box(kwargs, marker_box):
+ kwargs.update(marker_box)
+ markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()
+ return calculate_clusters(markers_in_box, kwargs['zoom'])
- completed_futures = concurrent.futures.wait(result_futures)
- result = []
- for future in completed_futures.done:
- result.extend(future.result())
- return result
+def retrieve_clusters(**kwargs):
+ marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])
+ job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes])
+ result = job.apply_async()
+ result.join()
+ return list(itertools.chain.from_iterable(result.get()))
def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):
|
{"golden_diff": "diff --git a/clusters_calculator.py b/clusters_calculator.py\n--- a/clusters_calculator.py\n+++ b/clusters_calculator.py\n@@ -1,27 +1,25 @@\n+import itertools\n+from celery import Celery, group\n from models import Marker\n from static.pymapcluster import calculate_clusters\n-import logging\n-import concurrent.futures\n import multiprocessing\n \n \n-def retrieve_clusters(**kwargs):\n- marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n- result_futures = []\n- logging.info('number of cores: ' + str(multiprocessing.cpu_count()))\n- with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n- for marker_box in marker_boxes:\n+celery_app = Celery('tasks', backend='rpc://', broker='pyamqp://guest@localhost//')\n \n- kwargs.update(marker_box)\n- markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n- result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))\n+@celery_app.task\n+def calculate_marker_box(kwargs, marker_box):\n+ kwargs.update(marker_box)\n+ markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n+ return calculate_clusters(markers_in_box, kwargs['zoom'])\n \n- completed_futures = concurrent.futures.wait(result_futures)\n- result = []\n- for future in completed_futures.done:\n- result.extend(future.result())\n \n- return result\n+def retrieve_clusters(**kwargs):\n+ marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n+ job = group([calculate_marker_box.s(kwargs, marker_box) for marker_box in marker_boxes])\n+ result = job.apply_async()\n+ result.join()\n+ return list(itertools.chain.from_iterable(result.get()))\n \n \n def divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n", "issue": "Clusters view still very heavy\nI uploaded the code to a new server, following #463.\r\nThe basic markers view is better and faster, but the clusters is still heavy and might cause server failure (I think the query is hogging the DB).\n", "before_files": [{"content": "from models import Marker\nfrom static.pymapcluster import calculate_clusters\nimport logging\nimport concurrent.futures\nimport multiprocessing\n\n\ndef retrieve_clusters(**kwargs):\n marker_boxes = divide_to_boxes(kwargs['ne_lat'], kwargs['ne_lng'], kwargs['sw_lat'], kwargs['sw_lng'])\n result_futures = []\n logging.info('number of cores: ' + str(multiprocessing.cpu_count()))\n with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n for marker_box in marker_boxes:\n\n kwargs.update(marker_box)\n markers_in_box = Marker.bounding_box_query(**kwargs).markers.all()\n result_futures.append(executor.submit(calculate_clusters, markers_in_box, kwargs['zoom']))\n\n completed_futures = concurrent.futures.wait(result_futures)\n result = []\n for future in completed_futures.done:\n result.extend(future.result())\n\n return result\n\n\ndef divide_to_boxes(ne_lat, ne_lng, sw_lat, sw_lng):\n cpu_count = multiprocessing.cpu_count()\n lat_box_size = (ne_lat - sw_lat) / cpu_count\n # lng_box_size = (sw_lng - ne_lng) / cpu_count\n boxes = []\n for i in xrange(cpu_count):\n # TODO: the below calculation is using sw_lat as first param instead of ne_lat. 
Plz verify my fix for that:\n # boxes.append((sw_lat + (i + 1) * lat_box_size, ne_lng, sw_lat + i * lat_box_size, sw_lng))\n boxes.append({'ne_lat': ne_lat + (i + 1) * lat_box_size, 'ne_lng': ne_lng,\n 'sw_lat': sw_lat + i * lat_box_size, 'sw_lng': sw_lng})\n\n return boxes\n", "path": "clusters_calculator.py"}]}
| 1,033 | 443 |
gh_patches_debug_4555
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-9157
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[sdk] Containerized Python Component module not found error
There is a bug when building a containerized Python component that happens (at least) in the case when the longest path of the import graph ending at the component involves >2 modules.
### Environment
KFP SDK 2.0.0-beta.6
### Steps to reproduce
For example:
```python
# component.py
from module_one import one
from kfp import dsl
@dsl.component
def comp(): ...
```
```python
# module_one.py
from module_two import two
one = 1
```
```python
# module_two.py
two = 2
```
Then: `kfp component build .`
You get a `No module named` error.
### Expected result
Should build without an error.
### Materials and Reference
Related: https://github.com/kubeflow/pipelines/issues/8353
</issue>
<code>
[start of sdk/python/kfp/components/utils.py]
1 # Copyright 2021 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Definitions of utils methods."""
15
16 import importlib
17 import os
18 import re
19 import sys
20 import types
21 from typing import List
22
23 _COMPONENT_NAME_PREFIX = 'comp-'
24 _EXECUTOR_LABEL_PREFIX = 'exec-'
25
26
27 def load_module(module_name: str, module_directory: str) -> types.ModuleType:
28 """Dynamically imports the Python module with the given name and package
29 path.
30
31 E.g., Assuming there is a file called `my_module.py` under
32 `/some/directory/my_module`, we can use::
33
34 load_module('my_module', '/some/directory')
35
36 to effectively `import mymodule`.
37
38 Args:
39 module_name: The name of the module.
40 package_path: The package under which the specified module resides.
41 """
42 module_spec = importlib.util.spec_from_file_location(
43 name=module_name,
44 location=os.path.join(module_directory, f'{module_name}.py'))
45 module = importlib.util.module_from_spec(module_spec)
46 sys.modules[module_spec.name] = module
47 module_spec.loader.exec_module(module)
48 return module
49
50
51 def maybe_rename_for_k8s(name: str) -> str:
52 """Cleans and converts a name to be k8s compatible.
53
54 Args:
55 name: The original name.
56
57 Returns:
58 A sanitized name.
59 """
60 return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-',
61 name.lower())).lstrip('-').rstrip('-')
62
63
64 def sanitize_input_name(name: str) -> str:
65 """Sanitizes input name."""
66 return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')
67
68
69 def sanitize_component_name(name: str) -> str:
70 """Sanitizes component name."""
71 return _COMPONENT_NAME_PREFIX + maybe_rename_for_k8s(name)
72
73
74 def sanitize_task_name(name: str) -> str:
75 """Sanitizes task name."""
76 return maybe_rename_for_k8s(name)
77
78
79 def sanitize_executor_label(label: str) -> str:
80 """Sanitizes executor label."""
81 return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label)
82
83
84 def make_name_unique_by_adding_index(
85 name: str,
86 collection: List[str],
87 delimiter: str,
88 ) -> str:
89 """Makes a unique name by adding index.
90
91 The index starts from 2 and increase by 1 until we find a unique name.
92
93 Args:
94 name: The original name.
95 collection: The collection of existing names.
96 delimiter: The delimiter to connect the original name and an index.
97
98 Returns:
99 A unique name composed of name+delimiter+next index
100 """
101 unique_name = name
102 if unique_name in collection:
103 for i in range(2, sys.maxsize**10):
104 unique_name = name + delimiter + str(i)
105 if unique_name not in collection:
106 break
107 return unique_name
108
109
110 def validate_pipeline_name(name: str) -> None:
111 """Validate pipeline name.
112
113 A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$.
114
115 Args:
116 name: The pipeline name.
117
118 Raises:
119 ValueError if the pipeline name doesn't conform to the regular expression.
120 """
121 pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$')
122 if not pattern.match(name):
123 raise ValueError(
124 'Invalid pipeline name: %s.\n'
125 'Please specify a pipeline name that matches the regular '
126 'expression "^[a-z0-9][a-z0-9-]{0,127}$" using '
127 '`dsl.pipeline(name=...)` decorator.' % name)
128
[end of sdk/python/kfp/components/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/kfp/components/utils.py b/sdk/python/kfp/components/utils.py
--- a/sdk/python/kfp/components/utils.py
+++ b/sdk/python/kfp/components/utils.py
@@ -44,6 +44,7 @@
location=os.path.join(module_directory, f'{module_name}.py'))
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_spec.name] = module
+ sys.path.insert(0, str(module_directory))
module_spec.loader.exec_module(module)
return module
|
{"golden_diff": "diff --git a/sdk/python/kfp/components/utils.py b/sdk/python/kfp/components/utils.py\n--- a/sdk/python/kfp/components/utils.py\n+++ b/sdk/python/kfp/components/utils.py\n@@ -44,6 +44,7 @@\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n+ sys.path.insert(0, str(module_directory))\n module_spec.loader.exec_module(module)\n return module\n", "issue": "[sdk] Containerized Python Component module not found error \nThere is a bug when building a containerized Python component that happens (at least) in the case when the longest path of the import graph ending at the component involves >2 modules. \r\n\r\n### Environment\r\nKFP SDK 2.0.0-beta.6\r\n\r\n### Steps to reproduce\r\nFor example:\r\n\r\n```python\r\n# component.py\r\nfrom module_one import one\r\nfrom kfp import dsl\r\n\r\[email protected]\r\ndef comp(): ...\r\n```\r\n\r\n```python\r\n# module_one.py\r\nfrom module_two import two\r\none = 1\r\n```\r\n\r\n```python\r\n# module_two.py\r\ntwo = 2\r\n```\r\n\r\nThen: `kfp component build .`\r\n\r\nYou get a `No module named` error.\r\n\r\n### Expected result\r\n\r\nShould build without an error.\r\n\r\n### Materials and Reference\r\nRelated: https://github.com/kubeflow/pipelines/issues/8353\n", "before_files": [{"content": "# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Definitions of utils methods.\"\"\"\n\nimport importlib\nimport os\nimport re\nimport sys\nimport types\nfrom typing import List\n\n_COMPONENT_NAME_PREFIX = 'comp-'\n_EXECUTOR_LABEL_PREFIX = 'exec-'\n\n\ndef load_module(module_name: str, module_directory: str) -> types.ModuleType:\n \"\"\"Dynamically imports the Python module with the given name and package\n path.\n\n E.g., Assuming there is a file called `my_module.py` under\n `/some/directory/my_module`, we can use::\n\n load_module('my_module', '/some/directory')\n\n to effectively `import mymodule`.\n\n Args:\n module_name: The name of the module.\n package_path: The package under which the specified module resides.\n \"\"\"\n module_spec = importlib.util.spec_from_file_location(\n name=module_name,\n location=os.path.join(module_directory, f'{module_name}.py'))\n module = importlib.util.module_from_spec(module_spec)\n sys.modules[module_spec.name] = module\n module_spec.loader.exec_module(module)\n return module\n\n\ndef maybe_rename_for_k8s(name: str) -> str:\n \"\"\"Cleans and converts a name to be k8s compatible.\n\n Args:\n name: The original name.\n\n Returns:\n A sanitized name.\n \"\"\"\n return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-',\n name.lower())).lstrip('-').rstrip('-')\n\n\ndef sanitize_input_name(name: str) -> str:\n \"\"\"Sanitizes input name.\"\"\"\n return re.sub('[^_0-9a-z]+', '_', name.lower()).lstrip('_').rstrip('_')\n\n\ndef sanitize_component_name(name: str) -> str:\n \"\"\"Sanitizes component name.\"\"\"\n return _COMPONENT_NAME_PREFIX + 
maybe_rename_for_k8s(name)\n\n\ndef sanitize_task_name(name: str) -> str:\n \"\"\"Sanitizes task name.\"\"\"\n return maybe_rename_for_k8s(name)\n\n\ndef sanitize_executor_label(label: str) -> str:\n \"\"\"Sanitizes executor label.\"\"\"\n return _EXECUTOR_LABEL_PREFIX + maybe_rename_for_k8s(label)\n\n\ndef make_name_unique_by_adding_index(\n name: str,\n collection: List[str],\n delimiter: str,\n) -> str:\n \"\"\"Makes a unique name by adding index.\n\n The index starts from 2 and increase by 1 until we find a unique name.\n\n Args:\n name: The original name.\n collection: The collection of existing names.\n delimiter: The delimiter to connect the original name and an index.\n\n Returns:\n A unique name composed of name+delimiter+next index\n \"\"\"\n unique_name = name\n if unique_name in collection:\n for i in range(2, sys.maxsize**10):\n unique_name = name + delimiter + str(i)\n if unique_name not in collection:\n break\n return unique_name\n\n\ndef validate_pipeline_name(name: str) -> None:\n \"\"\"Validate pipeline name.\n\n A valid pipeline name should match ^[a-z0-9][a-z0-9-]{0,127}$.\n\n Args:\n name: The pipeline name.\n\n Raises:\n ValueError if the pipeline name doesn't conform to the regular expression.\n \"\"\"\n pattern = re.compile(r'^[a-z0-9][a-z0-9-]{0,127}$')\n if not pattern.match(name):\n raise ValueError(\n 'Invalid pipeline name: %s.\\n'\n 'Please specify a pipeline name that matches the regular '\n 'expression \"^[a-z0-9][a-z0-9-]{0,127}$\" using '\n '`dsl.pipeline(name=...)` decorator.' % name)\n", "path": "sdk/python/kfp/components/utils.py"}]}
| 1,965 | 114 |
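The golden_diff recorded for the entry above resolves the `ModuleNotFoundError` by putting the component's directory on `sys.path` before the module is executed, so that transitive imports (`component.py` importing `module_one`, which in turn imports `module_two`) can resolve. A minimal sketch of the patched loader, based only on the snippet shown in this entry rather than the full kfp source tree:

```python
import importlib.util
import os
import sys
import types


def load_module(module_name: str, module_directory: str) -> types.ModuleType:
    """Import `<module_directory>/<module_name>.py` as a module object."""
    module_spec = importlib.util.spec_from_file_location(
        name=module_name,
        location=os.path.join(module_directory, f'{module_name}.py'))
    module = importlib.util.module_from_spec(module_spec)
    sys.modules[module_spec.name] = module
    # Without this line, imports performed inside the loaded module (for
    # example `from module_two import two` inside module_one.py) fail with
    # ModuleNotFoundError, because module_directory is never on sys.path.
    sys.path.insert(0, str(module_directory))
    module_spec.loader.exec_module(module)
    return module
```

The directory is left on `sys.path` after loading; the recorded patch behaves the same way and does not pop it afterwards.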
gh_patches_debug_14269
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-2093
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
importchannel command is broken
## Summary
* `importchannel` command is broken
## System information
- Version: 0.6
- Operating system: Linux
```
kolibri manage importchannel -- network bcd99d8aeef04ce6b9e25a88d87eedb7
INFO Downloading data for channel id bcd99d8aeef04ce6b9e25a88d87eedb7
0%| | 0/239616 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/home/christian/.virtualenvs/kolibri/bin/kolibri", line 9, in <module>
load_entry_point('kolibri', 'console_scripts', 'kolibri')()
File "/home/christian/repos/kolibri/kolibri/utils/cli.py", line 580, in main
manage(command, args=django_args)
File "/home/christian/repos/kolibri/kolibri/utils/cli.py", line 411, in manage
execute_from_command_line(argv=argv)
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 353, in execute_from_command_line
utility.execute()
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 345, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py", line 348, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py", line 399, in execute
output = self.handle(*args, **options)
File "/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py", line 98, in handle
return self.handle_async(*args, **options)
File "/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py", line 89, in handle_async
self.download_channel(options["channel_id"])
File "/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py", line 42, in download_channel
self._transfer(DOWNLOAD_METHOD, channel_id)
File "/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py", line 73, in _transfer
if self.is_cancelled():
File "/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py", line 108, in is_cancelled
self.check_for_cancel(last_stage)
TypeError: 'NoneType' object is not callable
```
## How to reproduce
1. Use the `importchannel` command
## Real-life consequences
* Breaks my workflow.
</issue>
<code>
[start of kolibri/tasks/management/commands/base.py]
1 import abc
2 from collections import namedtuple
3
4 from barbequeue.exceptions import UserCancelledError
5 from django.core.management.base import BaseCommand
6 from tqdm import tqdm
7
8 Progress = namedtuple(
9 'Progress',
10 [
11 'progress_fraction',
12 'message',
13 'extra_data',
14 'level',
15 ]
16 )
17
18
19 class ProgressTracker():
20
21 def __init__(self, total=100, level=0, update_callback=None):
22
23 # set default values
24 self.progress = 0
25 self.message = ""
26 self.extra_data = None
27
28 # store provided arguments
29 self.total = total
30 self.level = level
31 self.update_callback = update_callback
32
33 # initialize the tqdm progress bar
34 self.progressbar = tqdm(total=total)
35
36 def update_progress(self, increment=1, message="", extra_data=None):
37
38 self.progressbar.update(increment)
39
40 self.progress += increment
41
42 self.message = message
43
44 self.extra_data = extra_data
45
46 if callable(self.update_callback):
47 p = self.get_progress()
48 self.update_callback(p.progress_fraction, p)
49
50 def get_progress(self):
51
52 return Progress(
53 progress_fraction=0 if self.total == 0 else self.progress / float(self.total),
54 message=self.message,
55 extra_data=self.extra_data,
56 level=self.level,
57 )
58
59 def __enter__(self):
60 return self.update_progress
61
62 def __exit__(self, *exc_details):
63 if self.progressbar is not None:
64 self.progressbar.close()
65
66
67 class AsyncCommand(BaseCommand):
68 """A management command with added convenience functions for displaying
69 progress to the user.
70
71 Rather than implementing handle() (as is for BaseCommand), subclasses, must
72 implement handle_async(), which accepts the same arguments as handle().
73
74 If ran from the command line, AsynCommand displays a progress bar to the
75 user. If ran asynchronously through kolibri.tasks.schedule_command(),
76 AsyncCommand sends results through the Progress class to the main Django
77 process. Anyone who knows the task id for the command instance can check
78 the intermediate progress by looking at the task's AsyncResult.result
79 variable.
80
81 """
82
83 def __init__(self, *args, **kwargs):
84 self.progresstrackers = []
85
86 def _update_all_progress(self, progress_fraction, progress):
87 if callable(self.update_progress):
88 progress_list = [p.get_progress() for p in self.progresstrackers]
89 # HACK (aron): self.update_progress' signature has changed between django_q
90 # and iceqube/bbq. It now expects the current progress,
91 # the total progress, and then derives the
92 # percentage progress manually.
93 self.update_progress(progress_list[0].progress_fraction, 1.)
94
95 def handle(self, *args, **options):
96 self.update_progress = options.pop("update_progress", None)
97 self.check_for_cancel = options.pop("check_for_cancel", None)
98 return self.handle_async(*args, **options)
99
100 def start_progress(self, total=100):
101 level = len(self.progresstrackers)
102 tracker = ProgressTracker(total=total, level=level, update_callback=self._update_all_progress)
103 self.progresstrackers.append(tracker)
104 return tracker
105
106 def is_cancelled(self, last_stage="CANCELLING"):
107 try:
108 self.check_for_cancel(last_stage)
109 return False
110 except UserCancelledError:
111 return True
112
113 def cancel(self, last_stage="CANCELLED"):
114 self.check_for_cancel(last_stage)
115
116 @abc.abstractmethod
117 def handle_async(self, *args, **options):
118 """
119 handle_async should be reimplemented by any Subclass of AsyncCommand.
120 """
121 pass
122
[end of kolibri/tasks/management/commands/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/tasks/management/commands/base.py b/kolibri/tasks/management/commands/base.py
--- a/kolibri/tasks/management/commands/base.py
+++ b/kolibri/tasks/management/commands/base.py
@@ -104,14 +104,17 @@
return tracker
def is_cancelled(self, last_stage="CANCELLING"):
- try:
- self.check_for_cancel(last_stage)
- return False
- except UserCancelledError:
- return True
+ if self.check_for_cancel:
+ try:
+ self.check_for_cancel(last_stage)
+ return False
+ except UserCancelledError:
+ return True
+ return False
def cancel(self, last_stage="CANCELLED"):
- self.check_for_cancel(last_stage)
+ if self.check_for_cancel:
+ return self.check_for_cancel(last_stage)
@abc.abstractmethod
def handle_async(self, *args, **options):
|
{"golden_diff": "diff --git a/kolibri/tasks/management/commands/base.py b/kolibri/tasks/management/commands/base.py\n--- a/kolibri/tasks/management/commands/base.py\n+++ b/kolibri/tasks/management/commands/base.py\n@@ -104,14 +104,17 @@\n return tracker\n \n def is_cancelled(self, last_stage=\"CANCELLING\"):\n- try:\n- self.check_for_cancel(last_stage)\n- return False\n- except UserCancelledError:\n- return True\n+ if self.check_for_cancel:\n+ try:\n+ self.check_for_cancel(last_stage)\n+ return False\n+ except UserCancelledError:\n+ return True\n+ return False\n \n def cancel(self, last_stage=\"CANCELLED\"):\n- self.check_for_cancel(last_stage)\n+ if self.check_for_cancel:\n+ return self.check_for_cancel(last_stage)\n \n @abc.abstractmethod\n def handle_async(self, *args, **options):\n", "issue": "importchannel command is broken\n## Summary\r\n\r\n* `importchannel` command is broken\r\n\r\n## System information\r\n\r\n - Version: 0.6\r\n - Operating system: Linux\r\n\r\n```\r\nkolibri manage importchannel -- network bcd99d8aeef04ce6b9e25a88d87eedb7\r\nINFO Downloading data for channel id bcd99d8aeef04ce6b9e25a88d87eedb7\r\n 0%| | 0/239616 [00:00<?, ?it/s]\r\nTraceback (most recent call last):\r\n File \"/home/christian/.virtualenvs/kolibri/bin/kolibri\", line 9, in <module>\r\n load_entry_point('kolibri', 'console_scripts', 'kolibri')()\r\n File \"/home/christian/repos/kolibri/kolibri/utils/cli.py\", line 580, in main\r\n manage(command, args=django_args)\r\n File \"/home/christian/repos/kolibri/kolibri/utils/cli.py\", line 411, in manage\r\n execute_from_command_line(argv=argv)\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 353, in execute_from_command_line\r\n utility.execute()\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 345, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py\", line 348, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/home/christian/.virtualenvs/kolibri/local/lib/python2.7/site-packages/django/core/management/base.py\", line 399, in execute\r\n output = self.handle(*args, **options)\r\n File \"/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py\", line 98, in handle\r\n return self.handle_async(*args, **options)\r\n File \"/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py\", line 89, in handle_async\r\n self.download_channel(options[\"channel_id\"])\r\n File \"/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py\", line 42, in download_channel\r\n self._transfer(DOWNLOAD_METHOD, channel_id)\r\n File \"/home/christian/repos/kolibri/kolibri/content/management/commands/importchannel.py\", line 73, in _transfer\r\n if self.is_cancelled():\r\n File \"/home/christian/repos/kolibri/kolibri/tasks/management/commands/base.py\", line 108, in is_cancelled\r\n self.check_for_cancel(last_stage)\r\nTypeError: 'NoneType' object is not callable\r\n\r\n```\r\n\r\n## How to reproduce\r\n\r\n1. 
Use the `importchannel` command\r\n\r\n## Real-life consequences\r\n\r\n* Breaks my workflow.\n", "before_files": [{"content": "import abc\nfrom collections import namedtuple\n\nfrom barbequeue.exceptions import UserCancelledError\nfrom django.core.management.base import BaseCommand\nfrom tqdm import tqdm\n\nProgress = namedtuple(\n 'Progress',\n [\n 'progress_fraction',\n 'message',\n 'extra_data',\n 'level',\n ]\n)\n\n\nclass ProgressTracker():\n\n def __init__(self, total=100, level=0, update_callback=None):\n\n # set default values\n self.progress = 0\n self.message = \"\"\n self.extra_data = None\n\n # store provided arguments\n self.total = total\n self.level = level\n self.update_callback = update_callback\n\n # initialize the tqdm progress bar\n self.progressbar = tqdm(total=total)\n\n def update_progress(self, increment=1, message=\"\", extra_data=None):\n\n self.progressbar.update(increment)\n\n self.progress += increment\n\n self.message = message\n\n self.extra_data = extra_data\n\n if callable(self.update_callback):\n p = self.get_progress()\n self.update_callback(p.progress_fraction, p)\n\n def get_progress(self):\n\n return Progress(\n progress_fraction=0 if self.total == 0 else self.progress / float(self.total),\n message=self.message,\n extra_data=self.extra_data,\n level=self.level,\n )\n\n def __enter__(self):\n return self.update_progress\n\n def __exit__(self, *exc_details):\n if self.progressbar is not None:\n self.progressbar.close()\n\n\nclass AsyncCommand(BaseCommand):\n \"\"\"A management command with added convenience functions for displaying\n progress to the user.\n\n Rather than implementing handle() (as is for BaseCommand), subclasses, must\n implement handle_async(), which accepts the same arguments as handle().\n\n If ran from the command line, AsynCommand displays a progress bar to the\n user. If ran asynchronously through kolibri.tasks.schedule_command(),\n AsyncCommand sends results through the Progress class to the main Django\n process. Anyone who knows the task id for the command instance can check\n the intermediate progress by looking at the task's AsyncResult.result\n variable.\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.progresstrackers = []\n\n def _update_all_progress(self, progress_fraction, progress):\n if callable(self.update_progress):\n progress_list = [p.get_progress() for p in self.progresstrackers]\n # HACK (aron): self.update_progress' signature has changed between django_q\n # and iceqube/bbq. 
It now expects the current progress,\n # the total progress, and then derives the\n # percentage progress manually.\n self.update_progress(progress_list[0].progress_fraction, 1.)\n\n def handle(self, *args, **options):\n self.update_progress = options.pop(\"update_progress\", None)\n self.check_for_cancel = options.pop(\"check_for_cancel\", None)\n return self.handle_async(*args, **options)\n\n def start_progress(self, total=100):\n level = len(self.progresstrackers)\n tracker = ProgressTracker(total=total, level=level, update_callback=self._update_all_progress)\n self.progresstrackers.append(tracker)\n return tracker\n\n def is_cancelled(self, last_stage=\"CANCELLING\"):\n try:\n self.check_for_cancel(last_stage)\n return False\n except UserCancelledError:\n return True\n\n def cancel(self, last_stage=\"CANCELLED\"):\n self.check_for_cancel(last_stage)\n\n @abc.abstractmethod\n def handle_async(self, *args, **options):\n \"\"\"\n handle_async should be reimplemented by any Subclass of AsyncCommand.\n \"\"\"\n pass\n", "path": "kolibri/tasks/management/commands/base.py"}]}
| 2,314 | 217 |
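The traceback in the kolibri entry above comes from `is_cancelled()` calling `self.check_for_cancel` unconditionally: when the command runs directly from the CLI, `options.pop("check_for_cancel", None)` leaves it as `None`, and calling `None` raises the `TypeError`. The golden diff guards both call sites. A stripped-down sketch of the same pattern, independent of Django and kombu, showing what the guard changes:

```python
class UserCancelledError(Exception):
    pass


class AsyncCommandSketch:
    """Minimal stand-in for the cancel handling in kolibri's AsyncCommand."""

    def __init__(self, check_for_cancel=None):
        # None when invoked from the command line; a callable when the
        # command is scheduled through the task queue.
        self.check_for_cancel = check_for_cancel

    def is_cancelled(self, last_stage="CANCELLING"):
        if self.check_for_cancel:
            try:
                self.check_for_cancel(last_stage)
                return False
            except UserCancelledError:
                return True
        return False

    def cancel(self, last_stage="CANCELLED"):
        if self.check_for_cancel:
            return self.check_for_cancel(last_stage)


# CLI-style invocation: no callback registered, no TypeError raised.
assert AsyncCommandSketch().is_cancelled() is False
```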
gh_patches_debug_32958
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-498
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tornado Tracer configuration doesn't have access to settings object for Trace Filtering
</issue>
<code>
[start of ddtrace/contrib/tornado/__init__.py]
1 """
2 The Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.
3 Auto instrumentation is available using the ``patch`` function that **must be called before**
4 importing the tornado library. The following is an example::
5
6 # patch before importing tornado and concurrent.futures
7 from ddtrace import tracer, patch
8 patch(tornado=True)
9
10 import tornado.web
11 import tornado.gen
12 import tornado.ioloop
13
14 # create your handlers
15 class MainHandler(tornado.web.RequestHandler):
16 @tornado.gen.coroutine
17 def get(self):
18 self.write("Hello, world")
19
20 # create your application
21 app = tornado.web.Application([
22 (r'/', MainHandler),
23 ])
24
25 # and run it as usual
26 app.listen(8888)
27 tornado.ioloop.IOLoop.current().start()
28
29 When any type of ``RequestHandler`` is hit, a request root span is automatically created. If
30 you want to trace more parts of your application, you can use the ``wrap()`` decorator and
31 the ``trace()`` method as usual::
32
33 class MainHandler(tornado.web.RequestHandler):
34 @tornado.gen.coroutine
35 def get(self):
36 yield self.notify()
37 yield self.blocking_method()
38 with tracer.trace('tornado.before_write') as span:
39 # trace more work in the handler
40
41 @tracer.wrap('tornado.executor_handler')
42 @tornado.concurrent.run_on_executor
43 def blocking_method(self):
44 # do something expensive
45
46 @tracer.wrap('tornado.notify', service='tornado-notification')
47 @tornado.gen.coroutine
48 def notify(self):
49 # do something
50
51 Tornado settings can be used to change some tracing configuration, like::
52
53 settings = {
54 'datadog_trace': {
55 'default_service': 'my-tornado-app',
56 'tags': {'env': 'production'},
57 'distributed_tracing': True,
58 },
59 }
60
61 app = tornado.web.Application([
62 (r'/', MainHandler),
63 ], **settings)
64
65 The available settings are:
66
67 * ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually
68 this configuration must be updated with a meaningful name.
69 * ``tags`` (default: `{}`): set global tags that should be applied to all spans.
70 * ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the
71 code is still instrumented but no spans are sent to the APM agent.
72 * ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called
73 remotely from an instrumented application.
74 We suggest to enable it only for internal services where headers are under your control.
75 * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.
76 * ``agent_port`` (default: `8126`): define the port of the APM agent.
77 """
78 from ...utils.importlib import require_modules
79
80
81 required_modules = ['tornado']
82
83 with require_modules(required_modules) as missing_modules:
84 if not missing_modules:
85 from .stack_context import run_with_trace_context, TracerStackContext
86
87 context_provider = TracerStackContext()
88
89 from .patch import patch, unpatch
90
91 __all__ = [
92 'patch',
93 'unpatch',
94 'context_provider',
95 'run_with_trace_context',
96 'TracerStackContext',
97 ]
98
[end of ddtrace/contrib/tornado/__init__.py]
[start of ddtrace/contrib/tornado/application.py]
1 import ddtrace
2
3 from tornado import template
4
5 from . import decorators, context_provider
6 from .constants import CONFIG_KEY
7
8 from ...ext import AppTypes
9
10
11 def tracer_config(__init__, app, args, kwargs):
12 """
13 Wrap Tornado web application so that we can configure services info and
14 tracing settings after the initialization.
15 """
16 # call the Application constructor
17 __init__(*args, **kwargs)
18
19 # default settings
20 settings = {
21 'tracer': ddtrace.tracer,
22 'default_service': 'tornado-web',
23 'distributed_tracing': False,
24 }
25
26 # update defaults with users settings
27 user_settings = app.settings.get(CONFIG_KEY)
28 if user_settings:
29 settings.update(user_settings)
30
31 app.settings[CONFIG_KEY] = settings
32 tracer = settings['tracer']
33 service = settings['default_service']
34
35 # the tracer must use the right Context propagation and wrap executor;
36 # this action is done twice because the patch() method uses the
37 # global tracer while here we can have a different instance (even if
38 # this is not usual).
39 tracer.configure(
40 context_provider=context_provider,
41 wrap_executor=decorators.wrap_executor,
42 enabled=settings.get('enabled', None),
43 hostname=settings.get('agent_hostname', None),
44 port=settings.get('agent_port', None),
45 )
46
47 # set global tags if any
48 tags = settings.get('tags', None)
49 if tags:
50 tracer.set_tags(tags)
51
52 # configure the current service
53 tracer.set_service_info(
54 service=service,
55 app='tornado',
56 app_type=AppTypes.web,
57 )
58
59 # configure the PIN object for template rendering
60 ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template)
61
[end of ddtrace/contrib/tornado/application.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py
--- a/ddtrace/contrib/tornado/__init__.py
+++ b/ddtrace/contrib/tornado/__init__.py
@@ -55,6 +55,11 @@
'default_service': 'my-tornado-app',
'tags': {'env': 'production'},
'distributed_tracing': True,
+ 'settings': {
+ 'FILTERS': [
+ FilterRequestsOnUrl(r'http://test\.example\.com'),
+ ],
+ },
},
}
@@ -74,6 +79,7 @@
We suggest to enable it only for internal services where headers are under your control.
* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.
* ``agent_port`` (default: `8126`): define the port of the APM agent.
+* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.
"""
from ...utils.importlib import require_modules
diff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py
--- a/ddtrace/contrib/tornado/application.py
+++ b/ddtrace/contrib/tornado/application.py
@@ -32,6 +32,9 @@
tracer = settings['tracer']
service = settings['default_service']
+ # extract extra settings
+ extra_settings = settings.get('settings', {})
+
# the tracer must use the right Context propagation and wrap executor;
# this action is done twice because the patch() method uses the
# global tracer while here we can have a different instance (even if
@@ -42,6 +45,7 @@
enabled=settings.get('enabled', None),
hostname=settings.get('agent_hostname', None),
port=settings.get('agent_port', None),
+ settings=extra_settings,
)
# set global tags if any
|
{"golden_diff": "diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py\n--- a/ddtrace/contrib/tornado/__init__.py\n+++ b/ddtrace/contrib/tornado/__init__.py\n@@ -55,6 +55,11 @@\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n+ 'settings': {\n+ 'FILTERS': [\n+ FilterRequestsOnUrl(r'http://test\\.example\\.com'),\n+ ],\n+ },\n },\n }\n \n@@ -74,6 +79,7 @@\n We suggest to enable it only for internal services where headers are under your control.\n * ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n * ``agent_port`` (default: `8126`): define the port of the APM agent.\n+* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.\n \"\"\"\n from ...utils.importlib import require_modules\n \ndiff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py\n--- a/ddtrace/contrib/tornado/application.py\n+++ b/ddtrace/contrib/tornado/application.py\n@@ -32,6 +32,9 @@\n tracer = settings['tracer']\n service = settings['default_service']\n \n+ # extract extra settings\n+ extra_settings = settings.get('settings', {})\n+\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n@@ -42,6 +45,7 @@\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n+ settings=extra_settings,\n )\n \n # set global tags if any\n", "issue": "Tornado Tracer configuration doesn't have access to settings object for Trace Filtering\n\n", "before_files": [{"content": "\"\"\"\nThe Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.\nAuto instrumentation is available using the ``patch`` function that **must be called before**\nimporting the tornado library. The following is an example::\n\n # patch before importing tornado and concurrent.futures\n from ddtrace import tracer, patch\n patch(tornado=True)\n\n import tornado.web\n import tornado.gen\n import tornado.ioloop\n\n # create your handlers\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n self.write(\"Hello, world\")\n\n # create your application\n app = tornado.web.Application([\n (r'/', MainHandler),\n ])\n\n # and run it as usual\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\nWhen any type of ``RequestHandler`` is hit, a request root span is automatically created. 
If\nyou want to trace more parts of your application, you can use the ``wrap()`` decorator and\nthe ``trace()`` method as usual::\n\n class MainHandler(tornado.web.RequestHandler):\n @tornado.gen.coroutine\n def get(self):\n yield self.notify()\n yield self.blocking_method()\n with tracer.trace('tornado.before_write') as span:\n # trace more work in the handler\n\n @tracer.wrap('tornado.executor_handler')\n @tornado.concurrent.run_on_executor\n def blocking_method(self):\n # do something expensive\n\n @tracer.wrap('tornado.notify', service='tornado-notification')\n @tornado.gen.coroutine\n def notify(self):\n # do something\n\nTornado settings can be used to change some tracing configuration, like::\n\n settings = {\n 'datadog_trace': {\n 'default_service': 'my-tornado-app',\n 'tags': {'env': 'production'},\n 'distributed_tracing': True,\n },\n }\n\n app = tornado.web.Application([\n (r'/', MainHandler),\n ], **settings)\n\nThe available settings are:\n\n* ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually\n this configuration must be updated with a meaningful name.\n* ``tags`` (default: `{}`): set global tags that should be applied to all spans.\n* ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the\n code is still instrumented but no spans are sent to the APM agent.\n* ``distributed_tracing`` (default: `False`): enable distributed tracing if this is called\n remotely from an instrumented application.\n We suggest to enable it only for internal services where headers are under your control.\n* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.\n* ``agent_port`` (default: `8126`): define the port of the APM agent.\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = ['tornado']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .stack_context import run_with_trace_context, TracerStackContext\n\n context_provider = TracerStackContext()\n\n from .patch import patch, unpatch\n\n __all__ = [\n 'patch',\n 'unpatch',\n 'context_provider',\n 'run_with_trace_context',\n 'TracerStackContext',\n ]\n", "path": "ddtrace/contrib/tornado/__init__.py"}, {"content": "import ddtrace\n\nfrom tornado import template\n\nfrom . 
import decorators, context_provider\nfrom .constants import CONFIG_KEY\n\nfrom ...ext import AppTypes\n\n\ndef tracer_config(__init__, app, args, kwargs):\n \"\"\"\n Wrap Tornado web application so that we can configure services info and\n tracing settings after the initialization.\n \"\"\"\n # call the Application constructor\n __init__(*args, **kwargs)\n\n # default settings\n settings = {\n 'tracer': ddtrace.tracer,\n 'default_service': 'tornado-web',\n 'distributed_tracing': False,\n }\n\n # update defaults with users settings\n user_settings = app.settings.get(CONFIG_KEY)\n if user_settings:\n settings.update(user_settings)\n\n app.settings[CONFIG_KEY] = settings\n tracer = settings['tracer']\n service = settings['default_service']\n\n # the tracer must use the right Context propagation and wrap executor;\n # this action is done twice because the patch() method uses the\n # global tracer while here we can have a different instance (even if\n # this is not usual).\n tracer.configure(\n context_provider=context_provider,\n wrap_executor=decorators.wrap_executor,\n enabled=settings.get('enabled', None),\n hostname=settings.get('agent_hostname', None),\n port=settings.get('agent_port', None),\n )\n\n # set global tags if any\n tags = settings.get('tags', None)\n if tags:\n tracer.set_tags(tags)\n\n # configure the current service\n tracer.set_service_info(\n service=service,\n app='tornado',\n app_type=AppTypes.web,\n )\n\n # configure the PIN object for template rendering\n ddtrace.Pin(app='tornado', service=service, app_type='web', tracer=tracer).onto(template)\n", "path": "ddtrace/contrib/tornado/application.py"}]}
| 2,023 | 441 |
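The ddtrace fix above works by pulling an extra `settings` sub-dict out of the Tornado application settings and forwarding it to `tracer.configure(settings=...)`, which is the hook ddtrace uses for trace filters. A sketch of the resulting application configuration; the `ddtrace.filters.FilterRequestsOnUrl` import path is an assumption based on ddtrace's trace-filtering documentation and is not part of the diff itself:

```python
import tornado.web
from ddtrace.filters import FilterRequestsOnUrl  # assumed import path


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello, world")


settings = {
    'datadog_trace': {
        'default_service': 'my-tornado-app',
        'distributed_tracing': True,
        # With the patch applied, this sub-dict is passed straight through
        # to tracer.configure(settings=...), enabling trace filtering.
        'settings': {
            'FILTERS': [
                FilterRequestsOnUrl(r'http://test\.example\.com'),
            ],
        },
    },
}

app = tornado.web.Application([(r'/', MainHandler)], **settings)
```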
gh_patches_debug_17104
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-3992
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clearer Error Logs when RabbitMQ not reachable
If RabbitMQ is not running, users see logs like this in st2api.log:
```
2018-02-06 17:44:10,549 140082567223888 INFO __init__ [-] Connecting to database "st2" @ "127.0.0.1:27017" as user "None".
2018-02-06 17:44:10,654 140082567223888 ERROR connection_retry_wrapper [-] Connection or channel error identified: [Errno 111] ECONNREFUSED.
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/connection_retry_wrapper.py", line 115, in run
channel = connection.channel()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/connection.py", line 266, in channel
chan = self.transport.create_channel(self.connection)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/connection.py", line 802, in connection
self._connection = self._establish_connection()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/connection.py", line 757, in _establish_connection
conn = self.transport.establish_connection()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 130, in establish_connection
conn.connect()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/amqp/connection.py", line 282, in connect
self.transport.connect()
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/amqp/transport.py", line 109, in connect
self._connect(self.host, self.port, self.connect_timeout)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/amqp/transport.py", line 150, in _connect
self.sock.connect(sa)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/eventlet/greenio/base.py", line 256, in connect
socket_checkerr(fd)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/eventlet/greenio/base.py", line 46, in socket_checkerr
raise socket.error(err, errno.errorcode[err])
error: [Errno 111] ECONNREFUSED
```
This is confusing for users for two reasons. One is because it is immediately below a "Connecting to database" message. Issue #3990 addresses that. The other issue is that the ERROR message here does not make it obvious that it is a problem with connecting to RabbitMQ. This should be more obvious in the log message.
</issue>
<code>
[start of st2common/st2common/transport/connection_retry_wrapper.py]
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17 import eventlet
18
19 __all__ = ['ConnectionRetryWrapper', 'ClusterRetryContext']
20
21
22 class ClusterRetryContext(object):
23 """
24 Stores retry context for cluster retries. It makes certain assumptions
25 on how cluster_size and retry should be determined.
26 """
27 def __init__(self, cluster_size):
28 # No of nodes in a cluster
29 self.cluster_size = cluster_size
30 # No of times to retry in a cluster
31 self.cluster_retry = 2
32 # time to wait between retry in a cluster
33 self.wait_between_cluster = 10
34
35 # No of nodes attempted. Starts at 1 since the
36 self._nodes_attempted = 1
37
38 def test_should_stop(self):
39 should_stop = True
40 if self._nodes_attempted > self.cluster_size * self.cluster_retry:
41 return should_stop, -1
42 wait = 0
43 should_stop = False
44 if self._nodes_attempted % self.cluster_size == 0:
45 wait = self.wait_between_cluster
46 self._nodes_attempted += 1
47 return should_stop, wait
48
49
50 class ConnectionRetryWrapper(object):
51 """
52 Manages retry of connection and also switching to different nodes in a cluster.
53
54 :param cluster_size: Size of the cluster.
55 :param logger: logger to use to log moderately useful information.
56
57 .. code-block:: python
58 # Without ensuring recoverable errors are retried
59 connection_urls = [
60 'amqp://guest:guest@node1:5672',
61 'amqp://guest:guest@node2:5672',
62 'amqp://guest:guest@node3:5672'
63 ]
64 with Connection(connection_urls) as connection:
65 retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),
66 logger=my_logger)
67 # wrapped_callback must have signature ``def func(connection, channel)``
68 def wrapped_callback(connection, channel):
69 pass
70
71 retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback)
72
73 # With ensuring recoverable errors are retried
74 connection_urls = [
75 'amqp://guest:guest@node1:5672',
76 'amqp://guest:guest@node2:5672',
77 'amqp://guest:guest@node3:5672'
78 ]
79 with Connection(connection_urls) as connection:
80 retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),
81 logger=my_logger)
82 # wrapped_callback must have signature ``def func(connection, channel)``
83 def wrapped_callback(connection, channel):
84 kwargs = {...}
85 # call ensured to correctly deal with recoverable errors.
86 retry_wrapper.ensured(connection=connection_retry_wrapper,
87 obj=my_obj,
88 to_ensure_func=my_obj.ensuree,
89 **kwargs)
90
91 retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback)
92
93 """
94 def __init__(self, cluster_size, logger):
95 self._retry_context = ClusterRetryContext(cluster_size=cluster_size)
96 self._logger = logger
97
98 def errback(self, exc, interval):
99 self._logger.error('Rabbitmq connection error: %s', exc.message)
100
101 def run(self, connection, wrapped_callback):
102 """
103 Run the wrapped_callback in a protective covering of retries and error handling.
104
105 :param connection: Connection to messaging service
106 :type connection: kombu.connection.Connection
107
108 :param wrapped_callback: Callback that will be wrapped by all the fine handling in this
109 method. Expected signature of callback -
110 ``def func(connection, channel)``
111 """
112 should_stop = False
113 channel = None
114 while not should_stop:
115 try:
116 channel = connection.channel()
117 wrapped_callback(connection=connection, channel=channel)
118 should_stop = True
119 except connection.connection_errors + connection.channel_errors as e:
120 self._logger.exception('Connection or channel error identified: %s.' % (str(e)))
121 should_stop, wait = self._retry_context.test_should_stop()
122 # reset channel to None to avoid any channel closing errors. At this point
123 # in case of an exception there should be no channel but that is better to
124 # guarantee.
125 channel = None
126 # All attempts to re-establish connections have failed. This error needs to
127 # be notified so raise.
128 if should_stop:
129 raise
130 # -1, 0 and 1+ are handled properly by eventlet.sleep
131 eventlet.sleep(wait)
132
133 connection.close()
134 # ensure_connection will automatically switch to an alternate. Other connections
135 # in the pool will be fixed independently. It would be nice to cut-over the
136 # entire ConnectionPool simultaneously but that would require writing our own
137 # ConnectionPool. If a server recovers it could happen that the same process
138 # ends up talking to separate nodes in a cluster.
139 connection.ensure_connection()
140
141 except Exception as e:
142 self._logger.exception('Connections to rabbitmq cannot be re-established: %s',
143 e.message)
144 # Not being able to publish a message could be a significant issue for an app.
145 raise
146 finally:
147 if should_stop and channel:
148 try:
149 channel.close()
150 except Exception:
151 self._logger.warning('Error closing channel.', exc_info=True)
152
153 def ensured(self, connection, obj, to_ensure_func, **kwargs):
154 """
155 Ensure that recoverable errors are retried a set number of times before giving up.
156
157 :param connection: Connection to messaging service
158 :type connection: kombu.connection.Connection
159
160 :param obj: Object whose method is to be ensured. Typically, channel, producer etc. from
161 the kombu library.
162 :type obj: Must support mixin kombu.abstract.MaybeChannelBound
163 """
164 ensuring_func = connection.ensure(obj, to_ensure_func, errback=self.errback, max_retries=3)
165 ensuring_func(**kwargs)
166
[end of st2common/st2common/transport/connection_retry_wrapper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/st2common/transport/connection_retry_wrapper.py b/st2common/st2common/transport/connection_retry_wrapper.py
--- a/st2common/st2common/transport/connection_retry_wrapper.py
+++ b/st2common/st2common/transport/connection_retry_wrapper.py
@@ -117,7 +117,7 @@
wrapped_callback(connection=connection, channel=channel)
should_stop = True
except connection.connection_errors + connection.channel_errors as e:
- self._logger.exception('Connection or channel error identified: %s.' % (str(e)))
+ self._logger.exception('RabbitMQ connection or channel error: %s.' % (str(e)))
should_stop, wait = self._retry_context.test_should_stop()
# reset channel to None to avoid any channel closing errors. At this point
# in case of an exception there should be no channel but that is better to
|
{"golden_diff": "diff --git a/st2common/st2common/transport/connection_retry_wrapper.py b/st2common/st2common/transport/connection_retry_wrapper.py\n--- a/st2common/st2common/transport/connection_retry_wrapper.py\n+++ b/st2common/st2common/transport/connection_retry_wrapper.py\n@@ -117,7 +117,7 @@\n wrapped_callback(connection=connection, channel=channel)\n should_stop = True\n except connection.connection_errors + connection.channel_errors as e:\n- self._logger.exception('Connection or channel error identified: %s.' % (str(e)))\n+ self._logger.exception('RabbitMQ connection or channel error: %s.' % (str(e)))\n should_stop, wait = self._retry_context.test_should_stop()\n # reset channel to None to avoid any channel closing errors. At this point\n # in case of an exception there should be no channel but that is better to\n", "issue": "Clearer Error Logs when RabbitMQ not reachable\nIf RabbitMQ is not running, users see logs like this in st2api.log:\r\n\r\n```\r\n2018-02-06 17:44:10,549 140082567223888 INFO __init__ [-] Connecting to database \"st2\" @ \"127.0.0.1:27017\" as user \"None\".\r\n2018-02-06 17:44:10,654 140082567223888 ERROR connection_retry_wrapper [-] Connection or channel error identified: [Errno 111] ECONNREFUSED.\r\nTraceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/transport/connection_retry_wrapper.py\", line 115, in run\r\n channel = connection.channel()\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/connection.py\", line 266, in channel\r\n chan = self.transport.create_channel(self.connection)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/connection.py\", line 802, in connection\r\n self._connection = self._establish_connection()\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/connection.py\", line 757, in _establish_connection\r\n conn = self.transport.establish_connection()\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/kombu/transport/pyamqp.py\", line 130, in establish_connection\r\n conn.connect()\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/amqp/connection.py\", line 282, in connect\r\n self.transport.connect()\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/amqp/transport.py\", line 109, in connect\r\n self._connect(self.host, self.port, self.connect_timeout)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/amqp/transport.py\", line 150, in _connect\r\n self.sock.connect(sa)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/eventlet/greenio/base.py\", line 256, in connect\r\n socket_checkerr(fd)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/eventlet/greenio/base.py\", line 46, in socket_checkerr\r\n raise socket.error(err, errno.errorcode[err])\r\nerror: [Errno 111] ECONNREFUSED\r\n```\r\n\r\nThis is confusing for users for two reasons. One is because it is immediately below a \"Connecting to database\" message. Issue #3990 addresses that. The other issue is that the ERROR message here does not make it obvious that it is a problem with connecting to RabbitMQ. This should be more obvious in the log message. \n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. 
See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport eventlet\n\n__all__ = ['ConnectionRetryWrapper', 'ClusterRetryContext']\n\n\nclass ClusterRetryContext(object):\n \"\"\"\n Stores retry context for cluster retries. It makes certain assumptions\n on how cluster_size and retry should be determined.\n \"\"\"\n def __init__(self, cluster_size):\n # No of nodes in a cluster\n self.cluster_size = cluster_size\n # No of times to retry in a cluster\n self.cluster_retry = 2\n # time to wait between retry in a cluster\n self.wait_between_cluster = 10\n\n # No of nodes attempted. Starts at 1 since the\n self._nodes_attempted = 1\n\n def test_should_stop(self):\n should_stop = True\n if self._nodes_attempted > self.cluster_size * self.cluster_retry:\n return should_stop, -1\n wait = 0\n should_stop = False\n if self._nodes_attempted % self.cluster_size == 0:\n wait = self.wait_between_cluster\n self._nodes_attempted += 1\n return should_stop, wait\n\n\nclass ConnectionRetryWrapper(object):\n \"\"\"\n Manages retry of connection and also switching to different nodes in a cluster.\n\n :param cluster_size: Size of the cluster.\n :param logger: logger to use to log moderately useful information.\n\n .. 
code-block:: python\n # Without ensuring recoverable errors are retried\n connection_urls = [\n 'amqp://guest:guest@node1:5672',\n 'amqp://guest:guest@node2:5672',\n 'amqp://guest:guest@node3:5672'\n ]\n with Connection(connection_urls) as connection:\n retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),\n logger=my_logger)\n # wrapped_callback must have signature ``def func(connection, channel)``\n def wrapped_callback(connection, channel):\n pass\n\n retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback)\n\n # With ensuring recoverable errors are retried\n connection_urls = [\n 'amqp://guest:guest@node1:5672',\n 'amqp://guest:guest@node2:5672',\n 'amqp://guest:guest@node3:5672'\n ]\n with Connection(connection_urls) as connection:\n retry_wrapper = ConnectionRetryWrapper(cluster_size=len(connection_urls),\n logger=my_logger)\n # wrapped_callback must have signature ``def func(connection, channel)``\n def wrapped_callback(connection, channel):\n kwargs = {...}\n # call ensured to correctly deal with recoverable errors.\n retry_wrapper.ensured(connection=connection_retry_wrapper,\n obj=my_obj,\n to_ensure_func=my_obj.ensuree,\n **kwargs)\n\n retry_wrapper.run(connection=connection, wrapped_callback=wrapped_callback)\n\n \"\"\"\n def __init__(self, cluster_size, logger):\n self._retry_context = ClusterRetryContext(cluster_size=cluster_size)\n self._logger = logger\n\n def errback(self, exc, interval):\n self._logger.error('Rabbitmq connection error: %s', exc.message)\n\n def run(self, connection, wrapped_callback):\n \"\"\"\n Run the wrapped_callback in a protective covering of retries and error handling.\n\n :param connection: Connection to messaging service\n :type connection: kombu.connection.Connection\n\n :param wrapped_callback: Callback that will be wrapped by all the fine handling in this\n method. Expected signature of callback -\n ``def func(connection, channel)``\n \"\"\"\n should_stop = False\n channel = None\n while not should_stop:\n try:\n channel = connection.channel()\n wrapped_callback(connection=connection, channel=channel)\n should_stop = True\n except connection.connection_errors + connection.channel_errors as e:\n self._logger.exception('Connection or channel error identified: %s.' % (str(e)))\n should_stop, wait = self._retry_context.test_should_stop()\n # reset channel to None to avoid any channel closing errors. At this point\n # in case of an exception there should be no channel but that is better to\n # guarantee.\n channel = None\n # All attempts to re-establish connections have failed. This error needs to\n # be notified so raise.\n if should_stop:\n raise\n # -1, 0 and 1+ are handled properly by eventlet.sleep\n eventlet.sleep(wait)\n\n connection.close()\n # ensure_connection will automatically switch to an alternate. Other connections\n # in the pool will be fixed independently. It would be nice to cut-over the\n # entire ConnectionPool simultaneously but that would require writing our own\n # ConnectionPool. 
If a server recovers it could happen that the same process\n # ends up talking to separate nodes in a cluster.\n connection.ensure_connection()\n\n except Exception as e:\n self._logger.exception('Connections to rabbitmq cannot be re-established: %s',\n e.message)\n # Not being able to publish a message could be a significant issue for an app.\n raise\n finally:\n if should_stop and channel:\n try:\n channel.close()\n except Exception:\n self._logger.warning('Error closing channel.', exc_info=True)\n\n def ensured(self, connection, obj, to_ensure_func, **kwargs):\n \"\"\"\n Ensure that recoverable errors are retried a set number of times before giving up.\n\n :param connection: Connection to messaging service\n :type connection: kombu.connection.Connection\n\n :param obj: Object whose method is to be ensured. Typically, channel, producer etc. from\n the kombu library.\n :type obj: Must support mixin kombu.abstract.MaybeChannelBound\n \"\"\"\n ensuring_func = connection.ensure(obj, to_ensure_func, errback=self.errback, max_retries=3)\n ensuring_func(**kwargs)\n", "path": "st2common/st2common/transport/connection_retry_wrapper.py"}]}
| 3,066 | 196 |
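For the st2 entry above, the accepted patch only rewords the log line so that it names RabbitMQ, but the retry machinery around that log call is easy to misread: `ClusterRetryContext` allows `cluster_size * cluster_retry` attempts and inserts a `wait_between_cluster` pause after each full pass over the cluster. A small self-contained check of that arithmetic, copying the class as it appears in the listing:

```python
class ClusterRetryContext(object):
    """Copied from st2common/transport/connection_retry_wrapper.py above."""

    def __init__(self, cluster_size):
        self.cluster_size = cluster_size
        self.cluster_retry = 2
        self.wait_between_cluster = 10
        self._nodes_attempted = 1

    def test_should_stop(self):
        should_stop = True
        if self._nodes_attempted > self.cluster_size * self.cluster_retry:
            return should_stop, -1
        wait = 0
        should_stop = False
        if self._nodes_attempted % self.cluster_size == 0:
            wait = self.wait_between_cluster
        self._nodes_attempted += 1
        return should_stop, wait


ctx = ClusterRetryContext(cluster_size=3)
schedule = [ctx.test_should_stop() for _ in range(7)]
# Two full passes over a 3-node cluster (6 attempts), a 10 s pause after
# each pass, then the seventh call reports that retrying should stop.
assert schedule == [(False, 0), (False, 0), (False, 10),
                    (False, 0), (False, 0), (False, 10),
                    (True, -1)]
```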
gh_patches_debug_9614
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-780
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set snippet to `Draft` when `Save as New`
</issue>
<code>
[start of snippets/base/admin/adminmodels.py]
1 import re
2
3 from django.contrib import admin
4 from django.db.models import TextField, Q
5 from django.template.loader import get_template
6 from django.utils.safestring import mark_safe
7
8 from reversion.admin import VersionAdmin
9 from django_ace import AceWidget
10 from django_statsd.clients import statsd
11 from jinja2.meta import find_undeclared_variables
12 from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter
13
14 from snippets.base import forms, models
15 from snippets.base.models import JINJA_ENV
16 from snippets.base.admin.filters import ModifiedFilter, ReleaseFilter
17
18
19 MATCH_LOCALE_REGEX = re.compile('(\w+(?:-\w+)*)')
20 RESERVED_VARIABLES = ('_', 'snippet_id')
21
22
23 class ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):
24 list_display = ('description', 'is_exclusion', 'startpage_version', 'name',
25 'version', 'locale', 'appbuildid', 'build_target',
26 'channel', 'os_version', 'distribution',
27 'distribution_version', 'modified')
28 list_filter = ('name', 'version', 'os_version', 'appbuildid',
29 'build_target', 'channel', 'distribution', 'locale')
30 save_on_top = True
31 search_fields = ('description',)
32
33
34 class LogEntryAdmin(admin.ModelAdmin):
35 list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')
36 list_filter = ('user', 'content_type')
37
38
39 class SnippetTemplateVariableInline(admin.TabularInline):
40 model = models.SnippetTemplateVariable
41 formset = forms.SnippetTemplateVariableInlineFormset
42 max_num = 0
43 can_delete = False
44 readonly_fields = ('name',)
45 fields = ('name', 'type', 'order', 'description')
46
47
48 class SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):
49 save_on_top = True
50 list_display = ('name', 'priority', 'hidden')
51 list_filter = ('hidden', 'startpage')
52 inlines = (SnippetTemplateVariableInline,)
53 formfield_overrides = {
54 TextField: {'widget': AceWidget(mode='html', theme='github',
55 width='1200px', height='500px')},
56 }
57
58 class Media:
59 css = {
60 'all': ('css/admin.css',)
61 }
62
63 def save_related(self, request, form, formsets, change):
64 """
65 After saving the related objects, remove and add
66 SnippetTemplateVariables depending on how the template code changed.
67 """
68 super(SnippetTemplateAdmin, self).save_related(request, form, formsets,
69 change)
70
71 # Parse the template code and find any undefined variables.
72 ast = JINJA_ENV.env.parse(form.instance.code)
73 new_vars = find_undeclared_variables(ast)
74 var_manager = form.instance.variable_set
75
76 # Filter out reserved variable names.
77 new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]
78
79 # Delete variables not in the new set.
80 var_manager.filter(~Q(name__in=new_vars)).delete()
81
82 # Create variables that don't exist.
83 for i, variable in enumerate(new_vars, start=1):
84 obj, _ = models.SnippetTemplateVariable.objects.get_or_create(
85 template=form.instance, name=variable)
86 if obj.order == 0:
87 obj.order = i * 10
88 obj.save()
89
90
91 class UploadedFileAdmin(admin.ModelAdmin):
92 readonly_fields = ('url', 'preview', 'snippets')
93 list_display = ('name', 'url', 'preview', 'modified')
94 prepopulated_fields = {'name': ('file',)}
95 form = forms.UploadedFileAdminForm
96
97 def preview(self, obj):
98 template = get_template('base/uploadedfile_preview.jinja')
99 return mark_safe(template.render({'file': obj}))
100
101 def snippets(self, obj):
102 """Snippets using this file."""
103 template = get_template('base/uploadedfile_snippets.jinja')
104 return mark_safe(template.render({'snippets': obj.snippets}))
105
106
107 class AddonAdmin(admin.ModelAdmin):
108 list_display = ('name', 'guid')
109
110
111 class ASRSnippetAdmin(admin.ModelAdmin):
112 form = forms.ASRSnippetAdminForm
113
114 list_display_links = (
115 'id',
116 'name',
117 )
118 list_display = (
119 'id',
120 'name',
121 'status',
122 'modified',
123 )
124 list_filter = (
125 ModifiedFilter,
126 'status',
127 ReleaseFilter,
128 ('template', RelatedDropdownFilter),
129 )
130 search_fields = (
131 'name',
132 )
133 autocomplete_fields = (
134 'campaign',
135 'target',
136 )
137 preserve_filters = True
138 readonly_fields = (
139 'created',
140 'modified',
141 'uuid',
142 'creator',
143 'preview_url',
144 )
145 filter_horizontal = ('locales',)
146 save_on_top = True
147 save_as = True
148 view_on_site = False
149
150 fieldsets = (
151 ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),
152 ('Content', {
153 'description': (
154 '''
155 <strong>Available deep links:</strong><br/>
156 <ol>
157 <li><code>special:accounts</code> to open Firefox Accounts</li>
158 <li><code>special:appMenu</code> to open the hamburger menu</li>
159 </ol><br/>
160 <strong>Automatically add Snippet ID:</strong><br/>
161 You can use <code>[[snippet_id]]</code> in any field and it
162 will be automatically replaced by Snippet ID when served to users.
163 <br/>
164 Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa
165 <br/>
166 '''
167 ),
168 'fields': ('template', 'data'),
169 }),
170 ('Publishing Options', {
171 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)
172 }),
173 ('Other Info', {
174 'fields': ('uuid', ('created', 'modified')),
175 'classes': ('collapse',)
176 }),
177 )
178
179 class Media:
180 css = {
181 'all': ('css/admin/ASRSnippetAdmin.css',)
182 }
183 js = (
184 'js/admin/clipboard.min.js',
185 'js/admin/copy_preview.js',
186 )
187
188 def save_model(self, request, obj, form, change):
189 if not obj.creator_id:
190 obj.creator = request.user
191 statsd.incr('save.asrsnippet')
192 super().save_model(request, obj, form, change)
193
194 def preview_url(self, obj):
195 text = f'''
196 <span id="previewLinkUrl">{obj.get_preview_url()}</span>
197 <button id="copyPreviewLink" class="btn"
198 data-clipboard-target="#previewLinkUrl"
199 originalText="Copy to Clipboard" type="button">
200 Copy to Clipboard
201 </button>
202 '''
203 return mark_safe(text)
204
205
206 class CampaignAdmin(admin.ModelAdmin):
207 readonly_fields = ('created', 'modified', 'creator',)
208 prepopulated_fields = {'slug': ('name',)}
209
210 fieldsets = (
211 ('ID', {'fields': ('name', 'slug')}),
212 ('Other Info', {
213 'fields': ('creator', ('created', 'modified')),
214 }),
215 )
216 search_fields = (
217 'name',
218 )
219
220 def save_model(self, request, obj, form, change):
221 if not obj.creator_id:
222 obj.creator = request.user
223 statsd.incr('save.campaign')
224 super().save_model(request, obj, form, change)
225
226
227 class TargetAdmin(admin.ModelAdmin):
228 form = forms.TargetAdminForm
229 readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')
230 search_fields = (
231 'name',
232 )
233 fieldsets = (
234 ('ID', {'fields': ('name',)}),
235 ('Product channels', {
236 'description': 'What channels will this snippet be available in?',
237 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)
238 }),
239 ('Targeting', {
240 'fields': (
241 'filtr_is_default_browser',
242 'filtr_updates_enabled',
243 'filtr_updates_autodownload_enabled',
244 'filtr_profile_age_created',
245 'filtr_firefox_version',
246 'filtr_previous_session_end',
247 'filtr_uses_firefox_sync',
248 'filtr_country',
249 'filtr_is_developer',
250 'filtr_current_search_engine',
251 'filtr_browser_addon',
252 'filtr_total_bookmarks_count',
253 )
254 }),
255 ('Other Info', {
256 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),
257 }),
258 )
259
260 def save_model(self, request, obj, form, change):
261 if not obj.creator_id:
262 obj.creator = request.user
263 statsd.incr('save.target')
264 super().save_model(request, obj, form, change)
265
[end of snippets/base/admin/adminmodels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py
--- a/snippets/base/admin/adminmodels.py
+++ b/snippets/base/admin/adminmodels.py
@@ -202,6 +202,14 @@
'''
return mark_safe(text)
+ def change_view(self, request, *args, **kwargs):
+ if request.method == 'POST' and '_saveasnew' in request.POST:
+ # Always saved cloned snippets as un-published and un-check ready for review.
+ post_data = request.POST.copy()
+ post_data['status'] = models.STATUS_CHOICES['Draft']
+ request.POST = post_data
+ return super().change_view(request, *args, **kwargs)
+
class CampaignAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'modified', 'creator',)
|
{"golden_diff": "diff --git a/snippets/base/admin/adminmodels.py b/snippets/base/admin/adminmodels.py\n--- a/snippets/base/admin/adminmodels.py\n+++ b/snippets/base/admin/adminmodels.py\n@@ -202,6 +202,14 @@\n '''\n return mark_safe(text)\n \n+ def change_view(self, request, *args, **kwargs):\n+ if request.method == 'POST' and '_saveasnew' in request.POST:\n+ # Always saved cloned snippets as un-published and un-check ready for review.\n+ post_data = request.POST.copy()\n+ post_data['status'] = models.STATUS_CHOICES['Draft']\n+ request.POST = post_data\n+ return super().change_view(request, *args, **kwargs)\n+\n \n class CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n", "issue": "Set snippet to `Draft` when `Save as New`\n\n", "before_files": [{"content": "import re\n\nfrom django.contrib import admin\nfrom django.db.models import TextField, Q\nfrom django.template.loader import get_template\nfrom django.utils.safestring import mark_safe\n\nfrom reversion.admin import VersionAdmin\nfrom django_ace import AceWidget\nfrom django_statsd.clients import statsd\nfrom jinja2.meta import find_undeclared_variables\nfrom django_admin_listfilter_dropdown.filters import RelatedDropdownFilter\n\nfrom snippets.base import forms, models\nfrom snippets.base.models import JINJA_ENV\nfrom snippets.base.admin.filters import ModifiedFilter, ReleaseFilter\n\n\nMATCH_LOCALE_REGEX = re.compile('(\\w+(?:-\\w+)*)')\nRESERVED_VARIABLES = ('_', 'snippet_id')\n\n\nclass ClientMatchRuleAdmin(VersionAdmin, admin.ModelAdmin):\n list_display = ('description', 'is_exclusion', 'startpage_version', 'name',\n 'version', 'locale', 'appbuildid', 'build_target',\n 'channel', 'os_version', 'distribution',\n 'distribution_version', 'modified')\n list_filter = ('name', 'version', 'os_version', 'appbuildid',\n 'build_target', 'channel', 'distribution', 'locale')\n save_on_top = True\n search_fields = ('description',)\n\n\nclass LogEntryAdmin(admin.ModelAdmin):\n list_display = ('user', 'content_type', 'object_id', 'object_repr', 'change_message')\n list_filter = ('user', 'content_type')\n\n\nclass SnippetTemplateVariableInline(admin.TabularInline):\n model = models.SnippetTemplateVariable\n formset = forms.SnippetTemplateVariableInlineFormset\n max_num = 0\n can_delete = False\n readonly_fields = ('name',)\n fields = ('name', 'type', 'order', 'description')\n\n\nclass SnippetTemplateAdmin(VersionAdmin, admin.ModelAdmin):\n save_on_top = True\n list_display = ('name', 'priority', 'hidden')\n list_filter = ('hidden', 'startpage')\n inlines = (SnippetTemplateVariableInline,)\n formfield_overrides = {\n TextField: {'widget': AceWidget(mode='html', theme='github',\n width='1200px', height='500px')},\n }\n\n class Media:\n css = {\n 'all': ('css/admin.css',)\n }\n\n def save_related(self, request, form, formsets, change):\n \"\"\"\n After saving the related objects, remove and add\n SnippetTemplateVariables depending on how the template code changed.\n \"\"\"\n super(SnippetTemplateAdmin, self).save_related(request, form, formsets,\n change)\n\n # Parse the template code and find any undefined variables.\n ast = JINJA_ENV.env.parse(form.instance.code)\n new_vars = find_undeclared_variables(ast)\n var_manager = form.instance.variable_set\n\n # Filter out reserved variable names.\n new_vars = [x for x in new_vars if x not in RESERVED_VARIABLES]\n\n # Delete variables not in the new set.\n var_manager.filter(~Q(name__in=new_vars)).delete()\n\n # Create variables that don't exist.\n for i, variable 
in enumerate(new_vars, start=1):\n obj, _ = models.SnippetTemplateVariable.objects.get_or_create(\n template=form.instance, name=variable)\n if obj.order == 0:\n obj.order = i * 10\n obj.save()\n\n\nclass UploadedFileAdmin(admin.ModelAdmin):\n readonly_fields = ('url', 'preview', 'snippets')\n list_display = ('name', 'url', 'preview', 'modified')\n prepopulated_fields = {'name': ('file',)}\n form = forms.UploadedFileAdminForm\n\n def preview(self, obj):\n template = get_template('base/uploadedfile_preview.jinja')\n return mark_safe(template.render({'file': obj}))\n\n def snippets(self, obj):\n \"\"\"Snippets using this file.\"\"\"\n template = get_template('base/uploadedfile_snippets.jinja')\n return mark_safe(template.render({'snippets': obj.snippets}))\n\n\nclass AddonAdmin(admin.ModelAdmin):\n list_display = ('name', 'guid')\n\n\nclass ASRSnippetAdmin(admin.ModelAdmin):\n form = forms.ASRSnippetAdminForm\n\n list_display_links = (\n 'id',\n 'name',\n )\n list_display = (\n 'id',\n 'name',\n 'status',\n 'modified',\n )\n list_filter = (\n ModifiedFilter,\n 'status',\n ReleaseFilter,\n ('template', RelatedDropdownFilter),\n )\n search_fields = (\n 'name',\n )\n autocomplete_fields = (\n 'campaign',\n 'target',\n )\n preserve_filters = True\n readonly_fields = (\n 'created',\n 'modified',\n 'uuid',\n 'creator',\n 'preview_url',\n )\n filter_horizontal = ('locales',)\n save_on_top = True\n save_as = True\n view_on_site = False\n\n fieldsets = (\n ('ID', {'fields': ('creator', 'name', 'status', 'preview_url')}),\n ('Content', {\n 'description': (\n '''\n <strong>Available deep links:</strong><br/>\n <ol>\n <li><code>special:accounts</code> to open Firefox Accounts</li>\n <li><code>special:appMenu</code> to open the hamburger menu</li>\n </ol><br/>\n <strong>Automatically add Snippet ID:</strong><br/>\n You can use <code>[[snippet_id]]</code> in any field and it\n will be automatically replaced by Snippet ID when served to users.\n <br/>\n Example: This is a <code><a href="https://example.com?utm_term=[[snippet_id]]">link</a></code> # noqa\n <br/>\n '''\n ),\n 'fields': ('template', 'data'),\n }),\n ('Publishing Options', {\n 'fields': ('campaign', 'target', ('publish_start', 'publish_end'), 'locales', 'weight',)\n }),\n ('Other Info', {\n 'fields': ('uuid', ('created', 'modified')),\n 'classes': ('collapse',)\n }),\n )\n\n class Media:\n css = {\n 'all': ('css/admin/ASRSnippetAdmin.css',)\n }\n js = (\n 'js/admin/clipboard.min.js',\n 'js/admin/copy_preview.js',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.asrsnippet')\n super().save_model(request, obj, form, change)\n\n def preview_url(self, obj):\n text = f'''\n <span id=\"previewLinkUrl\">{obj.get_preview_url()}</span>\n <button id=\"copyPreviewLink\" class=\"btn\"\n data-clipboard-target=\"#previewLinkUrl\"\n originalText=\"Copy to Clipboard\" type=\"button\">\n Copy to Clipboard\n </button>\n '''\n return mark_safe(text)\n\n\nclass CampaignAdmin(admin.ModelAdmin):\n readonly_fields = ('created', 'modified', 'creator',)\n prepopulated_fields = {'slug': ('name',)}\n\n fieldsets = (\n ('ID', {'fields': ('name', 'slug')}),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified')),\n }),\n )\n search_fields = (\n 'name',\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.campaign')\n super().save_model(request, obj, form, change)\n\n\nclass 
TargetAdmin(admin.ModelAdmin):\n form = forms.TargetAdminForm\n readonly_fields = ('created', 'modified', 'creator', 'jexl_expr')\n search_fields = (\n 'name',\n )\n fieldsets = (\n ('ID', {'fields': ('name',)}),\n ('Product channels', {\n 'description': 'What channels will this snippet be available in?',\n 'fields': (('on_release', 'on_beta', 'on_aurora', 'on_nightly', 'on_esr'),)\n }),\n ('Targeting', {\n 'fields': (\n 'filtr_is_default_browser',\n 'filtr_updates_enabled',\n 'filtr_updates_autodownload_enabled',\n 'filtr_profile_age_created',\n 'filtr_firefox_version',\n 'filtr_previous_session_end',\n 'filtr_uses_firefox_sync',\n 'filtr_country',\n 'filtr_is_developer',\n 'filtr_current_search_engine',\n 'filtr_browser_addon',\n 'filtr_total_bookmarks_count',\n )\n }),\n ('Other Info', {\n 'fields': ('creator', ('created', 'modified'), 'jexl_expr'),\n }),\n )\n\n def save_model(self, request, obj, form, change):\n if not obj.creator_id:\n obj.creator = request.user\n statsd.incr('save.target')\n super().save_model(request, obj, form, change)\n", "path": "snippets/base/admin/adminmodels.py"}]}
| 3,202 | 183 |
gh_patches_debug_18793
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5118
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong charset handling in MailSender
Looks like passing `charset='utf-8'` makes a plain text message with `Content-Transfer-Encoding: base64` which then can't be read.
At https://github.com/scrapy/scrapy/blob/5a75b14a5fbbbd37c14aa7317761655ac7706b70/scrapy/mail.py#L81 `set_charset` is called but as the payload is not set yet, the underlying class just sets the headers. When later `set_payload` is called, it doesn't do any encoding, but the `Content-Transfer-Encoding` is already set. Looks like the fix should be passing the encoding to `set_payload` too, like was proposed in #3722 (and `set_charset` may be safe to be removed, not sure about this). Note that we have tests but they don't catch this.
Note also, that all of this seems to be compat code according to the Python docs.
Fix charset handling in MailSender
Changes:
Implementation
- Set encoding utf-8 for payload inside send
Fixes #5096, closes #5118
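For illustration only (an editorial sketch, not Scrapy code and not part of the original reports): the mismatch can be reproduced with the standard library `email` package alone, and passing the charset to `set_payload` is what keeps headers and body consistent. The sample text is made up.

```python
from email.mime.nonmultipart import MIMENonMultipart

broken = MIMENonMultipart("text", "plain")
broken.set_charset("utf-8")         # headers now claim Content-Transfer-Encoding: base64
broken.set_payload("héllo wörld")   # payload stored verbatim, never base64-encoded
print(broken.as_string())           # body is not valid base64 -> unreadable for the recipient

fixed = MIMENonMultipart("text", "plain")
fixed.set_payload("héllo wörld", "utf-8")  # charset given here: payload is encoded to match the headers
print(fixed.as_string())
```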
</issue>
<code>
[start of scrapy/mail.py]
1 """
2 Mail sending helpers
3
4 See documentation in docs/topics/email.rst
5 """
6 import logging
7 from email import encoders as Encoders
8 from email.mime.base import MIMEBase
9 from email.mime.multipart import MIMEMultipart
10 from email.mime.nonmultipart import MIMENonMultipart
11 from email.mime.text import MIMEText
12 from email.utils import formatdate
13 from io import BytesIO
14
15 from twisted import version as twisted_version
16 from twisted.internet import defer, ssl
17 from twisted.python.versions import Version
18
19 from scrapy.utils.misc import arg_to_iter
20 from scrapy.utils.python import to_bytes
21
22 logger = logging.getLogger(__name__)
23
24
25 # Defined in the email.utils module, but undocumented:
26 # https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42
27 COMMASPACE = ", "
28
29
30 def _to_bytes_or_none(text):
31 if text is None:
32 return None
33 return to_bytes(text)
34
35
36 class MailSender:
37 def __init__(
38 self,
39 smtphost="localhost",
40 mailfrom="scrapy@localhost",
41 smtpuser=None,
42 smtppass=None,
43 smtpport=25,
44 smtptls=False,
45 smtpssl=False,
46 debug=False,
47 ):
48 self.smtphost = smtphost
49 self.smtpport = smtpport
50 self.smtpuser = _to_bytes_or_none(smtpuser)
51 self.smtppass = _to_bytes_or_none(smtppass)
52 self.smtptls = smtptls
53 self.smtpssl = smtpssl
54 self.mailfrom = mailfrom
55 self.debug = debug
56
57 @classmethod
58 def from_settings(cls, settings):
59 return cls(
60 smtphost=settings["MAIL_HOST"],
61 mailfrom=settings["MAIL_FROM"],
62 smtpuser=settings["MAIL_USER"],
63 smtppass=settings["MAIL_PASS"],
64 smtpport=settings.getint("MAIL_PORT"),
65 smtptls=settings.getbool("MAIL_TLS"),
66 smtpssl=settings.getbool("MAIL_SSL"),
67 )
68
69 def send(
70 self,
71 to,
72 subject,
73 body,
74 cc=None,
75 attachs=(),
76 mimetype="text/plain",
77 charset=None,
78 _callback=None,
79 ):
80 from twisted.internet import reactor
81
82 if attachs:
83 msg = MIMEMultipart()
84 else:
85 msg = MIMENonMultipart(*mimetype.split("/", 1))
86
87 to = list(arg_to_iter(to))
88 cc = list(arg_to_iter(cc))
89
90 msg["From"] = self.mailfrom
91 msg["To"] = COMMASPACE.join(to)
92 msg["Date"] = formatdate(localtime=True)
93 msg["Subject"] = subject
94 rcpts = to[:]
95 if cc:
96 rcpts.extend(cc)
97 msg["Cc"] = COMMASPACE.join(cc)
98
99 if charset:
100 msg.set_charset(charset)
101
102 if attachs:
103 msg.attach(MIMEText(body, "plain", charset or "us-ascii"))
104 for attach_name, mimetype, f in attachs:
105 part = MIMEBase(*mimetype.split("/"))
106 part.set_payload(f.read())
107 Encoders.encode_base64(part)
108 part.add_header(
109 "Content-Disposition", "attachment", filename=attach_name
110 )
111 msg.attach(part)
112 else:
113 msg.set_payload(body)
114
115 if _callback:
116 _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
117
118 if self.debug:
119 logger.debug(
120 "Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s "
121 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
122 {
123 "mailto": to,
124 "mailcc": cc,
125 "mailsubject": subject,
126 "mailattachs": len(attachs),
127 },
128 )
129 return
130
131 dfd = self._sendmail(rcpts, msg.as_string().encode(charset or "utf-8"))
132 dfd.addCallbacks(
133 callback=self._sent_ok,
134 errback=self._sent_failed,
135 callbackArgs=[to, cc, subject, len(attachs)],
136 errbackArgs=[to, cc, subject, len(attachs)],
137 )
138 reactor.addSystemEventTrigger("before", "shutdown", lambda: dfd)
139 return dfd
140
141 def _sent_ok(self, result, to, cc, subject, nattachs):
142 logger.info(
143 "Mail sent OK: To=%(mailto)s Cc=%(mailcc)s "
144 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
145 {
146 "mailto": to,
147 "mailcc": cc,
148 "mailsubject": subject,
149 "mailattachs": nattachs,
150 },
151 )
152
153 def _sent_failed(self, failure, to, cc, subject, nattachs):
154 errstr = str(failure.value)
155 logger.error(
156 "Unable to send mail: To=%(mailto)s Cc=%(mailcc)s "
157 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
158 "- %(mailerr)s",
159 {
160 "mailto": to,
161 "mailcc": cc,
162 "mailsubject": subject,
163 "mailattachs": nattachs,
164 "mailerr": errstr,
165 },
166 )
167 return failure
168
169 def _sendmail(self, to_addrs, msg):
170 from twisted.internet import reactor
171
172 msg = BytesIO(msg)
173 d = defer.Deferred()
174
175 factory = self._create_sender_factory(to_addrs, msg, d)
176
177 if self.smtpssl:
178 reactor.connectSSL(
179 self.smtphost, self.smtpport, factory, ssl.ClientContextFactory()
180 )
181 else:
182 reactor.connectTCP(self.smtphost, self.smtpport, factory)
183
184 return d
185
186 def _create_sender_factory(self, to_addrs, msg, d):
187 from twisted.mail.smtp import ESMTPSenderFactory
188
189 factory_keywords = {
190 "heloFallback": True,
191 "requireAuthentication": False,
192 "requireTransportSecurity": self.smtptls,
193 }
194
195 # Newer versions of twisted require the hostname to use STARTTLS
196 if twisted_version >= Version("twisted", 21, 2, 0):
197 factory_keywords["hostname"] = self.smtphost
198
199 factory = ESMTPSenderFactory(
200 self.smtpuser,
201 self.smtppass,
202 self.mailfrom,
203 to_addrs,
204 msg,
205 d,
206 **factory_keywords
207 )
208 factory.noisy = False
209 return factory
210
[end of scrapy/mail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/mail.py b/scrapy/mail.py
--- a/scrapy/mail.py
+++ b/scrapy/mail.py
@@ -96,10 +96,9 @@
rcpts.extend(cc)
msg["Cc"] = COMMASPACE.join(cc)
- if charset:
- msg.set_charset(charset)
-
if attachs:
+ if charset:
+ msg.set_charset(charset)
msg.attach(MIMEText(body, "plain", charset or "us-ascii"))
for attach_name, mimetype, f in attachs:
part = MIMEBase(*mimetype.split("/"))
@@ -110,7 +109,7 @@
)
msg.attach(part)
else:
- msg.set_payload(body)
+ msg.set_payload(body, charset)
if _callback:
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
|
{"golden_diff": "diff --git a/scrapy/mail.py b/scrapy/mail.py\n--- a/scrapy/mail.py\n+++ b/scrapy/mail.py\n@@ -96,10 +96,9 @@\n rcpts.extend(cc)\n msg[\"Cc\"] = COMMASPACE.join(cc)\n \n- if charset:\n- msg.set_charset(charset)\n-\n if attachs:\n+ if charset:\n+ msg.set_charset(charset)\n msg.attach(MIMEText(body, \"plain\", charset or \"us-ascii\"))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split(\"/\"))\n@@ -110,7 +109,7 @@\n )\n msg.attach(part)\n else:\n- msg.set_payload(body)\n+ msg.set_payload(body, charset)\n \n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n", "issue": "Wrong charset handling in MailSender\nLooks like passing `charset='utf-8'` makes a plain text message with `Content-Transfer-Encoding: base64` which then can't be read.\r\n\r\nAt https://github.com/scrapy/scrapy/blob/5a75b14a5fbbbd37c14aa7317761655ac7706b70/scrapy/mail.py#L81 `set_charset` is called but as the payload is not set yet, the underlying class just sets the headers. When later `set_payload` is called, it doesn't do any encoding, but the `Content-Transfer-Encoding` is already set. Looks like the fix should be passing the encoding to `set_payload` too, like was proposed in #3722 (and `set_charset` may be safe to be removed, not sure about this). Note that we have tests but they don't catch this.\r\n\r\nNote also, that all of this seems to be compat code according to the Python docs.\nWrong charset handling in MailSender\nLooks like passing `charset='utf-8'` makes a plain text message with `Content-Transfer-Encoding: base64` which then can't be read.\r\n\r\nAt https://github.com/scrapy/scrapy/blob/5a75b14a5fbbbd37c14aa7317761655ac7706b70/scrapy/mail.py#L81 `set_charset` is called but as the payload is not set yet, the underlying class just sets the headers. When later `set_payload` is called, it doesn't do any encoding, but the `Content-Transfer-Encoding` is already set. Looks like the fix should be passing the encoding to `set_payload` too, like was proposed in #3722 (and `set_charset` may be safe to be removed, not sure about this). 
Note that we have tests but they don't catch this.\r\n\r\nNote also, that all of this seems to be compat code according to the Python docs.\nFix charset handling in MailSender\nChanges:\r\n \r\nImplementation\r\n- Set encoding utf-8 for payload inside send\r\n\r\nFixes #5096, closes #5118\n", "before_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nimport logging\nfrom email import encoders as Encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom io import BytesIO\n\nfrom twisted import version as twisted_version\nfrom twisted.internet import defer, ssl\nfrom twisted.python.versions import Version\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import to_bytes\n\nlogger = logging.getLogger(__name__)\n\n\n# Defined in the email.utils module, but undocumented:\n# https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42\nCOMMASPACE = \", \"\n\n\ndef _to_bytes_or_none(text):\n if text is None:\n return None\n return to_bytes(text)\n\n\nclass MailSender:\n def __init__(\n self,\n smtphost=\"localhost\",\n mailfrom=\"scrapy@localhost\",\n smtpuser=None,\n smtppass=None,\n smtpport=25,\n smtptls=False,\n smtpssl=False,\n debug=False,\n ):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = _to_bytes_or_none(smtpuser)\n self.smtppass = _to_bytes_or_none(smtppass)\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(\n smtphost=settings[\"MAIL_HOST\"],\n mailfrom=settings[\"MAIL_FROM\"],\n smtpuser=settings[\"MAIL_USER\"],\n smtppass=settings[\"MAIL_PASS\"],\n smtpport=settings.getint(\"MAIL_PORT\"),\n smtptls=settings.getbool(\"MAIL_TLS\"),\n smtpssl=settings.getbool(\"MAIL_SSL\"),\n )\n\n def send(\n self,\n to,\n subject,\n body,\n cc=None,\n attachs=(),\n mimetype=\"text/plain\",\n charset=None,\n _callback=None,\n ):\n from twisted.internet import reactor\n\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mimetype.split(\"/\", 1))\n\n to = list(arg_to_iter(to))\n cc = list(arg_to_iter(cc))\n\n msg[\"From\"] = self.mailfrom\n msg[\"To\"] = COMMASPACE.join(to)\n msg[\"Date\"] = formatdate(localtime=True)\n msg[\"Subject\"] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg[\"Cc\"] = COMMASPACE.join(cc)\n\n if charset:\n msg.set_charset(charset)\n\n if attachs:\n msg.attach(MIMEText(body, \"plain\", charset or \"us-ascii\"))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split(\"/\"))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header(\n \"Content-Disposition\", \"attachment\", filename=attach_name\n )\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n logger.debug(\n \"Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s \"\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {\n \"mailto\": to,\n \"mailcc\": cc,\n \"mailsubject\": subject,\n \"mailattachs\": len(attachs),\n },\n )\n return\n\n dfd = self._sendmail(rcpts, msg.as_string().encode(charset or \"utf-8\"))\n dfd.addCallbacks(\n callback=self._sent_ok,\n errback=self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, 
cc, subject, len(attachs)],\n )\n reactor.addSystemEventTrigger(\"before\", \"shutdown\", lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n logger.info(\n \"Mail sent OK: To=%(mailto)s Cc=%(mailcc)s \"\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {\n \"mailto\": to,\n \"mailcc\": cc,\n \"mailsubject\": subject,\n \"mailattachs\": nattachs,\n },\n )\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = str(failure.value)\n logger.error(\n \"Unable to send mail: To=%(mailto)s Cc=%(mailcc)s \"\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n \"- %(mailerr)s\",\n {\n \"mailto\": to,\n \"mailcc\": cc,\n \"mailsubject\": subject,\n \"mailattachs\": nattachs,\n \"mailerr\": errstr,\n },\n )\n return failure\n\n def _sendmail(self, to_addrs, msg):\n from twisted.internet import reactor\n\n msg = BytesIO(msg)\n d = defer.Deferred()\n\n factory = self._create_sender_factory(to_addrs, msg, d)\n\n if self.smtpssl:\n reactor.connectSSL(\n self.smtphost, self.smtpport, factory, ssl.ClientContextFactory()\n )\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n\n def _create_sender_factory(self, to_addrs, msg, d):\n from twisted.mail.smtp import ESMTPSenderFactory\n\n factory_keywords = {\n \"heloFallback\": True,\n \"requireAuthentication\": False,\n \"requireTransportSecurity\": self.smtptls,\n }\n\n # Newer versions of twisted require the hostname to use STARTTLS\n if twisted_version >= Version(\"twisted\", 21, 2, 0):\n factory_keywords[\"hostname\"] = self.smtphost\n\n factory = ESMTPSenderFactory(\n self.smtpuser,\n self.smtppass,\n self.mailfrom,\n to_addrs,\n msg,\n d,\n **factory_keywords\n )\n factory.noisy = False\n return factory\n", "path": "scrapy/mail.py"}]}
| 2,976 | 206 |
gh_patches_debug_12466
|
rasdani/github-patches
|
git_diff
|
webkom__lego-1113
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add user id to metadata related to all data/actions in stripe
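(Editorial note, for illustration only: the issue itself gives no code. The `stripe.Charge.create` call in `lego/apps/events/tasks.py` below already sends `EVENT_ID`, `USER` and `EMAIL` in its `metadata`, so one hedged sketch of what this request could mean is shown here — the wrapper function and variable names are assumptions, not taken from the project.)

```python
import stripe

def charge_with_user_metadata(event, registration, token, amount):
    # Sketch only: carry the user's database id alongside the metadata already sent.
    return stripe.Charge.create(
        amount=amount,
        currency="NOK",
        source=token,
        description=event.slug,
        metadata={
            "EVENT_ID": event.id,
            "USER_ID": registration.user.id,  # the id this issue asks for
            "USER": registration.user.full_name,
            "EMAIL": registration.user.email,
        },
    )
```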
</issue>
<code>
[start of lego/apps/events/tasks.py]
1 from datetime import timedelta
2
3 import stripe
4 from django.db import IntegrityError, transaction
5 from django.utils import timezone
6 from redis.exceptions import LockError
7 from structlog import get_logger
8
9 from lego import celery_app
10 from lego.apps.events import constants
11 from lego.apps.events.exceptions import (
12 EventHasClosed, PoolCounterNotEqualToRegistrationCount, WebhookDidNotFindRegistration
13 )
14 from lego.apps.events.models import Event, Registration
15 from lego.apps.events.notifications import EventPaymentOverdueCreatorNotification
16 from lego.apps.events.serializers.registrations import StripeObjectSerializer
17 from lego.apps.events.websockets import (
18 notify_event_registration, notify_user_payment, notify_user_registration
19 )
20 from lego.apps.feed.registry import get_handler
21 from lego.utils.tasks import AbakusTask
22
23 log = get_logger()
24
25
26 class AsyncRegister(AbakusTask):
27 serializer = 'json'
28 default_retry_delay = 5
29 registration = None
30
31 def on_failure(self, *args):
32 if self.request.retries == self.max_retries:
33 self.registration.status = constants.FAILURE_REGISTER
34 self.registration.save()
35 notify_user_registration(
36 constants.SOCKET_REGISTRATION_FAILURE, self.registration,
37 error_message='Registrering feilet'
38 )
39
40
41 class Payment(AbakusTask):
42 serializer = 'json'
43 default_retry_delay = 5
44 registration = None
45
46 def on_failure(self, return_value, *args):
47 if self.request.retries == self.max_retries:
48 if return_value.json_body:
49 error = return_value.json_body['error']
50 self.registration.charge_id = error['charge']
51 self.registration.charge_status = error['code']
52 self.registration.save()
53 notify_user_registration(
54 constants.SOCKET_PAYMENT_FAILURE, self.registration,
55 error_message=error['message']
56 )
57 else:
58 self.registration.charge_status = constants.PAYMENT_FAILURE
59 self.registration.save()
60 notify_user_registration(
61 constants.SOCKET_PAYMENT_FAILURE, self.registration,
62 error_message='Payment failed'
63 )
64
65
66 @celery_app.task(base=AsyncRegister, bind=True)
67 def async_register(self, registration_id, logger_context=None):
68 self.setup_logger(logger_context)
69
70 self.registration = Registration.objects.get(id=registration_id)
71 try:
72 with transaction.atomic():
73 self.registration.event.register(self.registration)
74 transaction.on_commit(lambda: notify_event_registration(
75 constants.SOCKET_REGISTRATION_SUCCESS, self.registration
76 ))
77 log.info('registration_success', registration_id=self.registration.id)
78 except LockError as e:
79 log.error(
80 'registration_cache_lock_error', exception=e, registration_id=self.registration.id
81 )
82 raise self.retry(exc=e, max_retries=3)
83 except EventHasClosed as e:
84 log.warn(
85 'registration_tried_after_started', exception=e, registration_id=self.registration.id
86 )
87 except (ValueError, IntegrityError) as e:
88 log.error('registration_error', exception=e, registration_id=self.registration.id)
89 raise self.retry(exc=e, max_retries=3)
90
91
92 @celery_app.task(serializer='json', bind=True, base=AbakusTask, default_retry_delay=30)
93 def async_unregister(self, registration_id, logger_context=None):
94 self.setup_logger(logger_context)
95
96 registration = Registration.objects.get(id=registration_id)
97 pool_id = registration.pool_id
98 try:
99 with transaction.atomic():
100 registration.event.unregister(registration)
101 activation_time = registration.event.get_earliest_registration_time(registration.user)
102 transaction.on_commit(lambda: notify_event_registration(
103 constants.SOCKET_UNREGISTRATION_SUCCESS, registration,
104 from_pool=pool_id, activation_time=activation_time
105 ))
106 log.info('unregistration_success', registration_id=registration.id)
107 except EventHasClosed as e:
108 log.warn('unregistration_tried_after_started', exception=e, registration_id=registration.id)
109 except IntegrityError as e:
110 log.error('unregistration_error', exception=e, registration_id=registration.id)
111 registration.status = constants.FAILURE_UNREGISTER
112 registration.save()
113 notify_user_registration(
114 constants.SOCKET_UNREGISTRATION_FAILURE, registration,
115 error_message='Avregistrering feilet'
116 )
117
118
119 @celery_app.task(base=Payment, bind=True)
120 def async_payment(self, registration_id, token, logger_context=None):
121 self.setup_logger(logger_context)
122
123 self.registration = Registration.objects.get(id=registration_id)
124 event = self.registration.event
125 try:
126 response = stripe.Charge.create(
127 amount=event.get_price(self.registration.user), currency='NOK', source=token,
128 description=event.slug, metadata={
129 'EVENT_ID': event.id,
130 'USER': self.registration.user.full_name,
131 'EMAIL': self.registration.user.email
132 }
133 )
134 log.info('stripe_payment_success', registration_id=self.registration.id)
135 return response
136 except stripe.error.CardError as e:
137 raise self.retry(exc=e)
138 except stripe.error.InvalidRequestError as e:
139 log.error('invalid_request', exception=e, registration_id=self.registration.id)
140 self.registration.charge_status = e.json_body['error']['type']
141 self.registration.save()
142 notify_user_payment(
143 constants.SOCKET_PAYMENT_FAILURE, self.registration, error_message='Invalid request'
144 )
145 except stripe.error.StripeError as e:
146 log.error('stripe_error', exception=e, registration_id=self.registration.id)
147 raise self.retry(exc=e)
148
149
150 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
151 def registration_payment_save(self, result, registration_id, logger_context=None):
152 self.setup_logger(logger_context)
153
154 try:
155 registration = Registration.objects.get(id=registration_id)
156 registration.charge_id = result['id']
157 registration.charge_amount = result['amount']
158 registration.charge_status = result['status']
159 registration.save()
160 notify_user_payment(
161 constants.SOCKET_PAYMENT_SUCCESS, registration, success_message='Betaling gjennomført'
162 )
163 except IntegrityError as e:
164 log.error('registration_save_error', exception=e, registration_id=registration_id)
165 raise self.retry(exc=e)
166
167
168 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
169 def check_for_bump_on_pool_creation_or_expansion(self, event_id, logger_context=None):
170 """Task checking for bumps when event and pools are updated"""
171 self.setup_logger(logger_context)
172
173 # Event is locked using the instance field "is_ready"
174 event = Event.objects.get(pk=event_id)
175 event.bump_on_pool_creation_or_expansion()
176 event.is_ready = True
177 event.save(update_fields=['is_ready'])
178
179
180 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
181 def stripe_webhook_event(self, event_id, event_type, logger_context=None):
182 self.setup_logger(logger_context)
183
184 if event_type in ['charge.failed', 'charge.refunded', 'charge.succeeded']:
185 event = stripe.Event.retrieve(event_id)
186 serializer = StripeObjectSerializer(data=event.data['object'])
187 serializer.is_valid(raise_exception=True)
188
189 metadata = serializer.data['metadata']
190 registration = Registration.objects.filter(
191 event_id=metadata['EVENT_ID'], user__email=metadata['EMAIL']
192 ).first()
193 if not registration:
194 log.error('stripe_webhook_error', event_id=event_id, metadata=metadata)
195 raise WebhookDidNotFindRegistration(event_id, metadata)
196 registration.charge_id = serializer.data['id']
197 registration.charge_amount = serializer.data['amount']
198 registration.charge_amount_refunded = serializer.data['amount_refunded']
199 registration.charge_status = serializer.data['status']
200 registration.save()
201 log.info('stripe_webhook_received', event_id=event_id, registration_id=registration.id)
202
203
204 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
205 def check_events_for_registrations_with_expired_penalties(self, logger_context=None):
206 self.setup_logger(logger_context)
207
208 events_ids = Event.objects.filter(start_time__gte=timezone.now()
209 ).exclude(registrations=None).values_list('id', flat=True)
210 for event_id in events_ids:
211 with transaction.atomic():
212 locked_event = Event.objects.select_for_update().get(pk=event_id)
213 if locked_event.waiting_registrations.exists():
214 for pool in locked_event.pools.all():
215 if pool.is_activated and not pool.is_full:
216 for i in range(locked_event.waiting_registrations.count()):
217 locked_event.check_for_bump_or_rebalance(pool)
218 if pool.is_full:
219 break
220
221
222 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
223 def bump_waiting_users_to_new_pool(self, logger_context=None):
224 self.setup_logger(logger_context)
225
226 events_ids = Event.objects.filter(start_time__gte=timezone.now()
227 ).exclude(registrations=None).values_list('id', flat=True)
228 for event_id in events_ids:
229 with transaction.atomic():
230 locked_event = Event.objects.select_for_update().get(pk=event_id)
231 if locked_event.waiting_registrations.exists():
232 for pool in locked_event.pools.all():
233 if not pool.is_full:
234 act = pool.activation_date
235 now = timezone.now()
236 if not pool.is_activated and act < now + timedelta(minutes=35):
237 locked_event.early_bump(pool)
238 log.info('early_bump_executed', event_id=event_id, pool_id=pool.id)
239 elif pool.is_activated and act > now - timedelta(minutes=35):
240 log.info('early_bump_executed', event_id=event_id, pool_id=pool.id)
241 locked_event.early_bump(pool)
242
243
244 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
245 def notify_user_when_payment_soon_overdue(self, logger_context=None):
246 self.setup_logger(logger_context)
247
248 time = timezone.now()
249 events = Event.objects.filter(
250 payment_due_date__range=(time - timedelta(days=2), time + timedelta(days=3)),
251 is_priced=True, use_stripe=True
252 ).exclude(registrations=None).prefetch_related('registrations')
253 for event in events:
254 for registration in event.registrations.exclude(pool=None):
255 if registration.should_notify(time):
256 log.info(
257 'registration_notified_overdue_payment', event_id=event.id,
258 registration_id=registration.id
259 )
260 get_handler(Registration).handle_payment_overdue(registration)
261
262
263 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
264 def notify_event_creator_when_payment_overdue(self, logger_context=None):
265 self.setup_logger(logger_context)
266
267 time = timezone.now()
268 events = Event.objects.filter(
269 payment_due_date__lte=time, is_priced=True, use_stripe=True, end_time__gte=time
270 ).exclude(registrations=None).prefetch_related('registrations')
271 for event in events:
272 registrations_due = event.registrations.exclude(pool=None).exclude(
273 charge_status__in=[constants.PAYMENT_MANUAL, constants.PAYMENT_SUCCESS]
274 ).prefetch_related('user')
275 if registrations_due:
276 users = [
277 {
278 'name': registration.user.get_full_name(),
279 'email': registration.user.email
280 } for registration in registrations_due
281 ]
282 notification = EventPaymentOverdueCreatorNotification(
283 event.created_by, event=event, users=users
284 )
285 notification.notify()
286 log.info(
287 'event_creator_notified_of_overdue_payments', event_id=event.id,
288 creator=event.created_by
289 )
290
291
292 @celery_app.task(serializer='json', bind=True, base=AbakusTask)
293 def check_that_pool_counters_match_registration_number(self, logger_context=None):
294 """
295 Task that checks whether pools counters are in sync with number of registrations. We do not
296 enforce this check for events that are merged, hence the merge_time filter, because
297 incrementing the counter decreases the registration performance
298 """
299 self.setup_logger(logger_context)
300
301 events_ids = Event.objects.filter(
302 start_time__gte=timezone.now(), merge_time__gte=timezone.now()
303 ).values_list('id', flat=True)
304
305 for event_id in events_ids:
306 with transaction.atomic():
307 locked_event = Event.objects.select_for_update().get(pk=event_id)
308 for pool in locked_event.pools.all():
309 registration_count = pool.registrations.count()
310 if pool.counter != registration_count:
311 log.critical('pool_counter_not_equal_registration_count', pool=pool)
312 raise PoolCounterNotEqualToRegistrationCount(
313 pool, registration_count, locked_event
314 )
315
[end of lego/apps/events/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lego/apps/events/tasks.py b/lego/apps/events/tasks.py
--- a/lego/apps/events/tasks.py
+++ b/lego/apps/events/tasks.py
@@ -127,8 +127,9 @@
amount=event.get_price(self.registration.user), currency='NOK', source=token,
description=event.slug, metadata={
'EVENT_ID': event.id,
+ 'USER_ID': self.registration.user.id,
'USER': self.registration.user.full_name,
- 'EMAIL': self.registration.user.email
+ 'EMAIL': self.registration.user.email,
}
)
log.info('stripe_payment_success', registration_id=self.registration.id)
|
{"golden_diff": "diff --git a/lego/apps/events/tasks.py b/lego/apps/events/tasks.py\n--- a/lego/apps/events/tasks.py\n+++ b/lego/apps/events/tasks.py\n@@ -127,8 +127,9 @@\n amount=event.get_price(self.registration.user), currency='NOK', source=token,\n description=event.slug, metadata={\n 'EVENT_ID': event.id,\n+ 'USER_ID': self.registration.user.id,\n 'USER': self.registration.user.full_name,\n- 'EMAIL': self.registration.user.email\n+ 'EMAIL': self.registration.user.email,\n }\n )\n log.info('stripe_payment_success', registration_id=self.registration.id)\n", "issue": "Add user id to metadata related to all data/actions in stripe\n\n", "before_files": [{"content": "from datetime import timedelta\n\nimport stripe\nfrom django.db import IntegrityError, transaction\nfrom django.utils import timezone\nfrom redis.exceptions import LockError\nfrom structlog import get_logger\n\nfrom lego import celery_app\nfrom lego.apps.events import constants\nfrom lego.apps.events.exceptions import (\n EventHasClosed, PoolCounterNotEqualToRegistrationCount, WebhookDidNotFindRegistration\n)\nfrom lego.apps.events.models import Event, Registration\nfrom lego.apps.events.notifications import EventPaymentOverdueCreatorNotification\nfrom lego.apps.events.serializers.registrations import StripeObjectSerializer\nfrom lego.apps.events.websockets import (\n notify_event_registration, notify_user_payment, notify_user_registration\n)\nfrom lego.apps.feed.registry import get_handler\nfrom lego.utils.tasks import AbakusTask\n\nlog = get_logger()\n\n\nclass AsyncRegister(AbakusTask):\n serializer = 'json'\n default_retry_delay = 5\n registration = None\n\n def on_failure(self, *args):\n if self.request.retries == self.max_retries:\n self.registration.status = constants.FAILURE_REGISTER\n self.registration.save()\n notify_user_registration(\n constants.SOCKET_REGISTRATION_FAILURE, self.registration,\n error_message='Registrering feilet'\n )\n\n\nclass Payment(AbakusTask):\n serializer = 'json'\n default_retry_delay = 5\n registration = None\n\n def on_failure(self, return_value, *args):\n if self.request.retries == self.max_retries:\n if return_value.json_body:\n error = return_value.json_body['error']\n self.registration.charge_id = error['charge']\n self.registration.charge_status = error['code']\n self.registration.save()\n notify_user_registration(\n constants.SOCKET_PAYMENT_FAILURE, self.registration,\n error_message=error['message']\n )\n else:\n self.registration.charge_status = constants.PAYMENT_FAILURE\n self.registration.save()\n notify_user_registration(\n constants.SOCKET_PAYMENT_FAILURE, self.registration,\n error_message='Payment failed'\n )\n\n\n@celery_app.task(base=AsyncRegister, bind=True)\ndef async_register(self, registration_id, logger_context=None):\n self.setup_logger(logger_context)\n\n self.registration = Registration.objects.get(id=registration_id)\n try:\n with transaction.atomic():\n self.registration.event.register(self.registration)\n transaction.on_commit(lambda: notify_event_registration(\n constants.SOCKET_REGISTRATION_SUCCESS, self.registration\n ))\n log.info('registration_success', registration_id=self.registration.id)\n except LockError as e:\n log.error(\n 'registration_cache_lock_error', exception=e, registration_id=self.registration.id\n )\n raise self.retry(exc=e, max_retries=3)\n except EventHasClosed as e:\n log.warn(\n 'registration_tried_after_started', exception=e, registration_id=self.registration.id\n )\n except (ValueError, IntegrityError) as e:\n 
log.error('registration_error', exception=e, registration_id=self.registration.id)\n raise self.retry(exc=e, max_retries=3)\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask, default_retry_delay=30)\ndef async_unregister(self, registration_id, logger_context=None):\n self.setup_logger(logger_context)\n\n registration = Registration.objects.get(id=registration_id)\n pool_id = registration.pool_id\n try:\n with transaction.atomic():\n registration.event.unregister(registration)\n activation_time = registration.event.get_earliest_registration_time(registration.user)\n transaction.on_commit(lambda: notify_event_registration(\n constants.SOCKET_UNREGISTRATION_SUCCESS, registration,\n from_pool=pool_id, activation_time=activation_time\n ))\n log.info('unregistration_success', registration_id=registration.id)\n except EventHasClosed as e:\n log.warn('unregistration_tried_after_started', exception=e, registration_id=registration.id)\n except IntegrityError as e:\n log.error('unregistration_error', exception=e, registration_id=registration.id)\n registration.status = constants.FAILURE_UNREGISTER\n registration.save()\n notify_user_registration(\n constants.SOCKET_UNREGISTRATION_FAILURE, registration,\n error_message='Avregistrering feilet'\n )\n\n\n@celery_app.task(base=Payment, bind=True)\ndef async_payment(self, registration_id, token, logger_context=None):\n self.setup_logger(logger_context)\n\n self.registration = Registration.objects.get(id=registration_id)\n event = self.registration.event\n try:\n response = stripe.Charge.create(\n amount=event.get_price(self.registration.user), currency='NOK', source=token,\n description=event.slug, metadata={\n 'EVENT_ID': event.id,\n 'USER': self.registration.user.full_name,\n 'EMAIL': self.registration.user.email\n }\n )\n log.info('stripe_payment_success', registration_id=self.registration.id)\n return response\n except stripe.error.CardError as e:\n raise self.retry(exc=e)\n except stripe.error.InvalidRequestError as e:\n log.error('invalid_request', exception=e, registration_id=self.registration.id)\n self.registration.charge_status = e.json_body['error']['type']\n self.registration.save()\n notify_user_payment(\n constants.SOCKET_PAYMENT_FAILURE, self.registration, error_message='Invalid request'\n )\n except stripe.error.StripeError as e:\n log.error('stripe_error', exception=e, registration_id=self.registration.id)\n raise self.retry(exc=e)\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef registration_payment_save(self, result, registration_id, logger_context=None):\n self.setup_logger(logger_context)\n\n try:\n registration = Registration.objects.get(id=registration_id)\n registration.charge_id = result['id']\n registration.charge_amount = result['amount']\n registration.charge_status = result['status']\n registration.save()\n notify_user_payment(\n constants.SOCKET_PAYMENT_SUCCESS, registration, success_message='Betaling gjennomf\u00f8rt'\n )\n except IntegrityError as e:\n log.error('registration_save_error', exception=e, registration_id=registration_id)\n raise self.retry(exc=e)\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef check_for_bump_on_pool_creation_or_expansion(self, event_id, logger_context=None):\n \"\"\"Task checking for bumps when event and pools are updated\"\"\"\n self.setup_logger(logger_context)\n\n # Event is locked using the instance field \"is_ready\"\n event = Event.objects.get(pk=event_id)\n event.bump_on_pool_creation_or_expansion()\n event.is_ready = 
True\n event.save(update_fields=['is_ready'])\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef stripe_webhook_event(self, event_id, event_type, logger_context=None):\n self.setup_logger(logger_context)\n\n if event_type in ['charge.failed', 'charge.refunded', 'charge.succeeded']:\n event = stripe.Event.retrieve(event_id)\n serializer = StripeObjectSerializer(data=event.data['object'])\n serializer.is_valid(raise_exception=True)\n\n metadata = serializer.data['metadata']\n registration = Registration.objects.filter(\n event_id=metadata['EVENT_ID'], user__email=metadata['EMAIL']\n ).first()\n if not registration:\n log.error('stripe_webhook_error', event_id=event_id, metadata=metadata)\n raise WebhookDidNotFindRegistration(event_id, metadata)\n registration.charge_id = serializer.data['id']\n registration.charge_amount = serializer.data['amount']\n registration.charge_amount_refunded = serializer.data['amount_refunded']\n registration.charge_status = serializer.data['status']\n registration.save()\n log.info('stripe_webhook_received', event_id=event_id, registration_id=registration.id)\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef check_events_for_registrations_with_expired_penalties(self, logger_context=None):\n self.setup_logger(logger_context)\n\n events_ids = Event.objects.filter(start_time__gte=timezone.now()\n ).exclude(registrations=None).values_list('id', flat=True)\n for event_id in events_ids:\n with transaction.atomic():\n locked_event = Event.objects.select_for_update().get(pk=event_id)\n if locked_event.waiting_registrations.exists():\n for pool in locked_event.pools.all():\n if pool.is_activated and not pool.is_full:\n for i in range(locked_event.waiting_registrations.count()):\n locked_event.check_for_bump_or_rebalance(pool)\n if pool.is_full:\n break\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef bump_waiting_users_to_new_pool(self, logger_context=None):\n self.setup_logger(logger_context)\n\n events_ids = Event.objects.filter(start_time__gte=timezone.now()\n ).exclude(registrations=None).values_list('id', flat=True)\n for event_id in events_ids:\n with transaction.atomic():\n locked_event = Event.objects.select_for_update().get(pk=event_id)\n if locked_event.waiting_registrations.exists():\n for pool in locked_event.pools.all():\n if not pool.is_full:\n act = pool.activation_date\n now = timezone.now()\n if not pool.is_activated and act < now + timedelta(minutes=35):\n locked_event.early_bump(pool)\n log.info('early_bump_executed', event_id=event_id, pool_id=pool.id)\n elif pool.is_activated and act > now - timedelta(minutes=35):\n log.info('early_bump_executed', event_id=event_id, pool_id=pool.id)\n locked_event.early_bump(pool)\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef notify_user_when_payment_soon_overdue(self, logger_context=None):\n self.setup_logger(logger_context)\n\n time = timezone.now()\n events = Event.objects.filter(\n payment_due_date__range=(time - timedelta(days=2), time + timedelta(days=3)),\n is_priced=True, use_stripe=True\n ).exclude(registrations=None).prefetch_related('registrations')\n for event in events:\n for registration in event.registrations.exclude(pool=None):\n if registration.should_notify(time):\n log.info(\n 'registration_notified_overdue_payment', event_id=event.id,\n registration_id=registration.id\n )\n get_handler(Registration).handle_payment_overdue(registration)\n\n\n@celery_app.task(serializer='json', bind=True, 
base=AbakusTask)\ndef notify_event_creator_when_payment_overdue(self, logger_context=None):\n self.setup_logger(logger_context)\n\n time = timezone.now()\n events = Event.objects.filter(\n payment_due_date__lte=time, is_priced=True, use_stripe=True, end_time__gte=time\n ).exclude(registrations=None).prefetch_related('registrations')\n for event in events:\n registrations_due = event.registrations.exclude(pool=None).exclude(\n charge_status__in=[constants.PAYMENT_MANUAL, constants.PAYMENT_SUCCESS]\n ).prefetch_related('user')\n if registrations_due:\n users = [\n {\n 'name': registration.user.get_full_name(),\n 'email': registration.user.email\n } for registration in registrations_due\n ]\n notification = EventPaymentOverdueCreatorNotification(\n event.created_by, event=event, users=users\n )\n notification.notify()\n log.info(\n 'event_creator_notified_of_overdue_payments', event_id=event.id,\n creator=event.created_by\n )\n\n\n@celery_app.task(serializer='json', bind=True, base=AbakusTask)\ndef check_that_pool_counters_match_registration_number(self, logger_context=None):\n \"\"\"\n Task that checks whether pools counters are in sync with number of registrations. We do not\n enforce this check for events that are merged, hence the merge_time filter, because\n incrementing the counter decreases the registration performance\n \"\"\"\n self.setup_logger(logger_context)\n\n events_ids = Event.objects.filter(\n start_time__gte=timezone.now(), merge_time__gte=timezone.now()\n ).values_list('id', flat=True)\n\n for event_id in events_ids:\n with transaction.atomic():\n locked_event = Event.objects.select_for_update().get(pk=event_id)\n for pool in locked_event.pools.all():\n registration_count = pool.registrations.count()\n if pool.counter != registration_count:\n log.critical('pool_counter_not_equal_registration_count', pool=pool)\n raise PoolCounterNotEqualToRegistrationCount(\n pool, registration_count, locked_event\n )\n", "path": "lego/apps/events/tasks.py"}]}
| 4,076 | 146 |
gh_patches_debug_18059
|
rasdani/github-patches
|
git_diff
|
pyscript__pyscript-1781
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not recycle lastElementChild in display
### Checklist
- [X] I added a descriptive title
- [X] I searched for other issues and couldn't find a solution or duplication
- [X] I already searched in Google and didn't find any good information or help
### What happened?
Instead of cleaning up the target container when `append=False` is used, we reuse the `lastElementChild`, assuming it is the `div` we previously created on *empty* containers.
The issues I see with this approach (this is PyScript classic too IIRC):
* when `<py-script>` is a target, it's already a visual container
* any element used as target is already a visual container
* because of the previous 2 points, it's never been too clear to me why we even need to create a `div` to append anything, but then all integration tests expect that so there must be a reason - **amend** [probably not](https://github.com/pyscript/pyscript/issues/1780#issuecomment-1742988864)
* when `<script type="py">` is used, its `target` is already a visual container ... so the previous questions apply
* in no circumstance, when `append=False`, should we reuse any previous content, as the new content's goal is to replace it, whatever it was
* checking for `lastElementChild` to then branch out logic when append means `element.append(new_content)` is also not super clear or useful, neither with new empty nodes, nor with already populated ones
* there are containers that don't accept `div` as content at all (`<picture>` and `<video>` IIRC and to name a few)
Accordingly, we should (imho) improve the `display` `append` attribute story, as right now it's rather causing issues instead and weird edge cases, failing expectations.
### What browsers are you seeing the problem on? (if applicable)
_No response_
### Console info
_No response_
### Additional Context
_No response_
</issue>
<code>
[start of pyscript.core/src/stdlib/pyscript/display.py]
1 import base64
2 import html
3 import io
4 import re
5
6 from pyscript.magic_js import document, window, current_target
7
8 _MIME_METHODS = {
9 "__repr__": "text/plain",
10 "_repr_html_": "text/html",
11 "_repr_markdown_": "text/markdown",
12 "_repr_svg_": "image/svg+xml",
13 "_repr_pdf_": "application/pdf",
14 "_repr_jpeg_": "image/jpeg",
15 "_repr_png_": "image/png",
16 "_repr_latex": "text/latex",
17 "_repr_json_": "application/json",
18 "_repr_javascript_": "application/javascript",
19 "savefig": "image/png",
20 }
21
22
23 def _render_image(mime, value, meta):
24 # If the image value is using bytes we should convert it to base64
25 # otherwise it will return raw bytes and the browser will not be able to
26 # render it.
27 if isinstance(value, bytes):
28 value = base64.b64encode(value).decode("utf-8")
29
30 # This is the pattern of base64 strings
31 base64_pattern = re.compile(
32 r"^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$"
33 )
34 # If value doesn't match the base64 pattern we should encode it to base64
35 if len(value) > 0 and not base64_pattern.match(value):
36 value = base64.b64encode(value.encode("utf-8")).decode("utf-8")
37
38 data = f"data:{mime};charset=utf-8;base64,{value}"
39 attrs = " ".join(['{k}="{v}"' for k, v in meta.items()])
40 return f'<img src="{data}" {attrs}></img>'
41
42
43 def _identity(value, meta):
44 return value
45
46
47 _MIME_RENDERERS = {
48 "text/plain": html.escape,
49 "text/html": _identity,
50 "image/png": lambda value, meta: _render_image("image/png", value, meta),
51 "image/jpeg": lambda value, meta: _render_image("image/jpeg", value, meta),
52 "image/svg+xml": _identity,
53 "application/json": _identity,
54 "application/javascript": lambda value, meta: f"<script>{value}<\\/script>",
55 }
56
57
58 class HTML:
59 """
60 Wrap a string so that display() can render it as plain HTML
61 """
62
63 def __init__(self, html):
64 self._html = html
65
66 def _repr_html_(self):
67 return self._html
68
69
70 def _eval_formatter(obj, print_method):
71 """
72 Evaluates a formatter method.
73 """
74 if print_method == "__repr__":
75 return repr(obj)
76 elif hasattr(obj, print_method):
77 if print_method == "savefig":
78 buf = io.BytesIO()
79 obj.savefig(buf, format="png")
80 buf.seek(0)
81 return base64.b64encode(buf.read()).decode("utf-8")
82 return getattr(obj, print_method)()
83 elif print_method == "_repr_mimebundle_":
84 return {}, {}
85 return None
86
87
88 def _format_mime(obj):
89 """
90 Formats object using _repr_x_ methods.
91 """
92 if isinstance(obj, str):
93 return html.escape(obj), "text/plain"
94
95 mimebundle = _eval_formatter(obj, "_repr_mimebundle_")
96 if isinstance(mimebundle, tuple):
97 format_dict, _ = mimebundle
98 else:
99 format_dict = mimebundle
100
101 output, not_available = None, []
102 for method, mime_type in reversed(_MIME_METHODS.items()):
103 if mime_type in format_dict:
104 output = format_dict[mime_type]
105 else:
106 output = _eval_formatter(obj, method)
107
108 if output is None:
109 continue
110 elif mime_type not in _MIME_RENDERERS:
111 not_available.append(mime_type)
112 continue
113 break
114 if output is None:
115 if not_available:
116 window.console.warn(
117 f"Rendered object requested unavailable MIME renderers: {not_available}"
118 )
119 output = repr(output)
120 mime_type = "text/plain"
121 elif isinstance(output, tuple):
122 output, meta = output
123 else:
124 meta = {}
125 return _MIME_RENDERERS[mime_type](output, meta), mime_type
126
127
128 def _write(element, value, append=False):
129 html, mime_type = _format_mime(value)
130 if html == "\\n":
131 return
132
133 if append:
134 out_element = document.createElement("div")
135 element.append(out_element)
136 else:
137 out_element = element.lastElementChild
138 if out_element is None:
139 out_element = element
140
141 if mime_type in ("application/javascript", "text/html"):
142 script_element = document.createRange().createContextualFragment(html)
143 out_element.append(script_element)
144 else:
145 out_element.innerHTML = html
146
147
148 def display(*values, target=None, append=True):
149 if target is None:
150 target = current_target()
151
152 element = document.getElementById(target)
153
154 # if element is a <script type="py">, it has a 'target' attribute which
155 # points to the visual element holding the displayed values. In that case,
156 # use that.
157 if element.tagName == 'SCRIPT' and hasattr(element, 'target'):
158 element = element.target
159
160 for v in values:
161 _write(element, v, append=append)
162
[end of pyscript.core/src/stdlib/pyscript/display.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyscript.core/src/stdlib/pyscript/display.py b/pyscript.core/src/stdlib/pyscript/display.py
--- a/pyscript.core/src/stdlib/pyscript/display.py
+++ b/pyscript.core/src/stdlib/pyscript/display.py
@@ -3,7 +3,7 @@
import io
import re
-from pyscript.magic_js import document, window, current_target
+from pyscript.magic_js import current_target, document, window
_MIME_METHODS = {
"__repr__": "text/plain",
@@ -154,8 +154,10 @@
# if element is a <script type="py">, it has a 'target' attribute which
# points to the visual element holding the displayed values. In that case,
# use that.
- if element.tagName == 'SCRIPT' and hasattr(element, 'target'):
+ if element.tagName == "SCRIPT" and hasattr(element, "target"):
element = element.target
for v in values:
+ if not append:
+ element.replaceChildren()
_write(element, v, append=append)
|
{"golden_diff": "diff --git a/pyscript.core/src/stdlib/pyscript/display.py b/pyscript.core/src/stdlib/pyscript/display.py\n--- a/pyscript.core/src/stdlib/pyscript/display.py\n+++ b/pyscript.core/src/stdlib/pyscript/display.py\n@@ -3,7 +3,7 @@\n import io\n import re\n \n-from pyscript.magic_js import document, window, current_target\n+from pyscript.magic_js import current_target, document, window\n \n _MIME_METHODS = {\n \"__repr__\": \"text/plain\",\n@@ -154,8 +154,10 @@\n # if element is a <script type=\"py\">, it has a 'target' attribute which\n # points to the visual element holding the displayed values. In that case,\n # use that.\n- if element.tagName == 'SCRIPT' and hasattr(element, 'target'):\n+ if element.tagName == \"SCRIPT\" and hasattr(element, \"target\"):\n element = element.target\n \n for v in values:\n+ if not append:\n+ element.replaceChildren()\n _write(element, v, append=append)\n", "issue": "Do not recycle lastElementChild in display\n### Checklist\r\n\r\n- [X] I added a descriptive title\r\n- [X] I searched for other issues and couldn't find a solution or duplication\r\n- [X] I already searched in Google and didn't find any good information or help\r\n\r\n### What happened?\r\n\r\nInstead of cleaning up the target container when `append=False` is used, we use the `lastElementChild` assuming that's the `div` we eventually previously created on *empty* containers.\r\n\r\nThe issues I see with this approach (this is PyScript classic too IIRC):\r\n\r\n * when `<py-script>` is a target, it's already a visual container\r\n * any element as target, is already a visual container\r\n * because of previous 2 points, it's never been too clear to me why we even need to create a `div` to append anything, but then all integration tests expect that so there must be a reason - **amend** [probably not](https://github.com/pyscript/pyscript/issues/1780#issuecomment-1742988864)\r\n * when `<script type=\"py\">` is used, its `target` is already a visual container ... so that previous questions apply\r\n * in no circumstance, when `append=False`, we should reuse any previous content, as the new content goal is to replace it, whatever it was\r\n * checking for `lastElementChild` to then branch out logic when append means `element.append(new_content)` is also not super clear or useful, neither with new empty nodes, nor with already populated ones\r\n * there are containers that don't accept `div` as content at all (`<picture>` and `<video>` IIRC and to name a few)\r\n\r\nAccordingly, we should (imho) improve the `display` `append` attribute story, as right now it's rather causing issues instead and weird edge cases, failing expectations.\r\n\r\n### What browsers are you seeing the problem on? 
(if applicable)\r\n\r\n_No response_\r\n\r\n### Console info\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "import base64\nimport html\nimport io\nimport re\n\nfrom pyscript.magic_js import document, window, current_target\n\n_MIME_METHODS = {\n \"__repr__\": \"text/plain\",\n \"_repr_html_\": \"text/html\",\n \"_repr_markdown_\": \"text/markdown\",\n \"_repr_svg_\": \"image/svg+xml\",\n \"_repr_pdf_\": \"application/pdf\",\n \"_repr_jpeg_\": \"image/jpeg\",\n \"_repr_png_\": \"image/png\",\n \"_repr_latex\": \"text/latex\",\n \"_repr_json_\": \"application/json\",\n \"_repr_javascript_\": \"application/javascript\",\n \"savefig\": \"image/png\",\n}\n\n\ndef _render_image(mime, value, meta):\n # If the image value is using bytes we should convert it to base64\n # otherwise it will return raw bytes and the browser will not be able to\n # render it.\n if isinstance(value, bytes):\n value = base64.b64encode(value).decode(\"utf-8\")\n\n # This is the pattern of base64 strings\n base64_pattern = re.compile(\n r\"^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$\"\n )\n # If value doesn't match the base64 pattern we should encode it to base64\n if len(value) > 0 and not base64_pattern.match(value):\n value = base64.b64encode(value.encode(\"utf-8\")).decode(\"utf-8\")\n\n data = f\"data:{mime};charset=utf-8;base64,{value}\"\n attrs = \" \".join(['{k}=\"{v}\"' for k, v in meta.items()])\n return f'<img src=\"{data}\" {attrs}></img>'\n\n\ndef _identity(value, meta):\n return value\n\n\n_MIME_RENDERERS = {\n \"text/plain\": html.escape,\n \"text/html\": _identity,\n \"image/png\": lambda value, meta: _render_image(\"image/png\", value, meta),\n \"image/jpeg\": lambda value, meta: _render_image(\"image/jpeg\", value, meta),\n \"image/svg+xml\": _identity,\n \"application/json\": _identity,\n \"application/javascript\": lambda value, meta: f\"<script>{value}<\\\\/script>\",\n}\n\n\nclass HTML:\n \"\"\"\n Wrap a string so that display() can render it as plain HTML\n \"\"\"\n\n def __init__(self, html):\n self._html = html\n\n def _repr_html_(self):\n return self._html\n\n\ndef _eval_formatter(obj, print_method):\n \"\"\"\n Evaluates a formatter method.\n \"\"\"\n if print_method == \"__repr__\":\n return repr(obj)\n elif hasattr(obj, print_method):\n if print_method == \"savefig\":\n buf = io.BytesIO()\n obj.savefig(buf, format=\"png\")\n buf.seek(0)\n return base64.b64encode(buf.read()).decode(\"utf-8\")\n return getattr(obj, print_method)()\n elif print_method == \"_repr_mimebundle_\":\n return {}, {}\n return None\n\n\ndef _format_mime(obj):\n \"\"\"\n Formats object using _repr_x_ methods.\n \"\"\"\n if isinstance(obj, str):\n return html.escape(obj), \"text/plain\"\n\n mimebundle = _eval_formatter(obj, \"_repr_mimebundle_\")\n if isinstance(mimebundle, tuple):\n format_dict, _ = mimebundle\n else:\n format_dict = mimebundle\n\n output, not_available = None, []\n for method, mime_type in reversed(_MIME_METHODS.items()):\n if mime_type in format_dict:\n output = format_dict[mime_type]\n else:\n output = _eval_formatter(obj, method)\n\n if output is None:\n continue\n elif mime_type not in _MIME_RENDERERS:\n not_available.append(mime_type)\n continue\n break\n if output is None:\n if not_available:\n window.console.warn(\n f\"Rendered object requested unavailable MIME renderers: {not_available}\"\n )\n output = repr(output)\n mime_type = \"text/plain\"\n elif isinstance(output, tuple):\n output, meta = output\n else:\n meta = {}\n 
return _MIME_RENDERERS[mime_type](output, meta), mime_type\n\n\ndef _write(element, value, append=False):\n html, mime_type = _format_mime(value)\n if html == \"\\\\n\":\n return\n\n if append:\n out_element = document.createElement(\"div\")\n element.append(out_element)\n else:\n out_element = element.lastElementChild\n if out_element is None:\n out_element = element\n\n if mime_type in (\"application/javascript\", \"text/html\"):\n script_element = document.createRange().createContextualFragment(html)\n out_element.append(script_element)\n else:\n out_element.innerHTML = html\n\n\ndef display(*values, target=None, append=True):\n if target is None:\n target = current_target()\n\n element = document.getElementById(target)\n\n # if element is a <script type=\"py\">, it has a 'target' attribute which\n # points to the visual element holding the displayed values. In that case,\n # use that.\n if element.tagName == 'SCRIPT' and hasattr(element, 'target'):\n element = element.target\n\n for v in values:\n _write(element, v, append=append)\n", "path": "pyscript.core/src/stdlib/pyscript/display.py"}]}
| 2,567 | 244 |
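The accepted change above stops recycling `lastElementChild` and instead empties the target with `replaceChildren()` whenever `append=False`. The toy sketch below reproduces only that control flow with a fake element class so the replace-versus-append difference can be seen outside a browser; the real implementation works on DOM proxies from `pyscript.magic_js`, not on this stand-in.

```python
class FakeElement:
    """Tiny stand-in for a DOM element - just enough API for the demo."""

    def __init__(self):
        self.children = []

    def append(self, child):
        self.children.append(child)

    def replaceChildren(self, *new_children):
        # DOM semantics: drop every existing child, keep only the new ones.
        self.children = list(new_children)


def display(element, *values, append=True):
    """Mirror of the patched flow: clear the target first when not appending."""
    for value in values:
        if not append:
            element.replaceChildren()
        element.append(str(value))


target = FakeElement()
display(target, "first render")
display(target, "second render", append=False)
print(target.children)  # ['second render'] - old content is replaced, not reused
```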
gh_patches_debug_33789
|
rasdani/github-patches
|
git_diff
|
PokemonGoF__PokemonGo-Bot-4623
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] Randomize Evolve Speed
### Short Description
Randomize Evolve Speed
### Possible solution
"max_evolve_speed": 40,
"min_evolve_speed": 20,
### How it would help others
could make bot detection harder and bot more realistic.
</issue>
<code>
[start of pokemongo_bot/cell_workers/evolve_pokemon.py]
1 from pokemongo_bot import inventory
2 from pokemongo_bot.human_behaviour import sleep
3 from pokemongo_bot.inventory import Pokemon
4 from pokemongo_bot.item_list import Item
5 from pokemongo_bot.base_task import BaseTask
6 from pokemongo_bot.datastore import Datastore
7
8
9 class EvolvePokemon(Datastore, BaseTask):
10 SUPPORTED_TASK_API_VERSION = 1
11 def __init__(self, bot, config):
12 super(EvolvePokemon, self).__init__(bot, config)
13
14 def initialize(self):
15 self.api = self.bot.api
16 self.evolve_all = self.config.get('evolve_all', [])
17 self.evolve_speed = self.config.get('evolve_speed', 2)
18 self.first_evolve_by = self.config.get('first_evolve_by', 'cp')
19 self.evolve_above_cp = self.config.get('evolve_above_cp', 500)
20 self.evolve_above_iv = self.config.get('evolve_above_iv', 0.8)
21 self.cp_iv_logic = self.config.get('logic', 'or')
22 self.use_lucky_egg = self.config.get('use_lucky_egg', False)
23 self._validate_config()
24
25 def _validate_config(self):
26 if isinstance(self.evolve_all, basestring):
27 self.evolve_all = [str(pokemon_name).strip() for pokemon_name in self.evolve_all.split(',')]
28
29 def work(self):
30 if not self._should_run():
31 return
32
33 evolve_list = self._sort_and_filter()
34
35 if self.evolve_all[0] != 'all':
36 # filter out non-listed pokemons
37 evolve_list = filter(lambda x: x.name in self.evolve_all, evolve_list)
38
39 cache = {}
40 for pokemon in evolve_list:
41 if pokemon.can_evolve_now():
42 self._execute_pokemon_evolve(pokemon, cache)
43
44 def _should_run(self):
45 if not self.evolve_all or self.evolve_all[0] == 'none':
46 return False
47
48 # Evolve all is used - Use Lucky egg only at the first tick
49 if self.bot.tick_count is not 1 or not self.use_lucky_egg:
50 return True
51
52 lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value)
53
54 # Make sure the user has a lucky egg and skip if not
55 if lucky_egg.count > 0:
56 response_dict_lucky_egg = self.bot.use_lucky_egg()
57 if response_dict_lucky_egg:
58 result = response_dict_lucky_egg.get('responses', {}).get('USE_ITEM_XP_BOOST', {}).get('result', 0)
59 if result is 1: # Request success
60 lucky_egg.remove(1)
61 self.emit_event(
62 'used_lucky_egg',
63 formatted='Used lucky egg ({amount_left} left).',
64 data={
65 'amount_left': lucky_egg.count
66 }
67 )
68 return True
69 else:
70 self.emit_event(
71 'lucky_egg_error',
72 level='error',
73 formatted='Failed to use lucky egg!'
74 )
75 return False
76 else:
77 # Skipping evolve so they aren't wasted
78 self.emit_event(
79 'skip_evolve',
80 formatted='Skipping evolve because has no lucky egg.'
81 )
82 return False
83
84 def _sort_and_filter(self):
85 pokemons = []
86 logic_to_function = {
87 'or': lambda pokemon: pokemon.cp >= self.evolve_above_cp or pokemon.iv >= self.evolve_above_iv,
88 'and': lambda pokemon: pokemon.cp >= self.evolve_above_cp and pokemon.iv >= self.evolve_above_iv
89 }
90
91 for pokemon in inventory.pokemons().all():
92 if pokemon.unique_id > 0 and pokemon.has_next_evolution() and (logic_to_function[self.cp_iv_logic](pokemon)):
93 pokemons.append(pokemon)
94
95 if self.first_evolve_by == "cp":
96 pokemons.sort(key=lambda x: (x.pokemon_id, x.cp, x.iv), reverse=True)
97 else:
98 pokemons.sort(key=lambda x: (x.pokemon_id, x.iv, x.cp), reverse=True)
99
100 return pokemons
101
102 def _execute_pokemon_evolve(self, pokemon, cache):
103 if pokemon.name in cache:
104 return False
105
106 response_dict = self.api.evolve_pokemon(pokemon_id=pokemon.unique_id)
107 if response_dict.get('responses', {}).get('EVOLVE_POKEMON', {}).get('result', 0) == 1:
108 xp = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("experience_awarded", 0)
109 evolution = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("evolved_pokemon_data", {})
110 awarded_candies = response_dict.get('responses', {}).get('EVOLVE_POKEMON', {}).get('candy_awarded', 0)
111 candy = inventory.candies().get(pokemon.pokemon_id)
112
113 candy.consume(pokemon.evolution_cost - awarded_candies)
114
115 self.emit_event(
116 'pokemon_evolved',
117 formatted="Evolved {pokemon} [IV {iv}] [CP {cp}] [{candy} candies] [+{xp} xp]",
118 data={
119 'pokemon': pokemon.name,
120 'iv': pokemon.iv,
121 'cp': pokemon.cp,
122 'candy': candy.quantity,
123 'xp': xp,
124 }
125 )
126
127 inventory.pokemons().remove(pokemon.unique_id)
128 new_pokemon = inventory.Pokemon(evolution)
129 inventory.pokemons().add(new_pokemon)
130
131 sleep(self.evolve_speed)
132 evolve_result = True
133 else:
134 # cache pokemons we can't evolve. Less server calls
135 cache[pokemon.name] = 1
136 sleep(0.7)
137 evolve_result = False
138
139 with self.bot.database as conn:
140 c = conn.cursor()
141 c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='evolve_log'")
142
143 result = c.fetchone()
144
145 while True:
146 if result[0] == 1:
147 conn.execute('''INSERT INTO evolve_log (pokemon, iv, cp) VALUES (?, ?, ?)''', (pokemon.name, pokemon.iv, pokemon.cp))
148 break
149 else:
150 self.emit_event(
151 'evolve_log',
152 sender=self,
153 level='info',
154 formatted="evolve_log table not found, skipping log"
155 )
156 break
157
158 return evolve_result
159
[end of pokemongo_bot/cell_workers/evolve_pokemon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pokemongo_bot/cell_workers/evolve_pokemon.py b/pokemongo_bot/cell_workers/evolve_pokemon.py
--- a/pokemongo_bot/cell_workers/evolve_pokemon.py
+++ b/pokemongo_bot/cell_workers/evolve_pokemon.py
@@ -1,3 +1,5 @@
+from random import uniform
+
from pokemongo_bot import inventory
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot.inventory import Pokemon
@@ -14,7 +16,8 @@
def initialize(self):
self.api = self.bot.api
self.evolve_all = self.config.get('evolve_all', [])
- self.evolve_speed = self.config.get('evolve_speed', 2)
+ self.min_evolve_speed = self.config.get('min_evolve_speed', 25)
+ self.max_evolve_speed = self.config.get('max_evolve_speed', 30)
self.first_evolve_by = self.config.get('first_evolve_by', 'cp')
self.evolve_above_cp = self.config.get('evolve_above_cp', 500)
self.evolve_above_iv = self.config.get('evolve_above_iv', 0.8)
@@ -26,6 +29,9 @@
if isinstance(self.evolve_all, basestring):
self.evolve_all = [str(pokemon_name).strip() for pokemon_name in self.evolve_all.split(',')]
+ if 'evolve_speed' in self.config:
+ self.logger.warning("evolve_speed is deprecated, please use instead 'min_evolve_speed' and 'max_evolved_speed'.")
+
def work(self):
if not self._should_run():
return
@@ -128,7 +134,7 @@
new_pokemon = inventory.Pokemon(evolution)
inventory.pokemons().add(new_pokemon)
- sleep(self.evolve_speed)
+ sleep(uniform(self.min_evolve_speed, self.max_evolve_speed))
evolve_result = True
else:
# cache pokemons we can't evolve. Less server calls
|
{"golden_diff": "diff --git a/pokemongo_bot/cell_workers/evolve_pokemon.py b/pokemongo_bot/cell_workers/evolve_pokemon.py\n--- a/pokemongo_bot/cell_workers/evolve_pokemon.py\n+++ b/pokemongo_bot/cell_workers/evolve_pokemon.py\n@@ -1,3 +1,5 @@\n+from random import uniform\n+\n from pokemongo_bot import inventory\n from pokemongo_bot.human_behaviour import sleep\n from pokemongo_bot.inventory import Pokemon\n@@ -14,7 +16,8 @@\n def initialize(self):\n self.api = self.bot.api\n self.evolve_all = self.config.get('evolve_all', [])\n- self.evolve_speed = self.config.get('evolve_speed', 2)\n+ self.min_evolve_speed = self.config.get('min_evolve_speed', 25)\n+ self.max_evolve_speed = self.config.get('max_evolve_speed', 30)\n self.first_evolve_by = self.config.get('first_evolve_by', 'cp')\n self.evolve_above_cp = self.config.get('evolve_above_cp', 500)\n self.evolve_above_iv = self.config.get('evolve_above_iv', 0.8)\n@@ -26,6 +29,9 @@\n if isinstance(self.evolve_all, basestring):\n self.evolve_all = [str(pokemon_name).strip() for pokemon_name in self.evolve_all.split(',')]\n \n+ if 'evolve_speed' in self.config:\n+ self.logger.warning(\"evolve_speed is deprecated, please use instead 'min_evolve_speed' and 'max_evolved_speed'.\")\n+\n def work(self):\n if not self._should_run():\n return\n@@ -128,7 +134,7 @@\n new_pokemon = inventory.Pokemon(evolution)\n inventory.pokemons().add(new_pokemon)\n \n- sleep(self.evolve_speed)\n+ sleep(uniform(self.min_evolve_speed, self.max_evolve_speed))\n evolve_result = True\n else:\n # cache pokemons we can't evolve. Less server calls\n", "issue": "[Feature Request] Randomize Evolve Speed\n### Short Description\n\nRandomize Evolve Speed\n### Possible solution\n\n\"max_evolve_speed\": 40,\n\"min_evolve_speed\": 20,\n### How it would help others\n\ncould make bot detection harder and bot more realistic.\n\n", "before_files": [{"content": "from pokemongo_bot import inventory\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.inventory import Pokemon\nfrom pokemongo_bot.item_list import Item\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.datastore import Datastore\n\n\nclass EvolvePokemon(Datastore, BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n def __init__(self, bot, config):\n super(EvolvePokemon, self).__init__(bot, config)\n\n def initialize(self):\n self.api = self.bot.api\n self.evolve_all = self.config.get('evolve_all', [])\n self.evolve_speed = self.config.get('evolve_speed', 2)\n self.first_evolve_by = self.config.get('first_evolve_by', 'cp')\n self.evolve_above_cp = self.config.get('evolve_above_cp', 500)\n self.evolve_above_iv = self.config.get('evolve_above_iv', 0.8)\n self.cp_iv_logic = self.config.get('logic', 'or')\n self.use_lucky_egg = self.config.get('use_lucky_egg', False)\n self._validate_config()\n\n def _validate_config(self):\n if isinstance(self.evolve_all, basestring):\n self.evolve_all = [str(pokemon_name).strip() for pokemon_name in self.evolve_all.split(',')]\n\n def work(self):\n if not self._should_run():\n return\n\n evolve_list = self._sort_and_filter()\n\n if self.evolve_all[0] != 'all':\n # filter out non-listed pokemons\n evolve_list = filter(lambda x: x.name in self.evolve_all, evolve_list)\n\n cache = {}\n for pokemon in evolve_list:\n if pokemon.can_evolve_now():\n self._execute_pokemon_evolve(pokemon, cache)\n\n def _should_run(self):\n if not self.evolve_all or self.evolve_all[0] == 'none':\n return False\n\n # Evolve all is used - Use Lucky egg only at the first tick\n if self.bot.tick_count is 
not 1 or not self.use_lucky_egg:\n return True\n\n lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value)\n\n # Make sure the user has a lucky egg and skip if not\n if lucky_egg.count > 0:\n response_dict_lucky_egg = self.bot.use_lucky_egg()\n if response_dict_lucky_egg:\n result = response_dict_lucky_egg.get('responses', {}).get('USE_ITEM_XP_BOOST', {}).get('result', 0)\n if result is 1: # Request success\n lucky_egg.remove(1)\n self.emit_event(\n 'used_lucky_egg',\n formatted='Used lucky egg ({amount_left} left).',\n data={\n 'amount_left': lucky_egg.count\n }\n )\n return True\n else:\n self.emit_event(\n 'lucky_egg_error',\n level='error',\n formatted='Failed to use lucky egg!'\n )\n return False\n else:\n # Skipping evolve so they aren't wasted\n self.emit_event(\n 'skip_evolve',\n formatted='Skipping evolve because has no lucky egg.'\n )\n return False\n\n def _sort_and_filter(self):\n pokemons = []\n logic_to_function = {\n 'or': lambda pokemon: pokemon.cp >= self.evolve_above_cp or pokemon.iv >= self.evolve_above_iv,\n 'and': lambda pokemon: pokemon.cp >= self.evolve_above_cp and pokemon.iv >= self.evolve_above_iv\n }\n\n for pokemon in inventory.pokemons().all():\n if pokemon.unique_id > 0 and pokemon.has_next_evolution() and (logic_to_function[self.cp_iv_logic](pokemon)):\n pokemons.append(pokemon)\n\n if self.first_evolve_by == \"cp\":\n pokemons.sort(key=lambda x: (x.pokemon_id, x.cp, x.iv), reverse=True)\n else:\n pokemons.sort(key=lambda x: (x.pokemon_id, x.iv, x.cp), reverse=True)\n\n return pokemons\n\n def _execute_pokemon_evolve(self, pokemon, cache):\n if pokemon.name in cache:\n return False\n\n response_dict = self.api.evolve_pokemon(pokemon_id=pokemon.unique_id)\n if response_dict.get('responses', {}).get('EVOLVE_POKEMON', {}).get('result', 0) == 1:\n xp = response_dict.get(\"responses\", {}).get(\"EVOLVE_POKEMON\", {}).get(\"experience_awarded\", 0)\n evolution = response_dict.get(\"responses\", {}).get(\"EVOLVE_POKEMON\", {}).get(\"evolved_pokemon_data\", {})\n awarded_candies = response_dict.get('responses', {}).get('EVOLVE_POKEMON', {}).get('candy_awarded', 0)\n candy = inventory.candies().get(pokemon.pokemon_id)\n\n candy.consume(pokemon.evolution_cost - awarded_candies)\n\n self.emit_event(\n 'pokemon_evolved',\n formatted=\"Evolved {pokemon} [IV {iv}] [CP {cp}] [{candy} candies] [+{xp} xp]\",\n data={\n 'pokemon': pokemon.name,\n 'iv': pokemon.iv,\n 'cp': pokemon.cp,\n 'candy': candy.quantity,\n 'xp': xp,\n }\n )\n\n inventory.pokemons().remove(pokemon.unique_id)\n new_pokemon = inventory.Pokemon(evolution)\n inventory.pokemons().add(new_pokemon)\n\n sleep(self.evolve_speed)\n evolve_result = True\n else:\n # cache pokemons we can't evolve. Less server calls\n cache[pokemon.name] = 1\n sleep(0.7)\n evolve_result = False\n\n with self.bot.database as conn:\n c = conn.cursor()\n c.execute(\"SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='evolve_log'\")\n\n result = c.fetchone()\n\n while True:\n if result[0] == 1:\n conn.execute('''INSERT INTO evolve_log (pokemon, iv, cp) VALUES (?, ?, ?)''', (pokemon.name, pokemon.iv, pokemon.cp))\n break\n else:\n self.emit_event(\n 'evolve_log',\n sender=self,\n level='info',\n formatted=\"evolve_log table not found, skipping log\"\n )\n break\n\n return evolve_result\n", "path": "pokemongo_bot/cell_workers/evolve_pokemon.py"}]}
| 2,395 | 467 |
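The merged patch above replaces the fixed `evolve_speed` pause with a `random.uniform` draw between `min_evolve_speed` and `max_evolve_speed` (defaulting to 25 and 30 seconds). A self-contained sketch of that idea follows; the function name and the short bounds used in the demo are made up for illustration and are not part of the bot's configuration plumbing.

```python
import time
from random import uniform


def humanised_sleep(min_seconds=25.0, max_seconds=30.0):
    """Sleep for a random duration drawn from [min_seconds, max_seconds].

    A constant delay between evolutions is an easy fingerprint; drawing the
    pause from a range makes consecutive actions less regular.
    """
    delay = uniform(min_seconds, max_seconds)
    time.sleep(delay)
    return delay


if __name__ == "__main__":
    for _ in range(3):
        print(f"slept {humanised_sleep(0.2, 0.5):.2f}s")  # short bounds, demo only
```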
gh_patches_debug_19451
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-1726
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash using weakmethod with python3
It just happen to me once, not currently reproducible, with python3:
```
File "/home/tito/code/kivy/kivy/app.py", line 600, in run
runTouchApp()
File "/home/tito/code/kivy/kivy/base.py", line 454, in runTouchApp
EventLoop.window.mainloop()
File "/home/tito/code/kivy/kivy/core/window/window_pygame.py", line 329, in mainloop
self._mainloop()
File "/home/tito/code/kivy/kivy/core/window/window_pygame.py", line 235, in _mainloop
EventLoop.idle()
File "/home/tito/code/kivy/kivy/base.py", line 294, in idle
Clock.tick()
File "/home/tito/code/kivy/kivy/clock.py", line 370, in tick
self._process_events()
File "/home/tito/code/kivy/kivy/clock.py", line 481, in _process_events
if event.tick(self._last_tick) is False:
File "/home/tito/code/kivy/kivy/clock.py", line 280, in tick
ret = callback(self._dt)
File "/home/tito/code/kivy/kivy/animation.py", line 289, in _update
self.dispatch('on_progress', widget, progress)
File "_event.pyx", line 272, in kivy._event.EventDispatcher.dispatch (kivy/_event.c:3992)
File "/home/tito/code/kivy/kivy/weakmethod.py", line 42, in __call__
if self.proxy:
ReferenceError: weakly-referenced object no longer exists
```
</issue>
<code>
[start of kivy/weakmethod.py]
1 '''
2 Weak Method
3 ===========
4
5 The :class:`WeakMethod` is used in the Clock class to allow a reference
6 to a bound method that permits the associated object to be garbage collected.
7 Check examples/core/clock_method.py for more information.
8
9 This WeakMethod class is taken from the recipe
10 http://code.activestate.com/recipes/81253/, based on the nicodemus version.
11 (thanks to him !)
12 '''
13
14 import weakref
15 import sys
16
17 if sys.version > '3':
18
19 class WeakMethod:
20 '''Implementation of a
21 `weakref <http://en.wikipedia.org/wiki/Weak_reference>`_
22 for functions and bound methods.
23 '''
24 def __init__(self, method):
25 self.method = None
26 self.method_name = None
27 try:
28 if method.__self__ is not None:
29 self.method_name = method.__func__.__name__
30 self.proxy = weakref.proxy(method.__self__)
31 else:
32 self.method = method
33 self.proxy = None
34 except AttributeError:
35 self.method = method
36 self.proxy = None
37
38 def __call__(self):
39 '''Return a new bound-method like the original, or the
40 original function if it was just a function or unbound
41 method.
42 Returns None if the original object doesn't exist.
43 '''
44 if self.proxy:
45 return getattr(self.proxy, self.method_name)
46 return self.method
47
48 def is_dead(self):
49 '''Returns True if the referenced callable was a bound method and
50 the instance no longer exists. Otherwise, return False.
51 '''
52 return self.proxy is not None and not bool(dir(self.proxy))
53
54 def __repr__(self):
55 return '<WeakMethod proxy={} method={} method_name={}>'.format(
56 self.proxy, self.method, self.method_name)
57
58 else:
59
60 import new
61
62 class WeakMethod(object):
63 '''Implementation of a
64 `weakref <http://en.wikipedia.org/wiki/Weak_reference>`_
65 for functions and bound methods.
66 '''
67
68 def __init__(self, method):
69 try:
70 if method.__self__ is not None:
71 # bound method
72 self._obj = weakref.ref(method.im_self)
73 else:
74 # unbound method
75 self._obj = None
76 self._func = method.im_func
77 self._class = method.im_class
78 except AttributeError:
79 # not a method
80 self._obj = None
81 self._func = method
82 self._class = None
83
84 def __call__(self):
85 '''Return a new bound-method like the original, or the
86 original function if it was just a function or unbound
87 method.
88 Returns None if the original object doesn't exist.
89 '''
90 if self.is_dead():
91 return None
92 if self._obj is not None:
93 return new.instancemethod(self._func, self._obj(), self._class)
94 else:
95 # we don't have an instance: return just the function
96 return self._func
97
98 def is_dead(self):
99 '''Returns True if the referenced callable was a bound method and
100 the instance no longer exists. Otherwise, return False.
101 '''
102 return self._obj is not None and self._obj() is None
103
104 def __eq__(self, other):
105 try:
106 return type(self) is type(other) and self() == other()
107 except:
108 return False
109
110 def __ne__(self, other):
111 return not self == other
112
113
[end of kivy/weakmethod.py]
[start of kivy/factory.py]
1 '''
2 Factory object
3 ==============
4
5 The factory can be used to automatically register any class or module
6 and instantiate classes from it anywhere in your project. It is an
7 implementation of the
8 `Factory Pattern <http://en.wikipedia.org/wiki/Factory_pattern>`_.
9
10 The class list and available modules are automatically generated by setup.py.
11
12 Example for registering a class/module::
13
14 >>> from kivy.factory import Factory
15 >>> Factory.register('Widget', module='kivy.uix.widget')
16 >>> Factory.register('Vector', module='kivy.vector')
17
18 Example of using the Factory::
19
20 >>> from kivy.factory import Factory
21 >>> widget = Factory.Widget(pos=(456,456))
22 >>> vector = Factory.Vector(9, 2)
23
24 Example using a class name::
25
26 >>> from kivy.factory import Factory
27 >>> Factory.register('MyWidget', cls=MyWidget)
28
29 By default, the first classname you register via the factory is permanent.
30 If you wish to change the registered class, you need to unregister the classname
31 before you re-assign it::
32
33 >>> from kivy.factory import Factory
34 >>> Factory.register('MyWidget', cls=MyWidget)
35 >>> widget = Factory.MyWidget()
36 >>> Factory.unregister('MyWidget')
37 >>> Factory.register('MyWidget', cls=CustomWidget)
38 >>> customWidget = Factory.MyWidget()
39 '''
40
41 __all__ = ('Factory', 'FactoryException')
42
43 from kivy.logger import Logger
44
45
46 class FactoryException(Exception):
47 pass
48
49
50 class FactoryBase(object):
51
52 def __init__(self):
53 super(FactoryBase, self).__init__()
54 self.classes = {}
55
56 def is_template(self, classname):
57 '''Return True if the classname is a template from the
58 :class:`~kivy.lang.Builder`.
59
60 .. versionadded:: 1.0.5
61 '''
62 if classname in self.classes:
63 return self.classes[classname]['is_template']
64 else:
65 return False
66
67 def register(self, classname, cls=None, module=None, is_template=False,
68 baseclasses=None, filename=None):
69 '''Register a new classname referring to a real class or
70 class definition in a module.
71
72 .. versionchanged:: 1.7.0
73 :data:`baseclasses` and :data:`filename` added
74
75 .. versionchanged:: 1.0.5
76 :data:`is_template` has been added in 1.0.5.
77 '''
78 if cls is None and module is None and baseclasses is None:
79 raise ValueError(
80 'You must specify either cls= or module= or baseclasses =')
81 if classname in self.classes:
82 return
83 self.classes[classname] = {
84 'module': module,
85 'cls': cls,
86 'is_template': is_template,
87 'baseclasses': baseclasses,
88 'filename': filename}
89
90 def unregister(self, *classnames):
91 '''Unregisters the classnames previously registered via the
92 register method. This allows the same classnames to be re-used in
93 different contexts.
94
95 .. versionadded:: 1.7.1
96 '''
97 for classname in classnames:
98 if classname in self.classes:
99 self.classes.pop(classname)
100
101 def unregister_from_filename(self, filename):
102 '''Unregister all the factory objects related to the filename passed in
103 the parameter.
104
105 .. versionadded:: 1.7.0
106 '''
107 to_remove = [x for x in self.classes
108 if self.classes[x]['filename'] == filename]
109 for name in to_remove:
110 del self.classes[name]
111
112 def __getattr__(self, name):
113 classes = self.classes
114 if name not in classes:
115 raise FactoryException('Unknown class <%s>' % name)
116
117 item = classes[name]
118 cls = item['cls']
119
120 # No class to return, import the module
121 if cls is None:
122 if item['module']:
123 module = __import__(name=item['module'], fromlist='.')
124 if not hasattr(module, name):
125 raise FactoryException(
126 'No class named <%s> in module <%s>' % (
127 name, item['module']))
128 cls = item['cls'] = getattr(module, name)
129
130 elif item['baseclasses']:
131 rootwidgets = []
132 for basecls in item['baseclasses'].split('+'):
133 rootwidgets.append(Factory.get(basecls))
134 cls = item['cls'] = type(name, tuple(rootwidgets), {})
135
136 else:
137 raise FactoryException('No information to create the class')
138
139 return cls
140
141 get = __getattr__
142
143
144 #: Factory instance to use for getting new classes
145 Factory = FactoryBase()
146
147 # Now import the file with all registers
148 # automatically generated by build_factory
149 import kivy.factory_registers
150 Logger.info('Factory: %d symbols loaded' % len(Factory.classes))
151
152 if __name__ == '__main__':
153 Factory.register('Vector', module='kivy.vector')
154 Factory.register('Widget', module='kivy.uix.widget')
155
156
[end of kivy/factory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kivy/factory.py b/kivy/factory.py
--- a/kivy/factory.py
+++ b/kivy/factory.py
@@ -112,6 +112,10 @@
def __getattr__(self, name):
classes = self.classes
if name not in classes:
+ if name[0] == name[0].lower():
+ # if trying to access attributes like checking for `bind`
+ # then raise AttributeError
+ raise AttributeError
raise FactoryException('Unknown class <%s>' % name)
item = classes[name]
diff --git a/kivy/weakmethod.py b/kivy/weakmethod.py
--- a/kivy/weakmethod.py
+++ b/kivy/weakmethod.py
@@ -41,8 +41,11 @@
method.
Returns None if the original object doesn't exist.
'''
- if self.proxy:
- return getattr(self.proxy, self.method_name)
+ try:
+ if self.proxy:
+ return getattr(self.proxy, self.method_name)
+ except ReferenceError:
+ pass
return self.method
def is_dead(self):
|
{"golden_diff": "diff --git a/kivy/factory.py b/kivy/factory.py\n--- a/kivy/factory.py\n+++ b/kivy/factory.py\n@@ -112,6 +112,10 @@\n def __getattr__(self, name):\n classes = self.classes\n if name not in classes:\n+ if name[0] == name[0].lower():\n+ # if trying to access attributes like checking for `bind`\n+ # then raise AttributeError\n+ raise AttributeError\n raise FactoryException('Unknown class <%s>' % name)\n \n item = classes[name]\ndiff --git a/kivy/weakmethod.py b/kivy/weakmethod.py\n--- a/kivy/weakmethod.py\n+++ b/kivy/weakmethod.py\n@@ -41,8 +41,11 @@\n method.\n Returns None if the original object doesn't exist.\n '''\n- if self.proxy:\n- return getattr(self.proxy, self.method_name)\n+ try:\n+ if self.proxy:\n+ return getattr(self.proxy, self.method_name)\n+ except ReferenceError:\n+ pass\n return self.method\n \n def is_dead(self):\n", "issue": "Crash using weakmethod with python3\nIt just happen to me once, not currently reproducible, with python3:\n\n```\n File \"/home/tito/code/kivy/kivy/app.py\", line 600, in run\n runTouchApp()\n File \"/home/tito/code/kivy/kivy/base.py\", line 454, in runTouchApp\n EventLoop.window.mainloop()\n File \"/home/tito/code/kivy/kivy/core/window/window_pygame.py\", line 329, in mainloop\n self._mainloop()\n File \"/home/tito/code/kivy/kivy/core/window/window_pygame.py\", line 235, in _mainloop\n EventLoop.idle()\n File \"/home/tito/code/kivy/kivy/base.py\", line 294, in idle\n Clock.tick()\n File \"/home/tito/code/kivy/kivy/clock.py\", line 370, in tick\n self._process_events()\n File \"/home/tito/code/kivy/kivy/clock.py\", line 481, in _process_events\n if event.tick(self._last_tick) is False:\n File \"/home/tito/code/kivy/kivy/clock.py\", line 280, in tick\n ret = callback(self._dt)\n File \"/home/tito/code/kivy/kivy/animation.py\", line 289, in _update\n self.dispatch('on_progress', widget, progress)\n File \"_event.pyx\", line 272, in kivy._event.EventDispatcher.dispatch (kivy/_event.c:3992)\n File \"/home/tito/code/kivy/kivy/weakmethod.py\", line 42, in __call__\n if self.proxy:\n ReferenceError: weakly-referenced object no longer exists\n```\n\n", "before_files": [{"content": "'''\nWeak Method\n===========\n\nThe :class:`WeakMethod` is used in the Clock class to allow a reference\nto a bound method that permits the associated object to be garbage collected.\nCheck examples/core/clock_method.py for more information.\n\nThis WeakMethod class is taken from the recipe\nhttp://code.activestate.com/recipes/81253/, based on the nicodemus version.\n(thanks to him !)\n'''\n\nimport weakref\nimport sys\n\nif sys.version > '3':\n\n class WeakMethod:\n '''Implementation of a\n `weakref <http://en.wikipedia.org/wiki/Weak_reference>`_\n for functions and bound methods.\n '''\n def __init__(self, method):\n self.method = None\n self.method_name = None\n try:\n if method.__self__ is not None:\n self.method_name = method.__func__.__name__\n self.proxy = weakref.proxy(method.__self__)\n else:\n self.method = method\n self.proxy = None\n except AttributeError:\n self.method = method\n self.proxy = None\n\n def __call__(self):\n '''Return a new bound-method like the original, or the\n original function if it was just a function or unbound\n method.\n Returns None if the original object doesn't exist.\n '''\n if self.proxy:\n return getattr(self.proxy, self.method_name)\n return self.method\n\n def is_dead(self):\n '''Returns True if the referenced callable was a bound method and\n the instance no longer exists. 
Otherwise, return False.\n '''\n return self.proxy is not None and not bool(dir(self.proxy))\n\n def __repr__(self):\n return '<WeakMethod proxy={} method={} method_name={}>'.format(\n self.proxy, self.method, self.method_name)\n\nelse:\n\n import new\n\n class WeakMethod(object):\n '''Implementation of a\n `weakref <http://en.wikipedia.org/wiki/Weak_reference>`_\n for functions and bound methods.\n '''\n\n def __init__(self, method):\n try:\n if method.__self__ is not None:\n # bound method\n self._obj = weakref.ref(method.im_self)\n else:\n # unbound method\n self._obj = None\n self._func = method.im_func\n self._class = method.im_class\n except AttributeError:\n # not a method\n self._obj = None\n self._func = method\n self._class = None\n\n def __call__(self):\n '''Return a new bound-method like the original, or the\n original function if it was just a function or unbound\n method.\n Returns None if the original object doesn't exist.\n '''\n if self.is_dead():\n return None\n if self._obj is not None:\n return new.instancemethod(self._func, self._obj(), self._class)\n else:\n # we don't have an instance: return just the function\n return self._func\n\n def is_dead(self):\n '''Returns True if the referenced callable was a bound method and\n the instance no longer exists. Otherwise, return False.\n '''\n return self._obj is not None and self._obj() is None\n\n def __eq__(self, other):\n try:\n return type(self) is type(other) and self() == other()\n except:\n return False\n\n def __ne__(self, other):\n return not self == other\n\n", "path": "kivy/weakmethod.py"}, {"content": "'''\nFactory object\n==============\n\nThe factory can be used to automatically register any class or module\nand instantiate classes from it anywhere in your project. It is an\nimplementation of the\n`Factory Pattern <http://en.wikipedia.org/wiki/Factory_pattern>`_.\n\nThe class list and available modules are automatically generated by setup.py.\n\nExample for registering a class/module::\n\n >>> from kivy.factory import Factory\n >>> Factory.register('Widget', module='kivy.uix.widget')\n >>> Factory.register('Vector', module='kivy.vector')\n\nExample of using the Factory::\n\n >>> from kivy.factory import Factory\n >>> widget = Factory.Widget(pos=(456,456))\n >>> vector = Factory.Vector(9, 2)\n\nExample using a class name::\n\n >>> from kivy.factory import Factory\n >>> Factory.register('MyWidget', cls=MyWidget)\n\nBy default, the first classname you register via the factory is permanent.\nIf you wish to change the registered class, you need to unregister the classname\nbefore you re-assign it::\n\n >>> from kivy.factory import Factory\n >>> Factory.register('MyWidget', cls=MyWidget)\n >>> widget = Factory.MyWidget()\n >>> Factory.unregister('MyWidget')\n >>> Factory.register('MyWidget', cls=CustomWidget)\n >>> customWidget = Factory.MyWidget()\n'''\n\n__all__ = ('Factory', 'FactoryException')\n\nfrom kivy.logger import Logger\n\n\nclass FactoryException(Exception):\n pass\n\n\nclass FactoryBase(object):\n\n def __init__(self):\n super(FactoryBase, self).__init__()\n self.classes = {}\n\n def is_template(self, classname):\n '''Return True if the classname is a template from the\n :class:`~kivy.lang.Builder`.\n\n .. 
versionadded:: 1.0.5\n '''\n if classname in self.classes:\n return self.classes[classname]['is_template']\n else:\n return False\n\n def register(self, classname, cls=None, module=None, is_template=False,\n baseclasses=None, filename=None):\n '''Register a new classname referring to a real class or\n class definition in a module.\n\n .. versionchanged:: 1.7.0\n :data:`baseclasses` and :data:`filename` added\n\n .. versionchanged:: 1.0.5\n :data:`is_template` has been added in 1.0.5.\n '''\n if cls is None and module is None and baseclasses is None:\n raise ValueError(\n 'You must specify either cls= or module= or baseclasses =')\n if classname in self.classes:\n return\n self.classes[classname] = {\n 'module': module,\n 'cls': cls,\n 'is_template': is_template,\n 'baseclasses': baseclasses,\n 'filename': filename}\n\n def unregister(self, *classnames):\n '''Unregisters the classnames previously registered via the\n register method. This allows the same classnames to be re-used in\n different contexts.\n\n .. versionadded:: 1.7.1\n '''\n for classname in classnames:\n if classname in self.classes:\n self.classes.pop(classname)\n\n def unregister_from_filename(self, filename):\n '''Unregister all the factory objects related to the filename passed in\n the parameter.\n\n .. versionadded:: 1.7.0\n '''\n to_remove = [x for x in self.classes\n if self.classes[x]['filename'] == filename]\n for name in to_remove:\n del self.classes[name]\n\n def __getattr__(self, name):\n classes = self.classes\n if name not in classes:\n raise FactoryException('Unknown class <%s>' % name)\n\n item = classes[name]\n cls = item['cls']\n\n # No class to return, import the module\n if cls is None:\n if item['module']:\n module = __import__(name=item['module'], fromlist='.')\n if not hasattr(module, name):\n raise FactoryException(\n 'No class named <%s> in module <%s>' % (\n name, item['module']))\n cls = item['cls'] = getattr(module, name)\n\n elif item['baseclasses']:\n rootwidgets = []\n for basecls in item['baseclasses'].split('+'):\n rootwidgets.append(Factory.get(basecls))\n cls = item['cls'] = type(name, tuple(rootwidgets), {})\n\n else:\n raise FactoryException('No information to create the class')\n\n return cls\n\n get = __getattr__\n\n\n#: Factory instance to use for getting new classes\nFactory = FactoryBase()\n\n# Now import the file with all registers\n# automatically generated by build_factory\nimport kivy.factory_registers\nLogger.info('Factory: %d symbols loaded' % len(Factory.classes))\n\nif __name__ == '__main__':\n Factory.register('Vector', module='kivy.vector')\n Factory.register('Widget', module='kivy.uix.widget')\n\n", "path": "kivy/factory.py"}]}
| 3,374 | 253 |
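The fix above guards the `weakref.proxy` access with `try/except ReferenceError`, because a proxy whose referent has been garbage collected raises `ReferenceError` on any attribute access - including the plain truth test `if self.proxy:` seen in the traceback. A short CPython demonstration of that behaviour, with a throwaway `Widget` class and a hypothetical `resolve()` helper standing in for the patched `WeakMethod.__call__`:

```python
import weakref


class Widget:
    """Throwaway class for the demo; any instance with a method would do."""

    def on_progress(self):
        return "still alive"


widget = Widget()
proxy = weakref.proxy(widget)
print(bool(proxy))  # True while the strong reference exists

del widget          # CPython frees the instance immediately (refcount hits zero)

try:
    bool(proxy)     # even a truth test dereferences the dead proxy
except ReferenceError:
    print("truth test raised ReferenceError")


def resolve(proxy_obj, method_name):
    """Return the bound method if the referent is alive, else None."""
    try:
        if proxy_obj:
            return getattr(proxy_obj, method_name)
    except ReferenceError:
        pass
    return None


print(resolve(proxy, "on_progress"))  # None - the object no longer exists
```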
gh_patches_debug_3012
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-4714
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Overlaid Config Files are applied in reverse order
http://www.pantsbuild.org/options.html#overlaying-config-files documents that one can do:
$ ./pants --pants-config-files=a.ini --pants-config-files=b.ini options --options-name="level"
level = info (from CONFIG in a.ini)
$ cat a.ini
[GLOBAL]
level: info
$ cat b.ini
[GLOBAL]
level: debug
According to the docs, the second --pants-config-files should overlay the earlier values, but this is not happening :/
</issue>
<code>
[start of src/python/pants/option/config.py]
1 # coding=utf-8
2 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
6 unicode_literals, with_statement)
7
8 import getpass
9 import itertools
10 import os
11
12 import six
13 from six.moves import configparser
14 from twitter.common.collections import OrderedSet
15
16 from pants.base.build_environment import get_buildroot, get_pants_cachedir, get_pants_configdir
17 from pants.util.eval import parse_expression
18 from pants.util.meta import AbstractClass
19
20
21 class Config(AbstractClass):
22 """Encapsulates ini-style config file loading and access.
23
24 Supports recursive variable substitution using standard python format strings. E.g.,
25 %(var_name)s will be replaced with the value of var_name.
26 """
27 DEFAULT_SECTION = configparser.DEFAULTSECT
28
29 class ConfigError(Exception):
30 pass
31
32 class ConfigValidationError(ConfigError):
33 pass
34
35 @classmethod
36 def load(cls, configpaths, seed_values=None):
37 """Loads config from the given paths.
38
39 A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT
40 section, and be available for use in substitutions. The caller may override some of these
41 seed values.
42
43 :param list configpaths: Load from these paths. Later instances take precedence over earlier
44 ones. If empty, returns an empty config.
45 :param seed_values: A dict with optional override seed values for buildroot, pants_workdir,
46 pants_supportdir and pants_distdir.
47 """
48 if not configpaths:
49 return _EmptyConfig()
50
51 single_file_configs = []
52 for configpath in configpaths:
53 parser = cls._create_parser(seed_values)
54 with open(configpath, 'r') as ini:
55 parser.readfp(ini)
56 single_file_configs.append(_SingleFileConfig(configpath, parser))
57 return _ChainedConfig(single_file_configs)
58
59 @classmethod
60 def _create_parser(cls, seed_values=None):
61 """Creates a config parser that supports %([key-name])s value substitution.
62
63 A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT
64 section, and be available for use in substitutions. The caller may override some of these
65 seed values.
66
67 :param seed_values: A dict with optional override seed values for buildroot, pants_workdir,
68 pants_supportdir and pants_distdir.
69 """
70 seed_values = seed_values or {}
71 buildroot = seed_values.get('buildroot', get_buildroot())
72
73 all_seed_values = {
74 'buildroot': buildroot,
75 'homedir': os.path.expanduser('~'),
76 'user': getpass.getuser(),
77 'pants_bootstrapdir': get_pants_cachedir(),
78 'pants_configdir': get_pants_configdir(),
79 }
80
81 def update_dir_from_seed_values(key, default):
82 all_seed_values[key] = seed_values.get(key, os.path.join(buildroot, default))
83 update_dir_from_seed_values('pants_workdir', '.pants.d')
84 update_dir_from_seed_values('pants_supportdir', 'build-support')
85 update_dir_from_seed_values('pants_distdir', 'dist')
86
87 return configparser.SafeConfigParser(all_seed_values)
88
89 def get(self, section, option, type_=six.string_types, default=None):
90 """Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as type.
91
92 If the specified section does not exist or is missing a definition for the option, the value is
93 looked up in the DEFAULT section. If there is still no definition found, the default value
94 supplied is returned.
95 """
96 return self._getinstance(section, option, type_, default)
97
98 def _getinstance(self, section, option, type_, default=None):
99 if not self.has_option(section, option):
100 return default
101
102 raw_value = self.get_value(section, option)
103 # We jump through some hoops here to deal with the fact that `six.string_types` is a tuple of
104 # types.
105 if (type_ == six.string_types or
106 (isinstance(type_, type) and issubclass(type_, six.string_types))):
107 return raw_value
108
109 key = '{}.{}'.format(section, option)
110 return parse_expression(name=key, val=raw_value, acceptable_types=type_,
111 raise_type=self.ConfigError)
112
113 # Subclasses must implement.
114 def configs(self):
115 """Returns the underlying single-file configs represented by this object."""
116 raise NotImplementedError()
117
118 def sources(self):
119 """Returns the sources of this config as a list of filenames."""
120 raise NotImplementedError()
121
122 def sections(self):
123 """Returns the sections in this config (not including DEFAULT)."""
124 raise NotImplementedError()
125
126 def has_section(self, section):
127 """Returns whether this config has the section."""
128 raise NotImplementedError()
129
130 def has_option(self, section, option):
131 """Returns whether this config specified a value the option."""
132 raise NotImplementedError()
133
134 def get_value(self, section, option):
135 """Returns the value of the option in this config as a string, or None if no value specified."""
136 raise NotImplementedError()
137
138 def get_source_for_option(self, section, option):
139 """Returns the path to the source file the given option was defined in.
140
141 :param string section: the scope of the option.
142 :param string option: the name of the option.
143 :returns: the path to the config file, or None if the option was not defined by a config file.
144 :rtype: string
145 """
146 raise NotImplementedError
147
148
149 class _EmptyConfig(Config):
150 """A dummy config with no data at all."""
151
152 def sources(self):
153 return []
154
155 def configs(self):
156 return []
157
158 def sections(self):
159 return []
160
161 def has_section(self, section):
162 return False
163
164 def has_option(self, section, option):
165 return False
166
167 def get_value(self, section, option):
168 return None
169
170 def get_source_for_option(self, section, option):
171 return None
172
173
174 class _SingleFileConfig(Config):
175 """Config read from a single file."""
176
177 def __init__(self, configpath, configparser):
178 super(_SingleFileConfig, self).__init__()
179 self.configpath = configpath
180 self.configparser = configparser
181
182 def configs(self):
183 return [self]
184
185 def sources(self):
186 return [self.configpath]
187
188 def sections(self):
189 return self.configparser.sections()
190
191 def has_section(self, section):
192 return self.configparser.has_section(section)
193
194 def has_option(self, section, option):
195 return self.configparser.has_option(section, option)
196
197 def get_value(self, section, option):
198 return self.configparser.get(section, option)
199
200 def get_source_for_option(self, section, option):
201 if self.has_option(section, option):
202 return self.sources()[0]
203 return None
204
205
206 class _ChainedConfig(Config):
207 """Config read from multiple sources."""
208
209 def __init__(self, configs):
210 """
211 :param configs: A list of Config instances to chain.
212 Later instances take precedence over earlier ones.
213 """
214 super(_ChainedConfig, self).__init__()
215 self._configs = list(reversed(configs))
216
217 def configs(self):
218 return self._configs
219
220 def sources(self):
221 return list(itertools.chain.from_iterable(cfg.sources() for cfg in self._configs))
222
223 def sections(self):
224 ret = OrderedSet()
225 for cfg in self._configs:
226 ret.update(cfg.sections())
227 return ret
228
229 def has_section(self, section):
230 for cfg in self._configs:
231 if cfg.has_section(section):
232 return True
233 return False
234
235 def has_option(self, section, option):
236 for cfg in self._configs:
237 if cfg.has_option(section, option):
238 return True
239 return False
240
241 def get_value(self, section, option):
242 for cfg in self._configs:
243 try:
244 return cfg.get_value(section, option)
245 except (configparser.NoSectionError, configparser.NoOptionError):
246 pass
247 if not self.has_section(section):
248 raise configparser.NoSectionError(section)
249 raise configparser.NoOptionError(option, section)
250
251 def get_source_for_option(self, section, option):
252 for cfg in self._configs:
253 if cfg.has_option(section, option):
254 return cfg.get_source_for_option(section, option)
255 return None
256
[end of src/python/pants/option/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/option/config.py b/src/python/pants/option/config.py
--- a/src/python/pants/option/config.py
+++ b/src/python/pants/option/config.py
@@ -218,7 +218,8 @@
return self._configs
def sources(self):
- return list(itertools.chain.from_iterable(cfg.sources() for cfg in self._configs))
+ # NB: Present the sources in the order we were given them.
+ return list(itertools.chain.from_iterable(cfg.sources() for cfg in reversed(self._configs)))
def sections(self):
ret = OrderedSet()
|
{"golden_diff": "diff --git a/src/python/pants/option/config.py b/src/python/pants/option/config.py\n--- a/src/python/pants/option/config.py\n+++ b/src/python/pants/option/config.py\n@@ -218,7 +218,8 @@\n return self._configs\n \n def sources(self):\n- return list(itertools.chain.from_iterable(cfg.sources() for cfg in self._configs))\n+ # NB: Present the sources in the order we were given them.\n+ return list(itertools.chain.from_iterable(cfg.sources() for cfg in reversed(self._configs)))\n \n def sections(self):\n ret = OrderedSet()\n", "issue": "Overlaid Config Files are applied in reverse order\nhttp://www.pantsbuild.org/options.html#overlaying-config-files documents that one can do:\r\n\r\n $ ./pants --pants-config-files=a.ini --pants-config-files=b.ini options --options-name=\"level\"\r\n level = info (from CONFIG in a.ini)\r\n\r\n $ cat a.ini\r\n [GLOBAL]\r\n level: info\r\n\r\n $ cat b.ini\r\n [GLOBAL]\r\n level: debug\r\n\r\nAccording to the docs, the second --pants-config-files should overlay the earlier values, but this is not happening :/\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport getpass\nimport itertools\nimport os\n\nimport six\nfrom six.moves import configparser\nfrom twitter.common.collections import OrderedSet\n\nfrom pants.base.build_environment import get_buildroot, get_pants_cachedir, get_pants_configdir\nfrom pants.util.eval import parse_expression\nfrom pants.util.meta import AbstractClass\n\n\nclass Config(AbstractClass):\n \"\"\"Encapsulates ini-style config file loading and access.\n\n Supports recursive variable substitution using standard python format strings. E.g.,\n %(var_name)s will be replaced with the value of var_name.\n \"\"\"\n DEFAULT_SECTION = configparser.DEFAULTSECT\n\n class ConfigError(Exception):\n pass\n\n class ConfigValidationError(ConfigError):\n pass\n\n @classmethod\n def load(cls, configpaths, seed_values=None):\n \"\"\"Loads config from the given paths.\n\n A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT\n section, and be available for use in substitutions. The caller may override some of these\n seed values.\n\n :param list configpaths: Load from these paths. Later instances take precedence over earlier\n ones. If empty, returns an empty config.\n :param seed_values: A dict with optional override seed values for buildroot, pants_workdir,\n pants_supportdir and pants_distdir.\n \"\"\"\n if not configpaths:\n return _EmptyConfig()\n\n single_file_configs = []\n for configpath in configpaths:\n parser = cls._create_parser(seed_values)\n with open(configpath, 'r') as ini:\n parser.readfp(ini)\n single_file_configs.append(_SingleFileConfig(configpath, parser))\n return _ChainedConfig(single_file_configs)\n\n @classmethod\n def _create_parser(cls, seed_values=None):\n \"\"\"Creates a config parser that supports %([key-name])s value substitution.\n\n A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT\n section, and be available for use in substitutions. 
The caller may override some of these\n seed values.\n\n :param seed_values: A dict with optional override seed values for buildroot, pants_workdir,\n pants_supportdir and pants_distdir.\n \"\"\"\n seed_values = seed_values or {}\n buildroot = seed_values.get('buildroot', get_buildroot())\n\n all_seed_values = {\n 'buildroot': buildroot,\n 'homedir': os.path.expanduser('~'),\n 'user': getpass.getuser(),\n 'pants_bootstrapdir': get_pants_cachedir(),\n 'pants_configdir': get_pants_configdir(),\n }\n\n def update_dir_from_seed_values(key, default):\n all_seed_values[key] = seed_values.get(key, os.path.join(buildroot, default))\n update_dir_from_seed_values('pants_workdir', '.pants.d')\n update_dir_from_seed_values('pants_supportdir', 'build-support')\n update_dir_from_seed_values('pants_distdir', 'dist')\n\n return configparser.SafeConfigParser(all_seed_values)\n\n def get(self, section, option, type_=six.string_types, default=None):\n \"\"\"Retrieves option from the specified section (or 'DEFAULT') and attempts to parse it as type.\n\n If the specified section does not exist or is missing a definition for the option, the value is\n looked up in the DEFAULT section. If there is still no definition found, the default value\n supplied is returned.\n \"\"\"\n return self._getinstance(section, option, type_, default)\n\n def _getinstance(self, section, option, type_, default=None):\n if not self.has_option(section, option):\n return default\n\n raw_value = self.get_value(section, option)\n # We jump through some hoops here to deal with the fact that `six.string_types` is a tuple of\n # types.\n if (type_ == six.string_types or\n (isinstance(type_, type) and issubclass(type_, six.string_types))):\n return raw_value\n\n key = '{}.{}'.format(section, option)\n return parse_expression(name=key, val=raw_value, acceptable_types=type_,\n raise_type=self.ConfigError)\n\n # Subclasses must implement.\n def configs(self):\n \"\"\"Returns the underlying single-file configs represented by this object.\"\"\"\n raise NotImplementedError()\n\n def sources(self):\n \"\"\"Returns the sources of this config as a list of filenames.\"\"\"\n raise NotImplementedError()\n\n def sections(self):\n \"\"\"Returns the sections in this config (not including DEFAULT).\"\"\"\n raise NotImplementedError()\n\n def has_section(self, section):\n \"\"\"Returns whether this config has the section.\"\"\"\n raise NotImplementedError()\n\n def has_option(self, section, option):\n \"\"\"Returns whether this config specified a value the option.\"\"\"\n raise NotImplementedError()\n\n def get_value(self, section, option):\n \"\"\"Returns the value of the option in this config as a string, or None if no value specified.\"\"\"\n raise NotImplementedError()\n\n def get_source_for_option(self, section, option):\n \"\"\"Returns the path to the source file the given option was defined in.\n\n :param string section: the scope of the option.\n :param string option: the name of the option.\n :returns: the path to the config file, or None if the option was not defined by a config file.\n :rtype: string\n \"\"\"\n raise NotImplementedError\n\n\nclass _EmptyConfig(Config):\n \"\"\"A dummy config with no data at all.\"\"\"\n\n def sources(self):\n return []\n\n def configs(self):\n return []\n\n def sections(self):\n return []\n\n def has_section(self, section):\n return False\n\n def has_option(self, section, option):\n return False\n\n def get_value(self, section, option):\n return None\n\n def get_source_for_option(self, section, option):\n return 
None\n\n\nclass _SingleFileConfig(Config):\n \"\"\"Config read from a single file.\"\"\"\n\n def __init__(self, configpath, configparser):\n super(_SingleFileConfig, self).__init__()\n self.configpath = configpath\n self.configparser = configparser\n\n def configs(self):\n return [self]\n\n def sources(self):\n return [self.configpath]\n\n def sections(self):\n return self.configparser.sections()\n\n def has_section(self, section):\n return self.configparser.has_section(section)\n\n def has_option(self, section, option):\n return self.configparser.has_option(section, option)\n\n def get_value(self, section, option):\n return self.configparser.get(section, option)\n\n def get_source_for_option(self, section, option):\n if self.has_option(section, option):\n return self.sources()[0]\n return None\n\n\nclass _ChainedConfig(Config):\n \"\"\"Config read from multiple sources.\"\"\"\n\n def __init__(self, configs):\n \"\"\"\n :param configs: A list of Config instances to chain.\n Later instances take precedence over earlier ones.\n \"\"\"\n super(_ChainedConfig, self).__init__()\n self._configs = list(reversed(configs))\n\n def configs(self):\n return self._configs\n\n def sources(self):\n return list(itertools.chain.from_iterable(cfg.sources() for cfg in self._configs))\n\n def sections(self):\n ret = OrderedSet()\n for cfg in self._configs:\n ret.update(cfg.sections())\n return ret\n\n def has_section(self, section):\n for cfg in self._configs:\n if cfg.has_section(section):\n return True\n return False\n\n def has_option(self, section, option):\n for cfg in self._configs:\n if cfg.has_option(section, option):\n return True\n return False\n\n def get_value(self, section, option):\n for cfg in self._configs:\n try:\n return cfg.get_value(section, option)\n except (configparser.NoSectionError, configparser.NoOptionError):\n pass\n if not self.has_section(section):\n raise configparser.NoSectionError(section)\n raise configparser.NoOptionError(option, section)\n\n def get_source_for_option(self, section, option):\n for cfg in self._configs:\n if cfg.has_option(section, option):\n return cfg.get_source_for_option(section, option)\n return None\n", "path": "src/python/pants/option/config.py"}]}
| 3,160 | 138 |
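The golden diff above only touches `_ChainedConfig.sources()`; the precedence mechanism itself is visible in the listing, where `__init__` stores the configs reversed and `get_value` returns the first match. The snippet below is a minimal, hedged sketch of that pattern using plain dicts in place of the real `Config` objects; the dict stand-ins, file names, and option values are invented for illustration and are not pants code.

```python
# Later config files are expected to win, mirroring _ChainedConfig.get_value.
# Plain dicts stand in for pants Config objects; values are invented examples.
a_ini = {"level": "info"}    # passed first
b_ini = {"level": "debug"}   # passed second, should take precedence

chained = list(reversed([a_ini, b_ini]))  # as in _ChainedConfig.__init__

def get_value(option):
    # First match over the reversed list means the later file wins.
    for cfg in chained:
        if option in cfg:
            return cfg[option]
    raise KeyError(option)

print(get_value("level"))  # -> 'debug'
```

The patch keeps this lookup order but makes `sources()` report the files in the order they were originally given rather than the reversed internal order.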
gh_patches_debug_26961
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1888
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Basingstoke and Deane is broken since the 8th of Feb
### I Have A Problem With:
A specific source
### What's Your Problem
The service no longer downloads the waste updates. I tried 1.46 and the master.
### Source (if relevant)
_No response_
### Logs
```Shell
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
integration: waste_collection_schedule (documentation)
First occurred: 11:26:41 (1 occurrences)
Last logged: 11:26:41
fetch failed for source Basingstoke and Deane Borough Council: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py", line 65, in fetch date=datetime.strptime(date_str, "%A, %d %B %Y").date(), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.12/_strptime.py", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.12/_strptime.py", line 333, in _strptime raise ValueError("time data %r does not match format %r" % ValueError: time data 'none / unknown' does not match format '%A, %d %B %Y'
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: basingstoke_gov_uk
args:
uprn: "1000809XXXX"
customize:
- type: Garden
show: True
- type: Waste
show: True
- type: Recycling
show: True
- type: Glass
show: True
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py]
1 from datetime import datetime
2
3 import requests
4 import urllib3
5 from bs4 import BeautifulSoup
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 # With verify=True the POST fails due to a SSLCertVerificationError.
9 # Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
10 # https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
11 # https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
12 # These two lines are used to suppress the InsecureRequestWarning when using verify=False
13 urllib3.disable_warnings()
14
15 TITLE = "Basingstoke and Deane Borough Council"
16 DESCRIPTION = "Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK."
17 URL = "https://basingstoke.gov.uk"
18 TEST_CASES = {
19 "Test_001": {"uprn": "100060234732"},
20 "Test_002": {"uprn": "100060218986"},
21 "Test_003": {"uprn": 100060235836},
22 "Test_004": {"uprn": 100060224194},
23 }
24 HEADERS = {
25 "user-agent": "Mozilla/5.0",
26 }
27 ICON_MAP = {
28 "WASTE": "mdi:trash-can",
29 "RECYCLING": "mdi:recycle",
30 "GARDEN": "mdi:leaf",
31 "GLASS": "mdi:glass-fragile",
32 }
33
34
35 class Source:
36 def __init__(self, uprn):
37 self._uprn = str(uprn)
38
39 def fetch(self):
40 REQUEST_COOKIES = {
41 "cookie_control_popup": "N",
42 "WhenAreMyBinsCollected": self._uprn,
43 }
44 r = requests.get(
45 "https://www.basingstoke.gov.uk/bincollections",
46 headers=HEADERS,
47 cookies=REQUEST_COOKIES,
48 verify=False,
49 )
50 r.raise_for_status()
51
52 soup = BeautifulSoup(r.text, "html.parser")
53
54 services = soup.findAll("div", {"class": "service"})
55
56 entries = []
57
58 for service in services:
59 waste_type = service.find("h2").text.split(" ")[0]
60 schedule_dates = service.findAll("li")
61 for schedule in schedule_dates:
62 date_str = schedule.text.split("(")[0].strip()
63 entries.append(
64 Collection(
65 date=datetime.strptime(date_str, "%A, %d %B %Y").date(),
66 t=waste_type,
67 icon=ICON_MAP.get(waste_type.upper()),
68 )
69 )
70
71 return entries
72
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py
@@ -1,3 +1,4 @@
+import logging
from datetime import datetime
import requests
@@ -30,6 +31,7 @@
"GARDEN": "mdi:leaf",
"GLASS": "mdi:glass-fragile",
}
+LOGGER = logging.getLogger(__name__)
class Source:
@@ -60,9 +62,17 @@
schedule_dates = service.findAll("li")
for schedule in schedule_dates:
date_str = schedule.text.split("(")[0].strip()
+ try:
+ date = datetime.strptime(date_str, "%A, %d %B %Y").date()
+ except ValueError as e:
+ LOGGER.warning(
+ f"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}"
+ )
+ continue
+
entries.append(
Collection(
- date=datetime.strptime(date_str, "%A, %d %B %Y").date(),
+ date=date,
t=waste_type,
icon=ICON_MAP.get(waste_type.upper()),
)
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\n@@ -1,3 +1,4 @@\n+import logging\n from datetime import datetime\n \n import requests\n@@ -30,6 +31,7 @@\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n }\n+LOGGER = logging.getLogger(__name__)\n \n \n class Source:\n@@ -60,9 +62,17 @@\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n+ try:\n+ date = datetime.strptime(date_str, \"%A, %d %B %Y\").date()\n+ except ValueError as e:\n+ LOGGER.warning(\n+ f\"Failed to parse date '{date_str}' for wastetype {waste_type}: {e}\"\n+ )\n+ continue\n+\n entries.append(\n Collection(\n- date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(),\n+ date=date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n", "issue": "[Bug]: Basingstoke and Deane is broken since the 8th of Feb\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe service no longer downloads the waste updates. I tried 1.46 and the master.\n\n### Source (if relevant)\n\n_No response_\n\n### Logs\n\n```Shell\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nintegration: waste_collection_schedule (documentation)\r\nFirst occurred: 11:26:41 (1 occurrences)\r\nLast logged: 11:26:41\r\n\r\nfetch failed for source Basingstoke and Deane Borough Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py\", line 65, in fetch date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(), ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 333, in _strptime raise ValueError(\"time data %r does not match format %r\" % ValueError: time data 'none / unknown' does not match format '%A, %d %B %Y'\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: basingstoke_gov_uk\r\n args:\r\n uprn: \"1000809XXXX\"\r\n customize:\r\n - type: Garden\r\n show: True\r\n - type: Waste\r\n show: True\r\n - type: Recycling\r\n show: True\r\n - type: Glass\r\n show: True\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- 
[X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nimport urllib3\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"Basingstoke and Deane Borough Council\"\nDESCRIPTION = \"Source for basingstoke.gov.uk services for Basingstoke and Deane Borough Council, UK.\"\nURL = \"https://basingstoke.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100060234732\"},\n \"Test_002\": {\"uprn\": \"100060218986\"},\n \"Test_003\": {\"uprn\": 100060235836},\n \"Test_004\": {\"uprn\": 100060224194},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"WASTE\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN\": \"mdi:leaf\",\n \"GLASS\": \"mdi:glass-fragile\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn)\n\n def fetch(self):\n REQUEST_COOKIES = {\n \"cookie_control_popup\": \"N\",\n \"WhenAreMyBinsCollected\": self._uprn,\n }\n r = requests.get(\n \"https://www.basingstoke.gov.uk/bincollections\",\n headers=HEADERS,\n cookies=REQUEST_COOKIES,\n verify=False,\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n services = soup.findAll(\"div\", {\"class\": \"service\"})\n\n entries = []\n\n for service in services:\n waste_type = service.find(\"h2\").text.split(\" \")[0]\n schedule_dates = service.findAll(\"li\")\n for schedule in schedule_dates:\n date_str = schedule.text.split(\"(\")[0].strip()\n entries.append(\n Collection(\n date=datetime.strptime(date_str, \"%A, %d %B %Y\").date(),\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/basingstoke_gov_uk.py"}]}
| 2,003 | 324 |
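The fix above replaces the hard `ValueError` with a logged warning, so one unparseable entry ('none / unknown' in the traceback) no longer aborts the whole fetch. Below is a small standalone sketch of that defensive-parsing pattern; the logger name and the sample date strings are invented for illustration and are not part of the integration.

```python
import logging
from datetime import datetime

logging.basicConfig(level=logging.WARNING)
LOGGER = logging.getLogger("bin_schedule_demo")

def parse_collection_dates(raw_dates, waste_type="Garden"):
    """Keep the parsable schedule entries and log (rather than raise) the rest."""
    parsed = []
    for date_str in raw_dates:
        try:
            parsed.append(datetime.strptime(date_str, "%A, %d %B %Y").date())
        except ValueError as exc:
            LOGGER.warning(
                "Failed to parse date '%s' for wastetype %s: %s",
                date_str, waste_type, exc,
            )
    return parsed

# 'none / unknown' is the string the council site returned per the traceback.
print(parse_collection_dates(["Friday, 16 February 2024", "none / unknown"]))
# -> [datetime.date(2024, 2, 16)], plus a warning for the unknown entry
```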
gh_patches_debug_14671
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-1129
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClasswiseWrapper yields different results
## 🐛 Bug
Using `JaccardIndex` with `ClasswiseWrapper` results in different values than without `ClasswiseWrapper`.
### To Reproduce
Steps to reproduce the behavior...
Run the code snippet.
#### Code sample
```python
from torchmetrics import ClasswiseWrapper, JaccardIndex
import torch
target = torch.randint(0, 2, (10, 25, 25))
preds = [ torch.randint(0, 2, (10, 25, 25)) for i in range (3)]
jaccard_single = JaccardIndex(num_classes=2, average=None)
class_wrapper = ClasswiseWrapper(
JaccardIndex(num_classes=2, average=None),
labels=["class1", "class2"]
)
for p in preds:
print("Metric ",jaccard_single(p,target))
print("Wraped metric ",class_wrapper(p,target))
```
The code produces the following output:
```
Metric tensor([0.3351, 0.3333])
Wraped metric {'jaccardindex_class1': tensor(0.3351), 'jaccardindex_class2': tensor(0.3333)}
Metric tensor([0.3293, 0.3357])
Wraped metric {'jaccardindex_class1': tensor(0.3322), 'jaccardindex_class2': tensor(0.3345)}
Metric tensor([0.3424, 0.3435])
Wraped metric {'jaccardindex_class1': tensor(0.3356), 'jaccardindex_class2': tensor(0.3375)}
```
### Expected behavior
I would expect that the wrapped metric outputs the same values as the simple `JaccardIndex`.
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):
Installed version 0.9.2 using pip
- Python & PyTorch Version (e.g., 1.0):
Tested with Python 3.8.14 and pytorch 1.3.1
- Any other relevant information such as OS (e.g., Linux):
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of src/torchmetrics/wrappers/classwise.py]
1 from typing import Any, Dict, List, Optional
2
3 from torch import Tensor
4
5 from torchmetrics import Metric
6
7
8 class ClasswiseWrapper(Metric):
9 """Wrapper class for altering the output of classification metrics that returns multiple values to include
10 label information.
11
12 Args:
13 metric: base metric that should be wrapped. It is assumed that the metric outputs a single
14 tensor that is split along the first dimension.
15 labels: list of strings indicating the different classes.
16
17 Example:
18 >>> import torch
19 >>> _ = torch.manual_seed(42)
20 >>> from torchmetrics import Accuracy, ClasswiseWrapper
21 >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))
22 >>> preds = torch.randn(10, 3).softmax(dim=-1)
23 >>> target = torch.randint(3, (10,))
24 >>> metric(preds, target)
25 {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}
26
27 Example (labels as list of strings):
28 >>> import torch
29 >>> from torchmetrics import Accuracy, ClasswiseWrapper
30 >>> metric = ClasswiseWrapper(
31 ... Accuracy(num_classes=3, average=None),
32 ... labels=["horse", "fish", "dog"]
33 ... )
34 >>> preds = torch.randn(10, 3).softmax(dim=-1)
35 >>> target = torch.randint(3, (10,))
36 >>> metric(preds, target)
37 {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}
38
39 Example (in metric collection):
40 >>> import torch
41 >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall
42 >>> labels = ["horse", "fish", "dog"]
43 >>> metric = MetricCollection(
44 ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),
45 ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}
46 ... )
47 >>> preds = torch.randn(10, 3).softmax(dim=-1)
48 >>> target = torch.randint(3, (10,))
49 >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
50 {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),
51 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}
52 """
53
54 def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:
55 super().__init__()
56 if not isinstance(metric, Metric):
57 raise ValueError(f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}")
58 if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):
59 raise ValueError(f"Expected argument `labels` to either be `None` or a list of strings but got {labels}")
60 self.metric = metric
61 self.labels = labels
62
63 def _convert(self, x: Tensor) -> Dict[str, Any]:
64 name = self.metric.__class__.__name__.lower()
65 if self.labels is None:
66 return {f"{name}_{i}": val for i, val in enumerate(x)}
67 return {f"{name}_{lab}": val for lab, val in zip(self.labels, x)}
68
69 def update(self, *args: Any, **kwargs: Any) -> None:
70 self.metric.update(*args, **kwargs)
71
72 def compute(self) -> Dict[str, Tensor]:
73 return self._convert(self.metric.compute())
74
[end of src/torchmetrics/wrappers/classwise.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py
--- a/src/torchmetrics/wrappers/classwise.py
+++ b/src/torchmetrics/wrappers/classwise.py
@@ -51,6 +51,8 @@
'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}
"""
+ full_state_update: Optional[bool] = True
+
def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:
super().__init__()
if not isinstance(metric, Metric):
@@ -71,3 +73,6 @@
def compute(self) -> Dict[str, Tensor]:
return self._convert(self.metric.compute())
+
+ def reset(self) -> None:
+ self.metric.reset()
|
{"golden_diff": "diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py\n--- a/src/torchmetrics/wrappers/classwise.py\n+++ b/src/torchmetrics/wrappers/classwise.py\n@@ -51,6 +51,8 @@\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n \n+ full_state_update: Optional[bool] = True\n+\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n@@ -71,3 +73,6 @@\n \n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n+\n+ def reset(self) -> None:\n+ self.metric.reset()\n", "issue": "ClasswiseWrapper yields different results \n## \ud83d\udc1b Bug\r\n\r\nUsing `JaccardIndex` with `ClasswiseWrapper` results in different values than without `ClasswiseWrapper`. \r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\nRun the code snippet. \r\n\r\n#### Code sample\r\n\r\n```python\r\nfrom torchmetrics import ClasswiseWrapper, JaccardIndex\r\nimport torch \r\n\r\ntarget = torch.randint(0, 2, (10, 25, 25))\r\npreds = [ torch.randint(0, 2, (10, 25, 25)) for i in range (3)]\r\njaccard_single = JaccardIndex(num_classes=2, average=None)\r\nclass_wrapper = ClasswiseWrapper(\r\n JaccardIndex(num_classes=2, average=None),\r\n labels=[\"class1\", \"class2\"]\r\n )\r\n\r\nfor p in preds:\r\n print(\"Metric \",jaccard_single(p,target)) \r\n print(\"Wraped metric \",class_wrapper(p,target))\r\n```\r\n\r\nThe code produces the following output: \r\n\r\n```\r\nMetric tensor([0.3351, 0.3333])\r\nWraped metric {'jaccardindex_class1': tensor(0.3351), 'jaccardindex_class2': tensor(0.3333)}\r\nMetric tensor([0.3293, 0.3357])\r\nWraped metric {'jaccardindex_class1': tensor(0.3322), 'jaccardindex_class2': tensor(0.3345)}\r\nMetric tensor([0.3424, 0.3435])\r\nWraped metric {'jaccardindex_class1': tensor(0.3356), 'jaccardindex_class2': tensor(0.3375)}\r\n```\r\n\r\n### Expected behavior\r\n\r\nI would expect that the wrapped metric outputs the same values as the simple `JaccardIndex`. \r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): \r\n Installed version 0.9.2 using pip \r\n- Python & PyTorch Version (e.g., 1.0):\r\n Tested with Python 3.8.14 and pytorch 1.3.1\r\n- Any other relevant information such as OS (e.g., Linux):\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional\n\nfrom torch import Tensor\n\nfrom torchmetrics import Metric\n\n\nclass ClasswiseWrapper(Metric):\n \"\"\"Wrapper class for altering the output of classification metrics that returns multiple values to include\n label information.\n\n Args:\n metric: base metric that should be wrapped. 
It is assumed that the metric outputs a single\n tensor that is split along the first dimension.\n labels: list of strings indicating the different classes.\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}\n\n Example (labels as list of strings):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(\n ... Accuracy(num_classes=3, average=None),\n ... labels=[\"horse\", \"fish\", \"dog\"]\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}\n\n Example (in metric collection):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall\n >>> labels = [\"horse\", \"fish\", \"dog\"]\n >>> metric = MetricCollection(\n ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),\n ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n raise ValueError(f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}\")\n if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):\n raise ValueError(f\"Expected argument `labels` to either be `None` or a list of strings but got {labels}\")\n self.metric = metric\n self.labels = labels\n\n def _convert(self, x: Tensor) -> Dict[str, Any]:\n name = self.metric.__class__.__name__.lower()\n if self.labels is None:\n return {f\"{name}_{i}\": val for i, val in enumerate(x)}\n return {f\"{name}_{lab}\": val for lab, val in zip(self.labels, x)}\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n self.metric.update(*args, **kwargs)\n\n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n", "path": "src/torchmetrics/wrappers/classwise.py"}]}
| 2,063 | 204 |
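A plausible reading of the patch above: the bare `JaccardIndex` call returns per-batch values, while the wrapper kept accumulating the inner metric's state across calls (which matches the drifting, running-average-like numbers in the issue output), so the fix forwards `reset()` and marks `full_state_update`. The toy classes below only illustrate that accumulation effect; they are not the torchmetrics API.

```python
# Toy stand-in for a stateful metric: a running mean that must be reset
# between independent evaluations. Purely illustrative.
class RunningMean:
    def __init__(self):
        self.total, self.count = 0.0, 0
    def update(self, x):
        self.total += x
        self.count += 1
    def compute(self):
        return self.total / self.count
    def reset(self):
        self.total, self.count = 0.0, 0

class Wrapper:
    def __init__(self, metric, forward_reset):
        self.metric = metric
        self.forward_reset = forward_reset
    def __call__(self, x):
        self.metric.update(x)
        value = self.metric.compute()
        if self.forward_reset:
            self.metric.reset()  # conceptually what the added reset() forwarding enables
        return value

leaky = Wrapper(RunningMean(), forward_reset=False)
clean = Wrapper(RunningMean(), forward_reset=True)
for x in (1.0, 3.0, 5.0):
    print("no reset:", leaky(x), "| with reset:", clean(x))
# Without reset the values drift toward the running average (1.0, 2.0, 3.0);
# with reset each call reflects only the current input (1.0, 3.0, 5.0).
```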
gh_patches_debug_24356
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-3167
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hook: Interpolate paths (and other bytestrings) correctly into commands
### Problem
I have the following configuration for the Hook plugin in my config.yaml:
```
hook:
hooks:
- event: album_imported
command: /usr/bin/ls -l "{album.path}"
```
This is just a test to see how beets presents the path values. It appears that the paths are returned as bytes objects rather than strings. This is problematic when using the path values as arguments for external shell scripts. As can be seen below, the shell is unable to use the value provided by {album.path}.
```sh
$ beet -vv import /tmp/music/new/Al\ Di\ Meola\ -\ Elegant\ Gypsy/
```
Led to this problem:
```
hook: running command "/usr/bin/ls -l b'/tmp/music/FLAC/Al Di Meola/Elegant Gypsy'" for event album_imported
/usr/bin/ls: cannot access "b'/tmp/music/FLAC/Al Di Meola/Elegant Gypsy'": No such file or directory
```
The path "/tmp/music/FLAC/Al Di Meola/Elegant Gypsy" does exist on the filesystem after the import is complete.
### Setup
* OS: Arch Linux
* Python version: 3.4.5
* beets version: 1.4.7
* Turning off plugins made problem go away (yes/no): This issue is related to a plugin, so I didn't turn them off
My configuration (output of `beet config`) is:
```yaml
plugins: inline convert badfiles info missing lastgenre fetchart mbsync scrub smartplaylist hook
directory: /tmp/music/FLAC
library: ~/.config/beets/library.db
import:
copy: yes
write: yes
log: ~/.config/beets/import.log
languages: en
per_disc_numbering: yes
paths:
default: $albumartist/$album%aunique{}/$disc_and_track - $title
comp: $albumartist/$album%aunique{}/$disc_and_track - $title
item_fields:
disc_and_track: u'%01i-%02i' % (disc, track) if disctotal > 1 else u'%02i' % (track)
ui:
color: yes
match:
ignored: missing_tracks unmatched_tracks
convert:
copy_album_art: yes
dest: /tmp/music/ogg
embed: yes
never_convert_lossy_files: yes
format: ogg
formats:
ogg:
command: oggenc -Q -q 4 -o $dest $source
extension: ogg
aac:
command: ffmpeg -i $source -y -vn -acodec aac -aq 1 $dest
extension: m4a
alac:
command: ffmpeg -i $source -y -vn -acodec alac $dest
extension: m4a
flac: ffmpeg -i $source -y -vn -acodec flac $dest
mp3: ffmpeg -i $source -y -vn -aq 2 $dest
opus: ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest
wma: ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest
pretend: no
threads: 4
max_bitrate: 500
auto: no
tmpdir:
quiet: no
paths: {}
no_convert: ''
album_art_maxwidth: 0
lastgenre:
force: yes
prefer_specific: no
min_weight: 20
count: 4
separator: '; '
whitelist: yes
fallback:
canonical: no
source: album
auto: yes
fetchart:
sources: filesystem coverart amazon albumart
auto: yes
minwidth: 0
maxwidth: 0
enforce_ratio: no
cautious: no
cover_names:
- cover
- front
- art
- album
- folder
google_key: REDACTED
google_engine: 001442825323518660753:hrh5ch1gjzm
fanarttv_key: REDACTED
store_source: no
hook:
hooks: [{event: album_imported, command: '/usr/bin/ls -l "{album.path}"'}]
pathfields: {}
album_fields: {}
scrub:
auto: yes
missing:
count: no
total: no
album: no
smartplaylist:
relative_to:
playlist_dir: .
auto: yes
playlists: []
```
I created a Python 2 virtual environment, installed beets and any dependencies into that virtualenv, cleaned my test library, and imported the same files using the same config.yaml. This time the shell was able to use the path value returned by the hook configuration:
```
hook: running command "/usr/bin/ls -l /tmp/music/FLAC/Al Di Meola/Elegant Gypsy" for event album_imported
total 254944
-rw-r--r-- 1 mike mike 50409756 Jun 24 13:46 01 - Flight Over Rio.flac
-rw-r--r-- 1 mike mike 43352354 Jun 24 13:46 02 - Midnight Tango.flac
-rw-r--r-- 1 mike mike 7726389 Jun 24 13:46 03 - Percussion Intro.flac
-rw-r--r-- 1 mike mike 32184646 Jun 24 13:46 04 - Mediterranean Sundance.flac
-rw-r--r-- 1 mike mike 45770796 Jun 24 13:46 05 - Race With Devil on Spanish Highway.flac
-rw-r--r-- 1 mike mike 10421006 Jun 24 13:46 06 - Lady of Rome, Sister of Brazil.flac
-rw-r--r-- 1 mike mike 65807504 Jun 24 13:46 07 - Elegant Gypsy Suite.flac
-rw-r--r-- 1 mike mike 5366515 Jun 24 13:46 cover.jpg
```
I'm guessing this is due to a data type difference between Python 2 and Python 3.
</issue>
<code>
[start of beetsplug/hook.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2015, Adrian Sampson.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Allows custom commands to be run when an event is emitted by beets"""
17 from __future__ import division, absolute_import, print_function
18
19 import string
20 import subprocess
21 import six
22
23 from beets.plugins import BeetsPlugin
24 from beets.util import shlex_split, arg_encoding
25
26
27 class CodingFormatter(string.Formatter):
28 """A variant of `string.Formatter` that converts everything to `unicode`
29 strings.
30
31 This is necessary on Python 2, where formatting otherwise occurs on
32 bytestrings. It intercepts two points in the formatting process to decode
33 the format string and all fields using the specified encoding. If decoding
34 fails, the values are used as-is.
35 """
36
37 def __init__(self, coding):
38 """Creates a new coding formatter with the provided coding."""
39 self._coding = coding
40
41 def format(self, format_string, *args, **kwargs):
42 """Formats the provided string using the provided arguments and keyword
43 arguments.
44
45 This method decodes the format string using the formatter's coding.
46
47 See str.format and string.Formatter.format.
48 """
49 try:
50 format_string = format_string.decode(self._coding)
51 except UnicodeEncodeError:
52 pass
53
54 return super(CodingFormatter, self).format(format_string, *args,
55 **kwargs)
56
57 def convert_field(self, value, conversion):
58 """Converts the provided value given a conversion type.
59
60 This method decodes the converted value using the formatter's coding.
61
62 See string.Formatter.convert_field.
63 """
64 converted = super(CodingFormatter, self).convert_field(value,
65 conversion)
66
67 if isinstance(converted, bytes):
68 return converted.decode(self._coding)
69
70 return converted
71
72
73 class HookPlugin(BeetsPlugin):
74 """Allows custom commands to be run when an event is emitted by beets"""
75 def __init__(self):
76 super(HookPlugin, self).__init__()
77
78 self.config.add({
79 'hooks': []
80 })
81
82 hooks = self.config['hooks'].get(list)
83
84 for hook_index in range(len(hooks)):
85 hook = self.config['hooks'][hook_index]
86
87 hook_event = hook['event'].as_str()
88 hook_command = hook['command'].as_str()
89
90 self.create_and_register_hook(hook_event, hook_command)
91
92 def create_and_register_hook(self, event, command):
93 def hook_function(**kwargs):
94 if command is None or len(command) == 0:
95 self._log.error('invalid command "{0}"', command)
96 return
97
98 # Use a string formatter that works on Unicode strings.
99 if six.PY2:
100 formatter = CodingFormatter(arg_encoding())
101 else:
102 formatter = string.Formatter()
103
104 command_pieces = shlex_split(command)
105
106 for i, piece in enumerate(command_pieces):
107 command_pieces[i] = formatter.format(piece, event=event,
108 **kwargs)
109
110 self._log.debug(u'running command "{0}" for event {1}',
111 u' '.join(command_pieces), event)
112
113 try:
114 subprocess.Popen(command_pieces).wait()
115 except OSError as exc:
116 self._log.error(u'hook for {0} failed: {1}', event, exc)
117
118 self.register_listener(event, hook_function)
119
[end of beetsplug/hook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/beetsplug/hook.py b/beetsplug/hook.py
--- a/beetsplug/hook.py
+++ b/beetsplug/hook.py
@@ -18,7 +18,6 @@
import string
import subprocess
-import six
from beets.plugins import BeetsPlugin
from beets.util import shlex_split, arg_encoding
@@ -46,10 +45,8 @@
See str.format and string.Formatter.format.
"""
- try:
+ if isinstance(format_string, bytes):
format_string = format_string.decode(self._coding)
- except UnicodeEncodeError:
- pass
return super(CodingFormatter, self).format(format_string, *args,
**kwargs)
@@ -96,10 +93,7 @@
return
# Use a string formatter that works on Unicode strings.
- if six.PY2:
- formatter = CodingFormatter(arg_encoding())
- else:
- formatter = string.Formatter()
+ formatter = CodingFormatter(arg_encoding())
command_pieces = shlex_split(command)
|
{"golden_diff": "diff --git a/beetsplug/hook.py b/beetsplug/hook.py\n--- a/beetsplug/hook.py\n+++ b/beetsplug/hook.py\n@@ -18,7 +18,6 @@\n \n import string\n import subprocess\n-import six\n \n from beets.plugins import BeetsPlugin\n from beets.util import shlex_split, arg_encoding\n@@ -46,10 +45,8 @@\n \n See str.format and string.Formatter.format.\n \"\"\"\n- try:\n+ if isinstance(format_string, bytes):\n format_string = format_string.decode(self._coding)\n- except UnicodeEncodeError:\n- pass\n \n return super(CodingFormatter, self).format(format_string, *args,\n **kwargs)\n@@ -96,10 +93,7 @@\n return\n \n # Use a string formatter that works on Unicode strings.\n- if six.PY2:\n- formatter = CodingFormatter(arg_encoding())\n- else:\n- formatter = string.Formatter()\n+ formatter = CodingFormatter(arg_encoding())\n \n command_pieces = shlex_split(command)\n", "issue": "hook: Interpolate paths (and other bytestrings) correctly into commands\n### Problem\r\n\r\nI have the following configuration for the Hook plugin in my config.yaml:\r\n\r\n```\r\nhook:\r\n hooks:\r\n - event: album_imported\r\n command: /usr/bin/ls -l \"{album.path}\"\r\n```\r\n\r\nThis is just a test to see how beets presents the path values. It appears that the paths are returned as bytes objects rather than strings. This is problematic when using the path values as arguments for external shell scripts. As can be seen below, the shell is unable to use the value provided by {album.path}.\r\n\r\n\r\n```sh\r\n$ beet -vv import /tmp/music/new/Al\\ Di\\ Meola\\ -\\ Elegant\\ Gypsy/\r\n```\r\n\r\nLed to this problem:\r\n\r\n```\r\nhook: running command \"/usr/bin/ls -l b'/tmp/music/FLAC/Al Di Meola/Elegant Gypsy'\" for event album_imported\r\n/usr/bin/ls: cannot access \"b'/tmp/music/FLAC/Al Di Meola/Elegant Gypsy'\": No such file or directory\r\n\r\n```\r\n\r\nThe path \"/tmp/music/FLAC/Al Di Meola/Elegant Gypsy\" does exist on the filesystem after the import is complete.\r\n\r\n\r\n\r\n### Setup\r\n\r\n* OS: Arch Linux\r\n* Python version: 3.4.5\r\n* beets version: 1.4.7\r\n* Turning off plugins made problem go away (yes/no): This issue is related to a plugin, so I didn't turn them off\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\n```yaml\r\nplugins: inline convert badfiles info missing lastgenre fetchart mbsync scrub smartplaylist hook\r\ndirectory: /tmp/music/FLAC\r\nlibrary: ~/.config/beets/library.db\r\n\r\nimport:\r\n copy: yes\r\n write: yes\r\n log: ~/.config/beets/import.log\r\n languages: en\r\nper_disc_numbering: yes\r\n\r\npaths:\r\n default: $albumartist/$album%aunique{}/$disc_and_track - $title\r\n comp: $albumartist/$album%aunique{}/$disc_and_track - $title\r\nitem_fields:\r\n disc_and_track: u'%01i-%02i' % (disc, track) if disctotal > 1 else u'%02i' % (track)\r\n\r\nui:\r\n color: yes\r\n\r\nmatch:\r\n ignored: missing_tracks unmatched_tracks\r\nconvert:\r\n copy_album_art: yes\r\n dest: /tmp/music/ogg\r\n embed: yes\r\n never_convert_lossy_files: yes\r\n format: ogg\r\n formats:\r\n ogg:\r\n command: oggenc -Q -q 4 -o $dest $source\r\n extension: ogg\r\n aac:\r\n command: ffmpeg -i $source -y -vn -acodec aac -aq 1 $dest\r\n extension: m4a\r\n alac:\r\n command: ffmpeg -i $source -y -vn -acodec alac $dest\r\n extension: m4a\r\n flac: ffmpeg -i $source -y -vn -acodec flac $dest\r\n mp3: ffmpeg -i $source -y -vn -aq 2 $dest\r\n opus: ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest\r\n wma: ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest\r\n pretend: no\r\n threads: 4\r\n max_bitrate: 500\r\n 
auto: no\r\n tmpdir:\r\n quiet: no\r\n\r\n paths: {}\r\n no_convert: ''\r\n album_art_maxwidth: 0\r\nlastgenre:\r\n force: yes\r\n prefer_specific: no\r\n min_weight: 20\r\n count: 4\r\n separator: '; '\r\n whitelist: yes\r\n fallback:\r\n canonical: no\r\n source: album\r\n auto: yes\r\nfetchart:\r\n sources: filesystem coverart amazon albumart\r\n auto: yes\r\n minwidth: 0\r\n maxwidth: 0\r\n enforce_ratio: no\r\n cautious: no\r\n cover_names:\r\n - cover\r\n - front\r\n - art\r\n - album\r\n - folder\r\n google_key: REDACTED\r\n google_engine: 001442825323518660753:hrh5ch1gjzm\r\n fanarttv_key: REDACTED\r\n store_source: no\r\nhook:\r\n hooks: [{event: album_imported, command: '/usr/bin/ls -l \"{album.path}\"'}]\r\npathfields: {}\r\nalbum_fields: {}\r\nscrub:\r\n auto: yes\r\nmissing:\r\n count: no\r\n total: no\r\n album: no\r\nsmartplaylist:\r\n relative_to:\r\n playlist_dir: .\r\n auto: yes\r\n playlists: []\r\n```\r\n\r\nI created a Python 2 virtual environment, installed beets and any dependencies in to that virtualenv, cleaned my test library, and imported the same files using the same config.yaml. This time the shell was able to use the path value returned by the hook configuration:\r\n\r\n```\r\nhook: running command \"/usr/bin/ls -l /tmp/music/FLAC/Al Di Meola/Elegant Gypsy\" for event album_imported\r\ntotal 254944\r\n-rw-r--r-- 1 mike mike 50409756 Jun 24 13:46 01 - Flight Over Rio.flac\r\n-rw-r--r-- 1 mike mike 43352354 Jun 24 13:46 02 - Midnight Tango.flac\r\n-rw-r--r-- 1 mike mike 7726389 Jun 24 13:46 03 - Percussion Intro.flac\r\n-rw-r--r-- 1 mike mike 32184646 Jun 24 13:46 04 - Mediterranean Sundance.flac\r\n-rw-r--r-- 1 mike mike 45770796 Jun 24 13:46 05 - Race With Devil on Spanish Highway.flac\r\n-rw-r--r-- 1 mike mike 10421006 Jun 24 13:46 06 - Lady of Rome, Sister of Brazil.flac\r\n-rw-r--r-- 1 mike mike 65807504 Jun 24 13:46 07 - Elegant Gypsy Suite.flac\r\n-rw-r--r-- 1 mike mike 5366515 Jun 24 13:46 cover.jpg\r\n```\r\nI'm guessing this is due to a data type difference between Python 2 and Python 3.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Allows custom commands to be run when an event is emitted by beets\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport string\nimport subprocess\nimport six\n\nfrom beets.plugins import BeetsPlugin\nfrom beets.util import shlex_split, arg_encoding\n\n\nclass CodingFormatter(string.Formatter):\n \"\"\"A variant of `string.Formatter` that converts everything to `unicode`\n strings.\n\n This is necessary on Python 2, where formatting otherwise occurs on\n bytestrings. It intercepts two points in the formatting process to decode\n the format string and all fields using the specified encoding. 
If decoding\n fails, the values are used as-is.\n \"\"\"\n\n def __init__(self, coding):\n \"\"\"Creates a new coding formatter with the provided coding.\"\"\"\n self._coding = coding\n\n def format(self, format_string, *args, **kwargs):\n \"\"\"Formats the provided string using the provided arguments and keyword\n arguments.\n\n This method decodes the format string using the formatter's coding.\n\n See str.format and string.Formatter.format.\n \"\"\"\n try:\n format_string = format_string.decode(self._coding)\n except UnicodeEncodeError:\n pass\n\n return super(CodingFormatter, self).format(format_string, *args,\n **kwargs)\n\n def convert_field(self, value, conversion):\n \"\"\"Converts the provided value given a conversion type.\n\n This method decodes the converted value using the formatter's coding.\n\n See string.Formatter.convert_field.\n \"\"\"\n converted = super(CodingFormatter, self).convert_field(value,\n conversion)\n\n if isinstance(converted, bytes):\n return converted.decode(self._coding)\n\n return converted\n\n\nclass HookPlugin(BeetsPlugin):\n \"\"\"Allows custom commands to be run when an event is emitted by beets\"\"\"\n def __init__(self):\n super(HookPlugin, self).__init__()\n\n self.config.add({\n 'hooks': []\n })\n\n hooks = self.config['hooks'].get(list)\n\n for hook_index in range(len(hooks)):\n hook = self.config['hooks'][hook_index]\n\n hook_event = hook['event'].as_str()\n hook_command = hook['command'].as_str()\n\n self.create_and_register_hook(hook_event, hook_command)\n\n def create_and_register_hook(self, event, command):\n def hook_function(**kwargs):\n if command is None or len(command) == 0:\n self._log.error('invalid command \"{0}\"', command)\n return\n\n # Use a string formatter that works on Unicode strings.\n if six.PY2:\n formatter = CodingFormatter(arg_encoding())\n else:\n formatter = string.Formatter()\n\n command_pieces = shlex_split(command)\n\n for i, piece in enumerate(command_pieces):\n command_pieces[i] = formatter.format(piece, event=event,\n **kwargs)\n\n self._log.debug(u'running command \"{0}\" for event {1}',\n u' '.join(command_pieces), event)\n\n try:\n subprocess.Popen(command_pieces).wait()\n except OSError as exc:\n self._log.error(u'hook for {0} failed: {1}', event, exc)\n\n self.register_listener(event, hook_function)\n", "path": "beetsplug/hook.py"}]}
| 3,137 | 234 |
gh_patches_debug_24897
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1205
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] TrainTestFeatureDrift doesn't display anything for dataframe data
**Describe the bug**
When running the check on naïve iris dataframe, we get no display at all, rather than a display showing the exact same distribution which will happen if constructing a Dataset from the dataframe prior.
**To Reproduce**
```
import pandas as pd
from deepchecks.tabular.dataset import Dataset
from deepchecks.tabular.checks import TrainTestFeatureDrift
iris = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv')
TrainTestFeatureDrift().run(iris, iris)
ds = Dataset(iris)
TrainTestFeatureDrift().run(ds , ds )
```
**Expected behavior**
Exact same behavior for the two cases.
**Screenshots**

</issue>
<code>
[start of deepchecks/tabular/checks/distribution/whole_dataset_drift.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module contains the domain classifier drift check."""
12 from deepchecks.core import CheckResult, ConditionResult, ConditionCategory
13 from deepchecks.tabular import Context, TrainTestCheck
14 from deepchecks.core.check_utils.whole_dataset_drift_utils import run_whole_dataset_drift
15 from deepchecks.utils.strings import format_number
16
17 __all__ = ['WholeDatasetDrift']
18
19
20 class WholeDatasetDrift(TrainTestCheck):
21 """
22 Calculate drift between the entire train and test datasets using a model trained to distinguish between them.
23
24 Check fits a new model to distinguish between train and test datasets, called a Domain Classifier.
25 Once the Domain Classifier is fitted the check calculates the feature importance for the domain classifier
26 model. The result of the check is based on the AUC of the domain classifier model, and the check displays
27 the change in distribution between train and test for the top features according to the
28 calculated feature importance.
29
30 Parameters
31 ----------
32 n_top_columns : int , default: 3
33 Amount of columns to show ordered by domain classifier feature importance. This limit is used together
34 (AND) with min_feature_importance, so less than n_top_columns features can be displayed.
35 min_feature_importance : float , default: 0.05
36 Minimum feature importance to show in the check display. Feature importance
37 sums to 1, so for example the default value of 0.05 means that all features with importance contributing
38 less than 5% to the predictive power of the Domain Classifier won't be displayed. This limit is used
39 together (AND) with n_top_columns, so features more important than min_feature_importance can be
40 hidden.
41 max_num_categories : int , default: 10
42 Only for categorical columns. Max number of categories to display in distributio plots. If there are
43 more, they are binned into an "Other" category in the display. If max_num_categories=None, there is
44 no limit.
45 sample_size : int , default: 10_000
46 Max number of rows to use from each dataset for the training and evaluation of the domain classifier.
47 random_state : int , default: 42
48 Random seed for the check.
49 test_size : float , default: 0.3
50 Fraction of the combined datasets to use for the evaluation of the domain classifier.
51 min_meaningful_drift_score : float , default 0.05
52 Minimum drift score for displaying drift in check. Under that score, check will display "nothing found".
53 """
54
55 def __init__(
56 self,
57 n_top_columns: int = 3,
58 min_feature_importance: float = 0.05,
59 max_num_categories: int = 10,
60 sample_size: int = 10_000,
61 random_state: int = 42,
62 test_size: float = 0.3,
63 min_meaningful_drift_score: float = 0.05,
64 **kwargs
65 ):
66 super().__init__(**kwargs)
67
68 self.n_top_columns = n_top_columns
69 self.min_feature_importance = min_feature_importance
70 self.max_num_categories = max_num_categories
71 self.sample_size = sample_size
72 self.random_state = random_state
73 self.test_size = test_size
74 self.min_meaningful_drift_score = min_meaningful_drift_score
75
76 def run_logic(self, context: Context) -> CheckResult:
77 """Run check.
78
79 Returns
80 -------
81 CheckResult
82 value: dictionary containing the domain classifier auc and a dict of column name to its feature
83 importance as calculated for the domain classifier model.
84 display: distribution graph for each column for the columns most explaining the dataset difference,
85 comparing the train and test distributions.
86
87 Raises
88 ------
89 DeepchecksValueError
90 If the object is not a Dataset or DataFrame instance
91 """
92 train_dataset = context.train
93 test_dataset = context.test
94 features = train_dataset.features
95 cat_features = train_dataset.cat_features
96 numerical_features = train_dataset.numerical_features
97
98 sample_size = min(self.sample_size, train_dataset.n_samples, test_dataset.n_samples)
99
100 headnote = """
101 <span>
102 The shown features are the features that are most important for the domain classifier - the
103 domain_classifier trained to distinguish between the train and test datasets.<br>
104 </span>
105 """
106
107 values_dict, displays = run_whole_dataset_drift(train_dataframe=train_dataset.data[features],
108 test_dataframe=test_dataset.data[features],
109 numerical_features=numerical_features,
110 cat_features=cat_features,
111 sample_size=sample_size, random_state=self.random_state,
112 test_size=self.test_size, n_top_columns=self.n_top_columns,
113 min_feature_importance=self.min_feature_importance,
114 max_num_categories=self.max_num_categories,
115 min_meaningful_drift_score=self.min_meaningful_drift_score)
116
117 if displays:
118 displays.insert(0, headnote)
119
120 return CheckResult(value=values_dict, display=displays, header='Whole Dataset Drift')
121
122 def add_condition_overall_drift_value_not_greater_than(self, max_drift_value: float = 0.25):
123 """Add condition.
124
125 Overall drift score, calculated as (2 * AUC - 1) for the AUC of the dataset discriminator model, is not greater
126 than the specified value. This value is used as it scales the AUC value to the range [0, 1], where 0 indicates
127 a random model (and no drift) and 1 indicates a perfect model (and completely distinguishable datasets).
128
129 Parameters
130 ----------
131 max_drift_value : float , default: 0.25
132 Maximal drift value allowed (value 0 and above)
133 """
134
135 def condition(result: dict):
136 drift_score = result['domain_classifier_drift_score']
137 if drift_score > max_drift_value:
138 message = f'Found drift value of: {format_number(drift_score)}, corresponding to a domain classifier ' \
139 f'AUC of: {format_number(result["domain_classifier_auc"])}'
140 return ConditionResult(ConditionCategory.FAIL, message)
141 else:
142 return ConditionResult(ConditionCategory.PASS)
143
144 return self.add_condition(f'Drift value is not greater than {format_number(max_drift_value)}',
145 condition)
146
[end of deepchecks/tabular/checks/distribution/whole_dataset_drift.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/tabular/checks/distribution/whole_dataset_drift.py b/deepchecks/tabular/checks/distribution/whole_dataset_drift.py
--- a/deepchecks/tabular/checks/distribution/whole_dataset_drift.py
+++ b/deepchecks/tabular/checks/distribution/whole_dataset_drift.py
@@ -91,7 +91,6 @@
"""
train_dataset = context.train
test_dataset = context.test
- features = train_dataset.features
cat_features = train_dataset.cat_features
numerical_features = train_dataset.numerical_features
@@ -104,8 +103,8 @@
</span>
"""
- values_dict, displays = run_whole_dataset_drift(train_dataframe=train_dataset.data[features],
- test_dataframe=test_dataset.data[features],
+ values_dict, displays = run_whole_dataset_drift(train_dataframe=train_dataset.features_columns,
+ test_dataframe=test_dataset.features_columns,
numerical_features=numerical_features,
cat_features=cat_features,
sample_size=sample_size, random_state=self.random_state,
|
{"golden_diff": "diff --git a/deepchecks/tabular/checks/distribution/whole_dataset_drift.py b/deepchecks/tabular/checks/distribution/whole_dataset_drift.py\n--- a/deepchecks/tabular/checks/distribution/whole_dataset_drift.py\n+++ b/deepchecks/tabular/checks/distribution/whole_dataset_drift.py\n@@ -91,7 +91,6 @@\n \"\"\"\n train_dataset = context.train\n test_dataset = context.test\n- features = train_dataset.features\n cat_features = train_dataset.cat_features\n numerical_features = train_dataset.numerical_features\n \n@@ -104,8 +103,8 @@\n </span>\n \"\"\"\n \n- values_dict, displays = run_whole_dataset_drift(train_dataframe=train_dataset.data[features],\n- test_dataframe=test_dataset.data[features],\n+ values_dict, displays = run_whole_dataset_drift(train_dataframe=train_dataset.features_columns,\n+ test_dataframe=test_dataset.features_columns,\n numerical_features=numerical_features,\n cat_features=cat_features,\n sample_size=sample_size, random_state=self.random_state,\n", "issue": "[BUG] TrainTestFeatureDrift doesn't display anything for dataframe data\n**Describe the bug**\r\nWhen running the check on na\u00efve iris dataframe, we get no display at all, rather than a display showing the exact same distribution which will happen if constructing a Dataset from the dataframe prior.\r\n\r\n**To Reproduce**\r\n\r\n```\r\nimport pandas as pd\r\nfrom deepchecks.tabular.dataset import Dataset\r\nfrom deepchecks.tabular.checks import TrainTestFeatureDrift\r\n\r\niris = pd.read_csv('https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv')\r\nTrainTestFeatureDrift().run(iris, iris)\r\n\r\nds = Dataset(iris)\r\nTrainTestFeatureDrift().run(ds , ds )\r\n\r\n```\r\n\r\n**Expected behavior**\r\nExact same behavior for the two cases. \r\n\r\n**Screenshots**\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module contains the domain classifier drift check.\"\"\"\nfrom deepchecks.core import CheckResult, ConditionResult, ConditionCategory\nfrom deepchecks.tabular import Context, TrainTestCheck\nfrom deepchecks.core.check_utils.whole_dataset_drift_utils import run_whole_dataset_drift\nfrom deepchecks.utils.strings import format_number\n\n__all__ = ['WholeDatasetDrift']\n\n\nclass WholeDatasetDrift(TrainTestCheck):\n \"\"\"\n Calculate drift between the entire train and test datasets using a model trained to distinguish between them.\n\n Check fits a new model to distinguish between train and test datasets, called a Domain Classifier.\n Once the Domain Classifier is fitted the check calculates the feature importance for the domain classifier\n model. The result of the check is based on the AUC of the domain classifier model, and the check displays\n the change in distribution between train and test for the top features according to the\n calculated feature importance.\n\n Parameters\n ----------\n n_top_columns : int , default: 3\n Amount of columns to show ordered by domain classifier feature importance. 
This limit is used together\n (AND) with min_feature_importance, so less than n_top_columns features can be displayed.\n min_feature_importance : float , default: 0.05\n Minimum feature importance to show in the check display. Feature importance\n sums to 1, so for example the default value of 0.05 means that all features with importance contributing\n less than 5% to the predictive power of the Domain Classifier won't be displayed. This limit is used\n together (AND) with n_top_columns, so features more important than min_feature_importance can be\n hidden.\n max_num_categories : int , default: 10\n Only for categorical columns. Max number of categories to display in distributio plots. If there are\n more, they are binned into an \"Other\" category in the display. If max_num_categories=None, there is\n no limit.\n sample_size : int , default: 10_000\n Max number of rows to use from each dataset for the training and evaluation of the domain classifier.\n random_state : int , default: 42\n Random seed for the check.\n test_size : float , default: 0.3\n Fraction of the combined datasets to use for the evaluation of the domain classifier.\n min_meaningful_drift_score : float , default 0.05\n Minimum drift score for displaying drift in check. Under that score, check will display \"nothing found\".\n \"\"\"\n\n def __init__(\n self,\n n_top_columns: int = 3,\n min_feature_importance: float = 0.05,\n max_num_categories: int = 10,\n sample_size: int = 10_000,\n random_state: int = 42,\n test_size: float = 0.3,\n min_meaningful_drift_score: float = 0.05,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n self.n_top_columns = n_top_columns\n self.min_feature_importance = min_feature_importance\n self.max_num_categories = max_num_categories\n self.sample_size = sample_size\n self.random_state = random_state\n self.test_size = test_size\n self.min_meaningful_drift_score = min_meaningful_drift_score\n\n def run_logic(self, context: Context) -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value: dictionary containing the domain classifier auc and a dict of column name to its feature\n importance as calculated for the domain classifier model.\n display: distribution graph for each column for the columns most explaining the dataset difference,\n comparing the train and test distributions.\n\n Raises\n ------\n DeepchecksValueError\n If the object is not a Dataset or DataFrame instance\n \"\"\"\n train_dataset = context.train\n test_dataset = context.test\n features = train_dataset.features\n cat_features = train_dataset.cat_features\n numerical_features = train_dataset.numerical_features\n\n sample_size = min(self.sample_size, train_dataset.n_samples, test_dataset.n_samples)\n\n headnote = \"\"\"\n <span>\n The shown features are the features that are most important for the domain classifier - the\n domain_classifier trained to distinguish between the train and test datasets.<br>\n </span>\n \"\"\"\n\n values_dict, displays = run_whole_dataset_drift(train_dataframe=train_dataset.data[features],\n test_dataframe=test_dataset.data[features],\n numerical_features=numerical_features,\n cat_features=cat_features,\n sample_size=sample_size, random_state=self.random_state,\n test_size=self.test_size, n_top_columns=self.n_top_columns,\n min_feature_importance=self.min_feature_importance,\n max_num_categories=self.max_num_categories,\n min_meaningful_drift_score=self.min_meaningful_drift_score)\n\n if displays:\n displays.insert(0, headnote)\n\n return CheckResult(value=values_dict, 
display=displays, header='Whole Dataset Drift')\n\n def add_condition_overall_drift_value_not_greater_than(self, max_drift_value: float = 0.25):\n \"\"\"Add condition.\n\n Overall drift score, calculated as (2 * AUC - 1) for the AUC of the dataset discriminator model, is not greater\n than the specified value. This value is used as it scales the AUC value to the range [0, 1], where 0 indicates\n a random model (and no drift) and 1 indicates a perfect model (and completely distinguishable datasets).\n\n Parameters\n ----------\n max_drift_value : float , default: 0.25\n Maximal drift value allowed (value 0 and above)\n \"\"\"\n\n def condition(result: dict):\n drift_score = result['domain_classifier_drift_score']\n if drift_score > max_drift_value:\n message = f'Found drift value of: {format_number(drift_score)}, corresponding to a domain classifier ' \\\n f'AUC of: {format_number(result[\"domain_classifier_auc\"])}'\n return ConditionResult(ConditionCategory.FAIL, message)\n else:\n return ConditionResult(ConditionCategory.PASS)\n\n return self.add_condition(f'Drift value is not greater than {format_number(max_drift_value)}',\n condition)\n", "path": "deepchecks/tabular/checks/distribution/whole_dataset_drift.py"}]}
| 2,585 | 237 |
gh_patches_debug_32666
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1776
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Infinite self-responses in Mattermost connector
After fixing the Mattermost connector with PR #1774 it turns out it suffers from the same infinite self-response problem (#1691) as was fixed for the Gitter connector in #1692.
</issue>
<code>
[start of opsdroid/connector/mattermost/__init__.py]
1 """A connector for Mattermost."""
2 import logging
3 import json
4
5 from mattermostdriver import Driver, Websocket
6 from voluptuous import Required
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message
10
11 _LOGGER = logging.getLogger(__name__)
12 CONFIG_SCHEMA = {
13 Required("token"): str,
14 Required("url"): str,
15 Required("team-name"): str,
16 "scheme": str,
17 "port": int,
18 "ssl-verify": bool,
19 "connect-timeout": int,
20 }
21
22
23 class ConnectorMattermost(Connector):
24 """A connector for Mattermost."""
25
26 def __init__(self, config, opsdroid=None):
27 """Create the connector."""
28 super().__init__(config, opsdroid=opsdroid)
29 _LOGGER.debug(_("Starting Mattermost connector"))
30 self.name = "mattermost"
31 self.token = config["token"]
32 self.url = config["url"]
33 self.team_name = config["team-name"]
34 self.scheme = config.get("scheme", "https")
35 self.port = config.get("port", 8065)
36 self.verify = config.get("ssl-verify", True)
37 self.timeout = config.get("connect-timeout", 30)
38 self.request_timeout = None
39 self.mfa_token = None
40 self.debug = False
41 self.listening = True
42
43 self.mm_driver = Driver(
44 {
45 "url": self.url,
46 "token": self.token,
47 "scheme": self.scheme,
48 "port": self.port,
49 "verify": self.verify,
50 "timeout": self.timeout,
51 "request_timeout": self.request_timeout,
52 "mfa_token": self.mfa_token,
53 "debug": self.debug,
54 }
55 )
56
57 async def connect(self):
58 """Connect to the chat service."""
59 _LOGGER.info(_("Connecting to Mattermost"))
60
61 login_response = self.mm_driver.login()
62
63 _LOGGER.info(login_response)
64
65 if "id" in login_response:
66 self.bot_id = login_response["id"]
67 if "username" in login_response:
68 self.bot_name = login_response["username"]
69
70 _LOGGER.info(_("Connected as %s"), self.bot_name)
71
72 self.mm_driver.websocket = Websocket(
73 self.mm_driver.options, self.mm_driver.client.token
74 )
75
76 _LOGGER.info(_("Connected successfully"))
77
78 async def disconnect(self):
79 """Disconnect from Mattermost."""
80 self.listening = False
81 self.mm_driver.logout()
82
83 async def listen(self):
84 """Listen for and parse new messages."""
85 await self.mm_driver.websocket.connect(self.process_message)
86
87 async def process_message(self, raw_message):
88 """Process a raw message and pass it to the parser."""
89 _LOGGER.info(raw_message)
90
91 message = json.loads(raw_message)
92
93 if "event" in message and message["event"] == "posted":
94 data = message["data"]
95 post = json.loads(data["post"])
96 await self.opsdroid.parse(
97 Message(
98 text=post["message"],
99 user=data["sender_name"],
100 target=data["channel_name"],
101 connector=self,
102 raw_event=message,
103 )
104 )
105
106 @register_event(Message)
107 async def send_message(self, message):
108 """Respond with a message."""
109 _LOGGER.debug(
110 _("Responding with: '%s' in room %s"), message.text, message.target
111 )
112 channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(
113 self.team_name, message.target
114 )["id"]
115 self.mm_driver.posts.create_post(
116 options={"channel_id": channel_id, "message": message.text}
117 )
118
[end of opsdroid/connector/mattermost/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py
--- a/opsdroid/connector/mattermost/__init__.py
+++ b/opsdroid/connector/mattermost/__init__.py
@@ -39,6 +39,7 @@
self.mfa_token = None
self.debug = False
self.listening = True
+ self.bot_id = None
self.mm_driver = Driver(
{
@@ -66,8 +67,7 @@
self.bot_id = login_response["id"]
if "username" in login_response:
self.bot_name = login_response["username"]
-
- _LOGGER.info(_("Connected as %s"), self.bot_name)
+ _LOGGER.info(_("Connected as %s"), self.bot_name)
self.mm_driver.websocket = Websocket(
self.mm_driver.options, self.mm_driver.client.token
@@ -93,15 +93,18 @@
if "event" in message and message["event"] == "posted":
data = message["data"]
post = json.loads(data["post"])
- await self.opsdroid.parse(
- Message(
- text=post["message"],
- user=data["sender_name"],
- target=data["channel_name"],
- connector=self,
- raw_event=message,
+ # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775)
+ # (but also parse if somehow our bot_id is unknown, like in the unit tests)
+ if self.bot_id is None or self.bot_id != post["user_id"]:
+ await self.opsdroid.parse(
+ Message(
+ text=post["message"],
+ user=data["sender_name"],
+ target=data["channel_name"],
+ connector=self,
+ raw_event=message,
+ )
)
- )
@register_event(Message)
async def send_message(self, message):
|
{"golden_diff": "diff --git a/opsdroid/connector/mattermost/__init__.py b/opsdroid/connector/mattermost/__init__.py\n--- a/opsdroid/connector/mattermost/__init__.py\n+++ b/opsdroid/connector/mattermost/__init__.py\n@@ -39,6 +39,7 @@\n self.mfa_token = None\n self.debug = False\n self.listening = True\n+ self.bot_id = None\n \n self.mm_driver = Driver(\n {\n@@ -66,8 +67,7 @@\n self.bot_id = login_response[\"id\"]\n if \"username\" in login_response:\n self.bot_name = login_response[\"username\"]\n-\n- _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n+ _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n \n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n@@ -93,15 +93,18 @@\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n- await self.opsdroid.parse(\n- Message(\n- text=post[\"message\"],\n- user=data[\"sender_name\"],\n- target=data[\"channel_name\"],\n- connector=self,\n- raw_event=message,\n+ # don't parse our own messages (https://github.com/opsdroid/opsdroid/issues/1775)\n+ # (but also parse if somehow our bot_id is unknown, like in the unit tests)\n+ if self.bot_id is None or self.bot_id != post[\"user_id\"]:\n+ await self.opsdroid.parse(\n+ Message(\n+ text=post[\"message\"],\n+ user=data[\"sender_name\"],\n+ target=data[\"channel_name\"],\n+ connector=self,\n+ raw_event=message,\n+ )\n )\n- )\n \n @register_event(Message)\n async def send_message(self, message):\n", "issue": "Infinite self-responses in Mattermost connector\nAfter fixing the Mattermost connector with PR #1774 it turns out it suffers from the same infinite self-response problem (#1691) as was fixed for the Gitter connector in #1692.\n", "before_files": [{"content": "\"\"\"A connector for Mattermost.\"\"\"\nimport logging\nimport json\n\nfrom mattermostdriver import Driver, Websocket\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n Required(\"url\"): str,\n Required(\"team-name\"): str,\n \"scheme\": str,\n \"port\": int,\n \"ssl-verify\": bool,\n \"connect-timeout\": int,\n}\n\n\nclass ConnectorMattermost(Connector):\n \"\"\"A connector for Mattermost.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Mattermost connector\"))\n self.name = \"mattermost\"\n self.token = config[\"token\"]\n self.url = config[\"url\"]\n self.team_name = config[\"team-name\"]\n self.scheme = config.get(\"scheme\", \"https\")\n self.port = config.get(\"port\", 8065)\n self.verify = config.get(\"ssl-verify\", True)\n self.timeout = config.get(\"connect-timeout\", 30)\n self.request_timeout = None\n self.mfa_token = None\n self.debug = False\n self.listening = True\n\n self.mm_driver = Driver(\n {\n \"url\": self.url,\n \"token\": self.token,\n \"scheme\": self.scheme,\n \"port\": self.port,\n \"verify\": self.verify,\n \"timeout\": self.timeout,\n \"request_timeout\": self.request_timeout,\n \"mfa_token\": self.mfa_token,\n \"debug\": self.debug,\n }\n )\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Mattermost\"))\n\n login_response = self.mm_driver.login()\n\n _LOGGER.info(login_response)\n\n if \"id\" in login_response:\n self.bot_id = login_response[\"id\"]\n if \"username\" 
in login_response:\n self.bot_name = login_response[\"username\"]\n\n _LOGGER.info(_(\"Connected as %s\"), self.bot_name)\n\n self.mm_driver.websocket = Websocket(\n self.mm_driver.options, self.mm_driver.client.token\n )\n\n _LOGGER.info(_(\"Connected successfully\"))\n\n async def disconnect(self):\n \"\"\"Disconnect from Mattermost.\"\"\"\n self.listening = False\n self.mm_driver.logout()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n await self.mm_driver.websocket.connect(self.process_message)\n\n async def process_message(self, raw_message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n _LOGGER.info(raw_message)\n\n message = json.loads(raw_message)\n\n if \"event\" in message and message[\"event\"] == \"posted\":\n data = message[\"data\"]\n post = json.loads(data[\"post\"])\n await self.opsdroid.parse(\n Message(\n text=post[\"message\"],\n user=data[\"sender_name\"],\n target=data[\"channel_name\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n channel_id = self.mm_driver.channels.get_channel_by_name_and_team_name(\n self.team_name, message.target\n )[\"id\"]\n self.mm_driver.posts.create_post(\n options={\"channel_id\": channel_id, \"message\": message.text}\n )\n", "path": "opsdroid/connector/mattermost/__init__.py"}]}
| 1,636 | 440 |
gh_patches_debug_18344
|
rasdani/github-patches
|
git_diff
|
LibraryOfCongress__concordia-731
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include attribution in exported text files
**Is your feature request related to a problem? Please describe.**
The plain text files, one per page / asset, which are exported in BagIt form need to have attributions at the bottom of the text.
**Describe the solution you'd like**
Each text file should contain the attribution "Transcribed and reviewed by volunteers participating in the By The People project at crowd.loc.gov." at the bottom.
</issue>
<code>
[start of exporter/views.py]
1 import os
2 import re
3 import shutil
4 import tempfile
5 from logging import getLogger
6
7 import bagit
8 import boto3
9 from django.conf import settings
10 from django.contrib.admin.views.decorators import staff_member_required
11 from django.db.models import OuterRef, Subquery
12 from django.http import HttpResponse, HttpResponseRedirect
13 from django.utils.decorators import method_decorator
14 from django.views.generic import TemplateView
15 from tabular_export.core import export_to_csv_response, flatten_queryset
16
17 from concordia.models import Asset, Transcription, TranscriptionStatus
18
19 logger = getLogger(__name__)
20
21
22 def get_latest_transcription_data(asset_qs):
23 latest_trans_subquery = (
24 Transcription.objects.filter(asset=OuterRef("pk"))
25 .order_by("-pk")
26 .values("text")
27 )
28
29 assets = asset_qs.annotate(latest_transcription=Subquery(latest_trans_subquery[:1]))
30 return assets
31
32
33 def get_original_asset_id(download_url):
34 """
35 Extract the bit from the download url
36 that identifies this image uniquely on loc.gov
37 """
38 if download_url.startswith("http://tile.loc.gov/"):
39 pattern = r"/service:([A-Za-z0-9:\-]+)/"
40 asset_id = re.search(pattern, download_url)
41 if not asset_id:
42 logger.error(
43 "Couldn't find a matching asset ID in download URL %s", download_url
44 )
45 raise AssertionError
46 else:
47 matching_asset_id = asset_id.group(1)
48 logger.debug(
49 "Found asset ID %s in download URL %s", matching_asset_id, download_url
50 )
51 return matching_asset_id
52 else:
53 logger.warning("Download URL doesn't start with tile.loc.gov: %s", download_url)
54 return download_url
55
56
57 def do_bagit_export(assets, export_base_dir, export_filename_base):
58 """
59 Executes bagit.py to turn temp directory into LC-specific bagit strucutre.
60 Builds and exports bagit structure as zip.
61 Uploads zip to S3 if configured.
62 """
63
64 for asset in assets:
65 asset_id = get_original_asset_id(asset.download_url)
66 logger.debug("Exporting asset %s into %s", asset_id, export_base_dir)
67
68 asset_id = asset_id.replace(":", "/")
69 asset_path, asset_filename = os.path.split(asset_id)
70
71 dest_path = os.path.join(export_base_dir, asset_path)
72 os.makedirs(dest_path, exist_ok=True)
73
74 # Build transcription output text file
75 text_output_path = os.path.join(dest_path, "%s.txt" % asset_filename)
76 with open(text_output_path, "w") as f:
77 f.write(asset.latest_transcription or "")
78
79 # Turn Structure into bagit format
80 bagit.make_bag(
81 export_base_dir,
82 {
83 "Content-Access": "web",
84 "Content-Custodian": "dcms",
85 "Content-Process": "crowdsourced",
86 "Content-Type": "textual",
87 "LC-Bag-Id": export_filename_base,
88 "LC-Items": "%d transcriptions" % len(assets),
89 "LC-Project": "gdccrowd",
90 "License-Information": "Public domain",
91 },
92 )
93
94 # Build .zip file of bagit formatted Campaign Folder
95 archive_name = export_base_dir
96 shutil.make_archive(archive_name, "zip", export_base_dir)
97
98 export_filename = "%s.zip" % export_filename_base
99
100 # Upload zip to S3 bucket
101 s3_bucket = getattr(settings, "EXPORT_S3_BUCKET_NAME", None)
102
103 if s3_bucket:
104 logger.debug("Uploading exported bag to S3 bucket %s", s3_bucket)
105 s3 = boto3.resource("s3")
106 s3.Bucket(s3_bucket).upload_file(
107 "%s.zip" % export_base_dir, "%s" % export_filename
108 )
109
110 return HttpResponseRedirect(
111 "https://%s.s3.amazonaws.com/%s" % (s3_bucket, export_filename)
112 )
113 else:
114 # Download zip from local storage
115 with open("%s.zip" % export_base_dir, "rb") as zip_file:
116 response = HttpResponse(zip_file, content_type="application/zip")
117 response["Content-Disposition"] = "attachment; filename=%s" % export_filename
118 return response
119
120
121 class ExportCampaignToCSV(TemplateView):
122 """
123 Exports the most recent transcription for each asset in a campaign
124 """
125
126 @method_decorator(staff_member_required)
127 def get(self, request, *args, **kwargs):
128 asset_qs = Asset.objects.filter(
129 item__project__campaign__slug=self.kwargs["campaign_slug"]
130 )
131 assets = get_latest_transcription_data(asset_qs)
132
133 headers, data = flatten_queryset(
134 assets,
135 field_names=[
136 "item__project__campaign__title",
137 "item__project__title",
138 "item__title",
139 "item__item_id",
140 "title",
141 "transcription_status",
142 "download_url",
143 "latest_transcription",
144 ],
145 extra_verbose_names={
146 "item__project__campaign__title": "Campaign",
147 "item__project__title": "Project",
148 "item__title": "Item",
149 "item__item_id": "ItemId",
150 "item_id": "ItemId",
151 "title": "Asset",
152 "transcription_status": "AssetStatus",
153 "download_url": "DownloadUrl",
154 "latest_transcription": "Transcription",
155 },
156 )
157
158 return export_to_csv_response(
159 "%s.csv" % self.kwargs["campaign_slug"], headers, data
160 )
161
162
163 class ExportItemToBagIt(TemplateView):
164 @method_decorator(staff_member_required)
165 def get(self, request, *args, **kwargs):
166 campaign_slug = self.kwargs["campaign_slug"]
167 project_slug = self.kwargs["project_slug"]
168 item_id = self.kwargs["item_id"]
169
170 asset_qs = Asset.objects.filter(
171 item__project__campaign__slug=campaign_slug,
172 item__project__slug=project_slug,
173 item__item_id=item_id,
174 transcription_status=TranscriptionStatus.COMPLETED,
175 )
176
177 assets = get_latest_transcription_data(asset_qs)
178
179 export_filename_base = "%s-%s-%s" % (campaign_slug, project_slug, item_id)
180
181 with tempfile.TemporaryDirectory(
182 prefix=export_filename_base
183 ) as export_base_dir:
184 return do_bagit_export(assets, export_base_dir, export_filename_base)
185
186
187 class ExportProjectToBagIt(TemplateView):
188 @method_decorator(staff_member_required)
189 def get(self, request, *args, **kwargs):
190 campaign_slug = self.kwargs["campaign_slug"]
191 project_slug = self.kwargs["project_slug"]
192 asset_qs = Asset.objects.filter(
193 item__project__campaign__slug=campaign_slug,
194 item__project__slug=project_slug,
195 transcription_status=TranscriptionStatus.COMPLETED,
196 )
197
198 assets = get_latest_transcription_data(asset_qs)
199
200 export_filename_base = "%s-%s" % (campaign_slug, project_slug)
201
202 with tempfile.TemporaryDirectory(
203 prefix=export_filename_base
204 ) as export_base_dir:
205 return do_bagit_export(assets, export_base_dir, export_filename_base)
206
207
208 class ExportCampaignToBagit(TemplateView):
209 @method_decorator(staff_member_required)
210 def get(self, request, *args, **kwargs):
211 campaign_slug = self.kwargs["campaign_slug"]
212 asset_qs = Asset.objects.filter(
213 item__project__campaign__slug=campaign_slug,
214 transcription_status=TranscriptionStatus.COMPLETED,
215 )
216
217 assets = get_latest_transcription_data(asset_qs)
218
219 export_filename_base = "%s" % (campaign_slug,)
220
221 with tempfile.TemporaryDirectory(
222 prefix=export_filename_base
223 ) as export_base_dir:
224 return do_bagit_export(assets, export_base_dir, export_filename_base)
225
[end of exporter/views.py]
[start of concordia/settings_prod.py]
1 import json
2 import os
3
4 from .secrets import get_secret
5 from .settings_template import *
6
7 LOGGING["handlers"]["stream"]["level"] = "INFO"
8 LOGGING["handlers"]["file"]["level"] = "INFO"
9 LOGGING["handlers"]["file"]["filename"] = "./logs/concordia-web.log"
10 LOGGING["handlers"]["celery"]["level"] = "INFO"
11 LOGGING["handlers"]["celery"]["filename"] = "./logs/concordia-celery.log"
12 LOGGING["loggers"]["django"]["level"] = "INFO"
13 LOGGING["loggers"]["celery"]["level"] = "INFO"
14
15 if os.getenv("AWS"):
16 ENV_NAME = os.getenv("ENV_NAME")
17
18 django_secret_json = get_secret("crowd/%s/Django/SecretKey" % ENV_NAME)
19 django_secret = json.loads(django_secret_json)
20 DJANGO_SECRET_KEY = django_secret["DjangoSecretKey"]
21
22 postgres_secret_json = get_secret("crowd/%s/DB/MasterUserPassword" % ENV_NAME)
23 postgres_secret = json.loads(postgres_secret_json)
24
25 DATABASES["default"].update({"PASSWORD": postgres_secret["password"]})
26
27 smtp_secret_json = get_secret("concordia/SMTP")
28 smtp_secret = json.loads(smtp_secret_json)
29 EMAIL_HOST = smtp_secret["Hostname"]
30 EMAIL_HOST_USER = smtp_secret["Username"]
31 EMAIL_HOST_PASSWORD = smtp_secret["Password"]
32
33 else:
34 DJANGO_SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "changeme")
35 EMAIL_HOST = os.environ.get("EMAIL_HOST", "localhost")
36 EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER", "")
37 EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", "")
38
39 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
40
41 EMAIL_USE_TLS = True
42 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
43 EMAIL_PORT = 587
44 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL", "[email protected]")
45 DEFAULT_TO_EMAIL = DEFAULT_FROM_EMAIL
46
47 CSRF_COOKIE_SECURE = True
48
49 CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "pyamqp://guest@rabbit:5672")
50 CELERY_RESULT_BACKEND = "rpc://"
51
52 S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")
53 EXPORT_S3_BUCKET_NAME = os.getenv("EXPORT_S3_BUCKET_NAME")
54
55 DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
56 AWS_STORAGE_BUCKET_NAME = S3_BUCKET_NAME
57 AWS_DEFAULT_ACL = None # Don't set an ACL on the files, inherit the bucket ACLs
58
59 if CONCORDIA_ENVIRONMENT == "production":
60 MEDIA_URL = "https://crowd-media.loc.gov/"
61 else:
62 MEDIA_URL = "https://%s.s3.amazonaws.com/" % S3_BUCKET_NAME
63
64 ELASTICSEARCH_DSL_AUTOSYNC = os.getenv("ELASTICSEARCH_DSL_AUTOSYNC", False)
65
66 INSTALLED_APPS += ["django_elasticsearch_dsl"]
67
68 ELASTICSEARCH_DSL_SIGNAL_PROCESSOR = (
69 "django_elasticsearch_dsl.signals.RealTimeSignalProcessor"
70 )
71 ELASTICSEARCH_DSL = {
72 "default": {"hosts": os.getenv("ELASTICSEARCH_ENDPOINT", "elk:9200")}
73 }
74
75 # HMAC activation flow provide the two-step registration process,
76 # the user signs up and then completes activation via email instructions.
77
78 REGISTRATION_SALT = "django_registration" # doesn't need to be secret
79
80 SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
81
82 RATELIMIT_BLOCK = os.getenv("RATELIMIT_BLOCK", "").lower() not in ("false", "0")
83
[end of concordia/settings_prod.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/concordia/settings_prod.py b/concordia/settings_prod.py
--- a/concordia/settings_prod.py
+++ b/concordia/settings_prod.py
@@ -80,3 +80,9 @@
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
RATELIMIT_BLOCK = os.getenv("RATELIMIT_BLOCK", "").lower() not in ("false", "0")
+
+# Exporter attribution text for BagIt exports to LC
+ATTRIBUTION_TEXT = (
+ "Transcribed and reviewed by volunteers participating in the "
+ "By The People project at crowd.loc.gov."
+)
diff --git a/exporter/views.py b/exporter/views.py
--- a/exporter/views.py
+++ b/exporter/views.py
@@ -75,6 +75,9 @@
text_output_path = os.path.join(dest_path, "%s.txt" % asset_filename)
with open(text_output_path, "w") as f:
f.write(asset.latest_transcription or "")
+ if hasattr(settings, "ATTRIBUTION_TEXT"):
+ f.write("\n\n")
+ f.write(settings.ATTRIBUTION_TEXT)
# Turn Structure into bagit format
bagit.make_bag(
|
{"golden_diff": "diff --git a/concordia/settings_prod.py b/concordia/settings_prod.py\n--- a/concordia/settings_prod.py\n+++ b/concordia/settings_prod.py\n@@ -80,3 +80,9 @@\n SESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n \n RATELIMIT_BLOCK = os.getenv(\"RATELIMIT_BLOCK\", \"\").lower() not in (\"false\", \"0\")\n+\n+# Exporter attribution text for BagIt exports to LC\n+ATTRIBUTION_TEXT = (\n+ \"Transcribed and reviewed by volunteers participating in the \"\n+ \"By The People project at crowd.loc.gov.\"\n+)\ndiff --git a/exporter/views.py b/exporter/views.py\n--- a/exporter/views.py\n+++ b/exporter/views.py\n@@ -75,6 +75,9 @@\n text_output_path = os.path.join(dest_path, \"%s.txt\" % asset_filename)\n with open(text_output_path, \"w\") as f:\n f.write(asset.latest_transcription or \"\")\n+ if hasattr(settings, \"ATTRIBUTION_TEXT\"):\n+ f.write(\"\\n\\n\")\n+ f.write(settings.ATTRIBUTION_TEXT)\n \n # Turn Structure into bagit format\n bagit.make_bag(\n", "issue": "Include attribution in exported text files\n**Is your feature request related to a problem? Please describe.**\r\nThe plain text files, one per page / asset, which are exported in BagIt form need to have attributions at the bottom of the text.\r\n\r\n**Describe the solution you'd like**\r\nEach text file should contain the attribution \"Transcribed and reviewed by volunteers participating in the By The People project at crowd.loc.gov.\" at the bottom.\r\n\n", "before_files": [{"content": "import os\nimport re\nimport shutil\nimport tempfile\nfrom logging import getLogger\n\nimport bagit\nimport boto3\nfrom django.conf import settings\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.db.models import OuterRef, Subquery\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\nfrom tabular_export.core import export_to_csv_response, flatten_queryset\n\nfrom concordia.models import Asset, Transcription, TranscriptionStatus\n\nlogger = getLogger(__name__)\n\n\ndef get_latest_transcription_data(asset_qs):\n latest_trans_subquery = (\n Transcription.objects.filter(asset=OuterRef(\"pk\"))\n .order_by(\"-pk\")\n .values(\"text\")\n )\n\n assets = asset_qs.annotate(latest_transcription=Subquery(latest_trans_subquery[:1]))\n return assets\n\n\ndef get_original_asset_id(download_url):\n \"\"\"\n Extract the bit from the download url\n that identifies this image uniquely on loc.gov\n \"\"\"\n if download_url.startswith(\"http://tile.loc.gov/\"):\n pattern = r\"/service:([A-Za-z0-9:\\-]+)/\"\n asset_id = re.search(pattern, download_url)\n if not asset_id:\n logger.error(\n \"Couldn't find a matching asset ID in download URL %s\", download_url\n )\n raise AssertionError\n else:\n matching_asset_id = asset_id.group(1)\n logger.debug(\n \"Found asset ID %s in download URL %s\", matching_asset_id, download_url\n )\n return matching_asset_id\n else:\n logger.warning(\"Download URL doesn't start with tile.loc.gov: %s\", download_url)\n return download_url\n\n\ndef do_bagit_export(assets, export_base_dir, export_filename_base):\n \"\"\"\n Executes bagit.py to turn temp directory into LC-specific bagit strucutre.\n Builds and exports bagit structure as zip.\n Uploads zip to S3 if configured.\n \"\"\"\n\n for asset in assets:\n asset_id = get_original_asset_id(asset.download_url)\n logger.debug(\"Exporting asset %s into %s\", asset_id, export_base_dir)\n\n asset_id = asset_id.replace(\":\", 
\"/\")\n asset_path, asset_filename = os.path.split(asset_id)\n\n dest_path = os.path.join(export_base_dir, asset_path)\n os.makedirs(dest_path, exist_ok=True)\n\n # Build transcription output text file\n text_output_path = os.path.join(dest_path, \"%s.txt\" % asset_filename)\n with open(text_output_path, \"w\") as f:\n f.write(asset.latest_transcription or \"\")\n\n # Turn Structure into bagit format\n bagit.make_bag(\n export_base_dir,\n {\n \"Content-Access\": \"web\",\n \"Content-Custodian\": \"dcms\",\n \"Content-Process\": \"crowdsourced\",\n \"Content-Type\": \"textual\",\n \"LC-Bag-Id\": export_filename_base,\n \"LC-Items\": \"%d transcriptions\" % len(assets),\n \"LC-Project\": \"gdccrowd\",\n \"License-Information\": \"Public domain\",\n },\n )\n\n # Build .zip file of bagit formatted Campaign Folder\n archive_name = export_base_dir\n shutil.make_archive(archive_name, \"zip\", export_base_dir)\n\n export_filename = \"%s.zip\" % export_filename_base\n\n # Upload zip to S3 bucket\n s3_bucket = getattr(settings, \"EXPORT_S3_BUCKET_NAME\", None)\n\n if s3_bucket:\n logger.debug(\"Uploading exported bag to S3 bucket %s\", s3_bucket)\n s3 = boto3.resource(\"s3\")\n s3.Bucket(s3_bucket).upload_file(\n \"%s.zip\" % export_base_dir, \"%s\" % export_filename\n )\n\n return HttpResponseRedirect(\n \"https://%s.s3.amazonaws.com/%s\" % (s3_bucket, export_filename)\n )\n else:\n # Download zip from local storage\n with open(\"%s.zip\" % export_base_dir, \"rb\") as zip_file:\n response = HttpResponse(zip_file, content_type=\"application/zip\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % export_filename\n return response\n\n\nclass ExportCampaignToCSV(TemplateView):\n \"\"\"\n Exports the most recent transcription for each asset in a campaign\n \"\"\"\n\n @method_decorator(staff_member_required)\n def get(self, request, *args, **kwargs):\n asset_qs = Asset.objects.filter(\n item__project__campaign__slug=self.kwargs[\"campaign_slug\"]\n )\n assets = get_latest_transcription_data(asset_qs)\n\n headers, data = flatten_queryset(\n assets,\n field_names=[\n \"item__project__campaign__title\",\n \"item__project__title\",\n \"item__title\",\n \"item__item_id\",\n \"title\",\n \"transcription_status\",\n \"download_url\",\n \"latest_transcription\",\n ],\n extra_verbose_names={\n \"item__project__campaign__title\": \"Campaign\",\n \"item__project__title\": \"Project\",\n \"item__title\": \"Item\",\n \"item__item_id\": \"ItemId\",\n \"item_id\": \"ItemId\",\n \"title\": \"Asset\",\n \"transcription_status\": \"AssetStatus\",\n \"download_url\": \"DownloadUrl\",\n \"latest_transcription\": \"Transcription\",\n },\n )\n\n return export_to_csv_response(\n \"%s.csv\" % self.kwargs[\"campaign_slug\"], headers, data\n )\n\n\nclass ExportItemToBagIt(TemplateView):\n @method_decorator(staff_member_required)\n def get(self, request, *args, **kwargs):\n campaign_slug = self.kwargs[\"campaign_slug\"]\n project_slug = self.kwargs[\"project_slug\"]\n item_id = self.kwargs[\"item_id\"]\n\n asset_qs = Asset.objects.filter(\n item__project__campaign__slug=campaign_slug,\n item__project__slug=project_slug,\n item__item_id=item_id,\n transcription_status=TranscriptionStatus.COMPLETED,\n )\n\n assets = get_latest_transcription_data(asset_qs)\n\n export_filename_base = \"%s-%s-%s\" % (campaign_slug, project_slug, item_id)\n\n with tempfile.TemporaryDirectory(\n prefix=export_filename_base\n ) as export_base_dir:\n return do_bagit_export(assets, export_base_dir, export_filename_base)\n\n\nclass 
ExportProjectToBagIt(TemplateView):\n @method_decorator(staff_member_required)\n def get(self, request, *args, **kwargs):\n campaign_slug = self.kwargs[\"campaign_slug\"]\n project_slug = self.kwargs[\"project_slug\"]\n asset_qs = Asset.objects.filter(\n item__project__campaign__slug=campaign_slug,\n item__project__slug=project_slug,\n transcription_status=TranscriptionStatus.COMPLETED,\n )\n\n assets = get_latest_transcription_data(asset_qs)\n\n export_filename_base = \"%s-%s\" % (campaign_slug, project_slug)\n\n with tempfile.TemporaryDirectory(\n prefix=export_filename_base\n ) as export_base_dir:\n return do_bagit_export(assets, export_base_dir, export_filename_base)\n\n\nclass ExportCampaignToBagit(TemplateView):\n @method_decorator(staff_member_required)\n def get(self, request, *args, **kwargs):\n campaign_slug = self.kwargs[\"campaign_slug\"]\n asset_qs = Asset.objects.filter(\n item__project__campaign__slug=campaign_slug,\n transcription_status=TranscriptionStatus.COMPLETED,\n )\n\n assets = get_latest_transcription_data(asset_qs)\n\n export_filename_base = \"%s\" % (campaign_slug,)\n\n with tempfile.TemporaryDirectory(\n prefix=export_filename_base\n ) as export_base_dir:\n return do_bagit_export(assets, export_base_dir, export_filename_base)\n", "path": "exporter/views.py"}, {"content": "import json\nimport os\n\nfrom .secrets import get_secret\nfrom .settings_template import *\n\nLOGGING[\"handlers\"][\"stream\"][\"level\"] = \"INFO\"\nLOGGING[\"handlers\"][\"file\"][\"level\"] = \"INFO\"\nLOGGING[\"handlers\"][\"file\"][\"filename\"] = \"./logs/concordia-web.log\"\nLOGGING[\"handlers\"][\"celery\"][\"level\"] = \"INFO\"\nLOGGING[\"handlers\"][\"celery\"][\"filename\"] = \"./logs/concordia-celery.log\"\nLOGGING[\"loggers\"][\"django\"][\"level\"] = \"INFO\"\nLOGGING[\"loggers\"][\"celery\"][\"level\"] = \"INFO\"\n\nif os.getenv(\"AWS\"):\n ENV_NAME = os.getenv(\"ENV_NAME\")\n\n django_secret_json = get_secret(\"crowd/%s/Django/SecretKey\" % ENV_NAME)\n django_secret = json.loads(django_secret_json)\n DJANGO_SECRET_KEY = django_secret[\"DjangoSecretKey\"]\n\n postgres_secret_json = get_secret(\"crowd/%s/DB/MasterUserPassword\" % ENV_NAME)\n postgres_secret = json.loads(postgres_secret_json)\n\n DATABASES[\"default\"].update({\"PASSWORD\": postgres_secret[\"password\"]})\n\n smtp_secret_json = get_secret(\"concordia/SMTP\")\n smtp_secret = json.loads(smtp_secret_json)\n EMAIL_HOST = smtp_secret[\"Hostname\"]\n EMAIL_HOST_USER = smtp_secret[\"Username\"]\n EMAIL_HOST_PASSWORD = smtp_secret[\"Password\"]\n\nelse:\n DJANGO_SECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"changeme\")\n EMAIL_HOST = os.environ.get(\"EMAIL_HOST\", \"localhost\")\n EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\", \"\")\n EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\", \"\")\n\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nEMAIL_USE_TLS = True\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_PORT = 587\nDEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\", \"[email protected]\")\nDEFAULT_TO_EMAIL = DEFAULT_FROM_EMAIL\n\nCSRF_COOKIE_SECURE = True\n\nCELERY_BROKER_URL = os.getenv(\"CELERY_BROKER_URL\", \"pyamqp://guest@rabbit:5672\")\nCELERY_RESULT_BACKEND = \"rpc://\"\n\nS3_BUCKET_NAME = os.getenv(\"S3_BUCKET_NAME\")\nEXPORT_S3_BUCKET_NAME = os.getenv(\"EXPORT_S3_BUCKET_NAME\")\n\nDEFAULT_FILE_STORAGE = \"storages.backends.s3boto3.S3Boto3Storage\"\nAWS_STORAGE_BUCKET_NAME = S3_BUCKET_NAME\nAWS_DEFAULT_ACL = None # Don't set an ACL on 
the files, inherit the bucket ACLs\n\nif CONCORDIA_ENVIRONMENT == \"production\":\n MEDIA_URL = \"https://crowd-media.loc.gov/\"\nelse:\n MEDIA_URL = \"https://%s.s3.amazonaws.com/\" % S3_BUCKET_NAME\n\nELASTICSEARCH_DSL_AUTOSYNC = os.getenv(\"ELASTICSEARCH_DSL_AUTOSYNC\", False)\n\nINSTALLED_APPS += [\"django_elasticsearch_dsl\"]\n\nELASTICSEARCH_DSL_SIGNAL_PROCESSOR = (\n \"django_elasticsearch_dsl.signals.RealTimeSignalProcessor\"\n)\nELASTICSEARCH_DSL = {\n \"default\": {\"hosts\": os.getenv(\"ELASTICSEARCH_ENDPOINT\", \"elk:9200\")}\n}\n\n# HMAC activation flow provide the two-step registration process,\n# the user signs up and then completes activation via email instructions.\n\nREGISTRATION_SALT = \"django_registration\" # doesn't need to be secret\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nRATELIMIT_BLOCK = os.getenv(\"RATELIMIT_BLOCK\", \"\").lower() not in (\"false\", \"0\")\n", "path": "concordia/settings_prod.py"}]}
| 3,857 | 258 |
gh_patches_debug_13709
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-4687
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search input filter in collection editor might be catching valid names.
In the search bar in the collection editor, currently the regex allows "A-Za-z0-9-_ ". But when an exploration is selected from the dropdown it is displayed as "Name (explorationID)". On trying to make further edits to the search bar the bar is highlighted in red ( as '(' and ')') are invalid characters as per the regex.
One of the demo explorations has title "Welcome to Oppia!" where '!' is not an allowed character in the title field. This is not being caught by the title validators. This also is shown up in red in the search field.
Expected Behavior: The input field does not turn red for valid inputs
</issue>
<code>
[start of core/controllers/collection_editor.py]
1 # coding: utf-8
2
3 # Copyright 2015 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Controllers for the collections editor."""
18
19 from core.controllers import base
20 from core.domain import acl_decorators
21 from core.domain import collection_services
22 from core.domain import rights_manager
23 from core.domain import search_services
24 from core.domain import summary_services
25 from core.platform import models
26 import feconf
27 import utils
28
29 current_user_services = models.Registry.import_current_user_services()
30
31
32 def _require_valid_version(version_from_payload, collection_version):
33 """Check that the payload version matches the given collection version."""
34 if version_from_payload is None:
35 raise base.BaseHandler.InvalidInputException(
36 'Invalid POST request: a version must be specified.')
37
38 if version_from_payload != collection_version:
39 raise base.BaseHandler.InvalidInputException(
40 'Trying to update version %s of collection from version %s, '
41 'which is too old. Please reload the page and try again.'
42 % (collection_version, version_from_payload))
43
44
45 class CollectionEditorHandler(base.BaseHandler):
46 """Base class for all handlers for the collection editor page."""
47 pass
48
49
50 class CollectionEditorPage(CollectionEditorHandler):
51 """The editor page for a single collection."""
52
53 @acl_decorators.can_edit_collection
54 def get(self, collection_id):
55 """Handles GET requests."""
56
57 collection = collection_services.get_collection_by_id(
58 collection_id, strict=False)
59
60 self.values.update({
61 'collection_id': collection.id,
62 'nav_mode': feconf.NAV_MODE_CREATE,
63 'SHOW_COLLECTION_NAVIGATION_TAB_HISTORY': (
64 feconf.SHOW_COLLECTION_NAVIGATION_TAB_HISTORY),
65 'SHOW_COLLECTION_NAVIGATION_TAB_STATS': (
66 feconf.SHOW_COLLECTION_NAVIGATION_TAB_STATS),
67 'TAG_REGEX': feconf.TAG_REGEX,
68 })
69
70 self.render_template('pages/collection_editor/collection_editor.html')
71
72
73 class EditableCollectionDataHandler(CollectionEditorHandler):
74 """A data handler for collections which supports writing."""
75
76 def _require_valid_version(self, version_from_payload, collection_version):
77 """Check that the payload version matches the given collection version.
78 """
79 if version_from_payload is None:
80 raise base.BaseHandler.InvalidInputException(
81 'Invalid POST request: a version must be specified.')
82
83 if version_from_payload != collection_version:
84 raise base.BaseHandler.InvalidInputException(
85 'Trying to update version %s of collection from version %s, '
86 'which is too old. Please reload the page and try again.'
87 % (collection_version, version_from_payload))
88
89 @acl_decorators.can_edit_collection
90 def get(self, collection_id):
91 """Populates the data on the individual collection page."""
92
93 try:
94 # Try to retrieve collection
95 collection_dict = (
96 summary_services.get_learner_collection_dict_by_id(
97 collection_id, self.user,
98 allow_invalid_explorations=True))
99 except Exception as e:
100 raise self.PageNotFoundException(e)
101
102 self.values.update({
103 'collection': collection_dict
104 })
105
106 self.render_json(self.values)
107
108 @acl_decorators.can_edit_collection
109 def put(self, collection_id):
110 """Updates properties of the given collection."""
111
112 collection = collection_services.get_collection_by_id(collection_id)
113 version = self.payload.get('version')
114 self._require_valid_version(version, collection.version)
115
116 commit_message = self.payload.get('commit_message')
117 change_list = self.payload.get('change_list')
118
119 try:
120 collection_services.update_collection(
121 self.user_id, collection_id, change_list, commit_message)
122 except utils.ValidationError as e:
123 raise self.InvalidInputException(e)
124
125 collection_dict = (
126 summary_services.get_learner_collection_dict_by_id(
127 collection_id, self.user,
128 allow_invalid_explorations=True))
129
130 # Send the updated collection back to the frontend.
131 self.values.update({
132 'collection': collection_dict
133 })
134
135 self.render_json(self.values)
136
137
138 class CollectionRightsHandler(CollectionEditorHandler):
139 """Handles management of collection editing rights."""
140
141 @acl_decorators.can_edit_collection
142 def get(self, collection_id):
143 """Gets the editing rights for the given collection.
144
145 Args:
146 collection_id: str. ID for the collection.
147 """
148 (collection, collection_rights) = (
149 collection_services.get_collection_and_collection_rights_by_id(
150 collection_id))
151
152 self.values.update({
153 'can_edit': True,
154 'can_unpublish': rights_manager.check_can_unpublish_activity(
155 self.user, collection_rights),
156 'collection_id': collection.id,
157 'is_private': rights_manager.is_collection_private(collection_id),
158 'owner_names': rights_manager.get_collection_owner_names(
159 collection_id)
160 })
161
162 self.render_json(self.values)
163
164
165 class CollectionPublishHandler(base.BaseHandler):
166 """Handles the publication of the given collection."""
167
168 @acl_decorators.can_publish_collection
169 def put(self, collection_id):
170 """Publishes the given collection."""
171 collection = collection_services.get_collection_by_id(collection_id)
172 version = self.payload.get('version')
173 _require_valid_version(version, collection.version)
174
175 try:
176 collection.validate(strict=True)
177 collection_services.validate_exps_in_collection_are_public(
178 collection)
179 except utils.ValidationError as e:
180 raise self.InvalidInputException(e)
181
182 collection_services.publish_collection_and_update_user_profiles(
183 self.user, collection_id)
184 collection_services.index_collections_given_ids([
185 collection_id])
186
187 collection_rights = rights_manager.get_collection_rights(
188 collection_id, strict=False)
189
190 self.values.update({
191 'can_edit': True,
192 'can_unpublish': rights_manager.check_can_unpublish_activity(
193 self.user, collection_rights),
194 'collection_id': collection.id,
195 'is_private': rights_manager.is_collection_private(collection_id),
196 'owner_names': rights_manager.get_collection_owner_names(
197 collection_id)
198 })
199 self.render_json(self.values)
200
201
202 class CollectionUnpublishHandler(base.BaseHandler):
203 """Handles the unpublication of the given collection."""
204
205 @acl_decorators.can_unpublish_collection
206 def put(self, collection_id):
207 """Unpublishes the given collection."""
208 collection = collection_services.get_collection_by_id(collection_id)
209 version = self.payload.get('version')
210 _require_valid_version(version, collection.version)
211
212 rights_manager.unpublish_collection(self.user, collection_id)
213 search_services.delete_collections_from_search_index([
214 collection_id])
215
216 collection_rights = rights_manager.get_collection_rights(
217 collection_id, strict=False)
218
219 self.values.update({
220 'can_edit': True,
221 'can_unpublish': rights_manager.check_can_unpublish_activity(
222 self.user, collection_rights),
223 'collection_id': collection.id,
224 'is_private': rights_manager.is_collection_private(collection_id),
225 'owner_names': rights_manager.get_collection_owner_names(
226 collection_id)
227 })
228 self.render_json(self.values)
229
230
231 class ExplorationMetadataSearchHandler(base.BaseHandler):
232 """Provides data for exploration search."""
233
234 @acl_decorators.open_access
235 def get(self):
236 """Handles GET requests."""
237 query_string = self.request.get('q')
238
239 search_cursor = self.request.get('cursor', None)
240
241 collection_node_metadata_list, new_search_cursor = (
242 summary_services.get_exp_metadata_dicts_matching_query(
243 query_string, search_cursor, self.user))
244
245 self.values.update({
246 'collection_node_metadata_list': collection_node_metadata_list,
247 'search_cursor': new_search_cursor,
248 })
249
250 self.render_json(self.values)
251
[end of core/controllers/collection_editor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/controllers/collection_editor.py b/core/controllers/collection_editor.py
--- a/core/controllers/collection_editor.py
+++ b/core/controllers/collection_editor.py
@@ -16,6 +16,8 @@
"""Controllers for the collections editor."""
+import base64
+
from core.controllers import base
from core.domain import acl_decorators
from core.domain import collection_services
@@ -234,7 +236,7 @@
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
- query_string = self.request.get('q')
+ query_string = base64.b64decode(self.request.get('q'))
search_cursor = self.request.get('cursor', None)
|
{"golden_diff": "diff --git a/core/controllers/collection_editor.py b/core/controllers/collection_editor.py\n--- a/core/controllers/collection_editor.py\n+++ b/core/controllers/collection_editor.py\n@@ -16,6 +16,8 @@\n \n \"\"\"Controllers for the collections editor.\"\"\"\n \n+import base64\n+\n from core.controllers import base\n from core.domain import acl_decorators\n from core.domain import collection_services\n@@ -234,7 +236,7 @@\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n- query_string = self.request.get('q')\n+ query_string = base64.b64decode(self.request.get('q'))\n \n search_cursor = self.request.get('cursor', None)\n", "issue": "Search input filter in collection editor might be catching valid names.\nIn the search bar in the collection editor, currently the regex allows \"A-Za-z0-9-_ \". But when an exploration is selected from the dropdown it is displayed as \"Name (explorationID)\". On trying to make further edits to the search bar the bar is highlighted in red ( as '(' and ')') are invalid characters as per the regex. \r\nOne of the demo explorations has title \"Welcome to Oppia!\" where '!' is not an allowed character in the title field. This is not being caught by the title validators. This also is shown up in red in the search field. \r\n\r\nExpected Behavior: The input field does not turn red for valid inputs\r\n\nSearch input filter in collection editor might be catching valid names.\nIn the search bar in the collection editor, currently the regex allows \"A-Za-z0-9-_ \". But when an exploration is selected from the dropdown it is displayed as \"Name (explorationID)\". On trying to make further edits to the search bar the bar is highlighted in red ( as '(' and ')') are invalid characters as per the regex. \r\nOne of the demo explorations has title \"Welcome to Oppia!\" where '!' is not an allowed character in the title field. This is not being caught by the title validators. This also is shown up in red in the search field. \r\n\r\nExpected Behavior: The input field does not turn red for valid inputs\r\n\n", "before_files": [{"content": "# coding: utf-8\n\n# Copyright 2015 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the collections editor.\"\"\"\n\nfrom core.controllers import base\nfrom core.domain import acl_decorators\nfrom core.domain import collection_services\nfrom core.domain import rights_manager\nfrom core.domain import search_services\nfrom core.domain import summary_services\nfrom core.platform import models\nimport feconf\nimport utils\n\ncurrent_user_services = models.Registry.import_current_user_services()\n\n\ndef _require_valid_version(version_from_payload, collection_version):\n \"\"\"Check that the payload version matches the given collection version.\"\"\"\n if version_from_payload is None:\n raise base.BaseHandler.InvalidInputException(\n 'Invalid POST request: a version must be specified.')\n\n if version_from_payload != collection_version:\n raise base.BaseHandler.InvalidInputException(\n 'Trying to update version %s of collection from version %s, '\n 'which is too old. Please reload the page and try again.'\n % (collection_version, version_from_payload))\n\n\nclass CollectionEditorHandler(base.BaseHandler):\n \"\"\"Base class for all handlers for the collection editor page.\"\"\"\n pass\n\n\nclass CollectionEditorPage(CollectionEditorHandler):\n \"\"\"The editor page for a single collection.\"\"\"\n\n @acl_decorators.can_edit_collection\n def get(self, collection_id):\n \"\"\"Handles GET requests.\"\"\"\n\n collection = collection_services.get_collection_by_id(\n collection_id, strict=False)\n\n self.values.update({\n 'collection_id': collection.id,\n 'nav_mode': feconf.NAV_MODE_CREATE,\n 'SHOW_COLLECTION_NAVIGATION_TAB_HISTORY': (\n feconf.SHOW_COLLECTION_NAVIGATION_TAB_HISTORY),\n 'SHOW_COLLECTION_NAVIGATION_TAB_STATS': (\n feconf.SHOW_COLLECTION_NAVIGATION_TAB_STATS),\n 'TAG_REGEX': feconf.TAG_REGEX,\n })\n\n self.render_template('pages/collection_editor/collection_editor.html')\n\n\nclass EditableCollectionDataHandler(CollectionEditorHandler):\n \"\"\"A data handler for collections which supports writing.\"\"\"\n\n def _require_valid_version(self, version_from_payload, collection_version):\n \"\"\"Check that the payload version matches the given collection version.\n \"\"\"\n if version_from_payload is None:\n raise base.BaseHandler.InvalidInputException(\n 'Invalid POST request: a version must be specified.')\n\n if version_from_payload != collection_version:\n raise base.BaseHandler.InvalidInputException(\n 'Trying to update version %s of collection from version %s, '\n 'which is too old. 
Please reload the page and try again.'\n % (collection_version, version_from_payload))\n\n @acl_decorators.can_edit_collection\n def get(self, collection_id):\n \"\"\"Populates the data on the individual collection page.\"\"\"\n\n try:\n # Try to retrieve collection\n collection_dict = (\n summary_services.get_learner_collection_dict_by_id(\n collection_id, self.user,\n allow_invalid_explorations=True))\n except Exception as e:\n raise self.PageNotFoundException(e)\n\n self.values.update({\n 'collection': collection_dict\n })\n\n self.render_json(self.values)\n\n @acl_decorators.can_edit_collection\n def put(self, collection_id):\n \"\"\"Updates properties of the given collection.\"\"\"\n\n collection = collection_services.get_collection_by_id(collection_id)\n version = self.payload.get('version')\n self._require_valid_version(version, collection.version)\n\n commit_message = self.payload.get('commit_message')\n change_list = self.payload.get('change_list')\n\n try:\n collection_services.update_collection(\n self.user_id, collection_id, change_list, commit_message)\n except utils.ValidationError as e:\n raise self.InvalidInputException(e)\n\n collection_dict = (\n summary_services.get_learner_collection_dict_by_id(\n collection_id, self.user,\n allow_invalid_explorations=True))\n\n # Send the updated collection back to the frontend.\n self.values.update({\n 'collection': collection_dict\n })\n\n self.render_json(self.values)\n\n\nclass CollectionRightsHandler(CollectionEditorHandler):\n \"\"\"Handles management of collection editing rights.\"\"\"\n\n @acl_decorators.can_edit_collection\n def get(self, collection_id):\n \"\"\"Gets the editing rights for the given collection.\n\n Args:\n collection_id: str. ID for the collection.\n \"\"\"\n (collection, collection_rights) = (\n collection_services.get_collection_and_collection_rights_by_id(\n collection_id))\n\n self.values.update({\n 'can_edit': True,\n 'can_unpublish': rights_manager.check_can_unpublish_activity(\n self.user, collection_rights),\n 'collection_id': collection.id,\n 'is_private': rights_manager.is_collection_private(collection_id),\n 'owner_names': rights_manager.get_collection_owner_names(\n collection_id)\n })\n\n self.render_json(self.values)\n\n\nclass CollectionPublishHandler(base.BaseHandler):\n \"\"\"Handles the publication of the given collection.\"\"\"\n\n @acl_decorators.can_publish_collection\n def put(self, collection_id):\n \"\"\"Publishes the given collection.\"\"\"\n collection = collection_services.get_collection_by_id(collection_id)\n version = self.payload.get('version')\n _require_valid_version(version, collection.version)\n\n try:\n collection.validate(strict=True)\n collection_services.validate_exps_in_collection_are_public(\n collection)\n except utils.ValidationError as e:\n raise self.InvalidInputException(e)\n\n collection_services.publish_collection_and_update_user_profiles(\n self.user, collection_id)\n collection_services.index_collections_given_ids([\n collection_id])\n\n collection_rights = rights_manager.get_collection_rights(\n collection_id, strict=False)\n\n self.values.update({\n 'can_edit': True,\n 'can_unpublish': rights_manager.check_can_unpublish_activity(\n self.user, collection_rights),\n 'collection_id': collection.id,\n 'is_private': rights_manager.is_collection_private(collection_id),\n 'owner_names': rights_manager.get_collection_owner_names(\n collection_id)\n })\n self.render_json(self.values)\n\n\nclass CollectionUnpublishHandler(base.BaseHandler):\n \"\"\"Handles the 
unpublication of the given collection.\"\"\"\n\n @acl_decorators.can_unpublish_collection\n def put(self, collection_id):\n \"\"\"Unpublishes the given collection.\"\"\"\n collection = collection_services.get_collection_by_id(collection_id)\n version = self.payload.get('version')\n _require_valid_version(version, collection.version)\n\n rights_manager.unpublish_collection(self.user, collection_id)\n search_services.delete_collections_from_search_index([\n collection_id])\n\n collection_rights = rights_manager.get_collection_rights(\n collection_id, strict=False)\n\n self.values.update({\n 'can_edit': True,\n 'can_unpublish': rights_manager.check_can_unpublish_activity(\n self.user, collection_rights),\n 'collection_id': collection.id,\n 'is_private': rights_manager.is_collection_private(collection_id),\n 'owner_names': rights_manager.get_collection_owner_names(\n collection_id)\n })\n self.render_json(self.values)\n\n\nclass ExplorationMetadataSearchHandler(base.BaseHandler):\n \"\"\"Provides data for exploration search.\"\"\"\n\n @acl_decorators.open_access\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n query_string = self.request.get('q')\n\n search_cursor = self.request.get('cursor', None)\n\n collection_node_metadata_list, new_search_cursor = (\n summary_services.get_exp_metadata_dicts_matching_query(\n query_string, search_cursor, self.user))\n\n self.values.update({\n 'collection_node_metadata_list': collection_node_metadata_list,\n 'search_cursor': new_search_cursor,\n })\n\n self.render_json(self.values)\n", "path": "core/controllers/collection_editor.py"}]}
| 3,191 | 160 |
gh_patches_debug_23355
|
rasdani/github-patches
|
git_diff
|
ansible__awx-14105
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ad_hoc_command: execution_environment option is ignored
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)
### Bug Summary
The `execution_environment` option in the `ad_hoc_command` module is ignored and the command runs with the default EE (`AWX EE (latest)`).
### AWX version
22.3.0
### Select the relevant components
- [ ] UI
- [ ] UI (tech preview)
- [ ] API
- [ ] Docs
- [X] Collection
- [ ] CLI
- [ ] Other
### Installation method
N/A
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
Run with the `execution_environment` option in the `awx.awx.ad_hoc_command` module, as in the following playbook:
```yaml
- name: Ad Hoc command test
awx.awx.ad_hoc_command:
inventory: Demo Inventory
credential: Demo Credential
module_name: command
module_args: echo I <3 Ansible
execution_environment: my_ee
wait: true
```
### Expected results
Runs in the execution environment specified by the execution_environment option.
### Actual results
The execution_environment option is ignored and the command runs with the default EE (`AWX EE (latest)`).
### Additional information
_No response_
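For anyone trying to confirm the behaviour, one rough way to inspect which execution environment a launched ad hoc command actually used is to read the command back from the controller API. The host, token, and exact response fields below are assumptions; adjust them for your installation:

```python
import requests

# Sketch only: reads back an ad hoc command to see which EE it ran with.
# CONTROLLER, TOKEN and CMD_ID are placeholders for your environment.
CONTROLLER = "https://awx.example.com"
TOKEN = "<oauth2-token-with-read-access>"
CMD_ID = 42  # id returned by the awx.awx.ad_hoc_command task

resp = requests.get(
    f"{CONTROLLER}/api/v2/ad_hoc_commands/{CMD_ID}/",
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()
data = resp.json()

# With the bug described above, this still points at the default
# "AWX EE (latest)" even though execution_environment was set.
print(data.get("execution_environment"))
print(data.get("summary_fields", {}).get("execution_environment"))
```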
</issue>
<code>
[start of awx_collection/plugins/modules/ad_hoc_command.py]
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4
5 # (c) 2020, John Westcott IV <[email protected]>
6 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
7
8 from __future__ import absolute_import, division, print_function
9
10 __metaclass__ = type
11
12
13 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
14
15 DOCUMENTATION = '''
16 ---
17 module: ad_hoc_command
18 author: "John Westcott IV (@john-westcott-iv)"
19 version_added: "4.0.0"
20 short_description: create, update, or destroy Automation Platform Controller ad hoc commands.
21 description:
22 - Create, update, or destroy Automation Platform Controller ad hoc commands. See
23 U(https://www.ansible.com/tower) for an overview.
24 options:
25 job_type:
26 description:
27 - Job_type to use for the ad hoc command.
28 type: str
29 choices: [ 'run', 'check' ]
30 execution_environment:
31 description:
32 - Execution Environment to use for the ad hoc command.
33 required: False
34 type: str
35 inventory:
36 description:
37 - Inventory to use for the ad hoc command.
38 required: True
39 type: str
40 limit:
41 description:
42 - Limit to use for the ad hoc command.
43 type: str
44 credential:
45 description:
46 - Credential to use for ad hoc command.
47 required: True
48 type: str
49 module_name:
50 description:
51 - The Ansible module to execute.
52 required: True
53 type: str
54 module_args:
55 description:
56 - The arguments to pass to the module.
57 type: str
58 forks:
59 description:
60 - The number of forks to use for this ad hoc execution.
61 type: int
62 verbosity:
63 description:
64 - Verbosity level for this ad hoc command run
65 type: int
66 choices: [ 0, 1, 2, 3, 4, 5 ]
67 extra_vars:
68 description:
69 - Extra variables to use for the ad hoc command..
70 type: dict
71 become_enabled:
72 description:
73 - If the become flag should be set.
74 type: bool
75 diff_mode:
76 description:
77 - Show the changes made by Ansible tasks where supported
78 type: bool
79 wait:
80 description:
81 - Wait for the command to complete.
82 default: False
83 type: bool
84 interval:
85 description:
86 - The interval to request an update from the controller.
87 default: 2
88 type: float
89 timeout:
90 description:
91 - If waiting for the command to complete this will abort after this
92 amount of seconds
93 type: int
94 extends_documentation_fragment: awx.awx.auth
95 '''
96
97 EXAMPLES = '''
98 '''
99
100 RETURN = '''
101 id:
102 description: id of the newly launched command
103 returned: success
104 type: int
105 sample: 86
106 status:
107 description: status of newly launched command
108 returned: success
109 type: str
110 sample: pending
111 '''
112
113 from ..module_utils.controller_api import ControllerAPIModule
114
115
116 def main():
117 # Any additional arguments that are not fields of the item can be added here
118 argument_spec = dict(
119 job_type=dict(choices=['run', 'check']),
120 inventory=dict(required=True),
121 limit=dict(),
122 credential=dict(required=True),
123 module_name=dict(required=True),
124 module_args=dict(),
125 forks=dict(type='int'),
126 verbosity=dict(type='int', choices=[0, 1, 2, 3, 4, 5]),
127 extra_vars=dict(type='dict'),
128 become_enabled=dict(type='bool'),
129 diff_mode=dict(type='bool'),
130 wait=dict(default=False, type='bool'),
131 interval=dict(default=2.0, type='float'),
132 timeout=dict(type='int'),
133 execution_environment=dict(),
134 )
135
136 # Create a module for ourselves
137 module = ControllerAPIModule(argument_spec=argument_spec)
138
139 # Extract our parameters
140 inventory = module.params.get('inventory')
141 credential = module.params.get('credential')
142 module_name = module.params.get('module_name')
143 module_args = module.params.get('module_args')
144
145 wait = module.params.get('wait')
146 interval = module.params.get('interval')
147 timeout = module.params.get('timeout')
148
149 # Create a datastructure to pass into our command launch
150 post_data = {
151 'module_name': module_name,
152 'module_args': module_args,
153 }
154 for arg in ['job_type', 'limit', 'forks', 'verbosity', 'extra_vars', 'become_enabled', 'diff_mode']:
155 if module.params.get(arg):
156 post_data[arg] = module.params.get(arg)
157
158 # Attempt to look up the related items the user specified (these will fail the module if not found)
159 post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)
160 post_data['credential'] = module.resolve_name_to_id('credentials', credential)
161
162 # Launch the ad hoc command
163 results = module.post_endpoint('ad_hoc_commands', **{'data': post_data})
164
165 if results['status_code'] != 201:
166 module.fail_json(msg="Failed to launch command, see response for details", **{'response': results})
167
168 if not wait:
169 module.exit_json(
170 **{
171 'changed': True,
172 'id': results['json']['id'],
173 'status': results['json']['status'],
174 }
175 )
176
177 # Invoke wait function
178 results = module.wait_on_url(url=results['json']['url'], object_name=module_name, object_type='Ad Hoc Command', timeout=timeout, interval=interval)
179
180 module.exit_json(
181 **{
182 'changed': True,
183 'id': results['json']['id'],
184 'status': results['json']['status'],
185 }
186 )
187
188
189 if __name__ == '__main__':
190 main()
191
[end of awx_collection/plugins/modules/ad_hoc_command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/awx_collection/plugins/modules/ad_hoc_command.py b/awx_collection/plugins/modules/ad_hoc_command.py
--- a/awx_collection/plugins/modules/ad_hoc_command.py
+++ b/awx_collection/plugins/modules/ad_hoc_command.py
@@ -145,6 +145,7 @@
wait = module.params.get('wait')
interval = module.params.get('interval')
timeout = module.params.get('timeout')
+ execution_environment = module.params.get('execution_environment')
# Create a datastructure to pass into our command launch
post_data = {
@@ -158,6 +159,8 @@
# Attempt to look up the related items the user specified (these will fail the module if not found)
post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)
post_data['credential'] = module.resolve_name_to_id('credentials', credential)
+ if execution_environment:
+ post_data['execution_environment'] = module.resolve_name_to_id('execution_environments', execution_environment)
# Launch the ad hoc command
results = module.post_endpoint('ad_hoc_commands', **{'data': post_data})
|
{"golden_diff": "diff --git a/awx_collection/plugins/modules/ad_hoc_command.py b/awx_collection/plugins/modules/ad_hoc_command.py\n--- a/awx_collection/plugins/modules/ad_hoc_command.py\n+++ b/awx_collection/plugins/modules/ad_hoc_command.py\n@@ -145,6 +145,7 @@\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n+ execution_environment = module.params.get('execution_environment')\n \n # Create a datastructure to pass into our command launch\n post_data = {\n@@ -158,6 +159,8 @@\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)\n post_data['credential'] = module.resolve_name_to_id('credentials', credential)\n+ if execution_environment:\n+ post_data['execution_environment'] = module.resolve_name_to_id('execution_environments', execution_environment)\n \n # Launch the ad hoc command\n results = module.post_endpoint('ad_hoc_commands', **{'data': post_data})\n", "issue": "ad_hoc_command: execution_environment option is ignored\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\r\n\r\n### Bug Summary\r\n\r\nThe `execution_environment` option in `ad_hoc_command` module is ignored and runs with the default EE (`AWX EE (latest)`).\r\n\r\n\r\n\r\n### AWX version\r\n\r\n22.3.0\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [ ] UI (tech preview)\r\n- [ ] API\r\n- [ ] Docs\r\n- [X] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nN/A\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n\r\n\r\nRun with `execution_environment` option in the `awx.awx.ad_hoc_command` module. 
As in the following Playbook.\r\n\r\n```yaml\r\n - name: Ad Hoc command test\r\n awx.awx.ad_hoc_command:\r\n inventory: Demo Inventory\r\n credential: Demo Credential \r\n module_name: command\r\n module_args: echo I <3 Ansible\r\n execution_environment: my_ee\r\n wait: true\r\n````\r\n\r\n\r\n### Expected results\r\n\r\nRuns in the execution environment specified by the execution_environment option.\r\n\r\n\r\n### Actual results\r\n\r\nThe execution_environment option is ignored and runs with the default EE (`AWX EE (latest)`).\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n\n# (c) 2020, John Westcott IV <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: ad_hoc_command\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nversion_added: \"4.0.0\"\nshort_description: create, update, or destroy Automation Platform Controller ad hoc commands.\ndescription:\n - Create, update, or destroy Automation Platform Controller ad hoc commands. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n job_type:\n description:\n - Job_type to use for the ad hoc command.\n type: str\n choices: [ 'run', 'check' ]\n execution_environment:\n description:\n - Execution Environment to use for the ad hoc command.\n required: False\n type: str\n inventory:\n description:\n - Inventory to use for the ad hoc command.\n required: True\n type: str\n limit:\n description:\n - Limit to use for the ad hoc command.\n type: str\n credential:\n description:\n - Credential to use for ad hoc command.\n required: True\n type: str\n module_name:\n description:\n - The Ansible module to execute.\n required: True\n type: str\n module_args:\n description:\n - The arguments to pass to the module.\n type: str\n forks:\n description:\n - The number of forks to use for this ad hoc execution.\n type: int\n verbosity:\n description:\n - Verbosity level for this ad hoc command run\n type: int\n choices: [ 0, 1, 2, 3, 4, 5 ]\n extra_vars:\n description:\n - Extra variables to use for the ad hoc command..\n type: dict\n become_enabled:\n description:\n - If the become flag should be set.\n type: bool\n diff_mode:\n description:\n - Show the changes made by Ansible tasks where supported\n type: bool\n wait:\n description:\n - Wait for the command to complete.\n default: False\n type: bool\n interval:\n description:\n - The interval to request an update from the controller.\n default: 2\n type: float\n timeout:\n description:\n - If waiting for the command to complete this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n'''\n\nRETURN = '''\nid:\n description: id of the newly launched command\n returned: success\n type: int\n sample: 86\nstatus:\n description: status of newly launched command\n returned: success\n type: str\n sample: pending\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n job_type=dict(choices=['run', 'check']),\n inventory=dict(required=True),\n limit=dict(),\n credential=dict(required=True),\n 
module_name=dict(required=True),\n module_args=dict(),\n forks=dict(type='int'),\n verbosity=dict(type='int', choices=[0, 1, 2, 3, 4, 5]),\n extra_vars=dict(type='dict'),\n become_enabled=dict(type='bool'),\n diff_mode=dict(type='bool'),\n wait=dict(default=False, type='bool'),\n interval=dict(default=2.0, type='float'),\n timeout=dict(type='int'),\n execution_environment=dict(),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n inventory = module.params.get('inventory')\n credential = module.params.get('credential')\n module_name = module.params.get('module_name')\n module_args = module.params.get('module_args')\n\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n # Create a datastructure to pass into our command launch\n post_data = {\n 'module_name': module_name,\n 'module_args': module_args,\n }\n for arg in ['job_type', 'limit', 'forks', 'verbosity', 'extra_vars', 'become_enabled', 'diff_mode']:\n if module.params.get(arg):\n post_data[arg] = module.params.get(arg)\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)\n post_data['credential'] = module.resolve_name_to_id('credentials', credential)\n\n # Launch the ad hoc command\n results = module.post_endpoint('ad_hoc_commands', **{'data': post_data})\n\n if results['status_code'] != 201:\n module.fail_json(msg=\"Failed to launch command, see response for details\", **{'response': results})\n\n if not wait:\n module.exit_json(\n **{\n 'changed': True,\n 'id': results['json']['id'],\n 'status': results['json']['status'],\n }\n )\n\n # Invoke wait function\n results = module.wait_on_url(url=results['json']['url'], object_name=module_name, object_type='Ad Hoc Command', timeout=timeout, interval=interval)\n\n module.exit_json(\n **{\n 'changed': True,\n 'id': results['json']['id'],\n 'status': results['json']['status'],\n }\n )\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/ad_hoc_command.py"}]}
| 2,738 | 256 |
gh_patches_debug_42349
|
rasdani/github-patches
|
git_diff
|
google-deepmind__optax-189
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__all__ is currently not useful in __init__.py
`__all__` in the `__init__.py` overrides which functions are imported when a user does `from optax import *`. It seems like every function in that file should be exposed through a wildcard import, so there is no need for the `__all__`. Besides being redundant, having it creates opportunities for bugs: right now, many of the functions (e.g. `maybe_update`, `keep_params_nonnegative`) are imported but not exposed in `__all__`. I believe it should be removed, and would be happy to create a PR if that makes sense.
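A quick way to see the effect (checked against the 0.0.9 listing below; treat it as an illustration rather than part of the proposed fix):

```python
# Compares what optax exposes as public attributes with what __all__
# actually exports on `from optax import *`.
import optax

exported = set(optax.__all__)
public = {name for name in dir(optax) if not name.startswith("_")}

# With the __init__.py below, names such as 'maybe_update' and
# 'keep_params_nonnegative' show up here: importable as attributes,
# but silently dropped by a wildcard import.
print(sorted(public - exported))
```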
</issue>
<code>
[start of optax/__init__.py]
1 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Optax: composable gradient processing and optimization, in JAX."""
16
17 from optax._src.alias import adabelief
18 from optax._src.alias import adafactor
19 from optax._src.alias import adagrad
20 from optax._src.alias import adam
21 from optax._src.alias import adamw
22 from optax._src.alias import dpsgd
23 from optax._src.alias import fromage
24 from optax._src.alias import lamb
25 from optax._src.alias import lars
26 from optax._src.alias import noisy_sgd
27 from optax._src.alias import radam
28 from optax._src.alias import rmsprop
29 from optax._src.alias import sgd
30 from optax._src.alias import sm3
31 from optax._src.alias import yogi
32 from optax._src.base import EmptyState
33 from optax._src.base import GradientTransformation
34 from optax._src.base import identity
35 from optax._src.base import OptState
36 from optax._src.base import Params
37 from optax._src.base import Schedule
38 from optax._src.base import TransformInitFn
39 from optax._src.base import TransformUpdateFn
40 from optax._src.base import Updates
41 from optax._src.clipping import adaptive_grad_clip
42 from optax._src.clipping import AdaptiveGradClipState
43 from optax._src.clipping import clip
44 from optax._src.clipping import clip_by_block_rms
45 from optax._src.clipping import clip_by_global_norm
46 from optax._src.clipping import ClipByGlobalNormState
47 from optax._src.clipping import ClipState
48 from optax._src.combine import chain
49 from optax._src.combine import multi_transform
50 from optax._src.combine import MultiTransformState
51 from optax._src.constrain import keep_params_nonnegative
52 from optax._src.constrain import NonNegativeParamsState
53 from optax._src.constrain import zero_nans
54 from optax._src.constrain import ZeroNansState
55 from optax._src.control_variates import control_delta_method
56 from optax._src.control_variates import control_variates_jacobians
57 from optax._src.control_variates import moving_avg_baseline
58 from optax._src.factorized import FactoredState
59 from optax._src.factorized import scale_by_factored_rms
60 from optax._src.linear_algebra import global_norm
61 from optax._src.linear_algebra import matrix_inverse_pth_root
62 from optax._src.linear_algebra import power_iteration
63 from optax._src.lookahead import lookahead
64 from optax._src.lookahead import LookaheadParams
65 from optax._src.lookahead import LookaheadState
66 from optax._src.loss import cosine_distance
67 from optax._src.loss import cosine_similarity
68 from optax._src.loss import huber_loss
69 from optax._src.loss import l2_loss
70 from optax._src.loss import log_cosh
71 from optax._src.loss import sigmoid_binary_cross_entropy
72 from optax._src.loss import smooth_labels
73 from optax._src.loss import softmax_cross_entropy
74 from optax._src.privacy import differentially_private_aggregate
75 from optax._src.privacy import DifferentiallyPrivateAggregateState
76 from optax._src.schedule import constant_schedule
77 from optax._src.schedule import cosine_decay_schedule
78 from optax._src.schedule import cosine_onecycle_schedule
79 from optax._src.schedule import exponential_decay
80 from optax._src.schedule import inject_hyperparams
81 from optax._src.schedule import InjectHyperparamsState
82 from optax._src.schedule import join_schedules
83 from optax._src.schedule import linear_onecycle_schedule
84 from optax._src.schedule import linear_schedule
85 from optax._src.schedule import piecewise_constant_schedule
86 from optax._src.schedule import piecewise_interpolate_schedule
87 from optax._src.schedule import polynomial_schedule
88 from optax._src.schedule import sgdr_schedule
89 from optax._src.schedule import warmup_cosine_decay_schedule
90 from optax._src.schedule import warmup_exponential_decay_schedule
91 from optax._src.second_order import fisher_diag
92 from optax._src.second_order import hessian_diag
93 from optax._src.second_order import hvp
94 from optax._src.stochastic_gradient_estimators import measure_valued_jacobians
95 from optax._src.stochastic_gradient_estimators import pathwise_jacobians
96 from optax._src.stochastic_gradient_estimators import score_function_jacobians
97 from optax._src.transform import add_decayed_weights
98 from optax._src.transform import add_noise
99 from optax._src.transform import AddDecayedWeightsState
100 from optax._src.transform import additive_weight_decay
101 from optax._src.transform import AdditiveWeightDecayState
102 from optax._src.transform import AddNoiseState
103 from optax._src.transform import apply_every
104 from optax._src.transform import ApplyEvery
105 from optax._src.transform import centralize
106 from optax._src.transform import ema
107 from optax._src.transform import EmaState
108 from optax._src.transform import scale
109 from optax._src.transform import scale_by_adam
110 from optax._src.transform import scale_by_belief
111 from optax._src.transform import scale_by_param_block_norm
112 from optax._src.transform import scale_by_param_block_rms
113 from optax._src.transform import scale_by_radam
114 from optax._src.transform import scale_by_rms
115 from optax._src.transform import scale_by_rss
116 from optax._src.transform import scale_by_schedule
117 from optax._src.transform import scale_by_sm3
118 from optax._src.transform import scale_by_stddev
119 from optax._src.transform import scale_by_trust_ratio
120 from optax._src.transform import scale_by_yogi
121 from optax._src.transform import ScaleByAdamState
122 from optax._src.transform import ScaleByFromageState
123 from optax._src.transform import ScaleByRmsState
124 from optax._src.transform import ScaleByRssState
125 from optax._src.transform import ScaleByRStdDevState
126 from optax._src.transform import ScaleByScheduleState
127 from optax._src.transform import ScaleBySM3State
128 from optax._src.transform import ScaleByTrustRatioState
129 from optax._src.transform import ScaleState
130 from optax._src.transform import trace
131 from optax._src.transform import TraceState
132 from optax._src.update import apply_updates
133 from optax._src.update import incremental_update
134 from optax._src.update import periodic_update
135 from optax._src.utils import multi_normal
136 from optax._src.wrappers import apply_if_finite
137 from optax._src.wrappers import ApplyIfFiniteState
138 from optax._src.wrappers import flatten
139 from optax._src.wrappers import masked
140 from optax._src.wrappers import MaskedState
141 from optax._src.wrappers import maybe_update
142 from optax._src.wrappers import MaybeUpdateState
143 from optax._src.wrappers import MultiSteps
144 from optax._src.wrappers import MultiStepsState
145
146 __version__ = "0.0.9"
147
148 __all__ = (
149 "adabelief",
150 "adafactor",
151 "adagrad",
152 "adam",
153 "adamw",
154 "AdaptiveGradClipState",
155 "adaptive_grad_clip",
156 "add_decayed_weights",
157 "add_noise",
158 "AddDecayedWeightsState",
159 "additive_weight_decay",
160 "AdditiveWeightDecayState",
161 "AddNoiseState",
162 "apply_if_finite",
163 "apply_every",
164 "apply_updates",
165 "ApplyEvery",
166 "ApplyIfFiniteState",
167 "centralize",
168 "chain",
169 "clip",
170 "clip_by_block_rms",
171 "clip_by_global_norm",
172 "ClipByGlobalNormState",
173 "ClipState",
174 "constant_schedule",
175 "control_delta_method",
176 "control_variates_jacobians",
177 "cosine_decay_schedule",
178 "cosine_distance",
179 "cosine_onecycle_schedule",
180 "cosine_similarity",
181 "dpsgd",
182 "differentially_private_aggregate",
183 "DifferentiallyPrivateAggregateState",
184 "ema",
185 "EmaState",
186 "EmptyState",
187 "exponential_decay",
188 "FactoredState",
189 "fisher_diag",
190 "flatten",
191 "fromage",
192 "global_norm",
193 "GradientTransformation",
194 "hessian_diag",
195 "huber_loss",
196 "hvp",
197 "identity",
198 "incremental_update",
199 "inject_hyperparams",
200 "InjectHyperparamsState",
201 "join_schedules",
202 "lamb",
203 "lars",
204 "log_cosh",
205 "lookahead",
206 "LookaheadParams",
207 "LookaheadState",
208 "l2_loss",
209 "linear_onecycle_schedule",
210 "linear_schedule",
211 "matrix_inverse_pth_root",
212 "masked",
213 "MaskedState",
214 "multi_transform",
215 "MultiTransformState",
216 "measure_valued_jacobians",
217 "moving_avg_baseline",
218 "multi_normal",
219 "noisy_sgd",
220 "OptState",
221 "Params",
222 "pathwise_jacobians",
223 "periodic_update",
224 "piecewise_constant_schedule",
225 "piecewise_interpolate_schedule",
226 "polynomial_schedule",
227 "power_iteration",
228 "radam",
229 "rmsprop",
230 "scale",
231 "scale_by_adam",
232 "scale_by_belief",
233 "scale_by_param_block_norm",
234 "scale_by_param_block_rms",
235 "scale_by_radam",
236 "scale_by_rms",
237 "scale_by_rss",
238 "scale_by_schedule",
239 "scale_by_sm3",
240 "scale_by_stddev",
241 "scale_by_trust_ratio",
242 "scale_by_yogi",
243 "ScaleByAdamState",
244 "ScaleByFromageState",
245 "ScaleByRmsState",
246 "ScaleByRssState",
247 "ScaleByRStdDevState",
248 "ScaleByScheduleState",
249 "ScaleBySM3State",
250 "ScaleByTrustRatioState",
251 "ScaleState",
252 "Schedule",
253 "score_function_jacobians",
254 "sgd",
255 "sgdr_schedule",
256 "sm3",
257 "sigmoid_binary_cross_entropy",
258 "smooth_labels",
259 "softmax_cross_entropy",
260 "trace",
261 "TraceState",
262 "TransformInitFn",
263 "TransformUpdateFn",
264 "Updates",
265 "warmup_cosine_decay_schedule",
266 "warmup_exponential_decay_schedule",
267 "yogi",
268 )
269
270 # _________________________________________
271 # / Please don't use symbols in `_src` they \
272 # \ are not part of the Optax public API. /
273 # -----------------------------------------
274 # \ ^__^
275 # \ (oo)\_______
276 # (__)\ )\/\
277 # ||----w |
278 # || ||
279 #
280
[end of optax/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optax/__init__.py b/optax/__init__.py
--- a/optax/__init__.py
+++ b/optax/__init__.py
@@ -151,24 +151,24 @@
"adagrad",
"adam",
"adamw",
- "AdaptiveGradClipState",
"adaptive_grad_clip",
+ "AdaptiveGradClipState",
"add_decayed_weights",
"add_noise",
"AddDecayedWeightsState",
"additive_weight_decay",
"AdditiveWeightDecayState",
"AddNoiseState",
- "apply_if_finite",
"apply_every",
+ "apply_if_finite",
"apply_updates",
"ApplyEvery",
"ApplyIfFiniteState",
"centralize",
"chain",
- "clip",
"clip_by_block_rms",
"clip_by_global_norm",
+ "clip",
"ClipByGlobalNormState",
"ClipState",
"constant_schedule",
@@ -178,9 +178,9 @@
"cosine_distance",
"cosine_onecycle_schedule",
"cosine_similarity",
- "dpsgd",
"differentially_private_aggregate",
"DifferentiallyPrivateAggregateState",
+ "dpsgd",
"ema",
"EmaState",
"EmptyState",
@@ -199,24 +199,30 @@
"inject_hyperparams",
"InjectHyperparamsState",
"join_schedules",
+ "keep_params_nonnegative",
+ "l2_loss",
"lamb",
"lars",
+ "linear_onecycle_schedule",
+ "linear_schedule",
"log_cosh",
"lookahead",
"LookaheadParams",
"LookaheadState",
- "l2_loss",
- "linear_onecycle_schedule",
- "linear_schedule",
- "matrix_inverse_pth_root",
"masked",
"MaskedState",
- "multi_transform",
- "MultiTransformState",
+ "matrix_inverse_pth_root",
+ "maybe_update",
+ "MaybeUpdateState",
"measure_valued_jacobians",
"moving_avg_baseline",
"multi_normal",
+ "multi_transform",
+ "MultiSteps",
+ "MultiStepsState",
+ "MultiTransformState",
"noisy_sgd",
+ "NonNegativeParamsState",
"OptState",
"Params",
"pathwise_jacobians",
@@ -227,9 +233,9 @@
"power_iteration",
"radam",
"rmsprop",
- "scale",
"scale_by_adam",
"scale_by_belief",
+ "scale_by_factored_rms",
"scale_by_param_block_norm",
"scale_by_param_block_rms",
"scale_by_radam",
@@ -240,6 +246,7 @@
"scale_by_stddev",
"scale_by_trust_ratio",
"scale_by_yogi",
+ "scale",
"ScaleByAdamState",
"ScaleByFromageState",
"ScaleByRmsState",
@@ -253,8 +260,8 @@
"score_function_jacobians",
"sgd",
"sgdr_schedule",
- "sm3",
"sigmoid_binary_cross_entropy",
+ "sm3",
"smooth_labels",
"softmax_cross_entropy",
"trace",
@@ -265,6 +272,8 @@
"warmup_cosine_decay_schedule",
"warmup_exponential_decay_schedule",
"yogi",
+ "zero_nans",
+ "ZeroNansState",
)
# _________________________________________
|
{"golden_diff": "diff --git a/optax/__init__.py b/optax/__init__.py\n--- a/optax/__init__.py\n+++ b/optax/__init__.py\n@@ -151,24 +151,24 @@\n \"adagrad\",\n \"adam\",\n \"adamw\",\n- \"AdaptiveGradClipState\",\n \"adaptive_grad_clip\",\n+ \"AdaptiveGradClipState\",\n \"add_decayed_weights\",\n \"add_noise\",\n \"AddDecayedWeightsState\",\n \"additive_weight_decay\",\n \"AdditiveWeightDecayState\",\n \"AddNoiseState\",\n- \"apply_if_finite\",\n \"apply_every\",\n+ \"apply_if_finite\",\n \"apply_updates\",\n \"ApplyEvery\",\n \"ApplyIfFiniteState\",\n \"centralize\",\n \"chain\",\n- \"clip\",\n \"clip_by_block_rms\",\n \"clip_by_global_norm\",\n+ \"clip\",\n \"ClipByGlobalNormState\",\n \"ClipState\",\n \"constant_schedule\",\n@@ -178,9 +178,9 @@\n \"cosine_distance\",\n \"cosine_onecycle_schedule\",\n \"cosine_similarity\",\n- \"dpsgd\",\n \"differentially_private_aggregate\",\n \"DifferentiallyPrivateAggregateState\",\n+ \"dpsgd\",\n \"ema\",\n \"EmaState\",\n \"EmptyState\",\n@@ -199,24 +199,30 @@\n \"inject_hyperparams\",\n \"InjectHyperparamsState\",\n \"join_schedules\",\n+ \"keep_params_nonnegative\",\n+ \"l2_loss\",\n \"lamb\",\n \"lars\",\n+ \"linear_onecycle_schedule\",\n+ \"linear_schedule\",\n \"log_cosh\",\n \"lookahead\",\n \"LookaheadParams\",\n \"LookaheadState\",\n- \"l2_loss\",\n- \"linear_onecycle_schedule\",\n- \"linear_schedule\",\n- \"matrix_inverse_pth_root\",\n \"masked\",\n \"MaskedState\",\n- \"multi_transform\",\n- \"MultiTransformState\",\n+ \"matrix_inverse_pth_root\",\n+ \"maybe_update\",\n+ \"MaybeUpdateState\",\n \"measure_valued_jacobians\",\n \"moving_avg_baseline\",\n \"multi_normal\",\n+ \"multi_transform\",\n+ \"MultiSteps\",\n+ \"MultiStepsState\",\n+ \"MultiTransformState\",\n \"noisy_sgd\",\n+ \"NonNegativeParamsState\",\n \"OptState\",\n \"Params\",\n \"pathwise_jacobians\",\n@@ -227,9 +233,9 @@\n \"power_iteration\",\n \"radam\",\n \"rmsprop\",\n- \"scale\",\n \"scale_by_adam\",\n \"scale_by_belief\",\n+ \"scale_by_factored_rms\",\n \"scale_by_param_block_norm\",\n \"scale_by_param_block_rms\",\n \"scale_by_radam\",\n@@ -240,6 +246,7 @@\n \"scale_by_stddev\",\n \"scale_by_trust_ratio\",\n \"scale_by_yogi\",\n+ \"scale\",\n \"ScaleByAdamState\",\n \"ScaleByFromageState\",\n \"ScaleByRmsState\",\n@@ -253,8 +260,8 @@\n \"score_function_jacobians\",\n \"sgd\",\n \"sgdr_schedule\",\n- \"sm3\",\n \"sigmoid_binary_cross_entropy\",\n+ \"sm3\",\n \"smooth_labels\",\n \"softmax_cross_entropy\",\n \"trace\",\n@@ -265,6 +272,8 @@\n \"warmup_cosine_decay_schedule\",\n \"warmup_exponential_decay_schedule\",\n \"yogi\",\n+ \"zero_nans\",\n+ \"ZeroNansState\",\n )\n \n # _________________________________________\n", "issue": "__all__ is currently not useful in __init__.py\n`__all__` in the `__init__.py` overrides which functions are imported when a user does `from optax import *`. It seems like every function in that file should be exposed through a wildcard import, so there is no need for the `__all__`. Besides being redundant, having it creates opportunities for bugs: right now, many of the functions (e.g. `maybe_update`, `keep_params_nonnegative`) are imported but not exposed in `__all__`. I believe it should be removed, and would be happy to create a PR if that makes sense.\n", "before_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Optax: composable gradient processing and optimization, in JAX.\"\"\"\n\nfrom optax._src.alias import adabelief\nfrom optax._src.alias import adafactor\nfrom optax._src.alias import adagrad\nfrom optax._src.alias import adam\nfrom optax._src.alias import adamw\nfrom optax._src.alias import dpsgd\nfrom optax._src.alias import fromage\nfrom optax._src.alias import lamb\nfrom optax._src.alias import lars\nfrom optax._src.alias import noisy_sgd\nfrom optax._src.alias import radam\nfrom optax._src.alias import rmsprop\nfrom optax._src.alias import sgd\nfrom optax._src.alias import sm3\nfrom optax._src.alias import yogi\nfrom optax._src.base import EmptyState\nfrom optax._src.base import GradientTransformation\nfrom optax._src.base import identity\nfrom optax._src.base import OptState\nfrom optax._src.base import Params\nfrom optax._src.base import Schedule\nfrom optax._src.base import TransformInitFn\nfrom optax._src.base import TransformUpdateFn\nfrom optax._src.base import Updates\nfrom optax._src.clipping import adaptive_grad_clip\nfrom optax._src.clipping import AdaptiveGradClipState\nfrom optax._src.clipping import clip\nfrom optax._src.clipping import clip_by_block_rms\nfrom optax._src.clipping import clip_by_global_norm\nfrom optax._src.clipping import ClipByGlobalNormState\nfrom optax._src.clipping import ClipState\nfrom optax._src.combine import chain\nfrom optax._src.combine import multi_transform\nfrom optax._src.combine import MultiTransformState\nfrom optax._src.constrain import keep_params_nonnegative\nfrom optax._src.constrain import NonNegativeParamsState\nfrom optax._src.constrain import zero_nans\nfrom optax._src.constrain import ZeroNansState\nfrom optax._src.control_variates import control_delta_method\nfrom optax._src.control_variates import control_variates_jacobians\nfrom optax._src.control_variates import moving_avg_baseline\nfrom optax._src.factorized import FactoredState\nfrom optax._src.factorized import scale_by_factored_rms\nfrom optax._src.linear_algebra import global_norm\nfrom optax._src.linear_algebra import matrix_inverse_pth_root\nfrom optax._src.linear_algebra import power_iteration\nfrom optax._src.lookahead import lookahead\nfrom optax._src.lookahead import LookaheadParams\nfrom optax._src.lookahead import LookaheadState\nfrom optax._src.loss import cosine_distance\nfrom optax._src.loss import cosine_similarity\nfrom optax._src.loss import huber_loss\nfrom optax._src.loss import l2_loss\nfrom optax._src.loss import log_cosh\nfrom optax._src.loss import sigmoid_binary_cross_entropy\nfrom optax._src.loss import smooth_labels\nfrom optax._src.loss import softmax_cross_entropy\nfrom optax._src.privacy import differentially_private_aggregate\nfrom optax._src.privacy import DifferentiallyPrivateAggregateState\nfrom optax._src.schedule import constant_schedule\nfrom optax._src.schedule import 
cosine_decay_schedule\nfrom optax._src.schedule import cosine_onecycle_schedule\nfrom optax._src.schedule import exponential_decay\nfrom optax._src.schedule import inject_hyperparams\nfrom optax._src.schedule import InjectHyperparamsState\nfrom optax._src.schedule import join_schedules\nfrom optax._src.schedule import linear_onecycle_schedule\nfrom optax._src.schedule import linear_schedule\nfrom optax._src.schedule import piecewise_constant_schedule\nfrom optax._src.schedule import piecewise_interpolate_schedule\nfrom optax._src.schedule import polynomial_schedule\nfrom optax._src.schedule import sgdr_schedule\nfrom optax._src.schedule import warmup_cosine_decay_schedule\nfrom optax._src.schedule import warmup_exponential_decay_schedule\nfrom optax._src.second_order import fisher_diag\nfrom optax._src.second_order import hessian_diag\nfrom optax._src.second_order import hvp\nfrom optax._src.stochastic_gradient_estimators import measure_valued_jacobians\nfrom optax._src.stochastic_gradient_estimators import pathwise_jacobians\nfrom optax._src.stochastic_gradient_estimators import score_function_jacobians\nfrom optax._src.transform import add_decayed_weights\nfrom optax._src.transform import add_noise\nfrom optax._src.transform import AddDecayedWeightsState\nfrom optax._src.transform import additive_weight_decay\nfrom optax._src.transform import AdditiveWeightDecayState\nfrom optax._src.transform import AddNoiseState\nfrom optax._src.transform import apply_every\nfrom optax._src.transform import ApplyEvery\nfrom optax._src.transform import centralize\nfrom optax._src.transform import ema\nfrom optax._src.transform import EmaState\nfrom optax._src.transform import scale\nfrom optax._src.transform import scale_by_adam\nfrom optax._src.transform import scale_by_belief\nfrom optax._src.transform import scale_by_param_block_norm\nfrom optax._src.transform import scale_by_param_block_rms\nfrom optax._src.transform import scale_by_radam\nfrom optax._src.transform import scale_by_rms\nfrom optax._src.transform import scale_by_rss\nfrom optax._src.transform import scale_by_schedule\nfrom optax._src.transform import scale_by_sm3\nfrom optax._src.transform import scale_by_stddev\nfrom optax._src.transform import scale_by_trust_ratio\nfrom optax._src.transform import scale_by_yogi\nfrom optax._src.transform import ScaleByAdamState\nfrom optax._src.transform import ScaleByFromageState\nfrom optax._src.transform import ScaleByRmsState\nfrom optax._src.transform import ScaleByRssState\nfrom optax._src.transform import ScaleByRStdDevState\nfrom optax._src.transform import ScaleByScheduleState\nfrom optax._src.transform import ScaleBySM3State\nfrom optax._src.transform import ScaleByTrustRatioState\nfrom optax._src.transform import ScaleState\nfrom optax._src.transform import trace\nfrom optax._src.transform import TraceState\nfrom optax._src.update import apply_updates\nfrom optax._src.update import incremental_update\nfrom optax._src.update import periodic_update\nfrom optax._src.utils import multi_normal\nfrom optax._src.wrappers import apply_if_finite\nfrom optax._src.wrappers import ApplyIfFiniteState\nfrom optax._src.wrappers import flatten\nfrom optax._src.wrappers import masked\nfrom optax._src.wrappers import MaskedState\nfrom optax._src.wrappers import maybe_update\nfrom optax._src.wrappers import MaybeUpdateState\nfrom optax._src.wrappers import MultiSteps\nfrom optax._src.wrappers import MultiStepsState\n\n__version__ = \"0.0.9\"\n\n__all__ = (\n \"adabelief\",\n \"adafactor\",\n \"adagrad\",\n 
\"adam\",\n \"adamw\",\n \"AdaptiveGradClipState\",\n \"adaptive_grad_clip\",\n \"add_decayed_weights\",\n \"add_noise\",\n \"AddDecayedWeightsState\",\n \"additive_weight_decay\",\n \"AdditiveWeightDecayState\",\n \"AddNoiseState\",\n \"apply_if_finite\",\n \"apply_every\",\n \"apply_updates\",\n \"ApplyEvery\",\n \"ApplyIfFiniteState\",\n \"centralize\",\n \"chain\",\n \"clip\",\n \"clip_by_block_rms\",\n \"clip_by_global_norm\",\n \"ClipByGlobalNormState\",\n \"ClipState\",\n \"constant_schedule\",\n \"control_delta_method\",\n \"control_variates_jacobians\",\n \"cosine_decay_schedule\",\n \"cosine_distance\",\n \"cosine_onecycle_schedule\",\n \"cosine_similarity\",\n \"dpsgd\",\n \"differentially_private_aggregate\",\n \"DifferentiallyPrivateAggregateState\",\n \"ema\",\n \"EmaState\",\n \"EmptyState\",\n \"exponential_decay\",\n \"FactoredState\",\n \"fisher_diag\",\n \"flatten\",\n \"fromage\",\n \"global_norm\",\n \"GradientTransformation\",\n \"hessian_diag\",\n \"huber_loss\",\n \"hvp\",\n \"identity\",\n \"incremental_update\",\n \"inject_hyperparams\",\n \"InjectHyperparamsState\",\n \"join_schedules\",\n \"lamb\",\n \"lars\",\n \"log_cosh\",\n \"lookahead\",\n \"LookaheadParams\",\n \"LookaheadState\",\n \"l2_loss\",\n \"linear_onecycle_schedule\",\n \"linear_schedule\",\n \"matrix_inverse_pth_root\",\n \"masked\",\n \"MaskedState\",\n \"multi_transform\",\n \"MultiTransformState\",\n \"measure_valued_jacobians\",\n \"moving_avg_baseline\",\n \"multi_normal\",\n \"noisy_sgd\",\n \"OptState\",\n \"Params\",\n \"pathwise_jacobians\",\n \"periodic_update\",\n \"piecewise_constant_schedule\",\n \"piecewise_interpolate_schedule\",\n \"polynomial_schedule\",\n \"power_iteration\",\n \"radam\",\n \"rmsprop\",\n \"scale\",\n \"scale_by_adam\",\n \"scale_by_belief\",\n \"scale_by_param_block_norm\",\n \"scale_by_param_block_rms\",\n \"scale_by_radam\",\n \"scale_by_rms\",\n \"scale_by_rss\",\n \"scale_by_schedule\",\n \"scale_by_sm3\",\n \"scale_by_stddev\",\n \"scale_by_trust_ratio\",\n \"scale_by_yogi\",\n \"ScaleByAdamState\",\n \"ScaleByFromageState\",\n \"ScaleByRmsState\",\n \"ScaleByRssState\",\n \"ScaleByRStdDevState\",\n \"ScaleByScheduleState\",\n \"ScaleBySM3State\",\n \"ScaleByTrustRatioState\",\n \"ScaleState\",\n \"Schedule\",\n \"score_function_jacobians\",\n \"sgd\",\n \"sgdr_schedule\",\n \"sm3\",\n \"sigmoid_binary_cross_entropy\",\n \"smooth_labels\",\n \"softmax_cross_entropy\",\n \"trace\",\n \"TraceState\",\n \"TransformInitFn\",\n \"TransformUpdateFn\",\n \"Updates\",\n \"warmup_cosine_decay_schedule\",\n \"warmup_exponential_decay_schedule\",\n \"yogi\",\n)\n\n# _________________________________________\n# / Please don't use symbols in `_src` they \\\n# \\ are not part of the Optax public API. /\n# -----------------------------------------\n# \\ ^__^\n# \\ (oo)\\_______\n# (__)\\ )\\/\\\n# ||----w |\n# || ||\n#\n", "path": "optax/__init__.py"}]}
| 3,869 | 827 |
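For reference, a minimal sketch of how `__all__` interacts with wildcard imports, which is the behaviour the issue in this entry describes; the package and symbol names below are made up for illustration and are not part of optax.

```python
# pkg/__init__.py -- illustrative package, not optax itself
from pkg._impl import public_fn, private_helper

# Only the names listed here are bound by `from pkg import *`;
# a symbol that is imported above but missing from __all__ (private_helper)
# is silently dropped from wildcard imports, which is the bug pattern described.
__all__ = ("public_fn",)

# consumer.py
# from pkg import *   -> public_fn is defined, private_helper raises NameError
# import pkg          -> pkg.private_helper still works; __all__ only affects `import *`
```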
gh_patches_debug_26810
|
rasdani/github-patches
|
git_diff
|
facebookresearch__fairscale-1108
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lots of Commandline Output from this line.
https://github.com/facebookresearch/fairscale/blob/2350968ee61a6f9ca6ecd24aba9db536e814a24c/fairscale/internal/version.py#L27
This warning appears a LOT in the command-line output when training. Can we remove it, or move it somewhere it is only shown once at the start?
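A sketch of the usual remedy is shown after this entry's diff and token counts.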
</issue>
<code>
[start of fairscale/internal/version.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import logging
7 import re
8 from typing import List, Tuple
9
10 import torch
11
12 __all__: List[str] = ["torch_version"]
13
14
15 def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:
16 numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version)
17 if not numbering:
18 return tuple()
19 # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,
20 if numbering.group(4):
21 # Two options here:
22 # - either skip this version (minor number check is not relevant)
23 # - or check that our codebase is not broken by this ongoing development.
24
25 # Assuming that we're interested in the second use-case more than the first,
26 # return the pre-release or dev numbering
27 logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it")
28
29 return tuple(int(numbering.group(n)) for n in range(1, 4))
30
[end of fairscale/internal/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fairscale/internal/version.py b/fairscale/internal/version.py
--- a/fairscale/internal/version.py
+++ b/fairscale/internal/version.py
@@ -11,13 +11,15 @@
__all__: List[str] = ["torch_version"]
+_logged = False
def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:
numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", version)
if not numbering:
return tuple()
# Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,
- if numbering.group(4):
+ global _logged
+ if numbering.group(4) and not _logged:
# Two options here:
# - either skip this version (minor number check is not relevant)
# - or check that our codebase is not broken by this ongoing development.
@@ -25,5 +27,6 @@
# Assuming that we're interested in the second use-case more than the first,
# return the pre-release or dev numbering
logging.warning(f"Pytorch pre-release version {version} - assuming intent to test it")
+ _logged = True
return tuple(int(numbering.group(n)) for n in range(1, 4))
|
{"golden_diff": "diff --git a/fairscale/internal/version.py b/fairscale/internal/version.py\n--- a/fairscale/internal/version.py\n+++ b/fairscale/internal/version.py\n@@ -11,13 +11,15 @@\n \n __all__: List[str] = [\"torch_version\"]\n \n+_logged = False\n \n def torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n- if numbering.group(4):\n+ global _logged\n+ if numbering.group(4) and not _logged:\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n@@ -25,5 +27,6 @@\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n+ _logged = True\n \n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "issue": "Lots of Commandline Output from this line.\n\r\nhttps://github.com/facebookresearch/fairscale/blob/2350968ee61a6f9ca6ecd24aba9db536e814a24c/fairscale/internal/version.py#L27\r\n\r\nthis warning appears a LOT in the commandline output when training. Can we remove it or place it in a place where it gets only shown once at the start?\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport re\nfrom typing import List, Tuple\n\nimport torch\n\n__all__: List[str] = [\"torch_version\"]\n\n\ndef torch_version(version: str = torch.__version__) -> Tuple[int, ...]:\n numbering = re.search(r\"^(\\d+).(\\d+).(\\d+)([^\\+]*)(\\+\\S*)?$\", version)\n if not numbering:\n return tuple()\n # Catch torch version if run against internal pre-releases, like `1.8.0a0fb`,\n if numbering.group(4):\n # Two options here:\n # - either skip this version (minor number check is not relevant)\n # - or check that our codebase is not broken by this ongoing development.\n\n # Assuming that we're interested in the second use-case more than the first,\n # return the pre-release or dev numbering\n logging.warning(f\"Pytorch pre-release version {version} - assuming intent to test it\")\n\n return tuple(int(numbering.group(n)) for n in range(1, 4))\n", "path": "fairscale/internal/version.py"}]}
| 959 | 300 |
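A minimal standalone sketch of the warn-once pattern that the diff above applies; the function name is illustrative, and only the flag-guarded `logging.warning` call mirrors the fix.

```python
import logging

_warned = False  # module-level flag so the state persists across calls


def warn_prerelease_once(version: str) -> None:
    global _warned
    if not _warned:
        logging.warning("Pytorch pre-release version %s - assuming intent to test it", version)
        _warned = True  # every later call skips the logging entirely
```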
gh_patches_debug_13256
|
rasdani/github-patches
|
git_diff
|
openvinotoolkit__datumaro-278
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test errors after tensorflow installation on Ubuntu 20.04
Ubuntu 20.04, Python 3.8.5
Installation of tensorflow (to enable the skipped tests) results in test errors.
**Steps to reproduce:**
```
git clone https://github.com/openvinotoolkit/datumaro
cd datumaro
python3 -m pip install virtualenv
python3 -m virtualenv venv
. venv/bin/activate
pip install datumaro
python3 -m unittest -v
//there are some skipped tests - required tensorflow and pandas)
pip install tensorflow
//during installation numpy 1.20.3 was uninstalled and 1.19.5 was installed
python3 -m unittest -v
```
**Expected result:**
No test errors after installing the libraries required to run the initially skipped tests.
**Current result:**
```
Ran 390 tests in 11.807s
FAILED (errors=29, skipped=7)
```
```======================================================================
ERROR: test_validate_annotations_segmentation (tests.test_validator.TestValidateAnnotations)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/sstrehlk/src/datum_p/tests/test_validator.py", line 803, in test_validate_annotations_segmentation
actual_results = validate_annotations(self.dataset, 'segmentation',
File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1255, in validate_annotations
stats = validator.compute_statistics(dataset)
File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1064, in compute_statistics
_update_mask_stats_by_label(
File "/home/sstrehlk/src/datum_p/datumaro/components/validator.py", line 1026, in _update_mask_stats_by_label
area = ann.get_area()
File "/home/sstrehlk/src/datum_p/datumaro/components/extractor.py", line 374, in get_area
import pycocotools.mask as mask_utils
File "/home/sstrehlk/src/datum_p/venv/lib/python3.8/site-packages/pycocotools/mask.py", line 3, in <module>
import pycocotools._mask as _mask
File "pycocotools/_mask.pyx", line 1, in init pycocotools._mask
ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject
----------------------------------------------------------------------
```
It seems that there is an incompatibility between numpy 1.19.5 and pycocotools 2.0.2. There is a workaround for it:
```
pip install pycocotools==2.0.0
```
</issue>
<code>
[start of setup.py]
1
2 # Copyright (C) 2019-2020 Intel Corporation
3 #
4 # SPDX-License-Identifier: MIT
5
6 from distutils.util import strtobool
7 import os
8 import os.path as osp
9 import re
10 import setuptools
11
12 # Snyk scan integration
13 here = None
14
15
16 def find_version(project_dir=None):
17 if not project_dir:
18 project_dir = osp.dirname(osp.abspath(__file__))
19
20 file_path = osp.join(project_dir, 'datumaro', 'version.py')
21
22 with open(file_path, 'r') as version_file:
23 version_text = version_file.read()
24
25 # PEP440:
26 # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions
27 pep_regex = r'([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?'
28 version_regex = r'VERSION\s*=\s*.(' + pep_regex + ').'
29 match = re.match(version_regex, version_text)
30 if not match:
31 raise RuntimeError("Failed to find version string in '%s'" % file_path)
32
33 version = version_text[match.start(1) : match.end(1)]
34 return version
35
36 def get_requirements():
37 requirements = [
38 'attrs>=19.3.0',
39 'defusedxml',
40 'GitPython',
41 'lxml',
42 'matplotlib',
43 'numpy>=1.17.3',
44 'Pillow',
45 'pycocotools; platform_system != "Windows"',
46 'pycocotools-windows; platform_system == "Windows"',
47 'PyYAML',
48 'scikit-image',
49 'tensorboardX',
50 ]
51 if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()):
52 requirements.append('opencv-python-headless')
53 else:
54 requirements.append('opencv-python')
55
56 return requirements
57
58 with open('README.md', 'r') as fh:
59 long_description = fh.read()
60
61 setuptools.dist.Distribution().fetch_build_eggs([
62 'Cython>=0.27.3' # required for pycocotools and others, if need to compile
63 ])
64
65 setuptools.setup(
66 name="datumaro",
67 version=find_version(here),
68 author="Intel",
69 author_email="[email protected]",
70 description="Dataset Management Framework (Datumaro)",
71 long_description=long_description,
72 long_description_content_type="text/markdown",
73 url="https://github.com/openvinotoolkit/datumaro",
74 packages=setuptools.find_packages(exclude=['tests*']),
75 classifiers=[
76 "Programming Language :: Python :: 3",
77 "License :: OSI Approved :: MIT License",
78 "Operating System :: OS Independent",
79 ],
80 python_requires='>=3.6',
81 install_requires=get_requirements(),
82 extras_require={
83 'tf': ['tensorflow'],
84 'tf-gpu': ['tensorflow-gpu'],
85 },
86 entry_points={
87 'console_scripts': [
88 'datum=datumaro.cli.__main__:main',
89 ],
90 },
91 )
92
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,8 +42,17 @@
'matplotlib',
'numpy>=1.17.3',
'Pillow',
- 'pycocotools; platform_system != "Windows"',
+
+ # Avoid 2.0.2 Linux binary distribution because of
+ # a conflict in numpy versions with TensorFlow:
+ # - TF is compiled with numpy 1.19 ABI
+ # - pycocotools is compiled with numpy 1.20 ABI
+ # Using a previous version allows to force package rebuilding.
+ #
+ # https://github.com/openvinotoolkit/datumaro/issues/253
+ 'pycocotools!=2.0.2; platform_system != "Windows"',
'pycocotools-windows; platform_system == "Windows"',
+
'PyYAML',
'scikit-image',
'tensorboardX',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,8 +42,17 @@\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n- 'pycocotools; platform_system != \"Windows\"',\n+\n+ # Avoid 2.0.2 Linux binary distribution because of\n+ # a conflict in numpy versions with TensorFlow:\n+ # - TF is compiled with numpy 1.19 ABI\n+ # - pycocotools is compiled with numpy 1.20 ABI\n+ # Using a previous version allows to force package rebuilding.\n+ #\n+ # https://github.com/openvinotoolkit/datumaro/issues/253\n+ 'pycocotools!=2.0.2; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n+\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n", "issue": "Test errors after tensorflow installation on Ubuntu 20.04\nUbuntu20.04, Python 3.8.5 \r\nInstallation of tensorflow (to enable skipped tests) results with tests errors.\r\n\r\n**Steps to reproduce:**\r\n```\r\ngit clone https://github.com/openvinotoolkit/datumaro \r\ncd datumaro\r\npython3 -m pip install virtualenv\r\npython3 -m virtualenv venv\r\n. venv/bin/activate\r\npip install datumaro\r\npython3 -m unittest -v\r\n//there are some skipped tests - required tensorflow and pandas)\r\npip install tensorflow\r\n//during installation numpy 1.20.3 was uninstalled and 1.19.5 was installed\r\npython3 -m unittest -v\r\n```\r\n**Expected result:**\r\nNo test errors after installation libraries required to perform initially skipped tests.\r\n\r\n**Current result:**\r\n```\r\nRan 390 tests in 11.807s\r\n\r\nFAILED (errors=29, skipped=7)\r\n```\r\n```======================================================================\r\nERROR: test_validate_annotations_segmentation (tests.test_validator.TestValidateAnnotations)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/sstrehlk/src/datum_p/tests/test_validator.py\", line 803, in test_validate_annotations_segmentation\r\n actual_results = validate_annotations(self.dataset, 'segmentation',\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1255, in validate_annotations\r\n stats = validator.compute_statistics(dataset)\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1064, in compute_statistics\r\n _update_mask_stats_by_label(\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/validator.py\", line 1026, in _update_mask_stats_by_label\r\n area = ann.get_area()\r\n File \"/home/sstrehlk/src/datum_p/datumaro/components/extractor.py\", line 374, in get_area\r\n import pycocotools.mask as mask_utils\r\n File \"/home/sstrehlk/src/datum_p/venv/lib/python3.8/site-packages/pycocotools/mask.py\", line 3, in <module>\r\n import pycocotools._mask as _mask\r\n File \"pycocotools/_mask.pyx\", line 1, in init pycocotools._mask\r\nValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject\r\n\r\n----------------------------------------------------------------------\r\n```\r\nIt seems that there is incompatibility between numpy 1.19.5 and pycocotools 2.0.2. 
There is workaround for it: \r\n```\r\npip install pycocotools==2.0.0\r\n``` \n", "before_files": [{"content": "\n# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom distutils.util import strtobool\nimport os\nimport os.path as osp\nimport re\nimport setuptools\n\n# Snyk scan integration\nhere = None\n\n\ndef find_version(project_dir=None):\n if not project_dir:\n project_dir = osp.dirname(osp.abspath(__file__))\n\n file_path = osp.join(project_dir, 'datumaro', 'version.py')\n\n with open(file_path, 'r') as version_file:\n version_text = version_file.read()\n\n # PEP440:\n # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions\n pep_regex = r'([1-9]\\d*!)?(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*((a|b|rc)(0|[1-9]\\d*))?(\\.post(0|[1-9]\\d*))?(\\.dev(0|[1-9]\\d*))?'\n version_regex = r'VERSION\\s*=\\s*.(' + pep_regex + ').'\n match = re.match(version_regex, version_text)\n if not match:\n raise RuntimeError(\"Failed to find version string in '%s'\" % file_path)\n\n version = version_text[match.start(1) : match.end(1)]\n return version\n\ndef get_requirements():\n requirements = [\n 'attrs>=19.3.0',\n 'defusedxml',\n 'GitPython',\n 'lxml',\n 'matplotlib',\n 'numpy>=1.17.3',\n 'Pillow',\n 'pycocotools; platform_system != \"Windows\"',\n 'pycocotools-windows; platform_system == \"Windows\"',\n 'PyYAML',\n 'scikit-image',\n 'tensorboardX',\n ]\n if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()):\n requirements.append('opencv-python-headless')\n else:\n requirements.append('opencv-python')\n\n return requirements\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.dist.Distribution().fetch_build_eggs([\n 'Cython>=0.27.3' # required for pycocotools and others, if need to compile\n])\n\nsetuptools.setup(\n name=\"datumaro\",\n version=find_version(here),\n author=\"Intel\",\n author_email=\"[email protected]\",\n description=\"Dataset Management Framework (Datumaro)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/openvinotoolkit/datumaro\",\n packages=setuptools.find_packages(exclude=['tests*']),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=get_requirements(),\n extras_require={\n 'tf': ['tensorflow'],\n 'tf-gpu': ['tensorflow-gpu'],\n },\n entry_points={\n 'console_scripts': [\n 'datum=datumaro.cli.__main__:main',\n ],\n },\n)\n", "path": "setup.py"}]}
| 2,043 | 221 |
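A small sketch, assuming the third-party `packaging` library, of how the `!=2.0.2` specifier introduced in the diff above behaves; the version checks are purely illustrative.

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet("!=2.0.2")      # same constraint as the new setup.py requirement
print(Version("2.0.0") in spec)     # True  - the older, compatible build is allowed
print(Version("2.0.2") in spec)     # False - the binary built against numpy 1.20 ABI is skipped
print(Version("2.0.4") in spec)     # True  - later rebuilds are accepted again
```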
gh_patches_debug_28710
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1639
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Waste type not showing for City of Doncaster (Green Bin Collection)
### I Have A Problem With:
A specific source, The integration in general
### What's Your Problem
For Doncaster Council the green bin collection only runs 9 months out of 12. The next collection is in early March and can be viewed via the Council bin look-up calendar, but the waste type is not returned by the integration; it only seems to scrape 3 weeks in advance. Can this be changed?
### Source (if relevant)
doncaster_gov_uk
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: doncaster_gov_uk
args:
uprn: "xxxxxxx"
- platform: waste_collection_schedule
name: Bins
details_format: appointment_types
leadtime: 90
# value_template: VALUE_TEMPLATE
# date_template: DATE_TEMPLATE
add_days_to: true
# event_index: EVENT_INDEX
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [ ] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py]
1 import re
2 import requests
3 import json
4 from datetime import datetime, timedelta
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "City of Doncaster Council"
8 DESCRIPTION = "Source for doncaster.gov.uk services for the City of Doncaster Council, UK."
9 URL = "https://doncaster.gov.uk"
10
11 TEST_CASES = {
12 "Test_001": {"uprn": "100050701118"},
13 "Test_002": {"uprn": "100050753396"},
14 "Test_003": {"uprn": 100050699118},
15 }
16
17 ICON_MAP = {
18 "GREEN": "mdi:leaf",
19 "RECYCLING": "mdi:recycle",
20 "BLACK": "mdi:trash-can",
21 "BULKY": "mdi:fridge",
22 "RE-USE": "mdi:sofa",
23 }
24
25 REGEX_DATE = r"\(([0-9]{10})"
26
27
28 class Source:
29 def __init__(self, uprn):
30 self._uprn = str(uprn).zfill(12)
31
32
33 def fetch(self):
34
35 # Query needs start and end epoch dates
36 today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
37 start = (today - timedelta(weeks=3)).strftime("%s")
38 end = (today + timedelta(weeks=3)).strftime("%s")
39 url = f"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}"
40 # start = start.strftime("%s")
41 # end = end.strftime("%s")
42
43 s = requests.Session()
44 r = s.get(url)
45 data = json.loads(r.text)
46
47 entries = []
48
49 for entry in data["slots"]:
50 waste_type = entry["title"]
51 waste_date = entry["end"]
52 epoch = re.findall(REGEX_DATE, waste_date)
53 waste_date = datetime.fromtimestamp(int(epoch[0])).date()
54 entries.append(
55 Collection(
56 date=waste_date,
57 t=waste_type,
58 icon=ICON_MAP.get(waste_type.upper()),
59 )
60 )
61
62 return entries
63
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py
@@ -1,11 +1,14 @@
-import re
-import requests
import json
+import re
from datetime import datetime, timedelta
+
+import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "City of Doncaster Council"
-DESCRIPTION = "Source for doncaster.gov.uk services for the City of Doncaster Council, UK."
+DESCRIPTION = (
+ "Source for doncaster.gov.uk services for the City of Doncaster Council, UK."
+)
URL = "https://doncaster.gov.uk"
TEST_CASES = {
@@ -29,13 +32,11 @@
def __init__(self, uprn):
self._uprn = str(uprn).zfill(12)
-
def fetch(self):
-
- # Query needs start and end epoch dates
+ # Query needs start and end epoch dates
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
- start = (today - timedelta(weeks=3)).strftime("%s")
- end = (today + timedelta(weeks=3)).strftime("%s")
+ start = (today - timedelta(days=365)).strftime("%s")
+ end = (today + timedelta(days=365)).strftime("%s")
url = f"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}"
# start = start.strftime("%s")
# end = end.strftime("%s")
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py\n@@ -1,11 +1,14 @@\n-import re\n-import requests\n import json\n+import re\n from datetime import datetime, timedelta\n+\n+import requests\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"City of Doncaster Council\"\n-DESCRIPTION = \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\n+DESCRIPTION = (\n+ \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\n+)\n URL = \"https://doncaster.gov.uk\"\n \n TEST_CASES = {\n@@ -29,13 +32,11 @@\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n \n-\n def fetch(self):\n-\n- # Query needs start and end epoch dates \n+ # Query needs start and end epoch dates\n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n- start = (today - timedelta(weeks=3)).strftime(\"%s\")\n- end = (today + timedelta(weeks=3)).strftime(\"%s\")\n+ start = (today - timedelta(days=365)).strftime(\"%s\")\n+ end = (today + timedelta(days=365)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n", "issue": "[Bug]: Waste type not showing for City of Doncaster (Green Bin Collection)\n### I Have A Problem With:\n\nA specific source, The integration in general\n\n### What's Your Problem\n\nFor Doncaster Council the Green bin collection only runs 9months out of 12. Next collection is in early March which can be viewed via the Council bin look-up calendar, but the waste type is not returned on the integration, it only seems to scrape 3 weeks in advance, can this be changed? 
\n\n### Source (if relevant)\n\ndoncaster_gov_uk\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: doncaster_gov_uk\r\n args:\r\n uprn: \"xxxxxxx\"\r\n\r\n - platform: waste_collection_schedule\r\n name: Bins\r\n details_format: appointment_types\r\n leadtime: 90\r\n# value_template: VALUE_TEMPLATE\r\n# date_template: DATE_TEMPLATE\r\n add_days_to: true\r\n# event_index: EVENT_INDEX\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nimport requests\nimport json\nfrom datetime import datetime, timedelta\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"City of Doncaster Council\"\nDESCRIPTION = \"Source for doncaster.gov.uk services for the City of Doncaster Council, UK.\"\nURL = \"https://doncaster.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"100050701118\"},\n \"Test_002\": {\"uprn\": \"100050753396\"},\n \"Test_003\": {\"uprn\": 100050699118},\n}\n\nICON_MAP = {\n \"GREEN\": \"mdi:leaf\",\n \"RECYCLING\": \"mdi:recycle\",\n \"BLACK\": \"mdi:trash-can\",\n \"BULKY\": \"mdi:fridge\",\n \"RE-USE\": \"mdi:sofa\",\n}\n\nREGEX_DATE = r\"\\(([0-9]{10})\"\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = str(uprn).zfill(12)\n\n\n def fetch(self):\n\n # Query needs start and end epoch dates \n today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n start = (today - timedelta(weeks=3)).strftime(\"%s\")\n end = (today + timedelta(weeks=3)).strftime(\"%s\")\n url = f\"https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar?UPRN={self._uprn}&Start={start}&End={end}\"\n # start = start.strftime(\"%s\")\n # end = end.strftime(\"%s\")\n\n s = requests.Session()\n r = s.get(url)\n data = json.loads(r.text)\n\n entries = []\n\n for entry in data[\"slots\"]:\n waste_type = entry[\"title\"]\n waste_date = entry[\"end\"]\n epoch = re.findall(REGEX_DATE, waste_date)\n waste_date = datetime.fromtimestamp(int(epoch[0])).date()\n entries.append(\n Collection(\n date=waste_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type.upper()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/doncaster_gov_uk.py"}]}
| 1,630 | 435 |
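A minimal sketch of how the widened one-year query window from the diff above is built; the UPRN value is a placeholder, and `strftime("%s")` is the same platform-dependent epoch call the source already relies on.

```python
from datetime import datetime, timedelta

today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start = (today - timedelta(days=365)).strftime("%s")  # epoch seconds, one year back
end = (today + timedelta(days=365)).strftime("%s")    # one year ahead, covering seasonal rounds
url = (
    "https://www.doncaster.gov.uk/Compass/PremiseDetail/GetCollectionsForCalendar"
    f"?UPRN=000000000000&Start={start}&End={end}"
)
```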
gh_patches_debug_1937
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-23588
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ifft2
</issue>
<code>
[start of ivy/functional/frontends/jax/numpy/fft.py]
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
4 from ivy.func_wrapper import with_unsupported_dtypes
5
6
7 @to_ivy_arrays_and_back
8 def fft(a, n=None, axis=-1, norm=None):
9 if norm is None:
10 norm = "backward"
11 return ivy.fft(a, axis, norm=norm, n=n)
12
13
14 @to_ivy_arrays_and_back
15 def fft2(a, s=None, axes=(-2, -1), norm=None):
16 if norm is None:
17 norm = "backward"
18 return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
19
20
21 @to_ivy_arrays_and_back
22 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
23 def fftshift(x, axes=None, name=None):
24 shape = x.shape
25
26 if axes is None:
27 axes = tuple(range(x.ndim))
28 shifts = [(dim // 2) for dim in shape]
29 elif isinstance(axes, int):
30 shifts = shape[axes] // 2
31 else:
32 shifts = [shape[ax] // 2 for ax in axes]
33
34 roll = ivy.roll(x, shifts, axis=axes)
35
36 return roll
37
38
39 @to_ivy_arrays_and_back
40 def ifft(a, n=None, axis=-1, norm=None):
41 if norm is None:
42 norm = "backward"
43 return ivy.ifft(a, axis, norm=norm, n=n)
44
[end of ivy/functional/frontends/jax/numpy/fft.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py
--- a/ivy/functional/frontends/jax/numpy/fft.py
+++ b/ivy/functional/frontends/jax/numpy/fft.py
@@ -41,3 +41,10 @@
if norm is None:
norm = "backward"
return ivy.ifft(a, axis, norm=norm, n=n)
+
+
+@to_ivy_arrays_and_back
+def ifft2(a, s=None, axes=(-2, -1), norm=None):
+ if norm is None:
+ norm = "backward"
+ return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))
|
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/fft.py b/ivy/functional/frontends/jax/numpy/fft.py\n--- a/ivy/functional/frontends/jax/numpy/fft.py\n+++ b/ivy/functional/frontends/jax/numpy/fft.py\n@@ -41,3 +41,10 @@\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n+\n+\n+@to_ivy_arrays_and_back\n+def ifft2(a, s=None, axes=(-2, -1), norm=None):\n+ if norm is None:\n+ norm = \"backward\"\n+ return ivy.array(ivy.ifft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n", "issue": " ifft2\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.fft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\ndef fft2(a, s=None, axes=(-2, -1), norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.array(ivy.fft2(a, s=s, dim=axes, norm=norm), dtype=ivy.dtype(a))\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = [shape[ax] // 2 for ax in axes]\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n", "path": "ivy/functional/frontends/jax/numpy/fft.py"}]}
| 990 | 181 |
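For intuition about what the new `ifft2` frontend is expected to return, a short NumPy round-trip sketch; NumPy is used here only for illustration and is not part of the patch.

```python
import numpy as np

a = np.arange(16, dtype=float).reshape(4, 4)
freq = np.fft.fft2(a)             # forward 2-D transform over the last two axes
back = np.fft.ifft2(freq)         # inverse transform; the default norm is "backward"
print(np.allclose(back.real, a))  # True - the round trip recovers the input
```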
gh_patches_debug_13514
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-4265
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upload logo is not working
### CKAN Version if known (or site URL)
2.8+
### Please describe the expected behaviour
When uploading a logo from the config page, we should see the new logo on the portal
### Please describe the actual behaviour
Logo is not uploaded
### What steps can be taken to reproduce the issue?
Go to
https://beta.ckan.org/ckan-admin/config
Upload an image
Update config
</issue>
<code>
[start of ckan/views/admin.py]
1 # encoding: utf-8
2
3 import logging
4
5 from ckan.controllers.home import CACHE_PARAMETERS
6 from flask import Blueprint
7 from flask.views import MethodView
8
9 import ckan.lib.app_globals as app_globals
10 import ckan.lib.base as base
11 import ckan.lib.helpers as h
12 import ckan.lib.navl.dictization_functions as dict_fns
13 import ckan.logic as logic
14 import ckan.model as model
15 from ckan.common import g, _, config, request
16
17 log = logging.getLogger(__name__)
18
19 admin = Blueprint(u'admin', __name__, url_prefix=u'/ckan-admin')
20
21
22 def _get_sysadmins():
23 q = model.Session.query(model.User).filter(model.User.sysadmin.is_(True),
24 model.User.state == u'active')
25 return q
26
27
28 def _get_config_options():
29 styles = [{
30 u'text': u'Default',
31 u'value': u'/base/css/main.css'
32 }, {
33 u'text': u'Red',
34 u'value': u'/base/css/red.css'
35 }, {
36 u'text': u'Green',
37 u'value': u'/base/css/green.css'
38 }, {
39 u'text': u'Maroon',
40 u'value': u'/base/css/maroon.css'
41 }, {
42 u'text': u'Fuchsia',
43 u'value': u'/base/css/fuchsia.css'
44 }]
45
46 homepages = [{
47 u'value': u'1',
48 u'text': (u'Introductory area, search, featured'
49 u' group and featured organization')
50 }, {
51 u'value': u'2',
52 u'text': (u'Search, stats, introductory area, '
53 u'featured organization and featured group')
54 }, {
55 u'value': u'3',
56 u'text': u'Search, introductory area and stats'
57 }]
58
59 return dict(styles=styles, homepages=homepages)
60
61
62 def _get_config_items():
63 return [
64 u'ckan.site_title', u'ckan.main_css', u'ckan.site_description',
65 u'ckan.site_logo', u'ckan.site_about', u'ckan.site_intro_text',
66 u'ckan.site_custom_css', u'ckan.homepage_style'
67 ]
68
69
70 @admin.before_request
71 def before_request():
72 try:
73 context = dict(model=model, user=g.user, auth_user_obj=g.userobj)
74 logic.check_access(u'sysadmin', context)
75 except logic.NotAuthorized:
76 base.abort(403, _(u'Need to be system administrator to administer'))
77
78
79 def index():
80 data = dict(sysadmins=[a.name for a in _get_sysadmins()])
81 return base.render(u'admin/index.html', extra_vars=data)
82
83
84 class ResetConfigView(MethodView):
85 def get(self):
86 if u'cancel' in request.args:
87 return h.redirect_to(u'admin.config')
88 return base.render(u'admin/confirm_reset.html', extra_vars={})
89
90 def post(self):
91 # remove sys info items
92 for item in _get_config_items():
93 model.delete_system_info(item)
94 # reset to values in config
95 app_globals.reset()
96 return h.redirect_to(u'admin.config')
97
98
99 class ConfigView(MethodView):
100 def get(self):
101 items = _get_config_options()
102 schema = logic.schema.update_configuration_schema()
103 data = {}
104 for key in schema:
105 data[key] = config.get(key)
106
107 vars = dict(data=data, errors={}, **items)
108
109 return base.render(u'admin/config.html', extra_vars=vars)
110
111 def post(self):
112 try:
113 data_dict = logic.clean_dict(
114 dict_fns.unflatten(
115 logic.tuplize_dict(
116 logic.parse_params(
117 request.form, ignore_keys=CACHE_PARAMETERS))))
118 del data_dict['save']
119 data = logic.get_action(u'config_option_update')({
120 u'user': g.user
121 }, data_dict)
122
123 except logic.ValidationError as e:
124 items = _get_config_options()
125 data = request.form
126 errors = e.error_dict
127 error_summary = e.error_summary
128 vars = dict(
129 data=data,
130 errors=errors,
131 error_summary=error_summary,
132 form_items=items,
133 **items)
134 return base.render(u'admin/config.html', extra_vars=vars)
135
136 return h.redirect_to(u'admin.config')
137
138
139 class TrashView(MethodView):
140 def __init__(self):
141 self.deleted_packages = model.Session.query(
142 model.Package).filter_by(state=model.State.DELETED)
143
144 def get(self):
145 data = dict(deleted_packages=self.deleted_packages)
146 return base.render(u'admin/trash.html', extra_vars=data)
147
148 def post(self):
149 deleted_revisions = model.Session.query(
150 model.Revision).filter_by(state=model.State.DELETED)
151 # NB: we repeat retrieval of of revisions
152 # this is obviously inefficient (but probably not *that* bad)
153 # but has to be done to avoid (odd) sqlalchemy errors (when doing
154 # purge packages) of form: "this object already exists in the
155 # session"
156 msgs = []
157 if (u'purge-packages' in request.form) or (
158 u'purge-revisions' in request.form):
159 if u'purge-packages' in request.form:
160 revs_to_purge = []
161 for pkg in self.deleted_packages:
162 revisions = [x[0] for x in pkg.all_related_revisions]
163 # ensure no accidental purging of other(non-deleted)
164 # packages initially just avoided purging revisions
165 # where non-deleted packages were affected
166 # however this lead to confusing outcomes e.g.
167 # we succesfully deleted revision in which package
168 # was deleted (so package now active again) but no
169 # other revisions
170 problem = False
171 for r in revisions:
172 affected_pkgs = set(r.packages).\
173 difference(set(self.deleted_packages))
174 if affected_pkgs:
175 msg = _(u'Cannot purge package %s as '
176 u'associated revision %s includes '
177 u'non-deleted packages %s')
178 msg = msg % (pkg.id, r.id,
179 [pkg.id for r in affected_pkgs])
180 msgs.append(msg)
181 problem = True
182 break
183 if not problem:
184 revs_to_purge += [r.id for r in revisions]
185 model.Session.remove()
186 else:
187 revs_to_purge = [rev.id for rev in deleted_revisions]
188 revs_to_purge = list(set(revs_to_purge))
189 for id in revs_to_purge:
190 revision = model.Session.query(model.Revision).get(id)
191 try:
192 # TODO deleting the head revision corrupts the edit
193 # page Ensure that whatever 'head' pointer is used
194 # gets moved down to the next revision
195 model.repo.purge_revision(revision, leave_record=False)
196 except Exception as inst:
197 msg = _(u'Problem purging revision %s: %s') % (id, inst)
198 msgs.append(msg)
199 h.flash_success(_(u'Purge complete'))
200 else:
201 msgs.append(_(u'Action not implemented.'))
202
203 for msg in msgs:
204 h.flash_error(msg)
205 return h.redirect_to(u'admin.trash')
206
207
208 admin.add_url_rule(u'/', view_func=index, strict_slashes=False)
209 admin.add_url_rule(
210 u'/reset_config', view_func=ResetConfigView.as_view(str(u'reset_config')))
211 admin.add_url_rule(u'/config', view_func=ConfigView.as_view(str(u'config')))
212 admin.add_url_rule(u'/trash', view_func=TrashView.as_view(str(u'trash')))
213
[end of ckan/views/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckan/views/admin.py b/ckan/views/admin.py
--- a/ckan/views/admin.py
+++ b/ckan/views/admin.py
@@ -110,11 +110,14 @@
def post(self):
try:
+ req = request.form.copy()
+ req.update(request.files.to_dict())
data_dict = logic.clean_dict(
dict_fns.unflatten(
logic.tuplize_dict(
logic.parse_params(
- request.form, ignore_keys=CACHE_PARAMETERS))))
+ req, ignore_keys=CACHE_PARAMETERS))))
+
del data_dict['save']
data = logic.get_action(u'config_option_update')({
u'user': g.user
|
{"golden_diff": "diff --git a/ckan/views/admin.py b/ckan/views/admin.py\n--- a/ckan/views/admin.py\n+++ b/ckan/views/admin.py\n@@ -110,11 +110,14 @@\n \n def post(self):\n try:\n+ req = request.form.copy()\n+ req.update(request.files.to_dict())\n data_dict = logic.clean_dict(\n dict_fns.unflatten(\n logic.tuplize_dict(\n logic.parse_params(\n- request.form, ignore_keys=CACHE_PARAMETERS))))\n+ req, ignore_keys=CACHE_PARAMETERS))))\n+\n del data_dict['save']\n data = logic.get_action(u'config_option_update')({\n u'user': g.user\n", "issue": "Upload logo is not working\n### CKAN Version if known (or site URL)\r\n\r\n2.8+ \r\n### Please describe the expected behaviour\r\nWhen uploading logo from config page, we should see new logo on the portal\r\n\r\n### Please describe the actual behaviour\r\nLogo is not uploaded \r\n\r\n### What steps can be taken to reproduce the issue? \r\nGo to \r\nhttps://beta.ckan.org/ckan-admin/config\r\n\r\nUpload an image \r\nUpdate config\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\n\nfrom ckan.controllers.home import CACHE_PARAMETERS\nfrom flask import Blueprint\nfrom flask.views import MethodView\n\nimport ckan.lib.app_globals as app_globals\nimport ckan.lib.base as base\nimport ckan.lib.helpers as h\nimport ckan.lib.navl.dictization_functions as dict_fns\nimport ckan.logic as logic\nimport ckan.model as model\nfrom ckan.common import g, _, config, request\n\nlog = logging.getLogger(__name__)\n\nadmin = Blueprint(u'admin', __name__, url_prefix=u'/ckan-admin')\n\n\ndef _get_sysadmins():\n q = model.Session.query(model.User).filter(model.User.sysadmin.is_(True),\n model.User.state == u'active')\n return q\n\n\ndef _get_config_options():\n styles = [{\n u'text': u'Default',\n u'value': u'/base/css/main.css'\n }, {\n u'text': u'Red',\n u'value': u'/base/css/red.css'\n }, {\n u'text': u'Green',\n u'value': u'/base/css/green.css'\n }, {\n u'text': u'Maroon',\n u'value': u'/base/css/maroon.css'\n }, {\n u'text': u'Fuchsia',\n u'value': u'/base/css/fuchsia.css'\n }]\n\n homepages = [{\n u'value': u'1',\n u'text': (u'Introductory area, search, featured'\n u' group and featured organization')\n }, {\n u'value': u'2',\n u'text': (u'Search, stats, introductory area, '\n u'featured organization and featured group')\n }, {\n u'value': u'3',\n u'text': u'Search, introductory area and stats'\n }]\n\n return dict(styles=styles, homepages=homepages)\n\n\ndef _get_config_items():\n return [\n u'ckan.site_title', u'ckan.main_css', u'ckan.site_description',\n u'ckan.site_logo', u'ckan.site_about', u'ckan.site_intro_text',\n u'ckan.site_custom_css', u'ckan.homepage_style'\n ]\n\n\[email protected]_request\ndef before_request():\n try:\n context = dict(model=model, user=g.user, auth_user_obj=g.userobj)\n logic.check_access(u'sysadmin', context)\n except logic.NotAuthorized:\n base.abort(403, _(u'Need to be system administrator to administer'))\n\n\ndef index():\n data = dict(sysadmins=[a.name for a in _get_sysadmins()])\n return base.render(u'admin/index.html', extra_vars=data)\n\n\nclass ResetConfigView(MethodView):\n def get(self):\n if u'cancel' in request.args:\n return h.redirect_to(u'admin.config')\n return base.render(u'admin/confirm_reset.html', extra_vars={})\n\n def post(self):\n # remove sys info items\n for item in _get_config_items():\n model.delete_system_info(item)\n # reset to values in config\n app_globals.reset()\n return h.redirect_to(u'admin.config')\n\n\nclass ConfigView(MethodView):\n def get(self):\n items = _get_config_options()\n 
schema = logic.schema.update_configuration_schema()\n data = {}\n for key in schema:\n data[key] = config.get(key)\n\n vars = dict(data=data, errors={}, **items)\n\n return base.render(u'admin/config.html', extra_vars=vars)\n\n def post(self):\n try:\n data_dict = logic.clean_dict(\n dict_fns.unflatten(\n logic.tuplize_dict(\n logic.parse_params(\n request.form, ignore_keys=CACHE_PARAMETERS))))\n del data_dict['save']\n data = logic.get_action(u'config_option_update')({\n u'user': g.user\n }, data_dict)\n\n except logic.ValidationError as e:\n items = _get_config_options()\n data = request.form\n errors = e.error_dict\n error_summary = e.error_summary\n vars = dict(\n data=data,\n errors=errors,\n error_summary=error_summary,\n form_items=items,\n **items)\n return base.render(u'admin/config.html', extra_vars=vars)\n\n return h.redirect_to(u'admin.config')\n\n\nclass TrashView(MethodView):\n def __init__(self):\n self.deleted_packages = model.Session.query(\n model.Package).filter_by(state=model.State.DELETED)\n\n def get(self):\n data = dict(deleted_packages=self.deleted_packages)\n return base.render(u'admin/trash.html', extra_vars=data)\n\n def post(self):\n deleted_revisions = model.Session.query(\n model.Revision).filter_by(state=model.State.DELETED)\n # NB: we repeat retrieval of of revisions\n # this is obviously inefficient (but probably not *that* bad)\n # but has to be done to avoid (odd) sqlalchemy errors (when doing\n # purge packages) of form: \"this object already exists in the\n # session\"\n msgs = []\n if (u'purge-packages' in request.form) or (\n u'purge-revisions' in request.form):\n if u'purge-packages' in request.form:\n revs_to_purge = []\n for pkg in self.deleted_packages:\n revisions = [x[0] for x in pkg.all_related_revisions]\n # ensure no accidental purging of other(non-deleted)\n # packages initially just avoided purging revisions\n # where non-deleted packages were affected\n # however this lead to confusing outcomes e.g.\n # we succesfully deleted revision in which package\n # was deleted (so package now active again) but no\n # other revisions\n problem = False\n for r in revisions:\n affected_pkgs = set(r.packages).\\\n difference(set(self.deleted_packages))\n if affected_pkgs:\n msg = _(u'Cannot purge package %s as '\n u'associated revision %s includes '\n u'non-deleted packages %s')\n msg = msg % (pkg.id, r.id,\n [pkg.id for r in affected_pkgs])\n msgs.append(msg)\n problem = True\n break\n if not problem:\n revs_to_purge += [r.id for r in revisions]\n model.Session.remove()\n else:\n revs_to_purge = [rev.id for rev in deleted_revisions]\n revs_to_purge = list(set(revs_to_purge))\n for id in revs_to_purge:\n revision = model.Session.query(model.Revision).get(id)\n try:\n # TODO deleting the head revision corrupts the edit\n # page Ensure that whatever 'head' pointer is used\n # gets moved down to the next revision\n model.repo.purge_revision(revision, leave_record=False)\n except Exception as inst:\n msg = _(u'Problem purging revision %s: %s') % (id, inst)\n msgs.append(msg)\n h.flash_success(_(u'Purge complete'))\n else:\n msgs.append(_(u'Action not implemented.'))\n\n for msg in msgs:\n h.flash_error(msg)\n return h.redirect_to(u'admin.trash')\n\n\nadmin.add_url_rule(u'/', view_func=index, strict_slashes=False)\nadmin.add_url_rule(\n u'/reset_config', view_func=ResetConfigView.as_view(str(u'reset_config')))\nadmin.add_url_rule(u'/config', view_func=ConfigView.as_view(str(u'config')))\nadmin.add_url_rule(u'/trash', 
view_func=TrashView.as_view(str(u'trash')))\n", "path": "ckan/views/admin.py"}]}
| 2,817 | 155 |
gh_patches_debug_37299
|
rasdani/github-patches
|
git_diff
|
evennia__evennia-1725
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
develop: Website not correctly logging in when logged in in-game
#### Brief summary of issue / Description of requested feature:
When logging in in-game, the website does not detect it, but if you try to log in you still get a warning saying that you are already logged in.
#### Steps to reproduce the issue / Reasons for adding feature:
1. Don't log into the website but open the web client
2. Log into the webclient as usual.
3. Go back to the website - you are not shown as logged in, but clicking `Log in` will still give you an error.
#### Error output / Expected result of feature
When logged into the game, this should be reflected by the website. See closed #1063.
#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:
This is a regression, probably from changes in the session handling/sharing between client and website.
</issue>
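A minimal sketch of the reordering this report points toward, assuming the shared-login middleware simply needs to synchronize credentials before the view renders (an illustration only, not necessarily the final patch; `make_shared_login` is the existing classmethod shown in `evennia/web/utils/middleware.py` below):

```python
# Hypothetical sketch of the reordered middleware entry point:
def __call__(self, request):
    # synchronize webclient/website credentials *before* the view runs ...
    self.make_shared_login(request)
    # ... so the rendered page already reflects the in-game login
    return self.get_response(request)
```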
<code>
[start of evennia/web/utils/middleware.py]
1 from django.contrib.auth import authenticate, login
2 from evennia.accounts.models import AccountDB
3 from evennia.utils import logger
4
5 class SharedLoginMiddleware(object):
6 """
7 Handle the shared login between website and webclient.
8
9 """
10 def __init__(self, get_response):
11 # One-time configuration and initialization.
12 self.get_response = get_response
13
14 def __call__(self, request):
15 # Code to be executed for each request before
16 # the view (and later middleware) are called.
17
18 # Process view
19 response = self.get_response(request)
20
21 # Code to be executed for each request/response after
22 # the view is called.
23
24 # Synchronize credentials
25 self.make_shared_login(request)
26
27 # Return processed view
28 return response
29
30 @classmethod
31 def make_shared_login(cls, request):
32 csession = request.session
33 account = request.user
34 website_uid = csession.get("website_authenticated_uid", None)
35 webclient_uid = csession.get("webclient_authenticated_uid", None)
36
37 if not csession.session_key:
38 # this is necessary to build the sessid key
39 csession.save()
40
41 if account.is_authenticated():
42 # Logged into website
43 if not website_uid:
44 # fresh website login (just from login page)
45 csession["website_authenticated_uid"] = account.id
46 if webclient_uid is None:
47 # auto-login web client
48 csession["webclient_authenticated_uid"] = account.id
49
50 elif webclient_uid:
51 # Not logged into website, but logged into webclient
52 if not website_uid:
53 csession["website_authenticated_uid"] = account.id
54 account = AccountDB.objects.get(id=webclient_uid)
55 try:
56 # calls our custom authenticate, in web/utils/backend.py
57 authenticate(autologin=account)
58 login(request, account)
59 except AttributeError:
60 logger.log_trace()
[end of evennia/web/utils/middleware.py]
[start of evennia/web/webclient/views.py]
1
2 """
3 This contains a simple view for rendering the webclient
4 page and serve it eventual static content.
5
6 """
7 from __future__ import print_function
8 from django.shortcuts import render
9 from django.contrib.auth import login, authenticate
10
11 from evennia.accounts.models import AccountDB
12 from evennia.utils import logger
13
14
15 def _shared_login(request):
16 """
17 Handle the shared login between website and webclient.
18
19 """
20 csession = request.session
21 account = request.user
22 # these can have 3 values:
23 # None - previously unused (auto-login)
24 # False - actively logged out (don't auto-login)
25 # <uid> - logged in User/Account id
26 website_uid = csession.get("website_authenticated_uid", None)
27 webclient_uid = csession.get("webclient_authenticated_uid", None)
28
29 # check if user has authenticated to website
30 if not csession.session_key:
31 # this is necessary to build the sessid key
32 csession.save()
33
34 if webclient_uid:
35 # The webclient has previously registered a login to this browser_session
36 if not account.is_authenticated() and not website_uid:
37 try:
38 account = AccountDB.objects.get(id=webclient_uid)
39 except AccountDB.DoesNotExist:
40 # this can happen e.g. for guest accounts or deletions
41 csession["website_authenticated_uid"] = False
42 csession["webclient_authenticated_uid"] = False
43 return
44 try:
45 # calls our custom authenticate in web/utils/backends.py
46 account = authenticate(autologin=account)
47 login(request, account)
48 csession["website_authenticated_uid"] = webclient_uid
49 except AttributeError:
50 logger.log_trace()
51
52
53 def webclient(request):
54 """
55 Webclient page template loading.
56
57 """
58 # handle webclient-website shared login
59 _shared_login(request)
60
61 # make sure to store the browser session's hash so the webclient can get to it!
62 pagevars = {'browser_sessid': request.session.session_key}
63
64 return render(request, 'webclient.html', pagevars)
65
[end of evennia/web/webclient/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evennia/web/utils/middleware.py b/evennia/web/utils/middleware.py
--- a/evennia/web/utils/middleware.py
+++ b/evennia/web/utils/middleware.py
@@ -15,15 +15,16 @@
# Code to be executed for each request before
# the view (and later middleware) are called.
+ # Synchronize credentials between webclient and website
+ # Must be performed *before* rendering the view (issue #1723)
+ self.make_shared_login(request)
+
# Process view
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
- # Synchronize credentials
- self.make_shared_login(request)
-
# Return processed view
return response
diff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py
--- a/evennia/web/webclient/views.py
+++ b/evennia/web/webclient/views.py
@@ -12,52 +12,13 @@
from evennia.utils import logger
-def _shared_login(request):
- """
- Handle the shared login between website and webclient.
-
- """
- csession = request.session
- account = request.user
- # these can have 3 values:
- # None - previously unused (auto-login)
- # False - actively logged out (don't auto-login)
- # <uid> - logged in User/Account id
- website_uid = csession.get("website_authenticated_uid", None)
- webclient_uid = csession.get("webclient_authenticated_uid", None)
-
- # check if user has authenticated to website
- if not csession.session_key:
- # this is necessary to build the sessid key
- csession.save()
-
- if webclient_uid:
- # The webclient has previously registered a login to this browser_session
- if not account.is_authenticated() and not website_uid:
- try:
- account = AccountDB.objects.get(id=webclient_uid)
- except AccountDB.DoesNotExist:
- # this can happen e.g. for guest accounts or deletions
- csession["website_authenticated_uid"] = False
- csession["webclient_authenticated_uid"] = False
- return
- try:
- # calls our custom authenticate in web/utils/backends.py
- account = authenticate(autologin=account)
- login(request, account)
- csession["website_authenticated_uid"] = webclient_uid
- except AttributeError:
- logger.log_trace()
-
-
def webclient(request):
"""
Webclient page template loading.
"""
- # handle webclient-website shared login
- _shared_login(request)
-
+ # auto-login is now handled by evennia.web.utils.middleware
+
# make sure to store the browser session's hash so the webclient can get to it!
pagevars = {'browser_sessid': request.session.session_key}
|
{"golden_diff": "diff --git a/evennia/web/utils/middleware.py b/evennia/web/utils/middleware.py\n--- a/evennia/web/utils/middleware.py\n+++ b/evennia/web/utils/middleware.py\n@@ -15,15 +15,16 @@\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n+ # Synchronize credentials between webclient and website\n+ # Must be performed *before* rendering the view (issue #1723)\n+ self.make_shared_login(request)\n+ \n # Process view\n response = self.get_response(request)\n \n # Code to be executed for each request/response after\n # the view is called.\n \n- # Synchronize credentials\n- self.make_shared_login(request)\n- \n # Return processed view\n return response\n \ndiff --git a/evennia/web/webclient/views.py b/evennia/web/webclient/views.py\n--- a/evennia/web/webclient/views.py\n+++ b/evennia/web/webclient/views.py\n@@ -12,52 +12,13 @@\n from evennia.utils import logger\n \n \n-def _shared_login(request):\n- \"\"\"\n- Handle the shared login between website and webclient.\n-\n- \"\"\"\n- csession = request.session\n- account = request.user\n- # these can have 3 values:\n- # None - previously unused (auto-login)\n- # False - actively logged out (don't auto-login)\n- # <uid> - logged in User/Account id\n- website_uid = csession.get(\"website_authenticated_uid\", None)\n- webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n-\n- # check if user has authenticated to website\n- if not csession.session_key:\n- # this is necessary to build the sessid key\n- csession.save()\n-\n- if webclient_uid:\n- # The webclient has previously registered a login to this browser_session\n- if not account.is_authenticated() and not website_uid:\n- try:\n- account = AccountDB.objects.get(id=webclient_uid)\n- except AccountDB.DoesNotExist:\n- # this can happen e.g. for guest accounts or deletions\n- csession[\"website_authenticated_uid\"] = False\n- csession[\"webclient_authenticated_uid\"] = False\n- return\n- try:\n- # calls our custom authenticate in web/utils/backends.py\n- account = authenticate(autologin=account)\n- login(request, account)\n- csession[\"website_authenticated_uid\"] = webclient_uid\n- except AttributeError:\n- logger.log_trace()\n-\n-\n def webclient(request):\n \"\"\"\n Webclient page template loading.\n \n \"\"\"\n- # handle webclient-website shared login\n- _shared_login(request)\n-\n+ # auto-login is now handled by evennia.web.utils.middleware\n+ \n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n", "issue": "develop: Website not correctly logging in when logged in in-game\n#### Brief summary of issue / Description of requested feature:\r\n\r\nWhen logging in in-game, the website does not detect it, but if you try to log in you still get a warning saying that you are already logged in.\r\n\r\n#### Steps to reproduce the issue / Reasons for adding feature:\r\n\r\n1. Don't log into the website but open the web client\r\n2. Log into the webclient as usual.\r\n3. Go back to the website - you are not shown as logged in, but clicking `Log in` will still give you an error. \r\n\r\n#### Error output / Expected result of feature\r\n\r\nWhen logged into the game, this should be reflected by the web site. See closed #1063. 
\r\n\r\n#### Extra information, such as Evennia revision/repo/branch, operating system and ideas for how to solve / implement:\r\n\r\nThis is a regression, probably from changes in the session handling/sharing between client and website.\n", "before_files": [{"content": "from django.contrib.auth import authenticate, login\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\nclass SharedLoginMiddleware(object):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n def __init__(self, get_response):\n # One-time configuration and initialization.\n self.get_response = get_response\n \n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n \n # Process view\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n \n # Synchronize credentials\n self.make_shared_login(request)\n \n # Return processed view\n return response\n \n @classmethod\n def make_shared_login(cls, request):\n csession = request.session\n account = request.user\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n \n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n \n if account.is_authenticated():\n # Logged into website\n if not website_uid:\n # fresh website login (just from login page)\n csession[\"website_authenticated_uid\"] = account.id\n if webclient_uid is None:\n # auto-login web client\n csession[\"webclient_authenticated_uid\"] = account.id\n \n elif webclient_uid:\n # Not logged into website, but logged into webclient\n if not website_uid:\n csession[\"website_authenticated_uid\"] = account.id\n account = AccountDB.objects.get(id=webclient_uid)\n try:\n # calls our custom authenticate, in web/utils/backend.py\n authenticate(autologin=account)\n login(request, account)\n except AttributeError:\n logger.log_trace()", "path": "evennia/web/utils/middleware.py"}, {"content": "\n\"\"\"\nThis contains a simple view for rendering the webclient\npage and serve it eventual static content.\n\n\"\"\"\nfrom __future__ import print_function\nfrom django.shortcuts import render\nfrom django.contrib.auth import login, authenticate\n\nfrom evennia.accounts.models import AccountDB\nfrom evennia.utils import logger\n\n\ndef _shared_login(request):\n \"\"\"\n Handle the shared login between website and webclient.\n\n \"\"\"\n csession = request.session\n account = request.user\n # these can have 3 values:\n # None - previously unused (auto-login)\n # False - actively logged out (don't auto-login)\n # <uid> - logged in User/Account id\n website_uid = csession.get(\"website_authenticated_uid\", None)\n webclient_uid = csession.get(\"webclient_authenticated_uid\", None)\n\n # check if user has authenticated to website\n if not csession.session_key:\n # this is necessary to build the sessid key\n csession.save()\n\n if webclient_uid:\n # The webclient has previously registered a login to this browser_session\n if not account.is_authenticated() and not website_uid:\n try:\n account = AccountDB.objects.get(id=webclient_uid)\n except AccountDB.DoesNotExist:\n # this can happen e.g. 
for guest accounts or deletions\n csession[\"website_authenticated_uid\"] = False\n csession[\"webclient_authenticated_uid\"] = False\n return\n try:\n # calls our custom authenticate in web/utils/backends.py\n account = authenticate(autologin=account)\n login(request, account)\n csession[\"website_authenticated_uid\"] = webclient_uid\n except AttributeError:\n logger.log_trace()\n\n\ndef webclient(request):\n \"\"\"\n Webclient page template loading.\n\n \"\"\"\n # handle webclient-website shared login\n _shared_login(request)\n\n # make sure to store the browser session's hash so the webclient can get to it!\n pagevars = {'browser_sessid': request.session.session_key}\n\n return render(request, 'webclient.html', pagevars)\n", "path": "evennia/web/webclient/views.py"}]}
| 1,851 | 674 |
gh_patches_debug_10430
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1301
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cookie value from request header is not getting sanitized
Hi,
The APM python agent is silently throwing an error when trying to sanitize the cookie value from the request header. On debugging, I found that we are reading the cookie value incorrectly from the context. 
Instead of reading the cookie value using `event["context"]["request"]["headers"]["Cookie"]`, we read it as `event["context"]["request"]["headers"]["cookie"]`.
https://github.com/elastic/apm-agent-python/blob/31c858595db709c766226e5cddb826d27e377d9d/elasticapm/processors.py#L116
</issue>
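A small illustrative helper for the capitalization mismatch described above (hypothetical names; the shipped fix may look different):

```python
def _get_cookie_header(headers):
    """Hypothetical helper: request headers may carry the cookie under either
    "cookie" or "Cookie", so try both spellings before giving up."""
    for key in ("cookie", "Cookie"):
        if key in headers:
            return key, headers[key]
    return None, None
```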
<code>
[start of elasticapm/processors.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 import warnings
33 from collections import defaultdict
34
35 from elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES, ERROR, MASK, SPAN, TRANSACTION
36 from elasticapm.utils import compat, varmap
37 from elasticapm.utils.encoding import force_text
38 from elasticapm.utils.stacks import get_lines_from_file
39
40
41 def for_events(*events):
42 """
43 :param events: list of event types
44 Only calls wrapped function if given event_type is in list of events
45 """
46 events = set(events)
47
48 def wrap(func):
49 func.event_types = events
50 return func
51
52 return wrap
53
54
55 @for_events(ERROR, TRANSACTION)
56 def remove_http_request_body(client, event):
57 """
58 Removes request.body from context
59 :param client: an ElasticAPM client
60 :param event: a transaction or error event
61 :return: The modified event
62 """
63 if "context" in event and "request" in event["context"]:
64 event["context"]["request"].pop("body", None)
65 return event
66
67
68 @for_events(ERROR, SPAN)
69 def remove_stacktrace_locals(client, event):
70 """
71 Removes local variables from any frames.
72 :param client: an ElasticAPM client
73 :param event: a transaction or error event
74 :return: The modified event
75 """
76 func = lambda frame: frame.pop("vars", None)
77 return _process_stack_frames(event, func)
78
79
80 @for_events(ERROR, SPAN)
81 def sanitize_stacktrace_locals(client, event):
82 """
83 Sanitizes local variables in all frames
84 :param client: an ElasticAPM client
85 :param event: a transaction or error event
86 :return: The modified event
87 """
88
89 def func(frame):
90 if "vars" in frame:
91 frame["vars"] = varmap(_sanitize, frame["vars"], sanitize_field_names=client.config.sanitize_field_names)
92
93 return _process_stack_frames(event, func)
94
95
96 @for_events(ERROR, TRANSACTION)
97 def sanitize_http_request_cookies(client, event):
98 """
99 Sanitizes http request cookies
100 :param client: an ElasticAPM client
101 :param event: a transaction or error event
102 :return: The modified event
103 """
104
105 # sanitize request.cookies dict
106 try:
107 cookies = event["context"]["request"]["cookies"]
108 event["context"]["request"]["cookies"] = varmap(
109 _sanitize, cookies, sanitize_field_names=client.config.sanitize_field_names
110 )
111 except (KeyError, TypeError):
112 pass
113
114 # sanitize request.header.cookie string
115 try:
116 cookie_string = force_text(event["context"]["request"]["headers"]["cookie"], errors="replace")
117 event["context"]["request"]["headers"]["cookie"] = _sanitize_string(
118 cookie_string, "; ", "=", sanitize_field_names=client.config.sanitize_field_names
119 )
120 except (KeyError, TypeError):
121 pass
122 return event
123
124
125 @for_events(ERROR, TRANSACTION)
126 def sanitize_http_response_cookies(client, event):
127 """
128 Sanitizes the set-cookie header of the response
129 :param client: an ElasticAPM client
130 :param event: a transaction or error event
131 :return: The modified event
132 """
133 try:
134 cookie_string = force_text(event["context"]["response"]["headers"]["set-cookie"], errors="replace")
135 event["context"]["response"]["headers"]["set-cookie"] = _sanitize_string(
136 cookie_string, ";", "=", sanitize_field_names=client.config.sanitize_field_names
137 )
138 except (KeyError, TypeError):
139 pass
140 return event
141
142
143 @for_events(ERROR, TRANSACTION)
144 def sanitize_http_headers(client, event):
145 """
146 Sanitizes http request/response headers
147 :param client: an ElasticAPM client
148 :param event: a transaction or error event
149 :return: The modified event
150 """
151 # request headers
152 try:
153 headers = event["context"]["request"]["headers"]
154 event["context"]["request"]["headers"] = varmap(
155 _sanitize, headers, sanitize_field_names=client.config.sanitize_field_names
156 )
157 except (KeyError, TypeError):
158 pass
159
160 # response headers
161 try:
162 headers = event["context"]["response"]["headers"]
163 event["context"]["response"]["headers"] = varmap(
164 _sanitize, headers, sanitize_field_names=client.config.sanitize_field_names
165 )
166 except (KeyError, TypeError):
167 pass
168
169 return event
170
171
172 @for_events(ERROR, TRANSACTION)
173 def sanitize_http_wsgi_env(client, event):
174 """
175 Sanitizes WSGI environment variables
176 :param client: an ElasticAPM client
177 :param event: a transaction or error event
178 :return: The modified event
179 """
180 try:
181 env = event["context"]["request"]["env"]
182 event["context"]["request"]["env"] = varmap(
183 _sanitize, env, sanitize_field_names=client.config.sanitize_field_names
184 )
185 except (KeyError, TypeError):
186 pass
187 return event
188
189
190 @for_events(ERROR, TRANSACTION)
191 def sanitize_http_request_body(client, event):
192 """
193 Sanitizes http request body. This only works if the request body
194 is a query-encoded string. Other types (e.g. JSON) are not handled by
195 this sanitizer.
196 :param client: an ElasticAPM client
197 :param event: a transaction or error event
198 :return: The modified event
199 """
200 try:
201 body = force_text(event["context"]["request"]["body"], errors="replace")
202 except (KeyError, TypeError):
203 return event
204 if "=" in body:
205 sanitized_query_string = _sanitize_string(
206 body, "&", "=", sanitize_field_names=client.config.sanitize_field_names
207 )
208 event["context"]["request"]["body"] = sanitized_query_string
209 return event
210
211
212 @for_events(ERROR, SPAN)
213 def add_context_lines_to_frames(client, event):
214 # divide frames up into source files before reading from disk. This should help
215 # with utilizing the disk cache better
216 #
217 # TODO: further optimize by only opening each file once and reading all needed source
218 # TODO: blocks at once.
219 per_file = defaultdict(list)
220 _process_stack_frames(
221 event,
222 lambda frame: per_file[frame["context_metadata"][0]].append(frame) if "context_metadata" in frame else None,
223 )
224 for filename, frames in compat.iteritems(per_file):
225 for frame in frames:
226 # context_metadata key has been set in elasticapm.utils.stacks.get_frame_info for
227 # all frames for which we should gather source code context lines
228 fname, lineno, context_lines, loader, module_name = frame.pop("context_metadata")
229 pre_context, context_line, post_context = get_lines_from_file(
230 fname, lineno, context_lines, loader, module_name
231 )
232 if context_line:
233 frame["pre_context"] = pre_context
234 frame["context_line"] = context_line
235 frame["post_context"] = post_context
236 return event
237
238
239 @for_events(ERROR, SPAN)
240 def mark_in_app_frames(client, event):
241 warnings.warn(
242 "The mark_in_app_frames processor is deprecated and can be removed from your PROCESSORS setting",
243 DeprecationWarning,
244 )
245 return event
246
247
248 def _sanitize(key, value, **kwargs):
249 if "sanitize_field_names" in kwargs:
250 sanitize_field_names = kwargs["sanitize_field_names"]
251 else:
252 sanitize_field_names = BASE_SANITIZE_FIELD_NAMES
253
254 if value is None:
255 return
256
257 if isinstance(value, dict):
258 # varmap will call _sanitize on each k:v pair of the dict, so we don't
259 # have to do anything with dicts here
260 return value
261
262 if not key: # key can be a NoneType
263 return value
264
265 key = key.lower()
266 for field in sanitize_field_names:
267 if field.match(key.strip()):
268 # store mask as a fixed length for security
269 return MASK
270 return value
271
272
273 def _sanitize_string(unsanitized, itemsep, kvsep, sanitize_field_names=BASE_SANITIZE_FIELD_NAMES):
274 """
275 sanitizes a string that contains multiple key/value items
276 :param unsanitized: the unsanitized string
277 :param itemsep: string that separates items
278 :param kvsep: string that separates key from value
279 :param sanitize_field_names: field names to pass to _sanitize
280 :return: a sanitized string
281 """
282 sanitized = []
283 kvs = unsanitized.split(itemsep)
284 for kv in kvs:
285 kv = kv.split(kvsep)
286 if len(kv) == 2:
287 sanitized.append((kv[0], _sanitize(kv[0], kv[1], sanitize_field_names=sanitize_field_names)))
288 else:
289 sanitized.append(kv)
290 return itemsep.join(kvsep.join(kv) for kv in sanitized)
291
292
293 def _process_stack_frames(event, func):
294 if "stacktrace" in event:
295 for frame in event["stacktrace"]:
296 func(frame)
297 # an error can have two stacktraces, one in "exception", one in "log"
298 if "exception" in event and "stacktrace" in event["exception"]:
299 for frame in event["exception"]["stacktrace"]:
300 func(frame)
301 # check for chained exceptions
302 cause = event["exception"].get("cause", None)
303 while cause:
304 if "stacktrace" in cause[0]:
305 for frame in cause[0]["stacktrace"]:
306 func(frame)
307 cause = cause[0].get("cause", None)
308 if "log" in event and "stacktrace" in event["log"]:
309 for frame in event["log"]["stacktrace"]:
310 func(frame)
311 return event
312
[end of elasticapm/processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/processors.py b/elasticapm/processors.py
--- a/elasticapm/processors.py
+++ b/elasticapm/processors.py
@@ -118,7 +118,14 @@
cookie_string, "; ", "=", sanitize_field_names=client.config.sanitize_field_names
)
except (KeyError, TypeError):
- pass
+ try:
+ # Sometimes it's Cookie, not cookie
+ cookie_string = force_text(event["context"]["request"]["headers"]["Cookie"], errors="replace")
+ event["context"]["request"]["headers"]["Cookie"] = _sanitize_string(
+ cookie_string, "; ", "=", sanitize_field_names=client.config.sanitize_field_names
+ )
+ except (KeyError, TypeError):
+ pass
return event
|
{"golden_diff": "diff --git a/elasticapm/processors.py b/elasticapm/processors.py\n--- a/elasticapm/processors.py\n+++ b/elasticapm/processors.py\n@@ -118,7 +118,14 @@\n cookie_string, \"; \", \"=\", sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n- pass\n+ try:\n+ # Sometimes it's Cookie, not cookie\n+ cookie_string = force_text(event[\"context\"][\"request\"][\"headers\"][\"Cookie\"], errors=\"replace\")\n+ event[\"context\"][\"request\"][\"headers\"][\"Cookie\"] = _sanitize_string(\n+ cookie_string, \"; \", \"=\", sanitize_field_names=client.config.sanitize_field_names\n+ )\n+ except (KeyError, TypeError):\n+ pass\n return event\n", "issue": "Cookie value from request header is not getting sanitized\nHi, \r\n\r\nThe APM python agent is silently throwing error when trying to sanitize the cookie value from request header. On debugging, I found that we are reading the cookie value incorrectly from context. \r\n \r\nInstead of reading the cookie value using `event[\"context\"][\"request\"][\"headers\"][\"Cookie\"]`, we read it as `event[\"context\"][\"request\"][\"headers\"][\"cookie\"]`. \r\n\r\nhttps://github.com/elastic/apm-agent-python/blob/31c858595db709c766226e5cddb826d27e377d9d/elasticapm/processors.py#L116\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport warnings\nfrom collections import defaultdict\n\nfrom elasticapm.conf.constants import BASE_SANITIZE_FIELD_NAMES, ERROR, MASK, SPAN, TRANSACTION\nfrom elasticapm.utils import compat, varmap\nfrom elasticapm.utils.encoding import force_text\nfrom elasticapm.utils.stacks import get_lines_from_file\n\n\ndef for_events(*events):\n \"\"\"\n :param events: list of event types\n Only calls wrapped function if given event_type is in list of events\n \"\"\"\n events = set(events)\n\n def wrap(func):\n func.event_types = events\n return func\n\n return wrap\n\n\n@for_events(ERROR, TRANSACTION)\ndef remove_http_request_body(client, event):\n \"\"\"\n Removes request.body from context\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n if \"context\" in event and \"request\" in event[\"context\"]:\n event[\"context\"][\"request\"].pop(\"body\", None)\n return event\n\n\n@for_events(ERROR, SPAN)\ndef remove_stacktrace_locals(client, event):\n \"\"\"\n Removes local variables from any frames.\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n func = lambda frame: frame.pop(\"vars\", None)\n return _process_stack_frames(event, func)\n\n\n@for_events(ERROR, SPAN)\ndef sanitize_stacktrace_locals(client, event):\n \"\"\"\n Sanitizes local variables in all frames\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n\n def func(frame):\n if \"vars\" in frame:\n frame[\"vars\"] = varmap(_sanitize, frame[\"vars\"], sanitize_field_names=client.config.sanitize_field_names)\n\n return _process_stack_frames(event, func)\n\n\n@for_events(ERROR, TRANSACTION)\ndef sanitize_http_request_cookies(client, event):\n \"\"\"\n Sanitizes http request cookies\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n\n # sanitize request.cookies dict\n try:\n cookies = event[\"context\"][\"request\"][\"cookies\"]\n event[\"context\"][\"request\"][\"cookies\"] = varmap(\n _sanitize, cookies, sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n pass\n\n # sanitize request.header.cookie string\n try:\n cookie_string = force_text(event[\"context\"][\"request\"][\"headers\"][\"cookie\"], errors=\"replace\")\n event[\"context\"][\"request\"][\"headers\"][\"cookie\"] = _sanitize_string(\n cookie_string, \"; \", \"=\", sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n pass\n return event\n\n\n@for_events(ERROR, TRANSACTION)\ndef sanitize_http_response_cookies(client, event):\n \"\"\"\n Sanitizes the set-cookie header of the response\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n try:\n cookie_string = force_text(event[\"context\"][\"response\"][\"headers\"][\"set-cookie\"], errors=\"replace\")\n 
event[\"context\"][\"response\"][\"headers\"][\"set-cookie\"] = _sanitize_string(\n cookie_string, \";\", \"=\", sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n pass\n return event\n\n\n@for_events(ERROR, TRANSACTION)\ndef sanitize_http_headers(client, event):\n \"\"\"\n Sanitizes http request/response headers\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n # request headers\n try:\n headers = event[\"context\"][\"request\"][\"headers\"]\n event[\"context\"][\"request\"][\"headers\"] = varmap(\n _sanitize, headers, sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n pass\n\n # response headers\n try:\n headers = event[\"context\"][\"response\"][\"headers\"]\n event[\"context\"][\"response\"][\"headers\"] = varmap(\n _sanitize, headers, sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n pass\n\n return event\n\n\n@for_events(ERROR, TRANSACTION)\ndef sanitize_http_wsgi_env(client, event):\n \"\"\"\n Sanitizes WSGI environment variables\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n try:\n env = event[\"context\"][\"request\"][\"env\"]\n event[\"context\"][\"request\"][\"env\"] = varmap(\n _sanitize, env, sanitize_field_names=client.config.sanitize_field_names\n )\n except (KeyError, TypeError):\n pass\n return event\n\n\n@for_events(ERROR, TRANSACTION)\ndef sanitize_http_request_body(client, event):\n \"\"\"\n Sanitizes http request body. This only works if the request body\n is a query-encoded string. Other types (e.g. JSON) are not handled by\n this sanitizer.\n :param client: an ElasticAPM client\n :param event: a transaction or error event\n :return: The modified event\n \"\"\"\n try:\n body = force_text(event[\"context\"][\"request\"][\"body\"], errors=\"replace\")\n except (KeyError, TypeError):\n return event\n if \"=\" in body:\n sanitized_query_string = _sanitize_string(\n body, \"&\", \"=\", sanitize_field_names=client.config.sanitize_field_names\n )\n event[\"context\"][\"request\"][\"body\"] = sanitized_query_string\n return event\n\n\n@for_events(ERROR, SPAN)\ndef add_context_lines_to_frames(client, event):\n # divide frames up into source files before reading from disk. 
This should help\n # with utilizing the disk cache better\n #\n # TODO: further optimize by only opening each file once and reading all needed source\n # TODO: blocks at once.\n per_file = defaultdict(list)\n _process_stack_frames(\n event,\n lambda frame: per_file[frame[\"context_metadata\"][0]].append(frame) if \"context_metadata\" in frame else None,\n )\n for filename, frames in compat.iteritems(per_file):\n for frame in frames:\n # context_metadata key has been set in elasticapm.utils.stacks.get_frame_info for\n # all frames for which we should gather source code context lines\n fname, lineno, context_lines, loader, module_name = frame.pop(\"context_metadata\")\n pre_context, context_line, post_context = get_lines_from_file(\n fname, lineno, context_lines, loader, module_name\n )\n if context_line:\n frame[\"pre_context\"] = pre_context\n frame[\"context_line\"] = context_line\n frame[\"post_context\"] = post_context\n return event\n\n\n@for_events(ERROR, SPAN)\ndef mark_in_app_frames(client, event):\n warnings.warn(\n \"The mark_in_app_frames processor is deprecated and can be removed from your PROCESSORS setting\",\n DeprecationWarning,\n )\n return event\n\n\ndef _sanitize(key, value, **kwargs):\n if \"sanitize_field_names\" in kwargs:\n sanitize_field_names = kwargs[\"sanitize_field_names\"]\n else:\n sanitize_field_names = BASE_SANITIZE_FIELD_NAMES\n\n if value is None:\n return\n\n if isinstance(value, dict):\n # varmap will call _sanitize on each k:v pair of the dict, so we don't\n # have to do anything with dicts here\n return value\n\n if not key: # key can be a NoneType\n return value\n\n key = key.lower()\n for field in sanitize_field_names:\n if field.match(key.strip()):\n # store mask as a fixed length for security\n return MASK\n return value\n\n\ndef _sanitize_string(unsanitized, itemsep, kvsep, sanitize_field_names=BASE_SANITIZE_FIELD_NAMES):\n \"\"\"\n sanitizes a string that contains multiple key/value items\n :param unsanitized: the unsanitized string\n :param itemsep: string that separates items\n :param kvsep: string that separates key from value\n :param sanitize_field_names: field names to pass to _sanitize\n :return: a sanitized string\n \"\"\"\n sanitized = []\n kvs = unsanitized.split(itemsep)\n for kv in kvs:\n kv = kv.split(kvsep)\n if len(kv) == 2:\n sanitized.append((kv[0], _sanitize(kv[0], kv[1], sanitize_field_names=sanitize_field_names)))\n else:\n sanitized.append(kv)\n return itemsep.join(kvsep.join(kv) for kv in sanitized)\n\n\ndef _process_stack_frames(event, func):\n if \"stacktrace\" in event:\n for frame in event[\"stacktrace\"]:\n func(frame)\n # an error can have two stacktraces, one in \"exception\", one in \"log\"\n if \"exception\" in event and \"stacktrace\" in event[\"exception\"]:\n for frame in event[\"exception\"][\"stacktrace\"]:\n func(frame)\n # check for chained exceptions\n cause = event[\"exception\"].get(\"cause\", None)\n while cause:\n if \"stacktrace\" in cause[0]:\n for frame in cause[0][\"stacktrace\"]:\n func(frame)\n cause = cause[0].get(\"cause\", None)\n if \"log\" in event and \"stacktrace\" in event[\"log\"]:\n for frame in event[\"log\"][\"stacktrace\"]:\n func(frame)\n return event\n", "path": "elasticapm/processors.py"}]}
| 3,964 | 179 |
gh_patches_debug_14283
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1394
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cache busting for static files when changes need to be loaded
When the JavaScript is updated, there are usually a handful of users who experience broken behavior (#1284) because their browser is still working off a cached version of the previous one. I think using https://docs.djangoproject.com/en/3.2/ref/contrib/staticfiles/#django.contrib.staticfiles.storage.ManifestStaticFilesStorage will resolve this?
</issue>
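A sketch of the settings change that storage backend implies, assuming a stock Django 3.2 setup (the project may end up busting caches a different way):

```python
# Hypothetical settings.py snippet: hash static file names so browsers
# cannot keep serving a stale JavaScript bundle after a deploy.
STATICFILES_STORAGE = (
    "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
)
```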
<code>
[start of bookwyrm/context_processors.py]
1 """ customize the info available in context for rendering templates """
2 from bookwyrm import models, settings
3
4
5 def site_settings(request): # pylint: disable=unused-argument
6 """include the custom info about the site"""
7 request_protocol = "https://"
8 if not request.is_secure():
9 request_protocol = "http://"
10
11 return {
12 "site": models.SiteSettings.objects.get(),
13 "active_announcements": models.Announcement.active_announcements(),
14 "thumbnail_generation_enabled": settings.ENABLE_THUMBNAIL_GENERATION,
15 "media_full_url": settings.MEDIA_FULL_URL,
16 "preview_images_enabled": settings.ENABLE_PREVIEW_IMAGES,
17 "request_protocol": request_protocol,
18 }
19
[end of bookwyrm/context_processors.py]
[start of bookwyrm/settings.py]
1 """ bookwyrm settings and configuration """
2 import os
3 from environs import Env
4
5 import requests
6 from django.utils.translation import gettext_lazy as _
7
8
9 env = Env()
10 DOMAIN = env("DOMAIN")
11 VERSION = "0.0.1"
12
13 PAGE_LENGTH = env("PAGE_LENGTH", 15)
14 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
15
16 # email
17 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
18 EMAIL_HOST = env("EMAIL_HOST")
19 EMAIL_PORT = env("EMAIL_PORT", 587)
20 EMAIL_HOST_USER = env("EMAIL_HOST_USER")
21 EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
22 EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
23 EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
24 DEFAULT_FROM_EMAIL = "admin@{:s}".format(env("DOMAIN"))
25
26 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
27 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
28 LOCALE_PATHS = [
29 os.path.join(BASE_DIR, "locale"),
30 ]
31
32 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
33
34 # Preview image
35 ENABLE_PREVIEW_IMAGES = env.bool("ENABLE_PREVIEW_IMAGES", False)
36 PREVIEW_BG_COLOR = env.str("PREVIEW_BG_COLOR", "use_dominant_color_light")
37 PREVIEW_TEXT_COLOR = env.str("PREVIEW_TEXT_COLOR", "#363636")
38 PREVIEW_IMG_WIDTH = env.int("PREVIEW_IMG_WIDTH", 1200)
39 PREVIEW_IMG_HEIGHT = env.int("PREVIEW_IMG_HEIGHT", 630)
40 PREVIEW_DEFAULT_COVER_COLOR = env.str("PREVIEW_DEFAULT_COVER_COLOR", "#002549")
41
42 # Quick-start development settings - unsuitable for production
43 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
44
45 # SECURITY WARNING: keep the secret key used in production secret!
46 SECRET_KEY = env("SECRET_KEY")
47
48 # SECURITY WARNING: don't run with debug turned on in production!
49 DEBUG = env.bool("DEBUG", True)
50 USE_HTTPS = env.bool("USE_HTTPS", False)
51
52 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["*"])
53
54 # Application definition
55
56 INSTALLED_APPS = [
57 "django.contrib.admin",
58 "django.contrib.auth",
59 "django.contrib.contenttypes",
60 "django.contrib.sessions",
61 "django.contrib.messages",
62 "django.contrib.staticfiles",
63 "django.contrib.humanize",
64 "django_rename_app",
65 "bookwyrm",
66 "celery",
67 "imagekit",
68 "storages",
69 ]
70
71 MIDDLEWARE = [
72 "django.middleware.security.SecurityMiddleware",
73 "django.contrib.sessions.middleware.SessionMiddleware",
74 "django.middleware.locale.LocaleMiddleware",
75 "django.middleware.common.CommonMiddleware",
76 "django.middleware.csrf.CsrfViewMiddleware",
77 "django.contrib.auth.middleware.AuthenticationMiddleware",
78 "bookwyrm.timezone_middleware.TimezoneMiddleware",
79 "django.contrib.messages.middleware.MessageMiddleware",
80 "django.middleware.clickjacking.XFrameOptionsMiddleware",
81 ]
82
83 ROOT_URLCONF = "bookwyrm.urls"
84
85 TEMPLATES = [
86 {
87 "BACKEND": "django.template.backends.django.DjangoTemplates",
88 "DIRS": ["templates"],
89 "APP_DIRS": True,
90 "OPTIONS": {
91 "context_processors": [
92 "django.template.context_processors.debug",
93 "django.template.context_processors.request",
94 "django.contrib.auth.context_processors.auth",
95 "django.contrib.messages.context_processors.messages",
96 "bookwyrm.context_processors.site_settings",
97 ],
98 },
99 },
100 ]
101
102
103 WSGI_APPLICATION = "bookwyrm.wsgi.application"
104
105 # redis/activity streams settings
106 REDIS_ACTIVITY_HOST = env("REDIS_ACTIVITY_HOST", "localhost")
107 REDIS_ACTIVITY_PORT = env("REDIS_ACTIVITY_PORT", 6379)
108 REDIS_ACTIVITY_PASSWORD = env("REDIS_ACTIVITY_PASSWORD", None)
109
110 MAX_STREAM_LENGTH = int(env("MAX_STREAM_LENGTH", 200))
111
112 STREAMS = [
113 {"key": "home", "name": _("Home Timeline"), "shortname": _("Home")},
114 {"key": "books", "name": _("Books Timeline"), "shortname": _("Books")},
115 ]
116
117 # Database
118 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
119
120 DATABASES = {
121 "default": {
122 "ENGINE": "django.db.backends.postgresql_psycopg2",
123 "NAME": env("POSTGRES_DB", "fedireads"),
124 "USER": env("POSTGRES_USER", "fedireads"),
125 "PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
126 "HOST": env("POSTGRES_HOST", ""),
127 "PORT": env("POSTGRES_PORT", 5432),
128 },
129 }
130
131
132 LOGIN_URL = "/login/"
133 AUTH_USER_MODEL = "bookwyrm.User"
134
135 # Password validation
136 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
137
138 # pylint: disable=line-too-long
139 AUTH_PASSWORD_VALIDATORS = [
140 {
141 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
142 },
143 {
144 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
145 },
146 {
147 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
148 },
149 {
150 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
151 },
152 ]
153
154
155 # Internationalization
156 # https://docs.djangoproject.com/en/3.2/topics/i18n/
157
158 LANGUAGE_CODE = "en-us"
159 LANGUAGES = [
160 ("en-us", _("English")),
161 ("de-de", _("German")),
162 ("es", _("Spanish")),
163 ("fr-fr", _("French")),
164 ("zh-hans", _("Simplified Chinese")),
165 ("zh-hant", _("Traditional Chinese")),
166 ]
167
168
169 TIME_ZONE = "UTC"
170
171 USE_I18N = True
172
173 USE_L10N = True
174
175 USE_TZ = True
176
177
178 USER_AGENT = "%s (BookWyrm/%s; +https://%s/)" % (
179 requests.utils.default_user_agent(),
180 VERSION,
181 DOMAIN,
182 )
183
184 # Imagekit generated thumbnails
185 ENABLE_THUMBNAIL_GENERATION = env.bool("ENABLE_THUMBNAIL_GENERATION", False)
186 IMAGEKIT_CACHEFILE_DIR = "thumbnails"
187
188 # Static files (CSS, JavaScript, Images)
189 # https://docs.djangoproject.com/en/3.2/howto/static-files/
190
191 PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
192
193 # Storage
194
195 PROTOCOL = "http"
196 if USE_HTTPS:
197 PROTOCOL = "https"
198
199 USE_S3 = env.bool("USE_S3", False)
200
201 if USE_S3:
202 # AWS settings
203 AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
204 AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
205 AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
206 AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
207 AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", "")
208 AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
209 AWS_DEFAULT_ACL = "public-read"
210 AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
211 # S3 Static settings
212 STATIC_LOCATION = "static"
213 STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)
214 STATICFILES_STORAGE = "bookwyrm.storage_backends.StaticStorage"
215 # S3 Media settings
216 MEDIA_LOCATION = "images"
217 MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)
218 MEDIA_FULL_URL = MEDIA_URL
219 DEFAULT_FILE_STORAGE = "bookwyrm.storage_backends.ImagesStorage"
220 # I don't know if it's used, but the site crashes without it
221 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
222 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
223 else:
224 STATIC_URL = "/static/"
225 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
226 MEDIA_URL = "/images/"
227 MEDIA_FULL_URL = "%s://%s%s" % (PROTOCOL, DOMAIN, MEDIA_URL)
228 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
229
[end of bookwyrm/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/context_processors.py b/bookwyrm/context_processors.py
--- a/bookwyrm/context_processors.py
+++ b/bookwyrm/context_processors.py
@@ -15,4 +15,5 @@
"media_full_url": settings.MEDIA_FULL_URL,
"preview_images_enabled": settings.ENABLE_PREVIEW_IMAGES,
"request_protocol": request_protocol,
+ "js_cache": settings.JS_CACHE,
}
diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -13,6 +13,8 @@
PAGE_LENGTH = env("PAGE_LENGTH", 15)
DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
+JS_CACHE = "19447742"
+
# email
EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
EMAIL_HOST = env("EMAIL_HOST")
|
{"golden_diff": "diff --git a/bookwyrm/context_processors.py b/bookwyrm/context_processors.py\n--- a/bookwyrm/context_processors.py\n+++ b/bookwyrm/context_processors.py\n@@ -15,4 +15,5 @@\n \"media_full_url\": settings.MEDIA_FULL_URL,\n \"preview_images_enabled\": settings.ENABLE_PREVIEW_IMAGES,\n \"request_protocol\": request_protocol,\n+ \"js_cache\": settings.JS_CACHE,\n }\ndiff --git a/bookwyrm/settings.py b/bookwyrm/settings.py\n--- a/bookwyrm/settings.py\n+++ b/bookwyrm/settings.py\n@@ -13,6 +13,8 @@\n PAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\n DEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n \n+JS_CACHE = \"19447742\"\n+\n # email\n EMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\n EMAIL_HOST = env(\"EMAIL_HOST\")\n", "issue": "Cache busting for static files when changes need to be loaded\nWhen the javascript is updates, there are usually a handful of users who experience broken behavior (#1284) because their browser is still working off a cached version of the previous one. I think using https://docs.djangoproject.com/en/3.2/ref/contrib/staticfiles/#django.contrib.staticfiles.storage.ManifestStaticFilesStorage will resolve this?\n", "before_files": [{"content": "\"\"\" customize the info available in context for rendering templates \"\"\"\nfrom bookwyrm import models, settings\n\n\ndef site_settings(request): # pylint: disable=unused-argument\n \"\"\"include the custom info about the site\"\"\"\n request_protocol = \"https://\"\n if not request.is_secure():\n request_protocol = \"http://\"\n\n return {\n \"site\": models.SiteSettings.objects.get(),\n \"active_announcements\": models.Announcement.active_announcements(),\n \"thumbnail_generation_enabled\": settings.ENABLE_THUMBNAIL_GENERATION,\n \"media_full_url\": settings.MEDIA_FULL_URL,\n \"preview_images_enabled\": settings.ENABLE_PREVIEW_IMAGES,\n \"request_protocol\": request_protocol,\n }\n", "path": "bookwyrm/context_processors.py"}, {"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = \"admin@{:s}\".format(env(\"DOMAIN\"))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See 
https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.timezone_middleware.TimezoneMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n 
(\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nUSER_AGENT = \"%s (BookWyrm/%s; +https://%s/)\" % (\n requests.utils.default_user_agent(),\n VERSION,\n DOMAIN,\n)\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = \"%s://%s%s\" % (PROTOCOL, DOMAIN, MEDIA_URL)\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}]}
| 3,169 | 203 |
gh_patches_debug_12647
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1672
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: rbenv: no such command `install'
fails: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420816191
passes: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420881311
The difference in the failing job is pre-commit 2.8.1 (passing is using 2.7.1). It seems similar to https://stackoverflow.com/questions/17618113/the-command-rbenv-install-is-missing... perhaps Travis doesn't include that particular bit.
failure log snippet:
```
$ pre-commit run --all-files
[INFO] Installing environment for https://github.com/chriskuehl/puppet-pre-commit-hooks.git.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: command: ('/bin/bash', '/home/travis/.cache/pre-commit/repoz146d2mg/rbenv-default/bin/rbenv', 'install', 'default')
return code: 1
expected return code: 0
stdout: (none)
stderr:
rbenv: no such command `install'
Check the log at /home/travis/.cache/pre-commit/pre-commit.log
```
</issue>
<code>
[start of pre_commit/languages/ruby.py]
1 import contextlib
2 import functools
3 import os.path
4 import shutil
5 import tarfile
6 from typing import Generator
7 from typing import Sequence
8 from typing import Tuple
9
10 import pre_commit.constants as C
11 from pre_commit.envcontext import envcontext
12 from pre_commit.envcontext import PatchesT
13 from pre_commit.envcontext import UNSET
14 from pre_commit.envcontext import Var
15 from pre_commit.hook import Hook
16 from pre_commit.languages import helpers
17 from pre_commit.prefix import Prefix
18 from pre_commit.util import CalledProcessError
19 from pre_commit.util import clean_path_on_failure
20 from pre_commit.util import resource_bytesio
21
22 ENVIRONMENT_DIR = 'rbenv'
23 healthy = helpers.basic_healthy
24
25
26 @functools.lru_cache(maxsize=1)
27 def get_default_version() -> str:
28 if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):
29 return 'system'
30 else:
31 return C.DEFAULT
32
33
34 def get_env_patch(
35 venv: str,
36 language_version: str,
37 ) -> PatchesT:
38 patches: PatchesT = (
39 ('GEM_HOME', os.path.join(venv, 'gems')),
40 ('GEM_PATH', UNSET),
41 ('BUNDLE_IGNORE_CONFIG', '1'),
42 )
43 if language_version == 'system':
44 patches += (
45 (
46 'PATH', (
47 os.path.join(venv, 'gems', 'bin'), os.pathsep,
48 Var('PATH'),
49 ),
50 ),
51 )
52 else: # pragma: win32 no cover
53 patches += (
54 ('RBENV_ROOT', venv),
55 ('RBENV_VERSION', language_version),
56 (
57 'PATH', (
58 os.path.join(venv, 'gems', 'bin'), os.pathsep,
59 os.path.join(venv, 'shims'), os.pathsep,
60 os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),
61 ),
62 ),
63 )
64 return patches
65
66
67 @contextlib.contextmanager
68 def in_env(
69 prefix: Prefix,
70 language_version: str,
71 ) -> Generator[None, None, None]:
72 envdir = prefix.path(
73 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
74 )
75 with envcontext(get_env_patch(envdir, language_version)):
76 yield
77
78
79 def _extract_resource(filename: str, dest: str) -> None:
80 with resource_bytesio(filename) as bio:
81 with tarfile.open(fileobj=bio) as tf:
82 tf.extractall(dest)
83
84
85 def _install_rbenv(
86 prefix: Prefix,
87 version: str,
88 ) -> None: # pragma: win32 no cover
89 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
90
91 _extract_resource('rbenv.tar.gz', prefix.path('.'))
92 shutil.move(prefix.path('rbenv'), prefix.path(directory))
93
94 # Only install ruby-build if the version is specified
95 if version != C.DEFAULT:
96 plugins_dir = prefix.path(directory, 'plugins')
97 _extract_resource('ruby-download.tar.gz', plugins_dir)
98 _extract_resource('ruby-build.tar.gz', plugins_dir)
99
100
101 def _install_ruby(
102 prefix: Prefix,
103 version: str,
104 ) -> None: # pragma: win32 no cover
105 try:
106 helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))
107 except CalledProcessError: # pragma: no cover (usually find with download)
108 # Failed to download from mirror for some reason, build it instead
109 helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))
110
111
112 def install_environment(
113 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
114 ) -> None:
115 additional_dependencies = tuple(additional_dependencies)
116 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
117 with clean_path_on_failure(prefix.path(directory)):
118 if version != 'system': # pragma: win32 no cover
119 _install_rbenv(prefix, version)
120 with in_env(prefix, version):
121 # Need to call this before installing so rbenv's directories
122 # are set up
123 helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
124 # XXX: this will *always* fail if `version == C.DEFAULT`
125 _install_ruby(prefix, version)
126 # Need to call this after installing to set up the shims
127 helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
128
129 with in_env(prefix, version):
130 helpers.run_setup_cmd(
131 prefix, ('gem', 'build', *prefix.star('.gemspec')),
132 )
133 helpers.run_setup_cmd(
134 prefix,
135 (
136 'gem', 'install',
137 '--no-document', '--no-format-executable',
138 *prefix.star('.gem'), *additional_dependencies,
139 ),
140 )
141
142
143 def run_hook(
144 hook: Hook,
145 file_args: Sequence[str],
146 color: bool,
147 ) -> Tuple[int, bytes]:
148 with in_env(hook.prefix, hook.language_version):
149 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
150
[end of pre_commit/languages/ruby.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -121,8 +121,8 @@
# Need to call this before installing so rbenv's directories
# are set up
helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
- # XXX: this will *always* fail if `version == C.DEFAULT`
- _install_ruby(prefix, version)
+ if version != C.DEFAULT:
+ _install_ruby(prefix, version)
# Need to call this after installing to set up the shims
helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
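
The guard added above works because the rbenv tarball shipped with pre-commit has no `install` subcommand of its own; `_install_rbenv()` only unpacks the ruby-build plugin (which provides `rbenv install`) when a concrete Ruby version is requested, so the build step must be skipped for the default version. A condensed, non-authoritative sketch of the corrected flow, written as a fragment in the namespace of the `pre_commit/languages/ruby.py` listing above (all names come from that listing):

```python
# Sketch only: fragment assuming the module context of pre_commit/languages/ruby.py above.
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
with clean_path_on_failure(prefix.path(directory)):
    if version != 'system':
        _install_rbenv(prefix, version)
        with in_env(prefix, version):
            # Set up rbenv's directories first.
            helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
            # `rbenv install` exists only if _install_rbenv() unpacked ruby-build,
            # which it does solely for pinned (non-default) versions.
            if version != C.DEFAULT:
                _install_ruby(prefix, version)
            # Refresh shims after any install.
            helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
```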
|
{"golden_diff": "diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py\n--- a/pre_commit/languages/ruby.py\n+++ b/pre_commit/languages/ruby.py\n@@ -121,8 +121,8 @@\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n- # XXX: this will *always* fail if `version == C.DEFAULT`\n- _install_ruby(prefix, version)\n+ if version != C.DEFAULT:\n+ _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n", "issue": "bug: rbenv: no such command `install'\nfails: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420816191\r\npasses: https://travis-ci.com/github/mozilla-platform-ops/ronin_puppet/jobs/420881311\r\n\r\nThe difference in the failing job is pre-commit 2.8.1 (passing is using 2.7.1). It seems similar to https://stackoverflow.com/questions/17618113/the-command-rbenv-install-is-missing... perhaps Travis doesn't include that particular bit.\r\n\r\nfailure log snippet:\r\n```\r\n$ pre-commit run --all-files\r\n\r\n[INFO] Installing environment for https://github.com/chriskuehl/puppet-pre-commit-hooks.git.\r\n\r\n[INFO] Once installed this environment will be reused.\r\n\r\n[INFO] This may take a few minutes...\r\n\r\nAn unexpected error has occurred: CalledProcessError: command: ('/bin/bash', '/home/travis/.cache/pre-commit/repoz146d2mg/rbenv-default/bin/rbenv', 'install', 'default')\r\n\r\nreturn code: 1\r\n\r\nexpected return code: 0\r\n\r\nstdout: (none)\r\n\r\nstderr:\r\n\r\n rbenv: no such command `install'\r\n\r\n \r\n\r\nCheck the log at /home/travis/.cache/pre-commit/pre-commit.log\r\n```\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n ('RBENV_VERSION', language_version),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n 
yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n # XXX: this will *always* fail if `version == C.DEFAULT`\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/ruby.py"}]}
| 2,262 | 166 |
gh_patches_debug_18069
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-1647
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkpoint saving not working as expected
### Description
After migrating to release 0.5.0 noticed that checkpoint saving is not working as expected.
## description
Tried different configurations, e.g., `checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)`, for example when running 20-30 epochs to train a model.
After training completed, the expected ckpt files could not be found. What was found was a single ckpt file of the first epoch only, in the wrong directory.
## severity
The bug is very limiting. For example, after hours of training a model, there is no way to load the model from a checkpoint to run inference; the only chance to run inference was during the same run.
## expected behavior
using a given configuration expected to see:
- checkpoint files saved at the configured epoch interval
- the last epoch checkpoint file
- the checkpoints should have been saved to the given directory
## observed behavior
- after training several epochs, only the first checkpoint was saved.
- the single checkpoint was saved to another directory under the logger output
## initial investigation
1. checkpoint callback created and training fit called
2. later, see image and call stack:
it seems the constructor is called again with save_last=None

3. when saving later supposed to happen, the save_last is None:

4. last checkpoint saving is skipped
### Steps to reproduce
1. create a checkpoint callback and use different checkpoint-saving parameters, e.g., checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)
2. call trainer fit and run several epochs
3. check expected results:
- saving location as expected e.g., under C:\foo
- check last epoch checkpoint saved - must have last.ckpt
- check how many checkpoints were saved, e.g., one every 2 epochs, etc.
### Version
torchgeo version 0.5.0, lightning version 2.0.9
</issue>
<code>
[start of torchgeo/trainers/base.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Base classes for all :mod:`torchgeo` trainers."""
5
6 from abc import ABC, abstractmethod
7 from typing import Any
8
9 import lightning
10 from lightning.pytorch import LightningModule
11 from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint
12 from torch.optim import AdamW
13 from torch.optim.lr_scheduler import ReduceLROnPlateau
14
15
16 class BaseTask(LightningModule, ABC):
17 """Abstract base class for all TorchGeo trainers.
18
19 .. versionadded:: 0.5
20 """
21
22 #: Model to train.
23 model: Any
24
25 #: Performance metric to monitor in learning rate scheduler and callbacks.
26 monitor = "val_loss"
27
28 #: Whether the goal is to minimize or maximize the performance metric to monitor.
29 mode = "min"
30
31 def __init__(self) -> None:
32 """Initialize a new BaseTask instance."""
33 super().__init__()
34 self.save_hyperparameters()
35 self.configure_losses()
36 self.configure_metrics()
37 self.configure_models()
38
39 def configure_callbacks(self) -> list[Callback]:
40 """Initialize model-specific callbacks.
41
42 Returns:
43 List of callbacks to apply.
44 """
45 return [
46 ModelCheckpoint(monitor=self.monitor, mode=self.mode),
47 EarlyStopping(monitor=self.monitor, mode=self.mode),
48 ]
49
50 def configure_losses(self) -> None:
51 """Initialize the loss criterion."""
52
53 def configure_metrics(self) -> None:
54 """Initialize the performance metrics."""
55
56 @abstractmethod
57 def configure_models(self) -> None:
58 """Initialize the model."""
59
60 def configure_optimizers(
61 self,
62 ) -> "lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig":
63 """Initialize the optimizer and learning rate scheduler.
64
65 Returns:
66 Optimizer and learning rate scheduler.
67 """
68 optimizer = AdamW(self.parameters(), lr=self.hparams["lr"])
69 scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams["patience"])
70 return {
71 "optimizer": optimizer,
72 "lr_scheduler": {"scheduler": scheduler, "monitor": self.monitor},
73 }
74
75 def forward(self, *args: Any, **kwargs: Any) -> Any:
76 """Forward pass of the model.
77
78 Args:
79 args: Arguments to pass to model.
80 kwargs: Keyword arguments to pass to model.
81
82 Returns:
83 Output of the model.
84 """
85 return self.model(*args, **kwargs)
86
[end of torchgeo/trainers/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchgeo/trainers/base.py b/torchgeo/trainers/base.py
--- a/torchgeo/trainers/base.py
+++ b/torchgeo/trainers/base.py
@@ -8,7 +8,6 @@
import lightning
from lightning.pytorch import LightningModule
-from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint
from torch.optim import AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau
@@ -36,17 +35,6 @@
self.configure_metrics()
self.configure_models()
- def configure_callbacks(self) -> list[Callback]:
- """Initialize model-specific callbacks.
-
- Returns:
- List of callbacks to apply.
- """
- return [
- ModelCheckpoint(monitor=self.monitor, mode=self.mode),
- EarlyStopping(monitor=self.monitor, mode=self.mode),
- ]
-
def configure_losses(self) -> None:
"""Initialize the loss criterion."""
|
{"golden_diff": "diff --git a/torchgeo/trainers/base.py b/torchgeo/trainers/base.py\n--- a/torchgeo/trainers/base.py\n+++ b/torchgeo/trainers/base.py\n@@ -8,7 +8,6 @@\n \n import lightning\n from lightning.pytorch import LightningModule\n-from lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint\n from torch.optim import AdamW\n from torch.optim.lr_scheduler import ReduceLROnPlateau\n \n@@ -36,17 +35,6 @@\n self.configure_metrics()\n self.configure_models()\n \n- def configure_callbacks(self) -> list[Callback]:\n- \"\"\"Initialize model-specific callbacks.\n-\n- Returns:\n- List of callbacks to apply.\n- \"\"\"\n- return [\n- ModelCheckpoint(monitor=self.monitor, mode=self.mode),\n- EarlyStopping(monitor=self.monitor, mode=self.mode),\n- ]\n-\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n", "issue": "Checkpoint saving not working as expected\n### Description\r\n\r\nAfter migrating to release 0.5.0 noticed that checkpoint saving is not working as expected.\r\n\r\n## description\r\ntried different configuration e.g., `checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)` for example when running 20-30 epochs for training a model.\r\nafter training was completed could not find the ckpt file. what was found was a single ckpt file of the first epoch only, in a wrong directory.\r\n\r\n## severance\r\nthe bug is very limiting. for example, after hours of training a model, there is no way to load the model from a checkpoint to run inference. the single shot to run inference was during the same run. \r\n\r\n## expected behavior\r\nusing a given configuration expected to see:\r\n- checkpoint files saved every number of epoch\r\n- the last epoch checkpoint file\r\n- the checkpoints should have been saved to the given directory\r\n\r\n## observed behavior\r\n- after training several epochs only the first was saved.\r\n- the single checkpoint were saved to another directory under the logger output\r\n\r\n## initial investigation\r\n1. checkpoint callback created and training fit called\r\n2. later, see image and call stack:\r\nseems like c'tor called again with save_last=None\r\n\r\n\r\n3. when saving later supposed to happen, the save_last is None:\r\n\r\n\r\n4. last checkpoint saving is skipped\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. create a checkpoint callback and use different checkpoints saving parameters e.g., checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=ckpt_dir, save_last=True, every_n_epochs=1, save_top_k=1)\r\n2. call trainer fit and run several epochs\r\n3. check expected results:\r\n- saving location as expected e.g., under C:\\foo \r\n- check last epoch checkpoint saved - must have last.ckpt\r\n- check how many checkpoints were saved e.g., every 2 etc\r\n\r\n### Version\r\n\r\ntorchgeo version 0.5.0, lightning version 2.0.9\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Base classes for all :mod:`torchgeo` trainers.\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any\n\nimport lightning\nfrom lightning.pytorch import LightningModule\nfrom lightning.pytorch.callbacks import Callback, EarlyStopping, ModelCheckpoint\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\n\nclass BaseTask(LightningModule, ABC):\n \"\"\"Abstract base class for all TorchGeo trainers.\n\n .. 
versionadded:: 0.5\n \"\"\"\n\n #: Model to train.\n model: Any\n\n #: Performance metric to monitor in learning rate scheduler and callbacks.\n monitor = \"val_loss\"\n\n #: Whether the goal is to minimize or maximize the performance metric to monitor.\n mode = \"min\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new BaseTask instance.\"\"\"\n super().__init__()\n self.save_hyperparameters()\n self.configure_losses()\n self.configure_metrics()\n self.configure_models()\n\n def configure_callbacks(self) -> list[Callback]:\n \"\"\"Initialize model-specific callbacks.\n\n Returns:\n List of callbacks to apply.\n \"\"\"\n return [\n ModelCheckpoint(monitor=self.monitor, mode=self.mode),\n EarlyStopping(monitor=self.monitor, mode=self.mode),\n ]\n\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\"\"\"\n\n def configure_metrics(self) -> None:\n \"\"\"Initialize the performance metrics.\"\"\"\n\n @abstractmethod\n def configure_models(self) -> None:\n \"\"\"Initialize the model.\"\"\"\n\n def configure_optimizers(\n self,\n ) -> \"lightning.pytorch.utilities.types.OptimizerLRSchedulerConfig\":\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n Optimizer and learning rate scheduler.\n \"\"\"\n optimizer = AdamW(self.parameters(), lr=self.hparams[\"lr\"])\n scheduler = ReduceLROnPlateau(optimizer, patience=self.hparams[\"patience\"])\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\"scheduler\": scheduler, \"monitor\": self.monitor},\n }\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n args: Arguments to pass to model.\n kwargs: Keyword arguments to pass to model.\n\n Returns:\n Output of the model.\n \"\"\"\n return self.model(*args, **kwargs)\n", "path": "torchgeo/trainers/base.py"}]}
| 1,771 | 206 |
gh_patches_debug_1576
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-19504
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: Google Drive backup file names malformed
## Description of the issue
The files are uploaded with the full file path as the file name.
This makes extracting and restoring the files difficult.

## Context information (for bug reports)
**Output of `bench version`**
```
ERPNext: v13.19.0
Frappe Framework: v13.19.0
```
## Steps to reproduce the issue
1. Back up to Google Drive
### Observed result
Malformed file names
### Expected result
Normal file names
</issue>
<code>
[start of frappe/integrations/doctype/google_drive/google_drive.py]
1 # Copyright (c) 2019, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import os
5 from urllib.parse import quote
6
7 from apiclient.http import MediaFileUpload
8 from googleapiclient.errors import HttpError
9
10 import frappe
11 from frappe import _
12 from frappe.integrations.google_oauth import GoogleOAuth
13 from frappe.integrations.offsite_backup_utils import (
14 get_latest_backup_file,
15 send_email,
16 validate_file_size,
17 )
18 from frappe.model.document import Document
19 from frappe.utils import get_backups_path, get_bench_path
20 from frappe.utils.background_jobs import enqueue
21 from frappe.utils.backups import new_backup
22
23
24 class GoogleDrive(Document):
25 def validate(self):
26 doc_before_save = self.get_doc_before_save()
27 if doc_before_save and doc_before_save.backup_folder_name != self.backup_folder_name:
28 self.backup_folder_id = ""
29
30 def get_access_token(self):
31 if not self.refresh_token:
32 button_label = frappe.bold(_("Allow Google Drive Access"))
33 raise frappe.ValidationError(_("Click on {0} to generate Refresh Token.").format(button_label))
34
35 oauth_obj = GoogleOAuth("drive")
36 r = oauth_obj.refresh_access_token(
37 self.get_password(fieldname="refresh_token", raise_exception=False)
38 )
39
40 return r.get("access_token")
41
42
43 @frappe.whitelist(methods=["POST"])
44 def authorize_access(reauthorize=False, code=None):
45 """
46 If no Authorization code get it from Google and then request for Refresh Token.
47 Google Contact Name is set to flags to set_value after Authorization Code is obtained.
48 """
49
50 oauth_code = (
51 frappe.db.get_single_value("Google Drive", "authorization_code") if not code else code
52 )
53 oauth_obj = GoogleOAuth("drive")
54
55 if not oauth_code or reauthorize:
56 if reauthorize:
57 frappe.db.set_single_value("Google Drive", "backup_folder_id", "")
58 return oauth_obj.get_authentication_url(
59 {
60 "redirect": f"/app/Form/{quote('Google Drive')}",
61 },
62 )
63
64 r = oauth_obj.authorize(oauth_code)
65 frappe.db.set_single_value(
66 "Google Drive",
67 {"authorization_code": oauth_code, "refresh_token": r.get("refresh_token")},
68 )
69
70
71 def get_google_drive_object():
72 """
73 Returns an object of Google Drive.
74 """
75 account = frappe.get_doc("Google Drive")
76 oauth_obj = GoogleOAuth("drive")
77
78 google_drive = oauth_obj.get_google_service_object(
79 account.get_access_token(),
80 account.get_password(fieldname="indexing_refresh_token", raise_exception=False),
81 )
82
83 return google_drive, account
84
85
86 def check_for_folder_in_google_drive():
87 """Checks if folder exists in Google Drive else create it."""
88
89 def _create_folder_in_google_drive(google_drive, account):
90 file_metadata = {
91 "name": account.backup_folder_name,
92 "mimeType": "application/vnd.google-apps.folder",
93 }
94
95 try:
96 folder = google_drive.files().create(body=file_metadata, fields="id").execute()
97 frappe.db.set_single_value("Google Drive", "backup_folder_id", folder.get("id"))
98 frappe.db.commit()
99 except HttpError as e:
100 frappe.throw(
101 _("Google Drive - Could not create folder in Google Drive - Error Code {0}").format(e)
102 )
103
104 google_drive, account = get_google_drive_object()
105
106 if account.backup_folder_id:
107 return
108
109 backup_folder_exists = False
110
111 try:
112 google_drive_folders = (
113 google_drive.files().list(q="mimeType='application/vnd.google-apps.folder'").execute()
114 )
115 except HttpError as e:
116 frappe.throw(
117 _("Google Drive - Could not find folder in Google Drive - Error Code {0}").format(e)
118 )
119
120 for f in google_drive_folders.get("files"):
121 if f.get("name") == account.backup_folder_name:
122 frappe.db.set_single_value("Google Drive", "backup_folder_id", f.get("id"))
123 frappe.db.commit()
124 backup_folder_exists = True
125 break
126
127 if not backup_folder_exists:
128 _create_folder_in_google_drive(google_drive, account)
129
130
131 @frappe.whitelist()
132 def take_backup():
133 """Enqueue longjob for taking backup to Google Drive"""
134 enqueue(
135 "frappe.integrations.doctype.google_drive.google_drive.upload_system_backup_to_google_drive",
136 queue="long",
137 timeout=1500,
138 )
139 frappe.msgprint(_("Queued for backup. It may take a few minutes to an hour."))
140
141
142 def upload_system_backup_to_google_drive():
143 """
144 Upload system backup to Google Drive
145 """
146 # Get Google Drive Object
147 google_drive, account = get_google_drive_object()
148
149 # Check if folder exists in Google Drive
150 check_for_folder_in_google_drive()
151 account.load_from_db()
152
153 validate_file_size()
154
155 if frappe.flags.create_new_backup:
156 set_progress(1, "Backing up Data.")
157 backup = new_backup()
158 file_urls = []
159 file_urls.append(backup.backup_path_db)
160 file_urls.append(backup.backup_path_conf)
161
162 if account.file_backup:
163 file_urls.append(backup.backup_path_files)
164 file_urls.append(backup.backup_path_private_files)
165 else:
166 file_urls = get_latest_backup_file(with_files=account.file_backup)
167
168 for fileurl in file_urls:
169 if not fileurl:
170 continue
171
172 file_metadata = {"name": fileurl, "parents": [account.backup_folder_id]}
173
174 try:
175 media = MediaFileUpload(
176 get_absolute_path(filename=fileurl), mimetype="application/gzip", resumable=True
177 )
178 except OSError as e:
179 frappe.throw(_("Google Drive - Could not locate - {0}").format(e))
180
181 try:
182 set_progress(2, "Uploading backup to Google Drive.")
183 google_drive.files().create(body=file_metadata, media_body=media, fields="id").execute()
184 except HttpError as e:
185 send_email(False, "Google Drive", "Google Drive", "email", error_status=e)
186
187 set_progress(3, "Uploading successful.")
188 frappe.db.set_single_value("Google Drive", "last_backup_on", frappe.utils.now_datetime())
189 send_email(True, "Google Drive", "Google Drive", "email")
190 return _("Google Drive Backup Successful.")
191
192
193 def daily_backup():
194 drive_settings = frappe.db.get_singles_dict("Google Drive", cast=True)
195 if drive_settings.enable and drive_settings.frequency == "Daily":
196 upload_system_backup_to_google_drive()
197
198
199 def weekly_backup():
200 drive_settings = frappe.db.get_singles_dict("Google Drive", cast=True)
201 if drive_settings.enable and drive_settings.frequency == "Weekly":
202 upload_system_backup_to_google_drive()
203
204
205 def get_absolute_path(filename):
206 file_path = os.path.join(get_backups_path()[2:], os.path.basename(filename))
207 return f"{get_bench_path()}/sites/{file_path}"
208
209
210 def set_progress(progress, message):
211 frappe.publish_realtime(
212 "upload_to_google_drive",
213 dict(progress=progress, total=3, message=message),
214 user=frappe.session.user,
215 )
216
[end of frappe/integrations/doctype/google_drive/google_drive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/frappe/integrations/doctype/google_drive/google_drive.py b/frappe/integrations/doctype/google_drive/google_drive.py
--- a/frappe/integrations/doctype/google_drive/google_drive.py
+++ b/frappe/integrations/doctype/google_drive/google_drive.py
@@ -169,7 +169,7 @@
if not fileurl:
continue
- file_metadata = {"name": fileurl, "parents": [account.backup_folder_id]}
+ file_metadata = {"name": os.path.basename(fileurl), "parents": [account.backup_folder_id]}
try:
media = MediaFileUpload(
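
The one-line change above swaps the full local path for just its base name when building the Drive file metadata, which is what ends up as the visible file name. A small, self-contained illustration; the path below is a made-up example, not one taken from the repository:

```python
import os.path

# Hypothetical backup path, for illustration only.
fileurl = "./site1.local/private/backups/20230101_000000-site1_local-database.sql.gz"

before = {"name": fileurl}                   # malformed: slashes end up in the Drive file name
after = {"name": os.path.basename(fileurl)}  # "20230101_000000-site1_local-database.sql.gz"
print(after["name"])
```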
|
{"golden_diff": "diff --git a/frappe/integrations/doctype/google_drive/google_drive.py b/frappe/integrations/doctype/google_drive/google_drive.py\n--- a/frappe/integrations/doctype/google_drive/google_drive.py\n+++ b/frappe/integrations/doctype/google_drive/google_drive.py\n@@ -169,7 +169,7 @@\n \t\tif not fileurl:\n \t\t\tcontinue\n \n-\t\tfile_metadata = {\"name\": fileurl, \"parents\": [account.backup_folder_id]}\n+\t\tfile_metadata = {\"name\": os.path.basename(fileurl), \"parents\": [account.backup_folder_id]}\n \n \t\ttry:\n \t\t\tmedia = MediaFileUpload(\n", "issue": "bug: Google Drive backup file names malformed\n## Description of the issue\r\n\r\nThe files are uploaded with the full file path as the file name.\r\nThis makes extracting and restoring the files difficult.\r\n\r\n\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\nERPNext: v13.19.0\r\nFrappe Framework: v13.19.0\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Back up to Google Drive\r\n\r\n### Observed result\r\nMalformed file names\r\n\r\n### Expected result\r\nNormal file names\r\n\n", "before_files": [{"content": "# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport os\nfrom urllib.parse import quote\n\nfrom apiclient.http import MediaFileUpload\nfrom googleapiclient.errors import HttpError\n\nimport frappe\nfrom frappe import _\nfrom frappe.integrations.google_oauth import GoogleOAuth\nfrom frappe.integrations.offsite_backup_utils import (\n\tget_latest_backup_file,\n\tsend_email,\n\tvalidate_file_size,\n)\nfrom frappe.model.document import Document\nfrom frappe.utils import get_backups_path, get_bench_path\nfrom frappe.utils.background_jobs import enqueue\nfrom frappe.utils.backups import new_backup\n\n\nclass GoogleDrive(Document):\n\tdef validate(self):\n\t\tdoc_before_save = self.get_doc_before_save()\n\t\tif doc_before_save and doc_before_save.backup_folder_name != self.backup_folder_name:\n\t\t\tself.backup_folder_id = \"\"\n\n\tdef get_access_token(self):\n\t\tif not self.refresh_token:\n\t\t\tbutton_label = frappe.bold(_(\"Allow Google Drive Access\"))\n\t\t\traise frappe.ValidationError(_(\"Click on {0} to generate Refresh Token.\").format(button_label))\n\n\t\toauth_obj = GoogleOAuth(\"drive\")\n\t\tr = oauth_obj.refresh_access_token(\n\t\t\tself.get_password(fieldname=\"refresh_token\", raise_exception=False)\n\t\t)\n\n\t\treturn r.get(\"access_token\")\n\n\[email protected](methods=[\"POST\"])\ndef authorize_access(reauthorize=False, code=None):\n\t\"\"\"\n\tIf no Authorization code get it from Google and then request for Refresh Token.\n\tGoogle Contact Name is set to flags to set_value after Authorization Code is obtained.\n\t\"\"\"\n\n\toauth_code = (\n\t\tfrappe.db.get_single_value(\"Google Drive\", \"authorization_code\") if not code else code\n\t)\n\toauth_obj = GoogleOAuth(\"drive\")\n\n\tif not oauth_code or reauthorize:\n\t\tif reauthorize:\n\t\t\tfrappe.db.set_single_value(\"Google Drive\", \"backup_folder_id\", \"\")\n\t\treturn oauth_obj.get_authentication_url(\n\t\t\t{\n\t\t\t\t\"redirect\": f\"/app/Form/{quote('Google Drive')}\",\n\t\t\t},\n\t\t)\n\n\tr = oauth_obj.authorize(oauth_code)\n\tfrappe.db.set_single_value(\n\t\t\"Google Drive\",\n\t\t{\"authorization_code\": oauth_code, \"refresh_token\": r.get(\"refresh_token\")},\n\t)\n\n\ndef get_google_drive_object():\n\t\"\"\"\n\tReturns an object of Google Drive.\n\t\"\"\"\n\taccount = frappe.get_doc(\"Google Drive\")\n\toauth_obj = 
GoogleOAuth(\"drive\")\n\n\tgoogle_drive = oauth_obj.get_google_service_object(\n\t\taccount.get_access_token(),\n\t\taccount.get_password(fieldname=\"indexing_refresh_token\", raise_exception=False),\n\t)\n\n\treturn google_drive, account\n\n\ndef check_for_folder_in_google_drive():\n\t\"\"\"Checks if folder exists in Google Drive else create it.\"\"\"\n\n\tdef _create_folder_in_google_drive(google_drive, account):\n\t\tfile_metadata = {\n\t\t\t\"name\": account.backup_folder_name,\n\t\t\t\"mimeType\": \"application/vnd.google-apps.folder\",\n\t\t}\n\n\t\ttry:\n\t\t\tfolder = google_drive.files().create(body=file_metadata, fields=\"id\").execute()\n\t\t\tfrappe.db.set_single_value(\"Google Drive\", \"backup_folder_id\", folder.get(\"id\"))\n\t\t\tfrappe.db.commit()\n\t\texcept HttpError as e:\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Google Drive - Could not create folder in Google Drive - Error Code {0}\").format(e)\n\t\t\t)\n\n\tgoogle_drive, account = get_google_drive_object()\n\n\tif account.backup_folder_id:\n\t\treturn\n\n\tbackup_folder_exists = False\n\n\ttry:\n\t\tgoogle_drive_folders = (\n\t\t\tgoogle_drive.files().list(q=\"mimeType='application/vnd.google-apps.folder'\").execute()\n\t\t)\n\texcept HttpError as e:\n\t\tfrappe.throw(\n\t\t\t_(\"Google Drive - Could not find folder in Google Drive - Error Code {0}\").format(e)\n\t\t)\n\n\tfor f in google_drive_folders.get(\"files\"):\n\t\tif f.get(\"name\") == account.backup_folder_name:\n\t\t\tfrappe.db.set_single_value(\"Google Drive\", \"backup_folder_id\", f.get(\"id\"))\n\t\t\tfrappe.db.commit()\n\t\t\tbackup_folder_exists = True\n\t\t\tbreak\n\n\tif not backup_folder_exists:\n\t\t_create_folder_in_google_drive(google_drive, account)\n\n\[email protected]()\ndef take_backup():\n\t\"\"\"Enqueue longjob for taking backup to Google Drive\"\"\"\n\tenqueue(\n\t\t\"frappe.integrations.doctype.google_drive.google_drive.upload_system_backup_to_google_drive\",\n\t\tqueue=\"long\",\n\t\ttimeout=1500,\n\t)\n\tfrappe.msgprint(_(\"Queued for backup. 
It may take a few minutes to an hour.\"))\n\n\ndef upload_system_backup_to_google_drive():\n\t\"\"\"\n\tUpload system backup to Google Drive\n\t\"\"\"\n\t# Get Google Drive Object\n\tgoogle_drive, account = get_google_drive_object()\n\n\t# Check if folder exists in Google Drive\n\tcheck_for_folder_in_google_drive()\n\taccount.load_from_db()\n\n\tvalidate_file_size()\n\n\tif frappe.flags.create_new_backup:\n\t\tset_progress(1, \"Backing up Data.\")\n\t\tbackup = new_backup()\n\t\tfile_urls = []\n\t\tfile_urls.append(backup.backup_path_db)\n\t\tfile_urls.append(backup.backup_path_conf)\n\n\t\tif account.file_backup:\n\t\t\tfile_urls.append(backup.backup_path_files)\n\t\t\tfile_urls.append(backup.backup_path_private_files)\n\telse:\n\t\tfile_urls = get_latest_backup_file(with_files=account.file_backup)\n\n\tfor fileurl in file_urls:\n\t\tif not fileurl:\n\t\t\tcontinue\n\n\t\tfile_metadata = {\"name\": fileurl, \"parents\": [account.backup_folder_id]}\n\n\t\ttry:\n\t\t\tmedia = MediaFileUpload(\n\t\t\t\tget_absolute_path(filename=fileurl), mimetype=\"application/gzip\", resumable=True\n\t\t\t)\n\t\texcept OSError as e:\n\t\t\tfrappe.throw(_(\"Google Drive - Could not locate - {0}\").format(e))\n\n\t\ttry:\n\t\t\tset_progress(2, \"Uploading backup to Google Drive.\")\n\t\t\tgoogle_drive.files().create(body=file_metadata, media_body=media, fields=\"id\").execute()\n\t\texcept HttpError as e:\n\t\t\tsend_email(False, \"Google Drive\", \"Google Drive\", \"email\", error_status=e)\n\n\tset_progress(3, \"Uploading successful.\")\n\tfrappe.db.set_single_value(\"Google Drive\", \"last_backup_on\", frappe.utils.now_datetime())\n\tsend_email(True, \"Google Drive\", \"Google Drive\", \"email\")\n\treturn _(\"Google Drive Backup Successful.\")\n\n\ndef daily_backup():\n\tdrive_settings = frappe.db.get_singles_dict(\"Google Drive\", cast=True)\n\tif drive_settings.enable and drive_settings.frequency == \"Daily\":\n\t\tupload_system_backup_to_google_drive()\n\n\ndef weekly_backup():\n\tdrive_settings = frappe.db.get_singles_dict(\"Google Drive\", cast=True)\n\tif drive_settings.enable and drive_settings.frequency == \"Weekly\":\n\t\tupload_system_backup_to_google_drive()\n\n\ndef get_absolute_path(filename):\n\tfile_path = os.path.join(get_backups_path()[2:], os.path.basename(filename))\n\treturn f\"{get_bench_path()}/sites/{file_path}\"\n\n\ndef set_progress(progress, message):\n\tfrappe.publish_realtime(\n\t\t\"upload_to_google_drive\",\n\t\tdict(progress=progress, total=3, message=message),\n\t\tuser=frappe.session.user,\n\t)\n", "path": "frappe/integrations/doctype/google_drive/google_drive.py"}]}
| 2,919 | 138 |
gh_patches_debug_11509
|
rasdani/github-patches
|
git_diff
|
scalableminds__webknossos-libs-59
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
elementClass is missing in datasource-properties.json when no mag1 resolution is available
</issue>
<code>
[start of wkcuber/metadata.py]
1 import json
2 import re
3 import wkw
4 import logging
5 import numpy as np
6
7 from argparse import ArgumentParser
8 from glob import iglob
9 from os import path, listdir
10 from typing import Optional
11 from .mag import Mag
12 from typing import List
13
14
15 def create_parser():
16 parser = ArgumentParser()
17
18 parser.add_argument("path", help="Directory containing the dataset.")
19
20 parser.add_argument("--name", "-n", help="Name of the dataset")
21
22 parser.add_argument(
23 "--scale",
24 "-s",
25 help="Scale of the dataset (e.g. 11.2,11.2,25)",
26 default="1,1,1",
27 )
28
29 group = parser.add_mutually_exclusive_group()
30 group.add_argument(
31 "--compute_max_id",
32 "-c",
33 help="set to compute max id",
34 default=False,
35 action="store_true",
36 )
37 group.add_argument("--max_id", help="set max id of segmentation.", default=0)
38
39 return parser
40
41
42 def write_webknossos_metadata(
43 dataset_path,
44 name,
45 scale,
46 max_id=0,
47 compute_max_id=False,
48 exact_bounding_box: Optional[dict] = None,
49 ):
50
51 # Generate a metadata file for webKnossos
52 # Currently includes no source of information for team
53 datasource_properties_path = path.join(dataset_path, "datasource-properties.json")
54 layers = list(
55 detect_layers(dataset_path, max_id, compute_max_id, exact_bounding_box)
56 )
57 with open(datasource_properties_path, "wt") as datasource_properties_json:
58 json.dump(
59 {
60 "id": {"name": name, "team": "<unknown>"},
61 "dataLayers": layers,
62 "scale": scale,
63 },
64 datasource_properties_json,
65 indent=2,
66 )
67
68
69 def read_metadata_for_layer(wkw_path, layer_name):
70 datasource_properties = json.load(
71 open(path.join(wkw_path, "datasource-properties.json"), "r")
72 )
73 layers = datasource_properties["dataLayers"]
74 layer_info = next(layer for layer in layers if layer["name"] == layer_name)
75 dtype = np.dtype(layer_info["elementClass"])
76 bounding_box = layer_info["boundingBox"]
77 origin = bounding_box["topLeft"]
78 bounding_box = [
79 bounding_box["width"],
80 bounding_box["height"],
81 bounding_box["depth"],
82 ]
83
84 return layer_info, dtype, bounding_box, origin
85
86
87 def detect_dtype(dataset_path, layer, mag: Mag = Mag(1)):
88 layer_path = path.join(dataset_path, layer, str(mag))
89 if path.exists(layer_path):
90 with wkw.Dataset.open(layer_path) as dataset:
91 voxel_type = dataset.header.voxel_type
92 num_channels = dataset.header.num_channels
93 voxel_size = np.dtype(voxel_type)
94 if voxel_size == np.uint8 and num_channels > 1:
95 return "uint" + str(8 * num_channels)
96 else:
97 return str(np.dtype(voxel_type))
98
99
100 def detect_cubeLength(dataset_path, layer, mag: Mag = Mag(1)):
101 layer_path = path.join(dataset_path, layer, str(mag))
102 if path.exists(layer_path):
103 with wkw.Dataset.open(layer_path) as dataset:
104 return dataset.header.block_len * dataset.header.file_len
105
106
107 def detect_bbox(dataset_path, layer, mag: Mag = Mag(1)):
108 # Detect the coarse bounding box of a dataset by iterating
109 # over the WKW cubes
110 layer_path = path.join(dataset_path, layer, str(mag))
111
112 def list_files(layer_path):
113 return iglob(path.join(layer_path, "*", "*", "*.wkw"), recursive=True)
114
115 def parse_cube_file_name(filename):
116 CUBE_REGEX = re.compile(r"z(\d+)/y(\d+)/x(\d+)(\.wkw)$")
117 m = CUBE_REGEX.search(filename)
118 return (int(m.group(3)), int(m.group(2)), int(m.group(1)))
119
120 def list_cubes(layer_path):
121 return (parse_cube_file_name(f) for f in list_files(layer_path))
122
123 xs, ys, zs = list(zip(*list_cubes(layer_path)))
124
125 min_x, min_y, min_z = min(xs), min(ys), min(zs)
126 max_x, max_y, max_z = max(xs), max(ys), max(zs)
127
128 cubeLength = detect_cubeLength(dataset_path, layer, mag)
129
130 return {
131 "topLeft": [min_x * cubeLength, min_y * cubeLength, min_z * cubeLength],
132 "width": (1 + max_x - min_x) * cubeLength,
133 "height": (1 + max_y - min_y) * cubeLength,
134 "depth": (1 + max_z - min_z) * cubeLength,
135 }
136
137
138 def detect_resolutions(dataset_path, layer) -> List[Mag]:
139 for mag in listdir(path.join(dataset_path, layer)):
140 try:
141 yield Mag(mag)
142 except ValueError:
143 logging.info("ignoring {} as resolution".format(mag))
144
145
146 def detect_standard_layer(dataset_path, layer_name, exact_bounding_box=None):
147 # Perform metadata detection for well-known layers
148
149 if exact_bounding_box is None:
150 bbox = detect_bbox(dataset_path, layer_name)
151 else:
152 bbox = exact_bounding_box
153
154 dtype = detect_dtype(dataset_path, layer_name)
155
156 mags = list(detect_resolutions(dataset_path, layer_name))
157 mags = sorted(mags)
158 resolutions = [
159 {
160 "resolution": mag.to_array(),
161 "cubeLength": detect_cubeLength(dataset_path, layer_name, mag),
162 }
163 for mag in mags
164 ]
165
166 return {
167 "dataFormat": "wkw",
168 "name": layer_name,
169 "category": layer_name,
170 "elementClass": dtype,
171 "boundingBox": bbox,
172 "wkwResolutions": list(resolutions),
173 }
174
175
176 def detect_segmentation_layer(
177 dataset_path, layer_name, max_id, compute_max_id=False, exact_bounding_box=None
178 ):
179 layer_info = detect_standard_layer(dataset_path, layer_name, exact_bounding_box)
180 layer_info["mappings"] = []
181 layer_info["largestSegmentId"] = max_id
182
183 if compute_max_id:
184 logging.info("Computing max id of layer={}".format(layer_name))
185 # Computing the current largest segment id
186 # This may take very long due to IO load
187 layer_path = path.join(dataset_path, layer_name, "1")
188 with wkw.Dataset.open(layer_path) as dataset:
189 bbox = layer_info["boundingBox"]
190 layer_info["largestSegmentId"] = int(
191 np.max(
192 dataset.read(
193 bbox["topLeft"], [bbox["width"], bbox["height"], bbox["depth"]]
194 )
195 )
196 )
197 logging.info(
198 "Max id of layer={} is {}".format(
199 layer_name, layer_info["largestSegmentId"]
200 )
201 )
202
203 return layer_info
204
205
206 def detect_layers(dataset_path, max_id, compute_max_id, exact_bounding_box=None):
207 # Detect metadata for well-known layers, e.g. color and segmentation
208 if path.exists(path.join(dataset_path, "color")):
209 yield detect_standard_layer(dataset_path, "color", exact_bounding_box)
210 if path.exists(path.join(dataset_path, "segmentation")):
211 yield detect_segmentation_layer(
212 dataset_path, "segmentation", max_id, compute_max_id, exact_bounding_box
213 )
214
215
216 if __name__ == "__main__":
217 logging.basicConfig(level=logging.DEBUG)
218 args = create_parser().parse_args()
219 scale = tuple(float(x) for x in args.scale.split(","))
220 write_webknossos_metadata(
221 args.path, args.name, scale, args.max_id, args.compute_max_id
222 )
223
[end of wkcuber/metadata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wkcuber/metadata.py b/wkcuber/metadata.py
--- a/wkcuber/metadata.py
+++ b/wkcuber/metadata.py
@@ -151,8 +151,6 @@
else:
bbox = exact_bounding_box
- dtype = detect_dtype(dataset_path, layer_name)
-
mags = list(detect_resolutions(dataset_path, layer_name))
mags = sorted(mags)
resolutions = [
@@ -163,6 +161,9 @@
for mag in mags
]
+ assert len(mags) > 0, "No resolutions found"
+ dtype = detect_dtype(dataset_path, layer_name, mags[0])
+
return {
"dataFormat": "wkw",
"name": layer_name,
|
{"golden_diff": "diff --git a/wkcuber/metadata.py b/wkcuber/metadata.py\n--- a/wkcuber/metadata.py\n+++ b/wkcuber/metadata.py\n@@ -151,8 +151,6 @@\n else:\n bbox = exact_bounding_box\n \n- dtype = detect_dtype(dataset_path, layer_name)\n-\n mags = list(detect_resolutions(dataset_path, layer_name))\n mags = sorted(mags)\n resolutions = [\n@@ -163,6 +161,9 @@\n for mag in mags\n ]\n \n+ assert len(mags) > 0, \"No resolutions found\"\n+ dtype = detect_dtype(dataset_path, layer_name, mags[0])\n+\n return {\n \"dataFormat\": \"wkw\",\n \"name\": layer_name,\n", "issue": "elementClass is missing in datasource-properties.json when no mag1 resolution is available\n\n", "before_files": [{"content": "import json\nimport re\nimport wkw\nimport logging\nimport numpy as np\n\nfrom argparse import ArgumentParser\nfrom glob import iglob\nfrom os import path, listdir\nfrom typing import Optional\nfrom .mag import Mag\nfrom typing import List\n\n\ndef create_parser():\n parser = ArgumentParser()\n\n parser.add_argument(\"path\", help=\"Directory containing the dataset.\")\n\n parser.add_argument(\"--name\", \"-n\", help=\"Name of the dataset\")\n\n parser.add_argument(\n \"--scale\",\n \"-s\",\n help=\"Scale of the dataset (e.g. 11.2,11.2,25)\",\n default=\"1,1,1\",\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--compute_max_id\",\n \"-c\",\n help=\"set to compute max id\",\n default=False,\n action=\"store_true\",\n )\n group.add_argument(\"--max_id\", help=\"set max id of segmentation.\", default=0)\n\n return parser\n\n\ndef write_webknossos_metadata(\n dataset_path,\n name,\n scale,\n max_id=0,\n compute_max_id=False,\n exact_bounding_box: Optional[dict] = None,\n):\n\n # Generate a metadata file for webKnossos\n # Currently includes no source of information for team\n datasource_properties_path = path.join(dataset_path, \"datasource-properties.json\")\n layers = list(\n detect_layers(dataset_path, max_id, compute_max_id, exact_bounding_box)\n )\n with open(datasource_properties_path, \"wt\") as datasource_properties_json:\n json.dump(\n {\n \"id\": {\"name\": name, \"team\": \"<unknown>\"},\n \"dataLayers\": layers,\n \"scale\": scale,\n },\n datasource_properties_json,\n indent=2,\n )\n\n\ndef read_metadata_for_layer(wkw_path, layer_name):\n datasource_properties = json.load(\n open(path.join(wkw_path, \"datasource-properties.json\"), \"r\")\n )\n layers = datasource_properties[\"dataLayers\"]\n layer_info = next(layer for layer in layers if layer[\"name\"] == layer_name)\n dtype = np.dtype(layer_info[\"elementClass\"])\n bounding_box = layer_info[\"boundingBox\"]\n origin = bounding_box[\"topLeft\"]\n bounding_box = [\n bounding_box[\"width\"],\n bounding_box[\"height\"],\n bounding_box[\"depth\"],\n ]\n\n return layer_info, dtype, bounding_box, origin\n\n\ndef detect_dtype(dataset_path, layer, mag: Mag = Mag(1)):\n layer_path = path.join(dataset_path, layer, str(mag))\n if path.exists(layer_path):\n with wkw.Dataset.open(layer_path) as dataset:\n voxel_type = dataset.header.voxel_type\n num_channels = dataset.header.num_channels\n voxel_size = np.dtype(voxel_type)\n if voxel_size == np.uint8 and num_channels > 1:\n return \"uint\" + str(8 * num_channels)\n else:\n return str(np.dtype(voxel_type))\n\n\ndef detect_cubeLength(dataset_path, layer, mag: Mag = Mag(1)):\n layer_path = path.join(dataset_path, layer, str(mag))\n if path.exists(layer_path):\n with wkw.Dataset.open(layer_path) as dataset:\n return dataset.header.block_len * dataset.header.file_len\n\n\ndef 
detect_bbox(dataset_path, layer, mag: Mag = Mag(1)):\n # Detect the coarse bounding box of a dataset by iterating\n # over the WKW cubes\n layer_path = path.join(dataset_path, layer, str(mag))\n\n def list_files(layer_path):\n return iglob(path.join(layer_path, \"*\", \"*\", \"*.wkw\"), recursive=True)\n\n def parse_cube_file_name(filename):\n CUBE_REGEX = re.compile(r\"z(\\d+)/y(\\d+)/x(\\d+)(\\.wkw)$\")\n m = CUBE_REGEX.search(filename)\n return (int(m.group(3)), int(m.group(2)), int(m.group(1)))\n\n def list_cubes(layer_path):\n return (parse_cube_file_name(f) for f in list_files(layer_path))\n\n xs, ys, zs = list(zip(*list_cubes(layer_path)))\n\n min_x, min_y, min_z = min(xs), min(ys), min(zs)\n max_x, max_y, max_z = max(xs), max(ys), max(zs)\n\n cubeLength = detect_cubeLength(dataset_path, layer, mag)\n\n return {\n \"topLeft\": [min_x * cubeLength, min_y * cubeLength, min_z * cubeLength],\n \"width\": (1 + max_x - min_x) * cubeLength,\n \"height\": (1 + max_y - min_y) * cubeLength,\n \"depth\": (1 + max_z - min_z) * cubeLength,\n }\n\n\ndef detect_resolutions(dataset_path, layer) -> List[Mag]:\n for mag in listdir(path.join(dataset_path, layer)):\n try:\n yield Mag(mag)\n except ValueError:\n logging.info(\"ignoring {} as resolution\".format(mag))\n\n\ndef detect_standard_layer(dataset_path, layer_name, exact_bounding_box=None):\n # Perform metadata detection for well-known layers\n\n if exact_bounding_box is None:\n bbox = detect_bbox(dataset_path, layer_name)\n else:\n bbox = exact_bounding_box\n\n dtype = detect_dtype(dataset_path, layer_name)\n\n mags = list(detect_resolutions(dataset_path, layer_name))\n mags = sorted(mags)\n resolutions = [\n {\n \"resolution\": mag.to_array(),\n \"cubeLength\": detect_cubeLength(dataset_path, layer_name, mag),\n }\n for mag in mags\n ]\n\n return {\n \"dataFormat\": \"wkw\",\n \"name\": layer_name,\n \"category\": layer_name,\n \"elementClass\": dtype,\n \"boundingBox\": bbox,\n \"wkwResolutions\": list(resolutions),\n }\n\n\ndef detect_segmentation_layer(\n dataset_path, layer_name, max_id, compute_max_id=False, exact_bounding_box=None\n):\n layer_info = detect_standard_layer(dataset_path, layer_name, exact_bounding_box)\n layer_info[\"mappings\"] = []\n layer_info[\"largestSegmentId\"] = max_id\n\n if compute_max_id:\n logging.info(\"Computing max id of layer={}\".format(layer_name))\n # Computing the current largest segment id\n # This may take very long due to IO load\n layer_path = path.join(dataset_path, layer_name, \"1\")\n with wkw.Dataset.open(layer_path) as dataset:\n bbox = layer_info[\"boundingBox\"]\n layer_info[\"largestSegmentId\"] = int(\n np.max(\n dataset.read(\n bbox[\"topLeft\"], [bbox[\"width\"], bbox[\"height\"], bbox[\"depth\"]]\n )\n )\n )\n logging.info(\n \"Max id of layer={} is {}\".format(\n layer_name, layer_info[\"largestSegmentId\"]\n )\n )\n\n return layer_info\n\n\ndef detect_layers(dataset_path, max_id, compute_max_id, exact_bounding_box=None):\n # Detect metadata for well-known layers, e.g. 
color and segmentation\n if path.exists(path.join(dataset_path, \"color\")):\n yield detect_standard_layer(dataset_path, \"color\", exact_bounding_box)\n if path.exists(path.join(dataset_path, \"segmentation\")):\n yield detect_segmentation_layer(\n dataset_path, \"segmentation\", max_id, compute_max_id, exact_bounding_box\n )\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n args = create_parser().parse_args()\n scale = tuple(float(x) for x in args.scale.split(\",\"))\n write_webknossos_metadata(\n args.path, args.name, scale, args.max_id, args.compute_max_id\n )\n", "path": "wkcuber/metadata.py"}]}
| 2,822 | 178 |
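
The wkcuber patch in the row above stops assuming that mag 1 exists and instead reads the voxel type from the lowest resolution actually present on disk. A minimal sketch of that pattern, assuming the `wkw` and `numpy` packages and an existing layer directory (names mirror the listing in the row):

```python
from os import path

import numpy as np
import wkw


def detect_dtype_from_available_mags(dataset_path, layer_name, mags):
    # Fail loudly instead of silently omitting elementClass when no mag exists.
    assert len(mags) > 0, "No resolutions found"
    # Read the header of the lowest available mag rather than hard-coding mag 1.
    layer_path = path.join(dataset_path, layer_name, str(sorted(mags)[0]))
    with wkw.Dataset.open(layer_path) as dataset:
        voxel_type = dataset.header.voxel_type
        num_channels = dataset.header.num_channels
    if np.dtype(voxel_type) == np.uint8 and num_channels > 1:
        # Multi-channel uint8 volumes are reported as one wider integer type.
        return "uint" + str(8 * num_channels)
    return str(np.dtype(voxel_type))
```
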
gh_patches_debug_14240
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-87
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
@ character
If I type @ in a chat I get this (using https://github.com/leandrotoledo/python-telegram-bot/blob/master/examples/echobot.py):
TypeError: b'hola @honguitobot' is not JSON serializable
</issue>
<code>
[start of examples/echobot.py]
1 #!/usr/bin/env python
2 #
3 # Simple Bot to reply Telegram messages
4 # Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see [http://www.gnu.org/licenses/].
18
19
20 import logging
21 import telegram
22
23
24 LAST_UPDATE_ID = None
25
26
27 def main():
28 global LAST_UPDATE_ID
29
30 logging.basicConfig(
31 format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
32
33 # Telegram Bot Authorization Token
34 bot = telegram.Bot('TOKEN')
35
36 # This will be our global variable to keep the latest update_id when requesting
37 # for updates. It starts with the latest update_id if available.
38 try:
39 LAST_UPDATE_ID = bot.getUpdates()[-1].update_id
40 except IndexError:
41 LAST_UPDATE_ID = None
42
43 while True:
44 echo(bot)
45
46
47 def echo(bot):
48 global LAST_UPDATE_ID
49
50 # Request updates after the last updated_id
51 for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):
52 # chat_id is required to reply any message
53 chat_id = update.message.chat_id
54 message = update.message.text.encode('utf-8')
55
56 if (message):
57 # Reply the message
58 bot.sendMessage(chat_id=chat_id,
59 text=message)
60
61 # Updates global offset to get the new updates
62 LAST_UPDATE_ID = update.update_id + 1
63
64
65 if __name__ == '__main__':
66 main()
67
[end of examples/echobot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/echobot.py b/examples/echobot.py
--- a/examples/echobot.py
+++ b/examples/echobot.py
@@ -51,12 +51,12 @@
for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):
# chat_id is required to reply any message
chat_id = update.message.chat_id
- message = update.message.text.encode('utf-8')
+ reply_text = update.message.text
- if (message):
+ if (reply_text):
# Reply the message
bot.sendMessage(chat_id=chat_id,
- text=message)
+ text=reply_text)
# Updates global offset to get the new updates
LAST_UPDATE_ID = update.update_id + 1
|
{"golden_diff": "diff --git a/examples/echobot.py b/examples/echobot.py\n--- a/examples/echobot.py\n+++ b/examples/echobot.py\n@@ -51,12 +51,12 @@\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n- message = update.message.text.encode('utf-8')\n+ reply_text = update.message.text\n \n- if (message):\n+ if (reply_text):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n- text=message)\n+ text=reply_text)\n \n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n", "issue": "@ character\nIf I type @ in a chat I got this. (using https://github.com/leandrotoledo/python-telegram-bot/blob/master/examples/echobot.py)\n\nTypeError: b'hola @honguitobot' is not JSON serializable\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Simple Bot to reply Telegram messages\n# Copyright (C) 2015 Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\n\nimport logging\nimport telegram\n\n\nLAST_UPDATE_ID = None\n\n\ndef main():\n global LAST_UPDATE_ID\n\n logging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Telegram Bot Authorization Token\n bot = telegram.Bot('TOKEN')\n\n # This will be our global variable to keep the latest update_id when requesting\n # for updates. It starts with the latest update_id if available.\n try:\n LAST_UPDATE_ID = bot.getUpdates()[-1].update_id\n except IndexError:\n LAST_UPDATE_ID = None\n\n while True:\n echo(bot)\n\n\ndef echo(bot):\n global LAST_UPDATE_ID\n\n # Request updates after the last updated_id\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n # chat_id is required to reply any message\n chat_id = update.message.chat_id\n message = update.message.text.encode('utf-8')\n\n if (message):\n # Reply the message\n bot.sendMessage(chat_id=chat_id,\n text=message)\n\n # Updates global offset to get the new updates\n LAST_UPDATE_ID = update.update_id + 1\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/echobot.py"}]}
| 1,172 | 170 |
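
The echobot fix in the row above replaces the `bytes` produced by `.encode('utf-8')` with the original `str`, because the Bot API payload is serialized as JSON. A minimal sketch of the failure and the fix, assuming Python 3 and only the standard library:

```python
import json

message = "hola @honguitobot"

# What the original echobot did: re-send the text after encoding it to bytes.
payload_bad = {"chat_id": 123, "text": message.encode("utf-8")}
try:
    json.dumps(payload_bad)
except TypeError as exc:
    # Raises a TypeError like the one in the issue: bytes are not JSON serializable.
    print("serialization failed:", exc)

# The patched behaviour: keep the text as str and let the library encode it on the wire.
payload_ok = {"chat_id": 123, "text": message}
print(json.dumps(payload_ok, ensure_ascii=False))
```
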
gh_patches_debug_32178
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-6402
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
YleAreena extractor error
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
United Kingdom
### Provide a description that is worded well enough to be understood
YleAreena extractor error
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['https://areena.yle.fi/1-64829589', '-vU']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2023.01.06 [6becd25] (pip)
[debug] Python 3.10.9 (CPython x86_64 64bit) - Linux-5.15.89-1-lts-x86_64-with-glibc2.36 (OpenSSL 3.0.7 1 Nov 2022, glibc 2.36)
[debug] exe versions: ffmpeg 5.1.2 (setts), ffprobe 5.1.2, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.16.0, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4
[debug] Proxy map: {}
[debug] Loaded 1760 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.01.06, Current version: 2023.01.06
yt-dlp is up to date (2023.01.06)
[YleAreena] Extracting URL: https://areena.yle.fi/1-64829589
[YleAreena] 1-64829589: Downloading webpage
[YleAreena] 1-64829589: Downloading JSON metadata
ERROR: 1-64829589: An extractor error has occurred. (caused by KeyError('kaltura')); please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "/home/erwin/.local/lib/python3.10/site-packages/yt_dlp/extractor/common.py", line 680, in extract
ie_result = self._real_extract(url)
File "/home/erwin/.local/lib/python3.10/site-packages/yt_dlp/extractor/yle_areena.py", line 97, in _real_extract
f'kaltura:1955031:{video_data["data"]["ongoing_ondemand"]["kaltura"]["id"]}',
KeyError: 'kaltura'
```
</issue>
<code>
[start of yt_dlp/extractor/yle_areena.py]
1 from .common import InfoExtractor
2 from .kaltura import KalturaIE
3 from ..utils import (
4 int_or_none,
5 smuggle_url,
6 traverse_obj,
7 unified_strdate,
8 url_or_none,
9 )
10
11
12 class YleAreenaIE(InfoExtractor):
13 _VALID_URL = r'https?://areena\.yle\.fi/(?P<id>[\d-]+)'
14 _TESTS = [
15 {
16 'url': 'https://areena.yle.fi/1-4371942',
17 'md5': '932edda0ecf5dfd6423804182d32f8ac',
18 'info_dict': {
19 'id': '0_a3tjk92c',
20 'ext': 'mp4',
21 'title': 'Pouchit',
22 'description': 'md5:d487309c3abbe5650265bbd1742d2f82',
23 'series': 'Modernit miehet',
24 'season': 'Season 1',
25 'season_number': 1,
26 'episode': 'Episode 2',
27 'episode_number': 2,
28 'thumbnail': 'http://cfvod.kaltura.com/p/1955031/sp/195503100/thumbnail/entry_id/0_a3tjk92c/version/100061',
29 'uploader_id': '[email protected]',
30 'duration': 1435,
31 'view_count': int,
32 'upload_date': '20181204',
33 'release_date': '20190106',
34 'timestamp': 1543916210,
35 'subtitles': {'fin': [{'url': r're:^https?://', 'ext': 'srt'}]},
36 'age_limit': 7,
37 'webpage_url': 'https://areena.yle.fi/1-4371942'
38 }
39 },
40 {
41 'url': 'https://areena.yle.fi/1-2158940',
42 'md5': 'cecb603661004e36af8c5188b5212b12',
43 'info_dict': {
44 'id': '1_l38iz9ur',
45 'ext': 'mp4',
46 'title': 'Albi haluaa vessan',
47 'description': 'md5:15236d810c837bed861fae0e88663c33',
48 'series': 'Albi Lumiukko',
49 'season': None,
50 'season_number': None,
51 'episode': None,
52 'episode_number': None,
53 'thumbnail': 'http://cfvod.kaltura.com/p/1955031/sp/195503100/thumbnail/entry_id/1_l38iz9ur/version/100021',
54 'uploader_id': '[email protected]',
55 'duration': 319,
56 'view_count': int,
57 'upload_date': '20211202',
58 'release_date': '20211215',
59 'timestamp': 1638448202,
60 'subtitles': {},
61 'age_limit': 0,
62 'webpage_url': 'https://areena.yle.fi/1-2158940'
63 }
64 }
65 ]
66
67 def _real_extract(self, url):
68 video_id = self._match_id(url)
69 info = self._search_json_ld(self._download_webpage(url, video_id), video_id, default={})
70 video_data = self._download_json(
71 f'https://player.api.yle.fi/v1/preview/{video_id}.json?app_id=player_static_prod&app_key=8930d72170e48303cf5f3867780d549b',
72 video_id, headers={
73 'origin': 'https://areena.yle.fi',
74 'referer': 'https://areena.yle.fi/',
75 'content-type': 'application/json'
76 })
77
78 # Example title: 'K1, J2: Pouchit | Modernit miehet'
79 series, season_number, episode_number, episode = self._search_regex(
80 r'K(?P<season_no>[\d]+),\s*J(?P<episode_no>[\d]+):?\s*\b(?P<episode>[^|]+)\s*|\s*(?P<series>.+)',
81 info.get('title') or '', 'episode metadata', group=('season_no', 'episode_no', 'episode', 'series'),
82 default=(None, None, None, None))
83 description = traverse_obj(video_data, ('data', 'ongoing_ondemand', 'description', 'fin'), expected_type=str)
84
85 subtitles = {}
86 for sub in traverse_obj(video_data, ('data', 'ongoing_ondemand', 'subtitles', ...)):
87 if url_or_none(sub.get('uri')):
88 subtitles.setdefault(sub.get('language') or 'und', []).append({
89 'url': sub['uri'],
90 'ext': 'srt',
91 'name': sub.get('kind'),
92 })
93
94 return {
95 '_type': 'url_transparent',
96 'url': smuggle_url(
97 f'kaltura:1955031:{video_data["data"]["ongoing_ondemand"]["kaltura"]["id"]}',
98 {'source_url': url}),
99 'ie_key': KalturaIE.ie_key(),
100 'title': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'title', 'fin'), expected_type=str)
101 or episode or info.get('title')),
102 'description': description,
103 'series': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'series', 'title', 'fin'), expected_type=str)
104 or series),
105 'season_number': (int_or_none(self._search_regex(r'Kausi (\d+)', description, 'season number', default=None))
106 or int_or_none(season_number)),
107 'episode_number': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'episode_number'), expected_type=int_or_none)
108 or int_or_none(episode_number)),
109 'thumbnails': traverse_obj(info, ('thumbnails', ..., {'url': 'url'})),
110 'age_limit': traverse_obj(video_data, ('data', 'ongoing_ondemand', 'content_rating', 'age_restriction'), expected_type=int_or_none),
111 'subtitles': subtitles,
112 'release_date': unified_strdate(traverse_obj(video_data, ('data', 'ongoing_ondemand', 'start_time'), expected_type=str)),
113 }
114
[end of yt_dlp/extractor/yle_areena.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yt_dlp/extractor/yle_areena.py b/yt_dlp/extractor/yle_areena.py
--- a/yt_dlp/extractor/yle_areena.py
+++ b/yt_dlp/extractor/yle_areena.py
@@ -61,7 +61,22 @@
'age_limit': 0,
'webpage_url': 'https://areena.yle.fi/1-2158940'
}
- }
+ },
+ {
+ 'url': 'https://areena.yle.fi/1-64829589',
+ 'info_dict': {
+ 'id': '1-64829589',
+ 'ext': 'mp4',
+ 'title': 'HKO & Mälkki & Tanner',
+ 'description': 'md5:b4f1b1af2c6569b33f75179a86eea156',
+ 'series': 'Helsingin kaupunginorkesterin konsertteja',
+ 'thumbnail': r're:^https?://.+\.jpg$',
+ 'release_date': '20230120',
+ },
+ 'params': {
+ 'skip_download': 'm3u8',
+ },
+ },
]
def _real_extract(self, url):
@@ -91,12 +106,22 @@
'name': sub.get('kind'),
})
+ kaltura_id = traverse_obj(video_data, ('data', 'ongoing_ondemand', 'kaltura', 'id'), expected_type=str)
+ if kaltura_id:
+ info_dict = {
+ '_type': 'url_transparent',
+ 'url': smuggle_url(f'kaltura:1955031:{kaltura_id}', {'source_url': url}),
+ 'ie_key': KalturaIE.ie_key(),
+ }
+ else:
+ info_dict = {
+ 'id': video_id,
+ 'formats': self._extract_m3u8_formats(
+ video_data['data']['ongoing_ondemand']['manifest_url'], video_id, 'mp4', m3u8_id='hls'),
+ }
+
return {
- '_type': 'url_transparent',
- 'url': smuggle_url(
- f'kaltura:1955031:{video_data["data"]["ongoing_ondemand"]["kaltura"]["id"]}',
- {'source_url': url}),
- 'ie_key': KalturaIE.ie_key(),
+ **info_dict,
'title': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'title', 'fin'), expected_type=str)
or episode or info.get('title')),
'description': description,
|
{"golden_diff": "diff --git a/yt_dlp/extractor/yle_areena.py b/yt_dlp/extractor/yle_areena.py\n--- a/yt_dlp/extractor/yle_areena.py\n+++ b/yt_dlp/extractor/yle_areena.py\n@@ -61,7 +61,22 @@\n 'age_limit': 0,\n 'webpage_url': 'https://areena.yle.fi/1-2158940'\n }\n- }\n+ },\n+ {\n+ 'url': 'https://areena.yle.fi/1-64829589',\n+ 'info_dict': {\n+ 'id': '1-64829589',\n+ 'ext': 'mp4',\n+ 'title': 'HKO & M\u00e4lkki & Tanner',\n+ 'description': 'md5:b4f1b1af2c6569b33f75179a86eea156',\n+ 'series': 'Helsingin kaupunginorkesterin konsertteja',\n+ 'thumbnail': r're:^https?://.+\\.jpg$',\n+ 'release_date': '20230120',\n+ },\n+ 'params': {\n+ 'skip_download': 'm3u8',\n+ },\n+ },\n ]\n \n def _real_extract(self, url):\n@@ -91,12 +106,22 @@\n 'name': sub.get('kind'),\n })\n \n+ kaltura_id = traverse_obj(video_data, ('data', 'ongoing_ondemand', 'kaltura', 'id'), expected_type=str)\n+ if kaltura_id:\n+ info_dict = {\n+ '_type': 'url_transparent',\n+ 'url': smuggle_url(f'kaltura:1955031:{kaltura_id}', {'source_url': url}),\n+ 'ie_key': KalturaIE.ie_key(),\n+ }\n+ else:\n+ info_dict = {\n+ 'id': video_id,\n+ 'formats': self._extract_m3u8_formats(\n+ video_data['data']['ongoing_ondemand']['manifest_url'], video_id, 'mp4', m3u8_id='hls'),\n+ }\n+\n return {\n- '_type': 'url_transparent',\n- 'url': smuggle_url(\n- f'kaltura:1955031:{video_data[\"data\"][\"ongoing_ondemand\"][\"kaltura\"][\"id\"]}',\n- {'source_url': url}),\n- 'ie_key': KalturaIE.ie_key(),\n+ **info_dict,\n 'title': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'title', 'fin'), expected_type=str)\n or episode or info.get('title')),\n 'description': description,\n", "issue": "YleAreena extractor error\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nUnited Kingdom\n\n### Provide a description that is worded well enough to be understood\n\nYleAreena extractor error\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['https://areena.yle.fi/1-64829589', '-vU']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version 2023.01.06 [6becd25] (pip)\r\n[debug] Python 3.10.9 (CPython x86_64 64bit) - Linux-5.15.89-1-lts-x86_64-with-glibc2.36 (OpenSSL 3.0.7 1 Nov 2022, glibc 2.36)\r\n[debug] exe versions: ffmpeg 5.1.2 (setts), ffprobe 5.1.2, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome-3.16.0, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1760 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nLatest version: 2023.01.06, Current version: 2023.01.06\r\nyt-dlp is up to date (2023.01.06)\r\n[YleAreena] Extracting URL: https://areena.yle.fi/1-64829589\r\n[YleAreena] 1-64829589: Downloading webpage\r\n[YleAreena] 1-64829589: Downloading JSON metadata\r\nERROR: 1-64829589: An extractor error has occurred. (caused by KeyError('kaltura')); please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. 
Confirm you are on the latest version using yt-dlp -U\r\n File \"/home/erwin/.local/lib/python3.10/site-packages/yt_dlp/extractor/common.py\", line 680, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/home/erwin/.local/lib/python3.10/site-packages/yt_dlp/extractor/yle_areena.py\", line 97, in _real_extract\r\n f'kaltura:1955031:{video_data[\"data\"][\"ongoing_ondemand\"][\"kaltura\"][\"id\"]}',\r\nKeyError: 'kaltura'\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\nfrom .kaltura import KalturaIE\nfrom ..utils import (\n int_or_none,\n smuggle_url,\n traverse_obj,\n unified_strdate,\n url_or_none,\n)\n\n\nclass YleAreenaIE(InfoExtractor):\n _VALID_URL = r'https?://areena\\.yle\\.fi/(?P<id>[\\d-]+)'\n _TESTS = [\n {\n 'url': 'https://areena.yle.fi/1-4371942',\n 'md5': '932edda0ecf5dfd6423804182d32f8ac',\n 'info_dict': {\n 'id': '0_a3tjk92c',\n 'ext': 'mp4',\n 'title': 'Pouchit',\n 'description': 'md5:d487309c3abbe5650265bbd1742d2f82',\n 'series': 'Modernit miehet',\n 'season': 'Season 1',\n 'season_number': 1,\n 'episode': 'Episode 2',\n 'episode_number': 2,\n 'thumbnail': 'http://cfvod.kaltura.com/p/1955031/sp/195503100/thumbnail/entry_id/0_a3tjk92c/version/100061',\n 'uploader_id': '[email protected]',\n 'duration': 1435,\n 'view_count': int,\n 'upload_date': '20181204',\n 'release_date': '20190106',\n 'timestamp': 1543916210,\n 'subtitles': {'fin': [{'url': r're:^https?://', 'ext': 'srt'}]},\n 'age_limit': 7,\n 'webpage_url': 'https://areena.yle.fi/1-4371942'\n }\n },\n {\n 'url': 'https://areena.yle.fi/1-2158940',\n 'md5': 'cecb603661004e36af8c5188b5212b12',\n 'info_dict': {\n 'id': '1_l38iz9ur',\n 'ext': 'mp4',\n 'title': 'Albi haluaa vessan',\n 'description': 'md5:15236d810c837bed861fae0e88663c33',\n 'series': 'Albi Lumiukko',\n 'season': None,\n 'season_number': None,\n 'episode': None,\n 'episode_number': None,\n 'thumbnail': 'http://cfvod.kaltura.com/p/1955031/sp/195503100/thumbnail/entry_id/1_l38iz9ur/version/100021',\n 'uploader_id': '[email protected]',\n 'duration': 319,\n 'view_count': int,\n 'upload_date': '20211202',\n 'release_date': '20211215',\n 'timestamp': 1638448202,\n 'subtitles': {},\n 'age_limit': 0,\n 'webpage_url': 'https://areena.yle.fi/1-2158940'\n }\n }\n ]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n info = self._search_json_ld(self._download_webpage(url, video_id), video_id, default={})\n video_data = self._download_json(\n f'https://player.api.yle.fi/v1/preview/{video_id}.json?app_id=player_static_prod&app_key=8930d72170e48303cf5f3867780d549b',\n video_id, headers={\n 'origin': 'https://areena.yle.fi',\n 'referer': 'https://areena.yle.fi/',\n 'content-type': 'application/json'\n })\n\n # Example title: 'K1, J2: Pouchit | Modernit miehet'\n series, season_number, episode_number, episode = self._search_regex(\n r'K(?P<season_no>[\\d]+),\\s*J(?P<episode_no>[\\d]+):?\\s*\\b(?P<episode>[^|]+)\\s*|\\s*(?P<series>.+)',\n info.get('title') or '', 'episode metadata', group=('season_no', 'episode_no', 'episode', 'series'),\n default=(None, None, None, None))\n description = traverse_obj(video_data, ('data', 'ongoing_ondemand', 'description', 'fin'), expected_type=str)\n\n subtitles = {}\n for sub in traverse_obj(video_data, ('data', 'ongoing_ondemand', 'subtitles', ...)):\n if url_or_none(sub.get('uri')):\n subtitles.setdefault(sub.get('language') or 'und', []).append({\n 'url': sub['uri'],\n 'ext': 'srt',\n 'name': sub.get('kind'),\n })\n\n return {\n '_type': 'url_transparent',\n 'url': smuggle_url(\n 
f'kaltura:1955031:{video_data[\"data\"][\"ongoing_ondemand\"][\"kaltura\"][\"id\"]}',\n {'source_url': url}),\n 'ie_key': KalturaIE.ie_key(),\n 'title': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'title', 'fin'), expected_type=str)\n or episode or info.get('title')),\n 'description': description,\n 'series': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'series', 'title', 'fin'), expected_type=str)\n or series),\n 'season_number': (int_or_none(self._search_regex(r'Kausi (\\d+)', description, 'season number', default=None))\n or int_or_none(season_number)),\n 'episode_number': (traverse_obj(video_data, ('data', 'ongoing_ondemand', 'episode_number'), expected_type=int_or_none)\n or int_or_none(episode_number)),\n 'thumbnails': traverse_obj(info, ('thumbnails', ..., {'url': 'url'})),\n 'age_limit': traverse_obj(video_data, ('data', 'ongoing_ondemand', 'content_rating', 'age_restriction'), expected_type=int_or_none),\n 'subtitles': subtitles,\n 'release_date': unified_strdate(traverse_obj(video_data, ('data', 'ongoing_ondemand', 'start_time'), expected_type=str)),\n }\n", "path": "yt_dlp/extractor/yle_areena.py"}]}
| 3,413 | 645 |
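
The yt-dlp patch in the row above makes the Kaltura entry optional and falls back to the HLS manifest when it is absent. A self-contained sketch of that decision, with plain dict handling standing in for the extractor's `traverse_obj`/`smuggle_url`/`_extract_m3u8_formats` helpers (the function and field names below are illustrative, not yt-dlp API):

```python
def choose_source(video_data, video_id, url):
    """Resolve playback via Kaltura when an entry id exists, else via the HLS manifest."""
    ongoing = (video_data.get("data") or {}).get("ongoing_ondemand") or {}
    kaltura_id = (ongoing.get("kaltura") or {}).get("id")
    if kaltura_id:
        # Same path as before the patch: delegate to the Kaltura extractor.
        return {"kind": "kaltura", "entry": f"kaltura:1955031:{kaltura_id}", "source_url": url}
    # Fallback added by the patch: some Areena items (e.g. 1-64829589) ship only a manifest.
    return {"kind": "hls", "id": video_id, "manifest_url": ongoing["manifest_url"]}


sample = {"data": {"ongoing_ondemand": {"manifest_url": "https://example.invalid/master.m3u8"}}}
print(choose_source(sample, "1-64829589", "https://areena.yle.fi/1-64829589"))
```
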
gh_patches_debug_9136
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5537
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace `@$(...)` with `@$el.find(...)`
Unnecessary alias. Often people forget that `@$(...) != $(...)`.
</issue>
<code>
[start of sphinx/source/docs/user_guide/examples/extensions_putting_together.py]
1 from bokeh.core.properties import String, Instance
2 from bokeh.models import LayoutDOM, Slider
3
4 CODE ="""
5 import * as _ from "underscore"
6 import * as $ from "jquery"
7
8 import * as p from "core/properties"
9 import {LayoutDOM, LayoutDOMView} from "models/layouts/layout_dom"
10
11 export class CustomView extends LayoutDOMView
12
13 initialize: (options) ->
14 super(options)
15
16 @render()
17
18 # Set Backbone listener so that when the Bokeh slider has a change
19 # event, we can process the new data
20 @listenTo(@model.slider, 'change', () => @render())
21
22 render: () ->
23 # Backbone Views create <div> elements by default, accessible as @$el.
24 # Many Bokeh views ignore this default <div>, and instead do things
25 # like draw to the HTML canvas. In this case though, we change the
26 # contents of the <div>, based on the current slider value.
27 @$el.html("<h1>#{ @model.text }: #{ @model.slider.value }</h1>")
28 @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })
29
30 export class Custom extends LayoutDOM
31
32 # If there is an associated view, this is boilerplate.
33 default_view: CustomView
34
35 # The ``type`` class attribute should generally match exactly the name
36 # of the corresponding Python class.
37 type: "Custom"
38
39 # The @define block adds corresponding "properties" to the JS model. These
40 # should basically line up 1-1 with the Python model class. Most property
41 # types have counterparts, e.g. bokeh.core.properties.String will be
42 # p.String in the JS implementation. Where the JS type system is not yet
43 # as rich, you can use p.Any as a "wildcard" property type.
44 @define {
45 text: [ p.String ]
46 slider: [ p.Any ]
47 }
48 """
49
50 class Custom(LayoutDOM):
51
52 __implementation__ = CODE
53
54 text = String(default="Custom text")
55
56 slider = Instance(Slider)
57
58 from bokeh.io import show
59
60 from bokeh.layouts import column
61 from bokeh.models import Slider
62
63 slider = Slider(start=0, end=10, step=0.1, value=0, title="value")
64
65 custom = Custom(text="Special Slider Display", slider=slider)
66
67 layout = column(slider, custom)
68
69 show(layout)
70
[end of sphinx/source/docs/user_guide/examples/extensions_putting_together.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py
--- a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py
+++ b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py
@@ -25,7 +25,7 @@
# like draw to the HTML canvas. In this case though, we change the
# contents of the <div>, based on the current slider value.
@$el.html("<h1>#{ @model.text }: #{ @model.slider.value }</h1>")
- @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })
+ @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })
export class Custom extends LayoutDOM
|
{"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n--- a/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n+++ b/sphinx/source/docs/user_guide/examples/extensions_putting_together.py\n@@ -25,7 +25,7 @@\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n- @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n+ @$el.find('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n \n export class Custom extends LayoutDOM\n", "issue": "Replace `@$(...)` with `@$el.find(...)`\nUnnecessary alias. Often people forget that `@$(...) != $(...)`.\n", "before_files": [{"content": "from bokeh.core.properties import String, Instance\nfrom bokeh.models import LayoutDOM, Slider\n\nCODE =\"\"\"\nimport * as _ from \"underscore\"\nimport * as $ from \"jquery\"\n\nimport * as p from \"core/properties\"\nimport {LayoutDOM, LayoutDOMView} from \"models/layouts/layout_dom\"\n\nexport class CustomView extends LayoutDOMView\n\n initialize: (options) ->\n super(options)\n\n @render()\n\n # Set Backbone listener so that when the Bokeh slider has a change\n # event, we can process the new data\n @listenTo(@model.slider, 'change', () => @render())\n\n render: () ->\n # Backbone Views create <div> elements by default, accessible as @$el.\n # Many Bokeh views ignore this default <div>, and instead do things\n # like draw to the HTML canvas. In this case though, we change the\n # contents of the <div>, based on the current slider value.\n @$el.html(\"<h1>#{ @model.text }: #{ @model.slider.value }</h1>\")\n @$('h1').css({ 'color': '#686d8e', 'background-color': '#2a3153' })\n\nexport class Custom extends LayoutDOM\n\n # If there is an associated view, this is boilerplate.\n default_view: CustomView\n\n # The ``type`` class attribute should generally match exactly the name\n # of the corresponding Python class.\n type: \"Custom\"\n\n # The @define block adds corresponding \"properties\" to the JS model. These\n # should basically line up 1-1 with the Python model class. Most property\n # types have counterparts, e.g. bokeh.core.properties.String will be\n # p.String in the JS implementation. Where the JS type system is not yet\n # as rich, you can use p.Any as a \"wildcard\" property type.\n @define {\n text: [ p.String ]\n slider: [ p.Any ]\n }\n\"\"\"\n\nclass Custom(LayoutDOM):\n\n __implementation__ = CODE\n\n text = String(default=\"Custom text\")\n\n slider = Instance(Slider)\n\nfrom bokeh.io import show\n\nfrom bokeh.layouts import column\nfrom bokeh.models import Slider\n\nslider = Slider(start=0, end=10, step=0.1, value=0, title=\"value\")\n\ncustom = Custom(text=\"Special Slider Display\", slider=slider)\n\nlayout = column(slider, custom)\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/extensions_putting_together.py"}]}
| 1,260 | 207 |
gh_patches_debug_62334
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-129
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
discovery_cache module not packaged during installation.
I've installed `google-api-python-client` from source, but at some point my application started failing with this message:
```
...
...
File "build/bdist.linux-x86_64/egg/oauth2client/util.py", line 142, in positional_wrapper
return wrapped(*args, **kwargs)
File "build/bdist.linux-x86_64/egg/googleapiclient/discovery.py", line 193, in build
content = _retrieve_discovery_doc(requested_url, http, cache_discovery, cache)
File "build/bdist.linux-x86_64/egg/googleapiclient/discovery.py", line 215, in _retrieve_discovery_doc
from . import discovery_cache
ImportError: cannot import name discovery_cache
```
I've checked if `discovery_cache` module was actually part of the `egg`, and unfortunately it was not:
```
[root@e42fb97ce657 unit]# python
Python 2.7.5 (default, Jun 24 2015, 00:41:19)
[GCC 4.8.3 20140911 (Red Hat 4.8.3-9)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import googleapiclient.discovery_cache
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named discovery_cache
>>>
```
Here are all the files in `egg`
```
[root@e42fb97ce657 ~]# unzip -l /usr/lib/python2.7/site-packages/google_api_python_client-1.4.1-py2.7.egg
Archive: /usr/lib/python2.7/site-packages/google_api_python_client-1.4.1-py2.7.egg
Length Date Time Name
--------- ---------- ----- ----
1169 09-03-2015 16:09 apiclient/__init__.py
1301 09-03-2015 16:09 apiclient/__init__.pyc
1 09-03-2015 16:09 EGG-INFO/dependency_links.txt
62 09-03-2015 16:09 EGG-INFO/requires.txt
26 09-03-2015 16:09 EGG-INFO/top_level.txt
969 09-03-2015 16:09 EGG-INFO/PKG-INFO
1 09-03-2015 16:09 EGG-INFO/zip-safe
545 09-03-2015 16:09 EGG-INFO/SOURCES.txt
53575 09-03-2015 16:09 googleapiclient/http.py
9910 09-03-2015 16:09 googleapiclient/channel.py
40890 09-03-2015 16:09 googleapiclient/discovery.py
9907 09-03-2015 16:09 googleapiclient/schema.pyc
620 09-03-2015 16:09 googleapiclient/__init__.py
9317 09-03-2015 16:09 googleapiclient/schema.py
11830 09-03-2015 16:09 googleapiclient/model.py
4047 09-03-2015 16:09 googleapiclient/sample_tools.py
6552 09-03-2015 16:09 googleapiclient/mimeparse.py
53976 09-03-2015 16:09 googleapiclient/http.pyc
7043 09-03-2015 16:09 googleapiclient/mimeparse.pyc
6333 09-03-2015 16:09 googleapiclient/errors.pyc
3131 09-03-2015 16:09 googleapiclient/sample_tools.pyc
3622 09-03-2015 16:09 googleapiclient/errors.py
35534 09-03-2015 16:09 googleapiclient/discovery.pyc
14028 09-03-2015 16:09 googleapiclient/model.pyc
175 09-03-2015 16:09 googleapiclient/__init__.pyc
10690 09-03-2015 16:09 googleapiclient/channel.pyc
--------- -------
285254 26 files
[root@e42fb97ce657 ~]#
```
As a workaround I had to add `googleapiclient/discovery_cache` to the `packages` in `setup.py` so it looked like that:
```
[root@e42fb97ce657 google-api-python-client]# more setup.py | grep packages -A 4 -m1
packages = [
'apiclient',
'googleapiclient',
'googleapiclient/discovery_cache'
]
```
Then installed and everything magically started working.
```
[root@e42fb97ce657 google-api-python-client]# python
Python 2.7.5 (default, Jun 24 2015, 00:41:19)
[GCC 4.8.3 20140911 (Red Hat 4.8.3-9)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import googleapiclient.discovery_cache
>>>
```
Here is a quick sample that looks similar to my environment using `Docker`:
```
FROM centos:centos7
RUN yum install -y git python-devel python-setuptools unzip
RUN easy_install pip
RUN cd /tmp ;\
git clone https://github.com/google/google-api-python-client && \
cd google-api-python-client && \
python setup.py install
```
I've also tried to follow preferred suggestion from the `README.md` and install it from `pip` but it ended up in the same situation.
Please advise on how to proceed without making "manual" modifications to the official package?
</issue>
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 6):
25 print('google-api-python-client requires python version >= 2.6.',
26 file=sys.stderr)
27 sys.exit(1)
28 if (3, 1) <= sys.version_info < (3, 3):
29 print('google-api-python-client requires python3 version >= 3.3.',
30 file=sys.stderr)
31 sys.exit(1)
32
33 from setuptools import setup
34 import pkg_resources
35
36 def _DetectBadness():
37 import os
38 if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:
39 return
40 o2c_pkg = None
41 try:
42 o2c_pkg = pkg_resources.get_distribution('oauth2client')
43 except pkg_resources.DistributionNotFound:
44 pass
45 oauth2client = None
46 try:
47 import oauth2client
48 except ImportError:
49 pass
50 if o2c_pkg is None and oauth2client is not None:
51 raise RuntimeError(
52 'Previous version of google-api-python-client detected; due to a '
53 'packaging issue, we cannot perform an in-place upgrade. Please remove '
54 'the old version and re-install this package.'
55 )
56
57 _DetectBadness()
58
59 packages = [
60 'apiclient',
61 'googleapiclient',
62 ]
63
64 install_requires = [
65 'httplib2>=0.8',
66 'oauth2client>=1.4.6',
67 'six>=1.6.1',
68 'uritemplate>=0.6',
69 ]
70
71 if sys.version_info < (2, 7):
72 install_requires.append('argparse')
73
74 long_desc = """The Google API Client for Python is a client library for
75 accessing the Plus, Moderator, and many other Google APIs."""
76
77 import googleapiclient
78 version = googleapiclient.__version__
79
80 setup(
81 name="google-api-python-client",
82 version=version,
83 description="Google API Client Library for Python",
84 long_description=long_desc,
85 author="Google Inc.",
86 url="http://github.com/google/google-api-python-client/",
87 install_requires=install_requires,
88 packages=packages,
89 package_data={},
90 license="Apache 2.0",
91 keywords="google api client",
92 classifiers=[
93 'Programming Language :: Python :: 2',
94 'Programming Language :: Python :: 2.6',
95 'Programming Language :: Python :: 2.7',
96 'Programming Language :: Python :: 3',
97 'Programming Language :: Python :: 3.3',
98 'Programming Language :: Python :: 3.4',
99 'Development Status :: 5 - Production/Stable',
100 'Intended Audience :: Developers',
101 'License :: OSI Approved :: Apache Software License',
102 'Operating System :: OS Independent',
103 'Topic :: Internet :: WWW/HTTP',
104 ],
105 )
106
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,6 +59,7 @@
packages = [
'apiclient',
'googleapiclient',
+ 'googleapiclient/discovery_cache',
]
install_requires = [
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,6 +59,7 @@\n packages = [\n 'apiclient',\n 'googleapiclient',\n+ 'googleapiclient/discovery_cache',\n ]\n \n install_requires = [\n", "issue": "discovery_cache module not packaged during installation.\nI've installed `google-api-python-client` from source, but when at some point my application was failing with this message:\n\n```\n ...\n ...\n File \"build/bdist.linux-x86_64/egg/oauth2client/util.py\", line 142, in positional_wrapper\n return wrapped(*args, **kwargs)\n File \"build/bdist.linux-x86_64/egg/googleapiclient/discovery.py\", line 193, in build\n content = _retrieve_discovery_doc(requested_url, http, cache_discovery, cache)\n File \"build/bdist.linux-x86_64/egg/googleapiclient/discovery.py\", line 215, in _retrieve_discovery_doc\n from . import discovery_cache\nImportError: cannot import name discovery_cache\n```\n\nI've checked if `discovery_cache` module was actually part of the `egg`, and unfortunately it was not:\n\n```\n[root@e42fb97ce657 unit]# python\nPython 2.7.5 (default, Jun 24 2015, 00:41:19) \n[GCC 4.8.3 20140911 (Red Hat 4.8.3-9)] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import googleapiclient.discovery_cache\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nImportError: No module named discovery_cache\n>>> \n```\n\nHere are all the files in `egg`\n\n```\n[root@e42fb97ce657 ~]# unzip -l /usr/lib/python2.7/site-packages/google_api_python_client-1.4.1-py2.7.egg \nArchive: /usr/lib/python2.7/site-packages/google_api_python_client-1.4.1-py2.7.egg\n Length Date Time Name\n--------- ---------- ----- ----\n 1169 09-03-2015 16:09 apiclient/__init__.py\n 1301 09-03-2015 16:09 apiclient/__init__.pyc\n 1 09-03-2015 16:09 EGG-INFO/dependency_links.txt\n 62 09-03-2015 16:09 EGG-INFO/requires.txt\n 26 09-03-2015 16:09 EGG-INFO/top_level.txt\n 969 09-03-2015 16:09 EGG-INFO/PKG-INFO\n 1 09-03-2015 16:09 EGG-INFO/zip-safe\n 545 09-03-2015 16:09 EGG-INFO/SOURCES.txt\n 53575 09-03-2015 16:09 googleapiclient/http.py\n 9910 09-03-2015 16:09 googleapiclient/channel.py\n 40890 09-03-2015 16:09 googleapiclient/discovery.py\n 9907 09-03-2015 16:09 googleapiclient/schema.pyc\n 620 09-03-2015 16:09 googleapiclient/__init__.py\n 9317 09-03-2015 16:09 googleapiclient/schema.py\n 11830 09-03-2015 16:09 googleapiclient/model.py\n 4047 09-03-2015 16:09 googleapiclient/sample_tools.py\n 6552 09-03-2015 16:09 googleapiclient/mimeparse.py\n 53976 09-03-2015 16:09 googleapiclient/http.pyc\n 7043 09-03-2015 16:09 googleapiclient/mimeparse.pyc\n 6333 09-03-2015 16:09 googleapiclient/errors.pyc\n 3131 09-03-2015 16:09 googleapiclient/sample_tools.pyc\n 3622 09-03-2015 16:09 googleapiclient/errors.py\n 35534 09-03-2015 16:09 googleapiclient/discovery.pyc\n 14028 09-03-2015 16:09 googleapiclient/model.pyc\n 175 09-03-2015 16:09 googleapiclient/__init__.pyc\n 10690 09-03-2015 16:09 googleapiclient/channel.pyc\n--------- -------\n 285254 26 files\n[root@e42fb97ce657 ~]# \n```\n\nAs a workaround I had to add `googleapiclient/discovery_cache` to the `packages` in `setup.py` so it looked like that:\n\n```\n[root@e42fb97ce657 google-api-python-client]# more setup.py | grep packages -A 4 -m1\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache'\n]\n```\n\nThen installed and everything magically started working.\n\n```\n[root@e42fb97ce657 google-api-python-client]# python\nPython 2.7.5 (default, Jun 24 2015, 
00:41:19) \n[GCC 4.8.3 20140911 (Red Hat 4.8.3-9)] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import googleapiclient.discovery_cache\n>>> \n```\n\nHere is a quick sample that looks similar to my environment using `Docker`:\n\n```\nFROM centos:centos7\n\nRUN yum install -y git python-devel python-setuptools unzip\nRUN easy_install pip\nRUN cd /tmp ;\\\n git clone https://github.com/google/google-api-python-client && \\\n cd google-api-python-client && \\\n python setup.py install \n```\n\nI've also tried to follow preferred suggestion from the `README.md` and install it from `pip` but it ended up in the same situation.\n\nPlease advice on how to proceed without making \"manual\" modifications to the official package?\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n]\n\ninstall_requires = [\n 'httplib2>=0.8',\n 'oauth2client>=1.4.6',\n 'six>=1.6.1',\n 'uritemplate>=0.6',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}]}
| 3,212 | 65 |
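
The packaging row above hand-appends `googleapiclient/discovery_cache` to an explicit `packages` list; an alternative that avoids this class of bug entirely is to let setuptools discover sub-packages. A minimal sketch, assuming setuptools and a conventional source layout (the project name here is a placeholder, not the real `setup.py`):

```python
from setuptools import find_packages, setup

setup(
    name="example-client",  # placeholder; the real project is google-api-python-client
    version="0.0.1",
    # find_packages() walks the tree and includes sub-packages such as
    # googleapiclient.discovery_cache, so they cannot be left out of the build.
    packages=find_packages(exclude=["tests", "tests.*"]),
)
```
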
gh_patches_debug_11388
|
rasdani/github-patches
|
git_diff
|
google__mobly-222
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`snippet_client._start_event_client` crashes
because of the extra `host_port` arg.
This crashes all tests that use `@AsyncRpc`.
We should add some unit tests for this...
</issue>
<code>
[start of mobly/controllers/android_device_lib/snippet_client.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """JSON RPC interface to Mobly Snippet Lib."""
15 import logging
16 import re
17 import time
18
19 from mobly import utils
20 from mobly.controllers.android_device_lib import adb
21 from mobly.controllers.android_device_lib import jsonrpc_client_base
22
23 _INSTRUMENTATION_RUNNER_PACKAGE = (
24 'com.google.android.mobly.snippet.SnippetRunner')
25
26 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
27 _LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +
28 _INSTRUMENTATION_RUNNER_PACKAGE)
29
30 _LAUNCH_CMD_V1 = (
31 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
32
33 _STOP_CMD = (
34 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
35
36 # Maximum time to wait for a v0 snippet to start on the device (10 minutes).
37 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
38 _APP_START_WAIT_TIME_V0 = 10 * 60
39
40
41 class Error(Exception):
42 pass
43
44
45 class ProtocolVersionError(Error):
46 """Raised when the protocol reported by the snippet is unknown."""
47
48
49 class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
50 """A client for interacting with snippet APKs using Mobly Snippet Lib.
51
52 See superclass documentation for a list of public attributes.
53
54 It currently supports both v0 and v1 snippet launch protocols, although
55 support for v0 will be removed in a future version.
56
57 For a description of the launch protocols, see the documentation in
58 mobly-snippet-lib, SnippetRunner.java.
59 """
60
61 def __init__(self, package, adb_proxy, log=logging.getLogger()):
62 """Initializes a SnippetClient.
63
64 Args:
65 package: (str) The package name of the apk where the snippets are
66 defined.
67 adb_proxy: (adb.AdbProxy) Adb proxy for running adb commands.
68 log: (logging.Logger) logger to which to send log messages.
69 """
70 super(SnippetClient, self).__init__(app_name=package, log=log)
71 self.package = package
72 self._adb = adb_proxy
73 self._proc = None
74
75 def start_app_and_connect(self):
76 """Overrides superclass. Launches a snippet app and connects to it."""
77 self._check_app_installed()
78
79 # Try launching the app with the v1 protocol. If that fails, fall back
80 # to v0 for compatibility. Use info here so people know exactly what's
81 # happening here, which is helpful since they need to create their own
82 # instrumentations and manifest.
83 self.log.info('Launching snippet apk %s with protocol v1',
84 self.package)
85 cmd = _LAUNCH_CMD_V1 % self.package
86 start_time = time.time()
87 self._proc = self._do_start_app(cmd)
88
89 # "Instrumentation crashed" could be due to several reasons, eg
90 # exception thrown during startup or just a launch protocol 0 snippet
91 # dying because it needs the port flag. Sadly we have no way to tell so
92 # just warn and retry as v0.
93 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is
94 # removed.
95 line = self._read_line()
96 if line == 'INSTRUMENTATION_RESULT: shortMsg=Process crashed.':
97 self.log.warning('Snippet %s crashed on startup. This might be an '
98 'actual error or a snippet using deprecated v0 '
99 'start protocol. Retrying as a v0 snippet.',
100 self.package)
101 self.host_port = utils.get_available_host_port()
102 # Reuse the host port as the device port in v0 snippet. This isn't
103 # safe in general, but the protocol is deprecated.
104 cmd = _LAUNCH_CMD_V0 % (self.host_port, self.package)
105 self._proc = self._do_start_app(cmd)
106 self._connect_to_v0()
107 else:
108 # Check protocol version and get the device port
109 match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$',
110 line)
111 if not match or match.group(1) != '1':
112 raise ProtocolVersionError(line)
113 self._connect_to_v1()
114 self.log.debug('Snippet %s started after %.1fs on host port %s',
115 self.package, time.time() - start_time, self.host_port)
116
117 def stop_app(self):
118 # Kill the pending 'adb shell am instrument -w' process if there is one.
119 # Although killing the snippet apk would abort this process anyway, we
120 # want to call stop_standing_subprocess() to perform a health check,
121 # print the failure stack trace if there was any, and reap it from the
122 # process table.
123 self.log.debug('Stopping snippet apk %s', self.package)
124 try:
125 # Close the socket connection.
126 self.disconnect()
127 if self._proc:
128 utils.stop_standing_subprocess(self._proc)
129 out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')
130 if 'OK (0 tests)' not in out:
131 raise Error('Failed to stop existing apk. Unexpected '
132 'output: %s' % out)
133 finally:
134 # Always clean up the adb port
135 if self.host_port:
136 self._adb.forward(['--remove', 'tcp:%d' % self.host_port])
137
138 def _start_event_client(self):
139 """Overrides superclass."""
140 event_client = SnippetClient(
141 package=self.package,
142 host_port=self.host_port,
143 adb_proxy=self._adb,
144 log=self.log)
145 event_client.connect(self.uid,
146 jsonrpc_client_base.JsonRpcCommand.CONTINUE)
147 return event_client
148
149 def _check_app_installed(self):
150 # Check that the Mobly Snippet app is installed.
151 out = self._adb.shell('pm list package')
152 if not utils.grep('^package:%s$' % self.package, out):
153 raise jsonrpc_client_base.AppStartError(
154 '%s is not installed on %s' % (self.package, self._adb.serial))
155 # Check that the app is instrumented.
156 out = self._adb.shell('pm list instrumentation')
157 matched_out = utils.grep('^instrumentation:%s/%s' %
158 (self.package,
159 _INSTRUMENTATION_RUNNER_PACKAGE), out)
160 if not matched_out:
161 raise jsonrpc_client_base.AppStartError(
162 '%s is installed on %s, but it is not instrumented.' %
163 (self.package, self._adb.serial))
164 match = re.search('^instrumentation:(.*)\/(.*) \(target=(.*)\)$',
165 matched_out[0])
166 target_name = match.group(3)
167 # Check that the instrumentation target is installed if it's not the
168 # same as the snippet package.
169 if target_name != self.package:
170 out = self._adb.shell('pm list package')
171 if not utils.grep('^package:%s$' % target_name, out):
172 raise jsonrpc_client_base.AppStartError(
173 'Instrumentation target %s is not installed on %s' %
174 (target_name, self._adb.serial))
175
176 def _do_start_app(self, launch_cmd):
177 adb_cmd = [adb.ADB]
178 if self._adb.serial:
179 adb_cmd += ['-s', self._adb.serial]
180 adb_cmd += ['shell', launch_cmd]
181 return utils.start_standing_subprocess(adb_cmd, shell=False)
182
183 # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is
184 # removed.
185 def _connect_to_v0(self):
186 self.device_port = self.host_port
187 self._adb.forward(
188 ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
189 start_time = time.time()
190 expiration_time = start_time + _APP_START_WAIT_TIME_V0
191 while time.time() < expiration_time:
192 self.log.debug('Attempting to start %s.', self.package)
193 try:
194 self.connect()
195 return
196 except:
197 self.log.debug(
198 'v0 snippet %s is not yet running, retrying',
199 self.package,
200 exc_info=True)
201 time.sleep(1)
202 raise jsonrpc_client_base.AppStartError(
203 '%s failed to start on %s.' % (self.package, self._adb.serial))
204
205 def _connect_to_v1(self):
206 line = self._read_line()
207 match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)
208 if not match:
209 raise ProtocolVersionError(line)
210 self.device_port = int(match.group(1))
211
212 # Forward the device port to a new host port, and connect to that port
213 self.host_port = utils.get_available_host_port()
214 self._adb.forward(
215 ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
216 self.connect()
217
218 def _read_line(self):
219 line = self._proc.stdout.readline().rstrip()
220 self.log.debug('Read line from instrumentation output: "%s"', line)
221 return line
222
[end of mobly/controllers/android_device_lib/snippet_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -138,10 +138,8 @@
def _start_event_client(self):
"""Overrides superclass."""
event_client = SnippetClient(
- package=self.package,
- host_port=self.host_port,
- adb_proxy=self._adb,
- log=self.log)
+ package=self.package, adb_proxy=self._adb, log=self.log)
+ event_client.host_port = self.host_port
event_client.connect(self.uid,
jsonrpc_client_base.JsonRpcCommand.CONTINUE)
return event_client
|
{"golden_diff": "diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py\n--- a/mobly/controllers/android_device_lib/snippet_client.py\n+++ b/mobly/controllers/android_device_lib/snippet_client.py\n@@ -138,10 +138,8 @@\n def _start_event_client(self):\n \"\"\"Overrides superclass.\"\"\"\n event_client = SnippetClient(\n- package=self.package,\n- host_port=self.host_port,\n- adb_proxy=self._adb,\n- log=self.log)\n+ package=self.package, adb_proxy=self._adb, log=self.log)\n+ event_client.host_port = self.host_port\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n", "issue": "`snippet_client._start_event_client` crashes\nbecause of the extra `host_port` arg.\r\n\r\nThis crashes all tests that use `@AsyncRpc`.\r\n\r\nWe should add some unit tests for this...\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\nimport time\n\nfrom mobly import utils\nfrom mobly.controllers.android_device_lib import adb\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = (\n 'com.google.android.mobly.snippet.SnippetRunner')\n\n# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.\n_LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +\n _INSTRUMENTATION_RUNNER_PACKAGE)\n\n_LAUNCH_CMD_V1 = (\n 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n\n_STOP_CMD = (\n 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n\n# Maximum time to wait for a v0 snippet to start on the device (10 minutes).\n# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.\n_APP_START_WAIT_TIME_V0 = 10 * 60\n\n\nclass Error(Exception):\n pass\n\n\nclass ProtocolVersionError(Error):\n \"\"\"Raised when the protocol reported by the snippet is unknown.\"\"\"\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n\n It currently supports both v0 and v1 snippet launch protocols, although\n support for v0 will be removed in a future version.\n\n For a description of the launch protocols, see the documentation in\n mobly-snippet-lib, SnippetRunner.java.\n \"\"\"\n\n def __init__(self, package, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n adb_proxy: (adb.AdbProxy) Adb proxy for running adb commands.\n log: (logging.Logger) logger to which to send log messages.\n \"\"\"\n super(SnippetClient, self).__init__(app_name=package, log=log)\n self.package = package\n self._adb = adb_proxy\n self._proc = None\n\n def start_app_and_connect(self):\n \"\"\"Overrides 
superclass. Launches a snippet app and connects to it.\"\"\"\n self._check_app_installed()\n\n # Try launching the app with the v1 protocol. If that fails, fall back\n # to v0 for compatibility. Use info here so people know exactly what's\n # happening here, which is helpful since they need to create their own\n # instrumentations and manifest.\n self.log.info('Launching snippet apk %s with protocol v1',\n self.package)\n cmd = _LAUNCH_CMD_V1 % self.package\n start_time = time.time()\n self._proc = self._do_start_app(cmd)\n\n # \"Instrumentation crashed\" could be due to several reasons, eg\n # exception thrown during startup or just a launch protocol 0 snippet\n # dying because it needs the port flag. Sadly we have no way to tell so\n # just warn and retry as v0.\n # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is\n # removed.\n line = self._read_line()\n if line == 'INSTRUMENTATION_RESULT: shortMsg=Process crashed.':\n self.log.warning('Snippet %s crashed on startup. This might be an '\n 'actual error or a snippet using deprecated v0 '\n 'start protocol. Retrying as a v0 snippet.',\n self.package)\n self.host_port = utils.get_available_host_port()\n # Reuse the host port as the device port in v0 snippet. This isn't\n # safe in general, but the protocol is deprecated.\n cmd = _LAUNCH_CMD_V0 % (self.host_port, self.package)\n self._proc = self._do_start_app(cmd)\n self._connect_to_v0()\n else:\n # Check protocol version and get the device port\n match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$',\n line)\n if not match or match.group(1) != '1':\n raise ProtocolVersionError(line)\n self._connect_to_v1()\n self.log.debug('Snippet %s started after %.1fs on host port %s',\n self.package, time.time() - start_time, self.host_port)\n\n def stop_app(self):\n # Kill the pending 'adb shell am instrument -w' process if there is one.\n # Although killing the snippet apk would abort this process anyway, we\n # want to call stop_standing_subprocess() to perform a health check,\n # print the failure stack trace if there was any, and reap it from the\n # process table.\n self.log.debug('Stopping snippet apk %s', self.package)\n try:\n # Close the socket connection.\n self.disconnect()\n if self._proc:\n utils.stop_standing_subprocess(self._proc)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. Unexpected '\n 'output: %s' % out)\n finally:\n # Always clean up the adb port\n if self.host_port:\n self._adb.forward(['--remove', 'tcp:%d' % self.host_port])\n\n def _start_event_client(self):\n \"\"\"Overrides superclass.\"\"\"\n event_client = SnippetClient(\n package=self.package,\n host_port=self.host_port,\n adb_proxy=self._adb,\n log=self.log)\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n\n def _check_app_installed(self):\n # Check that the Mobly Snippet app is installed.\n out = self._adb.shell('pm list package')\n if not utils.grep('^package:%s$' % self.package, out):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._adb.serial))\n # Check that the app is instrumented.\n out = self._adb.shell('pm list instrumentation')\n matched_out = utils.grep('^instrumentation:%s/%s' %\n (self.package,\n _INSTRUMENTATION_RUNNER_PACKAGE), out)\n if not matched_out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' 
%\n (self.package, self._adb.serial))\n match = re.search('^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$',\n matched_out[0])\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb.shell('pm list package')\n if not utils.grep('^package:%s$' % target_name, out):\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._adb.serial))\n\n def _do_start_app(self, launch_cmd):\n adb_cmd = [adb.ADB]\n if self._adb.serial:\n adb_cmd += ['-s', self._adb.serial]\n adb_cmd += ['shell', launch_cmd]\n return utils.start_standing_subprocess(adb_cmd, shell=False)\n\n # TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is\n # removed.\n def _connect_to_v0(self):\n self.device_port = self.host_port\n self._adb.forward(\n ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n start_time = time.time()\n expiration_time = start_time + _APP_START_WAIT_TIME_V0\n while time.time() < expiration_time:\n self.log.debug('Attempting to start %s.', self.package)\n try:\n self.connect()\n return\n except:\n self.log.debug(\n 'v0 snippet %s is not yet running, retrying',\n self.package,\n exc_info=True)\n time.sleep(1)\n raise jsonrpc_client_base.AppStartError(\n '%s failed to start on %s.' % (self.package, self._adb.serial))\n\n def _connect_to_v1(self):\n line = self._read_line()\n match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)\n if not match:\n raise ProtocolVersionError(line)\n self.device_port = int(match.group(1))\n\n # Forward the device port to a new host port, and connect to that port\n self.host_port = utils.get_available_host_port()\n self._adb.forward(\n ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])\n self.connect()\n\n def _read_line(self):\n line = self._proc.stdout.readline().rstrip()\n self.log.debug('Read line from instrumentation output: \"%s\"', line)\n return line\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}]}
| 3,309 | 170 |
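A brief note on the record above (context only; the class and function names below are stand-ins, not the real mobly code): the crash happens because `SnippetClient.__init__`, as shown in the prompt, only accepts `package`, `adb_proxy` and `log`, so the pre-patch `_start_event_client` call that passed `host_port=` raised a `TypeError`. The golden diff constructs the client first and copies `host_port` onto the instance afterwards. A minimal sketch of both call shapes:
```
class FakeSnippetClient:
    """Stand-in with the same constructor signature as the record's client."""

    def __init__(self, package, adb_proxy, log=None):
        self.package = package
        self._adb = adb_proxy
        self.host_port = None  # normally filled in later by _connect_to_v0/_connect_to_v1


def start_event_client_before(parent):
    # Pre-patch shape: raises TypeError("__init__() got an unexpected keyword
    # argument 'host_port'") because host_port is not a constructor parameter.
    return FakeSnippetClient(
        package=parent.package, host_port=parent.host_port, adb_proxy=parent._adb)


def start_event_client_after(parent):
    # Post-patch shape: construct first, then copy the already-forwarded port over.
    client = FakeSnippetClient(package=parent.package, adb_proxy=parent._adb)
    client.host_port = parent.host_port
    return client
```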
gh_patches_debug_1253
|
rasdani/github-patches
|
git_diff
|
translate__pootle-3671
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Confusing sentence in permissions view
There is a permission called "Can review translations" that confused me, as I thought that there are also reviewers besides suggesters and translators! Hopefully you fix it so that it lands in 2.7.0.
</issue>
<code>
[start of pootle/core/initdb.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10
11 from django.contrib.auth import get_user_model
12 from django.contrib.auth.models import Permission
13 from django.contrib.contenttypes.models import ContentType
14 from django.utils.translation import ugettext_noop as _
15
16 from pootle.core.models import Revision
17 from pootle_app.models import Directory
18 from pootle_app.models.permissions import PermissionSet, get_pootle_permission
19 from pootle_language.models import Language
20 from pootle_project.models import Project
21 from staticpages.models import StaticPage as Announcement
22
23
24 def initdb():
25 """Populate the database with default initial data.
26
27 This creates the default database to get a working Pootle installation.
28 """
29 create_revision()
30 create_essential_users()
31 create_root_directories()
32 create_template_languages()
33 create_terminology_project()
34 create_pootle_permissions()
35 create_pootle_permission_sets()
36
37 create_default_projects()
38 create_default_languages()
39 create_default_admin()
40
41
42 def create_revision():
43 Revision.initialize()
44
45
46 def create_essential_users():
47 """Create the 'default' and 'nobody' User instances.
48
49 These users are required for Pootle's permission system.
50 """
51 User = get_user_model()
52
53 # The nobody user is used to represent an anonymous user in cases where
54 # we need to associate model information with such a user. An example is
55 # in the permission system: we need a way to store rights for anonymous
56 # users; thus we use the nobody user.
57 criteria = {
58 'username': u"nobody",
59 'full_name': u"any anonymous user",
60 'is_active': True,
61 }
62 nobody, created = User.objects.get_or_create(**criteria)
63 if created:
64 nobody.set_unusable_password()
65 nobody.save()
66
67 # The 'default' user represents any valid, non-anonymous user and is used
68 # to associate information any such user. An example is in the permission
69 # system: we need a way to store default rights for users. We use the
70 # 'default' user for this.
71 #
72 # In a future version of Pootle we should think about using Django's
73 # groups to do better permissions handling.
74 criteria = {
75 'username': u"default",
76 'full_name': u"any authenticated user",
77 'is_active': True,
78 }
79 default, created = User.objects.get_or_create(**criteria)
80 if created:
81 default.set_unusable_password()
82 default.save()
83
84 # The system user represents a system, and is used to
85 # associate updates done by bulk commands as update_stores.
86 criteria = {
87 'username': u"system",
88 'full_name': u"system user",
89 'is_active': True,
90 }
91 system, created = User.objects.get_or_create(**criteria)
92 if created:
93 system.set_unusable_password()
94 system.save()
95
96
97 def create_pootle_permissions():
98 """Create Pootle's directory level permissions."""
99
100 args = {
101 'app_label': "pootle_app",
102 'model': "directory",
103 }
104 pootle_content_type, created = ContentType.objects.get_or_create(**args)
105 pootle_content_type.name = 'pootle'
106 pootle_content_type.save()
107
108 # Create the permissions.
109 permissions = [
110 {
111 'name': _("Can access a project"),
112 'codename': "view",
113 },
114 {
115 'name': _("Cannot access a project"),
116 'codename': "hide",
117 },
118 {
119 'name': _("Can make a suggestion for a translation"),
120 'codename': "suggest",
121 },
122 {
123 'name': _("Can submit a translation"),
124 'codename': "translate",
125 },
126 {
127 'name': _("Can review translations"),
128 'codename': "review",
129 },
130 {
131 'name': _("Can administrate a translation project"),
132 'codename': "administrate",
133 },
134 ]
135
136 criteria = {
137 'content_type': pootle_content_type,
138 }
139
140 for permission in permissions:
141 criteria.update(permission)
142 obj, created = Permission.objects.get_or_create(**criteria)
143
144
145 def create_pootle_permission_sets():
146 """Create the default permission set for the 'nobody' and 'default' users.
147
148 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged
149 in user.
150 """
151 User = get_user_model()
152
153 nobody = User.objects.get(username='nobody')
154 default = User.objects.get(username='default')
155
156 view = get_pootle_permission('view')
157 suggest = get_pootle_permission('suggest')
158 translate = get_pootle_permission('translate')
159
160 # Default permissions for tree root.
161 criteria = {
162 'user': nobody,
163 'directory': Directory.objects.root,
164 }
165 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
166 if created:
167 permission_set.positive_permissions = [view, suggest]
168 permission_set.save()
169
170 criteria['user'] = default
171 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
172 if created:
173 permission_set.positive_permissions = [view, suggest, translate]
174 permission_set.save()
175
176 # Default permissions for templates language.
177 # Override with no permissions for templates language.
178 criteria = {
179 'user': nobody,
180 'directory': Directory.objects.get(pootle_path="/templates/"),
181 }
182 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
183 if created:
184 permission_set.positive_permissions = []
185 permission_set.save()
186
187 criteria['user'] = default
188 permission_set, created = PermissionSet.objects.get_or_create(**criteria)
189 if created:
190 permission_set.positive_permissions = []
191 permission_set.save()
192
193
194 def require_english():
195 """Create the English Language item."""
196 criteria = {
197 'code': "en",
198 'fullname': u"English",
199 'nplurals': 2,
200 'pluralequation': "(n != 1)",
201 }
202 en, created = Language.objects.get_or_create(**criteria)
203 return en
204
205
206 def create_root_directories():
207 """Create the root Directory items."""
208 root, created = Directory.objects.get_or_create(name='')
209 projects, created = Directory.objects.get_or_create(name='projects',
210 parent=root)
211
212
213 def create_template_languages():
214 """Create the 'templates' and English languages.
215
216 The 'templates' language is used to give users access to the untranslated
217 template files.
218 """
219 templates, created = Language.objects.get_or_create(code="templates",
220 fullname=u'Templates')
221 require_english()
222
223
224 def create_terminology_project():
225 """Create the terminology project.
226
227 The terminology project is used to display terminology suggestions while
228 translating.
229 """
230 criteria = {
231 'code': "terminology",
232 'fullname': u"Terminology",
233 'source_language': require_english(),
234 'checkstyle': "terminology",
235 }
236 terminology, created = Project.objects.get_or_create(**criteria)
237
238
239 def create_default_projects():
240 """Create the default projects that we host.
241
242 You might want to add your projects here, although you can also add things
243 through the web interface later.
244 """
245 from pootle_project.models import Project
246
247 en = require_english()
248
249 criteria = {
250 'code': u"tutorial",
251 'source_language': en,
252 'fullname': u"Tutorial",
253 'checkstyle': "standard",
254 'localfiletype': "po",
255 'treestyle': "auto",
256 }
257 tutorial = Project(**criteria)
258 tutorial.save()
259
260 criteria = {
261 'active': True,
262 'title': "Project instructions",
263 'body': ('<div dir="ltr" lang="en">Tutorial project where users can '
264 'play with Pootle and learn more about translation and '
265 'localisation.<br />For more help on localisation, visit the '
266 '<a href="http://docs.translatehouse.org/projects/'
267 'localization-guide/en/latest/guide/start.html">localisation '
268 'guide</a>.</div>'),
269 'virtual_path': "announcements/projects/"+tutorial.code,
270 }
271 ann = Announcement(**criteria)
272 ann.save()
273
274
275 def create_default_languages():
276 """Create the default languages."""
277 from translate.lang import data, factory
278
279 from pootle_language.models import Language
280
281 # import languages from toolkit
282 for code in data.languages.keys():
283 try:
284 tk_lang = factory.getlanguage(code)
285 criteria = {
286 'code': code,
287 'fullname': tk_lang.fullname,
288 'nplurals': tk_lang.nplurals,
289 'pluralequation': tk_lang.pluralequation,
290 }
291 try:
292 criteria['specialchars'] = tk_lang.specialchars
293 except AttributeError:
294 pass
295 lang, created = Language.objects.get_or_create(**criteria)
296 except:
297 pass
298
299
300 def create_default_admin():
301 """Create the default admin user for Pootle.
302
303 You definitely want to change the admin account so that your default
304 install is not accessible with the default credentials. The users 'noboby'
305 and 'default' should be left as is.
306 """
307 User = get_user_model()
308
309 criteria = {
310 'username': u"admin",
311 'full_name': u"Administrator",
312 'is_active': True,
313 'is_superuser': True,
314 }
315 admin = User(**criteria)
316 admin.set_password("admin")
317 admin.save()
318
[end of pootle/core/initdb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py
--- a/pootle/core/initdb.py
+++ b/pootle/core/initdb.py
@@ -124,7 +124,7 @@
'codename': "translate",
},
{
- 'name': _("Can review translations"),
+ 'name': _("Can review suggestions"),
'codename': "review",
},
{
|
{"golden_diff": "diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py\n--- a/pootle/core/initdb.py\n+++ b/pootle/core/initdb.py\n@@ -124,7 +124,7 @@\n 'codename': \"translate\",\n },\n {\n- 'name': _(\"Can review translations\"),\n+ 'name': _(\"Can review suggestions\"),\n 'codename': \"review\",\n },\n {\n", "issue": "Confusing sentence in permissions view\nThere is a permission called \"Can review translations\" that confused me as I thought that there are also reviewers beside suggesters and translators! Hopefully you fix it so that it lands in 2.7.0.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.models import Revision\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import StaticPage as Announcement\n\n\ndef initdb():\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n create_revision()\n create_essential_users()\n create_root_directories()\n create_template_languages()\n create_terminology_project()\n create_pootle_permissions()\n create_pootle_permission_sets()\n\n create_default_projects()\n create_default_languages()\n create_default_admin()\n\n\ndef create_revision():\n Revision.initialize()\n\n\ndef create_essential_users():\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n User = get_user_model()\n\n # The nobody user is used to represent an anonymous user in cases where\n # we need to associate model information with such a user. An example is\n # in the permission system: we need a way to store rights for anonymous\n # users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n nobody, created = User.objects.get_or_create(**criteria)\n if created:\n nobody.set_unusable_password()\n nobody.save()\n\n # The 'default' user represents any valid, non-anonymous user and is used\n # to associate information any such user. An example is in the permission\n # system: we need a way to store default rights for users. 
We use the\n # 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n default, created = User.objects.get_or_create(**criteria)\n if created:\n default.set_unusable_password()\n default.save()\n\n # The system user represents a system, and is used to\n # associate updates done by bulk commands as update_stores.\n criteria = {\n 'username': u\"system\",\n 'full_name': u\"system user\",\n 'is_active': True,\n }\n system, created = User.objects.get_or_create(**criteria)\n if created:\n system.set_unusable_password()\n system.save()\n\n\ndef create_pootle_permissions():\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n pootle_content_type, created = ContentType.objects.get_or_create(**args)\n pootle_content_type.name = 'pootle'\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review translations\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can administrate a translation project\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n obj, created = Permission.objects.get_or_create(**criteria)\n\n\ndef create_pootle_permission_sets():\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged\n in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest]\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest, translate]\n permission_set.save()\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n\ndef require_english():\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en, created = 
Language.objects.get_or_create(**criteria)\n return en\n\n\ndef create_root_directories():\n \"\"\"Create the root Directory items.\"\"\"\n root, created = Directory.objects.get_or_create(name='')\n projects, created = Directory.objects.get_or_create(name='projects',\n parent=root)\n\n\ndef create_template_languages():\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the untranslated\n template files.\n \"\"\"\n templates, created = Language.objects.get_or_create(code=\"templates\",\n fullname=u'Templates')\n require_english()\n\n\ndef create_terminology_project():\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions while\n translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': u\"Terminology\",\n 'source_language': require_english(),\n 'checkstyle': \"terminology\",\n }\n terminology, created = Project.objects.get_or_create(**criteria)\n\n\ndef create_default_projects():\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add things\n through the web interface later.\n \"\"\"\n from pootle_project.models import Project\n\n en = require_english()\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\",\n 'localfiletype': \"po\",\n 'treestyle': \"auto\",\n }\n tutorial = Project(**criteria)\n tutorial.save()\n\n criteria = {\n 'active': True,\n 'title': \"Project instructions\",\n 'body': ('<div dir=\"ltr\" lang=\"en\">Tutorial project where users can '\n 'play with Pootle and learn more about translation and '\n 'localisation.<br />For more help on localisation, visit the '\n '<a href=\"http://docs.translatehouse.org/projects/'\n 'localization-guide/en/latest/guide/start.html\">localisation '\n 'guide</a>.</div>'),\n 'virtual_path': \"announcements/projects/\"+tutorial.code,\n }\n ann = Announcement(**criteria)\n ann.save()\n\n\ndef create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n from pootle_language.models import Language\n\n # import languages from toolkit\n for code in data.languages.keys():\n try:\n tk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': tk_lang.fullname,\n 'nplurals': tk_lang.nplurals,\n 'pluralequation': tk_lang.pluralequation,\n }\n try:\n criteria['specialchars'] = tk_lang.specialchars\n except AttributeError:\n pass\n lang, created = Language.objects.get_or_create(**criteria)\n except:\n pass\n\n\ndef create_default_admin():\n \"\"\"Create the default admin user for Pootle.\n\n You definitely want to change the admin account so that your default\n install is not accessible with the default credentials. The users 'noboby'\n and 'default' should be left as is.\n \"\"\"\n User = get_user_model()\n\n criteria = {\n 'username': u\"admin\",\n 'full_name': u\"Administrator\",\n 'is_active': True,\n 'is_superuser': True,\n }\n admin = User(**criteria)\n admin.set_password(\"admin\")\n admin.save()\n", "path": "pootle/core/initdb.py"}]}
| 3,575 | 98 |
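One hedged side note on the initdb code in the record above (an observation about the pattern, not part of the project's actual fix, which is just the one-word label change): `Permission.objects.get_or_create` is called with the human-readable `name` included in the lookup, so a relabel does not update permission rows that already exist. If relabelling existing rows mattered, the lookup could be keyed on the stable `codename` with the label passed through `defaults=`, roughly as below (sketch only; it assumes the same `pootle_content_type` and `_()` helper as in the file):
```
obj, created = Permission.objects.get_or_create(
    content_type=pootle_content_type,
    codename="review",
    defaults={"name": _("Can review suggestions")},
)
if not created and obj.name != _("Can review suggestions"):
    obj.name = _("Can review suggestions")  # propagate the relabel to existing rows
    obj.save(update_fields=["name"])
```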
gh_patches_debug_6228
|
rasdani/github-patches
|
git_diff
|
apache__airflow-22772
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LocalFilesystemToGCSOperator gives a false positive while copying files from src to dest, even when src has no files
### Apache Airflow Provider(s)
google
### Versions of Apache Airflow Providers
apache-airflow-providers-google==6.4.0
### Apache Airflow version
2.1.4
### Operating System
Debian GNU/Linux 10 (buster)
### Deployment
Docker-Compose
### Deployment details
_No response_
### What happened
When you run LocalFilesystemToGCSOperator with the params for src and dest, the operator reports a false positive when there are no files present under the specified src directory. I expected it to fail, stating that the specified directory doesn't contain any files.
[2022-03-15 14:26:15,475] {taskinstance.py:1107} INFO - Executing <Task(LocalFilesystemToGCSOperator): upload_files_to_GCS> on 2022-03-15T14:25:59.554459+00:00
[2022-03-15 14:26:15,484] {standard_task_runner.py:52} INFO - Started process 709 to run task
[2022-03-15 14:26:15,492] {standard_task_runner.py:76} INFO - Running: ['***', 'tasks', 'run', 'dag', 'upload_files_to_GCS', '2022-03-15T14:25:59.554459+00:00', '--job-id', '1562', '--pool', 'default_pool', '--raw', '--subdir', 'DAGS_FOLDER/dag.py', '--cfg-path', '/tmp/tmp_e9t7pl9', '--error-file', '/tmp/tmpyij6m4er']
[2022-03-15 14:26:15,493] {standard_task_runner.py:77} INFO - Job 1562: Subtask upload_files_to_GCS
[2022-03-15 14:26:15,590] {logging_mixin.py:104} INFO - Running <TaskInstance: dag.upload_files_to_GCS 2022-03-15T14:25:59.554459+00:00 [running]> on host 653e566fd372
[2022-03-15 14:26:15,752] {taskinstance.py:1300} INFO - Exporting the following env vars:
AIRFLOW_CTX_DAG_OWNER=jet2
AIRFLOW_CTX_DAG_ID=dag
AIRFLOW_CTX_TASK_ID=upload_files_to_GCS
AIRFLOW_CTX_EXECUTION_DATE=2022-03-15T14:25:59.554459+00:00
AIRFLOW_CTX_DAG_RUN_ID=manual__2022-03-15T14:25:59.554459+00:00
[2022-03-15 14:26:19,357] {taskinstance.py:1204} INFO - Marking task as SUCCESS. gag, task_id=upload_files_to_GCS, execution_date=20220315T142559, start_date=20220315T142615, end_date=20220315T142619
[2022-03-15 14:26:19,422] {taskinstance.py:1265} INFO - 1 downstream tasks scheduled from follow-on schedule check
[2022-03-15 14:26:19,458] {local_task_job.py:149} INFO - Task exited with return code 0
### What you think should happen instead
The operator should at least log that no files were copied, rather than just marking the task successful.
### How to reproduce
- create a DAG with LocalFilesystemToGCSOperator
- specify an empty directory as src and a GCP bucket as bucket (the dst param can be blank)
- run the DAG
### Anything else
No
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/providers/google/cloud/transfers/local_to_gcs.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """This module contains operator for uploading local file(s) to GCS."""
19 import os
20 import warnings
21 from glob import glob
22 from typing import TYPE_CHECKING, Optional, Sequence, Union
23
24 from airflow.models import BaseOperator
25 from airflow.providers.google.cloud.hooks.gcs import GCSHook
26
27 if TYPE_CHECKING:
28 from airflow.utils.context import Context
29
30
31 class LocalFilesystemToGCSOperator(BaseOperator):
32 """
33 Uploads a file or list of files to Google Cloud Storage.
34 Optionally can compress the file for upload.
35
36 .. seealso::
37 For more information on how to use this operator, take a look at the guide:
38 :ref:`howto/operator:LocalFilesystemToGCSOperator`
39
40 :param src: Path to the local file, or list of local files. Path can be either absolute
41 (e.g. /path/to/file.ext) or relative (e.g. ../../foo/*/*.csv). (templated)
42 :param dst: Destination path within the specified bucket on GCS (e.g. /path/to/file.ext).
43 If multiple files are being uploaded, specify object prefix with trailing backslash
44 (e.g. /path/to/directory/) (templated)
45 :param bucket: The bucket to upload to. (templated)
46 :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
47 :param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
48 This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
49 :param mime_type: The mime-type string
50 :param delegate_to: The account to impersonate, if any
51 :param gzip: Allows for file to be compressed and uploaded as gzip
52 :param impersonation_chain: Optional service account to impersonate using short-term
53 credentials, or chained list of accounts required to get the access_token
54 of the last account in the list, which will be impersonated in the request.
55 If set as a string, the account must grant the originating account
56 the Service Account Token Creator IAM role.
57 If set as a sequence, the identities from the list must grant
58 Service Account Token Creator IAM role to the directly preceding identity, with first
59 account from the list granting this role to the originating account (templated).
60 """
61
62 template_fields: Sequence[str] = (
63 'src',
64 'dst',
65 'bucket',
66 'impersonation_chain',
67 )
68
69 def __init__(
70 self,
71 *,
72 src,
73 dst,
74 bucket,
75 gcp_conn_id='google_cloud_default',
76 google_cloud_storage_conn_id=None,
77 mime_type='application/octet-stream',
78 delegate_to=None,
79 gzip=False,
80 impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
81 **kwargs,
82 ):
83 super().__init__(**kwargs)
84
85 if google_cloud_storage_conn_id:
86 warnings.warn(
87 "The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
88 "the gcp_conn_id parameter.",
89 DeprecationWarning,
90 stacklevel=3,
91 )
92 gcp_conn_id = google_cloud_storage_conn_id
93
94 self.src = src
95 self.dst = dst
96 self.bucket = bucket
97 self.gcp_conn_id = gcp_conn_id
98 self.mime_type = mime_type
99 self.delegate_to = delegate_to
100 self.gzip = gzip
101 self.impersonation_chain = impersonation_chain
102
103 def execute(self, context: 'Context'):
104 """Uploads a file or list of files to Google Cloud Storage"""
105 hook = GCSHook(
106 gcp_conn_id=self.gcp_conn_id,
107 delegate_to=self.delegate_to,
108 impersonation_chain=self.impersonation_chain,
109 )
110
111 filepaths = self.src if isinstance(self.src, list) else glob(self.src)
112 if os.path.basename(self.dst): # path to a file
113 if len(filepaths) > 1: # multiple file upload
114 raise ValueError(
115 "'dst' parameter references filepath. Please specify "
116 "directory (with trailing backslash) to upload multiple "
117 "files. e.g. /path/to/directory/"
118 )
119 object_paths = [self.dst]
120 else: # directory is provided
121 object_paths = [os.path.join(self.dst, os.path.basename(filepath)) for filepath in filepaths]
122
123 for filepath, object_path in zip(filepaths, object_paths):
124 hook.upload(
125 bucket_name=self.bucket,
126 object_name=object_path,
127 mime_type=self.mime_type,
128 filename=filepath,
129 gzip=self.gzip,
130 )
131
[end of airflow/providers/google/cloud/transfers/local_to_gcs.py]
</code>
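A hedged usage sketch of the operator above (the DAG id, schedule, paths and bucket name are invented; only the import path, the parameter names and the `task_id` from the issue's log come from the record):
```
from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.transfers.local_to_gcs import LocalFilesystemToGCSOperator

with DAG(
    dag_id="local_to_gcs_example",
    start_date=datetime(2022, 1, 1),
    schedule_interval=None,
) as dag:
    upload = LocalFilesystemToGCSOperator(
        task_id="upload_files_to_GCS",
        src="/opt/airflow/data/*.csv",  # glob pattern; an empty match is the reported no-op
        dst="data/",                    # trailing slash -> treated as an object prefix
        bucket="my-example-bucket",
    )
```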
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/airflow/providers/google/cloud/transfers/local_to_gcs.py b/airflow/providers/google/cloud/transfers/local_to_gcs.py
--- a/airflow/providers/google/cloud/transfers/local_to_gcs.py
+++ b/airflow/providers/google/cloud/transfers/local_to_gcs.py
@@ -109,6 +109,8 @@
)
filepaths = self.src if isinstance(self.src, list) else glob(self.src)
+ if not filepaths:
+ raise FileNotFoundError(self.src)
if os.path.basename(self.dst): # path to a file
if len(filepaths) > 1: # multiple file upload
raise ValueError(
|
{"golden_diff": "diff --git a/airflow/providers/google/cloud/transfers/local_to_gcs.py b/airflow/providers/google/cloud/transfers/local_to_gcs.py\n--- a/airflow/providers/google/cloud/transfers/local_to_gcs.py\n+++ b/airflow/providers/google/cloud/transfers/local_to_gcs.py\n@@ -109,6 +109,8 @@\n )\n \n filepaths = self.src if isinstance(self.src, list) else glob(self.src)\n+ if not filepaths:\n+ raise FileNotFoundError(self.src)\n if os.path.basename(self.dst): # path to a file\n if len(filepaths) > 1: # multiple file upload\n raise ValueError(\n", "issue": "LocalFileSystemToGCSOperator give false positive while copying file from src to dest, even when src has no file\n### Apache Airflow Provider(s)\n\ngoogle\n\n### Versions of Apache Airflow Providers\n\napache-airflow-providers-google==6.4.0\n\n### Apache Airflow version\n\n2.1.4\n\n### Operating System\n\nDebian GNU/Linux 10 (buster)\n\n### Deployment\n\nDocker-Compose\n\n### Deployment details\n\n_No response_\n\n### What happened\n\nWhen you run LocalFilesSystemToGCSOperator with the params for src and dest, the operator reports a false positive when there are no files present under the specified src directory. I expected it to fail stating the specified directory doesn't have any file.\r\n\r\n[2022-03-15 14:26:15,475] {taskinstance.py:1107} INFO - Executing <Task(LocalFilesystemToGCSOperator): upload_files_to_GCS> on 2022-03-15T14:25:59.554459+00:00\r\n[2022-03-15 14:26:15,484] {standard_task_runner.py:52} INFO - Started process 709 to run task\r\n[2022-03-15 14:26:15,492] {standard_task_runner.py:76} INFO - Running: ['***', 'tasks', 'run', 'dag', 'upload_files_to_GCS', '2022-03-15T14:25:59.554459+00:00', '--job-id', '1562', '--pool', 'default_pool', '--raw', '--subdir', 'DAGS_FOLDER/dag.py', '--cfg-path', '/tmp/tmp_e9t7pl9', '--error-file', '/tmp/tmpyij6m4er']\r\n[2022-03-15 14:26:15,493] {standard_task_runner.py:77} INFO - Job 1562: Subtask upload_files_to_GCS\r\n[2022-03-15 14:26:15,590] {logging_mixin.py:104} INFO - Running <TaskInstance: dag.upload_files_to_GCS 2022-03-15T14:25:59.554459+00:00 [running]> on host 653e566fd372\r\n[2022-03-15 14:26:15,752] {taskinstance.py:1300} INFO - Exporting the following env vars:\r\nAIRFLOW_CTX_DAG_OWNER=jet2\r\nAIRFLOW_CTX_DAG_ID=dag\r\nAIRFLOW_CTX_TASK_ID=upload_files_to_GCS\r\nAIRFLOW_CTX_EXECUTION_DATE=2022-03-15T14:25:59.554459+00:00\r\nAIRFLOW_CTX_DAG_RUN_ID=manual__2022-03-15T14:25:59.554459+00:00\r\n[2022-03-15 14:26:19,357] {taskinstance.py:1204} INFO - Marking task as SUCCESS. gag, task_id=upload_files_to_GCS, execution_date=20220315T142559, start_date=20220315T142615, end_date=20220315T142619\r\n[2022-03-15 14:26:19,422] {taskinstance.py:1265} INFO - 1 downstream tasks scheduled from follow-on schedule check\r\n[2022-03-15 14:26:19,458] {local_task_job.py:149} INFO - Task exited with return code 0\n\n### What you think should happen instead\n\nThe operator should at least info that no files were copied than just making it successful. \n\n### How to reproduce\n\n- create a Dag with LocalFilesSystemToGCSOperator \r\n- specify an empty directory as src and a gcp bucket as bucket_name, dest param(can be blank). 
\r\n- run the dag\n\n### Anything else\n\nNo\n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains operator for uploading local file(s) to GCS.\"\"\"\nimport os\nimport warnings\nfrom glob import glob\nfrom typing import TYPE_CHECKING, Optional, Sequence, Union\n\nfrom airflow.models import BaseOperator\nfrom airflow.providers.google.cloud.hooks.gcs import GCSHook\n\nif TYPE_CHECKING:\n from airflow.utils.context import Context\n\n\nclass LocalFilesystemToGCSOperator(BaseOperator):\n \"\"\"\n Uploads a file or list of files to Google Cloud Storage.\n Optionally can compress the file for upload.\n\n .. seealso::\n For more information on how to use this operator, take a look at the guide:\n :ref:`howto/operator:LocalFilesystemToGCSOperator`\n\n :param src: Path to the local file, or list of local files. Path can be either absolute\n (e.g. /path/to/file.ext) or relative (e.g. ../../foo/*/*.csv). (templated)\n :param dst: Destination path within the specified bucket on GCS (e.g. /path/to/file.ext).\n If multiple files are being uploaded, specify object prefix with trailing backslash\n (e.g. /path/to/directory/) (templated)\n :param bucket: The bucket to upload to. (templated)\n :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.\n :param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.\n This parameter has been deprecated. 
You should pass the gcp_conn_id parameter instead.\n :param mime_type: The mime-type string\n :param delegate_to: The account to impersonate, if any\n :param gzip: Allows for file to be compressed and uploaded as gzip\n :param impersonation_chain: Optional service account to impersonate using short-term\n credentials, or chained list of accounts required to get the access_token\n of the last account in the list, which will be impersonated in the request.\n If set as a string, the account must grant the originating account\n the Service Account Token Creator IAM role.\n If set as a sequence, the identities from the list must grant\n Service Account Token Creator IAM role to the directly preceding identity, with first\n account from the list granting this role to the originating account (templated).\n \"\"\"\n\n template_fields: Sequence[str] = (\n 'src',\n 'dst',\n 'bucket',\n 'impersonation_chain',\n )\n\n def __init__(\n self,\n *,\n src,\n dst,\n bucket,\n gcp_conn_id='google_cloud_default',\n google_cloud_storage_conn_id=None,\n mime_type='application/octet-stream',\n delegate_to=None,\n gzip=False,\n impersonation_chain: Optional[Union[str, Sequence[str]]] = None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n\n if google_cloud_storage_conn_id:\n warnings.warn(\n \"The google_cloud_storage_conn_id parameter has been deprecated. You should pass \"\n \"the gcp_conn_id parameter.\",\n DeprecationWarning,\n stacklevel=3,\n )\n gcp_conn_id = google_cloud_storage_conn_id\n\n self.src = src\n self.dst = dst\n self.bucket = bucket\n self.gcp_conn_id = gcp_conn_id\n self.mime_type = mime_type\n self.delegate_to = delegate_to\n self.gzip = gzip\n self.impersonation_chain = impersonation_chain\n\n def execute(self, context: 'Context'):\n \"\"\"Uploads a file or list of files to Google Cloud Storage\"\"\"\n hook = GCSHook(\n gcp_conn_id=self.gcp_conn_id,\n delegate_to=self.delegate_to,\n impersonation_chain=self.impersonation_chain,\n )\n\n filepaths = self.src if isinstance(self.src, list) else glob(self.src)\n if os.path.basename(self.dst): # path to a file\n if len(filepaths) > 1: # multiple file upload\n raise ValueError(\n \"'dst' parameter references filepath. Please specify \"\n \"directory (with trailing backslash) to upload multiple \"\n \"files. e.g. /path/to/directory/\"\n )\n object_paths = [self.dst]\n else: # directory is provided\n object_paths = [os.path.join(self.dst, os.path.basename(filepath)) for filepath in filepaths]\n\n for filepath, object_path in zip(filepaths, object_paths):\n hook.upload(\n bucket_name=self.bucket,\n object_name=object_path,\n mime_type=self.mime_type,\n filename=filepath,\n gzip=self.gzip,\n )\n", "path": "airflow/providers/google/cloud/transfers/local_to_gcs.py"}]}
| 3,087 | 148 |
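For context on the record above: the silent success comes from `glob()` returning an empty list when the `src` pattern matches nothing, so the upload loop in `execute` simply never runs. The golden diff turns that into an explicit failure. A small self-contained illustration (the directory and pattern are made up):
```
import tempfile
from glob import glob

src_dir = tempfile.mkdtemp()      # an empty directory, like the reported src
pattern = src_dir + "/*.csv"      # hypothetical source pattern

filepaths = glob(pattern)         # [] -> previously the task still ended in SUCCESS
if not filepaths:
    # Behaviour added by the patch: fail loudly instead of silently uploading nothing.
    raise FileNotFoundError(pattern)

for path in filepaths:
    print("would upload", path)   # never reached when the directory is empty
```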
gh_patches_debug_9386
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-2259
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test_upgrade_db_22_to_23_without_frontend_settings fails in OSX tests
## Problem Definition
We noticed this in the nightly runs here: https://github.com/rotki/rotki/runs/1811061566?check_suite_focus=true
It fails only in OSX and has never failed in linux.
```
______________ test_upgrade_db_22_to_23_without_frontend_settings ______________
data_dir = PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir')
user_data_dir = PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir/testuser')
def test_upgrade_db_22_to_23_without_frontend_settings(data_dir, user_data_dir):
"""Test upgrading the DB from version 22 to version 23.
Tests the case where frontend settings were not populated and also the cache
file movement and deletion. Also test deleletion of deprecated historical_data_start
"""
msg_aggregator = MessagesAggregator()
_use_prepared_db(user_data_dir, 'v22_rotkehlchen_wo_frontend_settings.db')
db_v22 = _init_db_with_target_version(
target_version=22,
user_data_dir=user_data_dir,
msg_aggregator=msg_aggregator,
)
cursor = db_v22.conn.cursor()
# Create cache files under the data directory
(data_dir / 'forex_history_file.json').touch()
(data_dir / 'price_history_BTC_EUR.json').touch()
(data_dir / 'price_history_aDAI_USD.json').touch()
(data_dir / 'price_history_YFI_USD.json').touch()
# Also create an innocent json file and a random file
(data_dir / 'random.json').touch()
(data_dir / 'random.txt').touch()
# Check all settings except 'frontend_settings' exist
assert cursor.execute(
'SELECT COUNT(*) FROM settings WHERE name = "frontend_settings";',
).fetchone()[0] == 0
assert cursor.execute(
'SELECT COUNT(*) FROM settings WHERE name IN '
'("thousand_separator", "decimal_separator", "currency_location");',
).fetchone()[0] == 3
# Check we got a historical data start entry to remove
assert cursor.execute(
'SELECT COUNT(*) FROM settings WHERE name = "historical_data_start";',
).fetchone()[0] == 1
# Migrate to v23
db = _init_db_with_target_version(
target_version=23,
user_data_dir=user_data_dir,
msg_aggregator=msg_aggregator,
)
cursor = db.conn.cursor()
# Make sure the settings have been removed
assert cursor.execute(
'SELECT COUNT(*) FROM settings WHERE name IN '
'("thousand_separator", "decimal_separator", "currency_location");',
).fetchone()[0] == 0
assert cursor.execute(
'SELECT COUNT(*) FROM settings WHERE name = "historical_data_start";',
).fetchone()[0] == 0
# Make sure the settings have been migrated into 'frontend_settings'
frontend_settings = cursor.execute(
'SELECT value FROM settings WHERE name = "frontend_settings";',
).fetchone()[0]
frontend_settings_map = json.loads(frontend_settings)
assert frontend_settings_map['thousand_separator'] == ','
assert frontend_settings_map['decimal_separator'] == '.'
assert frontend_settings_map['currency_location'] == 'after'
# Assure the cache files were deleted
assert not (data_dir / 'price_history_BTC_EUR.json').is_file()
assert not (data_dir / 'price_history_aDAI_USD.json').is_file()
assert not (data_dir / 'price_history_YFI_USD.json').is_file()
# and that the forex history cache file moved
> assert (data_dir / 'price_history' / 'forex_history_file.json').is_file()
E AssertionError: assert False
E + where False = <bound method Path.is_file of PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir/price_history/forex_history_file.json')>()
E + where <bound method Path.is_file of PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir/price_history/forex_history_file.json')> = ((PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir') / 'price_history') / 'forex_history_file.json').is_file
rotkehlchen/tests/db/test_db_upgrades.py:1311: AssertionError
```
## Task
Figure out why and fix it
</issue>
<code>
[start of rotkehlchen/db/upgrades/v22_v23.py]
1 import json
2 from typing import TYPE_CHECKING
3 from rotkehlchen.utils.misc import get_or_make_price_history_dir
4 import os
5 from pathlib import Path
6 import glob
7 import shutil
8
9 if TYPE_CHECKING:
10 from rotkehlchen.db.dbhandler import DBHandler
11
12
13 def upgrade_v22_to_v23(db: 'DBHandler') -> None:
14 """Upgrades the DB from v22 to v23
15
16 - Migrates the settings entries 'thousand_separator', 'decimal_separator'
17 and 'currency_location' into the 'frontend_settings' entry.
18 - Deletes Bitfinex trades and their used query range, so trades can be
19 populated again with the right `fee_asset`.
20 - Delete all cryptocompare price cache files. Move forex price cache to price_history directory
21 """
22 settings = ('"thousand_separator"', '"decimal_separator"', '"currency_location"')
23 cursor = db.conn.cursor()
24 # Get the settings and put them in a dict
25 setting_value_map = dict(
26 cursor.execute(
27 f'SELECT name, value FROM settings WHERE name IN ({",".join(settings)});',
28 ).fetchall(),
29 )
30 # If the settings exist, migrate them into the 'frontend_settings' entry
31 if setting_value_map:
32 frontend_settings = cursor.execute(
33 'SELECT value FROM settings WHERE name = "frontend_settings";',
34 ).fetchone()
35
36 if frontend_settings is not None:
37 setting_value_map.update(json.loads(frontend_settings[0]))
38
39 cursor.execute(
40 'INSERT OR REPLACE INTO settings(name, value) VALUES(?, ?)',
41 ('frontend_settings', json.dumps(setting_value_map)),
42 )
43 # Delete the settings
44 cursor.execute(f'DELETE FROM settings WHERE name IN ({",".join(settings)});')
45 # Delete Bitfinex used_query_ranges
46 cursor.execute('DELETE FROM used_query_ranges WHERE name = "bitfinex_trades";')
47 # Delete Bitfinex trades
48 cursor.execute('DELETE FROM trades WHERE location = "T";')
49 # Delete deprecated historical data start setting
50 cursor.execute('DELETE from settings WHERE name="historical_data_start";')
51 db.conn.commit()
52
53 # -- Now move forex history to the new directory and remove all old cache files
54 data_directory = db.user_data_dir.parent
55 price_history_dir = get_or_make_price_history_dir(data_directory)
56 forex_history_file = data_directory / 'price_history_forex.json'
57 if forex_history_file.is_file():
58 shutil.move(
59 forex_history_file, # type: ignore
60 price_history_dir / 'forex_history_file.json',
61 )
62
63 prefix = os.path.join(str(data_directory), 'price_history_')
64 prefix = prefix.replace('\\', '\\\\')
65 files_list = glob.glob(prefix + '*.json')
66 for file_ in files_list:
67 file_ = file_.replace('\\\\', '\\')
68 try:
69 Path(file_).unlink()
70 except OSError:
71 pass
72
[end of rotkehlchen/db/upgrades/v22_v23.py]
</code>
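For context, the settings-migration step in `upgrade_v22_to_v23` above boils down to folding the three standalone settings rows into the `frontend_settings` JSON blob. A standalone sketch of that merge using plain dicts instead of a DB cursor (names are illustrative):

```python
import json
from typing import Optional

def merge_into_frontend_settings(individual: dict, frontend_json: Optional[str]) -> str:
    # individual: e.g. {'thousand_separator': ',', 'decimal_separator': '.',
    #                   'currency_location': 'after'} read from the settings table.
    merged = dict(individual)
    if frontend_json is not None:
        # Existing frontend settings win on key collisions, mirroring the
        # update() order used in the upgrade code above.
        merged.update(json.loads(frontend_json))
    return json.dumps(merged)
```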
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rotkehlchen/db/upgrades/v22_v23.py b/rotkehlchen/db/upgrades/v22_v23.py
--- a/rotkehlchen/db/upgrades/v22_v23.py
+++ b/rotkehlchen/db/upgrades/v22_v23.py
@@ -51,6 +51,8 @@
db.conn.commit()
# -- Now move forex history to the new directory and remove all old cache files
+ # We botched this. Should have been forex_history_file.json -> price_history_forex.json
+ # and not the other way around
data_directory = db.user_data_dir.parent
price_history_dir = get_or_make_price_history_dir(data_directory)
forex_history_file = data_directory / 'price_history_forex.json'
|
{"golden_diff": "diff --git a/rotkehlchen/db/upgrades/v22_v23.py b/rotkehlchen/db/upgrades/v22_v23.py\n--- a/rotkehlchen/db/upgrades/v22_v23.py\n+++ b/rotkehlchen/db/upgrades/v22_v23.py\n@@ -51,6 +51,8 @@\n db.conn.commit()\n \n # -- Now move forex history to the new directory and remove all old cache files\n+ # We botched this. Should have been forex_history_file.json -> price_history_forex.json\n+ # and not the other way around\n data_directory = db.user_data_dir.parent\n price_history_dir = get_or_make_price_history_dir(data_directory)\n forex_history_file = data_directory / 'price_history_forex.json'\n", "issue": "test_upgrade_db_22_to_23_without_frontend_settings fails in OSX tests\n## Problem Definition\r\n\r\nWe noticed this in the nightly runs here: https://github.com/rotki/rotki/runs/1811061566?check_suite_focus=true\r\n\r\nIt fails only in OSX and has never failed in linux.\r\n\r\n```\r\n______________ test_upgrade_db_22_to_23_without_frontend_settings ______________\r\n\r\ndata_dir = PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir')\r\nuser_data_dir = PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir/testuser')\r\n\r\n def test_upgrade_db_22_to_23_without_frontend_settings(data_dir, user_data_dir):\r\n \"\"\"Test upgrading the DB from version 22 to version 23.\r\n \r\n Tests the case where frontend settings were not populated and also the cache\r\n file movement and deletion. Also test deleletion of deprecated historical_data_start\r\n \"\"\"\r\n msg_aggregator = MessagesAggregator()\r\n _use_prepared_db(user_data_dir, 'v22_rotkehlchen_wo_frontend_settings.db')\r\n db_v22 = _init_db_with_target_version(\r\n target_version=22,\r\n user_data_dir=user_data_dir,\r\n msg_aggregator=msg_aggregator,\r\n )\r\n cursor = db_v22.conn.cursor()\r\n \r\n # Create cache files under the data directory\r\n (data_dir / 'forex_history_file.json').touch()\r\n (data_dir / 'price_history_BTC_EUR.json').touch()\r\n (data_dir / 'price_history_aDAI_USD.json').touch()\r\n (data_dir / 'price_history_YFI_USD.json').touch()\r\n # Also create an innocent json file and a random file\r\n (data_dir / 'random.json').touch()\r\n (data_dir / 'random.txt').touch()\r\n # Check all settings except 'frontend_settings' exist\r\n assert cursor.execute(\r\n 'SELECT COUNT(*) FROM settings WHERE name = \"frontend_settings\";',\r\n ).fetchone()[0] == 0\r\n assert cursor.execute(\r\n 'SELECT COUNT(*) FROM settings WHERE name IN '\r\n '(\"thousand_separator\", \"decimal_separator\", \"currency_location\");',\r\n ).fetchone()[0] == 3\r\n # Check we got a historical data start entry to remove\r\n assert cursor.execute(\r\n 'SELECT COUNT(*) FROM settings WHERE name = \"historical_data_start\";',\r\n ).fetchone()[0] == 1\r\n \r\n # Migrate to v23\r\n db = _init_db_with_target_version(\r\n target_version=23,\r\n user_data_dir=user_data_dir,\r\n msg_aggregator=msg_aggregator,\r\n )\r\n cursor = db.conn.cursor()\r\n \r\n # Make sure the settings have been removed\r\n assert cursor.execute(\r\n 'SELECT COUNT(*) FROM settings WHERE name IN '\r\n '(\"thousand_separator\", \"decimal_separator\", \"currency_location\");',\r\n ).fetchone()[0] == 0\r\n assert cursor.execute(\r\n 'SELECT COUNT(*) FROM settings WHERE name = \"historical_data_start\";',\r\n ).fetchone()[0] == 0\r\n \r\n # Make sure the settings have been migrated into 'frontend_settings'\r\n frontend_settings = cursor.execute(\r\n 'SELECT value FROM settings WHERE name = \"frontend_settings\";',\r\n ).fetchone()[0]\r\n frontend_settings_map = 
json.loads(frontend_settings)\r\n assert frontend_settings_map['thousand_separator'] == ','\r\n assert frontend_settings_map['decimal_separator'] == '.'\r\n assert frontend_settings_map['currency_location'] == 'after'\r\n \r\n # Assure the cache files were deleted\r\n assert not (data_dir / 'price_history_BTC_EUR.json').is_file()\r\n assert not (data_dir / 'price_history_aDAI_USD.json').is_file()\r\n assert not (data_dir / 'price_history_YFI_USD.json').is_file()\r\n # and that the forex history cache file moved\r\n> assert (data_dir / 'price_history' / 'forex_history_file.json').is_file()\r\nE AssertionError: assert False\r\nE + where False = <bound method Path.is_file of PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir/price_history/forex_history_file.json')>()\r\nE + where <bound method Path.is_file of PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir/price_history/forex_history_file.json')> = ((PosixPath('/Users/runner/.cache/.rotkehlchen-test-dir') / 'price_history') / 'forex_history_file.json').is_file\r\n\r\nrotkehlchen/tests/db/test_db_upgrades.py:1311: AssertionError\r\n```\r\n\r\n## Task\r\n\r\nFigure out why and fix it\n", "before_files": [{"content": "import json\nfrom typing import TYPE_CHECKING\nfrom rotkehlchen.utils.misc import get_or_make_price_history_dir\nimport os\nfrom pathlib import Path\nimport glob\nimport shutil\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\n\ndef upgrade_v22_to_v23(db: 'DBHandler') -> None:\n \"\"\"Upgrades the DB from v22 to v23\n\n - Migrates the settings entries 'thousand_separator', 'decimal_separator'\n and 'currency_location' into the 'frontend_settings' entry.\n - Deletes Bitfinex trades and their used query range, so trades can be\n populated again with the right `fee_asset`.\n - Delete all cryptocompare price cache files. 
Move forex price cache to price_history directory\n \"\"\"\n settings = ('\"thousand_separator\"', '\"decimal_separator\"', '\"currency_location\"')\n cursor = db.conn.cursor()\n # Get the settings and put them in a dict\n setting_value_map = dict(\n cursor.execute(\n f'SELECT name, value FROM settings WHERE name IN ({\",\".join(settings)});',\n ).fetchall(),\n )\n # If the settings exist, migrate them into the 'frontend_settings' entry\n if setting_value_map:\n frontend_settings = cursor.execute(\n 'SELECT value FROM settings WHERE name = \"frontend_settings\";',\n ).fetchone()\n\n if frontend_settings is not None:\n setting_value_map.update(json.loads(frontend_settings[0]))\n\n cursor.execute(\n 'INSERT OR REPLACE INTO settings(name, value) VALUES(?, ?)',\n ('frontend_settings', json.dumps(setting_value_map)),\n )\n # Delete the settings\n cursor.execute(f'DELETE FROM settings WHERE name IN ({\",\".join(settings)});')\n # Delete Bitfinex used_query_ranges\n cursor.execute('DELETE FROM used_query_ranges WHERE name = \"bitfinex_trades\";')\n # Delete Bitfinex trades\n cursor.execute('DELETE FROM trades WHERE location = \"T\";')\n # Delete deprecated historical data start setting\n cursor.execute('DELETE from settings WHERE name=\"historical_data_start\";')\n db.conn.commit()\n\n # -- Now move forex history to the new directory and remove all old cache files\n data_directory = db.user_data_dir.parent\n price_history_dir = get_or_make_price_history_dir(data_directory)\n forex_history_file = data_directory / 'price_history_forex.json'\n if forex_history_file.is_file():\n shutil.move(\n forex_history_file, # type: ignore\n price_history_dir / 'forex_history_file.json',\n )\n\n prefix = os.path.join(str(data_directory), 'price_history_')\n prefix = prefix.replace('\\\\', '\\\\\\\\')\n files_list = glob.glob(prefix + '*.json')\n for file_ in files_list:\n file_ = file_.replace('\\\\\\\\', '\\\\')\n try:\n Path(file_).unlink()\n except OSError:\n pass\n", "path": "rotkehlchen/db/upgrades/v22_v23.py"}]}
| 2,335 | 179 |
gh_patches_debug_26190
|
rasdani/github-patches
|
git_diff
|
numpy__numpy-13320
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUILD: what is generate_mtrand_c.py ?
xref #13163
Does anyone know what ['numpy/random/mtrand/generate_mtrand_c.py'](https://github.com/numpy/numpy/blob/v1.16.2/numpy/random/mtrand/generate_mtrand_c.py) is, and why it is in ['MANIFEST.in'](https://github.com/numpy/numpy/blob/v1.16.2/MANIFEST.in#L11)? Is it used by some downstream package? It does not seem to be used in [creating the wheels](https://github.com/MacPython/numpy-wheels).
</issue>
<code>
[start of numpy/random/mtrand/generate_mtrand_c.py]
1 #!/usr/bin/env python
2 from __future__ import division, absolute_import, print_function
3
4 import sys
5 import re
6 import os
7
8 unused_internal_funcs = ['__Pyx_PrintItem',
9 '__Pyx_PrintNewline',
10 '__Pyx_ReRaise',
11 #'__Pyx_GetExcValue',
12 '__Pyx_ArgTypeTest',
13 '__Pyx_SetVtable',
14 '__Pyx_GetVtable',
15 '__Pyx_CreateClass']
16
17 if __name__ == '__main__':
18 # Use cython here so that long docstrings are broken up.
19 # This is needed for some VC++ compilers.
20 os.system('cython mtrand.pyx')
21 mtrand_c = open('mtrand.c', 'r')
22 processed = open('mtrand_pp.c', 'w')
23 unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'
24 uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/')
25 linepat = re.compile(r'/\* ".*/mtrand.pyx":')
26 for linenum, line in enumerate(mtrand_c):
27 m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]',
28 line)
29 if m:
30 line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])
31 m = uifpat.match(line)
32 if m:
33 line = ''
34 m = re.search(unused_funcs_str, line)
35 if m:
36 print("%s was declared unused, but is used at line %d" % (m.group(),
37 linenum+1), file=sys.stderr)
38 line = linepat.sub(r'/* "mtrand.pyx":', line)
39 processed.write(line)
40 mtrand_c.close()
41 processed.close()
42 os.rename('mtrand_pp.c', 'mtrand.c')
43
[end of numpy/random/mtrand/generate_mtrand_c.py]
</code>
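For what it's worth, the script above is essentially a regex pass over Cython's generated C. A toy illustration of the kind of prototype line its `uifpat` pattern strips (the C line below is made up for the example):

```python
import re

unused_funcs_str = '(__Pyx_PrintItem|__Pyx_PrintNewline)'
uifpat = re.compile(r'static \w+ \*?' + unused_funcs_str + r'.*/\*proto\*/')

line = 'static int __Pyx_PrintItem(PyObject *); /*proto*/'  # made-up example line
print(bool(uifpat.match(line)))  # True -> generate_mtrand_c.py would drop this line
```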
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py
deleted file mode 100644
--- a/numpy/random/mtrand/generate_mtrand_c.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-import sys
-import re
-import os
-
-unused_internal_funcs = ['__Pyx_PrintItem',
- '__Pyx_PrintNewline',
- '__Pyx_ReRaise',
- #'__Pyx_GetExcValue',
- '__Pyx_ArgTypeTest',
- '__Pyx_SetVtable',
- '__Pyx_GetVtable',
- '__Pyx_CreateClass']
-
-if __name__ == '__main__':
- # Use cython here so that long docstrings are broken up.
- # This is needed for some VC++ compilers.
- os.system('cython mtrand.pyx')
- mtrand_c = open('mtrand.c', 'r')
- processed = open('mtrand_pp.c', 'w')
- unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'
- uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/')
- linepat = re.compile(r'/\* ".*/mtrand.pyx":')
- for linenum, line in enumerate(mtrand_c):
- m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]',
- line)
- if m:
- line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])
- m = uifpat.match(line)
- if m:
- line = ''
- m = re.search(unused_funcs_str, line)
- if m:
- print("%s was declared unused, but is used at line %d" % (m.group(),
- linenum+1), file=sys.stderr)
- line = linepat.sub(r'/* "mtrand.pyx":', line)
- processed.write(line)
- mtrand_c.close()
- processed.close()
- os.rename('mtrand_pp.c', 'mtrand.c')
|
{"golden_diff": "diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py\ndeleted file mode 100644\n--- a/numpy/random/mtrand/generate_mtrand_c.py\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-#!/usr/bin/env python\n-from __future__ import division, absolute_import, print_function\n-\n-import sys\n-import re\n-import os\n-\n-unused_internal_funcs = ['__Pyx_PrintItem',\n- '__Pyx_PrintNewline',\n- '__Pyx_ReRaise',\n- #'__Pyx_GetExcValue',\n- '__Pyx_ArgTypeTest',\n- '__Pyx_SetVtable',\n- '__Pyx_GetVtable',\n- '__Pyx_CreateClass']\n-\n-if __name__ == '__main__':\n- # Use cython here so that long docstrings are broken up.\n- # This is needed for some VC++ compilers.\n- os.system('cython mtrand.pyx')\n- mtrand_c = open('mtrand.c', 'r')\n- processed = open('mtrand_pp.c', 'w')\n- unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'\n- uifpat = re.compile(r'static \\w+ \\*?'+unused_funcs_str+r'.*/\\*proto\\*/')\n- linepat = re.compile(r'/\\* \".*/mtrand.pyx\":')\n- for linenum, line in enumerate(mtrand_c):\n- m = re.match(r'^(\\s+arrayObject\\w*\\s*=\\s*[(])[(]PyObject\\s*[*][)]',\n- line)\n- if m:\n- line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])\n- m = uifpat.match(line)\n- if m:\n- line = ''\n- m = re.search(unused_funcs_str, line)\n- if m:\n- print(\"%s was declared unused, but is used at line %d\" % (m.group(),\n- linenum+1), file=sys.stderr)\n- line = linepat.sub(r'/* \"mtrand.pyx\":', line)\n- processed.write(line)\n- mtrand_c.close()\n- processed.close()\n- os.rename('mtrand_pp.c', 'mtrand.c')\n", "issue": "BUILD: what is generate_mtrand_c.py ?\nxref #13163 \r\n\r\nDoes anyone know what is ['numpy/random/mtrand/generate_mtrand_c.py'](https://github.com/numpy/numpy/blob/v1.16.2/numpy/random/mtrand/generate_mtrand_c.py) and why it is in ['MANIFEST.in'](https://github.com/numpy/numpy/blob/v1.16.2/MANIFEST.in#L11) ? Is it used by some downstream package? It does not seem to be used in [creating the wheels](https://github.com/MacPython/numpy-wheels)\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nimport re\nimport os\n\nunused_internal_funcs = ['__Pyx_PrintItem',\n '__Pyx_PrintNewline',\n '__Pyx_ReRaise',\n #'__Pyx_GetExcValue',\n '__Pyx_ArgTypeTest',\n '__Pyx_SetVtable',\n '__Pyx_GetVtable',\n '__Pyx_CreateClass']\n\nif __name__ == '__main__':\n # Use cython here so that long docstrings are broken up.\n # This is needed for some VC++ compilers.\n os.system('cython mtrand.pyx')\n mtrand_c = open('mtrand.c', 'r')\n processed = open('mtrand_pp.c', 'w')\n unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'\n uifpat = re.compile(r'static \\w+ \\*?'+unused_funcs_str+r'.*/\\*proto\\*/')\n linepat = re.compile(r'/\\* \".*/mtrand.pyx\":')\n for linenum, line in enumerate(mtrand_c):\n m = re.match(r'^(\\s+arrayObject\\w*\\s*=\\s*[(])[(]PyObject\\s*[*][)]',\n line)\n if m:\n line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])\n m = uifpat.match(line)\n if m:\n line = ''\n m = re.search(unused_funcs_str, line)\n if m:\n print(\"%s was declared unused, but is used at line %d\" % (m.group(),\n linenum+1), file=sys.stderr)\n line = linepat.sub(r'/* \"mtrand.pyx\":', line)\n processed.write(line)\n mtrand_c.close()\n processed.close()\n os.rename('mtrand_pp.c', 'mtrand.c')\n", "path": "numpy/random/mtrand/generate_mtrand_c.py"}]}
| 1,176 | 522 |
gh_patches_debug_41602
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-3461
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update docstrings in the Python backend code to follow the style guide.
We've been a bit lax about adding docstrings to code in the backend. Instead, we should be following the style guide: https://google.github.io/styleguide/pyguide.html?showone=Comments#Comments -- docstrings should define args, return values, exceptions, etc. The aim of this issue is to ensure that the backend code is properly documented; we will tackle this on a file-by-file basis.
Note: if you're a new contributor, this issue is a particularly good introductory one to tackle, since it will require reading, understanding and documenting the purpose of a file in the codebase, and help you get acquainted with how the Oppia backend is structured. It's important that you're able to communicate and write clearly, though!
Some useful tips:
- Please follow our [style guide](https://github.com/oppia/oppia/wiki/Coding-style-guide#general) (and also the [Google Python style guide](https://google.github.io/styleguide/pyguide.html?showone=Comments#Comments)) when writing docstrings. Make sure to follow the patterns established there as closely as possible, including spacing, capitalization and punctuation. Also, it's a good idea to look at some of the existing files (see below) to get a sense of the correct format -- we recommend following this closely in order to maintain uniformity across the codebase.
- If applicable, make sure that the docstrings for all functions correctly specify the args, return values, etc. If a function has no args, you can omit the Args section; if it has no return value, you can omit the Returns section.
- If a docstring is not present in the code, or the existing one is too vague for you to understand what's going on just by looking at it, then it should be rewritten to be more clear. You'll often need to read the code as well, and do a grep on the callsites to see how it's being used, in order to get a clear understanding of what it's doing. This is good practice for learning how to navigate the codebase, and is an intended part of this project.
- If you feel that parts of the codebase are unclear or incorrectly factored, please feel free to bring this up! The whole point of docstrings is to improve code health and accessibility for new developers, so if you spot other things that can be done in service of that aim, we should also look into doing that.
**Completed:**
- [x] core.controllers.base @shaz13
- [x] core.controllers.cron @saeedjassani
- [x] core.controllers.reader @96mohitm
- [x] core.domain.activity_domain @andromfins
- [x] core.domain.activity_services @souravsingh
- [x] core.domain.classifier_services @andromfins
- [x] core.domain.collection_domain @domevit
- [x] core.domain.collection_services @kahkhang
- [x] core.domain.config_services @vibhor98
- [x] core.domain.email_manager @Oishikatta
- [x] core.domain.exp_domain @domevit
- [x] core.domain.exp_services @DubeySandeep
- [x] core.domain.feedback_domain @terrameijar
- [x] core.domain.feedback_jobs_continuous
- [x] core.domain.feedback_services @andimeo
- [x] core.domain.fs_domain @kartik-25
- [x] core.domain.html_cleaner @seanlip
- [x] core.domain.param_domain @rohan-gulati
- [x] core.domain.rating_services @BojOnTheBeat
- [x] core.domain.rights_manager @ayejay
- [x] core.domain.stats_domain @Arunabh98
- [x] core.domain.stats_jobs_continuous @Arunabh98
- [x] core.domain.stats_jobs_one_off @zpuller
- [x] core.domain.stats_services @manoj-makkuboy
- [x] core.domain.subscription_services @vibhor98
- [x] core.domain.summary_services @domevit
- [x] core.domain.user_jobs_continuous @shubha1593
- [x] core.domain.user_services @Nalinc
- [x] core.domain.value_generators_domain @meetsha
- [x] core.jobs @brianrodri
- [x] core.platform.app_identity.gae_app_identity_services @ubhisat
- [x] core.platform.models @pranavsid98
- [x] core.platform.transactions.gae_transaction_services @ubhisat
- [x] core.storage.base_model.gae_models @vojtechjelinek
- [x] core.storage.collection.gae_models @kaggis
- [x] core.storage.email.gae_models @rajan-garg
- [x] core.storage.exploration.gae_models @kirankonduru
- [x] core.storage.feedback.gae_models @naveenshukla
- [x] core.storage.file.gae_models @Ian91
- [x] core.storage.job.gae_models @sasacocic
- [x] core.storage.statistics.gae_models @abhishekarya286
- [x] core.storage.user.gae_models @Anthony-Alridge
- [x] core.tests.test_util_jobs @seanlip
- [x] core.tests.test_utils @vibhor98
- [x] utils @yashLadha
</issue>
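As a concrete reference for the style being asked for, a backend docstring following the guide looks roughly like this (the function itself is made up; only the formatting pattern matters):

```python
def get_rating_summary(exploration_id, user_id):
    """Returns a summary of the ratings left on the given exploration.

    Args:
        exploration_id: str. The id of the exploration.
        user_id: str. The id of the user requesting the summary.

    Returns:
        dict. A dict with keys 'average' (float. The mean rating) and
        'count' (int. The number of ratings received).

    Raises:
        ValueError. The exploration with the given id does not exist.
    """
```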
<code>
[start of core/domain/stats_jobs_one_off.py]
1 # Copyright 2014 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Jobs for statistics views."""
16
17 import ast
18 import collections
19
20 from core import jobs
21 from core.domain import stats_jobs_continuous
22 from core.platform import models
23
24 (stats_models,) = models.Registry.import_models([
25 models.NAMES.statistics
26 ])
27
28
29 class StatisticsAudit(jobs.BaseMapReduceJobManager):
30 """Performs a brief audit of exploration completions and state hit counts to
31 make sure they match counts stored in StateCounterModel. It also checks for
32 some possible error cases like negative counts.
33 """
34 _STATE_COUNTER_ERROR_KEY = 'State Counter ERROR'
35
36 @classmethod
37 def entity_classes_to_map_over(cls):
38 return [
39 stats_models.ExplorationAnnotationsModel,
40 stats_models.StateCounterModel]
41
42 @staticmethod
43 def map(item):
44 if isinstance(item, stats_models.StateCounterModel):
45 if item.first_entry_count < 0:
46 yield (
47 StatisticsAudit._STATE_COUNTER_ERROR_KEY,
48 'Less than 0: %s %d' % (item.key, item.first_entry_count))
49 return
50 # Older versions of ExplorationAnnotations didn't store exp_id
51 # This is short hand for making sure we get ones updated most recently
52 else:
53 if item.exploration_id is not None:
54 yield (item.exploration_id, {
55 'version': item.version,
56 'starts': item.num_starts,
57 'completions': item.num_completions,
58 'state_hit': item.state_hit_counts
59 })
60
61 @staticmethod
62 def reduce(key, stringified_values):
63 if key == StatisticsAudit._STATE_COUNTER_ERROR_KEY:
64 for value_str in stringified_values:
65 yield (value_str,)
66 return
67
68 # If the code reaches this point, we are looking at values that
69 # correspond to each version of a particular exploration.
70
71 # These variables correspond to the VERSION_ALL version.
72 all_starts = 0
73 all_completions = 0
74 all_state_hit = collections.defaultdict(int)
75
76 # These variables correspond to the sum of counts for all other
77 # versions besides VERSION_ALL.
78 sum_starts = 0
79 sum_completions = 0
80 sum_state_hit = collections.defaultdict(int)
81
82 for value_str in stringified_values:
83 value = ast.literal_eval(value_str)
84 if value['starts'] < 0:
85 yield (
86 'Negative start count: exp_id:%s version:%s starts:%s' %
87 (key, value['version'], value['starts']),)
88
89 if value['completions'] < 0:
90 yield (
91 'Negative completion count: exp_id:%s version:%s '
92 'completions:%s' %
93 (key, value['version'], value['completions']),)
94
95 if value['completions'] > value['starts']:
96 yield ('Completions > starts: exp_id:%s version:%s %s>%s' % (
97 key, value['version'], value['completions'],
98 value['starts']),)
99
100 if value['version'] == stats_jobs_continuous.VERSION_ALL:
101 all_starts = value['starts']
102 all_completions = value['completions']
103 for (state_name, counts) in value['state_hit'].iteritems():
104 all_state_hit[state_name] = counts['first_entry_count']
105 else:
106 sum_starts += value['starts']
107 sum_completions += value['completions']
108 for (state_name, counts) in value['state_hit'].iteritems():
109 sum_state_hit[state_name] += counts['first_entry_count']
110
111 if sum_starts != all_starts:
112 yield (
113 'Non-all != all for starts: exp_id:%s sum: %s all: %s'
114 % (key, sum_starts, all_starts),)
115 if sum_completions != all_completions:
116 yield (
117 'Non-all != all for completions: exp_id:%s sum: %s all: %s'
118 % (key, sum_completions, all_completions),)
119
120 for state_name in all_state_hit:
121 if (state_name not in sum_state_hit and
122 all_state_hit[state_name] != 0):
123 yield (
124 'state hit count not same exp_id:%s state:%s, '
125 'all:%s sum: null' % (
126 key, state_name, all_state_hit[state_name]),)
127 elif all_state_hit[state_name] != sum_state_hit[state_name]:
128 yield (
129 'state hit count not same exp_id: %s state: %s '
130 'all: %s sum:%s' % (
131 key, state_name, all_state_hit[state_name],
132 sum_state_hit[state_name]),)
133
[end of core/domain/stats_jobs_one_off.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/domain/stats_jobs_one_off.py b/core/domain/stats_jobs_one_off.py
--- a/core/domain/stats_jobs_one_off.py
+++ b/core/domain/stats_jobs_one_off.py
@@ -27,7 +27,9 @@
class StatisticsAudit(jobs.BaseMapReduceJobManager):
- """Performs a brief audit of exploration completions and state hit counts to
+ """A one-off statistics audit.
+
+ Performs a brief audit of exploration completions and state hit counts to
make sure they match counts stored in StateCounterModel. It also checks for
some possible error cases like negative counts.
"""
@@ -41,6 +43,39 @@
@staticmethod
def map(item):
+ """Implements the map function. Must be declared @staticmethod.
+
+ Args:
+ item: ExplorationAnnotationsModel or
+ StateCounterModel.
+
+ Yields:
+ tuple. For StateCounterModel, a 2-tuple in the form
+ (_STATE_COUNTER_ERROR_KEY, error message).
+ tuple. For ExplorationAnnotationModel, a 2-tuple in the form
+ ('exploration_id', value).
+ 'exploration_id': str. the id of the exploration.
+ 'value': a dict, whose structure is as follows:
+ {
+ 'version': str. version of the exploration.
+ 'starts': int. # of times exploration was started.
+ 'completions': int. # of times exploration was
+ completed.
+ 'state_hit': a dict containing the hit counts for the
+ states in the exploration. It is formatted as
+ follows:
+ {
+ state_name: {
+ 'first_entry_count': int. # of sessions
+ which hit this state.
+ 'total_entry_count': int. # of total hits
+ for this state.
+ 'no_answer_count': int. # of hits with no
+ answer for this state.
+ }
+ }
+ }
+ """
if isinstance(item, stats_models.StateCounterModel):
if item.first_entry_count < 0:
yield (
@@ -60,6 +95,36 @@
@staticmethod
def reduce(key, stringified_values):
+ """Updates statistics for the given exploration.
+
+ Args:
+ key: str. The id of the exploration.
+ stringified_values: list(str). A list of stringified values
+ associated with the given key. An element of stringified_values
+ would be of the form:
+ {
+ 'version': str. version of the exploration.
+ 'starts': int. # of times exploration was started.
+ 'completions': int. # of times exploration was
+ completed.
+ 'state_hit': dict. a dict containing the hit counts
+ for the states in the exploration. It is formatted
+ as follows:
+ {
+ state_name: {
+ 'first_entry_count': int. # of sessions
+ which hit this state.
+ 'total_entry_count': int. # of total
+ hits for this state.
+ 'no_answer_count': int. # of hits with
+ no answer for this state.
+ }
+ }
+ }
+
+ Yields:
+ tuple(str). A 1-tuple whose only element is an error message.
+ """
if key == StatisticsAudit._STATE_COUNTER_ERROR_KEY:
for value_str in stringified_values:
yield (value_str,)
|
{"golden_diff": "diff --git a/core/domain/stats_jobs_one_off.py b/core/domain/stats_jobs_one_off.py\n--- a/core/domain/stats_jobs_one_off.py\n+++ b/core/domain/stats_jobs_one_off.py\n@@ -27,7 +27,9 @@\n \n \n class StatisticsAudit(jobs.BaseMapReduceJobManager):\n- \"\"\"Performs a brief audit of exploration completions and state hit counts to\n+ \"\"\"A one-off statistics audit.\n+\n+ Performs a brief audit of exploration completions and state hit counts to\n make sure they match counts stored in StateCounterModel. It also checks for\n some possible error cases like negative counts.\n \"\"\"\n@@ -41,6 +43,39 @@\n \n @staticmethod\n def map(item):\n+ \"\"\"Implements the map function. Must be declared @staticmethod.\n+\n+ Args:\n+ item: ExplorationAnnotationsModel or\n+ StateCounterModel.\n+\n+ Yields:\n+ tuple. For StateCounterModel, a 2-tuple in the form\n+ (_STATE_COUNTER_ERROR_KEY, error message).\n+ tuple. For ExplorationAnnotationModel, a 2-tuple in the form\n+ ('exploration_id', value).\n+ 'exploration_id': str. the id of the exploration.\n+ 'value': a dict, whose structure is as follows:\n+ {\n+ 'version': str. version of the exploration.\n+ 'starts': int. # of times exploration was started.\n+ 'completions': int. # of times exploration was\n+ completed.\n+ 'state_hit': a dict containing the hit counts for the\n+ states in the exploration. It is formatted as\n+ follows:\n+ {\n+ state_name: {\n+ 'first_entry_count': int. # of sessions\n+ which hit this state.\n+ 'total_entry_count': int. # of total hits\n+ for this state.\n+ 'no_answer_count': int. # of hits with no\n+ answer for this state.\n+ }\n+ }\n+ }\n+ \"\"\"\n if isinstance(item, stats_models.StateCounterModel):\n if item.first_entry_count < 0:\n yield (\n@@ -60,6 +95,36 @@\n \n @staticmethod\n def reduce(key, stringified_values):\n+ \"\"\"Updates statistics for the given exploration.\n+\n+ Args:\n+ key: str. The id of the exploration.\n+ stringified_values: list(str). A list of stringified values\n+ associated with the given key. An element of stringified_values\n+ would be of the form:\n+ {\n+ 'version': str. version of the exploration.\n+ 'starts': int. # of times exploration was started.\n+ 'completions': int. # of times exploration was\n+ completed.\n+ 'state_hit': dict. a dict containing the hit counts\n+ for the states in the exploration. It is formatted\n+ as follows:\n+ {\n+ state_name: {\n+ 'first_entry_count': int. # of sessions\n+ which hit this state.\n+ 'total_entry_count': int. # of total\n+ hits for this state.\n+ 'no_answer_count': int. # of hits with\n+ no answer for this state.\n+ }\n+ }\n+ }\n+\n+ Yields:\n+ tuple(str). A 1-tuple whose only element is an error message.\n+ \"\"\"\n if key == StatisticsAudit._STATE_COUNTER_ERROR_KEY:\n for value_str in stringified_values:\n yield (value_str,)\n", "issue": "Update docstrings in the Python backend code to follow the style guide.\nWe've been a bit lax about adding docstrings to code in the backend. Instead, we should be following the style guide: https://google.github.io/styleguide/pyguide.html?showone=Comments#Comments -- docstrings should define args, return values, exceptions, etc. 
The aim of this issue is to ensure that the backend code is properly documented; we will tackle this on a file-by-file basis.\r\n\r\nNote: if you're a new contributor, this issue is a particularly good introductory one to tackle, since it will require reading, understanding and documenting the purpose of a file in the codebase, and help you get acquainted with how the Oppia backend is structured. It's important that you're able to communicate and write clearly, though!\r\n\r\nSome useful tips:\r\n- Please follow our [style guide](https://github.com/oppia/oppia/wiki/Coding-style-guide#general) (and also the [Google Python style guide](https://google.github.io/styleguide/pyguide.html?showone=Comments#Comments)) when writing docstrings. Make sure to follow the patterns established there as closely as possible, including spacing, capitalization and punctuation. Also, it's a good idea to look at some of the existing files (see below) to get a sense of the correct format -- we recommend following this closely in order to maintain uniformity across the codebase.\r\n- If applicable, make sure that the docstrings for all functions correctly specify the args, return values, etc. If a function has no args, you can omit the Args section; if it has no return value, you can omit the Returns section.\r\n- If a docstring is not present in the code, or the existing one is too vague for you to understand what's going on just by looking at it, then it should be rewritten to be more clear. You'll often need to read the code as well, and do a grep on the callsites to see how it's being used, in order to get a clear understanding of what it's doing. This is good practice for learning how to navigate the codebase, and is an intended part of this project.\r\n- If you feel that parts of the codebase are unclear or incorrectly factored, please feel free to bring this up! 
The whole point of docstrings is to improve code health and accessibility for new developers, so if you spot other things that can be done in service of that aim, we should also look into doing that.\r\n\r\n**Completed:**\r\n- [x] core.controllers.base @shaz13 \r\n- [x] core.controllers.cron @saeedjassani\r\n- [x] core.controllers.reader @96mohitm\r\n- [x] core.domain.activity_domain @andromfins\r\n- [x] core.domain.activity_services @souravsingh\r\n- [x] core.domain.classifier_services @andromfins \r\n- [x] core.domain.collection_domain @domevit\r\n- [x] core.domain.collection_services @kahkhang \r\n- [x] core.domain.config_services @vibhor98 \r\n- [x] core.domain.email_manager @Oishikatta \r\n- [x] core.domain.exp_domain @domevit\r\n- [x] core.domain.exp_services @DubeySandeep\r\n- [x] core.domain.feedback_domain @terrameijar \r\n- [x] core.domain.feedback_jobs_continuous\r\n- [x] core.domain.feedback_services @andimeo\r\n- [x] core.domain.fs_domain @kartik-25 \r\n- [x] core.domain.html_cleaner @seanlip\r\n- [x] core.domain.param_domain @rohan-gulati\r\n- [x] core.domain.rating_services @BojOnTheBeat \r\n- [x] core.domain.rights_manager @ayejay\r\n- [x] core.domain.stats_domain @Arunabh98\r\n- [x] core.domain.stats_jobs_continuous @Arunabh98 \r\n- [x] core.domain.stats_jobs_one_off @zpuller\r\n- [x] core.domain.stats_services @manoj-makkuboy \r\n- [x] core.domain.subscription_services @vibhor98 \r\n- [x] core.domain.summary_services @domevit \r\n- [x] core.domain.user_jobs_continuous @shubha1593 \r\n- [x] core.domain.user_services @Nalinc \r\n- [x] core.domain.value_generators_domain @meetsha \r\n- [x] core.jobs @brianrodri\r\n- [x] core.platform.app_identity.gae_app_identity_services @ubhisat\r\n- [x] core.platform.models @pranavsid98 \r\n- [x] core.platform.transactions.gae_transaction_services @ubhisat\r\n- [x] core.storage.base_model.gae_models @vojtechjelinek\r\n- [x] core.storage.collection.gae_models @kaggis\r\n- [x] core.storage.email.gae_models @rajan-garg\r\n- [x] core.storage.exploration.gae_models @kirankonduru\r\n- [x] core.storage.feedback.gae_models @naveenshukla\r\n- [x] core.storage.file.gae_models @Ian91 \r\n- [x] core.storage.job.gae_models @sasacocic\r\n- [x] core.storage.statistics.gae_models @abhishekarya286\r\n- [x] core.storage.user.gae_models @Anthony-Alridge \r\n- [x] core.tests.test_util_jobs @seanlip\r\n- [x] core.tests.test_utils @vibhor98\r\n- [x] utils @yashLadha \n", "before_files": [{"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Jobs for statistics views.\"\"\"\n\nimport ast\nimport collections\n\nfrom core import jobs\nfrom core.domain import stats_jobs_continuous\nfrom core.platform import models\n\n(stats_models,) = models.Registry.import_models([\n models.NAMES.statistics\n])\n\n\nclass StatisticsAudit(jobs.BaseMapReduceJobManager):\n \"\"\"Performs a brief audit of exploration completions and state hit counts to\n make sure they match counts stored in StateCounterModel. 
It also checks for\n some possible error cases like negative counts.\n \"\"\"\n _STATE_COUNTER_ERROR_KEY = 'State Counter ERROR'\n\n @classmethod\n def entity_classes_to_map_over(cls):\n return [\n stats_models.ExplorationAnnotationsModel,\n stats_models.StateCounterModel]\n\n @staticmethod\n def map(item):\n if isinstance(item, stats_models.StateCounterModel):\n if item.first_entry_count < 0:\n yield (\n StatisticsAudit._STATE_COUNTER_ERROR_KEY,\n 'Less than 0: %s %d' % (item.key, item.first_entry_count))\n return\n # Older versions of ExplorationAnnotations didn't store exp_id\n # This is short hand for making sure we get ones updated most recently\n else:\n if item.exploration_id is not None:\n yield (item.exploration_id, {\n 'version': item.version,\n 'starts': item.num_starts,\n 'completions': item.num_completions,\n 'state_hit': item.state_hit_counts\n })\n\n @staticmethod\n def reduce(key, stringified_values):\n if key == StatisticsAudit._STATE_COUNTER_ERROR_KEY:\n for value_str in stringified_values:\n yield (value_str,)\n return\n\n # If the code reaches this point, we are looking at values that\n # correspond to each version of a particular exploration.\n\n # These variables correspond to the VERSION_ALL version.\n all_starts = 0\n all_completions = 0\n all_state_hit = collections.defaultdict(int)\n\n # These variables correspond to the sum of counts for all other\n # versions besides VERSION_ALL.\n sum_starts = 0\n sum_completions = 0\n sum_state_hit = collections.defaultdict(int)\n\n for value_str in stringified_values:\n value = ast.literal_eval(value_str)\n if value['starts'] < 0:\n yield (\n 'Negative start count: exp_id:%s version:%s starts:%s' %\n (key, value['version'], value['starts']),)\n\n if value['completions'] < 0:\n yield (\n 'Negative completion count: exp_id:%s version:%s '\n 'completions:%s' %\n (key, value['version'], value['completions']),)\n\n if value['completions'] > value['starts']:\n yield ('Completions > starts: exp_id:%s version:%s %s>%s' % (\n key, value['version'], value['completions'],\n value['starts']),)\n\n if value['version'] == stats_jobs_continuous.VERSION_ALL:\n all_starts = value['starts']\n all_completions = value['completions']\n for (state_name, counts) in value['state_hit'].iteritems():\n all_state_hit[state_name] = counts['first_entry_count']\n else:\n sum_starts += value['starts']\n sum_completions += value['completions']\n for (state_name, counts) in value['state_hit'].iteritems():\n sum_state_hit[state_name] += counts['first_entry_count']\n\n if sum_starts != all_starts:\n yield (\n 'Non-all != all for starts: exp_id:%s sum: %s all: %s'\n % (key, sum_starts, all_starts),)\n if sum_completions != all_completions:\n yield (\n 'Non-all != all for completions: exp_id:%s sum: %s all: %s'\n % (key, sum_completions, all_completions),)\n\n for state_name in all_state_hit:\n if (state_name not in sum_state_hit and\n all_state_hit[state_name] != 0):\n yield (\n 'state hit count not same exp_id:%s state:%s, '\n 'all:%s sum: null' % (\n key, state_name, all_state_hit[state_name]),)\n elif all_state_hit[state_name] != sum_state_hit[state_name]:\n yield (\n 'state hit count not same exp_id: %s state: %s '\n 'all: %s sum:%s' % (\n key, state_name, all_state_hit[state_name],\n sum_state_hit[state_name]),)\n", "path": "core/domain/stats_jobs_one_off.py"}]}
| 3,164 | 779 |
gh_patches_debug_36488
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2871
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider rubios is broken
During the global build at 2021-05-26-14-42-23, spider **rubios** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rubios.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson))
</issue>
<code>
[start of locations/spiders/rubios.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class RubiosSpider(scrapy.Spider):
9 name = "rubios"
10 item_attributes = { 'brand': "Rubio's" }
11 allowed_domains = ['rubios.com']
12 start_urls = (
13 'https://www.rubios.com/sitemap.xml',
14 )
15
16 def parse(self, response):
17 response.selector.remove_namespaces()
18 city_urls = response.xpath('//url/loc/text()').extract()
19 regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+')
20 for path in city_urls:
21 if re.search(regex, path):
22 yield scrapy.Request(
23 path.strip(),
24 callback=self.parse_store,
25 )
26
27 def parse_store(self, response):
28
29 properties = {
30 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
31 'ref': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
32 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
33 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
34 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
35 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
36 'phone': response.xpath('//span[@itemprop="telephone"]/a/text()').extract_first(),
37 'website': response.url,
38 'opening_hours': "".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),
39 'lon': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[0]),
40 'lat': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[1]),
41 }
42
43 yield GeojsonPointItem(**properties)
44
[end of locations/spiders/rubios.py]
</code>
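When a sitemap-driven spider starts returning zero features with zero errors, the usual first suspect is the URL filter in `parse`: if the site renamed its store-page paths, nothing ever reaches `parse_store`. A quick sanity check along these lines (the sample URL is hypothetical):

```python
import re

regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+')
sample = 'https://www.rubios.com/restaurant-locations/ca/san-diego/0123-example'  # hypothetical
print(bool(regex.search(sample)))  # False -> no store pages are ever requested
```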
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/rubios.py b/locations/spiders/rubios.py
--- a/locations/spiders/rubios.py
+++ b/locations/spiders/rubios.py
@@ -16,7 +16,7 @@
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
- regex = re.compile(r'http\S+rubios.com/store-locations/\S+/\S+/\S+')
+ regex = re.compile(r'http\S+rubios.com/restaurant-locations/\S+/\S+/\S+')
for path in city_urls:
if re.search(regex, path):
yield scrapy.Request(
@@ -27,17 +27,18 @@
def parse_store(self, response):
properties = {
- 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
- 'ref': response.xpath('//span[@itemprop="name"]/text()').extract_first(),
- 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
- 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
- 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(),
- 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
- 'phone': response.xpath('//span[@itemprop="telephone"]/a/text()').extract_first(),
+ 'name': response.xpath('//meta[@property="og:title"]/@content').extract_first(),
+ 'ref': response.url,
+ 'addr_full': response.xpath('//meta[@property="og:street_address"]/@content').extract_first(),
+ 'city': response.xpath('//meta[@property="og:locality"]/@content').extract_first(),
+ 'state': response.xpath('//meta[@property="og:region"]/@content').extract_first(),
+ 'postcode': response.xpath('//meta[@property="og:postal_code"]/@content').extract_first(),
+ 'country': response.xpath('//meta[@property="og:country_name"]/@content').extract_first(),
+ 'phone': response.xpath('//@href[contains(.,"tel:")]').extract_first().replace('tel:', ''),
'website': response.url,
- 'opening_hours': "".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),
- 'lon': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[0]),
- 'lat': float(response.xpath('//head/script[9]').extract_first().split('"coordinates":[')[1].split(']')[0].split(',')[1]),
+ 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()),
+ 'lon': response.xpath('//meta[@property="og:longitude"]/@content').extract_first(),
+ 'lat': response.xpath('//meta[@property="og:latitude"]/@content').extract_first(),
}
yield GeojsonPointItem(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/rubios.py b/locations/spiders/rubios.py\n--- a/locations/spiders/rubios.py\n+++ b/locations/spiders/rubios.py\n@@ -16,7 +16,7 @@\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n- regex = re.compile(r'http\\S+rubios.com/store-locations/\\S+/\\S+/\\S+')\n+ regex = re.compile(r'http\\S+rubios.com/restaurant-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n@@ -27,17 +27,18 @@\n def parse_store(self, response):\n \n properties = {\n- 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n- 'ref': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n- 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n- 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first(),\n- 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n- 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n- 'phone': response.xpath('//span[@itemprop=\"telephone\"]/a/text()').extract_first(),\n+ 'name': response.xpath('//meta[@property=\"og:title\"]/@content').extract_first(),\n+ 'ref': response.url,\n+ 'addr_full': response.xpath('//meta[@property=\"og:street_address\"]/@content').extract_first(),\n+ 'city': response.xpath('//meta[@property=\"og:locality\"]/@content').extract_first(),\n+ 'state': response.xpath('//meta[@property=\"og:region\"]/@content').extract_first(),\n+ 'postcode': response.xpath('//meta[@property=\"og:postal_code\"]/@content').extract_first(),\n+ 'country': response.xpath('//meta[@property=\"og:country_name\"]/@content').extract_first(),\n+ 'phone': response.xpath('//@href[contains(.,\"tel:\")]').extract_first().replace('tel:', ''),\n 'website': response.url,\n- 'opening_hours': \"\".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),\n- 'lon': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[0]),\n- 'lat': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[1]),\n+ 'opening_hours': ''.join(response.css('.oh-wrapper ::text').extract()),\n+ 'lon': response.xpath('//meta[@property=\"og:longitude\"]/@content').extract_first(),\n+ 'lat': response.xpath('//meta[@property=\"og:latitude\"]/@content').extract_first(),\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider rubios is broken\nDuring the global build at 2021-05-26-14-42-23, spider **rubios** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/rubios.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/rubios.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass RubiosSpider(scrapy.Spider):\n name = \"rubios\"\n item_attributes = { 'brand': \"Rubio's\" }\n allowed_domains = ['rubios.com']\n start_urls = (\n 'https://www.rubios.com/sitemap.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n regex = 
re.compile(r'http\\S+rubios.com/store-locations/\\S+/\\S+/\\S+')\n for path in city_urls:\n if re.search(regex, path):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n 'ref': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first(),\n 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first(),\n 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/a/text()').extract_first(),\n 'website': response.url,\n 'opening_hours': \"\".join(response.xpath('//div/div/div/span/span/span/text()').extract()).strip(),\n 'lon': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[0]),\n 'lat': float(response.xpath('//head/script[9]').extract_first().split('\"coordinates\":[')[1].split(']')[0].split(',')[1]),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/rubios.py"}]}
| 1,244 | 683 |
gh_patches_debug_31398
|
rasdani/github-patches
|
git_diff
|
nv-legate__cunumeric-450
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
random.randint() multiple errors
Calls result in errors when minimal input is provided (no error if dtype is provided).
```
>>> import numpy as np
>>> import cunumeric as cn
>>> np.random.randint(1000)
293
>>> cn.random.randint(1000)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py", line 106, in wrapper
return func(*args, **kwargs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/random/random.py", line 113, in randint
return nprandom.randint(low=low, high=high, size=size, dtype=dtype)
File "mtrand.pyx", line 764, in numpy.random.mtrand.RandomState.randint
TypeError: Unsupported dtype dtype('float64') for randint
>>> cn.random.randint(1000,dtype=np.int32)
172
```
Calls result in errors when size is provided in array form (or if N-D); this happens regardless of whether dtype is provided:
```
>>> cn.random.randint(1000,size=10)
array([317, 787, 968, 969, 10, 508, 778, 387, 976, 166])
>>> np.random.randint(1000,size=[10])
array([ 79, 325, 406, 740, 810, 830, 551, 640, 596, 857])
>>> cn.random.randint(1000,size=[10])
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py", line 106, in wrapper
return func(*args, **kwargs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/random/random.py", line 125, in randint
result = ndarray(size, dtype=dtype)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py", line 106, in wrapper
return func(*args, **kwargs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/array.py", line 190, in __init__
self._thunk = runtime.create_empty_thunk(shape, dtype, inputs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py", line 499, in create_empty_thunk
self.is_eager_shape(computed_shape)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py", line 528, in is_eager_shape
return volume <= self.max_eager_volume
TypeError: '<=' not supported between instances of 'list' and 'int'
>>> np.random.randint(1000,size=[10,10])
array([[ 33, 401, 305, 230, 610, 912, 661, 89, 245, 35],
[639, 985, 902, 324, 270, 145, 121, 947, 817, 175],
[458, 623, 160, 994, 263, 824, 304, 94, 492, 552],
[907, 607, 16, 761, 323, 479, 122, 954, 749, 254],
[146, 627, 161, 855, 573, 794, 731, 192, 988, 999],
[628, 582, 815, 592, 765, 69, 416, 445, 339, 756],
[504, 332, 910, 478, 14, 686, 77, 527, 186, 545],
[353, 432, 38, 32, 229, 129, 248, 992, 292, 49],
[380, 835, 754, 874, 136, 167, 497, 984, 951, 353],
[164, 512, 899, 515, 405, 929, 268, 491, 542, 664]])
>>> cn.random.randint(1000,size=[10,10])
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py", line 106, in wrapper
return func(*args, **kwargs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/random/random.py", line 125, in randint
result = ndarray(size, dtype=dtype)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py", line 106, in wrapper
return func(*args, **kwargs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/array.py", line 190, in __init__
self._thunk = runtime.create_empty_thunk(shape, dtype, inputs)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py", line 499, in create_empty_thunk
self.is_eager_shape(computed_shape)
File "/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py", line 528, in is_eager_shape
return volume <= self.max_eager_volume
TypeError: '<=' not supported between instances of 'list' and 'int'
```
</issue>
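Both failures trace back to argument normalisation: a `size` given as a list is never turned into a shape tuple, so the later `volume <= self.max_eager_volume` comparison in the traceback sees a list, and the `dtype=None` default is forwarded to NumPy, which resolves `None` to float64 and rejects it. The snippet below is a hypothetical, self-contained illustration of the normalisation idea only — the helper name, logic, and the eager limit of 1000 in the print are made up for this sketch and are not cunumeric code:

```python
import numpy as np

def normalize_size(size):
    # Hypothetical helper: coerce whatever the caller passed into a plain tuple
    # so that the volume computed from it is a scalar, not a list.
    if size is None:
        return ()
    if isinstance(size, int):
        return (size,)
    return tuple(size)

for size in (10, [10], [10, 10]):
    shape = normalize_size(size)
    volume = int(np.prod(shape))
    print(size, "->", shape, "volume =", volume, "fits eager limit:", volume <= 1000)
```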
<code>
[start of cunumeric/random/random.py]
1 # Copyright 2021-2022 NVIDIA Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from __future__ import annotations
16
17 from typing import TYPE_CHECKING, Any, Union
18
19 import numpy as np
20 import numpy.random as nprandom
21 from cunumeric.array import ndarray
22 from cunumeric.runtime import runtime
23
24 if TYPE_CHECKING:
25 import numpy.typing as npt
26
27
28 def seed(init: Union[int, None] = None) -> None:
29 if init is None:
30 init = 0
31 runtime.set_next_random_epoch(int(init))
32
33
34 def rand(*shapeargs: int) -> Union[float, ndarray]:
35 """
36 rand(d0, d1, ..., dn)
37
38 Random values in a given shape.
39
40 Create an array of the given shape and populate it with random samples from
41 a uniform distribution over ``[0, 1)``.
42
43 Parameters
44 ----------
45 d0, d1, ..., dn : int, optional
46 The dimensions of the returned array, must be non-negative.
47 If no argument is given a single Python float is returned.
48
49 Returns
50 -------
51 out : ndarray, shape ``(d0, d1, ..., dn)``
52 Random values.
53
54 See Also
55 --------
56 numpy.random.rand
57
58 Availability
59 --------
60 Multiple GPUs, Multiple CPUs
61 """
62
63 if shapeargs == ():
64 return nprandom.rand()
65 result = ndarray(shapeargs, dtype=np.dtype(np.float64))
66 result._thunk.random_uniform()
67 return result
68
69
70 def randint(
71 low: Union[int, ndarray],
72 high: Union[int, ndarray, None] = None,
73 size: Union[int, tuple[int], None] = None,
74 dtype: Union[np.dtype[Any], None] = None,
75 ) -> Union[int, ndarray, npt.NDArray[Any]]:
76 """
77 Return random integers from `low` (inclusive) to `high` (exclusive).
78
79 Parameters
80 ----------
81 low : int or array_like[int]
82 Lowest (signed) integers to be drawn from the distribution (unless
83 ``high=None``, in which case this parameter is one above the
84 *highest* such integer).
85 high : int or array_like[int], optional
86 If provided, one above the largest (signed) integer to be drawn
87 from the distribution (see above for behavior if ``high=None``).
88 If array-like, must contain integer values
89 size : int or tuple[int], optional
90 Output shape. If the given shape is, e.g., ``(m, n, k)``, then
91 ``m * n * k`` samples are drawn. Default is None, in which case a
92 single value is returned.
93 dtype : data-type, optional
94 Desired dtype of the result. Byteorder must be native.
95 The default value is int.
96
97 Returns
98 -------
99 out : int or ndarray[int]
100 `size`-shaped array of random integers from the appropriate
101 distribution, or a single such random int if `size` not provided.
102
103 See Also
104 --------
105 numpy.random.randint
106
107 Availability
108 --------
109 Multiple GPUs, Multiple CPUs
110 """
111
112 if size is None:
113 return nprandom.randint(low=low, high=high, size=size, dtype=dtype)
114 if dtype is not None:
115 dtype = np.dtype(dtype)
116 else:
117 dtype = np.dtype(np.int64)
118 # TODO: randint must support unsigned integer dtypes as well
119 if dtype.kind != "i":
120 raise TypeError(
121 "cunumeric.random.randint must be given an integer dtype"
122 )
123 if not isinstance(size, tuple):
124 size = (size,)
125 result = ndarray(size, dtype=dtype)
126 if high is None:
127 if low <= 0:
128 raise ValueError(
129 "bound must be strictly greater than 0 for randint"
130 )
131 result._thunk.random_integer(low=0, high=low)
132 else:
133 if low >= high:
134 raise ValueError(
135 "'high' bound must be strictly greater than 'low' "
136 "bound for randint"
137 )
138 result._thunk.random_integer(low=low, high=high)
139 return result
140
141
142 def randn(*shapeargs: int) -> Union[float, ndarray]:
143 """
144 randn(d0, d1, ..., dn)
145
146 Return a sample (or samples) from the "standard normal" distribution.
147
148 Parameters
149 ----------
150 d0, d1, ..., dn : int, optional
151 The dimensions of the returned array, must be non-negative.
152 If no argument is given a single Python float is returned.
153
154 Returns
155 -------
156 Z : ndarray or float
157 A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
158 the standard normal distribution, or a single such float if
159 no parameters were supplied.
160
161 See Also
162 --------
163 numpy.random.randn
164
165 Availability
166 --------
167 Multiple GPUs, Multiple CPUs
168 """
169
170 if shapeargs == ():
171 return nprandom.randn()
172 result = ndarray(shapeargs, dtype=np.dtype(np.float64))
173 result._thunk.random_normal()
174 return result
175
176
177 def random(shape: Union[tuple[int], None] = None) -> Union[float, ndarray]:
178 """
179 random(size=None)
180
181 Return random floats in the half-open interval [0.0, 1.0).
182
183 See Also
184 --------
185 numpy.random.random
186
187 Availability
188 --------
189 Multiple GPUs, Multiple CPUs
190 """
191 if shape is None:
192 return nprandom.random()
193 result = ndarray(shape, dtype=np.dtype(np.float64))
194 result._thunk.random_uniform()
195 return result
196
[end of cunumeric/random/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cunumeric/random/random.py b/cunumeric/random/random.py
--- a/cunumeric/random/random.py
+++ b/cunumeric/random/random.py
@@ -14,7 +14,7 @@
#
from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Union
+from typing import TYPE_CHECKING, Any, Sequence, Union
import numpy as np
import numpy.random as nprandom
@@ -68,10 +68,10 @@
def randint(
- low: Union[int, ndarray],
- high: Union[int, ndarray, None] = None,
- size: Union[int, tuple[int], None] = None,
- dtype: Union[np.dtype[Any], None] = None,
+ low: Union[int, Sequence[int]],
+ high: Union[int, Sequence[int], None] = None,
+ size: Union[int, Sequence[int], None] = None,
+ dtype: Union[np.dtype[Any], type, None] = int,
) -> Union[int, ndarray, npt.NDArray[Any]]:
"""
Return random integers from `low` (inclusive) to `high` (exclusive).
@@ -109,8 +109,14 @@
Multiple GPUs, Multiple CPUs
"""
+ if not isinstance(low, int):
+ raise NotImplementedError("'low' must be an integer")
+ if high is not None and not isinstance(high, int):
+ raise NotImplementedError("'high' must be an integer or None")
+
if size is None:
return nprandom.randint(low=low, high=high, size=size, dtype=dtype)
+
if dtype is not None:
dtype = np.dtype(dtype)
else:
@@ -120,7 +126,7 @@
raise TypeError(
"cunumeric.random.randint must be given an integer dtype"
)
- if not isinstance(size, tuple):
+ if isinstance(size, int):
size = (size,)
result = ndarray(size, dtype=dtype)
if high is None:
|
{"golden_diff": "diff --git a/cunumeric/random/random.py b/cunumeric/random/random.py\n--- a/cunumeric/random/random.py\n+++ b/cunumeric/random/random.py\n@@ -14,7 +14,7 @@\n #\n from __future__ import annotations\n \n-from typing import TYPE_CHECKING, Any, Union\n+from typing import TYPE_CHECKING, Any, Sequence, Union\n \n import numpy as np\n import numpy.random as nprandom\n@@ -68,10 +68,10 @@\n \n \n def randint(\n- low: Union[int, ndarray],\n- high: Union[int, ndarray, None] = None,\n- size: Union[int, tuple[int], None] = None,\n- dtype: Union[np.dtype[Any], None] = None,\n+ low: Union[int, Sequence[int]],\n+ high: Union[int, Sequence[int], None] = None,\n+ size: Union[int, Sequence[int], None] = None,\n+ dtype: Union[np.dtype[Any], type, None] = int,\n ) -> Union[int, ndarray, npt.NDArray[Any]]:\n \"\"\"\n Return random integers from `low` (inclusive) to `high` (exclusive).\n@@ -109,8 +109,14 @@\n Multiple GPUs, Multiple CPUs\n \"\"\"\n \n+ if not isinstance(low, int):\n+ raise NotImplementedError(\"'low' must be an integer\")\n+ if high is not None and not isinstance(high, int):\n+ raise NotImplementedError(\"'high' must be an integer or None\")\n+\n if size is None:\n return nprandom.randint(low=low, high=high, size=size, dtype=dtype)\n+\n if dtype is not None:\n dtype = np.dtype(dtype)\n else:\n@@ -120,7 +126,7 @@\n raise TypeError(\n \"cunumeric.random.randint must be given an integer dtype\"\n )\n- if not isinstance(size, tuple):\n+ if isinstance(size, int):\n size = (size,)\n result = ndarray(size, dtype=dtype)\n if high is None:\n", "issue": "random.randint() multiple errors\nCalls result in errors when minimal input is provided (no error if dtype is provided).\r\n```\r\n>>> import numpy as np\r\n>>> import cunumeric as cn\r\n>>> np.random.randint(1000)\r\n293\r\n>>> cn.random.randint(1000)\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py\", line 106, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/random/random.py\", line 113, in randint\r\n return nprandom.randint(low=low, high=high, size=size, dtype=dtype)\r\n File \"mtrand.pyx\", line 764, in numpy.random.mtrand.RandomState.randint\r\nTypeError: Unsupported dtype dtype('float64') for randint\r\n>>> cn.random.randint(1000,dtype=np.int32)\r\n172\r\n```\r\nCalls result in errors when size is provided in array form (or if ND) (this happens regardless whether or not dtype is provided):\r\n```\r\n>>> cn.random.randint(1000,size=10)\r\narray([317, 787, 968, 969, 10, 508, 778, 387, 976, 166])\r\n>>> np.random.randint(1000,size=[10])\r\narray([ 79, 325, 406, 740, 810, 830, 551, 640, 596, 857])\r\n>>> cn.random.randint(1000,size=[10])\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py\", line 106, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/random/random.py\", line 125, in randint\r\n result = ndarray(size, dtype=dtype)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py\", line 106, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/array.py\", line 190, in __init__\r\n self._thunk = runtime.create_empty_thunk(shape, dtype, inputs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py\", line 499, in create_empty_thunk\r\n 
self.is_eager_shape(computed_shape)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py\", line 528, in is_eager_shape\r\n return volume <= self.max_eager_volume\r\nTypeError: '<=' not supported between instances of 'list' and 'int'\r\n>>> np.random.randint(1000,size=[10,10])\r\narray([[ 33, 401, 305, 230, 610, 912, 661, 89, 245, 35],\r\n [639, 985, 902, 324, 270, 145, 121, 947, 817, 175],\r\n [458, 623, 160, 994, 263, 824, 304, 94, 492, 552],\r\n [907, 607, 16, 761, 323, 479, 122, 954, 749, 254],\r\n [146, 627, 161, 855, 573, 794, 731, 192, 988, 999],\r\n [628, 582, 815, 592, 765, 69, 416, 445, 339, 756],\r\n [504, 332, 910, 478, 14, 686, 77, 527, 186, 545],\r\n [353, 432, 38, 32, 229, 129, 248, 992, 292, 49],\r\n [380, 835, 754, 874, 136, 167, 497, 984, 951, 353],\r\n [164, 512, 899, 515, 405, 929, 268, 491, 542, 664]])\r\n>>> cn.random.randint(1000,size=[10,10])\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py\", line 106, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/random/random.py\", line 125, in randint\r\n result = ndarray(size, dtype=dtype)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/coverage.py\", line 106, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/array.py\", line 190, in __init__\r\n self._thunk = runtime.create_empty_thunk(shape, dtype, inputs)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py\", line 499, in create_empty_thunk\r\n self.is_eager_shape(computed_shape)\r\n File \"/rkarimi_l/Work/cunumeric/cunumeric/cunumeric/runtime.py\", line 528, in is_eager_shape\r\n return volume <= self.max_eager_volume\r\nTypeError: '<=' not supported between instances of 'list' and 'int'\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2021-2022 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Union\n\nimport numpy as np\nimport numpy.random as nprandom\nfrom cunumeric.array import ndarray\nfrom cunumeric.runtime import runtime\n\nif TYPE_CHECKING:\n import numpy.typing as npt\n\n\ndef seed(init: Union[int, None] = None) -> None:\n if init is None:\n init = 0\n runtime.set_next_random_epoch(int(init))\n\n\ndef rand(*shapeargs: int) -> Union[float, ndarray]:\n \"\"\"\n rand(d0, d1, ..., dn)\n\n Random values in a given shape.\n\n Create an array of the given shape and populate it with random samples from\n a uniform distribution over ``[0, 1)``.\n\n Parameters\n ----------\n d0, d1, ..., dn : int, optional\n The dimensions of the returned array, must be non-negative.\n If no argument is given a single Python float is returned.\n\n Returns\n -------\n out : ndarray, shape ``(d0, d1, ..., dn)``\n Random values.\n\n See Also\n --------\n numpy.random.rand\n\n Availability\n --------\n Multiple GPUs, Multiple CPUs\n \"\"\"\n\n if 
shapeargs == ():\n return nprandom.rand()\n result = ndarray(shapeargs, dtype=np.dtype(np.float64))\n result._thunk.random_uniform()\n return result\n\n\ndef randint(\n low: Union[int, ndarray],\n high: Union[int, ndarray, None] = None,\n size: Union[int, tuple[int], None] = None,\n dtype: Union[np.dtype[Any], None] = None,\n) -> Union[int, ndarray, npt.NDArray[Any]]:\n \"\"\"\n Return random integers from `low` (inclusive) to `high` (exclusive).\n\n Parameters\n ----------\n low : int or array_like[int]\n Lowest (signed) integers to be drawn from the distribution (unless\n ``high=None``, in which case this parameter is one above the\n *highest* such integer).\n high : int or array_like[int], optional\n If provided, one above the largest (signed) integer to be drawn\n from the distribution (see above for behavior if ``high=None``).\n If array-like, must contain integer values\n size : int or tuple[int], optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. Default is None, in which case a\n single value is returned.\n dtype : data-type, optional\n Desired dtype of the result. Byteorder must be native.\n The default value is int.\n\n Returns\n -------\n out : int or ndarray[int]\n `size`-shaped array of random integers from the appropriate\n distribution, or a single such random int if `size` not provided.\n\n See Also\n --------\n numpy.random.randint\n\n Availability\n --------\n Multiple GPUs, Multiple CPUs\n \"\"\"\n\n if size is None:\n return nprandom.randint(low=low, high=high, size=size, dtype=dtype)\n if dtype is not None:\n dtype = np.dtype(dtype)\n else:\n dtype = np.dtype(np.int64)\n # TODO: randint must support unsigned integer dtypes as well\n if dtype.kind != \"i\":\n raise TypeError(\n \"cunumeric.random.randint must be given an integer dtype\"\n )\n if not isinstance(size, tuple):\n size = (size,)\n result = ndarray(size, dtype=dtype)\n if high is None:\n if low <= 0:\n raise ValueError(\n \"bound must be strictly greater than 0 for randint\"\n )\n result._thunk.random_integer(low=0, high=low)\n else:\n if low >= high:\n raise ValueError(\n \"'high' bound must be strictly greater than 'low' \"\n \"bound for randint\"\n )\n result._thunk.random_integer(low=low, high=high)\n return result\n\n\ndef randn(*shapeargs: int) -> Union[float, ndarray]:\n \"\"\"\n randn(d0, d1, ..., dn)\n\n Return a sample (or samples) from the \"standard normal\" distribution.\n\n Parameters\n ----------\n d0, d1, ..., dn : int, optional\n The dimensions of the returned array, must be non-negative.\n If no argument is given a single Python float is returned.\n\n Returns\n -------\n Z : ndarray or float\n A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from\n the standard normal distribution, or a single such float if\n no parameters were supplied.\n\n See Also\n --------\n numpy.random.randn\n\n Availability\n --------\n Multiple GPUs, Multiple CPUs\n \"\"\"\n\n if shapeargs == ():\n return nprandom.randn()\n result = ndarray(shapeargs, dtype=np.dtype(np.float64))\n result._thunk.random_normal()\n return result\n\n\ndef random(shape: Union[tuple[int], None] = None) -> Union[float, ndarray]:\n \"\"\"\n random(size=None)\n\n Return random floats in the half-open interval [0.0, 1.0).\n\n See Also\n --------\n numpy.random.random\n\n Availability\n --------\n Multiple GPUs, Multiple CPUs\n \"\"\"\n if shape is None:\n return nprandom.random()\n result = ndarray(shape, dtype=np.dtype(np.float64))\n result._thunk.random_uniform()\n 
return result\n", "path": "cunumeric/random/random.py"}]}
| 3,913 | 447 |
gh_patches_debug_23811
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2137
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Subclassing `Info`
Subclassing `strawberry.types.Info` raises a warning when using it in resolvers.
`"Argument name-based matching of 'info' is deprecated and will be removed in v1.0."`
But it gets injected into resolvers without any problems, in my use case i only change `Info.context` to provide my custom `Context` class and original `Info` gets injected: `strawberry.types.info.Info`
## System Information
- Strawberry version: 0.126.2
## Additional Context
Repository with bug reproduction: https://gitlab.com/ThirVondukr/strawberry-info-warning
Strawberry code where warning originates from: https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/types/fields/resolver.py#L126
</issue>
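The warning fires because the reserved-type check in `ReservedType.is_reserved_type` compares annotations by identity (`other is self.type or get_origin(other) is self.type`), which is `False` for any subclass of `Info`, so the resolver falls back to matching the argument by its name. Below is a minimal, self-contained sketch contrasting an identity check with a subclass-aware check; the classes are stand-ins written for this illustration, not the real strawberry types:

```python
from typing import get_origin

class Info:          # stand-in for strawberry.types.Info
    pass

class MyInfo(Info):  # user subclass, e.g. one that narrows the context type
    pass

def identity_check(annotation, reserved=Info):
    # Mirrors the pre-fix behaviour: only the exact class (or its generic origin) matches.
    return annotation is reserved or get_origin(annotation) is reserved

def subclass_check(annotation, reserved=Info):
    # Subclass-aware variant along the lines of the merged fix.
    origin = get_origin(annotation) or annotation
    return issubclass(origin, reserved) if isinstance(origin, type) else origin is reserved

print(identity_check(MyInfo))   # False -> name-based fallback, hence the deprecation warning
print(subclass_check(MyInfo))   # True  -> the subclass is recognised as the reserved Info type
```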
<code>
[start of strawberry/types/fields/resolver.py]
1 from __future__ import annotations as _
2
3 import builtins
4 import inspect
5 import sys
6 import warnings
7 from inspect import isasyncgenfunction, iscoroutinefunction
8 from typing import ( # type: ignore[attr-defined]
9 Any,
10 Callable,
11 Dict,
12 ForwardRef,
13 Generic,
14 List,
15 Mapping,
16 NamedTuple,
17 Optional,
18 Tuple,
19 Type,
20 TypeVar,
21 Union,
22 _eval_type,
23 )
24
25 from typing_extensions import Annotated, Protocol, get_args, get_origin
26
27 from strawberry.annotation import StrawberryAnnotation
28 from strawberry.arguments import StrawberryArgument
29 from strawberry.exceptions import MissingArgumentsAnnotationsError
30 from strawberry.type import StrawberryType
31 from strawberry.types.info import Info
32 from strawberry.utils.cached_property import cached_property
33
34
35 class Parameter(inspect.Parameter):
36 def __hash__(self):
37 """Override to exclude default value from hash.
38
39 This adds compatibility for using unhashable default values in resolvers such as
40 list and dict. The present use-case is limited to analyzing parameters from one
41 resolver. Therefore, the name, kind, and annotation combination are guaranteed
42 to be unique since two arguments cannot have the same name in a callable.
43
44 Furthermore, even though it is not currently a use-case to collect parameters
45 from different resolvers, the likelihood of collision from having the same hash
46 value but different defaults is mitigated by Python invoking the
47 :py:meth:`__eq__` method if two items have the same hash. See the verification
48 of this behavior in the `test_parameter_hash_collision` test.
49 """
50 return hash((self.name, self.kind, self.annotation))
51
52
53 class Signature(inspect.Signature):
54
55 _parameter_cls = Parameter
56
57
58 class ReservedParameterSpecification(Protocol):
59 def find(
60 self, parameters: Tuple[inspect.Parameter, ...], resolver: StrawberryResolver
61 ) -> Optional[inspect.Parameter]:
62 """Finds the reserved parameter from ``parameters``."""
63
64
65 class ReservedName(NamedTuple):
66 name: str
67
68 def find(
69 self, parameters: Tuple[inspect.Parameter, ...], _: StrawberryResolver
70 ) -> Optional[inspect.Parameter]:
71 return next((p for p in parameters if p.name == self.name), None)
72
73
74 class ReservedNameBoundParameter(NamedTuple):
75 name: str
76
77 def find(
78 self, parameters: Tuple[inspect.Parameter, ...], _: StrawberryResolver
79 ) -> Optional[inspect.Parameter]:
80 if parameters: # Add compatibility for resolvers with no arguments
81 first_parameter = parameters[0]
82 return first_parameter if first_parameter.name == self.name else None
83 else:
84 return None
85
86
87 class ReservedType(NamedTuple):
88 """Define a reserved type by name or by type.
89
90 To preserve backwards-comaptibility, if an annotation was defined but does not match
91 :attr:`type`, then the name is used as a fallback.
92 """
93
94 name: str
95 type: Type
96
97 def find(
98 self, parameters: Tuple[inspect.Parameter, ...], resolver: StrawberryResolver
99 ) -> Optional[inspect.Parameter]:
100 for parameter in parameters:
101 annotation = parameter.annotation
102 try:
103 resolved_annotation = _eval_type(
104 ForwardRef(annotation)
105 if isinstance(annotation, str)
106 else annotation,
107 resolver._namespace,
108 None,
109 )
110 resolver._resolved_annotations[parameter] = resolved_annotation
111 except NameError:
112 # Type-annotation could not be resolved
113 resolved_annotation = annotation
114 if self.is_reserved_type(resolved_annotation):
115 return parameter
116
117 # Fallback to matching by name
118 reserved_name = ReservedName(name=self.name).find(parameters, resolver)
119 if reserved_name:
120 warning = DeprecationWarning(
121 f"Argument name-based matching of '{self.name}' is deprecated and will "
122 "be removed in v1.0. Ensure that reserved arguments are annotated "
123 "their respective types (i.e. use value: 'DirectiveValue[str]' instead "
124 "of 'value: str' and 'info: Info' instead of a plain 'info')."
125 )
126 warnings.warn(warning)
127 return reserved_name
128 else:
129 return None
130
131 def is_reserved_type(self, other: Type) -> bool:
132 if get_origin(other) is Annotated:
133 # Handle annotated arguments such as Private[str] and DirectiveValue[str]
134 return any(isinstance(argument, self.type) for argument in get_args(other))
135 else:
136 # Handle both concrete and generic types (i.e Info, and Info[Any, Any])
137 return other is self.type or get_origin(other) is self.type
138
139
140 SELF_PARAMSPEC = ReservedNameBoundParameter("self")
141 CLS_PARAMSPEC = ReservedNameBoundParameter("cls")
142 ROOT_PARAMSPEC = ReservedName("root")
143 INFO_PARAMSPEC = ReservedType("info", Info)
144
145 T = TypeVar("T")
146
147
148 class StrawberryResolver(Generic[T]):
149
150 RESERVED_PARAMSPEC: Tuple[ReservedParameterSpecification, ...] = (
151 SELF_PARAMSPEC,
152 CLS_PARAMSPEC,
153 ROOT_PARAMSPEC,
154 INFO_PARAMSPEC,
155 )
156
157 def __init__(
158 self,
159 func: Union[Callable[..., T], staticmethod, classmethod],
160 *,
161 description: Optional[str] = None,
162 type_override: Optional[Union[StrawberryType, type]] = None,
163 ):
164 self.wrapped_func = func
165 self._description = description
166 self._type_override = type_override
167 """Specify the type manually instead of calculating from wrapped func
168
169 This is used when creating copies of types w/ generics
170 """
171 self._resolved_annotations: Dict[inspect.Parameter, Any] = {}
172 """Populated during reserved parameter determination.
173
174 Caching resolved annotations this way prevents evaling them repeatedly.
175 """
176
177 # TODO: Use this when doing the actual resolving? How to deal with async resolvers?
178 def __call__(self, *args, **kwargs) -> T:
179 if not callable(self.wrapped_func):
180 raise UncallableResolverError(self)
181 return self.wrapped_func(*args, **kwargs)
182
183 @cached_property
184 def signature(self) -> inspect.Signature:
185 return Signature.from_callable(self._unbound_wrapped_func, follow_wrapped=True)
186
187 @cached_property
188 def reserved_parameters(
189 self,
190 ) -> Dict[ReservedParameterSpecification, Optional[inspect.Parameter]]:
191 """Mapping of reserved parameter specification to parameter."""
192 parameters = tuple(self.signature.parameters.values())
193 return {spec: spec.find(parameters, self) for spec in self.RESERVED_PARAMSPEC}
194
195 @cached_property
196 def arguments(self) -> List[StrawberryArgument]:
197 """Resolver arguments exposed in the GraphQL Schema."""
198 parameters = self.signature.parameters.values()
199 reserved_parameters = set(self.reserved_parameters.values())
200
201 missing_annotations = set()
202 arguments = []
203 user_parameters = (p for p in parameters if p not in reserved_parameters)
204 for param in user_parameters:
205 annotation = self._resolved_annotations.get(param, param.annotation)
206 if annotation is inspect.Signature.empty:
207 missing_annotations.add(param.name)
208 else:
209 argument = StrawberryArgument(
210 python_name=param.name,
211 graphql_name=None,
212 type_annotation=StrawberryAnnotation(
213 annotation=annotation, namespace=self._namespace
214 ),
215 default=param.default,
216 )
217 arguments.append(argument)
218 if missing_annotations:
219 raise MissingArgumentsAnnotationsError(self.name, missing_annotations)
220 return arguments
221
222 @cached_property
223 def info_parameter(self) -> Optional[inspect.Parameter]:
224 return self.reserved_parameters.get(INFO_PARAMSPEC)
225
226 @cached_property
227 def root_parameter(self) -> Optional[inspect.Parameter]:
228 return self.reserved_parameters.get(ROOT_PARAMSPEC)
229
230 @cached_property
231 def self_parameter(self) -> Optional[inspect.Parameter]:
232 return self.reserved_parameters.get(SELF_PARAMSPEC)
233
234 @cached_property
235 def name(self) -> str:
236 # TODO: What to do if resolver is a lambda?
237 return self._unbound_wrapped_func.__name__
238
239 @cached_property
240 def annotations(self) -> Dict[str, object]:
241 """Annotations for the resolver.
242
243 Does not include special args defined in `RESERVED_PARAMSPEC` (e.g. self, root,
244 info)
245 """
246 reserved_parameters = self.reserved_parameters
247 reserved_names = {p.name for p in reserved_parameters.values() if p is not None}
248
249 annotations = self._unbound_wrapped_func.__annotations__
250 annotations = {
251 name: annotation
252 for name, annotation in annotations.items()
253 if name not in reserved_names
254 }
255
256 return annotations
257
258 @cached_property
259 def type_annotation(self) -> Optional[StrawberryAnnotation]:
260 return_annotation = self.signature.return_annotation
261 if return_annotation is inspect.Signature.empty:
262 return None
263 else:
264 type_annotation = StrawberryAnnotation(
265 annotation=return_annotation, namespace=self._namespace
266 )
267 return type_annotation
268
269 @property
270 def type(self) -> Optional[Union[StrawberryType, type]]:
271 if self._type_override:
272 return self._type_override
273 if self.type_annotation is None:
274 return None
275 return self.type_annotation.resolve()
276
277 @cached_property
278 def is_async(self) -> bool:
279 return iscoroutinefunction(self._unbound_wrapped_func) or isasyncgenfunction(
280 self._unbound_wrapped_func
281 )
282
283 def copy_with(
284 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, builtins.type]]
285 ) -> StrawberryResolver:
286 type_override = None
287
288 if self.type:
289 if isinstance(self.type, StrawberryType):
290 type_override = self.type.copy_with(type_var_map)
291 else:
292 type_override = self.type._type_definition.copy_with( # type: ignore
293 type_var_map,
294 )
295
296 return type(self)(
297 func=self.wrapped_func,
298 description=self._description,
299 type_override=type_override,
300 )
301
302 @cached_property
303 def _namespace(self) -> Dict[str, Any]:
304 return sys.modules[self._unbound_wrapped_func.__module__].__dict__
305
306 @cached_property
307 def _unbound_wrapped_func(self) -> Callable[..., T]:
308 if isinstance(self.wrapped_func, (staticmethod, classmethod)):
309 return self.wrapped_func.__func__
310
311 return self.wrapped_func
312
313
314 class UncallableResolverError(Exception):
315 def __init__(self, resolver: "StrawberryResolver"):
316 message = (
317 f"Attempted to call resolver {resolver} with uncallable function "
318 f"{resolver.wrapped_func}"
319 )
320 super().__init__(message)
321
322
323 __all__ = ["StrawberryResolver"]
324
[end of strawberry/types/fields/resolver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/types/fields/resolver.py b/strawberry/types/fields/resolver.py
--- a/strawberry/types/fields/resolver.py
+++ b/strawberry/types/fields/resolver.py
@@ -20,6 +20,7 @@
TypeVar,
Union,
_eval_type,
+ cast,
)
from typing_extensions import Annotated, Protocol, get_args, get_origin
@@ -129,12 +130,17 @@
return None
def is_reserved_type(self, other: Type) -> bool:
- if get_origin(other) is Annotated:
+ origin = cast(type, get_origin(other)) or other
+ if origin is Annotated:
# Handle annotated arguments such as Private[str] and DirectiveValue[str]
return any(isinstance(argument, self.type) for argument in get_args(other))
else:
# Handle both concrete and generic types (i.e Info, and Info[Any, Any])
- return other is self.type or get_origin(other) is self.type
+ return (
+ issubclass(origin, self.type)
+ if isinstance(origin, type)
+ else origin is self.type
+ )
SELF_PARAMSPEC = ReservedNameBoundParameter("self")
|
{"golden_diff": "diff --git a/strawberry/types/fields/resolver.py b/strawberry/types/fields/resolver.py\n--- a/strawberry/types/fields/resolver.py\n+++ b/strawberry/types/fields/resolver.py\n@@ -20,6 +20,7 @@\n TypeVar,\n Union,\n _eval_type,\n+ cast,\n )\n \n from typing_extensions import Annotated, Protocol, get_args, get_origin\n@@ -129,12 +130,17 @@\n return None\n \n def is_reserved_type(self, other: Type) -> bool:\n- if get_origin(other) is Annotated:\n+ origin = cast(type, get_origin(other)) or other\n+ if origin is Annotated:\n # Handle annotated arguments such as Private[str] and DirectiveValue[str]\n return any(isinstance(argument, self.type) for argument in get_args(other))\n else:\n # Handle both concrete and generic types (i.e Info, and Info[Any, Any])\n- return other is self.type or get_origin(other) is self.type\n+ return (\n+ issubclass(origin, self.type)\n+ if isinstance(origin, type)\n+ else origin is self.type\n+ )\n \n \n SELF_PARAMSPEC = ReservedNameBoundParameter(\"self\")\n", "issue": "Subclassing `Info`\nSubclassing `strawberry.types.Info` raises a warning when using it in resolvers.\r\n`\"Argument name-based matching of 'info' is deprecated and will be removed in v1.0.\"`\r\nBut it gets injected into resolvers without any problems, in my use case i only change `Info.context` to provide my custom `Context` class and original `Info` gets injected: `strawberry.types.info.Info`\r\n\r\n## System Information\r\n\r\n - Strawberry version: 0.126.2\r\n\r\n## Additional Context\r\n\r\nRepository with bug reproduction: https://gitlab.com/ThirVondukr/strawberry-info-warning\r\nStrawberry code where warning originates from: https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/types/fields/resolver.py#L126\r\n\nSubclassing `Info`\nSubclassing `strawberry.types.Info` raises a warning when using it in resolvers.\r\n`\"Argument name-based matching of 'info' is deprecated and will be removed in v1.0.\"`\r\nBut it gets injected into resolvers without any problems, in my use case i only change `Info.context` to provide my custom `Context` class and original `Info` gets injected: `strawberry.types.info.Info`\r\n\r\n## System Information\r\n\r\n - Strawberry version: 0.126.2\r\n\r\n## Additional Context\r\n\r\nRepository with bug reproduction: https://gitlab.com/ThirVondukr/strawberry-info-warning\r\nStrawberry code where warning originates from: https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/types/fields/resolver.py#L126\r\n\n", "before_files": [{"content": "from __future__ import annotations as _\n\nimport builtins\nimport inspect\nimport sys\nimport warnings\nfrom inspect import isasyncgenfunction, iscoroutinefunction\nfrom typing import ( # type: ignore[attr-defined]\n Any,\n Callable,\n Dict,\n ForwardRef,\n Generic,\n List,\n Mapping,\n NamedTuple,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n _eval_type,\n)\n\nfrom typing_extensions import Annotated, Protocol, get_args, get_origin\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.arguments import StrawberryArgument\nfrom strawberry.exceptions import MissingArgumentsAnnotationsError\nfrom strawberry.type import StrawberryType\nfrom strawberry.types.info import Info\nfrom strawberry.utils.cached_property import cached_property\n\n\nclass Parameter(inspect.Parameter):\n def __hash__(self):\n \"\"\"Override to exclude default value from hash.\n\n This adds compatibility for using unhashable default values in resolvers such as\n list and dict. 
The present use-case is limited to analyzing parameters from one\n resolver. Therefore, the name, kind, and annotation combination are guaranteed\n to be unique since two arguments cannot have the same name in a callable.\n\n Furthermore, even though it is not currently a use-case to collect parameters\n from different resolvers, the likelihood of collision from having the same hash\n value but different defaults is mitigated by Python invoking the\n :py:meth:`__eq__` method if two items have the same hash. See the verification\n of this behavior in the `test_parameter_hash_collision` test.\n \"\"\"\n return hash((self.name, self.kind, self.annotation))\n\n\nclass Signature(inspect.Signature):\n\n _parameter_cls = Parameter\n\n\nclass ReservedParameterSpecification(Protocol):\n def find(\n self, parameters: Tuple[inspect.Parameter, ...], resolver: StrawberryResolver\n ) -> Optional[inspect.Parameter]:\n \"\"\"Finds the reserved parameter from ``parameters``.\"\"\"\n\n\nclass ReservedName(NamedTuple):\n name: str\n\n def find(\n self, parameters: Tuple[inspect.Parameter, ...], _: StrawberryResolver\n ) -> Optional[inspect.Parameter]:\n return next((p for p in parameters if p.name == self.name), None)\n\n\nclass ReservedNameBoundParameter(NamedTuple):\n name: str\n\n def find(\n self, parameters: Tuple[inspect.Parameter, ...], _: StrawberryResolver\n ) -> Optional[inspect.Parameter]:\n if parameters: # Add compatibility for resolvers with no arguments\n first_parameter = parameters[0]\n return first_parameter if first_parameter.name == self.name else None\n else:\n return None\n\n\nclass ReservedType(NamedTuple):\n \"\"\"Define a reserved type by name or by type.\n\n To preserve backwards-comaptibility, if an annotation was defined but does not match\n :attr:`type`, then the name is used as a fallback.\n \"\"\"\n\n name: str\n type: Type\n\n def find(\n self, parameters: Tuple[inspect.Parameter, ...], resolver: StrawberryResolver\n ) -> Optional[inspect.Parameter]:\n for parameter in parameters:\n annotation = parameter.annotation\n try:\n resolved_annotation = _eval_type(\n ForwardRef(annotation)\n if isinstance(annotation, str)\n else annotation,\n resolver._namespace,\n None,\n )\n resolver._resolved_annotations[parameter] = resolved_annotation\n except NameError:\n # Type-annotation could not be resolved\n resolved_annotation = annotation\n if self.is_reserved_type(resolved_annotation):\n return parameter\n\n # Fallback to matching by name\n reserved_name = ReservedName(name=self.name).find(parameters, resolver)\n if reserved_name:\n warning = DeprecationWarning(\n f\"Argument name-based matching of '{self.name}' is deprecated and will \"\n \"be removed in v1.0. Ensure that reserved arguments are annotated \"\n \"their respective types (i.e. 
use value: 'DirectiveValue[str]' instead \"\n \"of 'value: str' and 'info: Info' instead of a plain 'info').\"\n )\n warnings.warn(warning)\n return reserved_name\n else:\n return None\n\n def is_reserved_type(self, other: Type) -> bool:\n if get_origin(other) is Annotated:\n # Handle annotated arguments such as Private[str] and DirectiveValue[str]\n return any(isinstance(argument, self.type) for argument in get_args(other))\n else:\n # Handle both concrete and generic types (i.e Info, and Info[Any, Any])\n return other is self.type or get_origin(other) is self.type\n\n\nSELF_PARAMSPEC = ReservedNameBoundParameter(\"self\")\nCLS_PARAMSPEC = ReservedNameBoundParameter(\"cls\")\nROOT_PARAMSPEC = ReservedName(\"root\")\nINFO_PARAMSPEC = ReservedType(\"info\", Info)\n\nT = TypeVar(\"T\")\n\n\nclass StrawberryResolver(Generic[T]):\n\n RESERVED_PARAMSPEC: Tuple[ReservedParameterSpecification, ...] = (\n SELF_PARAMSPEC,\n CLS_PARAMSPEC,\n ROOT_PARAMSPEC,\n INFO_PARAMSPEC,\n )\n\n def __init__(\n self,\n func: Union[Callable[..., T], staticmethod, classmethod],\n *,\n description: Optional[str] = None,\n type_override: Optional[Union[StrawberryType, type]] = None,\n ):\n self.wrapped_func = func\n self._description = description\n self._type_override = type_override\n \"\"\"Specify the type manually instead of calculating from wrapped func\n\n This is used when creating copies of types w/ generics\n \"\"\"\n self._resolved_annotations: Dict[inspect.Parameter, Any] = {}\n \"\"\"Populated during reserved parameter determination.\n\n Caching resolved annotations this way prevents evaling them repeatedly.\n \"\"\"\n\n # TODO: Use this when doing the actual resolving? How to deal with async resolvers?\n def __call__(self, *args, **kwargs) -> T:\n if not callable(self.wrapped_func):\n raise UncallableResolverError(self)\n return self.wrapped_func(*args, **kwargs)\n\n @cached_property\n def signature(self) -> inspect.Signature:\n return Signature.from_callable(self._unbound_wrapped_func, follow_wrapped=True)\n\n @cached_property\n def reserved_parameters(\n self,\n ) -> Dict[ReservedParameterSpecification, Optional[inspect.Parameter]]:\n \"\"\"Mapping of reserved parameter specification to parameter.\"\"\"\n parameters = tuple(self.signature.parameters.values())\n return {spec: spec.find(parameters, self) for spec in self.RESERVED_PARAMSPEC}\n\n @cached_property\n def arguments(self) -> List[StrawberryArgument]:\n \"\"\"Resolver arguments exposed in the GraphQL Schema.\"\"\"\n parameters = self.signature.parameters.values()\n reserved_parameters = set(self.reserved_parameters.values())\n\n missing_annotations = set()\n arguments = []\n user_parameters = (p for p in parameters if p not in reserved_parameters)\n for param in user_parameters:\n annotation = self._resolved_annotations.get(param, param.annotation)\n if annotation is inspect.Signature.empty:\n missing_annotations.add(param.name)\n else:\n argument = StrawberryArgument(\n python_name=param.name,\n graphql_name=None,\n type_annotation=StrawberryAnnotation(\n annotation=annotation, namespace=self._namespace\n ),\n default=param.default,\n )\n arguments.append(argument)\n if missing_annotations:\n raise MissingArgumentsAnnotationsError(self.name, missing_annotations)\n return arguments\n\n @cached_property\n def info_parameter(self) -> Optional[inspect.Parameter]:\n return self.reserved_parameters.get(INFO_PARAMSPEC)\n\n @cached_property\n def root_parameter(self) -> Optional[inspect.Parameter]:\n return 
self.reserved_parameters.get(ROOT_PARAMSPEC)\n\n @cached_property\n def self_parameter(self) -> Optional[inspect.Parameter]:\n return self.reserved_parameters.get(SELF_PARAMSPEC)\n\n @cached_property\n def name(self) -> str:\n # TODO: What to do if resolver is a lambda?\n return self._unbound_wrapped_func.__name__\n\n @cached_property\n def annotations(self) -> Dict[str, object]:\n \"\"\"Annotations for the resolver.\n\n Does not include special args defined in `RESERVED_PARAMSPEC` (e.g. self, root,\n info)\n \"\"\"\n reserved_parameters = self.reserved_parameters\n reserved_names = {p.name for p in reserved_parameters.values() if p is not None}\n\n annotations = self._unbound_wrapped_func.__annotations__\n annotations = {\n name: annotation\n for name, annotation in annotations.items()\n if name not in reserved_names\n }\n\n return annotations\n\n @cached_property\n def type_annotation(self) -> Optional[StrawberryAnnotation]:\n return_annotation = self.signature.return_annotation\n if return_annotation is inspect.Signature.empty:\n return None\n else:\n type_annotation = StrawberryAnnotation(\n annotation=return_annotation, namespace=self._namespace\n )\n return type_annotation\n\n @property\n def type(self) -> Optional[Union[StrawberryType, type]]:\n if self._type_override:\n return self._type_override\n if self.type_annotation is None:\n return None\n return self.type_annotation.resolve()\n\n @cached_property\n def is_async(self) -> bool:\n return iscoroutinefunction(self._unbound_wrapped_func) or isasyncgenfunction(\n self._unbound_wrapped_func\n )\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, builtins.type]]\n ) -> StrawberryResolver:\n type_override = None\n\n if self.type:\n if isinstance(self.type, StrawberryType):\n type_override = self.type.copy_with(type_var_map)\n else:\n type_override = self.type._type_definition.copy_with( # type: ignore\n type_var_map,\n )\n\n return type(self)(\n func=self.wrapped_func,\n description=self._description,\n type_override=type_override,\n )\n\n @cached_property\n def _namespace(self) -> Dict[str, Any]:\n return sys.modules[self._unbound_wrapped_func.__module__].__dict__\n\n @cached_property\n def _unbound_wrapped_func(self) -> Callable[..., T]:\n if isinstance(self.wrapped_func, (staticmethod, classmethod)):\n return self.wrapped_func.__func__\n\n return self.wrapped_func\n\n\nclass UncallableResolverError(Exception):\n def __init__(self, resolver: \"StrawberryResolver\"):\n message = (\n f\"Attempted to call resolver {resolver} with uncallable function \"\n f\"{resolver.wrapped_func}\"\n )\n super().__init__(message)\n\n\n__all__ = [\"StrawberryResolver\"]\n", "path": "strawberry/types/fields/resolver.py"}]}
| 4,072 | 277 |
gh_patches_debug_63976
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-1214
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New web plugin idea: a health indicator
This ticket is a migrated Trac ticket [2966](http://trac.buildbot.net/ticket/2966)
People contributed to the original ticket: `benoit.allard@...`, @tardyp, @rutsky, @unknown_contributor, @sa2ajj, @rutsky
Ticket created on: `Oct 24 2014`
Ticket last modified on: `Mar 19 2016`
---
I like to extract as much useful indicator from my builds as possible (time, but also amount of warnings, and such ...)
It would be cool to have a web plugin that could print the evolution of my indicators over time ! (Of course, I would have to configure which indicator I want to see plotted, maybe the kind of plot, and so on ...)
---
_Comment from_: @sa2ajj
_Date_: `Oct 24 2014`
Could you please elaborate or provide a more specific example?
I think it's related to the [[metrics support|http://docs.buildbot.net/latest/developer/metrics.html]], but without an example I can easily be wrong :)
---
_Comment from_: @Ben
_Date_: `Oct 24 2014`
I was more aiming at [[Statistics|http://docs.buildbot.net/latest/developer/cls-buildsteps.html?highlight=statistics#buildbot.process.buildstep.[[BuildStep]].hasStatistic]], but I just realized that
Note that statistics are not preserved after a build is complete.
So metrics is probably where we want to interface with the master.
I used to abuse Properties for that purpose ...
---
_Comment from_: @tardyp
_Date_: `Oct 24 2014`
Buildbot plugin system is really made for enabling such dashboards.
A web ui plugin is not technically restricted to creating a bunch of js file, it could also create a twisted service.
For me having the JS only use existing data api to query the data will be very inefficient. I think we could easily create a service, like a status service that registers to a bunch of mq events, and create statistics based on that.
I also had in mind that plugins could have some table in the DB they could use to store the data, or maybe use another db configuration with all schema + migration stuff separated.
---
_Comment from_: @tardyp
_Date_: `Oct 26 2014`
on IRC, sa2ajj talked about http://graphite.readthedocs.org/
He told us, he actually uses it at work, and has integration working with eigth.
Looking at the documentation, the first reaction is how to integrate this with multimaster, as graphite has its own db called whisper.
I haven't look too much deeply, but I think this is still feasible as a external tool. Probably this would be much cheaper than making our own metrics system inside buildbot.
An external graphite server could be setup, and watch for the (TBD) mq server. As there are messages for nearly every kind of activity that happens in buildbot, this is a good mean of making solid analysis of what is going on. Of course, this solution would not be fully integrated, as probably the UI would be external, but anyway, I think this is a possible cheap path.
@sa2ajj do you think it is possible? How would you estimate the cost of integration?
---
_Comment from_: @Ben
_Date_: `Oct 26 2014`
There are a lot of [[interfaces|http://graphite.readthedocs.org/en/latest/tools.html#visualization]], as well as data collectors / forwarders (same page, a bit higher) available for graphite. It looks like some of them are js-only ! Still way to go for a www-plugin !
---
_Comment from_: @sa2ajj
_Date_: `Oct 27 2014`
What I said was that we indeed use graphite, but I did not say we use it with Buildbot.
I have an oldish branch where I tried to publish metrics to graphite. However I stopped working on that when I realised that it's not very straightforward to implement it to support multi-master case.
If there's an interest, I can revive the branch or, at least, publish what I have (after rebasing the latest master) so other could comment in what direction my thinking worked.
---
_Comment from_: @unknown_contributor
_Date_: `Mar 19 2016`
+1 for this feature. I suggest to merge the statistic API with the [[stats module|https://github.com/buildbot/buildbot/tree/master/master/buildbot/statistics]] developed last year by my GSOC student. And add a default backend that store a subset of these stat in the main database that would enable the development of heath/stats visualization modules directly installed by default (using Highchart js lib for example)
</issue>
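Most of the ticket is feature discussion, but the concrete technical thread is pushing build metrics to an external Graphite/Carbon instance rather than building a metrics store into Buildbot itself. For orientation only: Carbon's plaintext protocol is a single "<metric.path> <value> <unix-timestamp>" line per sample, conventionally on TCP port 2003. The snippet below is a minimal sketch of publishing one data point; the host, port, and metric path are illustrative assumptions, and this is not Buildbot code:

```python
import socket
import time

def publish_metric(path, value, host="localhost", port=2003):
    # Carbon plaintext protocol: one "<path> <value> <timestamp>" line per sample.
    line = f"{path} {value} {int(time.time())}\n"
    with socket.create_connection((host, port)) as sock:
        sock.sendall(line.encode("ascii"))

# e.g. record how long a (hypothetical) build took, in seconds
publish_metric("buildbot.builders.runtests.build_duration_seconds", 73.4)
```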
<code>
[start of pkg/setup.py]
1 #!/usr/bin/env python
2 #
3 # This file is part of Buildbot. Buildbot is free software: you can
4 # redistribute it and/or modify it under the terms of the GNU General Public
5 # License as published by the Free Software Foundation, version 2.
6 #
7 # This program is distributed in the hope that it will be useful, but WITHOUT
8 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
9 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
10 # details.
11 #
12 # You should have received a copy of the GNU General Public License along with
13 # this program; if not, write to the Free Software Foundation, Inc., 51
14 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
15 #
16 # Copyright Buildbot Team Members
17
18 from setuptools import setup
19 import buildbot_pkg
20
21 setup(
22 name='buildbot_pkg',
23 version=buildbot_pkg.getVersion("."),
24 description='Buildbot packaging tools',
25 author=u'Pierre Tardy',
26 author_email=u'[email protected]',
27 url='http://buildbot.net/',
28 license='GNU GPL',
29 py_modules=['buildbot_pkg'],
30 )
31
[end of pkg/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pkg/setup.py b/pkg/setup.py
--- a/pkg/setup.py
+++ b/pkg/setup.py
@@ -19,7 +19,7 @@
import buildbot_pkg
setup(
- name='buildbot_pkg',
+ name='buildbot-pkg',
version=buildbot_pkg.getVersion("."),
description='Buildbot packaging tools',
author=u'Pierre Tardy',
|
{"golden_diff": "diff --git a/pkg/setup.py b/pkg/setup.py\n--- a/pkg/setup.py\n+++ b/pkg/setup.py\n@@ -19,7 +19,7 @@\n import buildbot_pkg\n \n setup(\n- name='buildbot_pkg',\n+ name='buildbot-pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n", "issue": "New web plugin idea: a health indicator\nThis ticket is a migrated Trac ticket [2966](http://trac.buildbot.net/ticket/2966)\n\nPeople contributed to the original ticket: `benoit.allard@...`, @tardyp, @rutsky, @unknown_contributor, @sa2ajj, @rutsky\nTicket created on: `Oct 24 2014`\nTicket last modified on: `Mar 19 2016`\n\n---\n\nI like to extract as much useful indicator from my builds as possible (time, but also amount of warnings, and such ...)\n\nIt would be cool to have a web plugin that could print the evolution of my indicators over time ! (Of course, I would have to configure which indicator I want to see plotted, maybe the kind of plot, and so on ...)\n\n\n---\n\n_Comment from_: @sa2ajj\n_Date_: `Oct 24 2014`\n\nCould you please elaborate or provide a more specific example?\n\nI think it's related to the [[metrics support|http://docs.buildbot.net/latest/developer/metrics.html]], but without an example I can easily be wrong :)\n\n---\n_Comment from_: @Ben\n_Date_: `Oct 24 2014`\n\nI was more aiming at [[Statistics|http://docs.buildbot.net/latest/developer/cls-buildsteps.html?highlight=statistics#buildbot.process.buildstep.[[BuildStep]].hasStatistic]], but I just realized that\n\n Note that statistics are not preserved after a build is complete.\n\nSo metrics is probably where we want to interface with the master.\n\nI used to abuse Properties for that purpose ...\n\n---\n_Comment from_: @tardyp\n_Date_: `Oct 24 2014`\n\nBuildbot plugin system is really made for enabling such dashboards.\n\nA web ui plugin is not technically restricted to creating a bunch of js file, it could also create a twisted service.\n\nFor me having the JS only use existing data api to query the data will be very inefficient. I think we could easily create a service, like a status service that registers to a bunch of mq events, and create statistics based on that.\n\nI also had in mind that plugins could have some table in the DB they could use to store the data, or maybe use another db configuration with all schema + migration stuff separated.\n\n\n---\n_Comment from_: @tardyp\n_Date_: `Oct 26 2014`\n\non IRC, sa2ajj talked about http://graphite.readthedocs.org/\n\nHe told us, he actually uses it at work, and has integration working with eigth.\n\nLooking at the documentation, the first reaction is how to integrate this with multimaster, as graphite has its own db called whisper.\nI haven't look too much deeply, but I think this is still feasible as a external tool. Probably this would be much cheaper than making our own metrics system inside buildbot.\n\nAn external graphite server could be setup, and watch for the (TBD) mq server. As there are messages for nearly every kind of activity that happens in buildbot, this is a good mean of making solid analysis of what is going on. Of course, this solution would not be fully integrated, as probably the UI would be external, but anyway, I think this is a possible cheap path.\n\n@sa2ajj do you think it is possible? 
How would you estimate the cost of integration?\n\n\n\n\n---\n_Comment from_: @Ben\n_Date_: `Oct 26 2014`\n\nThere are a lot of [[interfaces|http://graphite.readthedocs.org/en/latest/tools.html#visualization]], as well as data collectors / forwarders (same page, a bit higher) available for graphite. It looks like some of them are js-only ! Still way to go for a www-plugin !\n\n---\n_Comment from_: @sa2ajj\n_Date_: `Oct 27 2014`\n\nWhat I said was that we indeed use graphite, but I did not say we use it with Buildbot.\n\nI have an oldish branch where I tried to publish metrics to graphite. However I stopped working on that when I realised that it's not very straightforward to implement it to support multi-master case.\n\nIf there's an interest, I can revive the branch or, at least, publish what I have (after rebasing the latest master) so other could comment in what direction my thinking worked.\n\n---\n_Comment from_: @unknown_contributor\n_Date_: `Mar 19 2016`\n\n+1 for this feature. I suggest to merge the statistic API with the [[stats module|https://github.com/buildbot/buildbot/tree/master/master/buildbot/statistics]] developed last year by my GSOC student. And add a default backend that store a subset of these stat in the main database that would enable the development of heath/stats visualization modules directly installed by default (using Highchart js lib for example) \n\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom setuptools import setup\nimport buildbot_pkg\n\nsetup(\n name='buildbot_pkg',\n version=buildbot_pkg.getVersion(\".\"),\n description='Buildbot packaging tools',\n author=u'Pierre Tardy',\n author_email=u'[email protected]',\n url='http://buildbot.net/',\n license='GNU GPL',\n py_modules=['buildbot_pkg'],\n)\n", "path": "pkg/setup.py"}]}
| 1,880 | 86 |
gh_patches_debug_20490
|
rasdani/github-patches
|
git_diff
|
openvinotoolkit__datumaro-1284
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automatic generation of specs.json by running specs.py is broken
After merging https://github.com/openvinotoolkit/datumaro/pull/1268, `metadata` is newly introduced in `specs.json`, but the automated `specs.json` doesn't create `metadata`, and hence it breaks the lazy_load of importers.
</issue>
<code>
[start of src/datumaro/plugins/specs.py]
1 # Copyright (C) 2023 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 from pathlib import Path
6
7 from datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin
8 from datumaro.util import parse_json_file
9
10 _SOURCE_PATH = Path(__file__).resolve()
11 _SOURCE_DIR = _SOURCE_PATH.parent
12 _SPECS_JSON_PATH = _SOURCE_DIR / "specs.json"
13
14
15 def get_lazy_plugins():
16 return [
17 plugin
18 for plugin in [
19 get_lazy_plugin(
20 spec["import_path"],
21 spec["plugin_name"],
22 spec["plugin_type"],
23 spec.get("extra_deps", []),
24 spec.get("metadata", {}),
25 )
26 for spec in parse_json_file(str(_SPECS_JSON_PATH))
27 ]
28 if plugin is not None
29 ]
30
31
32 if __name__ == "__main__":
33 from datumaro.components.environment import Environment
34 from datumaro.util import dump_json_file
35
36 env = Environment(use_lazy_import=False)
37 plugin_specs = []
38
39 def _enroll_to_plugin_specs(plugins, plugin_type):
40 global plugin_specs
41
42 for _, plugin in plugins.items():
43 mod = plugin.__module__
44 class_name = plugin.__name__
45 plugin_name = plugin.NAME
46 plugin_specs += [
47 {
48 "import_path": f"{mod}.{class_name}",
49 "plugin_name": plugin_name,
50 "plugin_type": plugin_type,
51 "extra_deps": get_extra_deps(plugin),
52 }
53 ]
54
55 _enroll_to_plugin_specs(env.extractors, "DatasetBase")
56 _enroll_to_plugin_specs(env.importers, "Importer")
57 _enroll_to_plugin_specs(env.launchers, "Launcher")
58 _enroll_to_plugin_specs(env.exporters, "Exporter")
59 _enroll_to_plugin_specs(env.generators, "DatasetGenerator")
60 _enroll_to_plugin_specs(env.transforms, "Transform")
61 _enroll_to_plugin_specs(env.validators, "Validator")
62
63 dump_json_file(
64 _SPECS_JSON_PATH,
65 sorted(plugin_specs, key=lambda spec: spec["import_path"]),
66 indent=True,
67 append_newline=True,
68 )
69
[end of src/datumaro/plugins/specs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/datumaro/plugins/specs.py b/src/datumaro/plugins/specs.py
--- a/src/datumaro/plugins/specs.py
+++ b/src/datumaro/plugins/specs.py
@@ -43,14 +43,18 @@
mod = plugin.__module__
class_name = plugin.__name__
plugin_name = plugin.NAME
- plugin_specs += [
- {
- "import_path": f"{mod}.{class_name}",
- "plugin_name": plugin_name,
- "plugin_type": plugin_type,
- "extra_deps": get_extra_deps(plugin),
- }
- ]
+ extra_deps = get_extra_deps(plugin)
+ plugin_spec = {
+ "import_path": f"{mod}.{class_name}",
+ "plugin_name": plugin_name,
+ "plugin_type": plugin_type,
+ }
+ if extra_deps:
+ plugin_spec["extra_deps"] = extra_deps
+ # Setting metadata for importers
+ if hasattr(plugin, "get_file_extensions"):
+ plugin_spec["metadata"] = {"file_extensions": sorted(plugin.get_file_extensions())}
+ plugin_specs.append(plugin_spec)
_enroll_to_plugin_specs(env.extractors, "DatasetBase")
_enroll_to_plugin_specs(env.importers, "Importer")
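To make the effect of the patch concrete, here is a hand-written example of the kind of entries the regenerated `specs.json` would contain and of the fallbacks `get_lazy_plugins()` applies when reading them back. The plugin names, import paths and file extensions below are placeholders, not values from the real file.

```python
# Hypothetical entries as the patched generator would emit them.
example_specs = [
    {
        "import_path": "my_pkg.importer.MyImporter",
        "plugin_name": "my_importer",
        "plugin_type": "Importer",
        # Added only because the importer defines get_file_extensions().
        "metadata": {"file_extensions": [".abc"]},
    },
    {
        "import_path": "my_pkg.transform.MyTransform",
        "plugin_name": "my_transform",
        "plugin_type": "Transform",
        # "extra_deps" and "metadata" are omitted entirely when empty.
    },
]

for spec in example_specs:
    # Same fallbacks used in get_lazy_plugins(), so sparse entries stay valid.
    print(spec["plugin_name"], spec.get("extra_deps", []), spec.get("metadata", {}))
```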
|
{"golden_diff": "diff --git a/src/datumaro/plugins/specs.py b/src/datumaro/plugins/specs.py\n--- a/src/datumaro/plugins/specs.py\n+++ b/src/datumaro/plugins/specs.py\n@@ -43,14 +43,18 @@\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n- plugin_specs += [\n- {\n- \"import_path\": f\"{mod}.{class_name}\",\n- \"plugin_name\": plugin_name,\n- \"plugin_type\": plugin_type,\n- \"extra_deps\": get_extra_deps(plugin),\n- }\n- ]\n+ extra_deps = get_extra_deps(plugin)\n+ plugin_spec = {\n+ \"import_path\": f\"{mod}.{class_name}\",\n+ \"plugin_name\": plugin_name,\n+ \"plugin_type\": plugin_type,\n+ }\n+ if extra_deps:\n+ plugin_spec[\"extra_deps\"] = extra_deps\n+ # Setting metadata for importers\n+ if hasattr(plugin, \"get_file_extensions\"):\n+ plugin_spec[\"metadata\"] = {\"file_extensions\": sorted(plugin.get_file_extensions())}\n+ plugin_specs.append(plugin_spec)\n \n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n", "issue": "Automatic generation of specs.json by running specs.py is broken\nAfter merging https://github.com/openvinotoolkit/datumaro/pull/1268, `metadata` is newly introduced in `specs.json`, but the automated `specs.json` doesn't create `metadata`, and hence it breaks the lazy_load of importers.\n", "before_files": [{"content": "# Copyright (C) 2023 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom pathlib import Path\n\nfrom datumaro.components.lazy_plugin import get_extra_deps, get_lazy_plugin\nfrom datumaro.util import parse_json_file\n\n_SOURCE_PATH = Path(__file__).resolve()\n_SOURCE_DIR = _SOURCE_PATH.parent\n_SPECS_JSON_PATH = _SOURCE_DIR / \"specs.json\"\n\n\ndef get_lazy_plugins():\n return [\n plugin\n for plugin in [\n get_lazy_plugin(\n spec[\"import_path\"],\n spec[\"plugin_name\"],\n spec[\"plugin_type\"],\n spec.get(\"extra_deps\", []),\n spec.get(\"metadata\", {}),\n )\n for spec in parse_json_file(str(_SPECS_JSON_PATH))\n ]\n if plugin is not None\n ]\n\n\nif __name__ == \"__main__\":\n from datumaro.components.environment import Environment\n from datumaro.util import dump_json_file\n\n env = Environment(use_lazy_import=False)\n plugin_specs = []\n\n def _enroll_to_plugin_specs(plugins, plugin_type):\n global plugin_specs\n\n for _, plugin in plugins.items():\n mod = plugin.__module__\n class_name = plugin.__name__\n plugin_name = plugin.NAME\n plugin_specs += [\n {\n \"import_path\": f\"{mod}.{class_name}\",\n \"plugin_name\": plugin_name,\n \"plugin_type\": plugin_type,\n \"extra_deps\": get_extra_deps(plugin),\n }\n ]\n\n _enroll_to_plugin_specs(env.extractors, \"DatasetBase\")\n _enroll_to_plugin_specs(env.importers, \"Importer\")\n _enroll_to_plugin_specs(env.launchers, \"Launcher\")\n _enroll_to_plugin_specs(env.exporters, \"Exporter\")\n _enroll_to_plugin_specs(env.generators, \"DatasetGenerator\")\n _enroll_to_plugin_specs(env.transforms, \"Transform\")\n _enroll_to_plugin_specs(env.validators, \"Validator\")\n\n dump_json_file(\n _SPECS_JSON_PATH,\n sorted(plugin_specs, key=lambda spec: spec[\"import_path\"]),\n indent=True,\n append_newline=True,\n )\n", "path": "src/datumaro/plugins/specs.py"}]}
| 1,193 | 282 |
gh_patches_debug_4520
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-357
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading
When loading ImageNet, I get the following debug messages
`Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading`
message called from https://github.com/pytorch/vision/blob/45dab0e4528333332f07ab1979e006857563cf99/torchvision/datasets/folder.py#L46
caused by
https://github.com/python-pillow/Pillow/blob/7d8c0d9e39d5ed79ea6aa590b9cbff5b0d17248c/PIL/Image.py#L573
</issue>
<code>
[start of torchvision/datasets/folder.py]
1 import torch.utils.data as data
2
3 from PIL import Image
4 import os
5 import os.path
6
7 IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
8
9
10 def is_image_file(filename):
11 """Checks if a file is an image.
12
13 Args:
14 filename (string): path to a file
15
16 Returns:
17 bool: True if the filename ends with a known image extension
18 """
19 filename_lower = filename.lower()
20 return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
21
22
23 def find_classes(dir):
24 classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
25 classes.sort()
26 class_to_idx = {classes[i]: i for i in range(len(classes))}
27 return classes, class_to_idx
28
29
30 def make_dataset(dir, class_to_idx):
31 images = []
32 dir = os.path.expanduser(dir)
33 for target in sorted(os.listdir(dir)):
34 d = os.path.join(dir, target)
35 if not os.path.isdir(d):
36 continue
37
38 for root, _, fnames in sorted(os.walk(d)):
39 for fname in sorted(fnames):
40 if is_image_file(fname):
41 path = os.path.join(root, fname)
42 item = (path, class_to_idx[target])
43 images.append(item)
44
45 return images
46
47
48 def pil_loader(path):
49 # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
50 with open(path, 'rb') as f:
51 with Image.open(f) as img:
52 return img.convert('RGB')
53
54
55 def accimage_loader(path):
56 import accimage
57 try:
58 return accimage.Image(path)
59 except IOError:
60 # Potentially a decoding problem, fall back to PIL.Image
61 return pil_loader(path)
62
63
64 def default_loader(path):
65 from torchvision import get_image_backend
66 if get_image_backend() == 'accimage':
67 return accimage_loader(path)
68 else:
69 return pil_loader(path)
70
71
72 class ImageFolder(data.Dataset):
73 """A generic data loader where the images are arranged in this way: ::
74
75 root/dog/xxx.png
76 root/dog/xxy.png
77 root/dog/xxz.png
78
79 root/cat/123.png
80 root/cat/nsdf3.png
81 root/cat/asd932_.png
82
83 Args:
84 root (string): Root directory path.
85 transform (callable, optional): A function/transform that takes in an PIL image
86 and returns a transformed version. E.g, ``transforms.RandomCrop``
87 target_transform (callable, optional): A function/transform that takes in the
88 target and transforms it.
89 loader (callable, optional): A function to load an image given its path.
90
91 Attributes:
92 classes (list): List of the class names.
93 class_to_idx (dict): Dict with items (class_name, class_index).
94 imgs (list): List of (image path, class_index) tuples
95 """
96
97 def __init__(self, root, transform=None, target_transform=None,
98 loader=default_loader):
99 classes, class_to_idx = find_classes(root)
100 imgs = make_dataset(root, class_to_idx)
101 if len(imgs) == 0:
102 raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
103 "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
104
105 self.root = root
106 self.imgs = imgs
107 self.classes = classes
108 self.class_to_idx = class_to_idx
109 self.transform = transform
110 self.target_transform = target_transform
111 self.loader = loader
112
113 def __getitem__(self, index):
114 """
115 Args:
116 index (int): Index
117
118 Returns:
119 tuple: (image, target) where target is class_index of the target class.
120 """
121 path, target = self.imgs[index]
122 img = self.loader(path)
123 if self.transform is not None:
124 img = self.transform(img)
125 if self.target_transform is not None:
126 target = self.target_transform(target)
127
128 return img, target
129
130 def __len__(self):
131 return len(self.imgs)
132
[end of torchvision/datasets/folder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -48,8 +48,8 @@
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
- with Image.open(f) as img:
- return img.convert('RGB')
+ img = Image.open(f)
+ return img.convert('RGB')
def accimage_loader(path):
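The change drops the inner context manager because, judging from the Pillow code linked in the issue, `Image.__exit__` ends up calling `close()` on an internal file pointer that is already `None` once the pixel data has been read, and Pillow logs that failure as the reported debug message. A standalone sketch of the fixed pattern follows; the file name in the commented call is a placeholder.

```python
from PIL import Image

def load_rgb(path):
    # The outer "with" owns and closes the file handle; convert() forces the
    # pixel data to be read first, so the Image object itself never needs to
    # be used as a context manager.
    with open(path, "rb") as f:
        img = Image.open(f)
        return img.convert("RGB")

# image = load_rgb("example.jpg")  # placeholder path
```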
|
{"golden_diff": "diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py\n--- a/torchvision/datasets/folder.py\n+++ b/torchvision/datasets/folder.py\n@@ -48,8 +48,8 @@\n def pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n- with Image.open(f) as img:\n- return img.convert('RGB')\n+ img = Image.open(f)\n+ return img.convert('RGB')\n \n \n def accimage_loader(path):\n", "issue": "Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading\nWhen loading ImageNet, I get the following debug messages\r\n`Error closing: 'NoneType' object has no attribute 'close' for ImageNet reading`\r\nmessage called from https://github.com/pytorch/vision/blob/45dab0e4528333332f07ab1979e006857563cf99/torchvision/datasets/folder.py#L46\r\n\r\ncaused by\r\nhttps://github.com/python-pillow/Pillow/blob/7d8c0d9e39d5ed79ea6aa590b9cbff5b0d17248c/PIL/Image.py#L573\n", "before_files": [{"content": "import torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n\n\ndef is_image_file(filename):\n \"\"\"Checks if a file is an image.\n\n Args:\n filename (string): path to a file\n\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n\n\ndef find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef make_dataset(dir, class_to_idx):\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n images.append(item)\n\n return images\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n import accimage\n try:\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef default_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n\n\nclass ImageFolder(data.Dataset):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(self, root, transform=None, target_transform=None,\n loader=default_loader):\n classes, class_to_idx = find_classes(root)\n imgs = make_dataset(root, class_to_idx)\n if len(imgs) == 0:\n raise(RuntimeError(\"Found 0 images in subfolders of: \" + root + \"\\n\"\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n self.root = root\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.transform = transform\n self.target_transform = target_transform\n self.loader = loader\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is class_index of the target class.\n \"\"\"\n path, target = self.imgs[index]\n img = self.loader(path)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n", "path": "torchvision/datasets/folder.py"}]}
| 1,912 | 139 |
gh_patches_debug_14940
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1348
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unwrap_phase SegFaults when passed a masked array with all elements masked
The **unwrap_phase** function in the skimage.restoration namespace Segmentation faults when passed a masked array with all elements masked. For example:
``` Python
import numpy as np
from skimage.restoration import unwrap_phase
image = np.ma.arange(100, dtype='float32').reshape(10, 10)
image[:] = np.ma.masked
unwrap_phase(image)
```
The Segmentation fault occurs in the find_pivot function in unwrap_2d_ljmu.c which is called from quicker_sort. The issue is that the routine is attempting to sort a length 0 array. A check should be added in the unwrap2D (and related functions) to see if params.no_of_edges is not 0 (or 1?)
</issue>
<code>
[start of skimage/restoration/unwrap.py]
1 import numpy as np
2 import warnings
3 from six import string_types
4
5 from ._unwrap_1d import unwrap_1d
6 from ._unwrap_2d import unwrap_2d
7 from ._unwrap_3d import unwrap_3d
8
9
10 def unwrap_phase(image, wrap_around=False, seed=None):
11 '''Recover the original from a wrapped phase image.
12
13 From an image wrapped to lie in the interval [-pi, pi), recover the
14 original, unwrapped image.
15
16 Parameters
17 ----------
18 image : 1D, 2D or 3D ndarray of floats, optionally a masked array
19 The values should be in the range [-pi, pi). If a masked array is
20 provided, the masked entries will not be changed, and their values
21 will not be used to guide the unwrapping of neighboring, unmasked
22 values. Masked 1D arrays are not allowed, and will raise a
23 `ValueError`.
24 wrap_around : bool or sequence of bool, optional
25 When an element of the sequence is `True`, the unwrapping process
26 will regard the edges along the corresponding axis of the image to be
27 connected and use this connectivity to guide the phase unwrapping
28 process. If only a single boolean is given, it will apply to all axes.
29 Wrap around is not supported for 1D arrays.
30 seed : int, optional
31 Unwrapping 2D or 3D images uses random initialization. This sets the
32 seed of the PRNG to achieve deterministic behavior.
33
34 Returns
35 -------
36 image_unwrapped : array_like, double
37 Unwrapped image of the same shape as the input. If the input `image`
38 was a masked array, the mask will be preserved.
39
40 Raises
41 ------
42 ValueError
43 If called with a masked 1D array or called with a 1D array and
44 ``wrap_around=True``.
45
46 Examples
47 --------
48 >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]
49 >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))
50 >>> image_wrapped = np.angle(np.exp(1j * image))
51 >>> image_unwrapped = unwrap_phase(image_wrapped)
52 >>> np.std(image_unwrapped - image) < 1e-6 # A constant offset is normal
53 True
54
55 References
56 ----------
57 .. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
58 and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
59 algorithm based on sorting by reliability following a noncontinuous
60 path", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,
61 .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., "Fast
62 three-dimensional phase-unwrapping algorithm based on sorting by
63 reliability following a non-continuous path. In W. Osten,
64 C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,
65 International Society for Optics and Photonics.
66 '''
67 if image.ndim not in (1, 2, 3):
68 raise ValueError('Image must be 1, 2, or 3 dimensional')
69 if isinstance(wrap_around, bool):
70 wrap_around = [wrap_around] * image.ndim
71 elif (hasattr(wrap_around, '__getitem__')
72 and not isinstance(wrap_around, string_types)):
73 if len(wrap_around) != image.ndim:
74 raise ValueError('Length of `wrap_around` must equal the '
75 'dimensionality of image')
76 wrap_around = [bool(wa) for wa in wrap_around]
77 else:
78 raise ValueError('`wrap_around` must be a bool or a sequence with '
79 'length equal to the dimensionality of image')
80 if image.ndim == 1:
81 if np.ma.isMaskedArray(image):
82 raise ValueError('1D masked images cannot be unwrapped')
83 if wrap_around[0]:
84 raise ValueError('`wrap_around` is not supported for 1D images')
85 if image.ndim in (2, 3) and 1 in image.shape:
86 warnings.warn('Image has a length 1 dimension. Consider using an '
87 'array of lower dimensionality to use a more efficient '
88 'algorithm')
89
90 if np.ma.isMaskedArray(image):
91 mask = np.require(image.mask, np.uint8, ['C'])
92 image = image.data
93 else:
94 mask = np.zeros_like(image, dtype=np.uint8, order='C')
95
96 image_not_masked = np.asarray(image, dtype=np.double, order='C')
97 image_unwrapped = np.empty_like(image, dtype=np.double, order='C')
98
99 if image.ndim == 1:
100 unwrap_1d(image_not_masked, image_unwrapped)
101 elif image.ndim == 2:
102 unwrap_2d(image_not_masked, mask, image_unwrapped,
103 wrap_around, seed)
104 elif image.ndim == 3:
105 unwrap_3d(image_not_masked, mask, image_unwrapped,
106 wrap_around, seed)
107
108 if np.ma.isMaskedArray(image):
109 return np.ma.array(image_unwrapped, mask=mask)
110 else:
111 return image_unwrapped
112
[end of skimage/restoration/unwrap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/restoration/unwrap.py b/skimage/restoration/unwrap.py
--- a/skimage/restoration/unwrap.py
+++ b/skimage/restoration/unwrap.py
@@ -88,13 +88,14 @@
'algorithm')
if np.ma.isMaskedArray(image):
- mask = np.require(image.mask, np.uint8, ['C'])
- image = image.data
+ mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])
else:
mask = np.zeros_like(image, dtype=np.uint8, order='C')
- image_not_masked = np.asarray(image, dtype=np.double, order='C')
- image_unwrapped = np.empty_like(image, dtype=np.double, order='C')
+ image_not_masked = np.asarray(
+ np.ma.getdata(image), dtype=np.double, order='C')
+ image_unwrapped = np.empty_like(image, dtype=np.double, order='C',
+ subok=False)
if image.ndim == 1:
unwrap_1d(image_not_masked, image_unwrapped)
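For context on the two substitutions in this hunk, a small standalone demonstration (written for this note, not taken from the scikit-image tests): `np.ma.getmaskarray` always returns a full boolean array even when no mask has been set, `np.ma.getdata` works for plain and masked arrays alike, and `np.empty_like` keeps the `MaskedArray` subclass unless `subok=False` is passed, which matters because the unwrapping routines expect to write into plain ndarrays.

```python
import numpy as np

unmasked = np.ma.arange(4, dtype="float64")
print(unmasked.mask)                  # ma.nomask (a scalar), not an array
print(np.ma.getmaskarray(unmasked))   # [False False False False]

fully_masked = np.ma.arange(4, dtype="float64")
fully_masked[:] = np.ma.masked
print(np.ma.getmaskarray(fully_masked))  # [ True  True  True  True]

# Without subok=False the output buffer would itself be a MaskedArray.
print(type(np.empty_like(fully_masked)))               # MaskedArray
print(type(np.empty_like(fully_masked, subok=False)))  # ndarray
```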
|
{"golden_diff": "diff --git a/skimage/restoration/unwrap.py b/skimage/restoration/unwrap.py\n--- a/skimage/restoration/unwrap.py\n+++ b/skimage/restoration/unwrap.py\n@@ -88,13 +88,14 @@\n 'algorithm')\n \n if np.ma.isMaskedArray(image):\n- mask = np.require(image.mask, np.uint8, ['C'])\n- image = image.data\n+ mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])\n else:\n mask = np.zeros_like(image, dtype=np.uint8, order='C')\n \n- image_not_masked = np.asarray(image, dtype=np.double, order='C')\n- image_unwrapped = np.empty_like(image, dtype=np.double, order='C')\n+ image_not_masked = np.asarray(\n+ np.ma.getdata(image), dtype=np.double, order='C')\n+ image_unwrapped = np.empty_like(image, dtype=np.double, order='C',\n+ subok=False)\n \n if image.ndim == 1:\n unwrap_1d(image_not_masked, image_unwrapped)\n", "issue": "unwrap_phase SegFaults when passed a masked array with all elements masked\nThe **unwrap_phase** function in the skimage.restoration namespace Segmentation faults when passed a masked array with all elements masked. For example:\n\n``` Python\nimport numpy as np\nfrom skimage.restoration import unwrap_phase\nimage = np.ma.arange(100, dtype='float32').reshape(10, 10)\nimage[:] = np.ma.masked\nunwrap_phase(image)\n```\n\nThe Segmentation fault occurs in the find_pivot function in unwrap_2d_ljmu.c which is called from quicker_sort. The issue is that the routine is attempting to sort a length 0 array. A check should be added in the unwrap2D (and related functions) to see if params.no_of_edges is not 0 (or 1?)\n\n", "before_files": [{"content": "import numpy as np\nimport warnings\nfrom six import string_types\n\nfrom ._unwrap_1d import unwrap_1d\nfrom ._unwrap_2d import unwrap_2d\nfrom ._unwrap_3d import unwrap_3d\n\n\ndef unwrap_phase(image, wrap_around=False, seed=None):\n '''Recover the original from a wrapped phase image.\n\n From an image wrapped to lie in the interval [-pi, pi), recover the\n original, unwrapped image.\n\n Parameters\n ----------\n image : 1D, 2D or 3D ndarray of floats, optionally a masked array\n The values should be in the range [-pi, pi). If a masked array is\n provided, the masked entries will not be changed, and their values\n will not be used to guide the unwrapping of neighboring, unmasked\n values. Masked 1D arrays are not allowed, and will raise a\n `ValueError`.\n wrap_around : bool or sequence of bool, optional\n When an element of the sequence is `True`, the unwrapping process\n will regard the edges along the corresponding axis of the image to be\n connected and use this connectivity to guide the phase unwrapping\n process. If only a single boolean is given, it will apply to all axes.\n Wrap around is not supported for 1D arrays.\n seed : int, optional\n Unwrapping 2D or 3D images uses random initialization. This sets the\n seed of the PRNG to achieve deterministic behavior.\n\n Returns\n -------\n image_unwrapped : array_like, double\n Unwrapped image of the same shape as the input. If the input `image`\n was a masked array, the mask will be preserved.\n\n Raises\n ------\n ValueError\n If called with a masked 1D array or called with a 1D array and\n ``wrap_around=True``.\n\n Examples\n --------\n >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]\n >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))\n >>> image_wrapped = np.angle(np.exp(1j * image))\n >>> image_unwrapped = unwrap_phase(image_wrapped)\n >>> np.std(image_unwrapped - image) < 1e-6 # A constant offset is normal\n True\n\n References\n ----------\n .. 
[1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,\n and Munther A. Gdeisat, \"Fast two-dimensional phase-unwrapping\n algorithm based on sorting by reliability following a noncontinuous\n path\", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,\n .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., \"Fast\n three-dimensional phase-unwrapping algorithm based on sorting by\n reliability following a non-continuous path. In W. Osten,\n C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,\n International Society for Optics and Photonics.\n '''\n if image.ndim not in (1, 2, 3):\n raise ValueError('Image must be 1, 2, or 3 dimensional')\n if isinstance(wrap_around, bool):\n wrap_around = [wrap_around] * image.ndim\n elif (hasattr(wrap_around, '__getitem__')\n and not isinstance(wrap_around, string_types)):\n if len(wrap_around) != image.ndim:\n raise ValueError('Length of `wrap_around` must equal the '\n 'dimensionality of image')\n wrap_around = [bool(wa) for wa in wrap_around]\n else:\n raise ValueError('`wrap_around` must be a bool or a sequence with '\n 'length equal to the dimensionality of image')\n if image.ndim == 1:\n if np.ma.isMaskedArray(image):\n raise ValueError('1D masked images cannot be unwrapped')\n if wrap_around[0]:\n raise ValueError('`wrap_around` is not supported for 1D images')\n if image.ndim in (2, 3) and 1 in image.shape:\n warnings.warn('Image has a length 1 dimension. Consider using an '\n 'array of lower dimensionality to use a more efficient '\n 'algorithm')\n\n if np.ma.isMaskedArray(image):\n mask = np.require(image.mask, np.uint8, ['C'])\n image = image.data\n else:\n mask = np.zeros_like(image, dtype=np.uint8, order='C')\n\n image_not_masked = np.asarray(image, dtype=np.double, order='C')\n image_unwrapped = np.empty_like(image, dtype=np.double, order='C')\n\n if image.ndim == 1:\n unwrap_1d(image_not_masked, image_unwrapped)\n elif image.ndim == 2:\n unwrap_2d(image_not_masked, mask, image_unwrapped,\n wrap_around, seed)\n elif image.ndim == 3:\n unwrap_3d(image_not_masked, mask, image_unwrapped,\n wrap_around, seed)\n\n if np.ma.isMaskedArray(image):\n return np.ma.array(image_unwrapped, mask=mask)\n else:\n return image_unwrapped\n", "path": "skimage/restoration/unwrap.py"}]}
| 2,157 | 249 |
gh_patches_debug_50539
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-291
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Chinese document layout has a link error.
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
A clear and concise description of what the bug is.
Chinese document layout has a connection error.
doc link:[https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)

</issue>
<code>
[start of docs/source/norm_zh.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """
18 This file folds Chinese po files by hacking babel.messages.pofile.normalize
19 using jieba text segment library instead of regex
20 """
21
22 import datetime
23 import os
24
25 from babel.messages import pofile
26 from babel.messages.pofile import escape
27
28
29 def _zh_len(s):
30 """
31 Calculate text length in Chinese
32 """
33 try:
34 return len(s.encode('gb2312'))
35 except ValueError:
36 return len(s)
37
38
39 def _zh_split(s):
40 """
41 Split text length in Chinese
42 """
43 import jieba
44 try:
45 s.encode('ascii')
46 has_zh = False
47 except ValueError:
48 has_zh = True
49
50 if has_zh:
51 return list(jieba.cut(s))
52 else:
53 return pofile.WORD_SEP.split(s)
54
55
56 # code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)
57 def _normalize(string, prefix='', width=76):
58 r"""Convert a string into a format that is appropriate for .po files.
59 >>> print(normalize('''Say:
60 ... "hello, world!"
61 ... ''', width=None))
62 ""
63 "Say:\n"
64 " \"hello, world!\"\n"
65 >>> print(normalize('''Say:
66 ... "Lorem ipsum dolor sit amet, consectetur adipisicing elit, "
67 ... ''', width=32))
68 ""
69 "Say:\n"
70 " \"Lorem ipsum dolor sit "
71 "amet, consectetur adipisicing"
72 " elit, \"\n"
73 :param string: the string to normalize
74 :param prefix: a string that should be prepended to every line
75 :param width: the maximum line width; use `None`, 0, or a negative number
76 to completely disable line wrapping
77 """
78
79 if width and width > 0:
80 prefixlen = _zh_len(prefix)
81 lines = []
82 for line in string.splitlines(True):
83 if _zh_len(escape(line)) + prefixlen > width:
84 chunks = _zh_split(line)
85 chunks.reverse()
86 while chunks:
87 buf = []
88 size = 2
89 while chunks:
90 l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741
91 if size + l < width:
92 buf.append(chunks.pop())
93 size += l
94 else:
95 if not buf:
96 # handle long chunks by putting them on a
97 # separate line
98 buf.append(chunks.pop())
99 break
100 lines.append(u''.join(buf))
101 else:
102 lines.append(line)
103 else:
104 lines = string.splitlines(True)
105
106 if len(lines) <= 1:
107 return escape(string)
108
109 # Remove empty trailing line
110 if lines and not lines[-1]:
111 del lines[-1]
112 lines[-1] += '\n'
113 return u'""\n' + u'\n'.join([(prefix + escape(line)) for line in lines])
114
115
116 def main():
117 try:
118 import jieba # noqa: F401
119 except ImportError:
120 return
121
122 pofile.normalize = _normalize
123 for root, dirs, files in os.walk('.'):
124 if 'zh' not in root:
125 continue
126 for f in files:
127 if not f.endswith('.po'):
128 continue
129 path = os.path.join(root, f)
130
131 # only modify recent-changed files
132 modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
133 if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
134 continue
135
136 with open(path, 'rb') as inpf:
137 catalog = pofile.read_po(inpf)
138 with open(path, 'wb') as outf:
139 pofile.write_po(outf, catalog)
140
141
142 if __name__ == '__main__':
143 main()
144
[end of docs/source/norm_zh.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py
--- a/docs/source/norm_zh.py
+++ b/docs/source/norm_zh.py
@@ -130,7 +130,7 @@
# only modify recent-changed files
modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:
continue
with open(path, 'rb') as inpf:
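The only functional change here is tightening the freshness window from 30 minutes (1800 s) to 2 minutes (120 s), so `main()` re-folds just the `.po` catalogs that were regenerated moments earlier. The same check, pulled out into a helper for illustration; the helper name and the commented path are mine, not from the repository.

```python
import datetime
import os

def recently_modified(path, max_age_seconds=120):
    # Mirrors the guard in main(): skip catalogs that were not touched in the
    # last couple of minutes.
    modified = datetime.datetime.fromtimestamp(os.path.getmtime(path))
    return (datetime.datetime.now() - modified).total_seconds() <= max_age_seconds

# recently_modified("locale/zh_CN/LC_MESSAGES/index.po")  # placeholder path
```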
|
{"golden_diff": "diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py\n--- a/docs/source/norm_zh.py\n+++ b/docs/source/norm_zh.py\n@@ -130,7 +130,7 @@\n \n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:\n continue\n \n with open(path, 'rb') as inpf:\n", "issue": "[BUG] Chinese document layout has a link error.\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\nChinese document layout has a connection error.\r\ndoc link:[https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... ''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... 
''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n", "path": "docs/source/norm_zh.py"}]}
| 2,061 | 136 |
gh_patches_debug_20191
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-267
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DB function to edit column names
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
The user might want to edit column names.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
We need a `db` function to allow the user to edit the name of a column. We need to check and fix foreign keys and other references to the column.
</issue>
<code>
[start of db/columns.py]
1 from sqlalchemy import Column, Integer, ForeignKey
2 from db import constants
3
4
5 NULLABLE = "nullable"
6 PRIMARY_KEY = "primary_key"
7 TYPE = "type"
8
9 ID_TYPE = Integer
10 DEFAULT_COLUMNS = {
11 constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False}
12 }
13
14
15 class MathesarColumn(Column):
16 """
17 This class constrains the possible arguments, enabling us to include
18 a copy method (which has been deprecated in upstream SQLAlchemy since
19 1.4). The idea is that we can faithfully copy the subset of the
20 column definition that we care about, and this class defines that
21 subset.
22 """
23 def __init__(
24 self,
25 name,
26 sa_type,
27 foreign_keys=set(),
28 primary_key=False,
29 nullable=True,
30 ):
31 """
32 Construct a new ``MathesarColumn`` object.
33
34 Required arguments:
35 name -- String giving the name of the column in the database.
36 sa_type -- the SQLAlchemy type of the column.
37
38 Optional keyword arguments:
39 primary_key -- Boolean giving whether the column is a primary key.
40 """
41 super().__init__(
42 *foreign_keys,
43 name=name,
44 type_=sa_type,
45 primary_key=primary_key,
46 nullable=nullable,
47 )
48
49 @classmethod
50 def from_column(cls, column):
51 """
52 This alternate init method creates a new column (a copy) of the
53 given column. It respects only the properties in the __init__
54 of the MathesarColumn.
55 """
56 fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys}
57 return cls(
58 column.name,
59 column.type,
60 foreign_keys=fkeys,
61 primary_key=column.primary_key,
62 nullable=column.nullable,
63 )
64
65 @property
66 def is_default(self):
67 default_def = DEFAULT_COLUMNS.get(self.name, False)
68 return (
69 default_def
70 and self.type.python_type == default_def[TYPE]().python_type
71 and self.primary_key == default_def.get(PRIMARY_KEY, False)
72 and self.nullable == default_def.get(NULLABLE, True)
73 )
74
75
76 def get_default_mathesar_column_list():
77 return [
78 MathesarColumn(
79 c,
80 DEFAULT_COLUMNS[c][TYPE],
81 primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY]
82 )
83 for c in DEFAULT_COLUMNS
84 ]
85
86
87 def init_mathesar_table_column_list_with_defaults(column_list):
88 default_columns = get_default_mathesar_column_list()
89 given_columns = [MathesarColumn.from_column(c) for c in column_list]
90 return default_columns + given_columns
91
[end of db/columns.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/db/columns.py b/db/columns.py
--- a/db/columns.py
+++ b/db/columns.py
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, Integer, ForeignKey
+from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData
from db import constants
@@ -88,3 +88,19 @@
default_columns = get_default_mathesar_column_list()
given_columns = [MathesarColumn.from_column(c) for c in column_list]
return default_columns + given_columns
+
+
+def rename_column(schema, table_name, column_name, new_column_name, engine):
+ _preparer = engine.dialect.identifier_preparer
+ with engine.begin() as conn:
+ metadata = MetaData(bind=engine, schema=schema)
+ table = Table(table_name, metadata, schema=schema, autoload_with=engine)
+ column = table.columns[column_name]
+ prepared_table_name = _preparer.format_table(table)
+ prepared_column_name = _preparer.format_column(column)
+ prepared_new_column_name = _preparer.quote(new_column_name)
+ alter_stmt = f"""
+ ALTER TABLE {prepared_table_name}
+ RENAME {prepared_column_name} TO {prepared_new_column_name}
+ """
+ conn.execute(DDL(alter_stmt))
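A hypothetical call site for the new helper; the connection URL, schema, table and column names are placeholders. Because the statement is assembled with the dialect's identifier preparer, quoting is handled, and since PostgreSQL tracks columns internally rather than by name, existing foreign keys and indexes stay attached after the rename, which is presumably why the patch does not rewrite them explicitly.

```python
from sqlalchemy import create_engine
from db.columns import rename_column

# Placeholder connection details for illustration only.
engine = create_engine("postgresql://user:password@localhost/mydb")

rename_column(
    schema="public",
    table_name="orders",
    column_name="customer",
    new_column_name="customer_name",
    engine=engine,
)
```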
|
{"golden_diff": "diff --git a/db/columns.py b/db/columns.py\n--- a/db/columns.py\n+++ b/db/columns.py\n@@ -1,4 +1,4 @@\n-from sqlalchemy import Column, Integer, ForeignKey\n+from sqlalchemy import Column, Integer, ForeignKey, Table, DDL, MetaData\n from db import constants\n \n \n@@ -88,3 +88,19 @@\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n+\n+\n+def rename_column(schema, table_name, column_name, new_column_name, engine):\n+ _preparer = engine.dialect.identifier_preparer\n+ with engine.begin() as conn:\n+ metadata = MetaData(bind=engine, schema=schema)\n+ table = Table(table_name, metadata, schema=schema, autoload_with=engine)\n+ column = table.columns[column_name]\n+ prepared_table_name = _preparer.format_table(table)\n+ prepared_column_name = _preparer.format_column(column)\n+ prepared_new_column_name = _preparer.quote(new_column_name)\n+ alter_stmt = f\"\"\"\n+ ALTER TABLE {prepared_table_name}\n+ RENAME {prepared_column_name} TO {prepared_new_column_name}\n+ \"\"\"\n+ conn.execute(DDL(alter_stmt))\n", "issue": "DB function to edit column names\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nThe user might want to edit column names.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nWe need a `db` function to allow the user to edit the name of a column. We need to check and fix foreign keys and other references to the column.\n", "before_files": [{"content": "from sqlalchemy import Column, Integer, ForeignKey\nfrom db import constants\n\n\nNULLABLE = \"nullable\"\nPRIMARY_KEY = \"primary_key\"\nTYPE = \"type\"\n\nID_TYPE = Integer\nDEFAULT_COLUMNS = {\n constants.ID: {TYPE: ID_TYPE, PRIMARY_KEY: True, NULLABLE: False}\n}\n\n\nclass MathesarColumn(Column):\n \"\"\"\n This class constrains the possible arguments, enabling us to include\n a copy method (which has been deprecated in upstream SQLAlchemy since\n 1.4). The idea is that we can faithfully copy the subset of the\n column definition that we care about, and this class defines that\n subset.\n \"\"\"\n def __init__(\n self,\n name,\n sa_type,\n foreign_keys=set(),\n primary_key=False,\n nullable=True,\n ):\n \"\"\"\n Construct a new ``MathesarColumn`` object.\n\n Required arguments:\n name -- String giving the name of the column in the database.\n sa_type -- the SQLAlchemy type of the column.\n\n Optional keyword arguments:\n primary_key -- Boolean giving whether the column is a primary key.\n \"\"\"\n super().__init__(\n *foreign_keys,\n name=name,\n type_=sa_type,\n primary_key=primary_key,\n nullable=nullable,\n )\n\n @classmethod\n def from_column(cls, column):\n \"\"\"\n This alternate init method creates a new column (a copy) of the\n given column. 
It respects only the properties in the __init__\n of the MathesarColumn.\n \"\"\"\n fkeys = {ForeignKey(fk.target_fullname) for fk in column.foreign_keys}\n return cls(\n column.name,\n column.type,\n foreign_keys=fkeys,\n primary_key=column.primary_key,\n nullable=column.nullable,\n )\n\n @property\n def is_default(self):\n default_def = DEFAULT_COLUMNS.get(self.name, False)\n return (\n default_def\n and self.type.python_type == default_def[TYPE]().python_type\n and self.primary_key == default_def.get(PRIMARY_KEY, False)\n and self.nullable == default_def.get(NULLABLE, True)\n )\n\n\ndef get_default_mathesar_column_list():\n return [\n MathesarColumn(\n c,\n DEFAULT_COLUMNS[c][TYPE],\n primary_key=DEFAULT_COLUMNS[c][PRIMARY_KEY]\n )\n for c in DEFAULT_COLUMNS\n ]\n\n\ndef init_mathesar_table_column_list_with_defaults(column_list):\n default_columns = get_default_mathesar_column_list()\n given_columns = [MathesarColumn.from_column(c) for c in column_list]\n return default_columns + given_columns\n", "path": "db/columns.py"}]}
| 1,359 | 291 |
gh_patches_debug_1154
|
rasdani/github-patches
|
git_diff
|
cognitedata__cognite-sdk-python-291
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
client.time_series.get_time_series does not return metadata
**Describe the bug**
When executing `client.time_series.get_time_series()` with `include_metadata = True`, no metadata is returned.
**To Reproduce**
Runnable code reproducing the error.
```
import cognite
import requests
import os
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from cognite.client.stable.time_series import TimeSeries
sm_api = os.environ['SM_API_KEY']
client = cognite.CogniteClient(api_key = sm_api)
ts_name = 'Test_tssssss'
my_time_series = [TimeSeries(name=ts_name,
description = 'test_description',
metadata = { 'ASSETSCOPENAME' : 'meta_test_1' })]
client.time_series.post_time_series(my_time_series)
# create dummy data
np.random.seed(1338)
start_time = int((datetime.now()-timedelta(1)).strftime("%s"))
timestamps = [(start_time + i * 10)*1000 for i in np.arange(11)]
df = pd.DataFrame({'timestamp' : timestamps})
df[ts_name] = np.random.random(df.shape[0])
client.datapoints.post_datapoints_frame(df)
# get time_series
ts1 = client.time_series.get_time_series(name = ts_name,
include_metadata = True).to_pandas()
ts1_id = ts1['id'].loc[0]
print(ts1.loc[0])
# no meta data
# requests:
# first with no metadata
r1 = requests.get(url = 'https://api.cognitedata.com/api/0.5/projects/smart-maintenance-sandbox/timeseries/' + str(ts1_id) ,
headers= { 'Api-Key' : sm_api} , params = {"includeMetadata" : False})
print(r1.text.split('\n'))
# then with metadata
r1 = requests.get(url = 'https://api.cognitedata.com/api/0.5/projects/smart-maintenance-sandbox/timeseries/' + str(ts1_id) ,
headers= { 'Api-Key' : sm_api} , params = {"includeMetadata" : True})
print(r1.text.split('\n'))
```
**Expected behavior**
The `client.time_series.get_time_series(name = ts_name,include_metadata = True)` should return the metadata.
</issue>
<code>
[start of cognite/client/stable/time_series.py]
1 # -*- coding: utf-8 -*-
2 from copy import deepcopy
3 from typing import List
4 from urllib.parse import quote
5
6 import pandas as pd
7
8 from cognite.client._api_client import APIClient, CogniteCollectionResponse, CogniteResource, CogniteResponse
9
10
11 class TimeSeriesResponse(CogniteResponse):
12 """Time series Response Object"""
13
14 def __init__(self, internal_representation):
15 super().__init__(internal_representation)
16 item = self.to_json()
17 self.id = item.get("id")
18 self.name = item.get("name")
19 self.unit = item.get("unit")
20 self.is_step = item.get("isStep")
21 self.is_string = item.get("isString")
22 self.created_time = item.get("createdTime")
23 self.last_updated_time = item.get("lastUpdatedTime")
24 self.metadata = item.get("metadata")
25 self.asset_id = item.get("assetId")
26 self.description = item.get("description")
27
28 def to_pandas(self):
29 """Returns data as a pandas dataframe"""
30 if len(self.to_json()) > 0:
31 ts = self.to_json().copy()
32 if "metadata" in ts:
33 # Hack to avoid path ending up as first element in dict as from_dict will fail
34 metadata = ts.pop("metadata")
35 df = pd.DataFrame.from_dict(ts, orient="index")
36 df.loc["metadata"] = [metadata]
37 else:
38 df = pd.DataFrame.from_dict(ts, orient="index")
39 return df
40 return pd.DataFrame()
41
42
43 class TimeSeriesListResponse(CogniteCollectionResponse):
44 """Time series Response Object"""
45
46 _RESPONSE_CLASS = TimeSeriesResponse
47
48 def to_pandas(self, include_metadata: bool = False):
49 """Returns data as a pandas dataframe
50
51 Args:
52 include_metadata (bool): Whether or not to include metadata fields in the resulting dataframe
53 """
54 items = deepcopy(self.internal_representation["data"]["items"])
55 if items and items[0].get("metadata") is None:
56 return pd.DataFrame(items)
57 for d in items:
58 if d.get("metadata"):
59 metadata = d.pop("metadata")
60 if include_metadata:
61 d.update(metadata)
62 return pd.DataFrame(items)
63
64
65 class TimeSeries(CogniteResource):
66 """Data Transfer Object for a time series.
67
68 Args:
69 name (str): Unique name of time series.
70 is_string (bool): Whether the time series is string valued or not.
71 metadata (dict): Metadata.
72 unit (str): Physical unit of the time series.
73 asset_id (int): Asset that this time series belongs to.
74 description (str): Description of the time series.
75 security_categories (list(int)): Security categories required in order to access this time series.
76 is_step (bool): Whether or not the time series is a step series.
77
78 """
79
80 def __init__(
81 self,
82 name,
83 is_string=False,
84 metadata=None,
85 unit=None,
86 asset_id=None,
87 description=None,
88 security_categories=None,
89 is_step=None,
90 ):
91 self.name = name
92 self.is_string = is_string
93 self.metadata = metadata
94 self.unit = unit
95 self.asset_id = asset_id
96 self.description = description
97 self.security_categories = security_categories
98 self.is_step = is_step
99
100
101 class TimeSeriesClient(APIClient):
102 def __init__(self, **kwargs):
103 super().__init__(version="0.5", **kwargs)
104
105 def get_time_series(
106 self, prefix=None, description=None, include_metadata=False, asset_id=None, path=None, **kwargs
107 ) -> TimeSeriesListResponse:
108 """Returns an object containing the requested timeseries.
109
110 Args:
111 prefix (str): List timeseries with this prefix in the name.
112
113 description (str): Filter timeseries taht contains this string in its description.
114
115 include_metadata (bool): Decide if the metadata field should be returned or not. Defaults to False.
116
117 asset_id (int): Get timeseries related to this asset.
118
119 path (List[int]): Get timeseries under this asset path branch.
120
121 Keyword Arguments:
122 limit (int): Number of results to return.
123
124 autopaging (bool): Whether or not to automatically page through results. If set to true, limit will be
125 disregarded. Defaults to False.
126
127 Returns:
128 stable.time_series.TimeSeriesListResponse: A data object containing the requested timeseries with several getter methods with different
129 output formats.
130
131 Examples:
132 Get all time series for a given asset::
133
134 client = CogniteClient()
135 res = client.time_series.get_time_series(asset_id=123, autopaging=True)
136 print(res.to_pandas())
137 """
138 autopaging = kwargs.get("autopaging", False)
139 url = "/timeseries"
140 params = {
141 "q": prefix,
142 "description": description,
143 "includeMetadata": include_metadata,
144 "assetId": asset_id,
145 "path": str(path) if path else None,
146 "limit": kwargs.get("limit", self._LIMIT) if not autopaging else self._LIMIT,
147 }
148
149 res = self._get(url=url, params=params, autopaging=autopaging)
150 return TimeSeriesListResponse(res.json())
151
152 def post_time_series(self, time_series: List[TimeSeries]) -> None:
153 """Create a new time series.
154
155 Args:
156 time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to create.
157
158 Returns:
159 None
160
161 Examples:
162 Create a new time series::
163
164 from cognite.client.stable.time_series import TimeSeries
165 client = CogniteClient()
166
167 my_time_series = [TimeSeries(name="my_ts_1")]
168
169 client.time_series.post_time_series(my_time_series)
170 """
171 url = "/timeseries"
172 items = [ts.camel_case_dict() for ts in time_series]
173 body = {"items": items}
174 self._post(url, body=body)
175
176 def update_time_series(self, time_series: List[TimeSeries]) -> None:
177 """Update an existing time series.
178
179 For each field that can be updated, a null value indicates that nothing should be done.
180
181 Args:
182 time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to update.
183
184 Returns:
185 None
186
187 Examples:
188 Update the unit of a time series::
189
190 from cognite.client.stable.time_series import TimeSeries
191 client = CogniteClient()
192
193 my_time_series = [TimeSeries(name="my_ts_1", unit="celsius")]
194
195 client.time_series.update_time_series(my_time_series)
196 """
197 url = "/timeseries"
198 items = [ts.camel_case_dict() for ts in time_series]
199 body = {"items": items}
200 self._put(url, body=body)
201
202 def delete_time_series(self, name) -> None:
203 """Delete a timeseries.
204
205 Args:
206 name (str): Name of timeseries to delete.
207
208 Returns:
209 None
210
211 Examples:
212 Delete a time series by name::
213
214 client = CogniteClient()
215
216 client.time_series.delete_time_series(name="my_ts_1")
217 """
218 url = "/timeseries/{}".format(quote(name, safe=""))
219 self._delete(url)
220
[end of cognite/client/stable/time_series.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cognite/client/stable/time_series.py b/cognite/client/stable/time_series.py
--- a/cognite/client/stable/time_series.py
+++ b/cognite/client/stable/time_series.py
@@ -45,7 +45,7 @@
_RESPONSE_CLASS = TimeSeriesResponse
- def to_pandas(self, include_metadata: bool = False):
+ def to_pandas(self, include_metadata: bool = True):
"""Returns data as a pandas dataframe
Args:
|
{"golden_diff": "diff --git a/cognite/client/stable/time_series.py b/cognite/client/stable/time_series.py\n--- a/cognite/client/stable/time_series.py\n+++ b/cognite/client/stable/time_series.py\n@@ -45,7 +45,7 @@\n \n _RESPONSE_CLASS = TimeSeriesResponse\n \n- def to_pandas(self, include_metadata: bool = False):\n+ def to_pandas(self, include_metadata: bool = True):\n \"\"\"Returns data as a pandas dataframe\n \n Args:\n", "issue": "client.time_series.get_time_series does not return metadata\n**Describe the bug**\r\nWhen executing `client.time_series.get_time_series()` with `include_metadata = True` no metadata is returned.\r\n\r\n**To Reproduce**\r\nRunnable code reproducing the error.\r\n```\r\nimport cognite\r\nimport requests\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom datetime import datetime, timedelta\r\n\r\nfrom cognite.client.stable.time_series import TimeSeries\r\nsm_api = os.environ['SM_API_KEY']\r\nclient = cognite.CogniteClient(api_key = sm_api)\r\nts_name = 'Test_tssssss'\r\nmy_time_series = [TimeSeries(name=ts_name, \r\n description = 'test_description',\r\n metadata = { 'ASSETSCOPENAME' : 'meta_test_1' })]\r\nclient.time_series.post_time_series(my_time_series)\r\n\r\n# create dummy data\r\nnp.random.seed(1338)\r\nstart_time = int((datetime.now()-timedelta(1)).strftime(\"%s\"))\r\ntimestamps = [(start_time + i * 10)*1000 for i in np.arange(11)]\r\ndf = pd.DataFrame({'timestamp' : timestamps})\r\ndf[ts_name] = np.random.random(df.shape[0])\r\nclient.datapoints.post_datapoints_frame(df)\r\n# get time_series\r\nts1 = client.time_series.get_time_series(name = ts_name,\r\n include_metadata = True).to_pandas()\r\nts1_id = ts1['id'].loc[0] \r\nprint(ts1.loc[0])\r\n# no meta data\r\n# requests:\r\n# first with no metadata\r\nr1 = requests.get(url = 'https://api.cognitedata.com/api/0.5/projects/smart-maintenance-sandbox/timeseries/' + str(ts1_id) ,\r\n headers= { 'Api-Key' : sm_api} , params = {\"includeMetadata\" : False})\r\nprint(r1.text.split('\\n'))\r\n# then with metadata\r\nr1 = requests.get(url = 'https://api.cognitedata.com/api/0.5/projects/smart-maintenance-sandbox/timeseries/' + str(ts1_id) ,\r\n headers= { 'Api-Key' : sm_api} , params = {\"includeMetadata\" : True})\r\nprint(r1.text.split('\\n'))\r\n```\r\n**Expected behavior**\r\nThe `client.time_series.get_time_series(name = ts_name,include_metadata = True)` should return the metadata.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom copy import deepcopy\nfrom typing import List\nfrom urllib.parse import quote\n\nimport pandas as pd\n\nfrom cognite.client._api_client import APIClient, CogniteCollectionResponse, CogniteResource, CogniteResponse\n\n\nclass TimeSeriesResponse(CogniteResponse):\n \"\"\"Time series Response Object\"\"\"\n\n def __init__(self, internal_representation):\n super().__init__(internal_representation)\n item = self.to_json()\n self.id = item.get(\"id\")\n self.name = item.get(\"name\")\n self.unit = item.get(\"unit\")\n self.is_step = item.get(\"isStep\")\n self.is_string = item.get(\"isString\")\n self.created_time = item.get(\"createdTime\")\n self.last_updated_time = item.get(\"lastUpdatedTime\")\n self.metadata = item.get(\"metadata\")\n self.asset_id = item.get(\"assetId\")\n self.description = item.get(\"description\")\n\n def to_pandas(self):\n \"\"\"Returns data as a pandas dataframe\"\"\"\n if len(self.to_json()) > 0:\n ts = self.to_json().copy()\n if \"metadata\" in ts:\n # Hack to avoid path ending up as first element in dict as from_dict will 
fail\n metadata = ts.pop(\"metadata\")\n df = pd.DataFrame.from_dict(ts, orient=\"index\")\n df.loc[\"metadata\"] = [metadata]\n else:\n df = pd.DataFrame.from_dict(ts, orient=\"index\")\n return df\n return pd.DataFrame()\n\n\nclass TimeSeriesListResponse(CogniteCollectionResponse):\n \"\"\"Time series Response Object\"\"\"\n\n _RESPONSE_CLASS = TimeSeriesResponse\n\n def to_pandas(self, include_metadata: bool = False):\n \"\"\"Returns data as a pandas dataframe\n\n Args:\n include_metadata (bool): Whether or not to include metadata fields in the resulting dataframe\n \"\"\"\n items = deepcopy(self.internal_representation[\"data\"][\"items\"])\n if items and items[0].get(\"metadata\") is None:\n return pd.DataFrame(items)\n for d in items:\n if d.get(\"metadata\"):\n metadata = d.pop(\"metadata\")\n if include_metadata:\n d.update(metadata)\n return pd.DataFrame(items)\n\n\nclass TimeSeries(CogniteResource):\n \"\"\"Data Transfer Object for a time series.\n\n Args:\n name (str): Unique name of time series.\n is_string (bool): Whether the time series is string valued or not.\n metadata (dict): Metadata.\n unit (str): Physical unit of the time series.\n asset_id (int): Asset that this time series belongs to.\n description (str): Description of the time series.\n security_categories (list(int)): Security categories required in order to access this time series.\n is_step (bool): Whether or not the time series is a step series.\n\n \"\"\"\n\n def __init__(\n self,\n name,\n is_string=False,\n metadata=None,\n unit=None,\n asset_id=None,\n description=None,\n security_categories=None,\n is_step=None,\n ):\n self.name = name\n self.is_string = is_string\n self.metadata = metadata\n self.unit = unit\n self.asset_id = asset_id\n self.description = description\n self.security_categories = security_categories\n self.is_step = is_step\n\n\nclass TimeSeriesClient(APIClient):\n def __init__(self, **kwargs):\n super().__init__(version=\"0.5\", **kwargs)\n\n def get_time_series(\n self, prefix=None, description=None, include_metadata=False, asset_id=None, path=None, **kwargs\n ) -> TimeSeriesListResponse:\n \"\"\"Returns an object containing the requested timeseries.\n\n Args:\n prefix (str): List timeseries with this prefix in the name.\n\n description (str): Filter timeseries taht contains this string in its description.\n\n include_metadata (bool): Decide if the metadata field should be returned or not. Defaults to False.\n\n asset_id (int): Get timeseries related to this asset.\n\n path (List[int]): Get timeseries under this asset path branch.\n\n Keyword Arguments:\n limit (int): Number of results to return.\n\n autopaging (bool): Whether or not to automatically page through results. If set to true, limit will be\n disregarded. 
Defaults to False.\n\n Returns:\n stable.time_series.TimeSeriesListResponse: A data object containing the requested timeseries with several getter methods with different\n output formats.\n\n Examples:\n Get all time series for a given asset::\n\n client = CogniteClient()\n res = client.time_series.get_time_series(asset_id=123, autopaging=True)\n print(res.to_pandas())\n \"\"\"\n autopaging = kwargs.get(\"autopaging\", False)\n url = \"/timeseries\"\n params = {\n \"q\": prefix,\n \"description\": description,\n \"includeMetadata\": include_metadata,\n \"assetId\": asset_id,\n \"path\": str(path) if path else None,\n \"limit\": kwargs.get(\"limit\", self._LIMIT) if not autopaging else self._LIMIT,\n }\n\n res = self._get(url=url, params=params, autopaging=autopaging)\n return TimeSeriesListResponse(res.json())\n\n def post_time_series(self, time_series: List[TimeSeries]) -> None:\n \"\"\"Create a new time series.\n\n Args:\n time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to create.\n\n Returns:\n None\n\n Examples:\n Create a new time series::\n\n from cognite.client.stable.time_series import TimeSeries\n client = CogniteClient()\n\n my_time_series = [TimeSeries(name=\"my_ts_1\")]\n\n client.time_series.post_time_series(my_time_series)\n \"\"\"\n url = \"/timeseries\"\n items = [ts.camel_case_dict() for ts in time_series]\n body = {\"items\": items}\n self._post(url, body=body)\n\n def update_time_series(self, time_series: List[TimeSeries]) -> None:\n \"\"\"Update an existing time series.\n\n For each field that can be updated, a null value indicates that nothing should be done.\n\n Args:\n time_series (list[stable.time_series.TimeSeries]): List of time series data transfer objects to update.\n\n Returns:\n None\n\n Examples:\n Update the unit of a time series::\n\n from cognite.client.stable.time_series import TimeSeries\n client = CogniteClient()\n\n my_time_series = [TimeSeries(name=\"my_ts_1\", unit=\"celsius\")]\n\n client.time_series.update_time_series(my_time_series)\n \"\"\"\n url = \"/timeseries\"\n items = [ts.camel_case_dict() for ts in time_series]\n body = {\"items\": items}\n self._put(url, body=body)\n\n def delete_time_series(self, name) -> None:\n \"\"\"Delete a timeseries.\n\n Args:\n name (str): Name of timeseries to delete.\n\n Returns:\n None\n\n Examples:\n Delete a time series by name::\n\n client = CogniteClient()\n\n client.time_series.delete_time_series(name=\"my_ts_1\")\n \"\"\"\n url = \"/timeseries/{}\".format(quote(name, safe=\"\"))\n self._delete(url)\n", "path": "cognite/client/stable/time_series.py"}]}
| 3,198 | 113 |
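The one-line golden diff above flips the default of `TimeSeriesListResponse.to_pandas(include_metadata=...)` from `False` to `True`, so the metadata dict the API already returned is no longer dropped during the dataframe conversion. A self-contained sketch of that flattening step, using stand-in dictionaries rather than the real SDK response objects:

```python
import pandas as pd

def items_to_pandas(items, include_metadata=True):
    """Mimic the patched to_pandas(): promote each item's metadata dict to columns."""
    rows = []
    for item in items:
        row = dict(item)                      # copy so the input list is untouched
        metadata = row.pop("metadata", None)  # metadata arrives as a nested dict
        if metadata and include_metadata:
            row.update(metadata)              # metadata keys become dataframe columns
        rows.append(row)
    return pd.DataFrame(rows)

items = [{"id": 1, "name": "Test_tssssss", "metadata": {"ASSETSCOPENAME": "meta_test_1"}}]
print(items_to_pandas(items).columns.tolist())                          # ['id', 'name', 'ASSETSCOPENAME']
print(items_to_pandas(items, include_metadata=False).columns.tolist())  # ['id', 'name']
```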
gh_patches_debug_5030
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1544
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: test_numerical_operations fails under pandas 1.1.0
[Failure](https://travis-ci.org/github/geopandas/geopandas/jobs/712675986#L2140) in `test_numerical_operations` (`idxmin` error type) -> pandas-dev/pandas#32749. Even though it is a known regression in pandas, we should handle it (at least skip tests for now).
</issue>
<code>
[start of geopandas/_compat.py]
1 from distutils.version import LooseVersion
2 import importlib
3 import os
4 import warnings
5
6 import pandas as pd
7 import shapely
8
9 # -----------------------------------------------------------------------------
10 # pandas compat
11 # -----------------------------------------------------------------------------
12
13 PANDAS_GE_025 = str(pd.__version__) >= LooseVersion("0.25.0")
14 PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("0.26.0.dev")
15 PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0.dev")
16
17
18 # -----------------------------------------------------------------------------
19 # Shapely / PyGEOS compat
20 # -----------------------------------------------------------------------------
21
22
23 SHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion("1.7.0")
24
25 HAS_PYGEOS = None
26 USE_PYGEOS = None
27 PYGEOS_SHAPELY_COMPAT = None
28
29 try:
30 import pygeos # noqa
31
32 HAS_PYGEOS = True
33 except ImportError:
34 HAS_PYGEOS = False
35
36
37 def set_use_pygeos(val=None):
38 """
39 Set the global configuration on whether to use PyGEOS or not.
40
41 The default is use PyGEOS if it is installed. This can be overridden
42 with an environment variable USE_PYGEOS (this is only checked at
43 first import, cannot be changed during interactive session).
44
45 Alternatively, pass a value here to force a True/False value.
46 """
47 global USE_PYGEOS
48 global PYGEOS_SHAPELY_COMPAT
49
50 if val is not None:
51 USE_PYGEOS = bool(val)
52 else:
53 if USE_PYGEOS is None:
54
55 USE_PYGEOS = HAS_PYGEOS
56
57 env_use_pygeos = os.getenv("USE_PYGEOS", None)
58 if env_use_pygeos is not None:
59 USE_PYGEOS = bool(int(env_use_pygeos))
60
61 # validate the pygeos version
62 if USE_PYGEOS:
63 try:
64 import pygeos # noqa
65
66 # validate the pygeos version
67 if not str(pygeos.__version__) >= LooseVersion("0.6"):
68 raise ImportError(
69 "PyGEOS >= 0.6 is required, version {0} is installed".format(
70 pygeos.__version__
71 )
72 )
73
74 # Check whether Shapely and PyGEOS use the same GEOS version.
75 # Based on PyGEOS from_shapely implementation.
76
77 from shapely.geos import geos_version_string as shapely_geos_version
78 from pygeos import geos_capi_version_string
79
80 # shapely has something like: "3.6.2-CAPI-1.10.2 4d2925d6"
81 # pygeos has something like: "3.6.2-CAPI-1.10.2"
82 if not shapely_geos_version.startswith(geos_capi_version_string):
83 warnings.warn(
84 "The Shapely GEOS version ({}) is incompatible with the GEOS "
85 "version PyGEOS was compiled with ({}). Conversions between both "
86 "will be slow.".format(
87 shapely_geos_version, geos_capi_version_string
88 )
89 )
90 PYGEOS_SHAPELY_COMPAT = False
91 else:
92 PYGEOS_SHAPELY_COMPAT = True
93
94 except ImportError:
95 raise ImportError(
96 "To use the PyGEOS speed-ups within GeoPandas, you need to install "
97 "PyGEOS: 'conda install pygeos' or 'pip install pygeos'"
98 )
99
100
101 set_use_pygeos()
102
103
104 def import_optional_dependency(name: str, extra: str = ""):
105 """
106 Import an optional dependency.
107
108 Adapted from pandas.compat._optional::import_optional_dependency
109
110 Raises a formatted ImportError if the module is not present.
111
112 Parameters
113 ----------
114 name : str
115 The module name.
116 extra : str
117 Additional text to include in the ImportError message.
118 Returns
119 -------
120 module
121 """
122 msg = """Missing optional dependency '{name}'. {extra} "
123 "Use pip or conda to install {name}.""".format(
124 name=name, extra=extra
125 )
126
127 if not isinstance(name, str):
128 raise ValueError(
129 "Invalid module name: '{name}'; must be a string".format(name=name)
130 )
131
132 try:
133 module = importlib.import_module(name)
134
135 except ImportError:
136 raise ImportError(msg) from None
137
138 return module
139
140
141 # -----------------------------------------------------------------------------
142 # RTree compat
143 # -----------------------------------------------------------------------------
144
145 HAS_RTREE = None
146 RTREE_GE_094 = False
147 try:
148 import rtree # noqa
149
150 HAS_RTREE = True
151 except ImportError:
152 HAS_RTREE = False
153
[end of geopandas/_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/_compat.py b/geopandas/_compat.py
--- a/geopandas/_compat.py
+++ b/geopandas/_compat.py
@@ -12,7 +12,7 @@
PANDAS_GE_025 = str(pd.__version__) >= LooseVersion("0.25.0")
PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("0.26.0.dev")
-PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0.dev")
+PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0")
# -----------------------------------------------------------------------------
|
{"golden_diff": "diff --git a/geopandas/_compat.py b/geopandas/_compat.py\n--- a/geopandas/_compat.py\n+++ b/geopandas/_compat.py\n@@ -12,7 +12,7 @@\n \n PANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\n PANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\n-PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0.dev\")\n+PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0\")\n \n \n # -----------------------------------------------------------------------------\n", "issue": "BUG: test_numerical_operations fails under pandas 1.1.0\n[Failure](https://travis-ci.org/github/geopandas/geopandas/jobs/712675986#L2140) in `test_numerical_operations` (`idxmin` error type) -> pandas-dev/pandas#32749. Even though it is a known regression in pandas, we should handle it (at least skip tests for now).\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport importlib\nimport os\nimport warnings\n\nimport pandas as pd\nimport shapely\n\n# -----------------------------------------------------------------------------\n# pandas compat\n# -----------------------------------------------------------------------------\n\nPANDAS_GE_025 = str(pd.__version__) >= LooseVersion(\"0.25.0\")\nPANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"0.26.0.dev\")\nPANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0.dev\")\n\n\n# -----------------------------------------------------------------------------\n# Shapely / PyGEOS compat\n# -----------------------------------------------------------------------------\n\n\nSHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion(\"1.7.0\")\n\nHAS_PYGEOS = None\nUSE_PYGEOS = None\nPYGEOS_SHAPELY_COMPAT = None\n\ntry:\n import pygeos # noqa\n\n HAS_PYGEOS = True\nexcept ImportError:\n HAS_PYGEOS = False\n\n\ndef set_use_pygeos(val=None):\n \"\"\"\n Set the global configuration on whether to use PyGEOS or not.\n\n The default is use PyGEOS if it is installed. This can be overridden\n with an environment variable USE_PYGEOS (this is only checked at\n first import, cannot be changed during interactive session).\n\n Alternatively, pass a value here to force a True/False value.\n \"\"\"\n global USE_PYGEOS\n global PYGEOS_SHAPELY_COMPAT\n\n if val is not None:\n USE_PYGEOS = bool(val)\n else:\n if USE_PYGEOS is None:\n\n USE_PYGEOS = HAS_PYGEOS\n\n env_use_pygeos = os.getenv(\"USE_PYGEOS\", None)\n if env_use_pygeos is not None:\n USE_PYGEOS = bool(int(env_use_pygeos))\n\n # validate the pygeos version\n if USE_PYGEOS:\n try:\n import pygeos # noqa\n\n # validate the pygeos version\n if not str(pygeos.__version__) >= LooseVersion(\"0.6\"):\n raise ImportError(\n \"PyGEOS >= 0.6 is required, version {0} is installed\".format(\n pygeos.__version__\n )\n )\n\n # Check whether Shapely and PyGEOS use the same GEOS version.\n # Based on PyGEOS from_shapely implementation.\n\n from shapely.geos import geos_version_string as shapely_geos_version\n from pygeos import geos_capi_version_string\n\n # shapely has something like: \"3.6.2-CAPI-1.10.2 4d2925d6\"\n # pygeos has something like: \"3.6.2-CAPI-1.10.2\"\n if not shapely_geos_version.startswith(geos_capi_version_string):\n warnings.warn(\n \"The Shapely GEOS version ({}) is incompatible with the GEOS \"\n \"version PyGEOS was compiled with ({}). 
Conversions between both \"\n \"will be slow.\".format(\n shapely_geos_version, geos_capi_version_string\n )\n )\n PYGEOS_SHAPELY_COMPAT = False\n else:\n PYGEOS_SHAPELY_COMPAT = True\n\n except ImportError:\n raise ImportError(\n \"To use the PyGEOS speed-ups within GeoPandas, you need to install \"\n \"PyGEOS: 'conda install pygeos' or 'pip install pygeos'\"\n )\n\n\nset_use_pygeos()\n\n\ndef import_optional_dependency(name: str, extra: str = \"\"):\n \"\"\"\n Import an optional dependency.\n\n Adapted from pandas.compat._optional::import_optional_dependency\n\n Raises a formatted ImportError if the module is not present.\n\n Parameters\n ----------\n name : str\n The module name.\n extra : str\n Additional text to include in the ImportError message.\n Returns\n -------\n module\n \"\"\"\n msg = \"\"\"Missing optional dependency '{name}'. {extra} \"\n \"Use pip or conda to install {name}.\"\"\".format(\n name=name, extra=extra\n )\n\n if not isinstance(name, str):\n raise ValueError(\n \"Invalid module name: '{name}'; must be a string\".format(name=name)\n )\n\n try:\n module = importlib.import_module(name)\n\n except ImportError:\n raise ImportError(msg) from None\n\n return module\n\n\n# -----------------------------------------------------------------------------\n# RTree compat\n# -----------------------------------------------------------------------------\n\nHAS_RTREE = None\nRTREE_GE_094 = False\ntry:\n import rtree # noqa\n\n HAS_RTREE = True\nexcept ImportError:\n HAS_RTREE = False\n", "path": "geopandas/_compat.py"}]}
| 2,034 | 150 |
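The golden diff above tightens the `PANDAS_GE_11` threshold from `"1.1.0.dev"` to `"1.1.0"`. Under `LooseVersion`, the released version string `"1.1.0"` compares as less than `"1.1.0.dev"` (its component list is a prefix of the longer one), so the old sentinel left the flag `False` on the final pandas 1.1.0 release. A two-line check of that comparison (it uses `distutils`, the same helper `geopandas/_compat.py` imports; note distutils was later removed from the stdlib in Python 3.12):

```python
from distutils.version import LooseVersion  # same helper used by geopandas/_compat.py

print(LooseVersion("1.1.0") >= LooseVersion("1.1.0.dev"))  # False: released pandas 1.1.0 missed the old check
print(LooseVersion("1.1.0") >= LooseVersion("1.1.0"))      # True:  the patched threshold matches the release
```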
gh_patches_debug_18787
|
rasdani/github-patches
|
git_diff
|
plotly__dash-964
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clientside PreventUpdate and no_update
It would be helpful for clientside callbacks to be able to conditionally update components. This is not supported right now (as far as I can tell). This should be pretty simple, my idea would be to define
```js
window.dash_clientside.no_update = {};
```
then in `updateClientsideOutput` just conditionally block updating by adding
```js
if(window.dash_clientside.no_update && outputValue === window.dash_clientside.no_update)
return;
```
Similarly we could define `window.dash_clientside.PreventUpdate` and allow the use of
```js
throw window.dash_clientside.PreventUpdate;
```
If this seems reasonable I could put together a PR.
</issue>
<code>
[start of dash/dependencies.py]
1 class DashDependency:
2 # pylint: disable=too-few-public-methods
3 def __init__(self, component_id, component_property):
4 self.component_id = component_id
5 self.component_property = component_property
6
7 def __str__(self):
8 return '{}.{}'.format(
9 self.component_id,
10 self.component_property
11 )
12
13 def __repr__(self):
14 return '<{} `{}`>'.format(self.__class__.__name__, self)
15
16 def __eq__(self, other):
17 return isinstance(other, DashDependency) and str(self) == str(other)
18
19 def __hash__(self):
20 return hash(str(self))
21
22
23 class Output(DashDependency): # pylint: disable=too-few-public-methods
24 """Output of a callback."""
25
26
27 class Input(DashDependency): # pylint: disable=too-few-public-methods
28 """Input of callback trigger an update when it is updated."""
29
30
31 class State(DashDependency): # pylint: disable=too-few-public-methods
32 """Use the value of a state in a callback but don't trigger updates."""
33
34
35 class ClientsideFunction:
36 # pylint: disable=too-few-public-methods
37 def __init__(self, namespace=None, function_name=None):
38 self.namespace = namespace
39 self.function_name = function_name
40
41 def __repr__(self):
42 return 'ClientsideFunction({}, {})'.format(
43 self.namespace,
44 self.function_name
45 )
46
[end of dash/dependencies.py]
[start of dash/_utils.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3 import shlex
4 import sys
5 import uuid
6 import hashlib
7 import collections
8 import subprocess
9 import logging
10 from io import open # pylint: disable=redefined-builtin
11 from functools import wraps
12 import future.utils as utils
13
14 logger = logging.getLogger()
15
16
17 def interpolate_str(template, **data):
18 s = template
19 for k, v in data.items():
20 key = "{%" + k + "%}"
21 s = s.replace(key, v)
22 return s
23
24
25 def format_tag(tag_name, attributes, inner="", closed=False, opened=False):
26 tag = "<{tag} {attributes}"
27 if closed:
28 tag += "/>"
29 elif opened:
30 tag += ">"
31 else:
32 tag += ">" + inner + "</{tag}>"
33 return tag.format(
34 tag=tag_name,
35 attributes=" ".join(
36 ['{}="{}"'.format(k, v) for k, v in attributes.items()]
37 ),
38 )
39
40
41 def generate_hash():
42 return str(uuid.uuid4().hex).strip("-")
43
44
45 def get_asset_path(requests_pathname, asset_path, asset_url_path):
46
47 return "/".join(
48 [
49 # Only take the first part of the pathname
50 requests_pathname.rstrip("/"),
51 asset_url_path,
52 asset_path,
53 ]
54 )
55
56
57 # pylint: disable=no-member
58 def patch_collections_abc(member):
59 return getattr(collections if utils.PY2 else collections.abc, member)
60
61
62 class AttributeDict(dict):
63 """Dictionary subclass enabling attribute lookup/assignment of keys/values.
64
65 For example::
66 >>> m = AttributeDict({'foo': 'bar'})
67 >>> m.foo
68 'bar'
69 >>> m.foo = 'not bar'
70 >>> m['foo']
71 'not bar'
72 ``AttributeDict`` objects also provide ``.first()`` which acts like
73 ``.get()`` but accepts multiple keys as arguments, and returns the value of
74 the first hit, e.g.::
75 >>> m = AttributeDict({'foo': 'bar', 'biz': 'baz'})
76 >>> m.first('wrong', 'incorrect', 'foo', 'biz')
77 'bar'
78 """
79
80 def __setattr__(self, key, value):
81 self[key] = value
82
83 def __getattr__(self, key):
84 try:
85 return self[key]
86 except KeyError:
87 pass
88 # to conform with __getattr__ spec
89 # but get out of the except block so it doesn't look like a nested err
90 raise AttributeError(key)
91
92 def set_read_only(self, names, msg="Attribute is read-only"):
93 object.__setattr__(self, "_read_only", names)
94 object.__setattr__(self, "_read_only_msg", msg)
95
96 def finalize(self, msg="Object is final: No new keys may be added."):
97 """Prevent any new keys being set."""
98 object.__setattr__(self, "_final", msg)
99
100 def __setitem__(self, key, val):
101 if key in self.__dict__.get("_read_only", []):
102 raise AttributeError(self._read_only_msg, key)
103
104 final_msg = self.__dict__.get("_final")
105 if final_msg and key not in self:
106 raise AttributeError(final_msg, key)
107
108 return super(AttributeDict, self).__setitem__(key, val)
109
110 # pylint: disable=inconsistent-return-statements
111 def first(self, *names):
112 for name in names:
113 value = self.get(name)
114 if value:
115 return value
116
117
118 def create_callback_id(output):
119 if isinstance(output, (list, tuple)):
120 return "..{}..".format(
121 "...".join(
122 "{}.{}".format(x.component_id, x.component_property)
123 for x in output
124 )
125 )
126
127 return "{}.{}".format(output.component_id, output.component_property)
128
129
130 def run_command_with_process(cmd):
131 proc = subprocess.Popen(shlex.split(cmd, posix=sys.platform != "win32"))
132 proc.wait()
133 if proc.poll() is None:
134 logger.warning("🚨 trying to terminate subprocess in safe way")
135 try:
136 proc.communicate()
137 except Exception: # pylint: disable=broad-except
138 logger.exception("🚨 first try communicate failed")
139 proc.kill()
140 proc.communicate()
141
142
143 def compute_md5(path):
144 with open(path, encoding="utf-8") as fp:
145 return hashlib.md5(fp.read().encode("utf-8")).hexdigest()
146
147
148 def job(msg=""):
149 def wrapper(func):
150 @wraps(func)
151 def _wrapper(*args, **kwargs):
152 logger.info("🏗️ [%s] 🏗️️ - %s", func.__name__, msg)
153 res = func(*args, **kwargs)
154 logger.info("::: 🍻🍻🍻 [%s] job done 🍻🍻🍻 :::", func.__name__)
155 return res
156
157 return _wrapper
158
159 return wrapper
160
[end of dash/_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dash/_utils.py b/dash/_utils.py
--- a/dash/_utils.py
+++ b/dash/_utils.py
@@ -128,7 +128,8 @@
def run_command_with_process(cmd):
- proc = subprocess.Popen(shlex.split(cmd, posix=sys.platform != "win32"))
+ is_win = sys.platform == "win32"
+ proc = subprocess.Popen(shlex.split(cmd, posix=is_win), shell=is_win)
proc.wait()
if proc.poll() is None:
logger.warning("🚨 trying to terminate subprocess in safe way")
diff --git a/dash/dependencies.py b/dash/dependencies.py
--- a/dash/dependencies.py
+++ b/dash/dependencies.py
@@ -35,6 +35,11 @@
class ClientsideFunction:
# pylint: disable=too-few-public-methods
def __init__(self, namespace=None, function_name=None):
+
+ if namespace in ['PreventUpdate', 'no_update']:
+ raise ValueError('"{}" is a forbidden namespace in'
+ ' dash_clientside.'.format(namespace))
+
self.namespace = namespace
self.function_name = function_name
|
{"golden_diff": "diff --git a/dash/_utils.py b/dash/_utils.py\n--- a/dash/_utils.py\n+++ b/dash/_utils.py\n@@ -128,7 +128,8 @@\n \n \n def run_command_with_process(cmd):\n- proc = subprocess.Popen(shlex.split(cmd, posix=sys.platform != \"win32\"))\n+ is_win = sys.platform == \"win32\"\n+ proc = subprocess.Popen(shlex.split(cmd, posix=is_win), shell=is_win)\n proc.wait()\n if proc.poll() is None:\n logger.warning(\"\ud83d\udea8 trying to terminate subprocess in safe way\")\ndiff --git a/dash/dependencies.py b/dash/dependencies.py\n--- a/dash/dependencies.py\n+++ b/dash/dependencies.py\n@@ -35,6 +35,11 @@\n class ClientsideFunction:\n # pylint: disable=too-few-public-methods\n def __init__(self, namespace=None, function_name=None):\n+\n+ if namespace in ['PreventUpdate', 'no_update']:\n+ raise ValueError('\"{}\" is a forbidden namespace in'\n+ ' dash_clientside.'.format(namespace))\n+\n self.namespace = namespace\n self.function_name = function_name\n", "issue": "Clientside PreventUpdate and no_update\nIt would be helpful for clientside callbacks to be able to conditionally update components. This is not supported right now (as far as I can tell). This should be pretty simple, my idea would be to define\r\n\r\n```js\r\nwindow.dash_clientside.no_update = {};\r\n```\r\n\r\nthen in `updateClientsideOutput` just conditionally block updating by adding\r\n\r\n```js\r\nif(window.dash_clientside.no_update && outputValue === window.dash_clientside.no_update)\r\n return;\r\n```\r\n\r\nSimilarly we could define `window.dash_clientside.PreventUpdate` and allow the use of\r\n```js\r\nthrow window.dash_clientside.PreventUpdate;\r\n``` \r\nIf this seems reasonable I could put together a PR.\r\n\n", "before_files": [{"content": "class DashDependency:\n # pylint: disable=too-few-public-methods\n def __init__(self, component_id, component_property):\n self.component_id = component_id\n self.component_property = component_property\n\n def __str__(self):\n return '{}.{}'.format(\n self.component_id,\n self.component_property\n )\n\n def __repr__(self):\n return '<{} `{}`>'.format(self.__class__.__name__, self)\n\n def __eq__(self, other):\n return isinstance(other, DashDependency) and str(self) == str(other)\n\n def __hash__(self):\n return hash(str(self))\n\n\nclass Output(DashDependency): # pylint: disable=too-few-public-methods\n \"\"\"Output of a callback.\"\"\"\n\n\nclass Input(DashDependency): # pylint: disable=too-few-public-methods\n \"\"\"Input of callback trigger an update when it is updated.\"\"\"\n\n\nclass State(DashDependency): # pylint: disable=too-few-public-methods\n \"\"\"Use the value of a state in a callback but don't trigger updates.\"\"\"\n\n\nclass ClientsideFunction:\n # pylint: disable=too-few-public-methods\n def __init__(self, namespace=None, function_name=None):\n self.namespace = namespace\n self.function_name = function_name\n\n def __repr__(self):\n return 'ClientsideFunction({}, {})'.format(\n self.namespace,\n self.function_name\n )\n", "path": "dash/dependencies.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport shlex\nimport sys\nimport uuid\nimport hashlib\nimport collections\nimport subprocess\nimport logging\nfrom io import open # pylint: disable=redefined-builtin\nfrom functools import wraps\nimport future.utils as utils\n\nlogger = logging.getLogger()\n\n\ndef interpolate_str(template, **data):\n s = template\n for k, v in data.items():\n key = \"{%\" + k + \"%}\"\n s = s.replace(key, v)\n return s\n\n\ndef format_tag(tag_name, 
attributes, inner=\"\", closed=False, opened=False):\n tag = \"<{tag} {attributes}\"\n if closed:\n tag += \"/>\"\n elif opened:\n tag += \">\"\n else:\n tag += \">\" + inner + \"</{tag}>\"\n return tag.format(\n tag=tag_name,\n attributes=\" \".join(\n ['{}=\"{}\"'.format(k, v) for k, v in attributes.items()]\n ),\n )\n\n\ndef generate_hash():\n return str(uuid.uuid4().hex).strip(\"-\")\n\n\ndef get_asset_path(requests_pathname, asset_path, asset_url_path):\n\n return \"/\".join(\n [\n # Only take the first part of the pathname\n requests_pathname.rstrip(\"/\"),\n asset_url_path,\n asset_path,\n ]\n )\n\n\n# pylint: disable=no-member\ndef patch_collections_abc(member):\n return getattr(collections if utils.PY2 else collections.abc, member)\n\n\nclass AttributeDict(dict):\n \"\"\"Dictionary subclass enabling attribute lookup/assignment of keys/values.\n\n For example::\n >>> m = AttributeDict({'foo': 'bar'})\n >>> m.foo\n 'bar'\n >>> m.foo = 'not bar'\n >>> m['foo']\n 'not bar'\n ``AttributeDict`` objects also provide ``.first()`` which acts like\n ``.get()`` but accepts multiple keys as arguments, and returns the value of\n the first hit, e.g.::\n >>> m = AttributeDict({'foo': 'bar', 'biz': 'baz'})\n >>> m.first('wrong', 'incorrect', 'foo', 'biz')\n 'bar'\n \"\"\"\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n pass\n # to conform with __getattr__ spec\n # but get out of the except block so it doesn't look like a nested err\n raise AttributeError(key)\n\n def set_read_only(self, names, msg=\"Attribute is read-only\"):\n object.__setattr__(self, \"_read_only\", names)\n object.__setattr__(self, \"_read_only_msg\", msg)\n\n def finalize(self, msg=\"Object is final: No new keys may be added.\"):\n \"\"\"Prevent any new keys being set.\"\"\"\n object.__setattr__(self, \"_final\", msg)\n\n def __setitem__(self, key, val):\n if key in self.__dict__.get(\"_read_only\", []):\n raise AttributeError(self._read_only_msg, key)\n\n final_msg = self.__dict__.get(\"_final\")\n if final_msg and key not in self:\n raise AttributeError(final_msg, key)\n\n return super(AttributeDict, self).__setitem__(key, val)\n\n # pylint: disable=inconsistent-return-statements\n def first(self, *names):\n for name in names:\n value = self.get(name)\n if value:\n return value\n\n\ndef create_callback_id(output):\n if isinstance(output, (list, tuple)):\n return \"..{}..\".format(\n \"...\".join(\n \"{}.{}\".format(x.component_id, x.component_property)\n for x in output\n )\n )\n\n return \"{}.{}\".format(output.component_id, output.component_property)\n\n\ndef run_command_with_process(cmd):\n proc = subprocess.Popen(shlex.split(cmd, posix=sys.platform != \"win32\"))\n proc.wait()\n if proc.poll() is None:\n logger.warning(\"\ud83d\udea8 trying to terminate subprocess in safe way\")\n try:\n proc.communicate()\n except Exception: # pylint: disable=broad-except\n logger.exception(\"\ud83d\udea8 first try communicate failed\")\n proc.kill()\n proc.communicate()\n\n\ndef compute_md5(path):\n with open(path, encoding=\"utf-8\") as fp:\n return hashlib.md5(fp.read().encode(\"utf-8\")).hexdigest()\n\n\ndef job(msg=\"\"):\n def wrapper(func):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n logger.info(\"\ud83c\udfd7\ufe0f [%s] \ud83c\udfd7\ufe0f\ufe0f - %s\", func.__name__, msg)\n res = func(*args, **kwargs)\n logger.info(\"::: \ud83c\udf7b\ud83c\udf7b\ud83c\udf7b [%s] job done \ud83c\udf7b\ud83c\udf7b\ud83c\udf7b :::\", func.__name__)\n 
return res\n\n return _wrapper\n\n return wrapper\n", "path": "dash/_utils.py"}]}
| 2,552 | 261 |
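Besides the Windows `subprocess` tweak in `dash/_utils.py`, the golden diff above reserves the `PreventUpdate` and `no_update` namespaces so user clientside functions cannot shadow the sentinels proposed in the issue. A stand-alone sketch of that guard (a stripped-down stand-in, not the installed `dash` package):

```python
class ClientsideFunction:
    """Minimal stand-in reproducing only the namespace guard from the patch."""

    def __init__(self, namespace=None, function_name=None):
        if namespace in ['PreventUpdate', 'no_update']:
            raise ValueError('"{}" is a forbidden namespace in'
                             ' dash_clientside.'.format(namespace))
        self.namespace = namespace
        self.function_name = function_name

ClientsideFunction('my_clientside_library', 'update_figure')   # accepted
try:
    ClientsideFunction('no_update', 'anything')                 # reserved for the sentinel
except ValueError as err:
    print(err)   # "no_update" is a forbidden namespace in dash_clientside.
```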
gh_patches_debug_1883
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-2384
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MN failing since at least 2018-06-15
MN has been failing since 2018-06-15
Based on automated runs it appears that MN has not run successfully in 2 days (2018-06-15).
```
/opt/openstates/venv-pupa/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.
""")
02:01:53 CRITICAL pupa: Session(s) 91st Legislature, 2019-2020 were reported by Minnesota.get_session_list() but were not found in Minnesota.legislative_sessions or Minnesota.ignored_scraped_sessions.
loaded Open States pupa settings...
mn (scrape, import)
bills: {}
committees: {}
people: {}
vote_events: {}
```
Visit http://bobsled.openstates.org for more info.
</issue>
<code>
[start of openstates/mn/__init__.py]
1 from pupa.scrape import Jurisdiction, Organization
2
3 from openstates.utils import url_xpath
4
5 from .bills import MNBillScraper
6 from .committees import MNCommitteeScraper
7 from .people import MNPersonScraper
8 from .vote_events import MNVoteScraper
9 # from .events import MNEventScraper
10
11 """
12 Minnesota legislative data can be found at the Office of the Revisor
13 of Statutes:
14 https://www.revisor.mn.gov/
15
16 Votes:
17 There are not detailed vote data for Senate votes, simply yes and no counts.
18 Bill pages have vote counts and links to House details, so it makes more
19 sense to get vote data from the bill pages.
20 """
21
22
23 class Minnesota(Jurisdiction):
24 division_id = "ocd-division/country:us/state:mn"
25 classification = "government"
26 name = "Minnesota"
27 url = "http://state.mn.us/"
28 scrapers = {
29 "bills": MNBillScraper,
30 "committees": MNCommitteeScraper,
31 "people": MNPersonScraper,
32 "vote_events": MNVoteScraper,
33 # "events": MNEventScraper,
34 }
35 legislative_sessions = [
36 {
37 '_scraped_name': '86th Legislature, 2009-2010',
38 'classification': 'primary',
39 'identifier': '2009-2010',
40 'name': '2009-2010 Regular Session'
41 },
42 {
43 '_scraped_name': '86th Legislature, 2010 1st Special Session',
44 'classification': 'special',
45 'identifier': '2010 1st Special Session',
46 'name': '2010, 1st Special Session'
47 },
48 {
49 '_scraped_name': '86th Legislature, 2010 2nd Special Session',
50 'classification': 'special',
51 'identifier': '2010 2nd Special Session',
52 'name': '2010, 2nd Special Session'
53 },
54 {
55 '_scraped_name': '87th Legislature, 2011-2012',
56 'classification': 'primary',
57 'identifier': '2011-2012',
58 'name': '2011-2012 Regular Session'
59 },
60 {
61 '_scraped_name': '87th Legislature, 2011 1st Special Session',
62 'classification': 'special',
63 'identifier': '2011s1',
64 'name': '2011, 1st Special Session'
65 },
66 {
67 '_scraped_name': '87th Legislature, 2012 1st Special Session',
68 'classification': 'special',
69 'identifier': '2012s1',
70 'name': '2012, 1st Special Session'
71 },
72 {
73 '_scraped_name': '88th Legislature, 2013-2014',
74 'classification': 'primary',
75 'identifier': '2013-2014',
76 'name': '2013-2014 Regular Session'
77 },
78 {
79 '_scraped_name': '88th Legislature, 2013 1st Special Session',
80 'classification': 'special',
81 'identifier': '2013s1',
82 'name': '2013, 1st Special Session'
83 },
84 {
85 '_scraped_name': '89th Legislature, 2015-2016',
86 'classification': 'primary',
87 'identifier': '2015-2016',
88 'name': '2015-2016 Regular Session'
89 },
90 {
91 '_scraped_name': '89th Legislature, 2015 1st Special Session',
92 'classification': 'special',
93 'identifier': '2015s1',
94 'name': '2015, 1st Special Session'
95 },
96 {
97 '_scraped_name': '90th Legislature, 2017 1st Special Session',
98 'classification': 'special',
99 'identifier': '2017s1',
100 'name': '2017, 1st Special Session'
101 },
102 {
103 '_scraped_name': '90th Legislature, 2017-2018',
104 'classification': 'primary',
105 'identifier': '2017-2018',
106 'name': '2017-2018 Regular Session',
107 'start_date': '2017-01-03',
108 'end_date': '2018-05-21'
109 },
110 ]
111 ignored_scraped_sessions = [
112 '85th Legislature, 2007-2008',
113 '85th Legislature, 2007 1st Special Session',
114 '84th Legislature, 2005-2006',
115 '84th Legislature, 2005 1st Special Session',
116 '83rd Legislature, 2003-2004',
117 '83rd Legislature, 2003 1st Special Session',
118 '82nd Legislature, 2001-2002',
119 '82nd Legislature, 2002 1st Special Session',
120 '82nd Legislature, 2001 1st Special Session',
121 '81st Legislature, 1999-2000',
122 '80th Legislature, 1997-1998',
123 '80th Legislature, 1998 1st Special Session',
124 '80th Legislature, 1997 3rd Special Session',
125 '80th Legislature, 1997 2nd Special Session',
126 '80th Legislature, 1997 1st Special Session',
127 '79th Legislature, 1995-1996',
128 '79th Legislature, 1995 1st Special Session',
129 '89th Legislature, 2015-2016',
130 ]
131
132 def get_organizations(self):
133 legis = Organization('Minnesota Legislature', classification='legislature')
134
135 upper = Organization('Minnesota Senate', classification='upper',
136 parent_id=legis._id)
137 lower = Organization('Minnesota House of Representatives',
138 classification='lower', parent_id=legis._id)
139
140 for n in range(1, 68):
141 upper.add_post(label=str(n), role='Senator',
142 division_id='ocd-division/country:us/state:mn/sldu:{}'.format(n))
143 lower.add_post(label=str(n) + 'A', role='Representative',
144 division_id='ocd-division/country:us/state:mn/sldl:{}a'.format(n))
145 lower.add_post(label=str(n) + 'B', role='Representative',
146 division_id='ocd-division/country:us/state:mn/sldl:{}b'.format(n))
147
148 yield Organization('Governor of Minnesota', classification='executive')
149 yield legis
150 yield upper
151 yield lower
152
153 def get_session_list(self):
154 return url_xpath('https://www.revisor.mn.gov/bills/'
155 'status_search.php?body=House',
156 '//select[@name="session"]/option/text()', verify=False)
157
[end of openstates/mn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openstates/mn/__init__.py b/openstates/mn/__init__.py
--- a/openstates/mn/__init__.py
+++ b/openstates/mn/__init__.py
@@ -127,6 +127,7 @@
'79th Legislature, 1995-1996',
'79th Legislature, 1995 1st Special Session',
'89th Legislature, 2015-2016',
+ '91st Legislature, 2019-2020',
]
def get_organizations(self):
|
{"golden_diff": "diff --git a/openstates/mn/__init__.py b/openstates/mn/__init__.py\n--- a/openstates/mn/__init__.py\n+++ b/openstates/mn/__init__.py\n@@ -127,6 +127,7 @@\n '79th Legislature, 1995-1996',\n '79th Legislature, 1995 1st Special Session',\n '89th Legislature, 2015-2016',\n+ '91st Legislature, 2019-2020',\n ]\n \n def get_organizations(self):\n", "issue": "MN failing since at least 2018-06-15\nMN has been failing since 2018-06-15\n\nBased on automated runs it appears that MN has not run successfully in 2 days (2018-06-15).\n\n\n```\n /opt/openstates/venv-pupa/lib/python3.6/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\n \"\"\")\n02:01:53 CRITICAL pupa: Session(s) 91st Legislature, 2019-2020 were reported by Minnesota.get_session_list() but were not found in Minnesota.legislative_sessions or Minnesota.ignored_scraped_sessions.\nloaded Open States pupa settings...\nmn (scrape, import)\n bills: {}\n committees: {}\n people: {}\n vote_events: {}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\n\nfrom openstates.utils import url_xpath\n\nfrom .bills import MNBillScraper\nfrom .committees import MNCommitteeScraper\nfrom .people import MNPersonScraper\nfrom .vote_events import MNVoteScraper\n# from .events import MNEventScraper\n\n\"\"\"\nMinnesota legislative data can be found at the Office of the Revisor\nof Statutes:\nhttps://www.revisor.mn.gov/\n\nVotes:\nThere are not detailed vote data for Senate votes, simply yes and no counts.\nBill pages have vote counts and links to House details, so it makes more\nsense to get vote data from the bill pages.\n\"\"\"\n\n\nclass Minnesota(Jurisdiction):\n division_id = \"ocd-division/country:us/state:mn\"\n classification = \"government\"\n name = \"Minnesota\"\n url = \"http://state.mn.us/\"\n scrapers = {\n \"bills\": MNBillScraper,\n \"committees\": MNCommitteeScraper,\n \"people\": MNPersonScraper,\n \"vote_events\": MNVoteScraper,\n # \"events\": MNEventScraper,\n }\n legislative_sessions = [\n {\n '_scraped_name': '86th Legislature, 2009-2010',\n 'classification': 'primary',\n 'identifier': '2009-2010',\n 'name': '2009-2010 Regular Session'\n },\n {\n '_scraped_name': '86th Legislature, 2010 1st Special Session',\n 'classification': 'special',\n 'identifier': '2010 1st Special Session',\n 'name': '2010, 1st Special Session'\n },\n {\n '_scraped_name': '86th Legislature, 2010 2nd Special Session',\n 'classification': 'special',\n 'identifier': '2010 2nd Special Session',\n 'name': '2010, 2nd Special Session'\n },\n {\n '_scraped_name': '87th Legislature, 2011-2012',\n 'classification': 'primary',\n 'identifier': '2011-2012',\n 'name': '2011-2012 Regular Session'\n },\n {\n '_scraped_name': '87th Legislature, 2011 1st Special Session',\n 'classification': 'special',\n 'identifier': '2011s1',\n 'name': '2011, 1st Special Session'\n },\n {\n '_scraped_name': '87th Legislature, 2012 1st Special Session',\n 'classification': 'special',\n 'identifier': '2012s1',\n 'name': '2012, 1st Special Session'\n },\n {\n '_scraped_name': '88th Legislature, 2013-2014',\n 'classification': 'primary',\n 'identifier': '2013-2014',\n 'name': '2013-2014 Regular Session'\n },\n {\n '_scraped_name': '88th Legislature, 2013 
1st Special Session',\n 'classification': 'special',\n 'identifier': '2013s1',\n 'name': '2013, 1st Special Session'\n },\n {\n '_scraped_name': '89th Legislature, 2015-2016',\n 'classification': 'primary',\n 'identifier': '2015-2016',\n 'name': '2015-2016 Regular Session'\n },\n {\n '_scraped_name': '89th Legislature, 2015 1st Special Session',\n 'classification': 'special',\n 'identifier': '2015s1',\n 'name': '2015, 1st Special Session'\n },\n {\n '_scraped_name': '90th Legislature, 2017 1st Special Session',\n 'classification': 'special',\n 'identifier': '2017s1',\n 'name': '2017, 1st Special Session'\n },\n {\n '_scraped_name': '90th Legislature, 2017-2018',\n 'classification': 'primary',\n 'identifier': '2017-2018',\n 'name': '2017-2018 Regular Session',\n 'start_date': '2017-01-03',\n 'end_date': '2018-05-21'\n },\n ]\n ignored_scraped_sessions = [\n '85th Legislature, 2007-2008',\n '85th Legislature, 2007 1st Special Session',\n '84th Legislature, 2005-2006',\n '84th Legislature, 2005 1st Special Session',\n '83rd Legislature, 2003-2004',\n '83rd Legislature, 2003 1st Special Session',\n '82nd Legislature, 2001-2002',\n '82nd Legislature, 2002 1st Special Session',\n '82nd Legislature, 2001 1st Special Session',\n '81st Legislature, 1999-2000',\n '80th Legislature, 1997-1998',\n '80th Legislature, 1998 1st Special Session',\n '80th Legislature, 1997 3rd Special Session',\n '80th Legislature, 1997 2nd Special Session',\n '80th Legislature, 1997 1st Special Session',\n '79th Legislature, 1995-1996',\n '79th Legislature, 1995 1st Special Session',\n '89th Legislature, 2015-2016',\n ]\n\n def get_organizations(self):\n legis = Organization('Minnesota Legislature', classification='legislature')\n\n upper = Organization('Minnesota Senate', classification='upper',\n parent_id=legis._id)\n lower = Organization('Minnesota House of Representatives',\n classification='lower', parent_id=legis._id)\n\n for n in range(1, 68):\n upper.add_post(label=str(n), role='Senator',\n division_id='ocd-division/country:us/state:mn/sldu:{}'.format(n))\n lower.add_post(label=str(n) + 'A', role='Representative',\n division_id='ocd-division/country:us/state:mn/sldl:{}a'.format(n))\n lower.add_post(label=str(n) + 'B', role='Representative',\n division_id='ocd-division/country:us/state:mn/sldl:{}b'.format(n))\n\n yield Organization('Governor of Minnesota', classification='executive')\n yield legis\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath('https://www.revisor.mn.gov/bills/'\n 'status_search.php?body=House',\n '//select[@name=\"session\"]/option/text()', verify=False)\n", "path": "openstates/mn/__init__.py"}]}
| 2,846 | 141 |
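The golden diff above appends the newly advertised `'91st Legislature, 2019-2020'` session to `ignored_scraped_sessions`, which is exactly what the CRITICAL message asks for: every session reported by `get_session_list()` must appear either in `legislative_sessions` or in the ignore list. A self-contained sketch of that bookkeeping check (illustrative literals; the real check is performed by pupa, not by the scraper itself):

```python
scraped = ['90th Legislature, 2017-2018', '91st Legislature, 2019-2020']  # get_session_list()
known = {'90th Legislature, 2017-2018'}   # _scraped_name values in legislative_sessions
ignored = []                              # ignored_scraped_sessions before the patch

unaccounted = [s for s in scraped if s not in known and s not in ignored]
print(unaccounted)   # ['91st Legislature, 2019-2020'] -> scrape aborts with the CRITICAL error

ignored.append('91st Legislature, 2019-2020')   # what the patch does
unaccounted = [s for s in scraped if s not in known and s not in ignored]
print(unaccounted)   # [] -> check passes
```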
gh_patches_debug_32455
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-875
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Thumbnails missing/broken on plasmapy documentation webpage home
As the title suggests, and can be seen on the screenshot, two of the three thumbnails
are missing from the PlasmaPy documentation front page at https://docs.plasmapy.org/en/latest/
<img width="598" alt="Untitled" src="https://user-images.githubusercontent.com/7075058/88436433-2b75b680-cdb9-11ea-92a0-83ebd8d67880.png">
Thumbnails missing/broken on plasmapy documentation webpage home
As the title suggests, and can be seen on the screenshot, two of the three thumbnails
are missing from the PlasmaPy documentation front page at https://docs.plasmapy.org/en/latest/
<img width="598" alt="Untitled" src="https://user-images.githubusercontent.com/7075058/88436433-2b75b680-cdb9-11ea-92a0-83ebd8d67880.png">
Thumbnails missing/broken on plasmapy documentation webpage home
As the title suggests, and can be seen on the screenshot, two of the three thumbnails
are missing from the PlasmaPy documentation front page at https://docs.plasmapy.org/en/latest/
<img width="598" alt="Untitled" src="https://user-images.githubusercontent.com/7075058/88436433-2b75b680-cdb9-11ea-92a0-83ebd8d67880.png">
</issue>
<code>
[start of docs/conf.py]
1 #!/usr/bin/env python3.6
2 # -*- coding: utf-8 -*-
3 #
4 # PlasmaPy documentation build configuration file, created by
5 # sphinx-quickstart on Wed May 31 18:16:46 2017.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20
21 import os
22 import sys
23
24 from pkg_resources import parse_version
25 from sphinx.application import Sphinx
26
27 sys.path.insert(0, os.path.abspath(".."))
28
29 from plasmapy import __version__ as release
30
31
32 # -- General configuration ------------------------------------------------
33
34 # If your documentation needs a minimal Sphinx version, state it here.
35 #
36 # needs_sphinx = '1.0'
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = [
42 "sphinx.ext.autodoc",
43 "sphinx.ext.intersphinx",
44 "sphinx.ext.graphviz",
45 "sphinx.ext.mathjax",
46 "sphinx.ext.napoleon",
47 "sphinx_automodapi.automodapi",
48 "sphinx_automodapi.smart_resolver",
49 "nbsphinx",
50 "sphinx_copybutton",
51 "sphinx_gallery.load_style",
52 ]
53
54 intersphinx_mapping = {
55 "python": ("https://docs.python.org/3", None),
56 "numpy": ("https://numpy.org/doc/stable/", None),
57 "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
58 "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
59 "astropy": ("http://docs.astropy.org/en/stable/", None),
60 }
61 # Add any paths that contain templates here, relative to this directory.
62 templates_path = ["_templates"]
63
64 # The suffix(es) of source filenames.
65 # You can specify multiple suffix as a list of string:
66 #
67 # source_suffix = ['.rst', '.md']
68 source_suffix = ".rst"
69
70 # The master toctree document.
71 master_doc = "index"
72
73 # General information about the project.
74 project = "PlasmaPy"
75 copyright = "2015-2020, PlasmaPy Community"
76 author = "PlasmaPy Community"
77
78
79 # The version info for the project you're documenting, acts as replacement for
80 # |version| and |release|, also used in various other places throughout the
81 # built documents.
82 #
83 # The full version, including alpha/beta/rc tags.
84 # Note: If plasmapy.__version__ can not be defined then it is set to 'unknown'.
85 # However, release needs to be a semantic style version number, so set
86 # the 'unknown' case to ''.
87 release = "" if release == "unknown" else release
88 if release == "unknown":
89 release = version = revision = ""
90 else:
91 pv = parse_version(release)
92 release = pv.public
93 version = ".".join(release.split(".")[:2]) # short X.Y version
94 if pv.local is not None:
95 revision = pv.local[1:] # revision number w/o the leading g
96 else:
97 revision = ""
98
99
100 # The language for content autogenerated by Sphinx. Refer to documentation
101 # for a list of supported languages.
102 #
103 # This is also used if you do content translation via gettext catalogs.
104 # Usually you set "language" from the command line for these cases.
105 language = None
106
107 # List of patterns, relative to source directory, that match files and
108 # directories to ignore when looking for source files.
109 # This patterns also effect to html_static_path and html_extra_path
110 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
111
112 # The name of the Pygments (syntax highlighting) style to use.
113 pygments_style = "sphinx"
114
115 # If true, `todo` and `todoList` produce output, else they produce nothing.
116 todo_include_todos = False
117
118 default_role = "obj"
119
120 # -- Options for HTML output ----------------------------------------------
121
122 # The theme to use for HTML and HTML Help pages. See the documentation for
123 # a list of builtin themes.
124 #
125 # html_theme = 'alabaster'
126 # html_theme = 'traditional'
127 # html_theme = 'agogo'
128 html_theme = "sphinx_rtd_theme"
129
130 # Theme options are theme-specific and customize the look and feel of a theme
131 # further. For a list of options available for each theme, see the
132 # documentation.
133 #
134 # html_theme_options = {}
135
136 # Add any paths that contain custom static files (such as style sheets) here,
137 # relative to this directory. They are copied after the builtin static files,
138 # so a file named "default.css" will overwrite the builtin "default.css".
139 # html_static_path = ['_static']
140
141
142 # -- Options for HTMLHelp output ------------------------------------------
143
144 # Output file base name for HTML help builder.
145 htmlhelp_basename = "PlasmaPydoc"
146
147
148 # -- Options for LaTeX output ---------------------------------------------
149
150 latex_elements = {
151 # The paper size ('letterpaper' or 'a4paper').
152 # 'papersize': 'letterpaper',
153 #
154 # The font size ('10pt', '11pt' or '12pt').
155 # 'pointsize': '10pt',
156 #
157 # Additional stuff for the LaTeX preamble.
158 # 'preamble': '',
159 #
160 # Latex figure (float) alignment
161 # 'figure_align': 'htbp',
162 }
163
164 # Grouping the document tree into LaTeX files. List of tuples
165 # (source start file, target name, title,
166 # author, documentclass [howto, manual, or own class]).
167 latex_documents = [
168 (
169 master_doc,
170 "PlasmaPy.tex",
171 "PlasmaPy Documentation",
172 "PlasmaPy Community",
173 "manual",
174 )
175 ]
176
177
178 # -- Options for manual page output ---------------------------------------
179
180 # One entry per manual page. List of tuples
181 # (source start file, name, description, authors, manual section).
182 man_pages = [(master_doc, "plasmapy", "PlasmaPy Documentation", [author], 1)]
183
184
185 # -- Options for Texinfo output -------------------------------------------
186
187 # Grouping the document tree into Texinfo files. List of tuples
188 # (source start file, target name, title, author,
189 # dir menu entry, description, category)
190 texinfo_documents = [
191 (
192 master_doc,
193 "PlasmaPy",
194 "PlasmaPy Documentation",
195 author,
196 "PlasmaPy",
197 "Python package for plasma physics",
198 "Miscellaneous",
199 )
200 ]
201
202 html_favicon = "./_static/icon.ico"
203
204
205 # -- NBSphinx options
206
207 nbsphinx_thumbnails = {"notebooks/*": "_images/graphic-circular.png"}
208
209 # adapted from https://github.com/spatialaudio/nbsphinx/blob/58b8034dd9d7349c1b4ac3e7a7d6baa87ab2a6a9/doc/conf.py
210
211 # This is processed by Jinja2 and inserted before each notebook
212 nbsphinx_prolog = r"""
213 {% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}
214 {% set nb_base = 'tree' if env.config.revision else 'blob' %}
215 {% set nb_where = env.config.revision if env.config.revision else 'master' %}
216
217 .. raw:: html
218
219 <div class="admonition note">
220 <p style="margin-bottom:0px">
221 This page was generated by
222 <a href="https://nbsphinx.readthedocs.io/">nbsphinx</a> from
223 <a class="reference external" href="https://github.com/PlasmaPy/PlasmaPy/{{ nb_base|e }}/{{ nb_where|e }}/{{ docname|e }}">{{ docname|e }}</a>.
224 <br>
225 Interactive online version:
226 <a href="https://mybinder.org/v2/gh/PlasmaPy/PlasmaPy/{{ nb_where|e }}/?filepath={{ docname|e }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>.
227 </p>
228 </div>
229
230 .. raw:: latex
231
232 \nbsphinxstartnotebook{\scriptsize\noindent\strut
233 \textcolor{gray}{The following section was generated from
234 \sphinxcode{\sphinxupquote{\strut {{ docname | escape_latex }}}} \dotfill}}
235 """
236
237
238 def setup(app: Sphinx) -> None:
239 app.add_config_value("revision", "", True)
240
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -21,6 +21,7 @@
import os
import sys
+from datetime import datetime
from pkg_resources import parse_version
from sphinx.application import Sphinx
@@ -72,8 +73,8 @@
# General information about the project.
project = "PlasmaPy"
-copyright = "2015-2020, PlasmaPy Community"
author = "PlasmaPy Community"
+copyright = f"2015-{datetime.utcnow().year}, {author}"
# The version info for the project you're documenting, acts as replacement for
@@ -131,13 +132,24 @@
# further. For a list of options available for each theme, see the
# documentation.
#
-# html_theme_options = {}
+html_logo = "./_static/with-text-light-190px.png"
+html_theme_options = {
+ "logo_only": True,
+ #
+ # TOC options
+ # https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#theme-options
+ "includehidden": False,
+}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ['_static']
+html_static_path = ["_static"]
+# A list of prefixes that are ignored for sorting the Python module
+# index (e.g., if this is set to ['foo.'], then foo.bar is shown under
+# B, not F).
+modindex_common_prefix = ["plasmapy."]
# -- Options for HTMLHelp output ------------------------------------------
@@ -237,3 +249,4 @@
def setup(app: Sphinx) -> None:
app.add_config_value("revision", "", True)
+ app.add_stylesheet("rtd_theme_overrides.css")
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -21,6 +21,7 @@\n import os\n import sys\n \n+from datetime import datetime\n from pkg_resources import parse_version\n from sphinx.application import Sphinx\n \n@@ -72,8 +73,8 @@\n \n # General information about the project.\n project = \"PlasmaPy\"\n-copyright = \"2015-2020, PlasmaPy Community\"\n author = \"PlasmaPy Community\"\n+copyright = f\"2015-{datetime.utcnow().year}, {author}\"\n \n \n # The version info for the project you're documenting, acts as replacement for\n@@ -131,13 +132,24 @@\n # further. For a list of options available for each theme, see the\n # documentation.\n #\n-# html_theme_options = {}\n+html_logo = \"./_static/with-text-light-190px.png\"\n+html_theme_options = {\n+ \"logo_only\": True,\n+ #\n+ # TOC options\n+ # https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#theme-options\n+ \"includehidden\": False,\n+}\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-# html_static_path = ['_static']\n+html_static_path = [\"_static\"]\n \n+# A list of prefixes that are ignored for sorting the Python module\n+# index (e.g., if this is set to ['foo.'], then foo.bar is shown under\n+# B, not F).\n+modindex_common_prefix = [\"plasmapy.\"]\n \n # -- Options for HTMLHelp output ------------------------------------------\n \n@@ -237,3 +249,4 @@\n \n def setup(app: Sphinx) -> None:\n app.add_config_value(\"revision\", \"\", True)\n+ app.add_stylesheet(\"rtd_theme_overrides.css\")\n", "issue": "Thumbnails missing/broken on plasmapy documentation webpage home\nAs the title suggests, and can be seen on the screenshot, two of the three thumbnails\r\nare missing from the PlasmaPy documentation front page at https://docs.plasmapy.org/en/latest/\r\n<img width=\"598\" alt=\"Untitled\" src=\"https://user-images.githubusercontent.com/7075058/88436433-2b75b680-cdb9-11ea-92a0-83ebd8d67880.png\">\r\n\nThumbnails missing/broken on plasmapy documentation webpage home\nAs the title suggests, and can be seen on the screenshot, two of the three thumbnails\r\nare missing from the PlasmaPy documentation front page at https://docs.plasmapy.org/en/latest/\r\n<img width=\"598\" alt=\"Untitled\" src=\"https://user-images.githubusercontent.com/7075058/88436433-2b75b680-cdb9-11ea-92a0-83ebd8d67880.png\">\r\n\nThumbnails missing/broken on plasmapy documentation webpage home\nAs the title suggests, and can be seen on the screenshot, two of the three thumbnails\r\nare missing from the PlasmaPy documentation front page at https://docs.plasmapy.org/en/latest/\r\n<img width=\"598\" alt=\"Untitled\" src=\"https://user-images.githubusercontent.com/7075058/88436433-2b75b680-cdb9-11ea-92a0-83ebd8d67880.png\">\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n#\n# PlasmaPy documentation build configuration file, created by\n# sphinx-quickstart on Wed May 31 18:16:46 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to 
sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport sys\n\nfrom pkg_resources import parse_version\nfrom sphinx.application import Sphinx\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nfrom plasmapy import __version__ as release\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.graphviz\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx_automodapi.automodapi\",\n \"sphinx_automodapi.smart_resolver\",\n \"nbsphinx\",\n \"sphinx_copybutton\",\n \"sphinx_gallery.load_style\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n \"astropy\": (\"http://docs.astropy.org/en/stable/\", None),\n}\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"PlasmaPy\"\ncopyright = \"2015-2020, PlasmaPy Community\"\nauthor = \"PlasmaPy Community\"\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\n# Note: If plasmapy.__version__ can not be defined then it is set to 'unknown'.\n# However, release needs to be a semantic style version number, so set\n# the 'unknown' case to ''.\nrelease = \"\" if release == \"unknown\" else release\nif release == \"unknown\":\n release = version = revision = \"\"\nelse:\n pv = parse_version(release)\n release = pv.public\n version = \".\".join(release.split(\".\")[:2]) # short X.Y version\n if pv.local is not None:\n revision = pv.local[1:] # revision number w/o the leading g\n else:\n revision = \"\"\n\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\ndefault_role = \"obj\"\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n# html_theme = 'traditional'\n# html_theme = 'agogo'\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"PlasmaPydoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n #\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n #\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n #\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"PlasmaPy.tex\",\n \"PlasmaPy Documentation\",\n \"PlasmaPy Community\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"plasmapy\", \"PlasmaPy Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"PlasmaPy\",\n \"PlasmaPy Documentation\",\n author,\n \"PlasmaPy\",\n \"Python package for plasma physics\",\n \"Miscellaneous\",\n )\n]\n\nhtml_favicon = \"./_static/icon.ico\"\n\n\n# -- NBSphinx options\n\nnbsphinx_thumbnails = {\"notebooks/*\": \"_images/graphic-circular.png\"}\n\n# adapted from https://github.com/spatialaudio/nbsphinx/blob/58b8034dd9d7349c1b4ac3e7a7d6baa87ab2a6a9/doc/conf.py\n\n# This is processed by Jinja2 and inserted before each notebook\nnbsphinx_prolog = r\"\"\"\n{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}\n{% set nb_base = 'tree' if env.config.revision else 'blob' %}\n{% set nb_where = env.config.revision if env.config.revision else 'master' %}\n\n.. raw:: html\n\n <div class=\"admonition note\">\n <p style=\"margin-bottom:0px\">\n This page was generated by\n <a href=\"https://nbsphinx.readthedocs.io/\">nbsphinx</a> from\n <a class=\"reference external\" href=\"https://github.com/PlasmaPy/PlasmaPy/{{ nb_base|e }}/{{ nb_where|e }}/{{ docname|e }}\">{{ docname|e }}</a>.\n <br>\n Interactive online version:\n <a href=\"https://mybinder.org/v2/gh/PlasmaPy/PlasmaPy/{{ nb_where|e }}/?filepath={{ docname|e }}\"><img alt=\"Binder badge\" src=\"https://mybinder.org/badge_logo.svg\" style=\"vertical-align:text-bottom\"></a>.\n </p>\n </div>\n\n.. 
raw:: latex\n\n \\nbsphinxstartnotebook{\\scriptsize\\noindent\\strut\n \\textcolor{gray}{The following section was generated from\n \\sphinxcode{\\sphinxupquote{\\strut {{ docname | escape_latex }}}} \\dotfill}}\n\"\"\"\n\n\ndef setup(app: Sphinx) -> None:\n app.add_config_value(\"revision\", \"\", True)\n", "path": "docs/conf.py"}]}
| 3,500 | 454 |
gh_patches_debug_4718 | rasdani/github-patches | git_diff | opsdroid__opsdroid-379 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error if parsers is empty
# Description
If you leave the `parsers` section of the config as an empty dictionary an error is thrown and startup fails.
## Steps to Reproduce
Uncomment the parsers section of the config but with no actual parsers
```
parsers:
# nothing else
```
## Expected Functionality
The application should start with the default parsers only. The same as if `parsers:` is not in the config.
## Experienced Functionality
```
DEBUG opsdroid.core: Parsing input: hi
DEBUG opsdroid.core: Processing parsers...
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/Users/jacob/.pyenv/versions/3.5.4/lib/python3.5/site-packages/aiohttp/web_protocol.py", line 416, in start
resp = yield from self._request_handler(request)
File "/Users/jacob/.pyenv/versions/3.5.4/lib/python3.5/site-packages/aiohttp/web.py", line 325, in _handle
resp = yield from handler(request)
File "/Users/jacob/.opsdroid/modules/opsdroid-modules/connector/websocket/__init__.py", line 77, in websocket_handler
await self.opsdroid.parse(message)
File "/Users/jacob/Projects/opsdroid/opsdroid/opsdroid/core.py", line 273, in parse
ranked_skills = await self.get_ranked_skills(message)
File "/Users/jacob/Projects/opsdroid/opsdroid/opsdroid/core.py", line 218, in get_ranked_skills
dialogflow = [p for p in parsers if p["name"] == "dialogflow"
TypeError: 'NoneType' object is not iterable
```
## Versions
- **Opsdroid version:** 0.10.0
- **Python version:** Python 3.5.4
- **OS/Docker version:** macOS 10.13
</issue>
<code>
[start of opsdroid/core.py]
1 """Core components of OpsDroid."""
2
3 import copy
4 import logging
5 import signal
6 import sys
7 import weakref
8 import asyncio
9
10 from opsdroid.memory import Memory
11 from opsdroid.connector import Connector
12 from opsdroid.database import Database
13 from opsdroid.loader import Loader
14 from opsdroid.parsers.always import parse_always
15 from opsdroid.parsers.regex import parse_regex
16 from opsdroid.parsers.dialogflow import parse_dialogflow
17 from opsdroid.parsers.luisai import parse_luisai
18 from opsdroid.parsers.recastai import parse_recastai
19 from opsdroid.parsers.witai import parse_witai
20 from opsdroid.parsers.crontab import parse_crontab
21 from opsdroid.const import DEFAULT_CONFIG_PATH
22
23
24 _LOGGER = logging.getLogger(__name__)
25
26
27 class OpsDroid():
28 """Root object for opsdroid."""
29
30 # pylint: disable=too-many-instance-attributes
31 # All are reasonable in this case.
32
33 instances = []
34
35 def __init__(self):
36 """Start opsdroid."""
37 self.bot_name = 'opsdroid'
38 self.sys_status = 0
39 self.connectors = []
40 self.connector_tasks = []
41 self.eventloop = asyncio.get_event_loop()
42 for sig in (signal.SIGINT, signal.SIGTERM):
43 self.eventloop.add_signal_handler(sig, self.call_stop)
44 self.skills = []
45 self.memory = Memory()
46 self.loader = Loader(self)
47 self.config = {}
48 self.stats = {
49 "messages_parsed": 0,
50 "webhooks_called": 0,
51 "total_response_time": 0,
52 "total_responses": 0,
53 }
54 self.web_server = None
55 self.should_restart = False
56 self.stored_path = []
57
58 def __enter__(self):
59 """Add self to existing instances."""
60 self.stored_path = copy.copy(sys.path)
61 if not self.__class__.instances:
62 self.__class__.instances.append(weakref.proxy(self))
63 else:
64 self.critical("opsdroid has already been started", 1)
65 return self
66
67 def __exit__(self, exc_type, exc_value, traceback):
68 """Remove self from existing instances."""
69 sys.path = self.stored_path
70 self.__class__.instances = []
71 asyncio.set_event_loop(asyncio.new_event_loop())
72
73 @property
74 def default_connector(self):
75 """Return the default connector."""
76 default_connector = None
77 for connector in self.connectors:
78 if "default" in connector.config and connector.config["default"]:
79 default_connector = connector
80 break
81 if default_connector is None:
82 default_connector = self.connectors[0]
83 return default_connector
84
85 def exit(self):
86 """Exit application."""
87 _LOGGER.info("Exiting application with return code %s",
88 str(self.sys_status))
89 sys.exit(self.sys_status)
90
91 def critical(self, error, code):
92 """Exit due to unrecoverable error."""
93 self.sys_status = code
94 _LOGGER.critical(error)
95 self.exit()
96
97 def restart(self):
98 """Restart opsdroid."""
99 self.should_restart = True
100 self.stop()
101
102 def call_stop(self):
103 """Signal handler to call disconnect and stop."""
104 future = asyncio.ensure_future(self.disconnect())
105 future.add_done_callback(self.stop)
106 return future
107
108 async def disconnect(self):
109 """Disconnect all the connectors."""
110 for connector in self.connectors:
111 await connector.disconnect(self)
112
113 def stop(self, future=None):
114 """Stop the event loop."""
115 pending = asyncio.Task.all_tasks()
116 for task in pending:
117 task.cancel()
118 self.eventloop.stop()
119 print('') # Prints a character return for return to shell
120 _LOGGER.info("Keyboard interrupt, exiting.")
121
122 def load(self):
123 """Load configuration."""
124 self.config = self.loader.load_config_file([
125 "configuration.yaml",
126 DEFAULT_CONFIG_PATH,
127 "/etc/opsdroid/configuration.yaml"
128 ])
129
130 def start_loop(self):
131 """Start the event loop."""
132 connectors, databases, skills = \
133 self.loader.load_modules_from_config(self.config)
134 _LOGGER.debug("Loaded %i skills", len(skills))
135 if databases is not None:
136 self.start_databases(databases)
137 self.setup_skills(skills)
138 self.start_connector_tasks(connectors)
139 self.eventloop.create_task(parse_crontab(self))
140 self.web_server.start()
141 try:
142 pending = asyncio.Task.all_tasks()
143 self.eventloop.run_until_complete(asyncio.gather(*pending))
144 except RuntimeError as error:
145 if str(error) != 'Event loop is closed':
146 raise error
147 finally:
148 self.eventloop.close()
149
150 def setup_skills(self, skills):
151 """Call the setup function on the passed in skills."""
152 for skill in skills:
153 try:
154 skill["module"].setup(self)
155 except AttributeError:
156 pass
157
158 def start_connector_tasks(self, connectors):
159 """Start the connectors."""
160 for connector_module in connectors:
161 for _, cls in connector_module["module"].__dict__.items():
162 if isinstance(cls, type) and \
163 issubclass(cls, Connector) and\
164 cls is not Connector:
165 connector = cls(connector_module["config"])
166 self.connectors.append(connector)
167
168 if connectors:
169 for connector in self.connectors:
170 self.eventloop.run_until_complete(connector.connect(self))
171 for connector in self.connectors:
172 task = self.eventloop.create_task(connector.listen(self))
173 self.connector_tasks.append(task)
174 else:
175 self.critical("All connectors failed to load", 1)
176
177 def start_databases(self, databases):
178 """Start the databases."""
179 if not databases:
180 _LOGGER.debug(databases)
181 _LOGGER.warning("All databases failed to load")
182 for database_module in databases:
183 for name, cls in database_module["module"].__dict__.items():
184 if isinstance(cls, type) and \
185 issubclass(cls, Database) and \
186 cls is not Database:
187 _LOGGER.debug("Adding database: %s", name)
188 database = cls(database_module["config"])
189 self.memory.databases.append(database)
190 self.eventloop.run_until_complete(database.connect(self))
191
192 async def run_skill(self, skill, config, message):
193 """Execute a skill."""
194 # pylint: disable=broad-except
195 # We want to catch all exceptions coming from a skill module and not
196 # halt the application. If a skill throws an exception it just doesn't
197 # give a response to the user, so an error response should be given.
198 try:
199 await skill(self, config, message)
200 except Exception:
201 if message:
202 await message.respond(
203 "Whoops there has been an error")
204 await message.respond(
205 "Check the log for details")
206 _LOGGER.exception("Exception when running skill '%s' ",
207 config["name"])
208
209 async def get_ranked_skills(self, message):
210 """Take a message and return a ranked list of matching skills."""
211 skills = []
212 skills = skills + await parse_regex(self, message)
213
214 if "parsers" in self.config:
215 _LOGGER.debug("Processing parsers...")
216 parsers = self.config["parsers"]
217
218 dialogflow = [p for p in parsers if p["name"] == "dialogflow"
219 or p["name"] == "apiai"]
220
221 # Show deprecation message but parse message
222 # Once it stops working remove this bit
223 apiai = [p for p in parsers if p["name"] == "apiai"]
224 if apiai:
225 _LOGGER.warning("Api.ai is now called Dialogflow. This "
226 "parser will stop working in the future "
227 "please swap: 'name: apiai' for "
228 "'name: dialogflow' in configuration.yaml")
229
230 if len(dialogflow) == 1 and \
231 ("enabled" not in dialogflow[0] or
232 dialogflow[0]["enabled"] is not False):
233 _LOGGER.debug("Checking dialogflow...")
234 skills = skills + \
235 await parse_dialogflow(self, message, dialogflow[0])
236
237 luisai = [p for p in parsers if p["name"] == "luisai"]
238 if len(luisai) == 1 and \
239 ("enabled" not in luisai[0] or
240 luisai[0]["enabled"] is not False):
241 _LOGGER.debug("Checking luisai...")
242 skills = skills + \
243 await parse_luisai(self, message, luisai[0])
244
245 recastai = [p for p in parsers if p["name"] == "recastai"]
246 if len(recastai) == 1 and \
247 ("enabled" not in recastai[0] or
248 recastai[0]["enabled"] is not False):
249 _LOGGER.debug("Checking Recast.AI...")
250 skills = skills + \
251 await parse_recastai(self, message, recastai[0])
252
253 witai = [p for p in parsers if p["name"] == "witai"]
254 if len(witai) == 1 and \
255 ("enabled" not in witai[0] or
256 witai[0]["enabled"] is not False):
257 _LOGGER.debug("Checking wit.ai...")
258 skills = skills + \
259 await parse_witai(self, message, witai[0])
260
261 return sorted(skills, key=lambda k: k["score"], reverse=True)
262
263 async def parse(self, message):
264 """Parse a string against all skills."""
265 self.stats["messages_parsed"] = self.stats["messages_parsed"] + 1
266 tasks = []
267 if message.text.strip() != "":
268 _LOGGER.debug("Parsing input: %s", message.text)
269
270 tasks.append(
271 self.eventloop.create_task(parse_always(self, message)))
272
273 ranked_skills = await self.get_ranked_skills(message)
274 if ranked_skills:
275 tasks.append(
276 self.eventloop.create_task(
277 self.run_skill(ranked_skills[0]["skill"],
278 ranked_skills[0]["config"],
279 message)))
280
281 return tasks
282
[end of opsdroid/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/core.py b/opsdroid/core.py
--- a/opsdroid/core.py
+++ b/opsdroid/core.py
@@ -213,7 +213,7 @@
if "parsers" in self.config:
_LOGGER.debug("Processing parsers...")
- parsers = self.config["parsers"]
+ parsers = self.config["parsers"] or []
dialogflow = [p for p in parsers if p["name"] == "dialogflow"
or p["name"] == "apiai"]
|
{"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -213,7 +213,7 @@\n \n if \"parsers\" in self.config:\n _LOGGER.debug(\"Processing parsers...\")\n- parsers = self.config[\"parsers\"]\n+ parsers = self.config[\"parsers\"] or []\n \n dialogflow = [p for p in parsers if p[\"name\"] == \"dialogflow\"\n or p[\"name\"] == \"apiai\"]\n", "issue": "Error if parsers is empty\n# Description\r\nIf you leave the `parsers` section of the config as an empty dictionary an error is thrown and startup fails.\r\n\r\n## Steps to Reproduce\r\nUncomment the parsers section of the config but with no actual parsers\r\n\r\n```\r\nparsers:\r\n\r\n# nothing else\r\n```\r\n\r\n## Expected Functionality\r\nThe application should start with the default parsers only. The same as if `parsers:` is not in the config.\r\n\r\n## Experienced Functionality\r\n\r\n```\r\nDEBUG opsdroid.core: Parsing input: hi\r\nDEBUG opsdroid.core: Processing parsers...\r\nERROR aiohttp.server: Error handling request\r\nTraceback (most recent call last):\r\n File \"/Users/jacob/.pyenv/versions/3.5.4/lib/python3.5/site-packages/aiohttp/web_protocol.py\", line 416, in start\r\n resp = yield from self._request_handler(request)\r\n File \"/Users/jacob/.pyenv/versions/3.5.4/lib/python3.5/site-packages/aiohttp/web.py\", line 325, in _handle\r\n resp = yield from handler(request)\r\n File \"/Users/jacob/.opsdroid/modules/opsdroid-modules/connector/websocket/__init__.py\", line 77, in websocket_handler\r\n await self.opsdroid.parse(message)\r\n File \"/Users/jacob/Projects/opsdroid/opsdroid/opsdroid/core.py\", line 273, in parse\r\n ranked_skills = await self.get_ranked_skills(message)\r\n File \"/Users/jacob/Projects/opsdroid/opsdroid/opsdroid/core.py\", line 218, in get_ranked_skills\r\n dialogflow = [p for p in parsers if p[\"name\"] == \"dialogflow\"\r\nTypeError: 'NoneType' object is not iterable\r\n```\r\n\r\n## Versions\r\n- **Opsdroid version:** 0.10.0\r\n- **Python version:** Python 3.5.4\r\n- **OS/Docker version:** macOS 10.13\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport copy\nimport logging\nimport signal\nimport sys\nimport weakref\nimport asyncio\n\nfrom opsdroid.memory import Memory\nfrom opsdroid.connector import Connector\nfrom opsdroid.database import Database\nfrom opsdroid.loader import Loader\nfrom opsdroid.parsers.always import parse_always\nfrom opsdroid.parsers.regex import parse_regex\nfrom opsdroid.parsers.dialogflow import parse_dialogflow\nfrom opsdroid.parsers.luisai import parse_luisai\nfrom opsdroid.parsers.recastai import parse_recastai\nfrom opsdroid.parsers.witai import parse_witai\nfrom opsdroid.parsers.crontab import parse_crontab\nfrom opsdroid.const import DEFAULT_CONFIG_PATH\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n # All are reasonable in this case.\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_tasks = []\n self.eventloop = asyncio.get_event_loop()\n for sig in (signal.SIGINT, signal.SIGTERM):\n self.eventloop.add_signal_handler(sig, self.call_stop)\n self.skills = []\n self.memory = Memory()\n self.loader = Loader(self)\n self.config = {}\n self.stats = {\n \"messages_parsed\": 0,\n \"webhooks_called\": 0,\n \"total_response_time\": 0,\n \"total_responses\": 0,\n }\n 
self.web_server = None\n self.should_restart = False\n self.stored_path = []\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n self.stored_path = copy.copy(sys.path)\n if not self.__class__.instances:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n sys.path = self.stored_path\n self.__class__.instances = []\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n @property\n def default_connector(self):\n \"\"\"Return the default connector.\"\"\"\n default_connector = None\n for connector in self.connectors:\n if \"default\" in connector.config and connector.config[\"default\"]:\n default_connector = connector\n break\n if default_connector is None:\n default_connector = self.connectors[0]\n return default_connector\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n _LOGGER.info(\"Exiting application with return code %s\",\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n _LOGGER.critical(error)\n self.exit()\n\n def restart(self):\n \"\"\"Restart opsdroid.\"\"\"\n self.should_restart = True\n self.stop()\n\n def call_stop(self):\n \"\"\"Signal handler to call disconnect and stop.\"\"\"\n future = asyncio.ensure_future(self.disconnect())\n future.add_done_callback(self.stop)\n return future\n\n async def disconnect(self):\n \"\"\"Disconnect all the connectors.\"\"\"\n for connector in self.connectors:\n await connector.disconnect(self)\n\n def stop(self, future=None):\n \"\"\"Stop the event loop.\"\"\"\n pending = asyncio.Task.all_tasks()\n for task in pending:\n task.cancel()\n self.eventloop.stop()\n print('') # Prints a character return for return to shell\n _LOGGER.info(\"Keyboard interrupt, exiting.\")\n\n def load(self):\n \"\"\"Load configuration.\"\"\"\n self.config = self.loader.load_config_file([\n \"configuration.yaml\",\n DEFAULT_CONFIG_PATH,\n \"/etc/opsdroid/configuration.yaml\"\n ])\n\n def start_loop(self):\n \"\"\"Start the event loop.\"\"\"\n connectors, databases, skills = \\\n self.loader.load_modules_from_config(self.config)\n _LOGGER.debug(\"Loaded %i skills\", len(skills))\n if databases is not None:\n self.start_databases(databases)\n self.setup_skills(skills)\n self.start_connector_tasks(connectors)\n self.eventloop.create_task(parse_crontab(self))\n self.web_server.start()\n try:\n pending = asyncio.Task.all_tasks()\n self.eventloop.run_until_complete(asyncio.gather(*pending))\n except RuntimeError as error:\n if str(error) != 'Event loop is closed':\n raise error\n finally:\n self.eventloop.close()\n\n def setup_skills(self, skills):\n \"\"\"Call the setup function on the passed in skills.\"\"\"\n for skill in skills:\n try:\n skill[\"module\"].setup(self)\n except AttributeError:\n pass\n\n def start_connector_tasks(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n for connector_module in connectors:\n for _, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Connector) and\\\n cls is not Connector:\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n\n if connectors:\n for connector in self.connectors:\n self.eventloop.run_until_complete(connector.connect(self))\n for connector in self.connectors:\n task = 
self.eventloop.create_task(connector.listen(self))\n self.connector_tasks.append(task)\n else:\n self.critical(\"All connectors failed to load\", 1)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if not databases:\n _LOGGER.debug(databases)\n _LOGGER.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Database) and \\\n cls is not Database:\n _LOGGER.debug(\"Adding database: %s\", name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n self.eventloop.run_until_complete(database.connect(self))\n\n async def run_skill(self, skill, config, message):\n \"\"\"Execute a skill.\"\"\"\n # pylint: disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n try:\n await skill(self, config, message)\n except Exception:\n if message:\n await message.respond(\n \"Whoops there has been an error\")\n await message.respond(\n \"Check the log for details\")\n _LOGGER.exception(\"Exception when running skill '%s' \",\n config[\"name\"])\n\n async def get_ranked_skills(self, message):\n \"\"\"Take a message and return a ranked list of matching skills.\"\"\"\n skills = []\n skills = skills + await parse_regex(self, message)\n\n if \"parsers\" in self.config:\n _LOGGER.debug(\"Processing parsers...\")\n parsers = self.config[\"parsers\"]\n\n dialogflow = [p for p in parsers if p[\"name\"] == \"dialogflow\"\n or p[\"name\"] == \"apiai\"]\n\n # Show deprecation message but parse message\n # Once it stops working remove this bit\n apiai = [p for p in parsers if p[\"name\"] == \"apiai\"]\n if apiai:\n _LOGGER.warning(\"Api.ai is now called Dialogflow. 
This \"\n \"parser will stop working in the future \"\n \"please swap: 'name: apiai' for \"\n \"'name: dialogflow' in configuration.yaml\")\n\n if len(dialogflow) == 1 and \\\n (\"enabled\" not in dialogflow[0] or\n dialogflow[0][\"enabled\"] is not False):\n _LOGGER.debug(\"Checking dialogflow...\")\n skills = skills + \\\n await parse_dialogflow(self, message, dialogflow[0])\n\n luisai = [p for p in parsers if p[\"name\"] == \"luisai\"]\n if len(luisai) == 1 and \\\n (\"enabled\" not in luisai[0] or\n luisai[0][\"enabled\"] is not False):\n _LOGGER.debug(\"Checking luisai...\")\n skills = skills + \\\n await parse_luisai(self, message, luisai[0])\n\n recastai = [p for p in parsers if p[\"name\"] == \"recastai\"]\n if len(recastai) == 1 and \\\n (\"enabled\" not in recastai[0] or\n recastai[0][\"enabled\"] is not False):\n _LOGGER.debug(\"Checking Recast.AI...\")\n skills = skills + \\\n await parse_recastai(self, message, recastai[0])\n\n witai = [p for p in parsers if p[\"name\"] == \"witai\"]\n if len(witai) == 1 and \\\n (\"enabled\" not in witai[0] or\n witai[0][\"enabled\"] is not False):\n _LOGGER.debug(\"Checking wit.ai...\")\n skills = skills + \\\n await parse_witai(self, message, witai[0])\n\n return sorted(skills, key=lambda k: k[\"score\"], reverse=True)\n\n async def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n self.stats[\"messages_parsed\"] = self.stats[\"messages_parsed\"] + 1\n tasks = []\n if message.text.strip() != \"\":\n _LOGGER.debug(\"Parsing input: %s\", message.text)\n\n tasks.append(\n self.eventloop.create_task(parse_always(self, message)))\n\n ranked_skills = await self.get_ranked_skills(message)\n if ranked_skills:\n tasks.append(\n self.eventloop.create_task(\n self.run_skill(ranked_skills[0][\"skill\"],\n ranked_skills[0][\"config\"],\n message)))\n\n return tasks\n", "path": "opsdroid/core.py"}]}
| 3,876 | 120 |
gh_patches_debug_3408 | rasdani/github-patches | git_diff | conda__conda-9660 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda env config (or other inappropriate/incomplete commands) results in AttributeError(possibly related to argparse ?)
<!--
Hi! Read this; it's important.
This is an issue tracker for conda -- the package manager. File feature requests
for conda here, as well as bug reports about something conda has messed up.
Anaconda Community Code of Conduct: https://www.anaconda.com/community-code-of-conduct/
If your issue is a bug report for:
* a specific conda package from Anaconda ('defaults' channel):
==> file at https://github.com/ContinuumIO/anaconda-issues
* a specific conda package from conda-forge:
==> file at the corresponding feedstock under https://github.com/conda-forge
* repo.anaconda.com access and service:
==> file at https://github.com/ContinuumIO/anaconda-issues
* anaconda.org access and service:
==> file at https://anaconda.org/contact/report
* commands under 'conda build':
==> file at https://github.com/conda/conda-build
* commands under 'conda env':
==> please file it here!
* all other conda commands that start with 'conda':
==> please file it here!
If you continue on,
**please include all requested information below.**
If a maintainer determines the information is required to understand
your issue, and if it is not provided, your issue may be
closed automatically.
-->
## Current Behavior
<!-- What actually happens?
If you want to include console output, please use "Steps to Reproduce" below. -->
```conda env config``` (or other options that are either incomplete or inappropriate,
such as ```conda env config vars```)
shows Error message including
```
Traceback (most recent call last):
File "/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda/exceptions.py", line 1078, in __call__
return func(*args, **kwargs)
File "/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda_env/cli/main.py", line 76, in do_call
relative_mod, func_name = args.func.rsplit('.', 1)
AttributeError: 'Namespace' object has no attribute 'func'
```
This seems to be related to argparse behavior?
(p.s. USERNAME is in place for the username currently used, and ROOTUSERNAME is the (main) root username. ANOTHERFOLDER is just another folder.)
### Steps to Reproduce
<!-- If the current behavior is a bug, please provide specific, minimal steps to independently reproduce.
Include the exact conda commands that reproduce the issue and their output between the ticks below. -->
```
conda env config
# >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<<
Traceback (most recent call last):
File "/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda/exceptions.py", line 1078, in __call__
return func(*args, **kwargs)
File "/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda_env/cli/main.py", line 76, in do_call
relative_mod, func_name = args.func.rsplit('.', 1)
AttributeError: 'Namespace' object has no attribute 'func'
`$ /home/USERNAME/miniconda3/bin/conda-env config vars`
environment variables:
CIO_TEST=<not set>
CONDA_AUTO_UPDATE_CONDA=false
CONDA_DEFAULT_ENV=base
CONDA_EXE=/home/USERNAME/miniconda3/bin/conda
CONDA_PREFIX=/home/USERNAME/miniconda3
CONDA_PROMPT_MODIFIER=(base)
CONDA_PYTHON_EXE=/home/USERNAME/miniconda3/bin/python
CONDA_ROOT=/home/USERNAME/miniconda3
CONDA_SHLVL=1
PATH=/home/USERNAME/miniconda3/bin:/home/USERNAME/miniconda3/bin:/home/
USERNAME/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/
usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/usr/lib/jvm/
java-8-openjdk-amd64/bin:/usr/lib/jvm/java-8-openjdk-
amd64/jre/bin:/home/ROOTUSERNAME/ANOTHERFOLDER/apache-maven-3.6.0/bin
REQUESTS_CA_BUNDLE=<not set>
SSL_CERT_FILE=<not set>
active environment : base
active env location : /home/USERNAME/miniconda3
shell level : 1
user config file : /home/USERNAME/.condarc
populated config files : /home/USERNAME/.condarc
conda version : 4.8.1
conda-build version : not installed
python version : 3.7.6.final.0
virtual packages : __cuda=10.1
__glibc=2.27
base environment : /home/USERNAME/miniconda3 (writable)
channel URLs : https://repo.anaconda.com/pkgs/main/linux-64
https://repo.anaconda.com/pkgs/main/noarch
https://repo.anaconda.com/pkgs/r/linux-64
https://repo.anaconda.com/pkgs/r/noarch
package cache : /home/USERNAME/miniconda3/pkgs
/home/USERNAME/.conda/pkgs
envs directories : /home/USERNAME/miniconda3/envs
/home/USERNAME/.conda/envs
platform : linux-64
user-agent : conda/4.8.1 requests/2.22.0 CPython/3.7.6 Linux/4.15.0-74-generic ubuntu/18.04.3 glibc/2.27
UID:GID : 1010:1010
netrc file : None
offline mode : False
```
## Expected Behavior
<!-- What do you think should happen? -->
show error message or --help option equivalent output
## Environment Information
<details open><summary><code>`conda info`</code></summary><p>
<!-- between the ticks below, paste the output of 'conda info' -->
```
active environment : base
active env location : /home/USERNAME/miniconda3
shell level : 1
user config file : /home/USERNAME/.condarc
populated config files : /home/USERNAME/.condarc
conda version : 4.8.1
conda-build version : not installed
python version : 3.7.6.final.0
virtual packages : __cuda=10.1
__glibc=2.27
base environment : /home/USERNAME/miniconda3 (writable)
channel URLs : https://repo.anaconda.com/pkgs/main/linux-64
https://repo.anaconda.com/pkgs/main/noarch
https://repo.anaconda.com/pkgs/r/linux-64
https://repo.anaconda.com/pkgs/r/noarch
package cache : /home/USERNAME/miniconda3/pkgs
/home/USERNAME/.conda/pkgs
envs directories : /home/USERNAME/miniconda3/envs
/home/USERNAME/.conda/envs
platform : linux-64
user-agent : conda/4.8.1 requests/2.22.0 CPython/3.7.6 Linux/4.15.0-74-generic ubuntu/18.04.3 glibc/2.27
UID:GID : 1010:1010
netrc file : None
offline mode : False
```
</p></details>
<details open><summary><code>`conda config --show-sources`</code></summary><p>
<!-- between the ticks below, paste the output of 'conda config --show-sources' -->
```
==> /home/USERNAME/.condarc <==
channels:
- defaults
```
</p></details>
<details><summary><code>`conda list --show-channel-urls`</code></summary><p>
<!-- between the ticks below, paste the output of 'conda list --show-channel-urls' -->
```
# packages in environment at /home/USERNAME/miniconda3:
#
# Name Version Build Channel
_libgcc_mutex 0.1 main defaults
asn1crypto 1.3.0 py37_0 defaults
ca-certificates 2019.11.27 0 defaults
certifi 2019.11.28 py37_0 defaults
cffi 1.13.2 py37h2e261b9_0 defaults
chardet 3.0.4 py37_1003 defaults
conda 4.8.1 py37_0 defaults
conda-package-handling 1.6.0 py37h7b6447c_0 defaults
cryptography 2.8 py37h1ba5d50_0 defaults
idna 2.8 py37_0 defaults
ld_impl_linux-64 2.33.1 h53a641e_7 defaults
libedit 3.1.20181209 hc058e9b_0 defaults
libffi 3.2.1 hd88cf55_4 defaults
libgcc-ng 9.1.0 hdf63c60_0 defaults
libstdcxx-ng 9.1.0 hdf63c60_0 defaults
ncurses 6.1 he6710b0_1 defaults
openssl 1.1.1d h7b6447c_3 defaults
pip 19.3.1 py37_0 defaults
pycosat 0.6.3 py37h7b6447c_0 defaults
pycparser 2.19 py37_0 defaults
pyopenssl 19.1.0 py37_0 defaults
pysocks 1.7.1 py37_0 defaults
python 3.7.6 h0371630_2 defaults
readline 7.0 h7b6447c_5 defaults
requests 2.22.0 py37_1 defaults
ruamel_yaml 0.15.87 py37h7b6447c_0 defaults
setuptools 44.0.0 py37_0 defaults
six 1.13.0 py37_0 defaults
sqlite 3.30.1 h7b6447c_0 defaults
tk 8.6.8 hbc83047_0 defaults
tqdm 4.41.1 py_0 defaults
urllib3 1.25.7 py37_0 defaults
wheel 0.33.6 py37_0 defaults
xz 5.2.4 h14c3975_4 defaults
yaml 0.1.7 had09818_2 defaults
zlib 1.2.11 h7b6447c_3 defaults
```
</p></details>
</issue>
<code>
[start of conda_env/cli/main_config.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4 from argparse import RawDescriptionHelpFormatter
5
6
7 from .main_vars import configure_parser as configure_vars_parser
8
9 config_description = '''
10 Configure a conda environment
11 '''
12
13 config_example = '''
14 examples:
15 conda env config vars list
16 conda env config --append channels conda-forge
17 '''
18
19 def configure_parser(sub_parsers):
20 config_parser = sub_parsers.add_parser(
21 'config',
22 formatter_class=RawDescriptionHelpFormatter,
23 description=config_description,
24 help=config_description,
25 epilog=config_example,
26 )
27 config_subparser = config_parser.add_subparsers()
28 configure_vars_parser(config_subparser)
29
[end of conda_env/cli/main_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda_env/cli/main_config.py b/conda_env/cli/main_config.py
--- a/conda_env/cli/main_config.py
+++ b/conda_env/cli/main_config.py
@@ -24,5 +24,10 @@
help=config_description,
epilog=config_example,
)
+ config_parser.set_defaults(func='.main_config.execute')
config_subparser = config_parser.add_subparsers()
configure_vars_parser(config_subparser)
+
+
+def execute(args, parser):
+ parser.parse_args(['config', '--help'])
|
{"golden_diff": "diff --git a/conda_env/cli/main_config.py b/conda_env/cli/main_config.py\n--- a/conda_env/cli/main_config.py\n+++ b/conda_env/cli/main_config.py\n@@ -24,5 +24,10 @@\n help=config_description,\n epilog=config_example,\n )\n+ config_parser.set_defaults(func='.main_config.execute')\n config_subparser = config_parser.add_subparsers()\n configure_vars_parser(config_subparser)\n+\n+\n+def execute(args, parser):\n+ parser.parse_args(['config', '--help'])\n", "issue": "conda env config (or other inappropriate/incomplete commands) results in AttributeError(possibly related to argparse ?) \n<!--\r\nHi! Read this; it's important.\r\n\r\nThis is an issue tracker for conda -- the package manager. File feature requests\r\nfor conda here, as well as bug reports about something conda has messed up.\r\n\r\nAnaconda Community Code of Conduct: https://www.anaconda.com/community-code-of-conduct/\r\n\r\nIf your issue is a bug report for:\r\n * a specific conda package from Anaconda ('defaults' channel):\r\n ==> file at https://github.com/ContinuumIO/anaconda-issues\r\n * a specific conda package from conda-forge:\r\n ==> file at the corresponding feedstock under https://github.com/conda-forge\r\n * repo.anaconda.com access and service:\r\n ==> file at https://github.com/ContinuumIO/anaconda-issues\r\n * anaconda.org access and service:\r\n ==> file at https://anaconda.org/contact/report\r\n * commands under 'conda build':\r\n ==> file at https://github.com/conda/conda-build\r\n * commands under 'conda env':\r\n ==> please file it here!\r\n * all other conda commands that start with 'conda':\r\n ==> please file it here!\r\n\r\nIf you continue on,\r\n\r\n **please include all requested information below.**\r\n\r\nIf a maintainer determines the information is required to understand \r\nyour issue, and if it is not provided, your issue may be\r\nclosed automatically.\r\n\r\n-->\r\n\r\n## Current Behavior\r\n<!-- What actually happens?\r\n If you want to include console output, please use \"Steps to Reproduce\" below. -->\r\n```conda env config``` (or other options that are either incomplete or inappropriate,\r\nsuch as ```conda env config vars```)\r\nshows Error message including \r\n```\r\n Traceback (most recent call last):\r\n File \"/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda/exceptions.py\", line 1078, in __call__\r\n return func(*args, **kwargs)\r\n File \"/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda_env/cli/main.py\", line 76, in do_call\r\n relative_mod, func_name = args.func.rsplit('.', 1)\r\n AttributeError: 'Namespace' object has no attribute 'func'\r\n```\r\nThis seems to be related to argparse behavior?\r\n(p.s. USERNAME is in place for the username currently used, and ROOTUSERNAME is the (main) root username. ANOTHERFOLDER is just another folder.)\r\n\r\n### Steps to Reproduce\r\n<!-- If the current behavior is a bug, please provide specific, minimal steps to independently reproduce.\r\n Include the exact conda commands that reproduce the issue and their output between the ticks below. 
-->\r\n```\r\nconda env config\r\n\r\n# >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<<\r\n\r\n Traceback (most recent call last):\r\n File \"/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda/exceptions.py\", line 1078, in __call__\r\n return func(*args, **kwargs)\r\n File \"/home/USERNAME/miniconda3/lib/python3.7/site-packages/conda_env/cli/main.py\", line 76, in do_call\r\n relative_mod, func_name = args.func.rsplit('.', 1)\r\n AttributeError: 'Namespace' object has no attribute 'func'\r\n\r\n`$ /home/USERNAME/miniconda3/bin/conda-env config vars`\r\n\r\n environment variables:\r\n CIO_TEST=<not set>\r\n CONDA_AUTO_UPDATE_CONDA=false\r\n CONDA_DEFAULT_ENV=base\r\n CONDA_EXE=/home/USERNAME/miniconda3/bin/conda\r\n CONDA_PREFIX=/home/USERNAME/miniconda3\r\n CONDA_PROMPT_MODIFIER=(base)\r\n CONDA_PYTHON_EXE=/home/USERNAME/miniconda3/bin/python\r\n CONDA_ROOT=/home/USERNAME/miniconda3\r\n CONDA_SHLVL=1\r\n PATH=/home/USERNAME/miniconda3/bin:/home/USERNAME/miniconda3/bin:/home/\r\n USERNAME/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/\r\n usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/usr/lib/jvm/\r\n java-8-openjdk-amd64/bin:/usr/lib/jvm/java-8-openjdk-\r\n amd64/jre/bin:/home/ROOTUSERNAME/ANOTHERFOLDER/apache-maven-3.6.0/bin\r\n REQUESTS_CA_BUNDLE=<not set>\r\n SSL_CERT_FILE=<not set>\r\n\r\n active environment : base\r\n active env location : /home/USERNAME/miniconda3\r\n shell level : 1\r\n user config file : /home/USERNAME/.condarc\r\n populated config files : /home/USERNAME/.condarc\r\n conda version : 4.8.1\r\n conda-build version : not installed\r\n python version : 3.7.6.final.0\r\n virtual packages : __cuda=10.1\r\n __glibc=2.27\r\n base environment : /home/USERNAME/miniconda3 (writable)\r\n channel URLs : https://repo.anaconda.com/pkgs/main/linux-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/r/linux-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n package cache : /home/USERNAME/miniconda3/pkgs\r\n /home/USERNAME/.conda/pkgs\r\n envs directories : /home/USERNAME/miniconda3/envs\r\n /home/USERNAME/.conda/envs\r\n platform : linux-64\r\n user-agent : conda/4.8.1 requests/2.22.0 CPython/3.7.6 Linux/4.15.0-74-generic ubuntu/18.04.3 glibc/2.27\r\n UID:GID : 1010:1010\r\n netrc file : None\r\n offline mode : False\r\n```\r\n\r\n\r\n## Expected Behavior\r\n<!-- What do you think should happen? 
-->\r\nshow error message or --help option equivalent output\r\n\r\n## Environment Information\r\n<details open><summary><code>`conda info`</code></summary><p>\r\n<!-- between the ticks below, paste the output of 'conda info' -->\r\n\r\n```\r\n active environment : base\r\n active env location : /home/USERNAME/miniconda3\r\n shell level : 1\r\n user config file : /home/USERNAME/.condarc\r\n populated config files : /home/USERNAME/.condarc\r\n conda version : 4.8.1\r\n conda-build version : not installed\r\n python version : 3.7.6.final.0\r\n virtual packages : __cuda=10.1\r\n __glibc=2.27\r\n base environment : /home/USERNAME/miniconda3 (writable)\r\n channel URLs : https://repo.anaconda.com/pkgs/main/linux-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/r/linux-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n package cache : /home/USERNAME/miniconda3/pkgs\r\n /home/USERNAME/.conda/pkgs\r\n envs directories : /home/USERNAME/miniconda3/envs\r\n /home/USERNAME/.conda/envs\r\n platform : linux-64\r\n user-agent : conda/4.8.1 requests/2.22.0 CPython/3.7.6 Linux/4.15.0-74-generic ubuntu/18.04.3 glibc/2.27\r\n UID:GID : 1010:1010\r\n netrc file : None\r\n offline mode : False\r\n```\r\n</p></details>\r\n\r\n\r\n<details open><summary><code>`conda config --show-sources`</code></summary><p>\r\n<!-- between the ticks below, paste the output of 'conda config --show-sources' -->\r\n\r\n```\r\n==> /home/USERNAME/.condarc <==\r\nchannels:\r\n - defaults\r\n```\r\n</p></details>\r\n\r\n\r\n<details><summary><code>`conda list --show-channel-urls`</code></summary><p>\r\n<!-- between the ticks below, paste the output of 'conda list --show-channel-urls' -->\r\n\r\n```\r\n# packages in environment at /home/USERNAME/miniconda3:\r\n#\r\n# Name Version Build Channel\r\n_libgcc_mutex 0.1 main defaults\r\nasn1crypto 1.3.0 py37_0 defaults\r\nca-certificates 2019.11.27 0 defaults\r\ncertifi 2019.11.28 py37_0 defaults\r\ncffi 1.13.2 py37h2e261b9_0 defaults\r\nchardet 3.0.4 py37_1003 defaults\r\nconda 4.8.1 py37_0 defaults\r\nconda-package-handling 1.6.0 py37h7b6447c_0 defaults\r\ncryptography 2.8 py37h1ba5d50_0 defaults\r\nidna 2.8 py37_0 defaults\r\nld_impl_linux-64 2.33.1 h53a641e_7 defaults\r\nlibedit 3.1.20181209 hc058e9b_0 defaults\r\nlibffi 3.2.1 hd88cf55_4 defaults\r\nlibgcc-ng 9.1.0 hdf63c60_0 defaults\r\nlibstdcxx-ng 9.1.0 hdf63c60_0 defaults\r\nncurses 6.1 he6710b0_1 defaults\r\nopenssl 1.1.1d h7b6447c_3 defaults\r\npip 19.3.1 py37_0 defaults\r\npycosat 0.6.3 py37h7b6447c_0 defaults\r\npycparser 2.19 py37_0 defaults\r\npyopenssl 19.1.0 py37_0 defaults\r\npysocks 1.7.1 py37_0 defaults\r\npython 3.7.6 h0371630_2 defaults\r\nreadline 7.0 h7b6447c_5 defaults\r\nrequests 2.22.0 py37_1 defaults\r\nruamel_yaml 0.15.87 py37h7b6447c_0 defaults\r\nsetuptools 44.0.0 py37_0 defaults\r\nsix 1.13.0 py37_0 defaults\r\nsqlite 3.30.1 h7b6447c_0 defaults\r\ntk 8.6.8 hbc83047_0 defaults\r\ntqdm 4.41.1 py_0 defaults\r\nurllib3 1.25.7 py37_0 defaults\r\nwheel 0.33.6 py37_0 defaults\r\nxz 5.2.4 h14c3975_4 defaults\r\nyaml 0.1.7 had09818_2 defaults\r\nzlib 1.2.11 h7b6447c_3 defaults\r\n```\r\n</p></details>\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nfrom argparse import RawDescriptionHelpFormatter\n\n\nfrom .main_vars import configure_parser as configure_vars_parser\n\nconfig_description = '''\nConfigure a conda environment\n'''\n\nconfig_example = '''\nexamples:\n conda env config vars list\n 
conda env config --append channels conda-forge\n'''\n\ndef configure_parser(sub_parsers):\n config_parser = sub_parsers.add_parser(\n 'config',\n formatter_class=RawDescriptionHelpFormatter,\n description=config_description,\n help=config_description,\n epilog=config_example,\n )\n config_subparser = config_parser.add_subparsers()\n configure_vars_parser(config_subparser)\n", "path": "conda_env/cli/main_config.py"}]}
| 3,395 | 117 |
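Editor's note, outside the record above: the golden diff registers a default `func` on the `config` sub-parser so that a bare `conda env config` dispatches to a handler instead of raising `AttributeError: 'Namespace' object has no attribute 'func'`. The sketch below reproduces that pattern in a standalone script; it uses a plain callable where conda's real dispatcher resolves a dotted string (`'.main_config.execute'`), and the program and handler names are illustrative only.

```python
# Minimal, self-contained sketch of the argparse pattern used in the diff above.
# Assumption: a toy CLI, not conda's actual dispatcher.
import argparse

def execute_config(args, parser):
    # Same fallback as the patch: show the sub-command's help for a bare "config".
    parser.parse_args(["config", "--help"])  # prints help and exits

parser = argparse.ArgumentParser(prog="prog")
subparsers = parser.add_subparsers()
config_parser = subparsers.add_parser("config")
config_parser.set_defaults(func=execute_config)  # default handler for "prog config"
config_subparser = config_parser.add_subparsers()  # deeper sub-commands hang off this

args = parser.parse_args(["config"])
args.func(args, parser)  # without set_defaults this attribute would be missing
```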
gh_patches_debug_23203
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-2339
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Server Error when editing community roles
Reported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production.
Reproduced on test AMY:
```
TypeError at /communityroles/role/25/edit/
the JSON object must be str, bytes or bytearray, not list
Error during template rendering
In template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39
{% crispy_field field %}
```
Also encountered locally when creating a community role and immediately editing it:
```
Exception Value: | 'NoneType' object is not iterable
/home/eli/amy/amy/communityroles/fields.py, line 20, in get_context
```
</issue>
<code>
[start of amy/communityroles/fields.py]
1 import json
2
3 from django import forms
4 from django.http import QueryDict
5 from django.utils.datastructures import MultiValueDict
6
7
8 class CustomKeysWidget(forms.TextInput):
9 template_name = "widgets/custom_keys_widget.html"
10
11 def __init__(self, *args, **kwargs):
12 self.subwidget_form = kwargs.pop("subwidget_form", forms.TextInput)
13 super().__init__(*args, **kwargs)
14
15 def apply_labels(self, labels: list[str]) -> None:
16 self.labels = labels[:]
17
18 def get_context(self, name: str, value: str, attrs: dict):
19 value_deserialized = json.loads(value)
20 value_deserialized_dict = dict(value_deserialized)
21 default_values = dict([(label, "") for label in self.labels])
22 context_value = default_values | value_deserialized_dict
23
24 context = super().get_context(name, context_value, attrs)
25 final_attrs = context["widget"]["attrs"]
26 id_ = context["widget"]["attrs"].get("id")
27
28 subwidgets = []
29 for index, (label, value) in enumerate(context_value.items()):
30 widget_attrs = final_attrs.copy()
31 if id_:
32 widget_attrs["id"] = "{id_}_{index}".format(id_=id_, index=index)
33
34 widget = self.subwidget_form()
35 subwidget_context = widget.get_context(name, value, widget_attrs)["widget"]
36 subwidgets.append(subwidget_context | {"label": label})
37
38 context["widget"]["subwidgets"] = subwidgets
39 return context
40
41 def value_from_datadict(
42 self, data: QueryDict, files: MultiValueDict, name: str
43 ) -> list[tuple[str, str]]:
44 """Prepare structure stored in database. The structure is tied to
45 `CommunityRole.custom_keys` expected format:
46 [
47 (label1, value1),
48 (label2, value2),
49 ...
50 ]
51 """
52 try:
53 values = data.getlist(name)
54 except AttributeError:
55 values = data.get(name, [])
56 return list(zip(self.labels, values))
57
58 def value_omitted_from_data(
59 self, data: QueryDict, files: MultiValueDict, name: str
60 ) -> bool:
61 return False
62
63
64 class CustomKeysJSONField(forms.JSONField):
65 def __init__(self, **kwargs):
66 kwargs.setdefault("widget", CustomKeysWidget)
67 super().__init__(**kwargs)
68
69 def apply_labels(self, labels: list[str]) -> None:
70 self.labels = labels[:]
71 self.widget.apply_labels(self.labels)
72
[end of amy/communityroles/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py
--- a/amy/communityroles/fields.py
+++ b/amy/communityroles/fields.py
@@ -1,9 +1,12 @@
import json
+import logging
from django import forms
from django.http import QueryDict
from django.utils.datastructures import MultiValueDict
+logger = logging.getLogger("amy")
+
class CustomKeysWidget(forms.TextInput):
template_name = "widgets/custom_keys_widget.html"
@@ -17,7 +20,14 @@
def get_context(self, name: str, value: str, attrs: dict):
value_deserialized = json.loads(value)
- value_deserialized_dict = dict(value_deserialized)
+ try:
+ value_deserialized_dict = dict(value_deserialized)
+ except (ValueError, TypeError) as e:
+ logger.debug(
+ f"Failed to load custom key values {value_deserialized} to dict: {e}."
+ )
+ logger.debug("Proceeding without custom key values...")
+ value_deserialized_dict = {}
default_values = dict([(label, "") for label in self.labels])
context_value = default_values | value_deserialized_dict
|
{"golden_diff": "diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py\n--- a/amy/communityroles/fields.py\n+++ b/amy/communityroles/fields.py\n@@ -1,9 +1,12 @@\n import json\n+import logging\n \n from django import forms\n from django.http import QueryDict\n from django.utils.datastructures import MultiValueDict\n \n+logger = logging.getLogger(\"amy\")\n+\n \n class CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n@@ -17,7 +20,14 @@\n \n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n- value_deserialized_dict = dict(value_deserialized)\n+ try:\n+ value_deserialized_dict = dict(value_deserialized)\n+ except (ValueError, TypeError) as e:\n+ logger.debug(\n+ f\"Failed to load custom key values {value_deserialized} to dict: {e}.\"\n+ )\n+ logger.debug(\"Proceeding without custom key values...\")\n+ value_deserialized_dict = {}\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n", "issue": "Server Error when editing community roles\nReported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production.\r\n\r\nReproduced on test AMY:\r\n```\r\nTypeError at /communityroles/role/25/edit/\r\nthe JSON object must be str, bytes or bytearray, not list\r\nError during template rendering\r\nIn template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39\r\n\r\n{% crispy_field field %}\r\n```\r\n\r\nAlso encountered locally when creating a community role and immediately editing it:\r\n```\r\nException Value: | 'NoneType' object is not iterable\r\n/home/eli/amy/amy/communityroles/fields.py, line 20, in get_context\r\n\r\n```\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.http import QueryDict\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.subwidget_form = kwargs.pop(\"subwidget_form\", forms.TextInput)\n super().__init__(*args, **kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n\n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n value_deserialized_dict = dict(value_deserialized)\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n\n context = super().get_context(name, context_value, attrs)\n final_attrs = context[\"widget\"][\"attrs\"]\n id_ = context[\"widget\"][\"attrs\"].get(\"id\")\n\n subwidgets = []\n for index, (label, value) in enumerate(context_value.items()):\n widget_attrs = final_attrs.copy()\n if id_:\n widget_attrs[\"id\"] = \"{id_}_{index}\".format(id_=id_, index=index)\n\n widget = self.subwidget_form()\n subwidget_context = widget.get_context(name, value, widget_attrs)[\"widget\"]\n subwidgets.append(subwidget_context | {\"label\": label})\n\n context[\"widget\"][\"subwidgets\"] = subwidgets\n return context\n\n def value_from_datadict(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> list[tuple[str, str]]:\n \"\"\"Prepare structure stored in database. 
The structure is tied to\n `CommunityRole.custom_keys` expected format:\n [\n (label1, value1),\n (label2, value2),\n ...\n ]\n \"\"\"\n try:\n values = data.getlist(name)\n except AttributeError:\n values = data.get(name, [])\n return list(zip(self.labels, values))\n\n def value_omitted_from_data(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> bool:\n return False\n\n\nclass CustomKeysJSONField(forms.JSONField):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"widget\", CustomKeysWidget)\n super().__init__(**kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n self.widget.apply_labels(self.labels)\n", "path": "amy/communityroles/fields.py"}]}
| 1,392 | 266 |
gh_patches_debug_21536
|
rasdani/github-patches
|
git_diff
|
pymeasure__pymeasure-81
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider installing the tests
Since you decided to embed the test suite inside the package instead of leaving it outside in a common `tests` folder (both of which are totally acceptable and covered by the `pytest` guidelines), why not install the tests with the package? It would give the following benefits:
- Simplify the `setuptools` metadata, by replacing the explicit listing of the modules with a single call to `find_package`,
- Easy testing of the installed package via `python -m pytest --pyargs pymeasure`.
</issue>
<code>
[start of setup.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2017 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25
26 from setuptools import setup
27
28 setup(
29 name='PyMeasure',
30 version='0.4.3',
31 author='PyMeasure Developers',
32 packages=[
33 'pymeasure', 'pymeasure.instruments',
34 'pymeasure.adapters', 'pymeasure.display',
35 'pymeasure.experiment',
36 'pymeasure.instruments.agilent',
37 'pymeasure.instruments.ami',
38 'pymeasure.instruments.anritsu',
39 'pymeasure.instruments.danfysik',
40 'pymeasure.instruments.fwbell',
41 'pymeasure.instruments.hp',
42 'pymeasure.instruments.keithley',
43 'pymeasure.instruments.lakeshore',
44 'pymeasure.instruments.newport',
45 'pymeasure.instruments.parker',
46 'pymeasure.instruments.signalrecovery',
47 'pymeasure.instruments.srs',
48 'pymeasure.instruments.thorlabs',
49 'pymeasure.instruments.tektronix',
50 'pymeasure.instruments.yokogawa',
51 ],
52 scripts=[],
53 url='https://github.com/ralph-group/pymeasure',
54 download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',
55 license='MIT License',
56 description='Scientific measurement library for instruments, experiments, and live-plotting',
57 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
58 install_requires=[
59 "Numpy >= 1.6.1",
60 "pandas >= 0.14",
61 "pyvisa >= 1.8",
62 "pyserial >= 2.7",
63 "pyqtgraph >= 0.9.10"
64 ],
65 classifiers=[
66 "Development Status :: 4 - Beta",
67 "Intended Audience :: Science/Research",
68 "License :: OSI Approved :: MIT License",
69 "Operating System :: MacOS",
70 "Operating System :: Microsoft :: Windows",
71 "Operating System :: POSIX",
72 "Operating System :: Unix",
73 "Programming Language :: Python :: 3 :: Only",
74 "Topic :: Scientific/Engineering",
75 ],
76 keywords="measure instrument experiment control automate graph plot"
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,32 +23,13 @@
#
-from setuptools import setup
+from setuptools import setup, find_packages
setup(
name='PyMeasure',
version='0.4.3',
author='PyMeasure Developers',
- packages=[
- 'pymeasure', 'pymeasure.instruments',
- 'pymeasure.adapters', 'pymeasure.display',
- 'pymeasure.experiment',
- 'pymeasure.instruments.agilent',
- 'pymeasure.instruments.ami',
- 'pymeasure.instruments.anritsu',
- 'pymeasure.instruments.danfysik',
- 'pymeasure.instruments.fwbell',
- 'pymeasure.instruments.hp',
- 'pymeasure.instruments.keithley',
- 'pymeasure.instruments.lakeshore',
- 'pymeasure.instruments.newport',
- 'pymeasure.instruments.parker',
- 'pymeasure.instruments.signalrecovery',
- 'pymeasure.instruments.srs',
- 'pymeasure.instruments.thorlabs',
- 'pymeasure.instruments.tektronix',
- 'pymeasure.instruments.yokogawa',
- ],
+ packages=find_packages(),
scripts=[],
url='https://github.com/ralph-group/pymeasure',
download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,32 +23,13 @@\n #\n \n \n-from setuptools import setup\n+from setuptools import setup, find_packages\n \n setup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n- packages=[\n- 'pymeasure', 'pymeasure.instruments',\n- 'pymeasure.adapters', 'pymeasure.display',\n- 'pymeasure.experiment',\n- 'pymeasure.instruments.agilent',\n- 'pymeasure.instruments.ami',\n- 'pymeasure.instruments.anritsu',\n- 'pymeasure.instruments.danfysik',\n- 'pymeasure.instruments.fwbell',\n- 'pymeasure.instruments.hp',\n- 'pymeasure.instruments.keithley',\n- 'pymeasure.instruments.lakeshore',\n- 'pymeasure.instruments.newport',\n- 'pymeasure.instruments.parker',\n- 'pymeasure.instruments.signalrecovery',\n- 'pymeasure.instruments.srs',\n- 'pymeasure.instruments.thorlabs',\n- 'pymeasure.instruments.tektronix',\n- 'pymeasure.instruments.yokogawa',\n- ],\n+ packages=find_packages(),\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n", "issue": "Consider installing the tests\nSince you decided to embed the test suite inside the package instead of leaving it outside in a common `tests` folder (both of which are totally acceptable and covered by the `pytest` guidelines), why not install the tests with the package? It would give the following benefits:\r\n\r\n- Simplify the `setuptools` metadata, by replacing the explicit listing of the modules with a single call to `find_package`,\r\n- Easy testing of the installed package via `python -m pytest --pyargs pymeasure`.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2017 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup\n\nsetup(\n name='PyMeasure',\n version='0.4.3',\n author='PyMeasure Developers',\n packages=[\n 'pymeasure', 'pymeasure.instruments',\n 'pymeasure.adapters', 'pymeasure.display',\n 'pymeasure.experiment',\n 'pymeasure.instruments.agilent',\n 'pymeasure.instruments.ami',\n 'pymeasure.instruments.anritsu',\n 'pymeasure.instruments.danfysik',\n 'pymeasure.instruments.fwbell',\n 'pymeasure.instruments.hp',\n 'pymeasure.instruments.keithley',\n 'pymeasure.instruments.lakeshore',\n 'pymeasure.instruments.newport',\n 'pymeasure.instruments.parker',\n 'pymeasure.instruments.signalrecovery',\n 'pymeasure.instruments.srs',\n 'pymeasure.instruments.thorlabs',\n 'pymeasure.instruments.tektronix',\n 'pymeasure.instruments.yokogawa',\n ],\n scripts=[],\n url='https://github.com/ralph-group/pymeasure',\n download_url = 'https://github.com/ralph-group/pymeasure/tarball/v0.4.3',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"Numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]}
| 1,525 | 347 |
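Editor's note, outside the record above: the golden diff replaces the hand-maintained package list with `find_packages()`, which also picks up the test packages the issue wants installed. A trimmed sketch of the resulting `setup.py` is below; all metadata other than the name and version from the record is omitted here.

```python
# Reduced sketch of setup.py after the diff above; only the fields relevant
# to the issue are kept.
from setuptools import setup, find_packages

setup(
    name="PyMeasure",
    version="0.4.3",
    packages=find_packages(),  # discovers pymeasure.* including embedded tests
)

# Once the tests ship with the package, the installed copy can be exercised via:
#   python -m pytest --pyargs pymeasure
```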
gh_patches_debug_22658
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-942
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
List of Genres: alphabetization should be case-insensitive
The Lists of Feasts (this includes the list of all feasts as well as only the temporale and only the sanctorale) are ordered by alphabetic order of feast name, except that capital letters are currently taking precedence; a capital "Z" comes before a lowercase "a" and that muddles the system somewhat. Behold:
<img width="1119" alt="NewCantus temporale list" src="https://github.com/DDMAL/CantusDB/assets/83373378/2c4f7d2e-df3c-4a57-8c52-7acbfe96b099">
You can see that the alphabet thing is solid until Ad Processionem, after which the third letter becomes lowercase (Ad aquam benedictio) and the alphabet starts all over again. Maybe this is preferable, but OldCantus doesn't take capitalization into account, so I thought I'd mention it.
This is the same list on OldCantus, for reference:
<img width="1158" alt="OldCantus temporale list" src="https://github.com/DDMAL/CantusDB/assets/83373378/598ba519-a37a-478b-884f-a1ff6b07ee34">
</issue>
<code>
[start of django/cantusdb_project/main_app/views/feast.py]
1 from django.views.generic import DetailView, ListView
2 from main_app.models import Feast, Source
3 from extra_views import SearchableListMixin
4
5 # this categorization is not finalized yet
6 # the feastcode on old cantus requires cleaning
7 # for now we just leave this categorization as it is
8 TEMP_PREFIX = [
9 "01",
10 "02",
11 "03",
12 "04",
13 "05",
14 "06",
15 "07",
16 "08",
17 "09",
18 "10",
19 "11",
20 "16",
21 "17",
22 ]
23 SANC_PREFIX = ["12", "13", "14", "15"]
24
25
26 class FeastDetailView(DetailView):
27 model = Feast
28 context_object_name = "feast"
29 template_name = "feast_detail.html"
30
31 def get_context_data(self, **kwargs):
32 context = super().get_context_data(**kwargs)
33
34 display_unpublished = self.request.user.is_authenticated
35
36 chants_in_feast = self.get_object().chant_set
37 if not display_unpublished:
38 chants_in_feast = chants_in_feast.filter(source__published=True)
39
40 cantus_ids = list(
41 chants_in_feast.values_list("cantus_id", flat=True).distinct()
42 )
43
44 counts = []
45 incipits = []
46 genres = []
47
48 for cantus_id in cantus_ids:
49 chants = chants_in_feast.filter(cantus_id=cantus_id)
50 count = chants.count()
51 incipit = chants.first().incipit
52 genre = chants.first().genre
53
54 counts.append(count)
55 incipits.append(incipit)
56 genres.append(genre)
57
58 zipped = zip(cantus_ids, incipits, genres, counts)
59 zipped = sorted(zipped, key=lambda t: t[3], reverse=True)
60 context["frequent_chants_zip"] = zipped
61
62 source_ids = list(
63 chants_in_feast.values_list("source__id", flat=True).distinct()
64 )
65 sources = Source.objects.filter(id__in=source_ids)
66 counts = [chants_in_feast.filter(source=source).count() for source in sources]
67
68 zipped = zip(sources, counts)
69 zipped = sorted(zipped, key=lambda t: t[1], reverse=True)
70 context["sources_zip"] = zipped
71
72 return context
73
74
75 class FeastListView(SearchableListMixin, ListView):
76 """Searchable List view for Feast model
77
78 Accessed by /feasts/
79
80 When passed a ``?q=<query>`` argument in the GET request, it will filter feasts
81 based on the fields defined in ``search_fields`` with the ``icontains`` lookup
82
83 The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`,
84 which are also passed as GET parameters
85 """
86
87 model = Feast
88 search_fields = ["name", "description", "feast_code"]
89 paginate_by = 100
90 context_object_name = "feasts"
91 template_name = "feast_list.html"
92
93 def get_ordering(self):
94 ordering = self.request.GET.get("sort_by")
95 # feasts can be ordered by name or feast_code,
96 # default to ordering by name if given anything else
97 if ordering not in ["name", "feast_code"]:
98 ordering = "name"
99 return ordering
100
101 def get_queryset(self):
102 queryset = super().get_queryset()
103 date = self.request.GET.get("date")
104 month = self.request.GET.get("month")
105 # temp vs sanc categorization is not finalized yet,
106 # the feastcode needs to be updated by the cantus people
107 if date == "temp":
108 queryset = queryset.filter(prefix__in=TEMP_PREFIX)
109 elif date == "sanc":
110 queryset = queryset.filter(prefix__in=SANC_PREFIX)
111
112 if month and (int(month)) in range(1, 13):
113 month = int(month)
114 queryset = queryset.filter(month=month)
115
116 return queryset
117
[end of django/cantusdb_project/main_app/views/feast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django/cantusdb_project/main_app/views/feast.py b/django/cantusdb_project/main_app/views/feast.py
--- a/django/cantusdb_project/main_app/views/feast.py
+++ b/django/cantusdb_project/main_app/views/feast.py
@@ -1,4 +1,5 @@
from django.views.generic import DetailView, ListView
+from django.db.models.functions import Lower
from main_app.models import Feast, Source
from extra_views import SearchableListMixin
@@ -90,13 +91,14 @@
context_object_name = "feasts"
template_name = "feast_list.html"
- def get_ordering(self):
+ def get_ordering(self) -> tuple:
ordering = self.request.GET.get("sort_by")
# feasts can be ordered by name or feast_code,
# default to ordering by name if given anything else
if ordering not in ["name", "feast_code"]:
ordering = "name"
- return ordering
+ # case insensitive ordering by name
+ return (Lower(ordering),) if ordering == "name" else (ordering,)
def get_queryset(self):
queryset = super().get_queryset()
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/feast.py b/django/cantusdb_project/main_app/views/feast.py\n--- a/django/cantusdb_project/main_app/views/feast.py\n+++ b/django/cantusdb_project/main_app/views/feast.py\n@@ -1,4 +1,5 @@\n from django.views.generic import DetailView, ListView\n+from django.db.models.functions import Lower\n from main_app.models import Feast, Source\n from extra_views import SearchableListMixin\n \n@@ -90,13 +91,14 @@\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n \n- def get_ordering(self):\n+ def get_ordering(self) -> tuple:\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n- return ordering\n+ # case insensitive ordering by name\n+ return (Lower(ordering),) if ordering == \"name\" else (ordering,)\n \n def get_queryset(self):\n queryset = super().get_queryset()\n", "issue": "List of Genres: alphabetization should be case-insensitive\nThe Lists of Feasts (this includes the list of all feasts as well as only the temporale and only the sanctorale) are ordered by alphabetic order of feast name, except that capital letters are currently taking precedence; a capital \"Z\" comes before a lowercase \"a\" and that muddles the system somewhat. Behold:\r\n\r\n<img width=\"1119\" alt=\"NewCantus temporale list\" src=\"https://github.com/DDMAL/CantusDB/assets/83373378/2c4f7d2e-df3c-4a57-8c52-7acbfe96b099\">\r\n\r\nYou can see that the alphabet thing is solid until Ad Processionem, after which the third letter becomes lowercase (Ad aquam benedictio) and the alphabet starts all over again. Maybe this is preferable, but OldCantus doesn't take capitalization into account, so I thought I'd mention it.\r\n\r\nThis is the same list on OldCantus, for reference: \r\n\r\n<img width=\"1158\" alt=\"OldCantus temporale list\" src=\"https://github.com/DDMAL/CantusDB/assets/83373378/598ba519-a37a-478b-884f-a1ff6b07ee34\">\r\n\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView\nfrom main_app.models import Feast, Source\nfrom extra_views import SearchableListMixin\n\n# this categorization is not finalized yet\n# the feastcode on old cantus requires cleaning\n# for now we just leave this categorization as it is\nTEMP_PREFIX = [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"16\",\n \"17\",\n]\nSANC_PREFIX = [\"12\", \"13\", \"14\", \"15\"]\n\n\nclass FeastDetailView(DetailView):\n model = Feast\n context_object_name = \"feast\"\n template_name = \"feast_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n display_unpublished = self.request.user.is_authenticated\n\n chants_in_feast = self.get_object().chant_set\n if not display_unpublished:\n chants_in_feast = chants_in_feast.filter(source__published=True)\n\n cantus_ids = list(\n chants_in_feast.values_list(\"cantus_id\", flat=True).distinct()\n )\n\n counts = []\n incipits = []\n genres = []\n\n for cantus_id in cantus_ids:\n chants = chants_in_feast.filter(cantus_id=cantus_id)\n count = chants.count()\n incipit = chants.first().incipit\n genre = chants.first().genre\n\n counts.append(count)\n incipits.append(incipit)\n genres.append(genre)\n\n zipped = zip(cantus_ids, incipits, genres, counts)\n zipped = sorted(zipped, key=lambda t: t[3], reverse=True)\n 
context[\"frequent_chants_zip\"] = zipped\n\n source_ids = list(\n chants_in_feast.values_list(\"source__id\", flat=True).distinct()\n )\n sources = Source.objects.filter(id__in=source_ids)\n counts = [chants_in_feast.filter(source=source).count() for source in sources]\n\n zipped = zip(sources, counts)\n zipped = sorted(zipped, key=lambda t: t[1], reverse=True)\n context[\"sources_zip\"] = zipped\n\n return context\n\n\nclass FeastListView(SearchableListMixin, ListView):\n \"\"\"Searchable List view for Feast model\n\n Accessed by /feasts/\n\n When passed a ``?q=<query>`` argument in the GET request, it will filter feasts\n based on the fields defined in ``search_fields`` with the ``icontains`` lookup\n\n The feasts can also be filtered by `date` (temp/sanc) and `month` and ordered by `sort_by`,\n which are also passed as GET parameters\n \"\"\"\n\n model = Feast\n search_fields = [\"name\", \"description\", \"feast_code\"]\n paginate_by = 100\n context_object_name = \"feasts\"\n template_name = \"feast_list.html\"\n\n def get_ordering(self):\n ordering = self.request.GET.get(\"sort_by\")\n # feasts can be ordered by name or feast_code,\n # default to ordering by name if given anything else\n if ordering not in [\"name\", \"feast_code\"]:\n ordering = \"name\"\n return ordering\n\n def get_queryset(self):\n queryset = super().get_queryset()\n date = self.request.GET.get(\"date\")\n month = self.request.GET.get(\"month\")\n # temp vs sanc categorization is not finalized yet,\n # the feastcode needs to be updated by the cantus people\n if date == \"temp\":\n queryset = queryset.filter(prefix__in=TEMP_PREFIX)\n elif date == \"sanc\":\n queryset = queryset.filter(prefix__in=SANC_PREFIX)\n\n if month and (int(month)) in range(1, 13):\n month = int(month)\n queryset = queryset.filter(month=month)\n\n return queryset\n", "path": "django/cantusdb_project/main_app/views/feast.py"}]}
| 2,002 | 271 |
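Editor's note, outside the record above: the golden diff switches `FeastListView.get_ordering` to wrap the name ordering in `Lower()`, which is what makes the feast list sort case-insensitively. Condensed to its core below; the queryset call at the end is an assumed usage example.

```python
# Core of the diff above: case-insensitive ordering via Lower().
from django.db.models.functions import Lower

def get_ordering(sort_by: str) -> tuple:
    if sort_by not in ["name", "feast_code"]:
        sort_by = "name"
    # Lower() keeps "Ad aquam benedictio" next to "Ad Processionem" in one sequence.
    return (Lower(sort_by),) if sort_by == "name" else (sort_by,)

# Assumed usage: Feast.objects.order_by(*get_ordering("name"))
```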
gh_patches_debug_8438
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1800
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AvalonSTPkts hexdump needs a minor fix now that the transaction is of type bytes rather than string
When DEBUG logging level is enabled, the `hexdump` call in `_monitor_recv` function of AvalonSTPkts calls hexdump after converting the `pkt` in to `str` which is no longer needed
</issue>
<code>
[start of cocotb/monitors/avalon.py]
1 # Copyright (c) 2013 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """Monitors for Intel Avalon interfaces.
29
30 See https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/manual/mnl_avalon_spec_1_3.pdf
31
32 NB Currently we only support a very small subset of functionality.
33 """
34
35 import warnings
36
37 from cocotb.utils import hexdump
38 from cocotb.decorators import coroutine
39 from cocotb.monitors import BusMonitor
40 from cocotb.triggers import RisingEdge, ReadOnly
41 from cocotb.binary import BinaryValue
42
43
44 class AvalonProtocolError(Exception):
45 pass
46
47
48 class AvalonST(BusMonitor):
49 """Avalon-ST bus.
50
51 Non-packetized so each valid word is a separate transaction.
52 """
53
54 _signals = ["valid", "data"]
55 _optional_signals = ["ready"]
56
57 _default_config = {"firstSymbolInHighOrderBits": True}
58
59 def __init__(self, entity, name, clock, *, config={}, **kwargs):
60 BusMonitor.__init__(self, entity, name, clock, **kwargs)
61
62 self.config = self._default_config.copy()
63
64 for configoption, value in config.items():
65 self.config[configoption] = value
66 self.log.debug("Setting config option %s to %s", configoption, str(value))
67
68 @coroutine
69 def _monitor_recv(self):
70 """Watch the pins and reconstruct transactions."""
71
72 # Avoid spurious object creation by recycling
73 clkedge = RisingEdge(self.clock)
74 rdonly = ReadOnly()
75
76 def valid():
77 if hasattr(self.bus, "ready"):
78 return self.bus.valid.value and self.bus.ready.value
79 return self.bus.valid.value
80
81 # NB could yield on valid here more efficiently?
82 while True:
83 yield clkedge
84 yield rdonly
85 if valid():
86 vec = self.bus.data.value
87 vec.big_endian = self.config["firstSymbolInHighOrderBits"]
88 self._recv(vec.buff)
89
90
91 class AvalonSTPkts(BusMonitor):
92 """Packetized Avalon-ST bus.
93
94 Args:
95 entity, name, clock: see :class:`BusMonitor`
96 config (dict): bus configuration options
97 report_channel (bool): report channel with data, default is False
98 Setting to True on bus without channel signal will give an error
99 """
100
101 _signals = ["valid", "data", "startofpacket", "endofpacket"]
102 _optional_signals = ["error", "channel", "ready", "empty"]
103
104 _default_config = {
105 "dataBitsPerSymbol" : 8,
106 "firstSymbolInHighOrderBits" : True,
107 "maxChannel" : 0,
108 "readyLatency" : 0,
109 "invalidTimeout" : 0,
110 }
111
112 def __init__(self, entity, name, clock, *, config={}, report_channel=False, **kwargs):
113 BusMonitor.__init__(self, entity, name , clock, **kwargs)
114
115 self.config = self._default_config.copy()
116 self.report_channel = report_channel
117
118 # Set default config maxChannel to max value on channel bus
119 if hasattr(self.bus, 'channel'):
120 self.config['maxChannel'] = (2 ** len(self.bus.channel)) -1
121 else:
122 if report_channel:
123 raise ValueError("Channel reporting asked on bus without channel signal")
124
125 for configoption, value in config.items():
126 self.config[configoption] = value
127 self.log.debug("Setting config option %s to %s",
128 configoption, str(value))
129
130 num_data_symbols = (len(self.bus.data) /
131 self.config["dataBitsPerSymbol"])
132 if (num_data_symbols > 1 and not hasattr(self.bus, 'empty')):
133 raise AttributeError(
134 "%s has %i data symbols, but contains no object named empty" %
135 (self.name, num_data_symbols))
136
137 self.config["useEmpty"] = (num_data_symbols > 1)
138
139 if hasattr(self.bus, 'channel'):
140 if len(self.bus.channel) > 128:
141 raise AttributeError("AvalonST interface specification defines channel width as 1-128. "
142 "%d channel width is %d" %
143 (self.name, len(self.bus.channel)))
144 maxChannel = (2 ** len(self.bus.channel)) -1
145 if self.config['maxChannel'] > maxChannel:
146 raise AttributeError("%s has maxChannel=%d, but can only support a maximum channel of "
147 "(2**channel_width)-1=%d, channel_width=%d" %
148 (self.name, self.config['maxChannel'], maxChannel, len(self.bus.channel)))
149
150 @coroutine
151 def _monitor_recv(self):
152 """Watch the pins and reconstruct transactions."""
153
154 # Avoid spurious object creation by recycling
155 clkedge = RisingEdge(self.clock)
156 rdonly = ReadOnly()
157 pkt = b""
158 in_pkt = False
159 invalid_cyclecount = 0
160 channel = None
161
162 def valid():
163 if hasattr(self.bus, 'ready'):
164 return self.bus.valid.value and self.bus.ready.value
165 return self.bus.valid.value
166
167 while True:
168 yield clkedge
169 yield rdonly
170
171 if self.in_reset:
172 continue
173
174 if valid():
175 invalid_cyclecount = 0
176
177 if self.bus.startofpacket.value:
178 if pkt:
179 raise AvalonProtocolError("Duplicate start-of-packet received on %s" %
180 str(self.bus.startofpacket))
181 pkt = b""
182 in_pkt = True
183
184 if not in_pkt:
185 raise AvalonProtocolError("Data transfer outside of "
186 "packet")
187
188 # Handle empty and X's in empty / data
189 vec = BinaryValue()
190 if not self.bus.endofpacket.value:
191 vec = self.bus.data.value
192 else:
193 value = self.bus.data.value.get_binstr()
194 if self.config["useEmpty"] and self.bus.empty.value.integer:
195 empty = self.bus.empty.value.integer * self.config["dataBitsPerSymbol"]
196 if self.config["firstSymbolInHighOrderBits"]:
197 value = value[:-empty]
198 else:
199 value = value[empty:]
200 vec.assign(value)
201 if not vec.is_resolvable:
202 raise AvalonProtocolError("After empty masking value is still bad? "
203 "Had empty {:d}, got value {:s}".format(empty,
204 self.bus.data.value.get_binstr()))
205
206 vec.big_endian = self.config['firstSymbolInHighOrderBits']
207 pkt += vec.buff
208
209 if hasattr(self.bus, 'channel'):
210 if channel is None:
211 channel = self.bus.channel.value.integer
212 if channel > self.config["maxChannel"]:
213 raise AvalonProtocolError("Channel value (%d) is greater than maxChannel (%d)" %
214 (channel, self.config["maxChannel"]))
215 elif self.bus.channel.value.integer != channel:
216 raise AvalonProtocolError("Channel value changed during packet")
217
218 if self.bus.endofpacket.value:
219 self.log.info("Received a packet of %d bytes", len(pkt))
220 self.log.debug(hexdump(str((pkt))))
221 self.channel = channel
222 if self.report_channel:
223 self._recv({"data": pkt, "channel": channel})
224 else:
225 self._recv(pkt)
226 pkt = b""
227 in_pkt = False
228 channel = None
229 else:
230 if in_pkt:
231 invalid_cyclecount += 1
232 if self.config["invalidTimeout"]:
233 if invalid_cyclecount >= self.config["invalidTimeout"]:
234 raise AvalonProtocolError(
235 "In-Packet Timeout. Didn't receive any valid data for %d cycles!" %
236 invalid_cyclecount)
237
238
239 class AvalonSTPktsWithChannel(AvalonSTPkts):
240 """Packetized AvalonST bus using channel.
241
242 This class is deprecated. Use AvalonSTPkts(..., report_channel=True, ...)
243 """
244
245 def __init__(self, entity, name, clock, **kwargs):
246 warnings.warn(
247 "Use of AvalonSTPktsWithChannel is deprecated\n"
248 "\tUse AvalonSTPkts(..., report_channel=True, ...)",
249 DeprecationWarning, stacklevel=2
250 )
251 AvalonSTPkts.__init__(self, entity, name, clock, report_channel=True, **kwargs)
252
[end of cocotb/monitors/avalon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cocotb/monitors/avalon.py b/cocotb/monitors/avalon.py
--- a/cocotb/monitors/avalon.py
+++ b/cocotb/monitors/avalon.py
@@ -217,7 +217,7 @@
if self.bus.endofpacket.value:
self.log.info("Received a packet of %d bytes", len(pkt))
- self.log.debug(hexdump(str((pkt))))
+ self.log.debug(hexdump(pkt))
self.channel = channel
if self.report_channel:
self._recv({"data": pkt, "channel": channel})
|
{"golden_diff": "diff --git a/cocotb/monitors/avalon.py b/cocotb/monitors/avalon.py\n--- a/cocotb/monitors/avalon.py\n+++ b/cocotb/monitors/avalon.py\n@@ -217,7 +217,7 @@\n \n if self.bus.endofpacket.value:\n self.log.info(\"Received a packet of %d bytes\", len(pkt))\n- self.log.debug(hexdump(str((pkt))))\n+ self.log.debug(hexdump(pkt))\n self.channel = channel\n if self.report_channel:\n self._recv({\"data\": pkt, \"channel\": channel})\n", "issue": "AvalonSTPkts hexdump needs a minor fix now that the transaction is of type bytes rather than string\nWhen DEBUG logging level is enabled, the `hexdump` call in `_monitor_recv` function of AvalonSTPkts calls hexdump after converting the `pkt` in to `str` which is no longer needed\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Monitors for Intel Avalon interfaces.\n\nSee https://www.intel.com/content/dam/www/programmable/us/en/pdfs/literature/manual/mnl_avalon_spec_1_3.pdf\n\nNB Currently we only support a very small subset of functionality.\n\"\"\"\n\nimport warnings\n\nfrom cocotb.utils import hexdump\nfrom cocotb.decorators import coroutine\nfrom cocotb.monitors import BusMonitor\nfrom cocotb.triggers import RisingEdge, ReadOnly\nfrom cocotb.binary import BinaryValue\n\n\nclass AvalonProtocolError(Exception):\n pass\n\n\nclass AvalonST(BusMonitor):\n \"\"\"Avalon-ST bus.\n\n Non-packetized so each valid word is a separate transaction.\n \"\"\"\n\n _signals = [\"valid\", \"data\"]\n _optional_signals = [\"ready\"]\n\n _default_config = {\"firstSymbolInHighOrderBits\": True}\n\n def __init__(self, entity, name, clock, *, config={}, **kwargs):\n BusMonitor.__init__(self, entity, name, clock, **kwargs)\n\n self.config = self._default_config.copy()\n\n for configoption, value in config.items():\n self.config[configoption] = value\n self.log.debug(\"Setting config option %s to %s\", configoption, str(value))\n\n @coroutine\n def _monitor_recv(self):\n \"\"\"Watch the pins and reconstruct transactions.\"\"\"\n\n # Avoid spurious object creation by recycling\n clkedge = RisingEdge(self.clock)\n rdonly = ReadOnly()\n\n def valid():\n if hasattr(self.bus, \"ready\"):\n return self.bus.valid.value and self.bus.ready.value\n return self.bus.valid.value\n\n # NB could yield on valid here more efficiently?\n while True:\n yield clkedge\n yield rdonly\n if valid():\n vec = self.bus.data.value\n vec.big_endian = self.config[\"firstSymbolInHighOrderBits\"]\n self._recv(vec.buff)\n\n\nclass AvalonSTPkts(BusMonitor):\n \"\"\"Packetized Avalon-ST bus.\n\n Args:\n entity, name, clock: see :class:`BusMonitor`\n config (dict): bus configuration options\n report_channel (bool): report channel with data, default is False\n Setting to True on bus without channel signal will give an error\n \"\"\"\n\n _signals = [\"valid\", \"data\", \"startofpacket\", \"endofpacket\"]\n _optional_signals = [\"error\", \"channel\", \"ready\", \"empty\"]\n\n _default_config = {\n \"dataBitsPerSymbol\" : 8,\n \"firstSymbolInHighOrderBits\" : True,\n \"maxChannel\" : 0,\n \"readyLatency\" : 0,\n \"invalidTimeout\" : 0,\n }\n\n def __init__(self, entity, name, clock, *, config={}, report_channel=False, **kwargs):\n BusMonitor.__init__(self, entity, name , clock, **kwargs)\n\n self.config = self._default_config.copy()\n self.report_channel = report_channel\n\n # Set default config maxChannel to max value on channel bus\n if hasattr(self.bus, 'channel'):\n self.config['maxChannel'] = (2 ** len(self.bus.channel)) -1\n else:\n if report_channel:\n raise ValueError(\"Channel reporting asked on bus without channel signal\")\n\n for configoption, value in config.items():\n self.config[configoption] = value\n self.log.debug(\"Setting config option %s to %s\",\n configoption, str(value))\n\n num_data_symbols = (len(self.bus.data) /\n 
self.config[\"dataBitsPerSymbol\"])\n if (num_data_symbols > 1 and not hasattr(self.bus, 'empty')):\n raise AttributeError(\n \"%s has %i data symbols, but contains no object named empty\" %\n (self.name, num_data_symbols))\n\n self.config[\"useEmpty\"] = (num_data_symbols > 1)\n\n if hasattr(self.bus, 'channel'):\n if len(self.bus.channel) > 128:\n raise AttributeError(\"AvalonST interface specification defines channel width as 1-128. \"\n \"%d channel width is %d\" %\n (self.name, len(self.bus.channel)))\n maxChannel = (2 ** len(self.bus.channel)) -1\n if self.config['maxChannel'] > maxChannel:\n raise AttributeError(\"%s has maxChannel=%d, but can only support a maximum channel of \"\n \"(2**channel_width)-1=%d, channel_width=%d\" %\n (self.name, self.config['maxChannel'], maxChannel, len(self.bus.channel)))\n\n @coroutine\n def _monitor_recv(self):\n \"\"\"Watch the pins and reconstruct transactions.\"\"\"\n\n # Avoid spurious object creation by recycling\n clkedge = RisingEdge(self.clock)\n rdonly = ReadOnly()\n pkt = b\"\"\n in_pkt = False\n invalid_cyclecount = 0\n channel = None\n\n def valid():\n if hasattr(self.bus, 'ready'):\n return self.bus.valid.value and self.bus.ready.value\n return self.bus.valid.value\n\n while True:\n yield clkedge\n yield rdonly\n\n if self.in_reset:\n continue\n\n if valid():\n invalid_cyclecount = 0\n\n if self.bus.startofpacket.value:\n if pkt:\n raise AvalonProtocolError(\"Duplicate start-of-packet received on %s\" %\n str(self.bus.startofpacket))\n pkt = b\"\"\n in_pkt = True\n\n if not in_pkt:\n raise AvalonProtocolError(\"Data transfer outside of \"\n \"packet\")\n\n # Handle empty and X's in empty / data\n vec = BinaryValue()\n if not self.bus.endofpacket.value:\n vec = self.bus.data.value\n else:\n value = self.bus.data.value.get_binstr()\n if self.config[\"useEmpty\"] and self.bus.empty.value.integer:\n empty = self.bus.empty.value.integer * self.config[\"dataBitsPerSymbol\"]\n if self.config[\"firstSymbolInHighOrderBits\"]:\n value = value[:-empty]\n else:\n value = value[empty:]\n vec.assign(value)\n if not vec.is_resolvable:\n raise AvalonProtocolError(\"After empty masking value is still bad? \"\n \"Had empty {:d}, got value {:s}\".format(empty,\n self.bus.data.value.get_binstr()))\n\n vec.big_endian = self.config['firstSymbolInHighOrderBits']\n pkt += vec.buff\n\n if hasattr(self.bus, 'channel'):\n if channel is None:\n channel = self.bus.channel.value.integer\n if channel > self.config[\"maxChannel\"]:\n raise AvalonProtocolError(\"Channel value (%d) is greater than maxChannel (%d)\" %\n (channel, self.config[\"maxChannel\"]))\n elif self.bus.channel.value.integer != channel:\n raise AvalonProtocolError(\"Channel value changed during packet\")\n\n if self.bus.endofpacket.value:\n self.log.info(\"Received a packet of %d bytes\", len(pkt))\n self.log.debug(hexdump(str((pkt))))\n self.channel = channel\n if self.report_channel:\n self._recv({\"data\": pkt, \"channel\": channel})\n else:\n self._recv(pkt)\n pkt = b\"\"\n in_pkt = False\n channel = None\n else:\n if in_pkt:\n invalid_cyclecount += 1\n if self.config[\"invalidTimeout\"]:\n if invalid_cyclecount >= self.config[\"invalidTimeout\"]:\n raise AvalonProtocolError(\n \"In-Packet Timeout. Didn't receive any valid data for %d cycles!\" %\n invalid_cyclecount)\n\n\nclass AvalonSTPktsWithChannel(AvalonSTPkts):\n \"\"\"Packetized AvalonST bus using channel.\n\n This class is deprecated. 
Use AvalonSTPkts(..., report_channel=True, ...)\n \"\"\"\n\n def __init__(self, entity, name, clock, **kwargs):\n warnings.warn(\n \"Use of AvalonSTPktsWithChannel is deprecated\\n\"\n \"\\tUse AvalonSTPkts(..., report_channel=True, ...)\",\n DeprecationWarning, stacklevel=2\n )\n AvalonSTPkts.__init__(self, entity, name, clock, report_channel=True, **kwargs)\n", "path": "cocotb/monitors/avalon.py"}]}
| 3,400 | 142 |
gh_patches_debug_5544
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-2777
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop contributor profile URL with email addresses
We should drop support for accessing contributor profiles by using user email addresses in the URLs. This feature, while not used internally in the app, leaks otherwise hidden user email addresses on sites like the web.archive.org index.
This is particularly problematic for users without any contributions.
</issue>
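A quick illustrative sketch (editorial addition, not code from the Pontoon repository): once the email route is gone, profile paths are matched only by the username converter's pattern. The regex below is copied from the UsernameConverter shown above; the surrounding routing logic is a simplified stand-in, not Django's or Pontoon's actual resolver.

    import re

    # Pattern reuses the UsernameConverter regex; anchoring and the path prefix
    # are stand-ins for the URL resolver.
    USERNAME_PATTERN = re.compile(r"^/contributors/(?P<username>[\w.@+-]+)/$")

    def resolve_profile(path):
        """Return the username a profile path would resolve to, or None."""
        match = USERNAME_PATTERN.match(path)
        return match.group("username") if match else None

    print(resolve_profile("/contributors/some.user/"))  # 'some.user'
    print(resolve_profile("/contributors/"))             # None (empty username)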
<code>
[start of pontoon/contributors/urls.py]
1 from django.urls import path, register_converter
2 from django.urls.converters import StringConverter
3 from django.views.generic import RedirectView
4
5 from . import views
6
7
8 class EmailConverter(StringConverter):
9 regex = r"[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}"
10
11
12 class UsernameConverter(StringConverter):
13 regex = r"[\w.@+-]+"
14
15
16 register_converter(EmailConverter, "email")
17 register_converter(UsernameConverter, "username")
18
19 urlpatterns = [
20 # Legacy: Redirect to /contributors/email
21 path(
22 "contributor/<email:email>/",
23 RedirectView.as_view(url="/contributors/%(email)s/", permanent=True),
24 ),
25 # List contributors
26 path(
27 "contributors/",
28 views.ContributorsView.as_view(),
29 name="pontoon.contributors",
30 ),
31 # Contributor profile by email
32 path(
33 "contributors/<email:email>/",
34 views.contributor_email,
35 name="pontoon.contributors.contributor.email",
36 ),
37 # Contributor profile by username
38 path(
39 "contributors/<username:username>/",
40 views.contributor_username,
41 name="pontoon.contributors.contributor.username",
42 ),
43 # Verify email address
44 path(
45 "verify-email-address/<str:token>/",
46 views.verify_email_address,
47 name="pontoon.contributors.verify.email",
48 ),
49 # Current user's profile
50 path("profile/", views.profile, name="pontoon.contributors.profile"),
51 # Current user's settings
52 path("settings/", views.settings, name="pontoon.contributors.settings"),
53 # Current user's notifications
54 path(
55 "notifications/",
56 views.notifications,
57 name="pontoon.contributors.notifications",
58 ),
59 # Mark current user's notifications as read
60 path(
61 "notifications/mark-all-as-read/",
62 views.mark_all_notifications_as_read,
63 name="pontoon.contributors.notifications.mark.all.as.read",
64 ),
65 # API: Toggle user profile attribute
66 path(
67 "api/v1/user/<username:username>/",
68 views.toggle_user_profile_attribute,
69 name="pontoon.contributors.toggle_user_profile_attribute",
70 ),
71 # AJAX: Save custom homepage
72 path(
73 "save-custom-homepage/",
74 views.save_custom_homepage,
75 name="pontoon.contributors.save_custom_homepage",
76 ),
77 # AJAX: Save preferred source locale
78 path(
79 "save-preferred-source-locale/",
80 views.save_preferred_source_locale,
81 name="pontoon.contributors.save_preferred_source_locale",
82 ),
83 # AJAX: Dismiss Add-On Promotion
84 path(
85 "dismiss-addon-promotion/",
86 views.dismiss_addon_promotion,
87 name="pontoon.contributors.dismiss_addon_promotion",
88 ),
89 # AJAX: Update contribution graph
90 path(
91 "update-contribution-graph/",
92 views.update_contribution_graph,
93 name="pontoon.contributors.update_contribution_graph",
94 ),
95 # AJAX: Update contribution timeline
96 path(
97 "update-contribution-timeline/",
98 views.update_contribution_timeline,
99 name="pontoon.contributors.update_contribution_timeline",
100 ),
101 ]
102
[end of pontoon/contributors/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pontoon/contributors/urls.py b/pontoon/contributors/urls.py
--- a/pontoon/contributors/urls.py
+++ b/pontoon/contributors/urls.py
@@ -28,12 +28,6 @@
views.ContributorsView.as_view(),
name="pontoon.contributors",
),
- # Contributor profile by email
- path(
- "contributors/<email:email>/",
- views.contributor_email,
- name="pontoon.contributors.contributor.email",
- ),
# Contributor profile by username
path(
"contributors/<username:username>/",
|
{"golden_diff": "diff --git a/pontoon/contributors/urls.py b/pontoon/contributors/urls.py\n--- a/pontoon/contributors/urls.py\n+++ b/pontoon/contributors/urls.py\n@@ -28,12 +28,6 @@\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n- # Contributor profile by email\n- path(\n- \"contributors/<email:email>/\",\n- views.contributor_email,\n- name=\"pontoon.contributors.contributor.email\",\n- ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n", "issue": "Drop contributor profile URL with email addresses\nWe should drop support for accessing contributor profiles by using user email addresses in the URLs. This feature, while not used internally in the app, leaks otherwise hidden user email addressed on sites like on web.archive.org index.\r\n\r\nThis is particularly problematic for users without any contributions.\r\n\n", "before_files": [{"content": "from django.urls import path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.views.generic import RedirectView\n\nfrom . import views\n\n\nclass EmailConverter(StringConverter):\n regex = r\"[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}\"\n\n\nclass UsernameConverter(StringConverter):\n regex = r\"[\\w.@+-]+\"\n\n\nregister_converter(EmailConverter, \"email\")\nregister_converter(UsernameConverter, \"username\")\n\nurlpatterns = [\n # Legacy: Redirect to /contributors/email\n path(\n \"contributor/<email:email>/\",\n RedirectView.as_view(url=\"/contributors/%(email)s/\", permanent=True),\n ),\n # List contributors\n path(\n \"contributors/\",\n views.ContributorsView.as_view(),\n name=\"pontoon.contributors\",\n ),\n # Contributor profile by email\n path(\n \"contributors/<email:email>/\",\n views.contributor_email,\n name=\"pontoon.contributors.contributor.email\",\n ),\n # Contributor profile by username\n path(\n \"contributors/<username:username>/\",\n views.contributor_username,\n name=\"pontoon.contributors.contributor.username\",\n ),\n # Verify email address\n path(\n \"verify-email-address/<str:token>/\",\n views.verify_email_address,\n name=\"pontoon.contributors.verify.email\",\n ),\n # Current user's profile\n path(\"profile/\", views.profile, name=\"pontoon.contributors.profile\"),\n # Current user's settings\n path(\"settings/\", views.settings, name=\"pontoon.contributors.settings\"),\n # Current user's notifications\n path(\n \"notifications/\",\n views.notifications,\n name=\"pontoon.contributors.notifications\",\n ),\n # Mark current user's notifications as read\n path(\n \"notifications/mark-all-as-read/\",\n views.mark_all_notifications_as_read,\n name=\"pontoon.contributors.notifications.mark.all.as.read\",\n ),\n # API: Toggle user profile attribute\n path(\n \"api/v1/user/<username:username>/\",\n views.toggle_user_profile_attribute,\n name=\"pontoon.contributors.toggle_user_profile_attribute\",\n ),\n # AJAX: Save custom homepage\n path(\n \"save-custom-homepage/\",\n views.save_custom_homepage,\n name=\"pontoon.contributors.save_custom_homepage\",\n ),\n # AJAX: Save preferred source locale\n path(\n \"save-preferred-source-locale/\",\n views.save_preferred_source_locale,\n name=\"pontoon.contributors.save_preferred_source_locale\",\n ),\n # AJAX: Dismiss Add-On Promotion\n path(\n \"dismiss-addon-promotion/\",\n views.dismiss_addon_promotion,\n name=\"pontoon.contributors.dismiss_addon_promotion\",\n ),\n # AJAX: Update contribution graph\n path(\n \"update-contribution-graph/\",\n views.update_contribution_graph,\n 
name=\"pontoon.contributors.update_contribution_graph\",\n ),\n # AJAX: Update contribution timeline\n path(\n \"update-contribution-timeline/\",\n views.update_contribution_timeline,\n name=\"pontoon.contributors.update_contribution_timeline\",\n ),\n]\n", "path": "pontoon/contributors/urls.py"}]}
| 1,483 | 145 |
gh_patches_debug_40422
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-686
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setting transaction name in pyramid
Hi, is it possible to set the transaction name when using PyramidIntegration?
I tried setting `scope.transaction = "mycustomname"`, but it seems that the integration itself does:
try:
if integration.transaction_style == "route_name":
event["transaction"] = request.matched_route.name
elif integration.transaction_style == "route_pattern":
event["transaction"] = request.matched_route.pattern
except Exception:
pass
And I suspect that overwrites anything I've set up earlier?
</issue>
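A tiny standalone sketch of the behaviour the reporter suspects (hypothetical names, not the SDK's real event pipeline): an event processor that assigns event["transaction"] unconditionally wins over a value placed on the scope beforehand.

    # Simplified, hypothetical pipeline: the scope value is applied first, then
    # event processors run, so an unconditional assignment in a processor
    # overwrites whatever the user set on the scope earlier.
    def apply(event, scope_transaction, processors):
        if scope_transaction is not None:
            event.setdefault("transaction", scope_transaction)
        for processor in processors:
            event = processor(event)
        return event

    def integration_processor(event):
        event["transaction"] = "route_name_from_integration"
        return event

    print(apply({}, "mycustomname", [integration_processor]))
    # -> {'transaction': 'route_name_from_integration'}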
<code>
[start of sentry_sdk/integrations/pyramid.py]
1 from __future__ import absolute_import
2
3 import os
4 import sys
5 import weakref
6
7 from pyramid.httpexceptions import HTTPException
8 from pyramid.request import Request
9
10 from sentry_sdk.hub import Hub, _should_send_default_pii
11 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
12 from sentry_sdk._compat import reraise, iteritems
13
14 from sentry_sdk.integrations import Integration
15 from sentry_sdk.integrations._wsgi_common import RequestExtractor
16 from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
17
18 from sentry_sdk._types import MYPY
19
20 if MYPY:
21 from pyramid.response import Response
22 from typing import Any
23 from sentry_sdk.integrations.wsgi import _ScopedResponse
24 from typing import Callable
25 from typing import Dict
26 from typing import Optional
27 from webob.cookies import RequestCookies # type: ignore
28 from webob.compat import cgi_FieldStorage # type: ignore
29
30 from sentry_sdk.utils import ExcInfo
31 from sentry_sdk._types import EventProcessor
32
33
34 if getattr(Request, "authenticated_userid", None):
35
36 def authenticated_userid(request):
37 # type: (Request) -> Optional[Any]
38 return request.authenticated_userid
39
40
41 else:
42 # bw-compat for pyramid < 1.5
43 from pyramid.security import authenticated_userid # type: ignore
44
45
46 TRANSACTION_STYLE_VALUES = ("route_name", "route_pattern")
47
48
49 class PyramidIntegration(Integration):
50 identifier = "pyramid"
51
52 transaction_style = None
53
54 def __init__(self, transaction_style="route_name"):
55 # type: (str) -> None
56 if transaction_style not in TRANSACTION_STYLE_VALUES:
57 raise ValueError(
58 "Invalid value for transaction_style: %s (must be in %s)"
59 % (transaction_style, TRANSACTION_STYLE_VALUES)
60 )
61 self.transaction_style = transaction_style
62
63 @staticmethod
64 def setup_once():
65 # type: () -> None
66 from pyramid.router import Router
67 from pyramid.request import Request
68
69 old_handle_request = Router.handle_request
70
71 def sentry_patched_handle_request(self, request, *args, **kwargs):
72 # type: (Any, Request, *Any, **Any) -> Response
73 hub = Hub.current
74 integration = hub.get_integration(PyramidIntegration)
75 if integration is not None:
76 with hub.configure_scope() as scope:
77 scope.add_event_processor(
78 _make_event_processor(weakref.ref(request), integration)
79 )
80
81 return old_handle_request(self, request, *args, **kwargs)
82
83 Router.handle_request = sentry_patched_handle_request
84
85 if hasattr(Request, "invoke_exception_view"):
86 old_invoke_exception_view = Request.invoke_exception_view
87
88 def sentry_patched_invoke_exception_view(self, *args, **kwargs):
89 # type: (Request, *Any, **Any) -> Any
90 rv = old_invoke_exception_view(self, *args, **kwargs)
91
92 if (
93 self.exc_info
94 and all(self.exc_info)
95 and rv.status_int == 500
96 and Hub.current.get_integration(PyramidIntegration) is not None
97 ):
98 _capture_exception(self.exc_info)
99
100 return rv
101
102 Request.invoke_exception_view = sentry_patched_invoke_exception_view
103
104 old_wsgi_call = Router.__call__
105
106 def sentry_patched_wsgi_call(self, environ, start_response):
107 # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
108 hub = Hub.current
109 integration = hub.get_integration(PyramidIntegration)
110 if integration is None:
111 return old_wsgi_call(self, environ, start_response)
112
113 def sentry_patched_inner_wsgi_call(environ, start_response):
114 # type: (Dict[str, Any], Callable[..., Any]) -> Any
115 try:
116 return old_wsgi_call(self, environ, start_response)
117 except Exception:
118 einfo = sys.exc_info()
119 _capture_exception(einfo)
120 reraise(*einfo)
121
122 return SentryWsgiMiddleware(sentry_patched_inner_wsgi_call)(
123 environ, start_response
124 )
125
126 Router.__call__ = sentry_patched_wsgi_call
127
128
129 def _capture_exception(exc_info):
130 # type: (ExcInfo) -> None
131 if exc_info[0] is None or issubclass(exc_info[0], HTTPException):
132 return
133 hub = Hub.current
134 if hub.get_integration(PyramidIntegration) is None:
135 return
136
137 # If an integration is there, a client has to be there.
138 client = hub.client # type: Any
139
140 event, hint = event_from_exception(
141 exc_info,
142 client_options=client.options,
143 mechanism={"type": "pyramid", "handled": False},
144 )
145
146 hub.capture_event(event, hint=hint)
147
148
149 class PyramidRequestExtractor(RequestExtractor):
150 def url(self):
151 # type: () -> str
152 return self.request.path_url
153
154 def env(self):
155 # type: () -> Dict[str, str]
156 return self.request.environ
157
158 def cookies(self):
159 # type: () -> RequestCookies
160 return self.request.cookies
161
162 def raw_data(self):
163 # type: () -> str
164 return self.request.text
165
166 def form(self):
167 # type: () -> Dict[str, str]
168 return {
169 key: value
170 for key, value in iteritems(self.request.POST)
171 if not getattr(value, "filename", None)
172 }
173
174 def files(self):
175 # type: () -> Dict[str, cgi_FieldStorage]
176 return {
177 key: value
178 for key, value in iteritems(self.request.POST)
179 if getattr(value, "filename", None)
180 }
181
182 def size_of_file(self, postdata):
183 # type: (cgi_FieldStorage) -> int
184 file = postdata.file
185 try:
186 return os.fstat(file.fileno()).st_size
187 except Exception:
188 return 0
189
190
191 def _make_event_processor(weak_request, integration):
192 # type: (Callable[[], Request], PyramidIntegration) -> EventProcessor
193 def event_processor(event, hint):
194 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
195 request = weak_request()
196 if request is None:
197 return event
198
199 try:
200 if integration.transaction_style == "route_name":
201 event["transaction"] = request.matched_route.name
202 elif integration.transaction_style == "route_pattern":
203 event["transaction"] = request.matched_route.pattern
204 except Exception:
205 pass
206
207 with capture_internal_exceptions():
208 PyramidRequestExtractor(request).extract_into_event(event)
209
210 if _should_send_default_pii():
211 with capture_internal_exceptions():
212 user_info = event.setdefault("user", {})
213 user_info.setdefault("id", authenticated_userid(request))
214
215 return event
216
217 return event_processor
218
[end of sentry_sdk/integrations/pyramid.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/pyramid.py b/sentry_sdk/integrations/pyramid.py
--- a/sentry_sdk/integrations/pyramid.py
+++ b/sentry_sdk/integrations/pyramid.py
@@ -63,24 +63,33 @@
@staticmethod
def setup_once():
# type: () -> None
- from pyramid.router import Router
+ from pyramid import router
from pyramid.request import Request
- old_handle_request = Router.handle_request
+ old_call_view = router._call_view
- def sentry_patched_handle_request(self, request, *args, **kwargs):
+ def sentry_patched_call_view(registry, request, *args, **kwargs):
# type: (Any, Request, *Any, **Any) -> Response
hub = Hub.current
integration = hub.get_integration(PyramidIntegration)
+
if integration is not None:
with hub.configure_scope() as scope:
+ try:
+ if integration.transaction_style == "route_name":
+ scope.transaction = request.matched_route.name
+ elif integration.transaction_style == "route_pattern":
+ scope.transaction = request.matched_route.pattern
+ except Exception:
+ raise
+
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
- return old_handle_request(self, request, *args, **kwargs)
+ return old_call_view(registry, request, *args, **kwargs)
- Router.handle_request = sentry_patched_handle_request
+ router._call_view = sentry_patched_call_view
if hasattr(Request, "invoke_exception_view"):
old_invoke_exception_view = Request.invoke_exception_view
@@ -101,7 +110,7 @@
Request.invoke_exception_view = sentry_patched_invoke_exception_view
- old_wsgi_call = Router.__call__
+ old_wsgi_call = router.Router.__call__
def sentry_patched_wsgi_call(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
@@ -123,7 +132,7 @@
environ, start_response
)
- Router.__call__ = sentry_patched_wsgi_call
+ router.Router.__call__ = sentry_patched_wsgi_call
def _capture_exception(exc_info):
@@ -196,14 +205,6 @@
if request is None:
return event
- try:
- if integration.transaction_style == "route_name":
- event["transaction"] = request.matched_route.name
- elif integration.transaction_style == "route_pattern":
- event["transaction"] = request.matched_route.pattern
- except Exception:
- pass
-
with capture_internal_exceptions():
PyramidRequestExtractor(request).extract_into_event(event)
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/pyramid.py b/sentry_sdk/integrations/pyramid.py\n--- a/sentry_sdk/integrations/pyramid.py\n+++ b/sentry_sdk/integrations/pyramid.py\n@@ -63,24 +63,33 @@\n @staticmethod\n def setup_once():\n # type: () -> None\n- from pyramid.router import Router\n+ from pyramid import router\n from pyramid.request import Request\n \n- old_handle_request = Router.handle_request\n+ old_call_view = router._call_view\n \n- def sentry_patched_handle_request(self, request, *args, **kwargs):\n+ def sentry_patched_call_view(registry, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Response\n hub = Hub.current\n integration = hub.get_integration(PyramidIntegration)\n+\n if integration is not None:\n with hub.configure_scope() as scope:\n+ try:\n+ if integration.transaction_style == \"route_name\":\n+ scope.transaction = request.matched_route.name\n+ elif integration.transaction_style == \"route_pattern\":\n+ scope.transaction = request.matched_route.pattern\n+ except Exception:\n+ raise\n+\n scope.add_event_processor(\n _make_event_processor(weakref.ref(request), integration)\n )\n \n- return old_handle_request(self, request, *args, **kwargs)\n+ return old_call_view(registry, request, *args, **kwargs)\n \n- Router.handle_request = sentry_patched_handle_request\n+ router._call_view = sentry_patched_call_view\n \n if hasattr(Request, \"invoke_exception_view\"):\n old_invoke_exception_view = Request.invoke_exception_view\n@@ -101,7 +110,7 @@\n \n Request.invoke_exception_view = sentry_patched_invoke_exception_view\n \n- old_wsgi_call = Router.__call__\n+ old_wsgi_call = router.Router.__call__\n \n def sentry_patched_wsgi_call(self, environ, start_response):\n # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse\n@@ -123,7 +132,7 @@\n environ, start_response\n )\n \n- Router.__call__ = sentry_patched_wsgi_call\n+ router.Router.__call__ = sentry_patched_wsgi_call\n \n \n def _capture_exception(exc_info):\n@@ -196,14 +205,6 @@\n if request is None:\n return event\n \n- try:\n- if integration.transaction_style == \"route_name\":\n- event[\"transaction\"] = request.matched_route.name\n- elif integration.transaction_style == \"route_pattern\":\n- event[\"transaction\"] = request.matched_route.pattern\n- except Exception:\n- pass\n-\n with capture_internal_exceptions():\n PyramidRequestExtractor(request).extract_into_event(event)\n", "issue": "Setting transaction name in pyramid\nHi is it possible to set transaction name when using PyramidIntegration?\r\nI tried setting `scope.transaction = \"mycustomname\"` but it seems that integration itself does:\r\n\r\n try:\r\n if integration.transaction_style == \"route_name\":\r\n event[\"transaction\"] = request.matched_route.name\r\n elif integration.transaction_style == \"route_pattern\":\r\n event[\"transaction\"] = request.matched_route.pattern\r\n except Exception:\r\n pass\r\n\r\nAnd I suspect that overwrites anything I've set up earlier?\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport sys\nimport weakref\n\nfrom pyramid.httpexceptions import HTTPException\nfrom pyramid.request import Request\n\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk._compat import reraise, iteritems\n\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor\nfrom sentry_sdk.integrations.wsgi import 
SentryWsgiMiddleware\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from pyramid.response import Response\n from typing import Any\n from sentry_sdk.integrations.wsgi import _ScopedResponse\n from typing import Callable\n from typing import Dict\n from typing import Optional\n from webob.cookies import RequestCookies # type: ignore\n from webob.compat import cgi_FieldStorage # type: ignore\n\n from sentry_sdk.utils import ExcInfo\n from sentry_sdk._types import EventProcessor\n\n\nif getattr(Request, \"authenticated_userid\", None):\n\n def authenticated_userid(request):\n # type: (Request) -> Optional[Any]\n return request.authenticated_userid\n\n\nelse:\n # bw-compat for pyramid < 1.5\n from pyramid.security import authenticated_userid # type: ignore\n\n\nTRANSACTION_STYLE_VALUES = (\"route_name\", \"route_pattern\")\n\n\nclass PyramidIntegration(Integration):\n identifier = \"pyramid\"\n\n transaction_style = None\n\n def __init__(self, transaction_style=\"route_name\"):\n # type: (str) -> None\n if transaction_style not in TRANSACTION_STYLE_VALUES:\n raise ValueError(\n \"Invalid value for transaction_style: %s (must be in %s)\"\n % (transaction_style, TRANSACTION_STYLE_VALUES)\n )\n self.transaction_style = transaction_style\n\n @staticmethod\n def setup_once():\n # type: () -> None\n from pyramid.router import Router\n from pyramid.request import Request\n\n old_handle_request = Router.handle_request\n\n def sentry_patched_handle_request(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Response\n hub = Hub.current\n integration = hub.get_integration(PyramidIntegration)\n if integration is not None:\n with hub.configure_scope() as scope:\n scope.add_event_processor(\n _make_event_processor(weakref.ref(request), integration)\n )\n\n return old_handle_request(self, request, *args, **kwargs)\n\n Router.handle_request = sentry_patched_handle_request\n\n if hasattr(Request, \"invoke_exception_view\"):\n old_invoke_exception_view = Request.invoke_exception_view\n\n def sentry_patched_invoke_exception_view(self, *args, **kwargs):\n # type: (Request, *Any, **Any) -> Any\n rv = old_invoke_exception_view(self, *args, **kwargs)\n\n if (\n self.exc_info\n and all(self.exc_info)\n and rv.status_int == 500\n and Hub.current.get_integration(PyramidIntegration) is not None\n ):\n _capture_exception(self.exc_info)\n\n return rv\n\n Request.invoke_exception_view = sentry_patched_invoke_exception_view\n\n old_wsgi_call = Router.__call__\n\n def sentry_patched_wsgi_call(self, environ, start_response):\n # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse\n hub = Hub.current\n integration = hub.get_integration(PyramidIntegration)\n if integration is None:\n return old_wsgi_call(self, environ, start_response)\n\n def sentry_patched_inner_wsgi_call(environ, start_response):\n # type: (Dict[str, Any], Callable[..., Any]) -> Any\n try:\n return old_wsgi_call(self, environ, start_response)\n except Exception:\n einfo = sys.exc_info()\n _capture_exception(einfo)\n reraise(*einfo)\n\n return SentryWsgiMiddleware(sentry_patched_inner_wsgi_call)(\n environ, start_response\n )\n\n Router.__call__ = sentry_patched_wsgi_call\n\n\ndef _capture_exception(exc_info):\n # type: (ExcInfo) -> None\n if exc_info[0] is None or issubclass(exc_info[0], HTTPException):\n return\n hub = Hub.current\n if hub.get_integration(PyramidIntegration) is None:\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = 
event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"pyramid\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\nclass PyramidRequestExtractor(RequestExtractor):\n def url(self):\n # type: () -> str\n return self.request.path_url\n\n def env(self):\n # type: () -> Dict[str, str]\n return self.request.environ\n\n def cookies(self):\n # type: () -> RequestCookies\n return self.request.cookies\n\n def raw_data(self):\n # type: () -> str\n return self.request.text\n\n def form(self):\n # type: () -> Dict[str, str]\n return {\n key: value\n for key, value in iteritems(self.request.POST)\n if not getattr(value, \"filename\", None)\n }\n\n def files(self):\n # type: () -> Dict[str, cgi_FieldStorage]\n return {\n key: value\n for key, value in iteritems(self.request.POST)\n if getattr(value, \"filename\", None)\n }\n\n def size_of_file(self, postdata):\n # type: (cgi_FieldStorage) -> int\n file = postdata.file\n try:\n return os.fstat(file.fileno()).st_size\n except Exception:\n return 0\n\n\ndef _make_event_processor(weak_request, integration):\n # type: (Callable[[], Request], PyramidIntegration) -> EventProcessor\n def event_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n request = weak_request()\n if request is None:\n return event\n\n try:\n if integration.transaction_style == \"route_name\":\n event[\"transaction\"] = request.matched_route.name\n elif integration.transaction_style == \"route_pattern\":\n event[\"transaction\"] = request.matched_route.pattern\n except Exception:\n pass\n\n with capture_internal_exceptions():\n PyramidRequestExtractor(request).extract_into_event(event)\n\n if _should_send_default_pii():\n with capture_internal_exceptions():\n user_info = event.setdefault(\"user\", {})\n user_info.setdefault(\"id\", authenticated_userid(request))\n\n return event\n\n return event_processor\n", "path": "sentry_sdk/integrations/pyramid.py"}]}
| 2,695 | 630 |
gh_patches_debug_1910
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2699
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[email protected] still linked in several places
Should be [email protected], right? ;-)
</issue>
<code>
[start of gratipay/exceptions.py]
1 """
2 This module contains exceptions shared across application code.
3 """
4
5 from __future__ import print_function, unicode_literals
6
7
8 class ProblemChangingUsername(Exception):
9 def __str__(self):
10 return self.msg.format(self.args[0])
11
12 class UsernameIsEmpty(ProblemChangingUsername):
13 msg = "You need to provide a username!"
14
15 class UsernameTooLong(ProblemChangingUsername):
16 msg = "The username '{}' is too long."
17
18 class UsernameContainsInvalidCharacters(ProblemChangingUsername):
19 msg = "The username '{}' contains invalid characters."
20
21 class UsernameIsRestricted(ProblemChangingUsername):
22 msg = "The username '{}' is restricted."
23
24 class UsernameAlreadyTaken(ProblemChangingUsername):
25 msg = "The username '{}' is already taken."
26
27
28 class ProblemChangingNumber(Exception):
29 def __str__(self):
30 return self.msg
31
32 class HasBigTips(ProblemChangingNumber):
33 msg = "You receive tips too large for an individual. Please contact [email protected]."
34
35
36 class TooGreedy(Exception): pass
37 class NoSelfTipping(Exception): pass
38 class NoTippee(Exception): pass
39 class BadAmount(Exception): pass
40 class UserDoesntAcceptTips(Exception): pass
41
42 class FailedToReserveUsername(Exception): pass
43
44 class NegativeBalance(Exception):
45 def __str__(self):
46 return "Negative balance not allowed in this context."
47
48 class NotWhitelisted(Exception): pass
49 class NoBalancedCustomerHref(Exception): pass
50
[end of gratipay/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gratipay/exceptions.py b/gratipay/exceptions.py
--- a/gratipay/exceptions.py
+++ b/gratipay/exceptions.py
@@ -30,7 +30,7 @@
return self.msg
class HasBigTips(ProblemChangingNumber):
- msg = "You receive tips too large for an individual. Please contact [email protected]."
+ msg = "You receive tips too large for an individual. Please contact [email protected]."
class TooGreedy(Exception): pass
|
{"golden_diff": "diff --git a/gratipay/exceptions.py b/gratipay/exceptions.py\n--- a/gratipay/exceptions.py\n+++ b/gratipay/exceptions.py\n@@ -30,7 +30,7 @@\n return self.msg\n \n class HasBigTips(ProblemChangingNumber):\n- msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n+ msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n \n \n class TooGreedy(Exception): pass\n", "issue": "[email protected] still linked several places\nShould be [email protected], right? ;-)\n\n", "before_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"The username '{}' contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\n\nclass ProblemChangingNumber(Exception):\n def __str__(self):\n return self.msg\n\nclass HasBigTips(ProblemChangingNumber):\n msg = \"You receive tips too large for an individual. Please contact [email protected].\"\n\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass NoTippee(Exception): pass\nclass BadAmount(Exception): pass\nclass UserDoesntAcceptTips(Exception): pass\n\nclass FailedToReserveUsername(Exception): pass\n\nclass NegativeBalance(Exception):\n def __str__(self):\n return \"Negative balance not allowed in this context.\"\n\nclass NotWhitelisted(Exception): pass\nclass NoBalancedCustomerHref(Exception): pass\n", "path": "gratipay/exceptions.py"}]}
| 951 | 116 |
gh_patches_debug_35123
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-722
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an option not to cleanup URLs for the Duplicate model
In the duplicate case, having two equal URLs can be a strong indication that two bugs are the same.
</issue>
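A minimal sketch of the switch the issue asks for: make the URL-stripping step optional when the text-cleanup pipeline is built. The helpers here are simplified stand-ins, not bugbug's feature_cleanup implementations.

    import re

    def strip_hex(text):
        return re.sub(r"0x[0-9a-fA-F]+", "__HEX__", text)

    def strip_urls(text):
        return re.sub(r"https?://\S+", "__URL__", text)

    def make_cleanup(cleanup_urls=True):
        steps = [strip_hex]
        if cleanup_urls:
            steps.append(strip_urls)

        def run(text):
            for step in steps:
                text = step(text)
            return text

        return run

    keep_urls = make_cleanup(cleanup_urls=False)
    print(keep_urls("crash at 0xdeadbeef, see https://example.com/report"))
    # URLs survive, so two reports pointing at the same address keep that signal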
<code>
[start of bugbug/models/duplicate.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import random
7
8 from sklearn.calibration import CalibratedClassifierCV
9 from sklearn.compose import ColumnTransformer
10 from sklearn.pipeline import Pipeline
11 from sklearn.preprocessing import LabelEncoder
12 from sklearn.svm import LinearSVC
13
14 from bugbug import bug_features, bugzilla, feature_cleanup
15 from bugbug.model import BugCoupleModel
16
17 REPORTERS_TO_IGNORE = {"[email protected]", "[email protected]"}
18
19
20 class LinearSVCWithLabelEncoding(CalibratedClassifierCV):
21 def __init__(self, clf):
22 super().__init__(clf)
23 self._le = LabelEncoder()
24
25 def fit(self, X, y):
26 super().fit(X, y)
27 self._le.fit(y)
28
29
30 class DuplicateModel(BugCoupleModel):
31 def __init__(self, training_size=14000, lemmatization=False):
32 self.num_duplicates = training_size // 2
33 self.num_nondups_nondups = self.num_dup_nondups = training_size // 4
34
35 BugCoupleModel.__init__(self, lemmatization)
36
37 self.calculate_importance = False
38
39 feature_extractors = [bug_features.is_same_product()]
40
41 cleanup_functions = [
42 feature_cleanup.responses(),
43 feature_cleanup.hex(),
44 feature_cleanup.dll(),
45 feature_cleanup.fileref(),
46 feature_cleanup.url(),
47 feature_cleanup.synonyms(),
48 feature_cleanup.crash(),
49 ]
50
51 self.extraction_pipeline = Pipeline(
52 [
53 (
54 "bug_extractor",
55 bug_features.BugExtractor(feature_extractors, cleanup_functions),
56 ),
57 (
58 "union",
59 ColumnTransformer([("text", self.text_vectorizer(), "text")]),
60 ),
61 ]
62 )
63
64 self.clf = LinearSVCWithLabelEncoding(LinearSVC())
65
66 def get_labels(self):
67
68 random.seed(4)
69
70 all_ids = set(
71 bug["id"]
72 for bug in bugzilla.get_bugs()
73 if bug["creator"] not in REPORTERS_TO_IGNORE
74 and "dupeme" not in bug["keywords"]
75 )
76
77 classes = {}
78
79 # Only store ids of bugs that have duplicates or are duplicates
80 duplicate_ids = []
81
82 duplicates_num = 0
83 for bug_data in bugzilla.get_bugs():
84 bug_id = bug_data["id"]
85 if bug_id not in all_ids:
86 continue
87
88 if bug_data["dupe_of"] or len(bug_data["duplicates"]) > 0:
89 duplicate_ids.append(bug_id)
90
91 for duplicate_bug_id in bug_data["duplicates"]:
92 if duplicate_bug_id not in all_ids:
93 continue
94
95 duplicate_ids.append(duplicate_bug_id)
96
97 if duplicates_num < self.num_duplicates:
98 classes[(bug_id, duplicate_bug_id)] = 1
99 duplicates_num += 1
100
101 # Remove duplicate duplicate IDs.
102 duplicate_ids = list(set(duplicate_ids))
103
104 # Store all remaining ids
105 non_duplicate_ids = list(all_ids - set(duplicate_ids))
106
107 print(f"Number of duplicate labels is: {self.num_duplicates}")
108
109 # When the bug has no duplicates, we create dup-nondup labels.
110 dup_nondup_num = 0
111 while dup_nondup_num < self.num_dup_nondups:
112 bug_id1 = random.choice(duplicate_ids)
113 bug_id2 = random.choice(non_duplicate_ids)
114
115 classes[(bug_id1, bug_id2)] = 0
116 dup_nondup_num += 1
117
118 print(f"Number of hybrid labels is: {self.num_dup_nondups}")
119
120 # Now we map non-dup to non-dup bug.
121 nondup_nondup_num = 0
122 while nondup_nondup_num < self.num_nondups_nondups:
123 bug_id1 = random.choice(non_duplicate_ids)
124 bug_id2 = random.choice(non_duplicate_ids)
125 if bug_id1 != bug_id2:
126 classes[(bug_id1, bug_id2)] = 0
127 nondup_nondup_num += 1
128
129 print(f"Number of purely non-duplicate labels is: {self.num_nondups_nondups}")
130
131 return classes, [0, 1]
132
133 def get_feature_names(self):
134 return self.extraction_pipeline.named_steps["union"].get_feature_names()
135
[end of bugbug/models/duplicate.py]
[start of run.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import csv
8 import os
9 import sys
10 from datetime import datetime, timedelta
11
12 import numpy as np
13
14 from bugbug import bugzilla, db, repository
15 from bugbug.models import MODELS, get_model_class
16
17
18 def parse_args(args):
19 parser = argparse.ArgumentParser()
20 parser.add_argument(
21 "--lemmatization",
22 help="Perform lemmatization (using spaCy)",
23 action="store_true",
24 )
25 parser.add_argument(
26 "--training-set-size",
27 nargs="?",
28 default=14000,
29 type=int,
30 help="The size of the training set for the duplicate model",
31 )
32 parser.add_argument("--train", help="Perform training", action="store_true")
33 parser.add_argument(
34 "--goal", help="Goal of the classifier", choices=MODELS.keys(), default="defect"
35 )
36 parser.add_argument(
37 "--classifier",
38 help="Type of the classifier. Only used for component classification.",
39 choices=["default", "nn"],
40 default="default",
41 )
42 parser.add_argument("--classify", help="Perform evaluation", action="store_true")
43 parser.add_argument(
44 "--generate-sheet",
45 help="Perform evaluation on bugs from last week and generate a csv file",
46 action="store_true",
47 )
48 parser.add_argument("--token", help="Bugzilla token", action="store")
49 parser.add_argument(
50 "--historical",
51 help="""Analyze historical bugs. Only used for defect, bugtype,
52 defectenhancementtask and regression tasks.""",
53 action="store_true",
54 )
55 return parser.parse_args(args)
56
57
58 def main(args):
59 model_file_name = "{}{}model".format(
60 args.goal, "" if args.classifier == "default" else args.classifier
61 )
62
63 if args.goal == "component":
64 if args.classifier == "default":
65 model_class_name = "component"
66 else:
67 model_class_name = "component_nn"
68 else:
69 model_class_name = args.goal
70
71 model_class = get_model_class(model_class_name)
72
73 if args.train:
74 db.download(bugzilla.BUGS_DB)
75 db.download(repository.COMMITS_DB)
76
77 historical_supported_tasks = [
78 "defect",
79 "bugtype",
80 "defectenhancementtask",
81 "regression",
82 ]
83
84 if args.goal in historical_supported_tasks:
85 model = model_class(args.lemmatization, args.historical)
86 elif args.goal == "duplicate":
87 model = model_class(args.training_set_size, args.lemmatization)
88 else:
89 model = model_class(args.lemmatization)
90 model.train()
91 else:
92 model = model_class.load(model_file_name)
93
94 if args.classify:
95 for bug in bugzilla.get_bugs():
96 print(
97 f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug["id"] } - { bug["summary"]} '
98 )
99
100 if model.calculate_importance:
101 probas, importance = model.classify(
102 bug, probabilities=True, importances=True
103 )
104
105 feature_names = model.get_human_readable_feature_names()
106 for i, (importance, index, is_positive) in enumerate(
107 importance["importances"]
108 ):
109 print(
110 f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})'
111 )
112 else:
113 probas = model.classify(bug, probabilities=True, importances=False)
114
115 if np.argmax(probas) == 1:
116 print(f"Positive! {probas}")
117 else:
118 print(f"Negative! {probas}")
119 input()
120
121 if args.generate_sheet:
122 assert (
123 args.token is not None
124 ), "A Bugzilla token should be set in order to download bugs"
125 today = datetime.utcnow()
126 a_week_ago = today - timedelta(7)
127 bugzilla.set_token(args.token)
128 bug_ids = bugzilla.get_ids_between(a_week_ago, today)
129 bugs = bugzilla.get(bug_ids)
130
131 print(f"Classifying {len(bugs)} bugs...")
132
133 rows = [["Bug", f"{args.goal}(model)", args.goal, "Title"]]
134
135 for bug in bugs.values():
136 p = model.classify(bug, probabilities=True)
137 rows.append(
138 [
139 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]}',
140 "y" if p[0][1] >= 0.7 else "n",
141 "",
142 bug["summary"],
143 ]
144 )
145
146 os.makedirs("sheets", exist_ok=True)
147 with open(
148 os.path.join(
149 "sheets",
150 f'{args.goal}-{datetime.utcnow().strftime("%Y-%m-%d")}-labels.csv',
151 ),
152 "w",
153 ) as f:
154 writer = csv.writer(f)
155 writer.writerows(rows)
156
157
158 if __name__ == "__main__":
159 main(parse_args(sys.argv[1:]))
160
[end of run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bugbug/models/duplicate.py b/bugbug/models/duplicate.py
--- a/bugbug/models/duplicate.py
+++ b/bugbug/models/duplicate.py
@@ -28,7 +28,7 @@
class DuplicateModel(BugCoupleModel):
- def __init__(self, training_size=14000, lemmatization=False):
+ def __init__(self, training_size=14000, lemmatization=False, cleanup_urls=True):
self.num_duplicates = training_size // 2
self.num_nondups_nondups = self.num_dup_nondups = training_size // 4
@@ -43,11 +43,13 @@
feature_cleanup.hex(),
feature_cleanup.dll(),
feature_cleanup.fileref(),
- feature_cleanup.url(),
feature_cleanup.synonyms(),
feature_cleanup.crash(),
]
+ if cleanup_urls:
+ cleanup_functions.append(feature_cleanup.url())
+
self.extraction_pipeline = Pipeline(
[
(
diff --git a/run.py b/run.py
--- a/run.py
+++ b/run.py
@@ -29,6 +29,13 @@
type=int,
help="The size of the training set for the duplicate model",
)
+ parser.add_argument(
+ "--disable-url-cleanup",
+ help="Don't cleanup urls when training the duplicate model",
+ dest="cleanup_urls",
+ default=True,
+ action="store_false",
+ )
parser.add_argument("--train", help="Perform training", action="store_true")
parser.add_argument(
"--goal", help="Goal of the classifier", choices=MODELS.keys(), default="defect"
@@ -84,7 +91,9 @@
if args.goal in historical_supported_tasks:
model = model_class(args.lemmatization, args.historical)
elif args.goal == "duplicate":
- model = model_class(args.training_set_size, args.lemmatization)
+ model = model_class(
+ args.training_set_size, args.lemmatization, args.cleanup_urls
+ )
else:
model = model_class(args.lemmatization)
model.train()
|
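The command-line flag added in the diff above follows argparse's inverted-boolean idiom: the option disables a behaviour, so it stores False into a destination that defaults to True. A standalone illustration (the option and dest names mirror the diff; everything else is hypothetical):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--disable-url-cleanup",
        dest="cleanup_urls",
        default=True,
        action="store_false",
        help="Don't cleanup urls when training the duplicate model",
    )

    print(parser.parse_args([]).cleanup_urls)                         # True
    print(parser.parse_args(["--disable-url-cleanup"]).cleanup_urls)  # False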
{"golden_diff": "diff --git a/bugbug/models/duplicate.py b/bugbug/models/duplicate.py\n--- a/bugbug/models/duplicate.py\n+++ b/bugbug/models/duplicate.py\n@@ -28,7 +28,7 @@\n \n \n class DuplicateModel(BugCoupleModel):\n- def __init__(self, training_size=14000, lemmatization=False):\n+ def __init__(self, training_size=14000, lemmatization=False, cleanup_urls=True):\n self.num_duplicates = training_size // 2\n self.num_nondups_nondups = self.num_dup_nondups = training_size // 4\n \n@@ -43,11 +43,13 @@\n feature_cleanup.hex(),\n feature_cleanup.dll(),\n feature_cleanup.fileref(),\n- feature_cleanup.url(),\n feature_cleanup.synonyms(),\n feature_cleanup.crash(),\n ]\n \n+ if cleanup_urls:\n+ cleanup_functions.append(feature_cleanup.url())\n+\n self.extraction_pipeline = Pipeline(\n [\n (\ndiff --git a/run.py b/run.py\n--- a/run.py\n+++ b/run.py\n@@ -29,6 +29,13 @@\n type=int,\n help=\"The size of the training set for the duplicate model\",\n )\n+ parser.add_argument(\n+ \"--disable-url-cleanup\",\n+ help=\"Don't cleanup urls when training the duplicate model\",\n+ dest=\"cleanup_urls\",\n+ default=True,\n+ action=\"store_false\",\n+ )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\", help=\"Goal of the classifier\", choices=MODELS.keys(), default=\"defect\"\n@@ -84,7 +91,9 @@\n if args.goal in historical_supported_tasks:\n model = model_class(args.lemmatization, args.historical)\n elif args.goal == \"duplicate\":\n- model = model_class(args.training_set_size, args.lemmatization)\n+ model = model_class(\n+ args.training_set_size, args.lemmatization, args.cleanup_urls\n+ )\n else:\n model = model_class(args.lemmatization)\n model.train()\n", "issue": "Add an option not to cleanup URLs for the Duplicate model\nIn the duplicate case, having two equal URL can be a strong indication that two bugs are the same.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport random\n\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import LinearSVC\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugCoupleModel\n\nREPORTERS_TO_IGNORE = {\"[email protected]\", \"[email protected]\"}\n\n\nclass LinearSVCWithLabelEncoding(CalibratedClassifierCV):\n def __init__(self, clf):\n super().__init__(clf)\n self._le = LabelEncoder()\n\n def fit(self, X, y):\n super().fit(X, y)\n self._le.fit(y)\n\n\nclass DuplicateModel(BugCoupleModel):\n def __init__(self, training_size=14000, lemmatization=False):\n self.num_duplicates = training_size // 2\n self.num_nondups_nondups = self.num_dup_nondups = training_size // 4\n\n BugCoupleModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n feature_extractors = [bug_features.is_same_product()]\n\n cleanup_functions = [\n feature_cleanup.responses(),\n feature_cleanup.hex(),\n feature_cleanup.dll(),\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n feature_cleanup.crash(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n (\n \"union\",\n ColumnTransformer([(\"text\", self.text_vectorizer(), \"text\")]),\n ),\n ]\n )\n\n self.clf = LinearSVCWithLabelEncoding(LinearSVC())\n\n def get_labels(self):\n\n random.seed(4)\n\n all_ids = set(\n bug[\"id\"]\n for bug in bugzilla.get_bugs()\n if bug[\"creator\"] not in REPORTERS_TO_IGNORE\n and \"dupeme\" not in bug[\"keywords\"]\n )\n\n classes = {}\n\n # Only store ids of bugs that have duplicates or are duplicates\n duplicate_ids = []\n\n duplicates_num = 0\n for bug_data in bugzilla.get_bugs():\n bug_id = bug_data[\"id\"]\n if bug_id not in all_ids:\n continue\n\n if bug_data[\"dupe_of\"] or len(bug_data[\"duplicates\"]) > 0:\n duplicate_ids.append(bug_id)\n\n for duplicate_bug_id in bug_data[\"duplicates\"]:\n if duplicate_bug_id not in all_ids:\n continue\n\n duplicate_ids.append(duplicate_bug_id)\n\n if duplicates_num < self.num_duplicates:\n classes[(bug_id, duplicate_bug_id)] = 1\n duplicates_num += 1\n\n # Remove duplicate duplicate IDs.\n duplicate_ids = list(set(duplicate_ids))\n\n # Store all remaining ids\n non_duplicate_ids = list(all_ids - set(duplicate_ids))\n\n print(f\"Number of duplicate labels is: {self.num_duplicates}\")\n\n # When the bug has no duplicates, we create dup-nondup labels.\n dup_nondup_num = 0\n while dup_nondup_num < self.num_dup_nondups:\n bug_id1 = random.choice(duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n\n classes[(bug_id1, bug_id2)] = 0\n dup_nondup_num += 1\n\n print(f\"Number of hybrid labels is: {self.num_dup_nondups}\")\n\n # Now we map non-dup to non-dup bug.\n nondup_nondup_num = 0\n while nondup_nondup_num < self.num_nondups_nondups:\n bug_id1 = random.choice(non_duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n if bug_id1 != bug_id2:\n classes[(bug_id1, bug_id2)] = 0\n nondup_nondup_num += 1\n\n print(f\"Number of purely non-duplicate labels is: {self.num_nondups_nondups}\")\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/duplicate.py"}, 
{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nfrom bugbug import bugzilla, db, repository\nfrom bugbug.models import MODELS, get_model_class\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--lemmatization\",\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--training-set-size\",\n nargs=\"?\",\n default=14000,\n type=int,\n help=\"The size of the training set for the duplicate model\",\n )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\", help=\"Goal of the classifier\", choices=MODELS.keys(), default=\"defect\"\n )\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--classify\", help=\"Perform evaluation\", action=\"store_true\")\n parser.add_argument(\n \"--generate-sheet\",\n help=\"Perform evaluation on bugs from last week and generate a csv file\",\n action=\"store_true\",\n )\n parser.add_argument(\"--token\", help=\"Bugzilla token\", action=\"store\")\n parser.add_argument(\n \"--historical\",\n help=\"\"\"Analyze historical bugs. Only used for defect, bugtype,\n defectenhancementtask and regression tasks.\"\"\",\n action=\"store_true\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n model_file_name = \"{}{}model\".format(\n args.goal, \"\" if args.classifier == \"default\" else args.classifier\n )\n\n if args.goal == \"component\":\n if args.classifier == \"default\":\n model_class_name = \"component\"\n else:\n model_class_name = \"component_nn\"\n else:\n model_class_name = args.goal\n\n model_class = get_model_class(model_class_name)\n\n if args.train:\n db.download(bugzilla.BUGS_DB)\n db.download(repository.COMMITS_DB)\n\n historical_supported_tasks = [\n \"defect\",\n \"bugtype\",\n \"defectenhancementtask\",\n \"regression\",\n ]\n\n if args.goal in historical_supported_tasks:\n model = model_class(args.lemmatization, args.historical)\n elif args.goal == \"duplicate\":\n model = model_class(args.training_set_size, args.lemmatization)\n else:\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_human_readable_feature_names()\n for i, (importance, index, is_positive) in enumerate(\n importance[\"importances\"]\n ):\n print(\n f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})'\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n if args.generate_sheet:\n assert (\n args.token is not None\n ), \"A Bugzilla token should be set in order to download bugs\"\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugzilla.set_token(args.token)\n bug_ids = bugzilla.get_ids_between(a_week_ago, today)\n bugs = bugzilla.get(bug_ids)\n\n print(f\"Classifying {len(bugs)} bugs...\")\n\n rows = [[\"Bug\", f\"{args.goal}(model)\", args.goal, \"Title\"]]\n\n for bug in bugs.values():\n p = model.classify(bug, probabilities=True)\n rows.append(\n [\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}',\n \"y\" if p[0][1] >= 0.7 else \"n\",\n \"\",\n bug[\"summary\"],\n ]\n )\n\n os.makedirs(\"sheets\", exist_ok=True)\n with open(\n os.path.join(\n \"sheets\",\n f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv',\n ),\n \"w\",\n ) as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "run.py"}]}
| 3,384 | 478 |
gh_patches_debug_40189
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-3423
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Memory consumption in plot_brainstorm_phantom_elekta
CircleCI is failing because this example is using so much memory.
</issue>
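The change applied upstream is not shown in this excerpt; as a generic diagnostic aid, the tutorial script can be instrumented to report its resident memory after each heavy step (this assumes the psutil package, which is not part of the tutorial itself):

    import os

    import psutil

    def report(stage):
        rss_mb = psutil.Process(os.getpid()).memory_info().rss / 1e6
        print(f"{stage}: {rss_mb:.0f} MB resident")

    report("after imports")
    # Call report(...) again after preloading the raw data, after Maxwell
    # filtering, and after epoching to see which step dominates peak memory.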
<code>
[start of tutorials/plot_brainstorm_phantom_elekta.py]
1 # -*- coding: utf-8 -*-
2 """
3 ==========================================
4 Brainstorm Elekta phantom tutorial dataset
5 ==========================================
6
7 Here we compute the evoked from raw for the Brainstorm Elekta phantom
8 tutorial dataset. For comparison, see [1]_ and:
9
10 http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta
11
12 References
13 ----------
14 .. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
15 Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
16 Computational Intelligence and Neuroscience, vol. 2011, Article ID
17 879716, 13 pages, 2011. doi:10.1155/2011/879716
18 """
19
20 # Authors: Eric Larson <[email protected]>
21 #
22 # License: BSD (3-clause)
23
24 import os.path as op
25 import numpy as np
26
27 import mne
28 from mne import find_events, fit_dipole
29 from mne.datasets.brainstorm import bst_phantom_elekta
30 from mne.io import read_raw_fif
31
32 print(__doc__)
33
34 ###############################################################################
35 # The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
36 # and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data
37 # are read to construct instances of :class:`mne.io.Raw`.
38 data_path = bst_phantom_elekta.data_path()
39
40 raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
41 raw = read_raw_fif(raw_fname, preload=True)
42
43 ###############################################################################
44 # Data channel array consisted of 204 MEG planor gradiometers,
45 # 102 axial magnetometers, and 3 stimulus channels. Let's get the events
46 # for the phantom, where each dipole (1-32) gets its own event:
47
48 events = find_events(raw, 'STI201')
49 raw.plot(events=events)
50 raw.info['bads'] = ['MEG2421']
51
52 ###############################################################################
53 # The data have strong line frequency (60 Hz and harmonics) and cHPI coil
54 # noise (five peaks around 300 Hz):
55
56 raw.plot_psd()
57
58 ###############################################################################
59 # We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
60
61 raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')
62 raw.plot_psd()
63
64 ###############################################################################
65 # The data are still a bit noisy, so let's use Maxwell filtering to clean it.
66 # Ideally we would have the fine calibration and cross-talk information
67 # for the site of interest, but we don't, so we just do:
68
69 raw.fix_mag_coil_types()
70 raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
71 raw.plot(events=events)
72
73 ###############################################################################
74 # Now we epoch our data, average it, and look at the first dipole response.
75 # The first peak appears around 3 ms.
76
77 tmin, tmax = -0.2, 0.2
78 event_id = list(range(1, 33))
79 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))
80 epochs['1'].average().plot()
81
82 ###############################################################################
83 # Let's do some dipole fits. The phantom is properly modeled by a single-shell
84 # sphere with origin (0., 0., 0.). We compute covariance, then do the fits.
85
86 t_peak = 0.061 # 3 MS at largest peak
87 sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
88 cov = mne.compute_covariance(epochs, tmax=0)
89 data = []
90 for ii in range(1, 33):
91 evoked = epochs[str(ii)].average().crop(t_peak, t_peak)
92 data.append(evoked.data[:, 0])
93 evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
94 dip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0]
95
96 ###############################################################################
97 # Now we can compare to the actual locations, taking the difference in mm:
98
99 actual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0]
100 diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
101 print('Differences (mm):\n%s' % diffs[:, np.newaxis])
102
[end of tutorials/plot_brainstorm_phantom_elekta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tutorials/plot_brainstorm_phantom_elekta.py b/tutorials/plot_brainstorm_phantom_elekta.py
--- a/tutorials/plot_brainstorm_phantom_elekta.py
+++ b/tutorials/plot_brainstorm_phantom_elekta.py
@@ -38,7 +38,7 @@
data_path = bst_phantom_elekta.data_path()
raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
-raw = read_raw_fif(raw_fname, preload=True)
+raw = read_raw_fif(raw_fname)
###############################################################################
# Data channel array consisted of 204 MEG planor gradiometers,
@@ -51,39 +51,41 @@
###############################################################################
# The data have strong line frequency (60 Hz and harmonics) and cHPI coil
-# noise (five peaks around 300 Hz):
+# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds
+# to save memory:
-raw.plot_psd()
+raw.plot_psd(tmax=60.)
###############################################################################
-# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
-
-raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')
-raw.plot_psd()
-
-###############################################################################
-# The data are still a bit noisy, so let's use Maxwell filtering to clean it.
+# Let's use Maxwell filtering to clean the data a bit.
# Ideally we would have the fine calibration and cross-talk information
# for the site of interest, but we don't, so we just do:
raw.fix_mag_coil_types()
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
+
+###############################################################################
+# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
+
+raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')
raw.plot(events=events)
###############################################################################
# Now we epoch our data, average it, and look at the first dipole response.
-# The first peak appears around 3 ms.
+# The first peak appears around 3 ms. Because we low-passed at 40 Hz,
+# we can also decimate our data to save memory.
-tmin, tmax = -0.2, 0.2
+tmin, tmax = -0.1, 0.1
event_id = list(range(1, 33))
-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))
+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),
+ decim=5)
epochs['1'].average().plot()
###############################################################################
# Let's do some dipole fits. The phantom is properly modeled by a single-shell
# sphere with origin (0., 0., 0.). We compute covariance, then do the fits.
-t_peak = 0.061 # 3 MS at largest peak
+t_peak = 60e-3 # ~60 MS at largest peak
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
cov = mne.compute_covariance(epochs, tmax=0)
data = []
|
{"golden_diff": "diff --git a/tutorials/plot_brainstorm_phantom_elekta.py b/tutorials/plot_brainstorm_phantom_elekta.py\n--- a/tutorials/plot_brainstorm_phantom_elekta.py\n+++ b/tutorials/plot_brainstorm_phantom_elekta.py\n@@ -38,7 +38,7 @@\n data_path = bst_phantom_elekta.data_path()\n \n raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\n-raw = read_raw_fif(raw_fname, preload=True)\n+raw = read_raw_fif(raw_fname)\n \n ###############################################################################\n # Data channel array consisted of 204 MEG planor gradiometers,\n@@ -51,39 +51,41 @@\n \n ###############################################################################\n # The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n-# noise (five peaks around 300 Hz):\n+# noise (five peaks around 300 Hz). Here we plot only out to 60 seconds\n+# to save memory:\n \n-raw.plot_psd()\n+raw.plot_psd(tmax=60.)\n \n ###############################################################################\n-# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n-\n-raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\n-raw.plot_psd()\n-\n-###############################################################################\n-# The data are still a bit noisy, so let's use Maxwell filtering to clean it.\n+# Let's use Maxwell filtering to clean the data a bit.\n # Ideally we would have the fine calibration and cross-talk information\n # for the site of interest, but we don't, so we just do:\n \n raw.fix_mag_coil_types()\n raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\n+\n+###############################################################################\n+# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n+\n+raw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\n raw.plot(events=events)\n \n ###############################################################################\n # Now we epoch our data, average it, and look at the first dipole response.\n-# The first peak appears around 3 ms.\n+# The first peak appears around 3 ms. Because we low-passed at 40 Hz,\n+# we can also decimate our data to save memory.\n \n-tmin, tmax = -0.2, 0.2\n+tmin, tmax = -0.1, 0.1\n event_id = list(range(1, 33))\n-epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))\n+epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),\n+ decim=5)\n epochs['1'].average().plot()\n \n ###############################################################################\n # Let's do some dipole fits. The phantom is properly modeled by a single-shell\n # sphere with origin (0., 0., 0.). We compute covariance, then do the fits.\n \n-t_peak = 0.061 # 3 MS at largest peak\n+t_peak = 60e-3 # ~60 MS at largest peak\n sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\n cov = mne.compute_covariance(epochs, tmax=0)\n data = []\n", "issue": "Memory consumption in plot_brainstorm_phantom_elekta\nCircle is failing because it's using so much memory.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n==========================================\nBrainstorm Elekta phantom tutorial dataset\n==========================================\n\nHere we compute the evoked from raw for the Brainstorm Elekta phantom\ntutorial dataset. For comparison, see [1]_ and:\n\n http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta\n\nReferences\n----------\n.. 
[1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.\n Brainstorm: A User-Friendly Application for MEG/EEG Analysis.\n Computational Intelligence and Neuroscience, vol. 2011, Article ID\n 879716, 13 pages, 2011. doi:10.1155/2011/879716\n\"\"\"\n\n# Authors: Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\n\nimport mne\nfrom mne import find_events, fit_dipole\nfrom mne.datasets.brainstorm import bst_phantom_elekta\nfrom mne.io import read_raw_fif\n\nprint(__doc__)\n\n###############################################################################\n# The data were collected with an Elekta Neuromag VectorView system at 1000 Hz\n# and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data\n# are read to construct instances of :class:`mne.io.Raw`.\ndata_path = bst_phantom_elekta.data_path()\n\nraw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')\nraw = read_raw_fif(raw_fname, preload=True)\n\n###############################################################################\n# Data channel array consisted of 204 MEG planor gradiometers,\n# 102 axial magnetometers, and 3 stimulus channels. Let's get the events\n# for the phantom, where each dipole (1-32) gets its own event:\n\nevents = find_events(raw, 'STI201')\nraw.plot(events=events)\nraw.info['bads'] = ['MEG2421']\n\n###############################################################################\n# The data have strong line frequency (60 Hz and harmonics) and cHPI coil\n# noise (five peaks around 300 Hz):\n\nraw.plot_psd()\n\n###############################################################################\n# We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.\n\nraw.filter(None, 40., h_trans_bandwidth=10., filter_length='1s')\nraw.plot_psd()\n\n###############################################################################\n# The data are still a bit noisy, so let's use Maxwell filtering to clean it.\n# Ideally we would have the fine calibration and cross-talk information\n# for the site of interest, but we don't, so we just do:\n\nraw.fix_mag_coil_types()\nraw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))\nraw.plot(events=events)\n\n###############################################################################\n# Now we epoch our data, average it, and look at the first dipole response.\n# The first peak appears around 3 ms.\n\ntmin, tmax = -0.2, 0.2\nevent_id = list(range(1, 33))\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01))\nepochs['1'].average().plot()\n\n###############################################################################\n# Let's do some dipole fits. The phantom is properly modeled by a single-shell\n# sphere with origin (0., 0., 0.). 
We compute covariance, then do the fits.\n\nt_peak = 0.061 # 3 MS at largest peak\nsphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)\ncov = mne.compute_covariance(epochs, tmax=0)\ndata = []\nfor ii in range(1, 33):\n evoked = epochs[str(ii)].average().crop(t_peak, t_peak)\n data.append(evoked.data[:, 0])\nevoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)\ndip = fit_dipole(evoked, cov, sphere, n_jobs=2)[0]\n\n###############################################################################\n# Now we can compare to the actual locations, taking the difference in mm:\n\nactual_pos = mne.dipole.get_phantom_dipoles(kind='122')[0]\ndiffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))\nprint('Differences (mm):\\n%s' % diffs[:, np.newaxis])\n", "path": "tutorials/plot_brainstorm_phantom_elekta.py"}]}
| 1,805 | 772 |
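The golden diff in the record above cuts memory use in three ways: it drops `preload=True` so the raw file is read lazily, restricts the PSD plot to the first 60 seconds, and decimates the epochs after low-pass filtering. A minimal sketch of the lazy-load-plus-decimation pattern, assuming an MNE-readable `raw.fif` file exists at the placeholder path (the filename is an assumption, not part of the record):

```python
import mne

# Without preload=True the data stay on disk and are read on demand.
raw = mne.io.read_raw_fif("sample_raw.fif")
events = mne.find_events(raw, "STI201")

# The signal of interest sits below 40 Hz, so keeping every 5th sample
# (decim=5) preserves it while the epochs array shrinks five-fold.
epochs = mne.Epochs(raw, events, event_id=list(range(1, 33)),
                    tmin=-0.1, tmax=0.1, baseline=(None, -0.01), decim=5)
```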
gh_patches_debug_13164
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1245
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JSON validation crash on missing array minItems property
```
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/pyramid/viewderivers.py", line 147, in _requestonly_view
response = view(request)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/cornice/service.py", line 491, in wrapper
response = view_()
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/core/resource/__init__.py", line 460, in put
new_record = self.process_record(post_record, old=existing)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/views/records.py", line 66, in process_record
jsonschema.validate(stripped, schema)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py", line 540, in validate
cls.check_schema(schema)
File "/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py", line 83, in check_schema
raise SchemaError.create_from(error)
jsonschema.exceptions.SchemaError: [] is too short
Failed validating 'minItems' in schema['properties']['required']:
{'items': {'type': 'string'},
'minItems': 1,
'type': 'array',
'uniqueItems': True}
On instance['required']:
[]
```
</issue>
<code>
[start of kinto/views/records.py]
1 import copy
2
3 import jsonschema
4 from kinto.core import resource, utils
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import object_exists_or_404
11
12
13 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
14
15
16 @resource.register(name='record',
17 collection_path=_parent_path + '/records',
18 record_path=_parent_path + '/records/{{id}}')
19 class Record(resource.ShareableResource):
20
21 schema_field = 'schema'
22
23 def __init__(self, request, **kwargs):
24 # Before all, first check that the parent collection exists.
25 # Check if already fetched before (in batch).
26 collections = request.bound_data.setdefault('collections', {})
27 collection_uri = self.get_parent_id(request)
28 if collection_uri not in collections:
29 # Unknown yet, fetch from storage.
30 collection_parent_id = utils.instance_uri(request, 'bucket',
31 id=self.bucket_id)
32 collection = object_exists_or_404(request,
33 collection_id='collection',
34 parent_id=collection_parent_id,
35 object_id=self.collection_id)
36 collections[collection_uri] = collection
37
38 super().__init__(request, **kwargs)
39 self._collection = collections[collection_uri]
40
41 def get_parent_id(self, request):
42 self.bucket_id = request.matchdict['bucket_id']
43 self.collection_id = request.matchdict['collection_id']
44 return utils.instance_uri(request, 'collection',
45 bucket_id=self.bucket_id,
46 id=self.collection_id)
47
48 def process_record(self, new, old=None):
49 """Validate records against collection schema, if any."""
50 new = super().process_record(new, old)
51
52 schema = self._collection.get('schema')
53 settings = self.request.registry.settings
54 schema_validation = 'experimental_collection_schema_validation'
55 if not schema or not asbool(settings.get(schema_validation)):
56 return new
57
58 collection_timestamp = self._collection[self.model.modified_field]
59
60 try:
61 stripped = copy.deepcopy(new)
62 stripped.pop(self.model.id_field, None)
63 stripped.pop(self.model.modified_field, None)
64 stripped.pop(self.model.permissions_field, None)
65 stripped.pop(self.schema_field, None)
66 jsonschema.validate(stripped, schema)
67 except jsonschema_exceptions.ValidationError as e:
68 try:
69 field = e.path.pop() if e.path else e.validator_value.pop()
70 except AttributeError:
71 field = None
72 raise_invalid(self.request, name=field, description=e.message)
73
74 new[self.schema_field] = collection_timestamp
75 return new
76
77 def collection_get(self):
78 result = super().collection_get()
79 self._handle_cache_expires(self.request.response)
80 return result
81
82 def get(self):
83 result = super().get()
84 self._handle_cache_expires(self.request.response)
85 return result
86
87 def _handle_cache_expires(self, response):
88 """If the parent collection defines a ``cache_expires`` attribute,
89 then cache-control response headers are sent.
90
91 .. note::
92
93 Those headers are also sent if the
94 ``kinto.record_cache_expires_seconds`` setting is defined.
95 """
96 is_anonymous = Authenticated not in self.request.effective_principals
97 if not is_anonymous:
98 return
99
100 cache_expires = self._collection.get('cache_expires')
101 if cache_expires is None:
102 by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)
103 by_collection = '{}_{}_record_cache_expires_seconds'.format(
104 self.bucket_id, self.collection_id)
105 settings = self.request.registry.settings
106 cache_expires = settings.get(by_collection,
107 settings.get(by_bucket))
108
109 if cache_expires is not None:
110 response.cache_expires(seconds=int(cache_expires))
111
[end of kinto/views/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -65,10 +65,10 @@
stripped.pop(self.schema_field, None)
jsonschema.validate(stripped, schema)
except jsonschema_exceptions.ValidationError as e:
- try:
- field = e.path.pop() if e.path else e.validator_value.pop()
- except AttributeError:
- field = None
+ if e.validator_value:
+ field = e.validator_value[-1]
+ else:
+ field = e.schema_path[-1]
raise_invalid(self.request, name=field, description=e.message)
new[self.schema_field] = collection_timestamp
|
{"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -65,10 +65,10 @@\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n- try:\n- field = e.path.pop() if e.path else e.validator_value.pop()\n- except AttributeError:\n- field = None\n+ if e.validator_value:\n+ field = e.validator_value[-1]\n+ else:\n+ field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n \n new[self.schema_field] = collection_timestamp\n", "issue": "JSON validation crash on missing array minItems property\n```\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/core/resource/__init__.py\", line 460, in put\r\n new_record = self.process_record(post_record, old=existing)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/views/records.py\", line 66, in process_record\r\n jsonschema.validate(stripped, schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 540, in validate\r\n cls.check_schema(schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 83, in check_schema\r\n raise SchemaError.create_from(error)\r\njsonschema.exceptions.SchemaError: [] is too short\r\n\r\nFailed validating 'minItems' in schema['properties']['required']:\r\n {'items': {'type': 'string'},\r\n 'minItems': 1,\r\n 'type': 'array',\r\n 'uniqueItems': True}\r\n\r\nOn instance['required']:\r\n []\r\n```\nJSON validation crash on missing array minItems property\n```\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\r\n response = view(request)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/cornice/service.py\", line 491, in wrapper\r\n response = view_()\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/core/resource/__init__.py\", line 460, in put\r\n new_record = self.process_record(post_record, old=existing)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/kinto/views/records.py\", line 66, in process_record\r\n jsonschema.validate(stripped, schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 540, in validate\r\n cls.check_schema(schema)\r\n File \"/home/ubuntu/venvs/kinto/local/lib/python3.5/site-packages/jsonschema/validators.py\", line 83, in check_schema\r\n raise SchemaError.create_from(error)\r\njsonschema.exceptions.SchemaError: [] is too short\r\n\r\nFailed validating 'minItems' in schema['properties']['required']:\r\n {'items': {'type': 'string'},\r\n 'minItems': 1,\r\n 'type': 'array',\r\n 'uniqueItems': True}\r\n\r\nOn instance['required']:\r\n []\r\n```\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import 
object_exists_or_404\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = utils.instance_uri(request, 'bucket',\n id=self.bucket_id)\n collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super().__init__(request, **kwargs)\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return utils.instance_uri(request, 'collection',\n bucket_id=self.bucket_id,\n id=self.collection_id)\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super().process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super().collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super().get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)\n by_collection = '{}_{}_record_cache_expires_seconds'.format(\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]}
| 2,258 | 166 |
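The fix in the record above replaces the fragile `e.path.pop()` / `e.validator_value.pop()` lookup with indexing into `e.validator_value` or `e.schema_path`. A minimal sketch of what those `ValidationError` attributes hold for the kind of failure shown in the traceback, using a toy schema rather than a real Kinto collection schema (the schema and record below are illustrative assumptions):

```python
import jsonschema
from jsonschema import exceptions as jsonschema_exceptions

schema = {"type": "object", "required": ["title"]}  # toy schema
record = {}  # missing the required "title" field

try:
    jsonschema.validate(record, schema)
except jsonschema_exceptions.ValidationError as e:
    print(e.message)            # "'title' is a required property"
    print(list(e.path))         # [] -- empty for a top-level "required" failure
    print(e.validator_value)    # ['title'] -- validator_value[-1] names a field
    print(list(e.schema_path))  # ['required']
```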
gh_patches_debug_18792
|
rasdani/github-patches
|
git_diff
|
nonebot__nonebot2-334
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Logger always reports the originating name as nonebot
**Describe the problem:**

In [v2.0.0a11](https://github.com/nonebot/nonebot2/tree/v2.0.0a11) and earlier, calling `loguru.logger` from an event-handler file or package produced log records whose originating name was the name of that file or package.

In [v2.0.0a13.post1](https://github.com/nonebot/nonebot2/tree/v2.0.0a13.post1), logs emitted the same way always report the originating name as nonebot.
**Expected result**
The originating name of a log record should be the name of the file or package that emitted it.
**Environment:**
- OS: windows-latest
- Python Version: 3.9.4
- Nonebot Version: v2.0.0a13.post1
</issue>
<code>
[start of nonebot/log.py]
1 """
2 日志
3 ====
4
5 NoneBot 使用 `loguru`_ 来记录日志信息。
6
7 自定义 logger 请参考 `loguru`_ 文档。
8
9 .. _loguru:
10 https://github.com/Delgan/loguru
11 """
12
13 import sys
14 import logging
15 from typing import Union
16
17 from loguru import logger as logger_
18
19 # logger = logging.getLogger("nonebot")
20 logger = logger_
21 """
22 :说明:
23
24 NoneBot 日志记录器对象。
25
26 :默认信息:
27
28 * 格式: ``[%(asctime)s %(name)s] %(levelname)s: %(message)s``
29 * 等级: ``DEBUG`` / ``INFO`` ,根据 config 配置改变
30 * 输出: 输出至 stdout
31
32 :用法:
33
34 .. code-block:: python
35
36 from nonebot.log import logger
37 """
38
39 # default_handler = logging.StreamHandler(sys.stdout)
40 # default_handler.setFormatter(
41 # logging.Formatter("[%(asctime)s %(name)s] %(levelname)s: %(message)s"))
42 # logger.addHandler(default_handler)
43
44
45 class Filter:
46
47 def __init__(self) -> None:
48 self.level: Union[int, str] = "DEBUG"
49
50 def __call__(self, record):
51 record["name"] = record["name"].split(".")[0]
52 levelno = logger.level(self.level).no if isinstance(self.level,
53 str) else self.level
54 return record["level"].no >= levelno
55
56
57 class LoguruHandler(logging.Handler):
58
59 def emit(self, record):
60 try:
61 level = logger.level(record.levelname).name
62 except ValueError:
63 level = record.levelno
64
65 frame, depth = logging.currentframe(), 2
66 while frame.f_code.co_filename == logging.__file__:
67 frame = frame.f_back
68 depth += 1
69
70 logger.opt(depth=depth,
71 exception=record.exc_info).log(level, record.getMessage())
72
73
74 logger.remove()
75 default_filter = Filter()
76 default_format = (
77 "<g>{time:MM-DD HH:mm:ss}</g> "
78 "[<lvl>{level}</lvl>] "
79 "<c><u>{name}</u></c> | "
80 # "<c>{function}:{line}</c>| "
81 "{message}")
82 logger_id = logger.add(sys.stdout,
83 colorize=True,
84 diagnose=False,
85 filter=default_filter,
86 format=default_format)
87
[end of nonebot/log.py]
[start of nonebot/plugin/manager.py]
1 import sys
2 import uuid
3 import pkgutil
4 import importlib
5 from hashlib import md5
6 from types import ModuleType
7 from collections import Counter
8 from contextvars import ContextVar
9 from importlib.abc import MetaPathFinder
10 from typing import Set, List, Optional, Iterable
11 from importlib.machinery import PathFinder, SourceFileLoader
12
13 from .export import _export, Export
14
15 _current_plugin: ContextVar[Optional[str]] = ContextVar("_current_plugin",
16 default=None)
17
18 _internal_space = ModuleType(__name__ + "._internal")
19 _internal_space.__path__ = [] # type: ignore
20 sys.modules[_internal_space.__name__] = _internal_space
21
22 _manager_stack: List["PluginManager"] = []
23
24
25 class _NamespaceModule(ModuleType):
26 """Simple namespace module to store plugins."""
27
28 @property
29 def __path__(self):
30 return []
31
32 def __getattr__(self, name: str):
33 try:
34 return super().__getattr__(name) # type: ignore
35 except AttributeError:
36 if name.startswith("__"):
37 raise
38 raise RuntimeError("Plugin manager not activated!")
39
40
41 class _InternalModule(ModuleType):
42 """Internal module for each plugin manager."""
43
44 def __init__(self, prefix: str, plugin_manager: "PluginManager"):
45 super().__init__(f"{prefix}.{plugin_manager.internal_id}")
46 self.__plugin_manager__ = plugin_manager
47
48 @property
49 def __path__(self) -> List[str]:
50 return list(self.__plugin_manager__.search_path)
51
52
53 class PluginManager:
54
55 def __init__(self,
56 namespace: Optional[str] = None,
57 plugins: Optional[Iterable[str]] = None,
58 search_path: Optional[Iterable[str]] = None,
59 *,
60 id: Optional[str] = None):
61 self.namespace: Optional[str] = namespace
62 self.namespace_module: Optional[ModuleType] = self._setup_namespace(
63 namespace)
64
65 self.id: str = id or str(uuid.uuid4())
66 self.internal_id: str = md5(
67 ((self.namespace or "") + self.id).encode()).hexdigest()
68 self.internal_module = self._setup_internal_module(self.internal_id)
69
70 # simple plugin not in search path
71 self.plugins: Set[str] = set(plugins or [])
72 self.search_path: Set[str] = set(search_path or [])
73 # ensure can be loaded
74 self.list_plugins()
75
76 def _setup_namespace(self,
77 namespace: Optional[str] = None
78 ) -> Optional[ModuleType]:
79 if not namespace:
80 return None
81
82 try:
83 module = importlib.import_module(namespace)
84 except ImportError:
85 module = _NamespaceModule(namespace)
86 if "." in namespace:
87 parent = importlib.import_module(namespace.rsplit(".", 1)[0])
88 setattr(parent, namespace.rsplit(".", 1)[1], module)
89
90 sys.modules[namespace] = module
91 return module
92
93 def _setup_internal_module(self, internal_id: str) -> ModuleType:
94 if hasattr(_internal_space, internal_id):
95 raise RuntimeError("Plugin manager already exists!")
96
97 prefix = sys._getframe(3).f_globals.get(
98 "__name__") or _internal_space.__name__
99 if not prefix.startswith(_internal_space.__name__):
100 prefix = _internal_space.__name__
101 module = _InternalModule(prefix, self)
102 sys.modules[module.__name__] = module
103 setattr(_internal_space, internal_id, module)
104 return module
105
106 def __enter__(self):
107 if self in _manager_stack:
108 raise RuntimeError("Plugin manager already activated!")
109 _manager_stack.append(self)
110 return self
111
112 def __exit__(self, exc_type, exc_value, traceback):
113 try:
114 _manager_stack.pop()
115 except IndexError:
116 pass
117
118 def search_plugins(self) -> List[str]:
119 return [
120 module_info.name
121 for module_info in pkgutil.iter_modules(self.search_path)
122 ]
123
124 def list_plugins(self) -> Set[str]:
125 _pre_managers: List[PluginManager]
126 if self in _manager_stack:
127 _pre_managers = _manager_stack[:_manager_stack.index(self)]
128 else:
129 _pre_managers = _manager_stack[:]
130
131 _search_path: Set[str] = set()
132 for manager in _pre_managers:
133 _search_path |= manager.search_path
134 if _search_path & self.search_path:
135 raise RuntimeError("Duplicate plugin search path!")
136
137 _search_plugins = self.search_plugins()
138 c = Counter([*_search_plugins, *self.plugins])
139 conflict = [name for name, num in c.items() if num > 1]
140 if conflict:
141 raise RuntimeError(
142 f"More than one plugin named {' / '.join(conflict)}!")
143 return set(_search_plugins) | self.plugins
144
145 def load_plugin(self, name) -> ModuleType:
146 if name in self.plugins:
147 with self:
148 return importlib.import_module(name)
149
150 if "." in name:
151 raise ValueError("Plugin name cannot contain '.'")
152
153 with self:
154 return importlib.import_module(f"{self.namespace}.{name}")
155
156 def load_all_plugins(self) -> List[ModuleType]:
157 return [self.load_plugin(name) for name in self.list_plugins()]
158
159 def _rewrite_module_name(self, module_name) -> Optional[str]:
160 prefix = f"{self.internal_module.__name__}."
161 if module_name.startswith(self.namespace + "."):
162 path = module_name.split(".")
163 length = self.namespace.count(".") + 1
164 return f"{prefix}{'.'.join(path[length:])}"
165 elif module_name in self.plugins or module_name.startswith(prefix):
166 return module_name
167 elif module_name in self.search_plugins():
168 return f"{prefix}{module_name}"
169 return None
170
171
172 class PluginFinder(MetaPathFinder):
173
174 def find_spec(self, fullname: str, path, target):
175 if _manager_stack:
176 index = -1
177 while -index <= len(_manager_stack):
178 manager = _manager_stack[index]
179 newname = manager._rewrite_module_name(fullname)
180 if newname:
181 spec = PathFinder.find_spec(
182 newname, [*manager.search_path, *(path or sys.path)],
183 target)
184 if spec:
185 spec.loader = PluginLoader(manager, newname,
186 spec.origin)
187 return spec
188 index -= 1
189 return None
190
191
192 class PluginLoader(SourceFileLoader):
193
194 def __init__(self, manager: PluginManager, fullname: str, path) -> None:
195 self.manager = manager
196 self.loaded = False
197 self._plugin_token = None
198 self._export_token = None
199 super().__init__(fullname, path)
200
201 def create_module(self, spec) -> Optional[ModuleType]:
202 if self.name in sys.modules:
203 self.loaded = True
204 return sys.modules[self.name]
205 prefix = self.manager.internal_module.__name__
206 plugin_name = self.name[len(prefix):] if self.name.startswith(
207 prefix) else self.name
208 self._plugin_token = _current_plugin.set(plugin_name.lstrip("."))
209 self._export_token = _export.set(Export())
210 # return None to use default module creation
211 return super().create_module(spec)
212
213 def exec_module(self, module: ModuleType) -> None:
214 if self.loaded:
215 return
216 # really need?
217 # setattr(module, "__manager__", self.manager)
218 if self._export_token:
219 setattr(module, "__export__", _export.get())
220
221 super().exec_module(module)
222
223 if self._plugin_token:
224 _current_plugin.reset(self._plugin_token)
225 if self._export_token:
226 _export.reset(self._export_token)
227 return
228
229
230 sys.meta_path.insert(0, PluginFinder())
231
[end of nonebot/plugin/manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nonebot/log.py b/nonebot/log.py
--- a/nonebot/log.py
+++ b/nonebot/log.py
@@ -47,6 +47,10 @@
self.level = "DEBUG"
def __call__(self, record):
+ module = sys.modules.get(record["name"])
+ if module:
+ plugin_name = getattr(module, "__plugin_name__", record["name"])
+ record["name"] = plugin_name
record["name"] = record["name"].split(".")[0]
levelno = logger.level(self.level).no
return record["level"].no >= levelno
diff --git a/nonebot/plugin/manager.py b/nonebot/plugin/manager.py
--- a/nonebot/plugin/manager.py
+++ b/nonebot/plugin/manager.py
@@ -215,6 +215,9 @@
return
# really need?
# setattr(module, "__manager__", self.manager)
+ if self._plugin_token:
+ setattr(module, "__plugin_name__",
+ _current_plugin.get(self._plugin_token))
if self._export_token:
setattr(module, "__export__", _export.get())
|
{"golden_diff": "diff --git a/nonebot/log.py b/nonebot/log.py\n--- a/nonebot/log.py\n+++ b/nonebot/log.py\n@@ -47,6 +47,10 @@\n self.level = \"DEBUG\"\n \n def __call__(self, record):\n+ module = sys.modules.get(record[\"name\"])\n+ if module:\n+ plugin_name = getattr(module, \"__plugin_name__\", record[\"name\"])\n+ record[\"name\"] = plugin_name\n record[\"name\"] = record[\"name\"].split(\".\")[0]\n levelno = logger.level(self.level).no\n return record[\"level\"].no >= levelno\ndiff --git a/nonebot/plugin/manager.py b/nonebot/plugin/manager.py\n--- a/nonebot/plugin/manager.py\n+++ b/nonebot/plugin/manager.py\n@@ -215,6 +215,9 @@\n return\n # really need?\n # setattr(module, \"__manager__\", self.manager)\n+ if self._plugin_token:\n+ setattr(module, \"__plugin_name__\",\n+ _current_plugin.get(self._plugin_token))\n if self._export_token:\n setattr(module, \"__export__\", _export.get())\n", "issue": "Bug: Logger \u53d1\u8d77\u5bf9\u8c61\u540d\u4e00\u5f8b\u4e3a nonebot\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u5728 [v2.0.0a11](https://github.com/nonebot/nonebot2/tree/v2.0.0a11) \u53ca\u66f4\u65e9\u7248\u672c\u4e2d\uff0c\u901a\u8fc7\u5728\u4e8b\u4ef6\u5904\u7406\u7684\u6587\u4ef6\u6216\u8005\u5305\u4e2d\u8c03\u7528 `loguru.logger` \u5176\u8f93\u51fa\u7684\u65e5\u5fd7\u4e4b\u53d1\u8d77\u5bf9\u8c61\u540d\u4e3a\u8be5\u6587\u4ef6\u6216\u5305\u4e4b\u540d\u79f0\r\n\r\n\u5728 [v2.0.0a13.post1](https://github.com/nonebot/nonebot2/tree/v2.0.0a13.post1) \u4e2d\uff0c\u4f7f\u7528\u76f8\u540c\u65b9\u5f0f\u8f93\u51fa\u7684\u65e5\u5fd7\uff0c\u5176\u53d1\u8d77\u5bf9\u8c61\u540d\u4e00\u5f8b\u4e3a nonebot\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n\u65e5\u5fd7\u4e4b\u53d1\u8d77\u5bf9\u8c61\u540d\u4e3a\u8be5\u6587\u4ef6\u6216\u5305\u4e4b\u540d\u79f0\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: windows-latest\r\n - Python Version: 3.9.4\r\n - Nonebot Version: v2.0.0a13.post1\n", "before_files": [{"content": "\"\"\"\n\u65e5\u5fd7\n====\n\nNoneBot \u4f7f\u7528 `loguru`_ \u6765\u8bb0\u5f55\u65e5\u5fd7\u4fe1\u606f\u3002\n\n\u81ea\u5b9a\u4e49 logger \u8bf7\u53c2\u8003 `loguru`_ \u6587\u6863\u3002\n\n.. _loguru:\n https://github.com/Delgan/loguru\n\"\"\"\n\nimport sys\nimport logging\nfrom typing import Union\n\nfrom loguru import logger as logger_\n\n# logger = logging.getLogger(\"nonebot\")\nlogger = logger_\n\"\"\"\n:\u8bf4\u660e:\n\n NoneBot \u65e5\u5fd7\u8bb0\u5f55\u5668\u5bf9\u8c61\u3002\n\n:\u9ed8\u8ba4\u4fe1\u606f:\n\n * \u683c\u5f0f: ``[%(asctime)s %(name)s] %(levelname)s: %(message)s``\n * \u7b49\u7ea7: ``DEBUG`` / ``INFO`` \uff0c\u6839\u636e config \u914d\u7f6e\u6539\u53d8\n * \u8f93\u51fa: \u8f93\u51fa\u81f3 stdout\n\n:\u7528\u6cd5:\n\n.. 
code-block:: python\n\n from nonebot.log import logger\n\"\"\"\n\n# default_handler = logging.StreamHandler(sys.stdout)\n# default_handler.setFormatter(\n# logging.Formatter(\"[%(asctime)s %(name)s] %(levelname)s: %(message)s\"))\n# logger.addHandler(default_handler)\n\n\nclass Filter:\n\n def __init__(self) -> None:\n self.level: Union[int, str] = \"DEBUG\"\n\n def __call__(self, record):\n record[\"name\"] = record[\"name\"].split(\".\")[0]\n levelno = logger.level(self.level).no if isinstance(self.level,\n str) else self.level\n return record[\"level\"].no >= levelno\n\n\nclass LoguruHandler(logging.Handler):\n\n def emit(self, record):\n try:\n level = logger.level(record.levelname).name\n except ValueError:\n level = record.levelno\n\n frame, depth = logging.currentframe(), 2\n while frame.f_code.co_filename == logging.__file__:\n frame = frame.f_back\n depth += 1\n\n logger.opt(depth=depth,\n exception=record.exc_info).log(level, record.getMessage())\n\n\nlogger.remove()\ndefault_filter = Filter()\ndefault_format = (\n \"<g>{time:MM-DD HH:mm:ss}</g> \"\n \"[<lvl>{level}</lvl>] \"\n \"<c><u>{name}</u></c> | \"\n # \"<c>{function}:{line}</c>| \"\n \"{message}\")\nlogger_id = logger.add(sys.stdout,\n colorize=True,\n diagnose=False,\n filter=default_filter,\n format=default_format)\n", "path": "nonebot/log.py"}, {"content": "import sys\nimport uuid\nimport pkgutil\nimport importlib\nfrom hashlib import md5\nfrom types import ModuleType\nfrom collections import Counter\nfrom contextvars import ContextVar\nfrom importlib.abc import MetaPathFinder\nfrom typing import Set, List, Optional, Iterable\nfrom importlib.machinery import PathFinder, SourceFileLoader\n\nfrom .export import _export, Export\n\n_current_plugin: ContextVar[Optional[str]] = ContextVar(\"_current_plugin\",\n default=None)\n\n_internal_space = ModuleType(__name__ + \"._internal\")\n_internal_space.__path__ = [] # type: ignore\nsys.modules[_internal_space.__name__] = _internal_space\n\n_manager_stack: List[\"PluginManager\"] = []\n\n\nclass _NamespaceModule(ModuleType):\n \"\"\"Simple namespace module to store plugins.\"\"\"\n\n @property\n def __path__(self):\n return []\n\n def __getattr__(self, name: str):\n try:\n return super().__getattr__(name) # type: ignore\n except AttributeError:\n if name.startswith(\"__\"):\n raise\n raise RuntimeError(\"Plugin manager not activated!\")\n\n\nclass _InternalModule(ModuleType):\n \"\"\"Internal module for each plugin manager.\"\"\"\n\n def __init__(self, prefix: str, plugin_manager: \"PluginManager\"):\n super().__init__(f\"{prefix}.{plugin_manager.internal_id}\")\n self.__plugin_manager__ = plugin_manager\n\n @property\n def __path__(self) -> List[str]:\n return list(self.__plugin_manager__.search_path)\n\n\nclass PluginManager:\n\n def __init__(self,\n namespace: Optional[str] = None,\n plugins: Optional[Iterable[str]] = None,\n search_path: Optional[Iterable[str]] = None,\n *,\n id: Optional[str] = None):\n self.namespace: Optional[str] = namespace\n self.namespace_module: Optional[ModuleType] = self._setup_namespace(\n namespace)\n\n self.id: str = id or str(uuid.uuid4())\n self.internal_id: str = md5(\n ((self.namespace or \"\") + self.id).encode()).hexdigest()\n self.internal_module = self._setup_internal_module(self.internal_id)\n\n # simple plugin not in search path\n self.plugins: Set[str] = set(plugins or [])\n self.search_path: Set[str] = set(search_path or [])\n # ensure can be loaded\n self.list_plugins()\n\n def _setup_namespace(self,\n namespace: Optional[str] = 
None\n ) -> Optional[ModuleType]:\n if not namespace:\n return None\n\n try:\n module = importlib.import_module(namespace)\n except ImportError:\n module = _NamespaceModule(namespace)\n if \".\" in namespace:\n parent = importlib.import_module(namespace.rsplit(\".\", 1)[0])\n setattr(parent, namespace.rsplit(\".\", 1)[1], module)\n\n sys.modules[namespace] = module\n return module\n\n def _setup_internal_module(self, internal_id: str) -> ModuleType:\n if hasattr(_internal_space, internal_id):\n raise RuntimeError(\"Plugin manager already exists!\")\n\n prefix = sys._getframe(3).f_globals.get(\n \"__name__\") or _internal_space.__name__\n if not prefix.startswith(_internal_space.__name__):\n prefix = _internal_space.__name__\n module = _InternalModule(prefix, self)\n sys.modules[module.__name__] = module\n setattr(_internal_space, internal_id, module)\n return module\n\n def __enter__(self):\n if self in _manager_stack:\n raise RuntimeError(\"Plugin manager already activated!\")\n _manager_stack.append(self)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n _manager_stack.pop()\n except IndexError:\n pass\n\n def search_plugins(self) -> List[str]:\n return [\n module_info.name\n for module_info in pkgutil.iter_modules(self.search_path)\n ]\n\n def list_plugins(self) -> Set[str]:\n _pre_managers: List[PluginManager]\n if self in _manager_stack:\n _pre_managers = _manager_stack[:_manager_stack.index(self)]\n else:\n _pre_managers = _manager_stack[:]\n\n _search_path: Set[str] = set()\n for manager in _pre_managers:\n _search_path |= manager.search_path\n if _search_path & self.search_path:\n raise RuntimeError(\"Duplicate plugin search path!\")\n\n _search_plugins = self.search_plugins()\n c = Counter([*_search_plugins, *self.plugins])\n conflict = [name for name, num in c.items() if num > 1]\n if conflict:\n raise RuntimeError(\n f\"More than one plugin named {' / '.join(conflict)}!\")\n return set(_search_plugins) | self.plugins\n\n def load_plugin(self, name) -> ModuleType:\n if name in self.plugins:\n with self:\n return importlib.import_module(name)\n\n if \".\" in name:\n raise ValueError(\"Plugin name cannot contain '.'\")\n\n with self:\n return importlib.import_module(f\"{self.namespace}.{name}\")\n\n def load_all_plugins(self) -> List[ModuleType]:\n return [self.load_plugin(name) for name in self.list_plugins()]\n\n def _rewrite_module_name(self, module_name) -> Optional[str]:\n prefix = f\"{self.internal_module.__name__}.\"\n if module_name.startswith(self.namespace + \".\"):\n path = module_name.split(\".\")\n length = self.namespace.count(\".\") + 1\n return f\"{prefix}{'.'.join(path[length:])}\"\n elif module_name in self.plugins or module_name.startswith(prefix):\n return module_name\n elif module_name in self.search_plugins():\n return f\"{prefix}{module_name}\"\n return None\n\n\nclass PluginFinder(MetaPathFinder):\n\n def find_spec(self, fullname: str, path, target):\n if _manager_stack:\n index = -1\n while -index <= len(_manager_stack):\n manager = _manager_stack[index]\n newname = manager._rewrite_module_name(fullname)\n if newname:\n spec = PathFinder.find_spec(\n newname, [*manager.search_path, *(path or sys.path)],\n target)\n if spec:\n spec.loader = PluginLoader(manager, newname,\n spec.origin)\n return spec\n index -= 1\n return None\n\n\nclass PluginLoader(SourceFileLoader):\n\n def __init__(self, manager: PluginManager, fullname: str, path) -> None:\n self.manager = manager\n self.loaded = False\n self._plugin_token = None\n 
self._export_token = None\n super().__init__(fullname, path)\n\n def create_module(self, spec) -> Optional[ModuleType]:\n if self.name in sys.modules:\n self.loaded = True\n return sys.modules[self.name]\n prefix = self.manager.internal_module.__name__\n plugin_name = self.name[len(prefix):] if self.name.startswith(\n prefix) else self.name\n self._plugin_token = _current_plugin.set(plugin_name.lstrip(\".\"))\n self._export_token = _export.set(Export())\n # return None to use default module creation\n return super().create_module(spec)\n\n def exec_module(self, module: ModuleType) -> None:\n if self.loaded:\n return\n # really need?\n # setattr(module, \"__manager__\", self.manager)\n if self._export_token:\n setattr(module, \"__export__\", _export.get())\n\n super().exec_module(module)\n\n if self._plugin_token:\n _current_plugin.reset(self._plugin_token)\n if self._export_token:\n _export.reset(self._export_token)\n return\n\n\nsys.meta_path.insert(0, PluginFinder())\n", "path": "nonebot/plugin/manager.py"}]}
| 3,814 | 264 |
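The patch in the record above restores per-plugin log names by stashing a `__plugin_name__` attribute on each plugin module and letting the log filter look the emitting module up in `sys.modules`. A minimal sketch of that filter pattern in isolation, with a hand-built module standing in for a real NoneBot plugin (the module path and plugin name are illustrative assumptions):

```python
import sys
from types import ModuleType

from loguru import logger

# Stand-in for a plugin module that the plugin manager would normally create.
fake_plugin = ModuleType("internal.plugins.weather")
fake_plugin.__plugin_name__ = "weather"
sys.modules[fake_plugin.__name__] = fake_plugin


def rename_filter(record):
    # Map the emitting module's dotted name back to its plugin name, if any.
    module = sys.modules.get(record["name"])
    if module is not None:
        record["name"] = getattr(module, "__plugin_name__", record["name"])
    return True


logger.remove()
logger.add(sys.stdout, filter=rename_filter, format="[{level}] {name} | {message}")

# Records whose "name" matches the fake module are shown as "weather".
logger.patch(lambda r: r.update(name=fake_plugin.__name__)).info("hello")
```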
gh_patches_debug_57
|
rasdani/github-patches
|
git_diff
|
Anselmoo__spectrafit-695
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs]: Update labeled criteria for CHANGELOG
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
Update the labeler criteria for avoiding double labels in `CHANGELOG.md`
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of spectrafit/__init__.py]
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0a7"
3
[end of spectrafit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0a7"
+__version__ = "1.0.0a8"
|
{"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a7\"\n+__version__ = \"1.0.0a8\"\n", "issue": "[Docs]: Update labeled criteria for CHANGELOG\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\nUpdate the labeler criteria for avoiding double labels in `CHANGELOG.md`\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a7\"\n", "path": "spectrafit/__init__.py"}]}
| 647 | 96 |
gh_patches_debug_53981
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-1222
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sanic `Request` object is falsey
```python
@app.route('/myroute')
async def someroute(request):
if request:
return 'some data'
raise Exception("Woops")
```
This code will raise the exception because `bool(request)` is `False`.
</issue>
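The behaviour in the issue follows from the class definition shown in the listing below: `Request` subclasses `dict`, an empty dict is falsey, and a freshly parsed request keeps nothing in its dict storage, so `bool(request)` evaluates to `False`. A minimal reproduction of the mechanism, together with one way a `__bool__` override changes it (the classes here are stand-ins for illustration, not necessarily Sanic's actual fix):

```python
class DictBackedRequest(dict):
    """Stand-in for sanic.request.Request, which subclasses dict."""


class TruthyRequest(dict):
    def __bool__(self):
        return True  # truthy regardless of dict contents


print(bool(DictBackedRequest()))  # False -- empty-dict semantics
print(bool(TruthyRequest()))      # True
```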
<code>
[start of sanic/request.py]
1 import sys
2 import json
3 import socket
4 from cgi import parse_header
5 from collections import namedtuple
6 from http.cookies import SimpleCookie
7 from httptools import parse_url
8 from urllib.parse import parse_qs, urlunparse
9
10 try:
11 from ujson import loads as json_loads
12 except ImportError:
13 if sys.version_info[:2] == (3, 5):
14 def json_loads(data):
15 # on Python 3.5 json.loads only supports str not bytes
16 return json.loads(data.decode())
17 else:
18 json_loads = json.loads
19
20 from sanic.exceptions import InvalidUsage
21 from sanic.log import error_logger, logger
22
23 DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
24
25
26 # HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
27 # > If the media type remains unknown, the recipient SHOULD treat it
28 # > as type "application/octet-stream"
29
30
31 class RequestParameters(dict):
32 """Hosts a dict with lists as values where get returns the first
33 value of the list and getlist returns the whole shebang
34 """
35
36 def get(self, name, default=None):
37 """Return the first value, either the default or actual"""
38 return super().get(name, [default])[0]
39
40 def getlist(self, name, default=None):
41 """Return the entire list"""
42 return super().get(name, default)
43
44
45 class Request(dict):
46 """Properties of an HTTP request such as URL, headers, etc."""
47 __slots__ = (
48 'app', 'headers', 'version', 'method', '_cookies', 'transport',
49 'body', 'parsed_json', 'parsed_args', 'parsed_form', 'parsed_files',
50 '_ip', '_parsed_url', 'uri_template', 'stream', '_remote_addr',
51 '_socket', '_port', '__weakref__'
52 )
53
54 def __init__(self, url_bytes, headers, version, method, transport):
55 # TODO: Content-Encoding detection
56 self._parsed_url = parse_url(url_bytes)
57 self.app = None
58
59 self.headers = headers
60 self.version = version
61 self.method = method
62 self.transport = transport
63
64 # Init but do not inhale
65 self.body = []
66 self.parsed_json = None
67 self.parsed_form = None
68 self.parsed_files = None
69 self.parsed_args = None
70 self.uri_template = None
71 self._cookies = None
72 self.stream = None
73
74 def __repr__(self):
75 if self.method is None or not self.path:
76 return '<{0}>'.format(self.__class__.__name__)
77 return '<{0}: {1} {2}>'.format(self.__class__.__name__,
78 self.method,
79 self.path)
80
81 @property
82 def json(self):
83 if self.parsed_json is None:
84 self.load_json()
85
86 return self.parsed_json
87
88 def load_json(self, loads=json_loads):
89 try:
90 self.parsed_json = loads(self.body)
91 except Exception:
92 if not self.body:
93 return None
94 raise InvalidUsage("Failed when parsing body as json")
95
96 return self.parsed_json
97
98 @property
99 def token(self):
100 """Attempt to return the auth header token.
101
102 :return: token related to request
103 """
104 prefixes = ('Bearer', 'Token')
105 auth_header = self.headers.get('Authorization')
106
107 if auth_header is not None:
108 for prefix in prefixes:
109 if prefix in auth_header:
110 return auth_header.partition(prefix)[-1].strip()
111
112 return auth_header
113
114 @property
115 def form(self):
116 if self.parsed_form is None:
117 self.parsed_form = RequestParameters()
118 self.parsed_files = RequestParameters()
119 content_type = self.headers.get(
120 'Content-Type', DEFAULT_HTTP_CONTENT_TYPE)
121 content_type, parameters = parse_header(content_type)
122 try:
123 if content_type == 'application/x-www-form-urlencoded':
124 self.parsed_form = RequestParameters(
125 parse_qs(self.body.decode('utf-8')))
126 elif content_type == 'multipart/form-data':
127 # TODO: Stream this instead of reading to/from memory
128 boundary = parameters['boundary'].encode('utf-8')
129 self.parsed_form, self.parsed_files = (
130 parse_multipart_form(self.body, boundary))
131 except Exception:
132 error_logger.exception("Failed when parsing form")
133
134 return self.parsed_form
135
136 @property
137 def files(self):
138 if self.parsed_files is None:
139 self.form # compute form to get files
140
141 return self.parsed_files
142
143 @property
144 def args(self):
145 if self.parsed_args is None:
146 if self.query_string:
147 self.parsed_args = RequestParameters(
148 parse_qs(self.query_string))
149 else:
150 self.parsed_args = RequestParameters()
151 return self.parsed_args
152
153 @property
154 def raw_args(self):
155 return {k: v[0] for k, v in self.args.items()}
156
157 @property
158 def cookies(self):
159 if self._cookies is None:
160 cookie = self.headers.get('Cookie')
161 if cookie is not None:
162 cookies = SimpleCookie()
163 cookies.load(cookie)
164 self._cookies = {name: cookie.value
165 for name, cookie in cookies.items()}
166 else:
167 self._cookies = {}
168 return self._cookies
169
170 @property
171 def ip(self):
172 if not hasattr(self, '_socket'):
173 self._get_address()
174 return self._ip
175
176 @property
177 def port(self):
178 if not hasattr(self, '_socket'):
179 self._get_address()
180 return self._port
181
182 @property
183 def socket(self):
184 if not hasattr(self, '_socket'):
185 self._get_address()
186 return self._socket
187
188 def _get_address(self):
189 sock = self.transport.get_extra_info('socket')
190
191 if sock.family == socket.AF_INET:
192 self._socket = (self.transport.get_extra_info('peername') or
193 (None, None))
194 self._ip, self._port = self._socket
195 elif sock.family == socket.AF_INET6:
196 self._socket = (self.transport.get_extra_info('peername') or
197 (None, None, None, None))
198 self._ip, self._port, *_ = self._socket
199 else:
200 self._ip, self._port = (None, None)
201
202 @property
203 def remote_addr(self):
204 """Attempt to return the original client ip based on X-Forwarded-For.
205
206 :return: original client ip.
207 """
208 if not hasattr(self, '_remote_addr'):
209 forwarded_for = self.headers.get('X-Forwarded-For', '').split(',')
210 remote_addrs = [
211 addr for addr in [
212 addr.strip() for addr in forwarded_for
213 ] if addr
214 ]
215 if len(remote_addrs) > 0:
216 self._remote_addr = remote_addrs[0]
217 else:
218 self._remote_addr = ''
219 return self._remote_addr
220
221 @property
222 def scheme(self):
223 if self.app.websocket_enabled \
224 and self.headers.get('upgrade') == 'websocket':
225 scheme = 'ws'
226 else:
227 scheme = 'http'
228
229 if self.transport.get_extra_info('sslcontext'):
230 scheme += 's'
231
232 return scheme
233
234 @property
235 def host(self):
236 # it appears that httptools doesn't return the host
237 # so pull it from the headers
238 return self.headers.get('Host', '')
239
240 @property
241 def content_type(self):
242 return self.headers.get('Content-Type', DEFAULT_HTTP_CONTENT_TYPE)
243
244 @property
245 def match_info(self):
246 """return matched info after resolving route"""
247 return self.app.router.get(self)[2]
248
249 @property
250 def path(self):
251 return self._parsed_url.path.decode('utf-8')
252
253 @property
254 def query_string(self):
255 if self._parsed_url.query:
256 return self._parsed_url.query.decode('utf-8')
257 else:
258 return ''
259
260 @property
261 def url(self):
262 return urlunparse((
263 self.scheme,
264 self.host,
265 self.path,
266 None,
267 self.query_string,
268 None))
269
270
271 File = namedtuple('File', ['type', 'body', 'name'])
272
273
274 def parse_multipart_form(body, boundary):
275 """Parse a request body and returns fields and files
276
277 :param body: bytes request body
278 :param boundary: bytes multipart boundary
279 :return: fields (RequestParameters), files (RequestParameters)
280 """
281 files = RequestParameters()
282 fields = RequestParameters()
283
284 form_parts = body.split(boundary)
285 for form_part in form_parts[1:-1]:
286 file_name = None
287 content_type = 'text/plain'
288 content_charset = 'utf-8'
289 field_name = None
290 line_index = 2
291 line_end_index = 0
292 while not line_end_index == -1:
293 line_end_index = form_part.find(b'\r\n', line_index)
294 form_line = form_part[line_index:line_end_index].decode('utf-8')
295 line_index = line_end_index + 2
296
297 if not form_line:
298 break
299
300 colon_index = form_line.index(':')
301 form_header_field = form_line[0:colon_index].lower()
302 form_header_value, form_parameters = parse_header(
303 form_line[colon_index + 2:])
304
305 if form_header_field == 'content-disposition':
306 file_name = form_parameters.get('filename')
307 field_name = form_parameters.get('name')
308 elif form_header_field == 'content-type':
309 content_type = form_header_value
310 content_charset = form_parameters.get('charset', 'utf-8')
311
312 if field_name:
313 post_data = form_part[line_index:-4]
314 if file_name:
315 form_file = File(type=content_type,
316 name=file_name,
317 body=post_data)
318 if field_name in files:
319 files[field_name].append(form_file)
320 else:
321 files[field_name] = [form_file]
322 else:
323 value = post_data.decode(content_charset)
324 if field_name in fields:
325 fields[field_name].append(value)
326 else:
327 fields[field_name] = [value]
328 else:
329 logger.debug('Form-data field does not have a \'name\' parameter \
330 in the Content-Disposition header')
331
332 return fields, files
333
[end of sanic/request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sanic/request.py b/sanic/request.py
--- a/sanic/request.py
+++ b/sanic/request.py
@@ -78,6 +78,11 @@
self.method,
self.path)
+ def __bool__(self):
+ if self.transport:
+ return True
+ return False
+
@property
def json(self):
if self.parsed_json is None:
|
{"golden_diff": "diff --git a/sanic/request.py b/sanic/request.py\n--- a/sanic/request.py\n+++ b/sanic/request.py\n@@ -78,6 +78,11 @@\n self.method,\n self.path)\n \n+ def __bool__(self):\n+ if self.transport:\n+ return True\n+ return False\n+\n @property\n def json(self):\n if self.parsed_json is None:\n", "issue": "Sanic `Request` object is falsey\n```python\r\[email protected]('/myroute')\r\nasync def someroute(request):\r\n if request:\r\n return 'some data'\r\n raise Exception(\"Woops\")\r\n```\r\n\r\nThis code will raise the exception because `bool(request)` is `False`. \n", "before_files": [{"content": "import sys\nimport json\nimport socket\nfrom cgi import parse_header\nfrom collections import namedtuple\nfrom http.cookies import SimpleCookie\nfrom httptools import parse_url\nfrom urllib.parse import parse_qs, urlunparse\n\ntry:\n from ujson import loads as json_loads\nexcept ImportError:\n if sys.version_info[:2] == (3, 5):\n def json_loads(data):\n # on Python 3.5 json.loads only supports str not bytes\n return json.loads(data.decode())\n else:\n json_loads = json.loads\n\nfrom sanic.exceptions import InvalidUsage\nfrom sanic.log import error_logger, logger\n\nDEFAULT_HTTP_CONTENT_TYPE = \"application/octet-stream\"\n\n\n# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1\n# > If the media type remains unknown, the recipient SHOULD treat it\n# > as type \"application/octet-stream\"\n\n\nclass RequestParameters(dict):\n \"\"\"Hosts a dict with lists as values where get returns the first\n value of the list and getlist returns the whole shebang\n \"\"\"\n\n def get(self, name, default=None):\n \"\"\"Return the first value, either the default or actual\"\"\"\n return super().get(name, [default])[0]\n\n def getlist(self, name, default=None):\n \"\"\"Return the entire list\"\"\"\n return super().get(name, default)\n\n\nclass Request(dict):\n \"\"\"Properties of an HTTP request such as URL, headers, etc.\"\"\"\n __slots__ = (\n 'app', 'headers', 'version', 'method', '_cookies', 'transport',\n 'body', 'parsed_json', 'parsed_args', 'parsed_form', 'parsed_files',\n '_ip', '_parsed_url', 'uri_template', 'stream', '_remote_addr',\n '_socket', '_port', '__weakref__'\n )\n\n def __init__(self, url_bytes, headers, version, method, transport):\n # TODO: Content-Encoding detection\n self._parsed_url = parse_url(url_bytes)\n self.app = None\n\n self.headers = headers\n self.version = version\n self.method = method\n self.transport = transport\n\n # Init but do not inhale\n self.body = []\n self.parsed_json = None\n self.parsed_form = None\n self.parsed_files = None\n self.parsed_args = None\n self.uri_template = None\n self._cookies = None\n self.stream = None\n\n def __repr__(self):\n if self.method is None or not self.path:\n return '<{0}>'.format(self.__class__.__name__)\n return '<{0}: {1} {2}>'.format(self.__class__.__name__,\n self.method,\n self.path)\n\n @property\n def json(self):\n if self.parsed_json is None:\n self.load_json()\n\n return self.parsed_json\n\n def load_json(self, loads=json_loads):\n try:\n self.parsed_json = loads(self.body)\n except Exception:\n if not self.body:\n return None\n raise InvalidUsage(\"Failed when parsing body as json\")\n\n return self.parsed_json\n\n @property\n def token(self):\n \"\"\"Attempt to return the auth header token.\n\n :return: token related to request\n \"\"\"\n prefixes = ('Bearer', 'Token')\n auth_header = self.headers.get('Authorization')\n\n if auth_header is not None:\n for prefix in prefixes:\n if prefix in 
auth_header:\n return auth_header.partition(prefix)[-1].strip()\n\n return auth_header\n\n @property\n def form(self):\n if self.parsed_form is None:\n self.parsed_form = RequestParameters()\n self.parsed_files = RequestParameters()\n content_type = self.headers.get(\n 'Content-Type', DEFAULT_HTTP_CONTENT_TYPE)\n content_type, parameters = parse_header(content_type)\n try:\n if content_type == 'application/x-www-form-urlencoded':\n self.parsed_form = RequestParameters(\n parse_qs(self.body.decode('utf-8')))\n elif content_type == 'multipart/form-data':\n # TODO: Stream this instead of reading to/from memory\n boundary = parameters['boundary'].encode('utf-8')\n self.parsed_form, self.parsed_files = (\n parse_multipart_form(self.body, boundary))\n except Exception:\n error_logger.exception(\"Failed when parsing form\")\n\n return self.parsed_form\n\n @property\n def files(self):\n if self.parsed_files is None:\n self.form # compute form to get files\n\n return self.parsed_files\n\n @property\n def args(self):\n if self.parsed_args is None:\n if self.query_string:\n self.parsed_args = RequestParameters(\n parse_qs(self.query_string))\n else:\n self.parsed_args = RequestParameters()\n return self.parsed_args\n\n @property\n def raw_args(self):\n return {k: v[0] for k, v in self.args.items()}\n\n @property\n def cookies(self):\n if self._cookies is None:\n cookie = self.headers.get('Cookie')\n if cookie is not None:\n cookies = SimpleCookie()\n cookies.load(cookie)\n self._cookies = {name: cookie.value\n for name, cookie in cookies.items()}\n else:\n self._cookies = {}\n return self._cookies\n\n @property\n def ip(self):\n if not hasattr(self, '_socket'):\n self._get_address()\n return self._ip\n\n @property\n def port(self):\n if not hasattr(self, '_socket'):\n self._get_address()\n return self._port\n\n @property\n def socket(self):\n if not hasattr(self, '_socket'):\n self._get_address()\n return self._socket\n\n def _get_address(self):\n sock = self.transport.get_extra_info('socket')\n\n if sock.family == socket.AF_INET:\n self._socket = (self.transport.get_extra_info('peername') or\n (None, None))\n self._ip, self._port = self._socket\n elif sock.family == socket.AF_INET6:\n self._socket = (self.transport.get_extra_info('peername') or\n (None, None, None, None))\n self._ip, self._port, *_ = self._socket\n else:\n self._ip, self._port = (None, None)\n\n @property\n def remote_addr(self):\n \"\"\"Attempt to return the original client ip based on X-Forwarded-For.\n\n :return: original client ip.\n \"\"\"\n if not hasattr(self, '_remote_addr'):\n forwarded_for = self.headers.get('X-Forwarded-For', '').split(',')\n remote_addrs = [\n addr for addr in [\n addr.strip() for addr in forwarded_for\n ] if addr\n ]\n if len(remote_addrs) > 0:\n self._remote_addr = remote_addrs[0]\n else:\n self._remote_addr = ''\n return self._remote_addr\n\n @property\n def scheme(self):\n if self.app.websocket_enabled \\\n and self.headers.get('upgrade') == 'websocket':\n scheme = 'ws'\n else:\n scheme = 'http'\n\n if self.transport.get_extra_info('sslcontext'):\n scheme += 's'\n\n return scheme\n\n @property\n def host(self):\n # it appears that httptools doesn't return the host\n # so pull it from the headers\n return self.headers.get('Host', '')\n\n @property\n def content_type(self):\n return self.headers.get('Content-Type', DEFAULT_HTTP_CONTENT_TYPE)\n\n @property\n def match_info(self):\n \"\"\"return matched info after resolving route\"\"\"\n return self.app.router.get(self)[2]\n\n @property\n def 
path(self):\n return self._parsed_url.path.decode('utf-8')\n\n @property\n def query_string(self):\n if self._parsed_url.query:\n return self._parsed_url.query.decode('utf-8')\n else:\n return ''\n\n @property\n def url(self):\n return urlunparse((\n self.scheme,\n self.host,\n self.path,\n None,\n self.query_string,\n None))\n\n\nFile = namedtuple('File', ['type', 'body', 'name'])\n\n\ndef parse_multipart_form(body, boundary):\n \"\"\"Parse a request body and returns fields and files\n\n :param body: bytes request body\n :param boundary: bytes multipart boundary\n :return: fields (RequestParameters), files (RequestParameters)\n \"\"\"\n files = RequestParameters()\n fields = RequestParameters()\n\n form_parts = body.split(boundary)\n for form_part in form_parts[1:-1]:\n file_name = None\n content_type = 'text/plain'\n content_charset = 'utf-8'\n field_name = None\n line_index = 2\n line_end_index = 0\n while not line_end_index == -1:\n line_end_index = form_part.find(b'\\r\\n', line_index)\n form_line = form_part[line_index:line_end_index].decode('utf-8')\n line_index = line_end_index + 2\n\n if not form_line:\n break\n\n colon_index = form_line.index(':')\n form_header_field = form_line[0:colon_index].lower()\n form_header_value, form_parameters = parse_header(\n form_line[colon_index + 2:])\n\n if form_header_field == 'content-disposition':\n file_name = form_parameters.get('filename')\n field_name = form_parameters.get('name')\n elif form_header_field == 'content-type':\n content_type = form_header_value\n content_charset = form_parameters.get('charset', 'utf-8')\n\n if field_name:\n post_data = form_part[line_index:-4]\n if file_name:\n form_file = File(type=content_type,\n name=file_name,\n body=post_data)\n if field_name in files:\n files[field_name].append(form_file)\n else:\n files[field_name] = [form_file]\n else:\n value = post_data.decode(content_charset)\n if field_name in fields:\n fields[field_name].append(value)\n else:\n fields[field_name] = [value]\n else:\n logger.debug('Form-data field does not have a \\'name\\' parameter \\\n in the Content-Disposition header')\n\n return fields, files\n", "path": "sanic/request.py"}]}
| 3,767 | 93 |
gh_patches_debug_3952 | rasdani/github-patches | git_diff | WeblateOrg__weblate-10868 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Backup restore locks projects and shows no existing components
### Describe the issue
I was testing the borg backup feature in weblate and upon restoring the backup I noticed that the projects had the label that indicated that the project is locked (See Image).
I see that the information is all there in the backup but it is not shown in the app. For example, after opening the projects, no components are found.
I have found little to no information online regarding such an issue.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar filed issues in this repository.
### Steps to reproduce the behavior
The issue happened after following the directions for a borg backup and restore in the weblate documentation.
### Expected behavior
_No response_
### Screenshots

### Exception traceback
_No response_
### How do you run Weblate?
Docker container
### Weblate versions
weblate/weblate:edge
Hash: sha256:780310018d21eba2ef5e06b66acab7bf2e86b3ed1778640707ad47d0525cb213.
### Weblate deploy checks
_No response_
### Additional context
_No response_
</issue>
<code>
[start of weblate/utils/tasks.py]
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 import gzip
6 import os
7 import shutil
8 import subprocess
9 import sys
10 import time
11 from importlib import import_module
12 from shutil import copyfile
13
14 from celery.schedules import crontab
15 from django.conf import settings
16 from django.core.cache import cache
17 from django.core.management.commands import diffsettings
18 from ruamel.yaml import YAML
19
20 import weblate.utils.version
21 from weblate.formats.models import FILE_FORMATS
22 from weblate.logger import LOGGER
23 from weblate.machinery.models import MACHINERY
24 from weblate.trans.models import Component, Translation
25 from weblate.trans.util import get_clean_env
26 from weblate.utils.backup import backup_lock
27 from weblate.utils.celery import app
28 from weblate.utils.data import data_dir
29 from weblate.utils.db import using_postgresql
30 from weblate.utils.errors import add_breadcrumb, report_error
31 from weblate.utils.lock import WeblateLockTimeoutError
32 from weblate.vcs.models import VCS_REGISTRY
33
34
35 @app.task(trail=False)
36 def ping():
37 return {
38 "version": weblate.utils.version.GIT_VERSION,
39 "vcs": sorted(VCS_REGISTRY.keys()),
40 "formats": sorted(FILE_FORMATS.keys()),
41 "mt_services": sorted(MACHINERY.keys()),
42 "encoding": [sys.getfilesystemencoding(), sys.getdefaultencoding()],
43 "uid": os.getuid(),
44 }
45
46
47 @app.task(trail=False)
48 def heartbeat():
49 cache.set("celery_loaded", time.time())
50 cache.set("celery_heartbeat", time.time())
51 cache.set(
52 "celery_encoding", [sys.getfilesystemencoding(), sys.getdefaultencoding()]
53 )
54
55
56 @app.task(trail=False, autoretry_for=(WeblateLockTimeoutError,))
57 def settings_backup():
58 with backup_lock():
59 # Expand settings in case it contains non-trivial code
60 command = diffsettings.Command()
61 kwargs = {"default": None, "all": False, "output": "hash"}
62 with open(data_dir("backups", "settings-expanded.py"), "w") as handle:
63 handle.write(command.handle(**kwargs))
64
65 # Backup original settings
66 if settings.SETTINGS_MODULE:
67 settings_mod = import_module(settings.SETTINGS_MODULE)
68 copyfile(settings_mod.__file__, data_dir("backups", "settings.py"))
69
70 # Backup environment (to make restoring Docker easier)
71 with open(data_dir("backups", "environment.yml"), "w") as handle:
72 yaml = YAML()
73 yaml.dump(dict(os.environ), handle)
74
75
76 @app.task(trail=False)
77 def update_translation_stats_parents(pk: int):
78 translation = Translation.objects.get(pk=pk)
79 translation.stats.update_parents()
80
81
82 @app.task(trail=False)
83 def update_language_stats_parents(pk: int):
84 component = Component.objects.get(pk=pk)
85 component.stats.update_language_stats_parents()
86
87
88 @app.task(trail=False, autoretry_for=(WeblateLockTimeoutError,))
89 def database_backup():
90 if settings.DATABASE_BACKUP == "none":
91 return
92 with backup_lock():
93 database = settings.DATABASES["default"]
94 env = get_clean_env()
95 compress = settings.DATABASE_BACKUP == "compressed"
96
97 out_compressed = data_dir("backups", "database.sql.gz")
98 out_text = data_dir("backups", "database.sql")
99
100 if using_postgresql():
101 cmd = [
102 "pg_dump",
103 # Superuser only, crashes on Alibaba Cloud Database PolarDB
104 "--no-subscriptions",
105 "--dbname",
106 database["NAME"],
107 ]
108
109 if database["HOST"]:
110 cmd.extend(["--host", database["HOST"]])
111 if database["PORT"]:
112 cmd.extend(["--port", database["PORT"]])
113 if database["USER"]:
114 cmd.extend(["--username", database["USER"]])
115 if settings.DATABASE_BACKUP == "compressed":
116 cmd.extend(["--file", out_compressed])
117 cmd.extend(["--compress", "6"])
118 compress = False
119 else:
120 cmd.extend(["--file", out_text])
121
122 env["PGPASSWORD"] = database["PASSWORD"]
123 else:
124 cmd = [
125 "mysqldump",
126 "--result-file",
127 out_text,
128 "--single-transaction",
129 "--skip-lock-tables",
130 ]
131
132 if database["HOST"]:
133 cmd.extend(["--host", database["HOST"]])
134 if database["PORT"]:
135 cmd.extend(["--port", database["PORT"]])
136 if database["USER"]:
137 cmd.extend(["--user", database["USER"]])
138
139 cmd.extend(["--databases", database["NAME"]])
140
141 env["MYSQL_PWD"] = database["PASSWORD"]
142
143 try:
144 subprocess.run(
145 cmd,
146 env=env,
147 capture_output=True,
148 stdin=subprocess.DEVNULL,
149 check=True,
150 text=True,
151 )
152 except subprocess.CalledProcessError as error:
153 add_breadcrumb(
154 category="backup",
155 message="database dump output",
156 stdout=error.stdout,
157 stderr=error.stderr,
158 )
159 LOGGER.error("failed database backup: %s", error.stderr)
160 report_error()
161 raise
162
163 if compress:
164 with open(out_text, "rb") as f_in, gzip.open(out_compressed, "wb") as f_out:
165 shutil.copyfileobj(f_in, f_out)
166 os.unlink(out_text)
167
168
169 @app.on_after_finalize.connect
170 def setup_periodic_tasks(sender, **kwargs):
171 cache.set("celery_loaded", time.time())
172 sender.add_periodic_task(
173 crontab(hour=1, minute=0), settings_backup.s(), name="settings-backup"
174 )
175 sender.add_periodic_task(
176 crontab(hour=1, minute=30), database_backup.s(), name="database-backup"
177 )
178 sender.add_periodic_task(60, heartbeat.s(), name="heartbeat")
179
[end of weblate/utils/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/weblate/utils/tasks.py b/weblate/utils/tasks.py
--- a/weblate/utils/tasks.py
+++ b/weblate/utils/tasks.py
@@ -102,6 +102,8 @@
"pg_dump",
# Superuser only, crashes on Alibaba Cloud Database PolarDB
"--no-subscriptions",
+ "--clean",
+ "--if-exists",
"--dbname",
database["NAME"],
]
|
{"golden_diff": "diff --git a/weblate/utils/tasks.py b/weblate/utils/tasks.py\n--- a/weblate/utils/tasks.py\n+++ b/weblate/utils/tasks.py\n@@ -102,6 +102,8 @@\n \"pg_dump\",\n # Superuser only, crashes on Alibaba Cloud Database PolarDB\n \"--no-subscriptions\",\n+ \"--clean\",\n+ \"--if-exists\",\n \"--dbname\",\n database[\"NAME\"],\n ]\n", "issue": "Backup restore locks projects and shows no existing components\n### Describe the issue\r\n\r\nI was testing the borg backup feature in weblate and upon restoring the backup I noticed that the projects had the label that indicated that the project is locked (See Image). \r\n\r\nI see that the information is all there in the backup but it is not shown in the app. For example, after opening the projects, no components are found.\r\n\r\nI have found little to no information online regarding such an issue.\r\n\r\n\r\n\r\n\r\n\r\n### I already tried\r\n\r\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\r\n- [X] I've searched for similar filed issues in this repository.\r\n\r\n### Steps to reproduce the behavior\r\n\r\nThe issue happened after following the directions for a borg backup and restore in the weblate documentation.\r\n\r\n### Expected behavior\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n\r\n\r\n\r\n### Exception traceback\r\n\r\n_No response_\r\n\r\n### How do you run Weblate?\r\n\r\nDocker container\r\n\r\n### Weblate versions\r\n\r\n weblate/weblate:edge\r\nHash: sha256:780310018d21eba2ef5e06b66acab7bf2e86b3ed1778640707ad47d0525cb213.\r\n\r\n### Weblate deploy checks\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport gzip\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport time\nfrom importlib import import_module\nfrom shutil import copyfile\n\nfrom celery.schedules import crontab\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.management.commands import diffsettings\nfrom ruamel.yaml import YAML\n\nimport weblate.utils.version\nfrom weblate.formats.models import FILE_FORMATS\nfrom weblate.logger import LOGGER\nfrom weblate.machinery.models import MACHINERY\nfrom weblate.trans.models import Component, Translation\nfrom weblate.trans.util import get_clean_env\nfrom weblate.utils.backup import backup_lock\nfrom weblate.utils.celery import app\nfrom weblate.utils.data import data_dir\nfrom weblate.utils.db import using_postgresql\nfrom weblate.utils.errors import add_breadcrumb, report_error\nfrom weblate.utils.lock import WeblateLockTimeoutError\nfrom weblate.vcs.models import VCS_REGISTRY\n\n\[email protected](trail=False)\ndef ping():\n return {\n \"version\": weblate.utils.version.GIT_VERSION,\n \"vcs\": sorted(VCS_REGISTRY.keys()),\n \"formats\": sorted(FILE_FORMATS.keys()),\n \"mt_services\": sorted(MACHINERY.keys()),\n \"encoding\": [sys.getfilesystemencoding(), sys.getdefaultencoding()],\n \"uid\": os.getuid(),\n }\n\n\[email protected](trail=False)\ndef heartbeat():\n cache.set(\"celery_loaded\", time.time())\n cache.set(\"celery_heartbeat\", time.time())\n cache.set(\n \"celery_encoding\", [sys.getfilesystemencoding(), sys.getdefaultencoding()]\n )\n\n\[email protected](trail=False, autoretry_for=(WeblateLockTimeoutError,))\ndef settings_backup():\n with backup_lock():\n # Expand settings in case it contains non-trivial code\n command = diffsettings.Command()\n 
kwargs = {\"default\": None, \"all\": False, \"output\": \"hash\"}\n with open(data_dir(\"backups\", \"settings-expanded.py\"), \"w\") as handle:\n handle.write(command.handle(**kwargs))\n\n # Backup original settings\n if settings.SETTINGS_MODULE:\n settings_mod = import_module(settings.SETTINGS_MODULE)\n copyfile(settings_mod.__file__, data_dir(\"backups\", \"settings.py\"))\n\n # Backup environment (to make restoring Docker easier)\n with open(data_dir(\"backups\", \"environment.yml\"), \"w\") as handle:\n yaml = YAML()\n yaml.dump(dict(os.environ), handle)\n\n\[email protected](trail=False)\ndef update_translation_stats_parents(pk: int):\n translation = Translation.objects.get(pk=pk)\n translation.stats.update_parents()\n\n\[email protected](trail=False)\ndef update_language_stats_parents(pk: int):\n component = Component.objects.get(pk=pk)\n component.stats.update_language_stats_parents()\n\n\[email protected](trail=False, autoretry_for=(WeblateLockTimeoutError,))\ndef database_backup():\n if settings.DATABASE_BACKUP == \"none\":\n return\n with backup_lock():\n database = settings.DATABASES[\"default\"]\n env = get_clean_env()\n compress = settings.DATABASE_BACKUP == \"compressed\"\n\n out_compressed = data_dir(\"backups\", \"database.sql.gz\")\n out_text = data_dir(\"backups\", \"database.sql\")\n\n if using_postgresql():\n cmd = [\n \"pg_dump\",\n # Superuser only, crashes on Alibaba Cloud Database PolarDB\n \"--no-subscriptions\",\n \"--dbname\",\n database[\"NAME\"],\n ]\n\n if database[\"HOST\"]:\n cmd.extend([\"--host\", database[\"HOST\"]])\n if database[\"PORT\"]:\n cmd.extend([\"--port\", database[\"PORT\"]])\n if database[\"USER\"]:\n cmd.extend([\"--username\", database[\"USER\"]])\n if settings.DATABASE_BACKUP == \"compressed\":\n cmd.extend([\"--file\", out_compressed])\n cmd.extend([\"--compress\", \"6\"])\n compress = False\n else:\n cmd.extend([\"--file\", out_text])\n\n env[\"PGPASSWORD\"] = database[\"PASSWORD\"]\n else:\n cmd = [\n \"mysqldump\",\n \"--result-file\",\n out_text,\n \"--single-transaction\",\n \"--skip-lock-tables\",\n ]\n\n if database[\"HOST\"]:\n cmd.extend([\"--host\", database[\"HOST\"]])\n if database[\"PORT\"]:\n cmd.extend([\"--port\", database[\"PORT\"]])\n if database[\"USER\"]:\n cmd.extend([\"--user\", database[\"USER\"]])\n\n cmd.extend([\"--databases\", database[\"NAME\"]])\n\n env[\"MYSQL_PWD\"] = database[\"PASSWORD\"]\n\n try:\n subprocess.run(\n cmd,\n env=env,\n capture_output=True,\n stdin=subprocess.DEVNULL,\n check=True,\n text=True,\n )\n except subprocess.CalledProcessError as error:\n add_breadcrumb(\n category=\"backup\",\n message=\"database dump output\",\n stdout=error.stdout,\n stderr=error.stderr,\n )\n LOGGER.error(\"failed database backup: %s\", error.stderr)\n report_error()\n raise\n\n if compress:\n with open(out_text, \"rb\") as f_in, gzip.open(out_compressed, \"wb\") as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.unlink(out_text)\n\n\[email protected]_after_finalize.connect\ndef setup_periodic_tasks(sender, **kwargs):\n cache.set(\"celery_loaded\", time.time())\n sender.add_periodic_task(\n crontab(hour=1, minute=0), settings_backup.s(), name=\"settings-backup\"\n )\n sender.add_periodic_task(\n crontab(hour=1, minute=30), database_backup.s(), name=\"database-backup\"\n )\n sender.add_periodic_task(60, heartbeat.s(), name=\"heartbeat\")\n", "path": "weblate/utils/tasks.py"}]}
| 2,634 | 98 |
gh_patches_debug_3383 | rasdani/github-patches | git_diff | techmatters__terraso-backend-722 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
harddelete fails with AttributeError: 'NoneType' object has no attribute 'delete'
## Description
The `harddelete` command fails with this backtrace:
```
Aug 5 12:01:39 AM {"extra": {"landscape_id": "UUID('8483fbd9-307a-4b1f-a6f2-2eafaf6526b9')"}, "event": "Landscape has no default group, but it must have", "timestamp": "2023-08-05T04:01:39.275313Z", "logger": "apps.core.models.landscapes", "level": "error"}
Aug 5 12:01:39 AM Traceback (most recent call last):
Aug 5 12:01:39 AM File "/app/terraso_backend/manage.py", line 34, in <module>
Aug 5 12:01:39 AM main()
Aug 5 12:01:39 AM File "/app/terraso_backend/manage.py", line 30, in main
Aug 5 12:01:39 AM execute_from_command_line(sys.argv)
Aug 5 12:01:39 AM File "/home/terraso/.local/lib/python3.11/site-packages/django/core/management/__init__.py", line 442, in execute_from_command_line
Aug 5 12:01:39 AM utility.execute()
Aug 5 12:01:39 AM File "/home/terraso/.local/lib/python3.11/site-packages/django/core/management/__init__.py", line 436, in execute
Aug 5 12:01:39 AM self.fetch_command(subcommand).run_from_argv(self.argv)
Aug 5 12:01:39 AM File "/home/terraso/.local/lib/python3.11/site-packages/django/core/management/base.py", line 412, in run_from_argv
Aug 5 12:01:39 AM self.execute(*args, **cmd_options)
Aug 5 12:01:39 AM File "/home/terraso/.local/lib/python3.11/site-packages/django/core/management/base.py", line 458, in execute
Aug 5 12:01:39 AM output = self.handle(*args, **options)
Aug 5 12:01:39 AM ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Aug 5 12:01:39 AM File "/app/terraso_backend/apps/core/management/commands/harddelete.py", line 62, in handle
Aug 5 12:01:39 AM obj.delete(force_policy=HARD_DELETE)
Aug 5 12:01:39 AM File "/app/terraso_backend/apps/core/models/landscapes.py", line 129, in delete
Aug 5 12:01:39 AM default_group.delete()
Aug 5 12:01:39 AM ^^^^^^^^^^^^^^^^^^^^
Aug 5 12:01:39 AM AttributeError: 'NoneType' object has no attribute 'delete'
```
</issue>
<code>
[start of terraso_backend/apps/core/models/landscapes.py]
1 # Copyright © 2021-2023 Technology Matters
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License as published
5 # by the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU Affero General Public License for more details.
12 #
13 # You should have received a copy of the GNU Affero General Public License
14 # along with this program. If not, see https://www.gnu.org/licenses/.
15
16 import structlog
17 from dirtyfields import DirtyFieldsMixin
18 from django.db import models, transaction
19
20 from apps.core import permission_rules as perm_rules
21 from apps.core.gis.utils import (
22 calculate_geojson_centroid,
23 calculate_geojson_feature_area,
24 )
25 from apps.core.models.taxonomy_terms import TaxonomyTerm
26
27 from .commons import BaseModel, SlugModel, validate_name
28 from .groups import Group
29 from .users import User
30
31 logger = structlog.get_logger(__name__)
32
33
34 class Landscape(SlugModel, DirtyFieldsMixin):
35 """
36 This model represents a Landscape on Terraso platform.
37
38 A Landscape is a socio-ecological system that consists of natural
39 and/or human-modified ecosystems. Defined by its stakeholds, a
40 Landscape usually has geographical boundaries. It may correspond to,
41 or be a combination of, natural boundaries, distinct land features,
42 socially defined areas such as indigenous territories, and/or
43 jurisdictional and administrative boundaries. The boundaries of a
44 Landscape can cross several countries.
45 """
46
47 fields_to_trim = ["name", "description"]
48
49 name = models.CharField(max_length=128, validators=[validate_name])
50 description = models.TextField(blank=True, default="")
51 website = models.URLField(max_length=500, blank=True, default="")
52 location = models.CharField(max_length=128, blank=True, default="")
53 area_polygon = models.JSONField(blank=True, null=True)
54 email = models.EmailField(blank=True, default="")
55 area_scalar_m2 = models.FloatField(blank=True, null=True)
56
57 created_by = models.ForeignKey(
58 User,
59 blank=True,
60 null=True,
61 on_delete=models.PROTECT,
62 related_name="created_landscapes",
63 )
64 groups = models.ManyToManyField(Group, through="LandscapeGroup")
65
66 area_types = models.JSONField(blank=True, null=True)
67 taxonomy_terms = models.ManyToManyField(TaxonomyTerm, blank=True)
68 population = models.IntegerField(blank=True, null=True)
69
70 PARTNERSHIP_STATUS_NONE = ""
71 PARTNERSHIP_STATUS_NO = "no"
72 PARTNERSHIP_STATUS_IN_PROGRESS = "in-progress"
73 PARTNERSHIP_STATUS_YES = "yes"
74
75 MEMBERSHIP_TYPES = (
76 (PARTNERSHIP_STATUS_NONE, "None"),
77 (PARTNERSHIP_STATUS_NO, "No"),
78 (PARTNERSHIP_STATUS_IN_PROGRESS, "In Progress"),
79 (PARTNERSHIP_STATUS_YES, "Yes"),
80 )
81 partnership_status = models.CharField(
82 max_length=32, choices=MEMBERSHIP_TYPES, blank=True, default=PARTNERSHIP_STATUS_NONE
83 )
84 profile_image = models.URLField(blank=True, default="")
85 profile_image_description = models.TextField(blank=True, default="")
86 center_coordinates = models.JSONField(blank=True, null=True)
87
88 field_to_slug = "name"
89
90 class Meta(SlugModel.Meta):
91 rules_permissions = {
92 "change": perm_rules.allowed_to_change_landscape,
93 "delete": perm_rules.allowed_to_delete_landscape,
94 }
95 _unique_fields = ["name"]
96 abstract = False
97
98 def save(self, *args, **kwargs):
99 dirty_fields = self.get_dirty_fields()
100 if self.area_polygon and "area_polygon" in dirty_fields:
101 area_scalar_m2 = calculate_geojson_feature_area(self.area_polygon)
102 if area_scalar_m2 is not None:
103 self.area_scalar_m2 = round(area_scalar_m2, 3)
104 self.center_coordinates = calculate_geojson_centroid(self.area_polygon)
105
106 with transaction.atomic():
107 creating = not Landscape.objects.filter(pk=self.pk).exists()
108
109 super().save(*args, **kwargs)
110
111 if creating and self.created_by:
112 group = Group(
113 name="Group {}".format(self.slug),
114 description="",
115 created_by=self.created_by,
116 )
117 group.save()
118 landscape_group = LandscapeGroup(
119 group=group, landscape=self, is_default_landscape_group=True
120 )
121 landscape_group.save()
122
123 def delete(self, *args, **kwargs):
124 default_group = self.get_default_group()
125
126 with transaction.atomic():
127 ret = super().delete(*args, **kwargs)
128 # default group should be deleted as well
129 default_group.delete()
130
131 return ret
132
133 def get_default_group(self):
134 """
135 A default Group in a Landscape is that Group where any
136 individual (associated or not with other Groups) is added when
137 associating directly with a Landscape.
138 """
139 try:
140 # associated_groups is the related_name defined on
141 # LandscapeGroup relationship with Landscape. It returns a
142 # queryset of LandscapeGroup
143 landscape_group = self.associated_groups.get(is_default_landscape_group=True)
144 except LandscapeGroup.DoesNotExist:
145 logger.error(
146 "Landscape has no default group, but it must have", extra={"landscape_id": self.pk}
147 )
148 return None
149
150 return landscape_group.group
151
152 def __str__(self):
153 return self.name
154
155
156 class LandscapeDevelopmentStrategy(BaseModel):
157 objectives = models.TextField(blank=True, default="")
158 opportunities = models.TextField(blank=True, default="")
159 problem_situtation = models.TextField(blank=True, default="")
160 intervention_strategy = models.TextField(blank=True, default="")
161 landscape = models.ForeignKey(
162 Landscape, on_delete=models.CASCADE, related_name="associated_development_strategy"
163 )
164
165
166 class LandscapeGroup(BaseModel):
167 """
168 This model represents the association between a Landscape and a Group on
169 Terraso platform.
170 """
171
172 landscape = models.ForeignKey(
173 Landscape, on_delete=models.CASCADE, related_name="associated_groups"
174 )
175 group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name="associated_landscapes")
176
177 is_default_landscape_group = models.BooleanField(blank=True, default=False)
178 is_partnership = models.BooleanField(blank=True, default=False)
179 partnership_year = models.IntegerField(blank=True, null=True)
180
181 class Meta:
182 rules_permissions = {
183 "add": perm_rules.allowed_to_add_landscape_group,
184 "delete": perm_rules.allowed_to_delete_landscape_group,
185 }
186 constraints = (
187 models.UniqueConstraint(
188 fields=("group", "landscape", "is_partnership"),
189 condition=models.Q(deleted_at__isnull=True),
190 name="unique_active_landscape_group",
191 ),
192 )
193
[end of terraso_backend/apps/core/models/landscapes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py
--- a/terraso_backend/apps/core/models/landscapes.py
+++ b/terraso_backend/apps/core/models/landscapes.py
@@ -126,7 +126,8 @@
with transaction.atomic():
ret = super().delete(*args, **kwargs)
# default group should be deleted as well
- default_group.delete()
+ if default_group is not None:
+ default_group.delete()
return ret
|
{"golden_diff": "diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py\n--- a/terraso_backend/apps/core/models/landscapes.py\n+++ b/terraso_backend/apps/core/models/landscapes.py\n@@ -126,7 +126,8 @@\n with transaction.atomic():\n ret = super().delete(*args, **kwargs)\n # default group should be deleted as well\n- default_group.delete()\n+ if default_group is not None:\n+ default_group.delete()\n \n return ret\n", "issue": "harddelete fails with AttributeError: 'NoneType' object has no attribute 'delete'\n## Description\r\nThe `harddelete` command fails with this backtrace:\r\n```\r\nAug 5 12:01:39 AM {\"extra\": {\"landscape_id\": \"UUID('8483fbd9-307a-4b1f-a6f2-2eafaf6526b9')\"}, \"event\": \"Landscape has no default group, but it must have\", \"timestamp\": \"2023-08-05T04:01:39.275313Z\", \"logger\": \"apps.core.models.landscapes\", \"level\": \"error\"}\r\nAug 5 12:01:39 AM Traceback (most recent call last):\r\nAug 5 12:01:39 AM File \"/app/terraso_backend/manage.py\", line 34, in <module>\r\nAug 5 12:01:39 AM main()\r\nAug 5 12:01:39 AM File \"/app/terraso_backend/manage.py\", line 30, in main\r\nAug 5 12:01:39 AM execute_from_command_line(sys.argv)\r\nAug 5 12:01:39 AM File \"/home/terraso/.local/lib/python3.11/site-packages/django/core/management/__init__.py\", line 442, in execute_from_command_line\r\nAug 5 12:01:39 AM utility.execute()\r\nAug 5 12:01:39 AM File \"/home/terraso/.local/lib/python3.11/site-packages/django/core/management/__init__.py\", line 436, in execute\r\nAug 5 12:01:39 AM self.fetch_command(subcommand).run_from_argv(self.argv)\r\nAug 5 12:01:39 AM File \"/home/terraso/.local/lib/python3.11/site-packages/django/core/management/base.py\", line 412, in run_from_argv\r\nAug 5 12:01:39 AM self.execute(*args, **cmd_options)\r\nAug 5 12:01:39 AM File \"/home/terraso/.local/lib/python3.11/site-packages/django/core/management/base.py\", line 458, in execute\r\nAug 5 12:01:39 AM output = self.handle(*args, **options)\r\nAug 5 12:01:39 AM ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nAug 5 12:01:39 AM File \"/app/terraso_backend/apps/core/management/commands/harddelete.py\", line 62, in handle\r\nAug 5 12:01:39 AM obj.delete(force_policy=HARD_DELETE)\r\nAug 5 12:01:39 AM File \"/app/terraso_backend/apps/core/models/landscapes.py\", line 129, in delete\r\nAug 5 12:01:39 AM default_group.delete()\r\nAug 5 12:01:39 AM ^^^^^^^^^^^^^^^^^^^^\r\nAug 5 12:01:39 AM AttributeError: 'NoneType' object has no attribute 'delete'\r\n```\n", "before_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nimport structlog\nfrom dirtyfields import DirtyFieldsMixin\nfrom django.db import models, transaction\n\nfrom apps.core import permission_rules as perm_rules\nfrom apps.core.gis.utils import (\n calculate_geojson_centroid,\n calculate_geojson_feature_area,\n)\nfrom apps.core.models.taxonomy_terms import TaxonomyTerm\n\nfrom .commons import BaseModel, SlugModel, validate_name\nfrom .groups import Group\nfrom .users import User\n\nlogger = structlog.get_logger(__name__)\n\n\nclass Landscape(SlugModel, DirtyFieldsMixin):\n \"\"\"\n This model represents a Landscape on Terraso platform.\n\n A Landscape is a socio-ecological system that consists of natural\n and/or human-modified ecosystems. Defined by its stakeholds, a\n Landscape usually has geographical boundaries. It may correspond to,\n or be a combination of, natural boundaries, distinct land features,\n socially defined areas such as indigenous territories, and/or\n jurisdictional and administrative boundaries. The boundaries of a\n Landscape can cross several countries.\n \"\"\"\n\n fields_to_trim = [\"name\", \"description\"]\n\n name = models.CharField(max_length=128, validators=[validate_name])\n description = models.TextField(blank=True, default=\"\")\n website = models.URLField(max_length=500, blank=True, default=\"\")\n location = models.CharField(max_length=128, blank=True, default=\"\")\n area_polygon = models.JSONField(blank=True, null=True)\n email = models.EmailField(blank=True, default=\"\")\n area_scalar_m2 = models.FloatField(blank=True, null=True)\n\n created_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"created_landscapes\",\n )\n groups = models.ManyToManyField(Group, through=\"LandscapeGroup\")\n\n area_types = models.JSONField(blank=True, null=True)\n taxonomy_terms = models.ManyToManyField(TaxonomyTerm, blank=True)\n population = models.IntegerField(blank=True, null=True)\n\n PARTNERSHIP_STATUS_NONE = \"\"\n PARTNERSHIP_STATUS_NO = \"no\"\n PARTNERSHIP_STATUS_IN_PROGRESS = \"in-progress\"\n PARTNERSHIP_STATUS_YES = \"yes\"\n\n MEMBERSHIP_TYPES = (\n (PARTNERSHIP_STATUS_NONE, \"None\"),\n (PARTNERSHIP_STATUS_NO, \"No\"),\n (PARTNERSHIP_STATUS_IN_PROGRESS, \"In Progress\"),\n (PARTNERSHIP_STATUS_YES, \"Yes\"),\n )\n partnership_status = models.CharField(\n max_length=32, choices=MEMBERSHIP_TYPES, blank=True, default=PARTNERSHIP_STATUS_NONE\n )\n profile_image = models.URLField(blank=True, default=\"\")\n profile_image_description = models.TextField(blank=True, default=\"\")\n center_coordinates = models.JSONField(blank=True, null=True)\n\n field_to_slug = \"name\"\n\n class Meta(SlugModel.Meta):\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_landscape,\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n _unique_fields = [\"name\"]\n abstract = False\n\n def save(self, *args, **kwargs):\n dirty_fields = self.get_dirty_fields()\n if self.area_polygon and \"area_polygon\" in dirty_fields:\n area_scalar_m2 = calculate_geojson_feature_area(self.area_polygon)\n if area_scalar_m2 is not None:\n self.area_scalar_m2 = round(area_scalar_m2, 3)\n self.center_coordinates = calculate_geojson_centroid(self.area_polygon)\n\n with transaction.atomic():\n creating = not Landscape.objects.filter(pk=self.pk).exists()\n\n super().save(*args, **kwargs)\n\n if creating and self.created_by:\n group = Group(\n name=\"Group {}\".format(self.slug),\n description=\"\",\n created_by=self.created_by,\n )\n 
group.save()\n landscape_group = LandscapeGroup(\n group=group, landscape=self, is_default_landscape_group=True\n )\n landscape_group.save()\n\n def delete(self, *args, **kwargs):\n default_group = self.get_default_group()\n\n with transaction.atomic():\n ret = super().delete(*args, **kwargs)\n # default group should be deleted as well\n default_group.delete()\n\n return ret\n\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n individual (associated or not with other Groups) is added when\n associating directly with a Landscape.\n \"\"\"\n try:\n # associated_groups is the related_name defined on\n # LandscapeGroup relationship with Landscape. It returns a\n # queryset of LandscapeGroup\n landscape_group = self.associated_groups.get(is_default_landscape_group=True)\n except LandscapeGroup.DoesNotExist:\n logger.error(\n \"Landscape has no default group, but it must have\", extra={\"landscape_id\": self.pk}\n )\n return None\n\n return landscape_group.group\n\n def __str__(self):\n return self.name\n\n\nclass LandscapeDevelopmentStrategy(BaseModel):\n objectives = models.TextField(blank=True, default=\"\")\n opportunities = models.TextField(blank=True, default=\"\")\n problem_situtation = models.TextField(blank=True, default=\"\")\n intervention_strategy = models.TextField(blank=True, default=\"\")\n landscape = models.ForeignKey(\n Landscape, on_delete=models.CASCADE, related_name=\"associated_development_strategy\"\n )\n\n\nclass LandscapeGroup(BaseModel):\n \"\"\"\n This model represents the association between a Landscape and a Group on\n Terraso platform.\n \"\"\"\n\n landscape = models.ForeignKey(\n Landscape, on_delete=models.CASCADE, related_name=\"associated_groups\"\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"associated_landscapes\")\n\n is_default_landscape_group = models.BooleanField(blank=True, default=False)\n is_partnership = models.BooleanField(blank=True, default=False)\n partnership_year = models.IntegerField(blank=True, null=True)\n\n class Meta:\n rules_permissions = {\n \"add\": perm_rules.allowed_to_add_landscape_group,\n \"delete\": perm_rules.allowed_to_delete_landscape_group,\n }\n constraints = (\n models.UniqueConstraint(\n fields=(\"group\", \"landscape\", \"is_partnership\"),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_landscape_group\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/landscapes.py"}]}
| 3,286 | 131 |
gh_patches_debug_22029 | rasdani/github-patches | git_diff | microsoft__PubSec-Info-Assistant-447 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Chunks number not correctly reported in statuscontainer
**Describe the bug**
Chunks number for non-PDF files not correctly reported in statuscontainer
**To Reproduce**
Steps to reproduce the behavior:
1. Upload a non-PDF file
2. Wait for the chunking process to complete
3. Go to CosmosDB -> statuscontainer -> item relative to the uploaded non-PDF file
4. Check the "status" field which will display something like:
"FileLayoutParsingOther - chunking complete. <built-in method count of list object at 0x7713c4227440> chunks created"
**Expected behavior**
It should show a number instead of "<built-in method count of list object at 0x7713c4227440>"
**Desktop (please complete the following information):**
- OS: Windows 11
- Browser: Edge
- Version: 120.0.2210.121 (Official build) (64-bit)
**Alpha version details**
- GitHub branch: main
**Additional context**
Minor problem
probably line 166, functions/FileLayoutParsingOther/__init__.py
</issue>
<code>
[start of functions/FileLayoutParsingOther/__init__.py]
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import logging
5 import os
6 import json
7 from enum import Enum
8 from io import BytesIO
9 import azure.functions as func
10 from azure.storage.blob import generate_blob_sas
11 from azure.storage.queue import QueueClient, TextBase64EncodePolicy
12 from shared_code.status_log import StatusLog, State, StatusClassification
13 from shared_code.utilities import Utilities, MediaType
14
15 import requests
16
17 azure_blob_storage_account = os.environ["BLOB_STORAGE_ACCOUNT"]
18 azure_blob_storage_endpoint = os.environ["BLOB_STORAGE_ACCOUNT_ENDPOINT"]
19 azure_blob_drop_storage_container = os.environ["BLOB_STORAGE_ACCOUNT_UPLOAD_CONTAINER_NAME"]
20 azure_blob_content_storage_container = os.environ["BLOB_STORAGE_ACCOUNT_OUTPUT_CONTAINER_NAME"]
21 azure_blob_storage_key = os.environ["AZURE_BLOB_STORAGE_KEY"]
22 azure_blob_connection_string = os.environ["BLOB_CONNECTION_STRING"]
23 azure_blob_log_storage_container = os.environ["BLOB_STORAGE_ACCOUNT_LOG_CONTAINER_NAME"]
24 cosmosdb_url = os.environ["COSMOSDB_URL"]
25 cosmosdb_key = os.environ["COSMOSDB_KEY"]
26 cosmosdb_log_database_name = os.environ["COSMOSDB_LOG_DATABASE_NAME"]
27 cosmosdb_log_container_name = os.environ["COSMOSDB_LOG_CONTAINER_NAME"]
28 non_pdf_submit_queue = os.environ["NON_PDF_SUBMIT_QUEUE"]
29 pdf_polling_queue = os.environ["PDF_POLLING_QUEUE"]
30 pdf_submit_queue = os.environ["PDF_SUBMIT_QUEUE"]
31 text_enrichment_queue = os.environ["TEXT_ENRICHMENT_QUEUE"]
32 CHUNK_TARGET_SIZE = int(os.environ["CHUNK_TARGET_SIZE"])
33
34 NEW_AFTER_N_CHARS = 1500
35 COMBINE_UNDER_N_CHARS = 500
36 MAX_CHARACTERS = 1500
37
38
39 utilities = Utilities(azure_blob_storage_account, azure_blob_storage_endpoint, azure_blob_drop_storage_container, azure_blob_content_storage_container, azure_blob_storage_key)
40 function_name = "FileLayoutParsingOther"
41
42 class UnstructuredError(Exception):
43 pass
44
45 def PartitionFile(file_extension: str, file_url: str):
46 """ uses the unstructured.io libraries to analyse a document
47 Returns:
48 elements: A list of available models
49 """
50 # Send a GET request to the URL to download the file
51 response = requests.get(file_url)
52 bytes_io = BytesIO(response.content)
53 response.close()
54 metadata = []
55 elements = None
56 try:
57 if file_extension == '.csv':
58 from unstructured.partition.csv import partition_csv
59 elements = partition_csv(file=bytes_io)
60
61 elif file_extension == '.doc':
62 from unstructured.partition.doc import partition_doc
63 elements = partition_doc(file=bytes_io)
64
65 elif file_extension == '.docx':
66 from unstructured.partition.docx import partition_docx
67 elements = partition_docx(file=bytes_io)
68
69 elif file_extension == '.eml' or file_extension == '.msg':
70 if file_extension == '.msg':
71 from unstructured.partition.msg import partition_msg
72 elements = partition_msg(file=bytes_io)
73 else:
74 from unstructured.partition.email import partition_email
75 elements = partition_email(file=bytes_io)
76 metadata.append(f'Subject: {elements[0].metadata.subject}')
77 metadata.append(f'From: {elements[0].metadata.sent_from[0]}')
78 sent_to_str = 'To: '
79 for sent_to in elements[0].metadata.sent_to:
80 sent_to_str = sent_to_str + " " + sent_to
81 metadata.append(sent_to_str)
82
83 elif file_extension == '.html' or file_extension == '.htm':
84 from unstructured.partition.html import partition_html
85 elements = partition_html(file=bytes_io)
86
87 elif file_extension == '.md':
88 from unstructured.partition.md import partition_md
89 elements = partition_md(file=bytes_io)
90
91 elif file_extension == '.ppt':
92 from unstructured.partition.ppt import partition_ppt
93 elements = partition_ppt(file=bytes_io)
94
95 elif file_extension == '.pptx':
96 from unstructured.partition.pptx import partition_pptx
97 elements = partition_pptx(file=bytes_io)
98
99 elif any(file_extension in x for x in ['.txt', '.json']):
100 from unstructured.partition.text import partition_text
101 elements = partition_text(file=bytes_io)
102
103 elif file_extension == '.xlsx':
104 from unstructured.partition.xlsx import partition_xlsx
105 elements = partition_xlsx(file=bytes_io)
106
107 elif file_extension == '.xml':
108 from unstructured.partition.xml import partition_xml
109 elements = partition_xml(file=bytes_io)
110
111 except Exception as e:
112 raise UnstructuredError(f"An error occurred trying to parse the file: {str(e)}") from e
113
114 return elements, metadata
115
116
117
118 def main(msg: func.QueueMessage) -> None:
119 try:
120 statusLog = StatusLog(cosmosdb_url, cosmosdb_key, cosmosdb_log_database_name, cosmosdb_log_container_name)
121 logging.info('Python queue trigger function processed a queue item: %s',
122 msg.get_body().decode('utf-8'))
123
124 # Receive message from the queue
125 message_body = msg.get_body().decode('utf-8')
126 message_json = json.loads(message_body)
127 blob_name = message_json['blob_name']
128 blob_uri = message_json['blob_uri']
129 statusLog.upsert_document(blob_name, f'{function_name} - Starting to parse the non-PDF file', StatusClassification.INFO, State.PROCESSING)
130 statusLog.upsert_document(blob_name, f'{function_name} - Message received from non-pdf submit queue', StatusClassification.DEBUG)
131
132 # construct blob url
133 blob_path_plus_sas = utilities.get_blob_and_sas(blob_name)
134 statusLog.upsert_document(blob_name, f'{function_name} - SAS token generated to access the file', StatusClassification.DEBUG)
135
136 file_name, file_extension, file_directory = utilities.get_filename_and_extension(blob_name)
137
138 response = requests.get(blob_path_plus_sas)
139 response.raise_for_status()
140
141
142 # Partition the file dependent on file extension
143 elements, metadata = PartitionFile(file_extension, blob_path_plus_sas)
144 metdata_text = ''
145 for metadata_value in metadata:
146 metdata_text += metadata_value + '\n'
147 statusLog.upsert_document(blob_name, f'{function_name} - partitioning complete', StatusClassification.DEBUG)
148
149 title = ''
150 # Capture the file title
151 try:
152 for i, element in enumerate(elements):
153 if title == '' and element.category == 'Title':
154 # capture the first title
155 title = element.text
156 break
157 except:
158 # if this type of eleemnt does not include title, then process with emty value
159 pass
160
161 # Chunk the file
162 from unstructured.chunking.title import chunk_by_title
163 # chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS)
164 # chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS, max_characters=MAX_CHARACTERS)
165 chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_text_under_n_chars=COMBINE_UNDER_N_CHARS)
166 statusLog.upsert_document(blob_name, f'{function_name} - chunking complete. {str(chunks.count)} chunks created', StatusClassification.DEBUG)
167
168 subtitle_name = ''
169 section_name = ''
170 # Complete and write chunks
171 for i, chunk in enumerate(chunks):
172 if chunk.metadata.page_number == None:
173 page_list = [1]
174 else:
175 page_list = [chunk.metadata.page_number]
176 # substitute html if text is a table
177 if chunk.category == 'Table':
178 chunk_text = chunk.metadata.text_as_html
179 else:
180 chunk_text = chunk.text
181 # add filetype specific metadata as chunk text header
182 chunk_text = metdata_text + chunk_text
183 utilities.write_chunk(blob_name, blob_uri,
184 f"{i}",
185 utilities.token_count(chunk.text),
186 chunk_text, page_list,
187 section_name, title, subtitle_name,
188 MediaType.TEXT
189 )
190
191 statusLog.upsert_document(blob_name, f'{function_name} - chunking stored.', StatusClassification.DEBUG)
192
193 # submit message to the text enrichment queue to continue processing
194 queue_client = QueueClient.from_connection_string(azure_blob_connection_string, queue_name=text_enrichment_queue, message_encode_policy=TextBase64EncodePolicy())
195 message_json["text_enrichment_queued_count"] = 1
196 message_string = json.dumps(message_json)
197 queue_client.send_message(message_string)
198 statusLog.upsert_document(blob_name, f"{function_name} - message sent to enrichment queue", StatusClassification.DEBUG, State.QUEUED)
199
200 except Exception as e:
201 statusLog.upsert_document(blob_name, f"{function_name} - An error occurred - {str(e)}", StatusClassification.ERROR, State.ERROR)
202
203 statusLog.save_document(blob_name)
[end of functions/FileLayoutParsingOther/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/functions/FileLayoutParsingOther/__init__.py b/functions/FileLayoutParsingOther/__init__.py
--- a/functions/FileLayoutParsingOther/__init__.py
+++ b/functions/FileLayoutParsingOther/__init__.py
@@ -163,7 +163,7 @@
# chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS)
# chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS, max_characters=MAX_CHARACTERS)
chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_text_under_n_chars=COMBINE_UNDER_N_CHARS)
- statusLog.upsert_document(blob_name, f'{function_name} - chunking complete. {str(chunks.count)} chunks created', StatusClassification.DEBUG)
+ statusLog.upsert_document(blob_name, f'{function_name} - chunking complete. {len(chunks)} chunks created', StatusClassification.DEBUG)
subtitle_name = ''
section_name = ''
|
{"golden_diff": "diff --git a/functions/FileLayoutParsingOther/__init__.py b/functions/FileLayoutParsingOther/__init__.py\n--- a/functions/FileLayoutParsingOther/__init__.py\n+++ b/functions/FileLayoutParsingOther/__init__.py\n@@ -163,7 +163,7 @@\n # chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS)\r\n # chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS, max_characters=MAX_CHARACTERS) \r\n chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_text_under_n_chars=COMBINE_UNDER_N_CHARS)\r\n- statusLog.upsert_document(blob_name, f'{function_name} - chunking complete. {str(chunks.count)} chunks created', StatusClassification.DEBUG)\r\n+ statusLog.upsert_document(blob_name, f'{function_name} - chunking complete. {len(chunks)} chunks created', StatusClassification.DEBUG)\r\n \r\n subtitle_name = ''\r\n section_name = ''\n", "issue": "Chunks number not correctly reported in statuscontainer\n**Describe the bug**\r\nChunks number for non-PDF files not correctly reported in statuscontainer\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Upload a non-PDF file\r\n2. Wait for the chunking process to complete\r\n3. Go to CosmosDB -> statuscontainer -> item relative to the uploaded non-PDF file\r\n4. Check the \"status\" field which will display something like: \r\n\"FileLayoutParsingOther - chunking complete. <built-in method count of list object at 0x7713c4227440> chunks created\" \r\n\r\n**Expected behavior**\r\nIt should show a number instead of \"<built-in method count of list object at 0x7713c4227440>\"\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11\r\n - Browser: Edge\r\n - Version: 120.0.2210.121 (Official build) (64-bit)\r\n\r\n**Alpha version details**\r\n - GitHub branch: main\r\n\r\n**Additional context**\r\nMinor problem\r\nprobably line 166, functions/FileLayoutParsingOther/__init__.py\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT license.\r\n\r\nimport logging\r\nimport os\r\nimport json\r\nfrom enum import Enum\r\nfrom io import BytesIO\r\nimport azure.functions as func\r\nfrom azure.storage.blob import generate_blob_sas\r\nfrom azure.storage.queue import QueueClient, TextBase64EncodePolicy\r\nfrom shared_code.status_log import StatusLog, State, StatusClassification\r\nfrom shared_code.utilities import Utilities, MediaType\r\n\r\nimport requests\r\n\r\nazure_blob_storage_account = os.environ[\"BLOB_STORAGE_ACCOUNT\"]\r\nazure_blob_storage_endpoint = os.environ[\"BLOB_STORAGE_ACCOUNT_ENDPOINT\"]\r\nazure_blob_drop_storage_container = os.environ[\"BLOB_STORAGE_ACCOUNT_UPLOAD_CONTAINER_NAME\"]\r\nazure_blob_content_storage_container = os.environ[\"BLOB_STORAGE_ACCOUNT_OUTPUT_CONTAINER_NAME\"]\r\nazure_blob_storage_key = os.environ[\"AZURE_BLOB_STORAGE_KEY\"]\r\nazure_blob_connection_string = os.environ[\"BLOB_CONNECTION_STRING\"]\r\nazure_blob_log_storage_container = os.environ[\"BLOB_STORAGE_ACCOUNT_LOG_CONTAINER_NAME\"]\r\ncosmosdb_url = os.environ[\"COSMOSDB_URL\"]\r\ncosmosdb_key = os.environ[\"COSMOSDB_KEY\"]\r\ncosmosdb_log_database_name = os.environ[\"COSMOSDB_LOG_DATABASE_NAME\"]\r\ncosmosdb_log_container_name = os.environ[\"COSMOSDB_LOG_CONTAINER_NAME\"]\r\nnon_pdf_submit_queue = os.environ[\"NON_PDF_SUBMIT_QUEUE\"]\r\npdf_polling_queue = 
os.environ[\"PDF_POLLING_QUEUE\"]\r\npdf_submit_queue = os.environ[\"PDF_SUBMIT_QUEUE\"]\r\ntext_enrichment_queue = os.environ[\"TEXT_ENRICHMENT_QUEUE\"]\r\nCHUNK_TARGET_SIZE = int(os.environ[\"CHUNK_TARGET_SIZE\"])\r\n\r\nNEW_AFTER_N_CHARS = 1500\r\nCOMBINE_UNDER_N_CHARS = 500\r\nMAX_CHARACTERS = 1500\r\n\r\n\r\nutilities = Utilities(azure_blob_storage_account, azure_blob_storage_endpoint, azure_blob_drop_storage_container, azure_blob_content_storage_container, azure_blob_storage_key)\r\nfunction_name = \"FileLayoutParsingOther\"\r\n\r\nclass UnstructuredError(Exception):\r\n pass\r\n\r\ndef PartitionFile(file_extension: str, file_url: str): \r\n \"\"\" uses the unstructured.io libraries to analyse a document\r\n Returns:\r\n elements: A list of available models\r\n \"\"\" \r\n # Send a GET request to the URL to download the file\r\n response = requests.get(file_url)\r\n bytes_io = BytesIO(response.content)\r\n response.close() \r\n metadata = [] \r\n elements = None\r\n try: \r\n if file_extension == '.csv':\r\n from unstructured.partition.csv import partition_csv\r\n elements = partition_csv(file=bytes_io) \r\n \r\n elif file_extension == '.doc':\r\n from unstructured.partition.doc import partition_doc\r\n elements = partition_doc(file=bytes_io) \r\n \r\n elif file_extension == '.docx':\r\n from unstructured.partition.docx import partition_docx\r\n elements = partition_docx(file=bytes_io)\r\n \r\n elif file_extension == '.eml' or file_extension == '.msg':\r\n if file_extension == '.msg':\r\n from unstructured.partition.msg import partition_msg\r\n elements = partition_msg(file=bytes_io) \r\n else: \r\n from unstructured.partition.email import partition_email\r\n elements = partition_email(file=bytes_io)\r\n metadata.append(f'Subject: {elements[0].metadata.subject}')\r\n metadata.append(f'From: {elements[0].metadata.sent_from[0]}')\r\n sent_to_str = 'To: '\r\n for sent_to in elements[0].metadata.sent_to:\r\n sent_to_str = sent_to_str + \" \" + sent_to\r\n metadata.append(sent_to_str)\r\n \r\n elif file_extension == '.html' or file_extension == '.htm': \r\n from unstructured.partition.html import partition_html\r\n elements = partition_html(file=bytes_io) \r\n \r\n elif file_extension == '.md':\r\n from unstructured.partition.md import partition_md\r\n elements = partition_md(file=bytes_io)\r\n \r\n elif file_extension == '.ppt':\r\n from unstructured.partition.ppt import partition_ppt\r\n elements = partition_ppt(file=bytes_io)\r\n \r\n elif file_extension == '.pptx': \r\n from unstructured.partition.pptx import partition_pptx\r\n elements = partition_pptx(file=bytes_io)\r\n \r\n elif any(file_extension in x for x in ['.txt', '.json']):\r\n from unstructured.partition.text import partition_text\r\n elements = partition_text(file=bytes_io)\r\n \r\n elif file_extension == '.xlsx':\r\n from unstructured.partition.xlsx import partition_xlsx\r\n elements = partition_xlsx(file=bytes_io)\r\n \r\n elif file_extension == '.xml':\r\n from unstructured.partition.xml import partition_xml\r\n elements = partition_xml(file=bytes_io)\r\n \r\n except Exception as e:\r\n raise UnstructuredError(f\"An error occurred trying to parse the file: {str(e)}\") from e\r\n \r\n return elements, metadata\r\n \r\n \r\n\r\ndef main(msg: func.QueueMessage) -> None:\r\n try:\r\n statusLog = StatusLog(cosmosdb_url, cosmosdb_key, cosmosdb_log_database_name, cosmosdb_log_container_name)\r\n logging.info('Python queue trigger function processed a queue item: %s',\r\n msg.get_body().decode('utf-8'))\r\n\r\n # Receive 
message from the queue\r\n message_body = msg.get_body().decode('utf-8')\r\n message_json = json.loads(message_body)\r\n blob_name = message_json['blob_name']\r\n blob_uri = message_json['blob_uri']\r\n statusLog.upsert_document(blob_name, f'{function_name} - Starting to parse the non-PDF file', StatusClassification.INFO, State.PROCESSING)\r\n statusLog.upsert_document(blob_name, f'{function_name} - Message received from non-pdf submit queue', StatusClassification.DEBUG)\r\n\r\n # construct blob url\r\n blob_path_plus_sas = utilities.get_blob_and_sas(blob_name)\r\n statusLog.upsert_document(blob_name, f'{function_name} - SAS token generated to access the file', StatusClassification.DEBUG)\r\n\r\n file_name, file_extension, file_directory = utilities.get_filename_and_extension(blob_name)\r\n\r\n response = requests.get(blob_path_plus_sas)\r\n response.raise_for_status()\r\n \r\n \r\n # Partition the file dependent on file extension\r\n elements, metadata = PartitionFile(file_extension, blob_path_plus_sas)\r\n metdata_text = ''\r\n for metadata_value in metadata:\r\n metdata_text += metadata_value + '\\n' \r\n statusLog.upsert_document(blob_name, f'{function_name} - partitioning complete', StatusClassification.DEBUG)\r\n \r\n title = ''\r\n # Capture the file title\r\n try:\r\n for i, element in enumerate(elements):\r\n if title == '' and element.category == 'Title':\r\n # capture the first title\r\n title = element.text\r\n break\r\n except:\r\n # if this type of eleemnt does not include title, then process with emty value\r\n pass\r\n \r\n # Chunk the file \r\n from unstructured.chunking.title import chunk_by_title\r\n # chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS)\r\n # chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_under_n_chars=COMBINE_UNDER_N_CHARS, max_characters=MAX_CHARACTERS) \r\n chunks = chunk_by_title(elements, multipage_sections=True, new_after_n_chars=NEW_AFTER_N_CHARS, combine_text_under_n_chars=COMBINE_UNDER_N_CHARS)\r\n statusLog.upsert_document(blob_name, f'{function_name} - chunking complete. 
{str(chunks.count)} chunks created', StatusClassification.DEBUG)\r\n \r\n subtitle_name = ''\r\n section_name = ''\r\n # Complete and write chunks\r\n for i, chunk in enumerate(chunks): \r\n if chunk.metadata.page_number == None:\r\n page_list = [1]\r\n else:\r\n page_list = [chunk.metadata.page_number] \r\n # substitute html if text is a table \r\n if chunk.category == 'Table':\r\n chunk_text = chunk.metadata.text_as_html\r\n else:\r\n chunk_text = chunk.text\r\n # add filetype specific metadata as chunk text header\r\n chunk_text = metdata_text + chunk_text \r\n utilities.write_chunk(blob_name, blob_uri,\r\n f\"{i}\",\r\n utilities.token_count(chunk.text),\r\n chunk_text, page_list,\r\n section_name, title, subtitle_name,\r\n MediaType.TEXT\r\n )\r\n \r\n statusLog.upsert_document(blob_name, f'{function_name} - chunking stored.', StatusClassification.DEBUG) \r\n \r\n # submit message to the text enrichment queue to continue processing \r\n queue_client = QueueClient.from_connection_string(azure_blob_connection_string, queue_name=text_enrichment_queue, message_encode_policy=TextBase64EncodePolicy())\r\n message_json[\"text_enrichment_queued_count\"] = 1\r\n message_string = json.dumps(message_json)\r\n queue_client.send_message(message_string)\r\n statusLog.upsert_document(blob_name, f\"{function_name} - message sent to enrichment queue\", StatusClassification.DEBUG, State.QUEUED) \r\n \r\n except Exception as e:\r\n statusLog.upsert_document(blob_name, f\"{function_name} - An error occurred - {str(e)}\", StatusClassification.ERROR, State.ERROR)\r\n\r\n statusLog.save_document(blob_name)", "path": "functions/FileLayoutParsingOther/__init__.py"}]}
| 3,282 | 256 |
gh_patches_debug_8335
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-522
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No validation for kernel name
Although `ElementwiseKernel`'s `name` argument is directly used as a function name in generated CUDA code, there are no validation process to raise an exception when invalid characters are used in `name`.
That causes CUDA compile error, which is a bit difficult to debug.
</issue>
<code>
[start of cupy/cuda/compiler.py]
1 import hashlib
2 import math
3 import os
4 import shutil
5 import sys
6 import tempfile
7
8 import six
9
10 from cupy.cuda import device
11 from cupy.cuda import function
12 from cupy.cuda import nvrtc
13
14 _nvrtc_version = None
15
16
17 def _get_nvrtc_version():
18 global _nvrtc_version
19 if _nvrtc_version is None:
20 _nvrtc_version = nvrtc.getVersion()
21
22 return _nvrtc_version
23
24
25 def _get_arch():
26 cc = device.Device().compute_capability
27 return 'compute_%s' % cc
28
29
30 class TemporaryDirectory(object):
31 def __enter__(self):
32 self.path = tempfile.mkdtemp()
33 return self.path
34
35 def __exit__(self, exc_type, exc_value, traceback):
36 if exc_value is not None:
37 return
38
39 for name in os.listdir(self.path):
40 os.unlink(os.path.join(self.path, name))
41 os.rmdir(self.path)
42
43
44 def _get_bool_env_variable(name, default):
45 val = os.environ.get(name)
46 if val is None or len(val) == 0:
47 return default
48 try:
49 return int(val) == 1
50 except ValueError:
51 return False
52
53
54 def compile_using_nvrtc(source, options=(), arch=None):
55 if not arch:
56 arch = _get_arch()
57
58 options += ('-arch={}'.format(arch),)
59
60 with TemporaryDirectory() as root_dir:
61 path = os.path.join(root_dir, 'kern')
62 cu_path = '%s.cu' % path
63
64 with open(cu_path, 'w') as cu_file:
65 cu_file.write(source)
66
67 prog = _NVRTCProgram(source, cu_path)
68 try:
69 ptx = prog.compile(options)
70 except CompileException as e:
71 dump = _get_bool_env_variable(
72 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
73 if dump:
74 e.dump(sys.stderr)
75 raise
76
77 return ptx
78
79
80 def _preprocess(source, options=()):
81 prog = _NVRTCProgram(source, '')
82 try:
83 result = prog.compile(options)
84 except CompileException as e:
85 dump = _get_bool_env_variable(
86 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
87 if dump:
88 e.dump(sys.stderr)
89 raise
90
91 assert isinstance(result, six.text_type)
92 return result
93
94
95 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
96
97
98 def get_cache_dir():
99 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
100
101
102 _empty_file_preprocess_cache = {}
103
104
105 def compile_with_cache(source, options=(), arch=None, cache_dir=None,
106 extra_source=None):
107 # NVRTC does not use extra_source. extra_source is used for cache key.
108 global _empty_file_preprocess_cache
109 if cache_dir is None:
110 cache_dir = get_cache_dir()
111 if arch is None:
112 arch = _get_arch()
113
114 options += ('-ftz=true',)
115
116 env = (arch, options, _get_nvrtc_version())
117 base = _empty_file_preprocess_cache.get(env, None)
118 if base is None:
119 # This is checking of NVRTC compiler internal version
120 base = _empty_file_preprocess_cache[env] = _preprocess('', options)
121 key_src = '%s %s %s %s' % (env, base, source, extra_source)
122
123 key_src = key_src.encode('utf-8')
124 name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()
125
126 if not os.path.isdir(cache_dir):
127 try:
128 os.makedirs(cache_dir)
129 except OSError:
130 if not os.path.isdir(cache_dir):
131 raise
132
133 mod = function.Module()
134 # To handle conflicts in concurrent situation, we adopt lock-free method
135 # to avoid performance degradation.
136 path = os.path.join(cache_dir, name)
137 if os.path.exists(path):
138 with open(path, 'rb') as file:
139 data = file.read()
140 if len(data) >= 32:
141 hash = data[:32]
142 cubin = data[32:]
143 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
144 if hash == cubin_hash:
145 mod.load(cubin)
146 return mod
147
148 ptx = compile_using_nvrtc(source, options, arch)
149 ls = function.LinkState()
150 ls.add_ptr_data(ptx, six.u('cupy.ptx'))
151 cubin = ls.complete()
152 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
153
154 # shutil.move is not atomic operation, so it could result in a corrupted
155 # file. We detect it by appending md5 hash at the beginning of each cache
156 # file. If the file is corrupted, it will be ignored next time it is read.
157 with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
158 tf.write(cubin_hash)
159 tf.write(cubin)
160 temp_path = tf.name
161 shutil.move(temp_path, path)
162
163 # Save .cu source file along with .cubin
164 if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):
165 with open(path + '.cu', 'w') as f:
166 f.write(source)
167
168 mod.load(cubin)
169 return mod
170
171
172 class CompileException(Exception):
173
174 def __init__(self, msg, source, name, options):
175 self._msg = msg
176 self.source = source
177 self.name = name
178 self.options = options
179
180 def __repr__(self):
181 return str(self)
182
183 def __str__(self):
184 return self.get_message()
185
186 def get_message(self):
187 return self._msg
188
189 def dump(self, f):
190 lines = self.source.split('\n')
191 digits = int(math.floor(math.log10(len(lines)))) + 1
192 linum_fmt = '{{:0{}d}} '.format(digits)
193 f.write('NVRTC compilation error: {}\n'.format(self))
194 f.write('-----\n')
195 f.write('Name: {}\n'.format(' '.join(self.name)))
196 f.write('Options: {}\n'.format(' '.join(self.options)))
197 f.write('CUDA source:\n')
198 for i, line in enumerate(lines):
199 f.write(linum_fmt.format(i + 1) + line.rstrip() + '\n')
200 f.write('-----\n')
201 f.flush()
202
203
204 class _NVRTCProgram(object):
205
206 def __init__(self, src, name="default_program", headers=(),
207 include_names=()):
208 self.ptr = None
209
210 if isinstance(src, six.binary_type):
211 src = src.decode('UTF-8')
212 if isinstance(name, six.binary_type):
213 name = name.decode('UTF-8')
214
215 self.src = src
216 self.name = name
217 self.ptr = nvrtc.createProgram(src, name, headers, include_names)
218
219 def __del__(self):
220 if self.ptr:
221 nvrtc.destroyProgram(self.ptr)
222
223 def compile(self, options=()):
224 try:
225 nvrtc.compileProgram(self.ptr, options)
226 return nvrtc.getPTX(self.ptr)
227 except nvrtc.NVRTCError:
228 log = nvrtc.getProgramLog(self.ptr)
229 raise CompileException(log, self.src, self.name, options)
230
[end of cupy/cuda/compiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -1,6 +1,7 @@
import hashlib
import math
import os
+import re
import shutil
import sys
import tempfile
@@ -227,3 +228,7 @@
except nvrtc.NVRTCError:
log = nvrtc.getProgramLog(self.ptr)
raise CompileException(log, self.src, self.name, options)
+
+
+def is_valid_kernel_name(name):
+ return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None
|
{"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -1,6 +1,7 @@\n import hashlib\n import math\n import os\n+import re\n import shutil\n import sys\n import tempfile\n@@ -227,3 +228,7 @@\n except nvrtc.NVRTCError:\n log = nvrtc.getProgramLog(self.ptr)\n raise CompileException(log, self.src, self.name, options)\n+\n+\n+def is_valid_kernel_name(name):\n+ return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None\n", "issue": "No validation for kernel name\n Although `ElementwiseKernel`'s `name` argument is directly used as a function name in generated CUDA code, there are no validation process to raise an exception when invalid characters are used in `name`.\r\nThat causes CUDA compile error, which is a bit difficult to debug.\n", "before_files": [{"content": "import hashlib\nimport math\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\nfrom cupy.cuda import nvrtc\n\n_nvrtc_version = None\n\n\ndef _get_nvrtc_version():\n global _nvrtc_version\n if _nvrtc_version is None:\n _nvrtc_version = nvrtc.getVersion()\n\n return _nvrtc_version\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'compute_%s' % cc\n\n\nclass TemporaryDirectory(object):\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _get_bool_env_variable(name, default):\n val = os.environ.get(name)\n if val is None or len(val) == 0:\n return default\n try:\n return int(val) == 1\n except ValueError:\n return False\n\n\ndef compile_using_nvrtc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n\n options += ('-arch={}'.format(arch),)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n prog = _NVRTCProgram(source, cu_path)\n try:\n ptx = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n return ptx\n\n\ndef _preprocess(source, options=()):\n prog = _NVRTCProgram(source, '')\n try:\n result = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n assert isinstance(result, six.text_type)\n return result\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None,\n extra_source=None):\n # NVRTC does not use extra_source. 
extra_source is used for cache key.\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n options += ('-ftz=true',)\n\n env = (arch, options, _get_nvrtc_version())\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n # This is checking of NVRTC compiler internal version\n base = _empty_file_preprocess_cache[env] = _preprocess('', options)\n key_src = '%s %s %s %s' % (env, base, source, extra_source)\n\n key_src = key_src.encode('utf-8')\n name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()\n\n if not os.path.isdir(cache_dir):\n try:\n os.makedirs(cache_dir)\n except OSError:\n if not os.path.isdir(cache_dir):\n raise\n\n mod = function.Module()\n # To handle conflicts in concurrent situation, we adopt lock-free method\n # to avoid performance degradation.\n path = os.path.join(cache_dir, name)\n if os.path.exists(path):\n with open(path, 'rb') as file:\n data = file.read()\n if len(data) >= 32:\n hash = data[:32]\n cubin = data[32:]\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n if hash == cubin_hash:\n mod.load(cubin)\n return mod\n\n ptx = compile_using_nvrtc(source, options, arch)\n ls = function.LinkState()\n ls.add_ptr_data(ptx, six.u('cupy.ptx'))\n cubin = ls.complete()\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n\n # shutil.move is not atomic operation, so it could result in a corrupted\n # file. We detect it by appending md5 hash at the beginning of each cache\n # file. If the file is corrupted, it will be ignored next time it is read.\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n tf.write(cubin_hash)\n tf.write(cubin)\n temp_path = tf.name\n shutil.move(temp_path, path)\n\n # Save .cu source file along with .cubin\n if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):\n with open(path + '.cu', 'w') as f:\n f.write(source)\n\n mod.load(cubin)\n return mod\n\n\nclass CompileException(Exception):\n\n def __init__(self, msg, source, name, options):\n self._msg = msg\n self.source = source\n self.name = name\n self.options = options\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return self.get_message()\n\n def get_message(self):\n return self._msg\n\n def dump(self, f):\n lines = self.source.split('\\n')\n digits = int(math.floor(math.log10(len(lines)))) + 1\n linum_fmt = '{{:0{}d}} '.format(digits)\n f.write('NVRTC compilation error: {}\\n'.format(self))\n f.write('-----\\n')\n f.write('Name: {}\\n'.format(' '.join(self.name)))\n f.write('Options: {}\\n'.format(' '.join(self.options)))\n f.write('CUDA source:\\n')\n for i, line in enumerate(lines):\n f.write(linum_fmt.format(i + 1) + line.rstrip() + '\\n')\n f.write('-----\\n')\n f.flush()\n\n\nclass _NVRTCProgram(object):\n\n def __init__(self, src, name=\"default_program\", headers=(),\n include_names=()):\n self.ptr = None\n\n if isinstance(src, six.binary_type):\n src = src.decode('UTF-8')\n if isinstance(name, six.binary_type):\n name = name.decode('UTF-8')\n\n self.src = src\n self.name = name\n self.ptr = nvrtc.createProgram(src, name, headers, include_names)\n\n def __del__(self):\n if self.ptr:\n nvrtc.destroyProgram(self.ptr)\n\n def compile(self, options=()):\n try:\n nvrtc.compileProgram(self.ptr, options)\n return nvrtc.getPTX(self.ptr)\n except nvrtc.NVRTCError:\n log = nvrtc.getProgramLog(self.ptr)\n raise CompileException(log, self.src, self.name, options)\n", "path": "cupy/cuda/compiler.py"}]}
| 2,798 | 153 |
gh_patches_debug_27167
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3254
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable python 3.7 in CI
At least for Linux
</issue>
<code>
[start of conans/pylint_plugin.py]
1 """Pylint plugin for ConanFile"""
2
3 import astroid
4 from astroid import MANAGER, scoped_nodes
5
6
7 def register(linter):
8 """Declare package as plugin
9
10 This function needs to be declared so astroid treats
11 current file as a plugin.
12 """
13 pass
14
15
16 def transform_conanfile(node):
17 """Transform definition of ConanFile class so dynamic fields are visible to pylint"""
18
19 str_class = scoped_nodes.builtin_lookup("str")
20 info_class = MANAGER.ast_from_module_name("conans.model.info").lookup(
21 "ConanInfo")
22 build_requires_class = MANAGER.ast_from_module_name(
23 "conans.client.graph.graph_manager").lookup("_RecipeBuildRequires")
24 file_copier_class = MANAGER.ast_from_module_name(
25 "conans.client.file_copier").lookup("FileCopier")
26 file_importer_class = MANAGER.ast_from_module_name(
27 "conans.client.importer").lookup("_FileImporter")
28
29 dynamic_fields = {
30 "source_folder": str_class,
31 "build_folder": str_class,
32 "package_folder": str_class,
33 "build_requires": build_requires_class,
34 "info_build": info_class,
35 "info": info_class,
36 "copy": file_copier_class,
37 "copy_deps": file_importer_class,
38 }
39
40 for f, t in dynamic_fields.items():
41 node.locals[f] = [t]
42
43
44 MANAGER.register_transform(
45 scoped_nodes.Class, transform_conanfile,
46 lambda node: node.qname() == "conans.model.conan_file.ConanFile")
47
[end of conans/pylint_plugin.py]
[start of .ci/jenkins/conf.py]
1 import argparse
2 import os
3 import platform
4 from contextlib import contextmanager
5
6 winpylocation = {"py27": "C:\\Python27\\python.exe",
7 "py34": "C:\\Python34\\python.exe",
8 "py36": "C:\\Python36\\python.exe"}
9
10 macpylocation = {"py27": "/usr/bin/python", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python",
11 "py34": "/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python",
12 "py36": "/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python"}
13
14 linuxpylocation = {"py27": "/usr/bin/python2.7",
15 "py34": "/usr/bin/python3.4",
16 "py36": "/usr/bin/python3.6"}
17
18
19 def get_environ(tmp_path):
20 if platform.system() == "Windows":
21 return {"CONAN_BASH_PATH": "c:/tools/msys64/usr/bin/bash",
22 "CONAN_USER_HOME_SHORT": os.path.join(tmp_path, ".conan")}
23 return {}
24
25
26 class Extender(argparse.Action):
27 """Allows to use the same flag several times in a command and creates a list with the values.
28 For example:
29 conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true
30 It creates:
31 options = ['qt:value', 'mode:2']
32 settings = ['cucumber:true']
33 """
34 def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable
35 # Need None here incase `argparse.SUPPRESS` was supplied for `dest`
36 dest = getattr(namespace, self.dest, None)
37 if not hasattr(dest, 'extend') or dest == self.default:
38 dest = []
39 setattr(namespace, self.dest, dest)
40 # if default isn't set to None, this method might be called
41 # with the default as `values` for other arguments which
42 # share this destination.
43 parser.set_defaults(**{self.dest: None})
44
45 try:
46 dest.extend(values)
47 except ValueError:
48 dest.append(values)
49
50
51 @contextmanager
52 def environment_append(env_vars):
53 old_env = dict(os.environ)
54 for name, value in env_vars.items():
55 if isinstance(value, list):
56 env_vars[name] = os.pathsep.join(value)
57 if name in old_env:
58 env_vars[name] += os.pathsep + old_env[name]
59 os.environ.update(env_vars)
60 try:
61 yield
62 finally:
63 os.environ.clear()
64 os.environ.update(old_env)
65
66
67 @contextmanager
68 def chdir(newdir):
69 old_path = os.getcwd()
70 os.chdir(newdir)
71 try:
72 yield
73 finally:
74 os.chdir(old_path)
75
[end of .ci/jenkins/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py
--- a/.ci/jenkins/conf.py
+++ b/.ci/jenkins/conf.py
@@ -13,7 +13,8 @@
linuxpylocation = {"py27": "/usr/bin/python2.7",
"py34": "/usr/bin/python3.4",
- "py36": "/usr/bin/python3.6"}
+ "py36": "/usr/bin/python3.6",
+ "py37": "/usr/bin/python3.7"}
def get_environ(tmp_path):
diff --git a/conans/pylint_plugin.py b/conans/pylint_plugin.py
--- a/conans/pylint_plugin.py
+++ b/conans/pylint_plugin.py
@@ -1,7 +1,6 @@
"""Pylint plugin for ConanFile"""
-
import astroid
-from astroid import MANAGER, scoped_nodes
+from astroid import MANAGER
def register(linter):
@@ -16,7 +15,7 @@
def transform_conanfile(node):
"""Transform definition of ConanFile class so dynamic fields are visible to pylint"""
- str_class = scoped_nodes.builtin_lookup("str")
+ str_class = astroid.builtin_lookup("str")
info_class = MANAGER.ast_from_module_name("conans.model.info").lookup(
"ConanInfo")
build_requires_class = MANAGER.ast_from_module_name(
@@ -42,5 +41,5 @@
MANAGER.register_transform(
- scoped_nodes.Class, transform_conanfile,
+ astroid.ClassDef, transform_conanfile,
lambda node: node.qname() == "conans.model.conan_file.ConanFile")
|
{"golden_diff": "diff --git a/.ci/jenkins/conf.py b/.ci/jenkins/conf.py\n--- a/.ci/jenkins/conf.py\n+++ b/.ci/jenkins/conf.py\n@@ -13,7 +13,8 @@\n \n linuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": \"/usr/bin/python3.4\",\n- \"py36\": \"/usr/bin/python3.6\"}\n+ \"py36\": \"/usr/bin/python3.6\",\n+ \"py37\": \"/usr/bin/python3.7\"}\n \n \n def get_environ(tmp_path):\ndiff --git a/conans/pylint_plugin.py b/conans/pylint_plugin.py\n--- a/conans/pylint_plugin.py\n+++ b/conans/pylint_plugin.py\n@@ -1,7 +1,6 @@\n \"\"\"Pylint plugin for ConanFile\"\"\"\n-\n import astroid\n-from astroid import MANAGER, scoped_nodes\n+from astroid import MANAGER\n \n \n def register(linter):\n@@ -16,7 +15,7 @@\n def transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n \n- str_class = scoped_nodes.builtin_lookup(\"str\")\n+ str_class = astroid.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n@@ -42,5 +41,5 @@\n \n \n MANAGER.register_transform(\n- scoped_nodes.Class, transform_conanfile,\n+ astroid.ClassDef, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "issue": "Enable python 3.7 in CI \nAt least for Linux\r\n\r\n\n", "before_files": [{"content": "\"\"\"Pylint plugin for ConanFile\"\"\"\n\nimport astroid\nfrom astroid import MANAGER, scoped_nodes\n\n\ndef register(linter):\n \"\"\"Declare package as plugin\n \n This function needs to be declared so astroid treats\n current file as a plugin.\n \"\"\"\n pass\n\n\ndef transform_conanfile(node):\n \"\"\"Transform definition of ConanFile class so dynamic fields are visible to pylint\"\"\"\n\n str_class = scoped_nodes.builtin_lookup(\"str\")\n info_class = MANAGER.ast_from_module_name(\"conans.model.info\").lookup(\n \"ConanInfo\")\n build_requires_class = MANAGER.ast_from_module_name(\n \"conans.client.graph.graph_manager\").lookup(\"_RecipeBuildRequires\")\n file_copier_class = MANAGER.ast_from_module_name(\n \"conans.client.file_copier\").lookup(\"FileCopier\")\n file_importer_class = MANAGER.ast_from_module_name(\n \"conans.client.importer\").lookup(\"_FileImporter\")\n\n dynamic_fields = {\n \"source_folder\": str_class,\n \"build_folder\": str_class,\n \"package_folder\": str_class,\n \"build_requires\": build_requires_class,\n \"info_build\": info_class,\n \"info\": info_class,\n \"copy\": file_copier_class,\n \"copy_deps\": file_importer_class,\n }\n\n for f, t in dynamic_fields.items():\n node.locals[f] = [t]\n\n\nMANAGER.register_transform(\n scoped_nodes.Class, transform_conanfile,\n lambda node: node.qname() == \"conans.model.conan_file.ConanFile\")\n", "path": "conans/pylint_plugin.py"}, {"content": "import argparse\nimport os\nimport platform\nfrom contextlib import contextmanager\n\nwinpylocation = {\"py27\": \"C:\\\\Python27\\\\python.exe\",\n \"py34\": \"C:\\\\Python34\\\\python.exe\",\n \"py36\": \"C:\\\\Python36\\\\python.exe\"}\n\nmacpylocation = {\"py27\": \"/usr/bin/python\", # /Users/jenkins_ci/.pyenv/versions/2.7.11/bin/python\",\n \"py34\": \"/Users/jenkins_ci/.pyenv/versions/3.4.7/bin/python\",\n \"py36\": \"/Users/jenkins_ci/.pyenv/versions/3.6.5/bin/python\"}\n\nlinuxpylocation = {\"py27\": \"/usr/bin/python2.7\",\n \"py34\": \"/usr/bin/python3.4\",\n \"py36\": \"/usr/bin/python3.6\"}\n\n\ndef get_environ(tmp_path):\n if platform.system() == \"Windows\":\n return {\"CONAN_BASH_PATH\": 
\"c:/tools/msys64/usr/bin/bash\",\n \"CONAN_USER_HOME_SHORT\": os.path.join(tmp_path, \".conan\")}\n return {}\n\n\nclass Extender(argparse.Action):\n \"\"\"Allows to use the same flag several times in a command and creates a list with the values.\n For example:\n conan install MyPackage/1.2@user/channel -o qt:value -o mode:2 -s cucumber:true\n It creates:\n options = ['qt:value', 'mode:2']\n settings = ['cucumber:true']\n \"\"\"\n def __call__(self, parser, namespace, values, option_strings=None): # @UnusedVariable\n # Need None here incase `argparse.SUPPRESS` was supplied for `dest`\n dest = getattr(namespace, self.dest, None)\n if not hasattr(dest, 'extend') or dest == self.default:\n dest = []\n setattr(namespace, self.dest, dest)\n # if default isn't set to None, this method might be called\n # with the default as `values` for other arguments which\n # share this destination.\n parser.set_defaults(**{self.dest: None})\n\n try:\n dest.extend(values)\n except ValueError:\n dest.append(values)\n\n\n@contextmanager\ndef environment_append(env_vars):\n old_env = dict(os.environ)\n for name, value in env_vars.items():\n if isinstance(value, list):\n env_vars[name] = os.pathsep.join(value)\n if name in old_env:\n env_vars[name] += os.pathsep + old_env[name]\n os.environ.update(env_vars)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)\n\n\n@contextmanager\ndef chdir(newdir):\n old_path = os.getcwd()\n os.chdir(newdir)\n try:\n yield\n finally:\n os.chdir(old_path)\n", "path": ".ci/jenkins/conf.py"}]}
| 1,772 | 376 |
gh_patches_debug_29463
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-1103
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incomplete and wrong typing
It seems, there are many errors in the typed code.
For examples
1. https://github.com/tensorflow/addons/blob/master/tensorflow_addons/activations/rrelu.py#L28 (Optional[int])
2. https://github.com/tensorflow/addons/blob/master/tensorflow_addons/losses/giou_loss.py#L101 (TensorLike)
The integration of a type checker such as `mypy` can help to keep it clean if the goal is to integrate type checking.
</issue>
<code>
[start of tensorflow_addons/losses/giou_loss.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implements GIoU loss."""
16
17 import tensorflow as tf
18
19 from tensorflow_addons.utils.types import TensorLike
20 from typing import List
21 from typeguard import typechecked
22
23
24 @tf.keras.utils.register_keras_serializable(package='Addons')
25 class GIoULoss(tf.keras.losses.Loss):
26 """Implements the GIoU loss function.
27
28 GIoU loss was first introduced in the
29 [Generalized Intersection over Union:
30 A Metric and A Loss for Bounding Box Regression]
31 (https://giou.stanford.edu/GIoU.pdf).
32 GIoU is an enhancement for models which use IoU in object detection.
33
34 Usage:
35
36 ```python
37 gl = tfa.losses.GIoULoss()
38 boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
39 boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
40 loss = gl(boxes1, boxes2)
41 print('Loss: ', loss.numpy()) # Loss: [1.07500000298023224, 1.9333333373069763]
42 ```
43 Usage with tf.keras API:
44
45 ```python
46 model = tf.keras.Model(inputs, outputs)
47 model.compile('sgd', loss=tfa.losses.GIoULoss())
48 ```
49
50 Args:
51 mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
52 """
53 @typechecked
54 def __init__(self,
55 mode: str = 'giou',
56 reduction: str = tf.keras.losses.Reduction.AUTO,
57 name: str = 'giou_loss'):
58 if mode not in ['giou', 'iou']:
59 raise ValueError("Value of mode should be 'iou' or 'giou'")
60 super().__init__(name=name, reduction=reduction)
61 self.mode = mode
62
63 def get_config(self):
64 base_config = super().get_config()
65 base_config['mode'] = self.mode
66 return base_config
67
68 def call(self,
69 y_true,
70 y_pred):
71 return giou_loss(y_true, y_pred, mode=self.mode)
72
73
74 @tf.keras.utils.register_keras_serializable(package='Addons')
75 @tf.function
76 def giou_loss(y_true: TensorLike,
77 y_pred: TensorLike,
78 mode: str = 'giou') -> tf.Tensor:
79 """
80 Args:
81 y_true: true targets tensor. The coordinates of the each bounding
82 box in boxes are encoded as [y_min, x_min, y_max, x_max].
83 y_pred: predictions tensor. The coordinates of the each bounding
84 box in boxes are encoded as [y_min, x_min, y_max, x_max].
85 mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
86
87 Returns:
88 GIoU loss float `Tensor`.
89 """
90 if mode not in ['giou', 'iou']:
91 raise ValueError("Value of mode should be 'iou' or 'giou'")
92 y_pred = tf.convert_to_tensor(y_pred)
93 if not y_pred.dtype.is_floating:
94 y_pred = tf.cast(y_pred, tf.float32)
95 y_true = tf.cast(y_true, y_pred.dtype)
96 giou = _calculate_giou(y_pred, y_true, mode)
97
98 return 1 - giou
99
100
101 def _calculate_giou(b1: List[int],
102 b2: List[int],
103 mode: str = 'giou') -> tf.Tensor:
104 """
105 Args:
106 b1: bounding box. The coordinates of the each bounding box in boxes are
107 encoded as [y_min, x_min, y_max, x_max].
108 b2: the other bounding box. The coordinates of the each bounding box
109 in boxes are encoded as [y_min, x_min, y_max, x_max].
110 mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
111
112 Returns:
113 GIoU loss float `Tensor`.
114 """
115 zero = tf.convert_to_tensor(0., b1.dtype)
116 b1_ymin, b1_xmin, b1_ymax, b1_xmax = tf.unstack(b1, 4, axis=-1)
117 b2_ymin, b2_xmin, b2_ymax, b2_xmax = tf.unstack(b2, 4, axis=-1)
118 b1_width = tf.maximum(zero, b1_xmax - b1_xmin)
119 b1_height = tf.maximum(zero, b1_ymax - b1_ymin)
120 b2_width = tf.maximum(zero, b2_xmax - b2_xmin)
121 b2_height = tf.maximum(zero, b2_ymax - b2_ymin)
122 b1_area = b1_width * b1_height
123 b2_area = b2_width * b2_height
124
125 intersect_ymin = tf.maximum(b1_ymin, b2_ymin)
126 intersect_xmin = tf.maximum(b1_xmin, b2_xmin)
127 intersect_ymax = tf.minimum(b1_ymax, b2_ymax)
128 intersect_xmax = tf.minimum(b1_xmax, b2_xmax)
129 intersect_width = tf.maximum(zero, intersect_xmax - intersect_xmin)
130 intersect_height = tf.maximum(zero, intersect_ymax - intersect_ymin)
131 intersect_area = intersect_width * intersect_height
132
133 union_area = b1_area + b2_area - intersect_area
134 iou = tf.math.divide_no_nan(intersect_area, union_area)
135 if mode == 'iou':
136 return iou
137
138 enclose_ymin = tf.minimum(b1_ymin, b2_ymin)
139 enclose_xmin = tf.minimum(b1_xmin, b2_xmin)
140 enclose_ymax = tf.maximum(b1_ymax, b2_ymax)
141 enclose_xmax = tf.maximum(b1_xmax, b2_xmax)
142 enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin)
143 enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin)
144 enclose_area = enclose_width * enclose_height
145 giou = iou - tf.math.divide_no_nan(
146 (enclose_area - union_area), enclose_area)
147 return giou
148
[end of tensorflow_addons/losses/giou_loss.py]
[start of tensorflow_addons/activations/rrelu.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 import tensorflow as tf
17 from tensorflow_addons.utils.types import Number
18 from tensorflow_addons.utils import types
19 from typing import Optional
20
21
22 @tf.keras.utils.register_keras_serializable(package="Addons")
23 def rrelu(
24 x: types.TensorLike,
25 lower: Number = 0.125,
26 upper: Number = 0.3333333333333333,
27 training: Optional[str] = None,
28 seed: Optional[str] = None,
29 ) -> tf.Tensor:
30 """rrelu function.
31
32 Computes rrelu function:
33 `x if x > 0 else random(lower, upper) * x` or
34 `x if x > 0 else x * (lower + upper) / 2`
35 depending on whether training is enabled.
36
37 See [Empirical Evaluation of Rectified Activations in Convolutional Network](https://arxiv.org/abs/1505.00853).
38
39 Args:
40 x: A `Tensor`. Must be one of the following types:
41 `float16`, `float32`, `float64`.
42 lower: `float`, lower bound for random alpha.
43 upper: `float`, upper bound for random alpha.
44 training: `bool`, indicating whether the `call`
45 is meant for training or inference.
46 seed: `int`, this sets the operation-level seed.
47 Returns:
48 result: A `Tensor`. Has the same type as `x`.
49 """
50 x = tf.convert_to_tensor(x)
51 if training is None:
52 training = tf.keras.backend.learning_phase()
53 training = bool(tf.keras.backend.get_value(training))
54
55 if training:
56 alpha = tf.random.uniform(
57 tf.shape(x), minval=lower, maxval=upper, dtype=x.dtype, seed=seed
58 )
59 else:
60 alpha = tf.cast((lower + upper) / 2, x.dtype)
61
62 return tf.where(x >= 0, x, alpha * x)
63
[end of tensorflow_addons/activations/rrelu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensorflow_addons/activations/rrelu.py b/tensorflow_addons/activations/rrelu.py
--- a/tensorflow_addons/activations/rrelu.py
+++ b/tensorflow_addons/activations/rrelu.py
@@ -24,8 +24,8 @@
x: types.TensorLike,
lower: Number = 0.125,
upper: Number = 0.3333333333333333,
- training: Optional[str] = None,
- seed: Optional[str] = None,
+ training: Optional[bool] = None,
+ seed: Optional[int] = None,
) -> tf.Tensor:
"""rrelu function.
diff --git a/tensorflow_addons/losses/giou_loss.py b/tensorflow_addons/losses/giou_loss.py
--- a/tensorflow_addons/losses/giou_loss.py
+++ b/tensorflow_addons/losses/giou_loss.py
@@ -17,7 +17,7 @@
import tensorflow as tf
from tensorflow_addons.utils.types import TensorLike
-from typing import List
+from typing import Optional
from typeguard import typechecked
@@ -54,7 +54,7 @@
def __init__(self,
mode: str = 'giou',
reduction: str = tf.keras.losses.Reduction.AUTO,
- name: str = 'giou_loss'):
+ name: Optional[str] = 'giou_loss'):
if mode not in ['giou', 'iou']:
raise ValueError("Value of mode should be 'iou' or 'giou'")
super().__init__(name=name, reduction=reduction)
@@ -98,8 +98,8 @@
return 1 - giou
-def _calculate_giou(b1: List[int],
- b2: List[int],
+def _calculate_giou(b1: TensorLike,
+ b2: TensorLike,
mode: str = 'giou') -> tf.Tensor:
"""
Args:
|
{"golden_diff": "diff --git a/tensorflow_addons/activations/rrelu.py b/tensorflow_addons/activations/rrelu.py\n--- a/tensorflow_addons/activations/rrelu.py\n+++ b/tensorflow_addons/activations/rrelu.py\n@@ -24,8 +24,8 @@\n x: types.TensorLike,\n lower: Number = 0.125,\n upper: Number = 0.3333333333333333,\n- training: Optional[str] = None,\n- seed: Optional[str] = None,\n+ training: Optional[bool] = None,\n+ seed: Optional[int] = None,\n ) -> tf.Tensor:\n \"\"\"rrelu function.\n \ndiff --git a/tensorflow_addons/losses/giou_loss.py b/tensorflow_addons/losses/giou_loss.py\n--- a/tensorflow_addons/losses/giou_loss.py\n+++ b/tensorflow_addons/losses/giou_loss.py\n@@ -17,7 +17,7 @@\n import tensorflow as tf\n \n from tensorflow_addons.utils.types import TensorLike\n-from typing import List\n+from typing import Optional\n from typeguard import typechecked\n \n \n@@ -54,7 +54,7 @@\n def __init__(self,\n mode: str = 'giou',\n reduction: str = tf.keras.losses.Reduction.AUTO,\n- name: str = 'giou_loss'):\n+ name: Optional[str] = 'giou_loss'):\n if mode not in ['giou', 'iou']:\n raise ValueError(\"Value of mode should be 'iou' or 'giou'\")\n super().__init__(name=name, reduction=reduction)\n@@ -98,8 +98,8 @@\n return 1 - giou\n \n \n-def _calculate_giou(b1: List[int],\n- b2: List[int],\n+def _calculate_giou(b1: TensorLike,\n+ b2: TensorLike,\n mode: str = 'giou') -> tf.Tensor:\n \"\"\"\n Args:\n", "issue": "Incomplete and wrong typing\nIt seems, there are many errors in the typed code. \r\n\r\nFor examples \r\n1. https://github.com/tensorflow/addons/blob/master/tensorflow_addons/activations/rrelu.py#L28 (Optional[int])\r\n2. https://github.com/tensorflow/addons/blob/master/tensorflow_addons/losses/giou_loss.py#L101 (TensorLike)\r\n\r\nThe integration of a type checker such as `mypy` can help to keep it clean if the goal is to integrate type checking. \r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements GIoU loss.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils.types import TensorLike\nfrom typing import List\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package='Addons')\nclass GIoULoss(tf.keras.losses.Loss):\n \"\"\"Implements the GIoU loss function.\n\n GIoU loss was first introduced in the\n [Generalized Intersection over Union:\n A Metric and A Loss for Bounding Box Regression]\n (https://giou.stanford.edu/GIoU.pdf).\n GIoU is an enhancement for models which use IoU in object detection.\n\n Usage:\n\n ```python\n gl = tfa.losses.GIoULoss()\n boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])\n boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])\n loss = gl(boxes1, boxes2)\n print('Loss: ', loss.numpy()) # Loss: [1.07500000298023224, 1.9333333373069763]\n ```\n Usage with tf.keras API:\n\n ```python\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tfa.losses.GIoULoss())\n ```\n\n Args:\n mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.\n \"\"\"\n @typechecked\n def __init__(self,\n mode: str = 'giou',\n reduction: str = tf.keras.losses.Reduction.AUTO,\n name: str = 'giou_loss'):\n if mode not in ['giou', 'iou']:\n raise ValueError(\"Value of mode should be 'iou' or 'giou'\")\n super().__init__(name=name, reduction=reduction)\n self.mode = mode\n\n def get_config(self):\n base_config = super().get_config()\n base_config['mode'] = self.mode\n return base_config\n\n def call(self,\n y_true,\n y_pred):\n return giou_loss(y_true, y_pred, mode=self.mode)\n\n\[email protected]_keras_serializable(package='Addons')\[email protected]\ndef giou_loss(y_true: TensorLike,\n y_pred: TensorLike,\n mode: str = 'giou') -> tf.Tensor:\n \"\"\"\n Args:\n y_true: true targets tensor. The coordinates of the each bounding\n box in boxes are encoded as [y_min, x_min, y_max, x_max].\n y_pred: predictions tensor. The coordinates of the each bounding\n box in boxes are encoded as [y_min, x_min, y_max, x_max].\n mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.\n\n Returns:\n GIoU loss float `Tensor`.\n \"\"\"\n if mode not in ['giou', 'iou']:\n raise ValueError(\"Value of mode should be 'iou' or 'giou'\")\n y_pred = tf.convert_to_tensor(y_pred)\n if not y_pred.dtype.is_floating:\n y_pred = tf.cast(y_pred, tf.float32)\n y_true = tf.cast(y_true, y_pred.dtype)\n giou = _calculate_giou(y_pred, y_true, mode)\n\n return 1 - giou\n\n\ndef _calculate_giou(b1: List[int],\n b2: List[int],\n mode: str = 'giou') -> tf.Tensor:\n \"\"\"\n Args:\n b1: bounding box. The coordinates of the each bounding box in boxes are\n encoded as [y_min, x_min, y_max, x_max].\n b2: the other bounding box. 
The coordinates of the each bounding box\n in boxes are encoded as [y_min, x_min, y_max, x_max].\n mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.\n\n Returns:\n GIoU loss float `Tensor`.\n \"\"\"\n zero = tf.convert_to_tensor(0., b1.dtype)\n b1_ymin, b1_xmin, b1_ymax, b1_xmax = tf.unstack(b1, 4, axis=-1)\n b2_ymin, b2_xmin, b2_ymax, b2_xmax = tf.unstack(b2, 4, axis=-1)\n b1_width = tf.maximum(zero, b1_xmax - b1_xmin)\n b1_height = tf.maximum(zero, b1_ymax - b1_ymin)\n b2_width = tf.maximum(zero, b2_xmax - b2_xmin)\n b2_height = tf.maximum(zero, b2_ymax - b2_ymin)\n b1_area = b1_width * b1_height\n b2_area = b2_width * b2_height\n\n intersect_ymin = tf.maximum(b1_ymin, b2_ymin)\n intersect_xmin = tf.maximum(b1_xmin, b2_xmin)\n intersect_ymax = tf.minimum(b1_ymax, b2_ymax)\n intersect_xmax = tf.minimum(b1_xmax, b2_xmax)\n intersect_width = tf.maximum(zero, intersect_xmax - intersect_xmin)\n intersect_height = tf.maximum(zero, intersect_ymax - intersect_ymin)\n intersect_area = intersect_width * intersect_height\n\n union_area = b1_area + b2_area - intersect_area\n iou = tf.math.divide_no_nan(intersect_area, union_area)\n if mode == 'iou':\n return iou\n\n enclose_ymin = tf.minimum(b1_ymin, b2_ymin)\n enclose_xmin = tf.minimum(b1_xmin, b2_xmin)\n enclose_ymax = tf.maximum(b1_ymax, b2_ymax)\n enclose_xmax = tf.maximum(b1_xmax, b2_xmax)\n enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin)\n enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin)\n enclose_area = enclose_width * enclose_height\n giou = iou - tf.math.divide_no_nan(\n (enclose_area - union_area), enclose_area)\n return giou\n", "path": "tensorflow_addons/losses/giou_loss.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils.types import Number\nfrom tensorflow_addons.utils import types\nfrom typing import Optional\n\n\[email protected]_keras_serializable(package=\"Addons\")\ndef rrelu(\n x: types.TensorLike,\n lower: Number = 0.125,\n upper: Number = 0.3333333333333333,\n training: Optional[str] = None,\n seed: Optional[str] = None,\n) -> tf.Tensor:\n \"\"\"rrelu function.\n\n Computes rrelu function:\n `x if x > 0 else random(lower, upper) * x` or\n `x if x > 0 else x * (lower + upper) / 2`\n depending on whether training is enabled.\n\n See [Empirical Evaluation of Rectified Activations in Convolutional Network](https://arxiv.org/abs/1505.00853).\n\n Args:\n x: A `Tensor`. Must be one of the following types:\n `float16`, `float32`, `float64`.\n lower: `float`, lower bound for random alpha.\n upper: `float`, upper bound for random alpha.\n training: `bool`, indicating whether the `call`\n is meant for training or inference.\n seed: `int`, this sets the operation-level seed.\n Returns:\n result: A `Tensor`. 
Has the same type as `x`.\n \"\"\"\n x = tf.convert_to_tensor(x)\n if training is None:\n training = tf.keras.backend.learning_phase()\n training = bool(tf.keras.backend.get_value(training))\n\n if training:\n alpha = tf.random.uniform(\n tf.shape(x), minval=lower, maxval=upper, dtype=x.dtype, seed=seed\n )\n else:\n alpha = tf.cast((lower + upper) / 2, x.dtype)\n\n return tf.where(x >= 0, x, alpha * x)\n", "path": "tensorflow_addons/activations/rrelu.py"}]}
| 3,305 | 455 |
gh_patches_debug_1673
|
rasdani/github-patches
|
git_diff
|
instadeepai__Mava-626
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TEST] Jax Datasets
### What do you want to test?
Jax dataset components
### Outline of test structure
* Unit tests
* Test components and hooks
### Definition of done
Passing checks, cover all hooks, edge cases considered
### Mandatory checklist before making a PR
* [ ] The success criteria laid down in “Definition of done” are met.
* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.
* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.
</issue>
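For orientation, a unit test for one of the dataset components below could be sketched along these lines; this is only an illustrative outline, and the fake builder, the patched `make_reverb_dataset` target, and the `dataset_iterator` attribute name are assumptions rather than anything specified in the issue:

```python
from types import SimpleNamespace
from unittest import mock

from mava.components.jax.building.datasets import TransitionDataset


def test_transition_dataset_sets_iterator_on_store():
    # Fake system builder exposing only the store attributes the hook reads.
    builder = SimpleNamespace(
        store=SimpleNamespace(
            trainer_id="trainer_0",
            data_server_client=SimpleNamespace(server_address="localhost:1234"),
        )
    )
    component = TransitionDataset()

    # Patch the acme dataset factory so no real reverb server is required.
    with mock.patch(
        "mava.components.jax.building.datasets.datasets.make_reverb_dataset",
        return_value=[1, 2, 3],
    ) as factory:
        component.on_building_trainer_dataset(builder)

    factory.assert_called_once()
    # The hook is expected to leave an iterator over the dataset on the store.
    assert list(builder.store.dataset_iterator) == [1, 2, 3]
```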
<code>
[start of mava/components/jax/building/datasets.py]
1 # python3
2 # Copyright 2021 InstaDeep Ltd. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Commonly used dataset components for system builders"""
17 import abc
18 from dataclasses import dataclass
19 from typing import Any, Callable, Optional
20
21 import reverb
22 from acme import datasets
23
24 from mava.components.jax import Component
25 from mava.core_jax import SystemBuilder
26
27 Transform = Callable[[reverb.ReplaySample], reverb.ReplaySample]
28
29
30 class TrainerDataset(Component):
31 @abc.abstractmethod
32 def __init__(
33 self,
34 config: Any,
35 ):
36 """_summary_
37
38 Args:
39 config : _description_.
40 """
41 self.config = config
42
43 @abc.abstractmethod
44 def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:
45 """_summary_
46
47 Args:
48 builder : _description_
49 """
50 pass
51
52 @staticmethod
53 def name() -> str:
54 """_summary_
55
56 Returns:
57 _description_
58 """
59 return "trainer_dataset"
60
61
62 @dataclass
63 class TransitionDatasetConfig:
64 sample_batch_size: int = 256
65 prefetch_size: Optional[int] = None
66 num_parallel_calls: int = 12
67 max_in_flight_samples_per_worker: Optional[int] = None
68 postprocess: Optional[Transform] = None
69 # dataset_name: str = "transition_dataset"
70
71
72 class TransitionDataset(TrainerDataset):
73 def __init__(
74 self,
75 config: TransitionDatasetConfig = TransitionDatasetConfig(),
76 ):
77 """_summary_
78
79 Args:
80 config : _description_.
81 """
82 self.config = config
83
84 def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:
85 """_summary_
86
87 Args:
88 builder : _description_
89 """
90 max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker
91 dataset = datasets.make_reverb_dataset(
92 table=builder.store.trainer_id,
93 server_address=builder.store.data_server_client.server_address,
94 batch_size=self.config.sample_batch_size,
95 prefetch_size=self.config.prefetch_size,
96 num_parallel_calls=self.config.num_parallel_calls,
97 max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,
98 postprocess=self.config.postprocess,
99 )
100
101 builder.store.dataset = iter(dataset)
102
103 @staticmethod
104 def config_class() -> Optional[Callable]:
105 """Config class used for component.
106
107 Returns:
108 config class/dataclass for component.
109 """
110 return TransitionDatasetConfig
111
112
113 @dataclass
114 class TrajectoryDatasetConfig:
115 sample_batch_size: int = 256
116 max_in_flight_samples_per_worker: int = 512
117 num_workers_per_iterator: int = -1
118 max_samples_per_stream: int = -1
119 rate_limiter_timeout_ms: int = -1
120 get_signature_timeout_secs: Optional[int] = None
121 # max_samples: int = -1
122 # dataset_name: str = "trajectory_dataset"
123
124
125 class TrajectoryDataset(TrainerDataset):
126 def __init__(
127 self,
128 config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),
129 ):
130 """_summary_
131
132 Args:
133 config : _description_.
134 """
135 self.config = config
136
137 def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:
138 """_summary_
139
140 Args:
141 builder : _description_
142 """
143 dataset = reverb.TrajectoryDataset.from_table_signature(
144 server_address=builder.store.data_server_client.server_address,
145 table=builder.store.trainer_id,
146 max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,
147 num_workers_per_iterator=self.config.num_workers_per_iterator,
148 max_samples_per_stream=self.config.max_samples_per_stream,
149 rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,
150 get_signature_timeout_secs=self.config.get_signature_timeout_secs,
151 # max_samples=self.config.max_samples,
152 )
153
154 # Add batch dimension.
155 dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)
156 builder.store.sample_batch_size = self.config.sample_batch_size
157
158 builder.store.dataset_iterator = dataset.as_numpy_iterator()
159
160 @staticmethod
161 def config_class() -> Optional[Callable]:
162 """Config class used for component.
163
164 Returns:
165 config class/dataclass for component.
166 """
167 return TrajectoryDatasetConfig
168
[end of mava/components/jax/building/datasets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mava/components/jax/building/datasets.py b/mava/components/jax/building/datasets.py
--- a/mava/components/jax/building/datasets.py
+++ b/mava/components/jax/building/datasets.py
@@ -98,7 +98,7 @@
postprocess=self.config.postprocess,
)
- builder.store.dataset = iter(dataset)
+ builder.store.dataset_iterator = iter(dataset)
@staticmethod
def config_class() -> Optional[Callable]:
|
{"golden_diff": "diff --git a/mava/components/jax/building/datasets.py b/mava/components/jax/building/datasets.py\n--- a/mava/components/jax/building/datasets.py\n+++ b/mava/components/jax/building/datasets.py\n@@ -98,7 +98,7 @@\n postprocess=self.config.postprocess,\n )\n \n- builder.store.dataset = iter(dataset)\n+ builder.store.dataset_iterator = iter(dataset)\n \n @staticmethod\n def config_class() -> Optional[Callable]:\n", "issue": "[TEST] Jax Datasets\n### What do you want to test?\r\nJax dataset components\r\n\r\n### Outline of test structure\r\n* Unit tests\r\n* Test components and hooks\r\n\r\n### Definition of done\r\nPassing checks, cover all hooks, edge cases considered\r\n\r\n### Mandatory checklist before making a PR\r\n* [ ] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [ ] Test code is documented - docstrings for methods and classes, static types for arguments.\r\n* [ ] Documentation is updated - README, CONTRIBUTING, or other documentation.\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Commonly used dataset components for system builders\"\"\"\nimport abc\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Optional\n\nimport reverb\nfrom acme import datasets\n\nfrom mava.components.jax import Component\nfrom mava.core_jax import SystemBuilder\n\nTransform = Callable[[reverb.ReplaySample], reverb.ReplaySample]\n\n\nclass TrainerDataset(Component):\n @abc.abstractmethod\n def __init__(\n self,\n config: Any,\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n @abc.abstractmethod\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n pass\n\n @staticmethod\n def name() -> str:\n \"\"\"_summary_\n\n Returns:\n _description_\n \"\"\"\n return \"trainer_dataset\"\n\n\n@dataclass\nclass TransitionDatasetConfig:\n sample_batch_size: int = 256\n prefetch_size: Optional[int] = None\n num_parallel_calls: int = 12\n max_in_flight_samples_per_worker: Optional[int] = None\n postprocess: Optional[Transform] = None\n # dataset_name: str = \"transition_dataset\"\n\n\nclass TransitionDataset(TrainerDataset):\n def __init__(\n self,\n config: TransitionDatasetConfig = TransitionDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n max_in_flight_samples_per_worker = self.config.max_in_flight_samples_per_worker\n dataset = datasets.make_reverb_dataset(\n table=builder.store.trainer_id,\n server_address=builder.store.data_server_client.server_address,\n batch_size=self.config.sample_batch_size,\n prefetch_size=self.config.prefetch_size,\n num_parallel_calls=self.config.num_parallel_calls,\n 
max_in_flight_samples_per_worker=max_in_flight_samples_per_worker,\n postprocess=self.config.postprocess,\n )\n\n builder.store.dataset = iter(dataset)\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TransitionDatasetConfig\n\n\n@dataclass\nclass TrajectoryDatasetConfig:\n sample_batch_size: int = 256\n max_in_flight_samples_per_worker: int = 512\n num_workers_per_iterator: int = -1\n max_samples_per_stream: int = -1\n rate_limiter_timeout_ms: int = -1\n get_signature_timeout_secs: Optional[int] = None\n # max_samples: int = -1\n # dataset_name: str = \"trajectory_dataset\"\n\n\nclass TrajectoryDataset(TrainerDataset):\n def __init__(\n self,\n config: TrajectoryDatasetConfig = TrajectoryDatasetConfig(),\n ):\n \"\"\"_summary_\n\n Args:\n config : _description_.\n \"\"\"\n self.config = config\n\n def on_building_trainer_dataset(self, builder: SystemBuilder) -> None:\n \"\"\"_summary_\n\n Args:\n builder : _description_\n \"\"\"\n dataset = reverb.TrajectoryDataset.from_table_signature(\n server_address=builder.store.data_server_client.server_address,\n table=builder.store.trainer_id,\n max_in_flight_samples_per_worker=2 * self.config.sample_batch_size,\n num_workers_per_iterator=self.config.num_workers_per_iterator,\n max_samples_per_stream=self.config.max_samples_per_stream,\n rate_limiter_timeout_ms=self.config.rate_limiter_timeout_ms,\n get_signature_timeout_secs=self.config.get_signature_timeout_secs,\n # max_samples=self.config.max_samples,\n )\n\n # Add batch dimension.\n dataset = dataset.batch(self.config.sample_batch_size, drop_remainder=True)\n builder.store.sample_batch_size = self.config.sample_batch_size\n\n builder.store.dataset_iterator = dataset.as_numpy_iterator()\n\n @staticmethod\n def config_class() -> Optional[Callable]:\n \"\"\"Config class used for component.\n\n Returns:\n config class/dataclass for component.\n \"\"\"\n return TrajectoryDatasetConfig\n", "path": "mava/components/jax/building/datasets.py"}]}
| 2,126 | 109 |
gh_patches_debug_31533
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-5943
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Edit user config file programmatically
It would be great to be able to modify the user config file from Qiskit. Something like `user_config['default']['option'] = True`.
</issue>
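As a rough sketch of what such a setter could do under the hood, the standard-library `configparser` already covers the mechanics; the helper name, file-path handling, and example option below are illustrative only and not an existing Qiskit API:

```python
import configparser
import os

SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".qiskit", "settings.conf")


def set_user_option(key, value, section="default", filename=SETTINGS_FILE):
    """Write a single option into the user config file, creating it if needed."""
    config = configparser.ConfigParser()
    config.read(filename)  # silently skips a missing file
    if section not in config.sections():
        config.add_section(section)
    config.set(section, key, str(value))
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, "w") as handle:
        config.write(handle)


# Example: persist a default circuit drawer choice.
set_user_option("circuit_drawer", "mpl")
```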
<code>
[start of qiskit/user_config.py]
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """Utils for reading a user preference config files."""
14
15 import configparser
16 import os
17 from warnings import warn
18
19 from qiskit import exceptions
20
21 DEFAULT_FILENAME = os.path.join(os.path.expanduser("~"), ".qiskit", "settings.conf")
22
23
24 class UserConfig:
25 """Class representing a user config file
26
27 The config file format should look like:
28
29 [default]
30 circuit_drawer = mpl
31 circuit_mpl_style = default
32 circuit_mpl_style_path = ~/.qiskit:<default location>
33 transpile_optimization_level = 1
34 parallel = False
35 num_processes = 4
36
37 """
38
39 def __init__(self, filename=None):
40 """Create a UserConfig
41
42 Args:
43 filename (str): The path to the user config file. If one isn't
44 specified, ~/.qiskit/settings.conf is used.
45 """
46 if filename is None:
47 self.filename = DEFAULT_FILENAME
48 else:
49 self.filename = filename
50 self.settings = {}
51 self.config_parser = configparser.ConfigParser()
52
53 def read_config_file(self):
54 """Read config file and parse the contents into the settings attr."""
55 if not os.path.isfile(self.filename):
56 return
57 self.config_parser.read(self.filename)
58 if "default" in self.config_parser.sections():
59 # Parse circuit_drawer
60 circuit_drawer = self.config_parser.get("default", "circuit_drawer", fallback=None)
61 if circuit_drawer:
62 if circuit_drawer not in ["text", "mpl", "latex", "latex_source", "auto"]:
63 raise exceptions.QiskitUserConfigError(
64 "%s is not a valid circuit drawer backend. Must be "
65 "either 'text', 'mpl', 'latex', 'latex_source', or "
66 "'auto'." % circuit_drawer
67 )
68 self.settings["circuit_drawer"] = circuit_drawer
69
70 # Parse state_drawer
71 state_drawer = self.config_parser.get("default", "state_drawer", fallback=None)
72 if state_drawer:
73 valid_state_drawers = [
74 "repr",
75 "text",
76 "latex",
77 "latex_source",
78 "qsphere",
79 "hinton",
80 "bloch",
81 ]
82 if state_drawer not in valid_state_drawers:
83 valid_choices_string = "', '".join(c for c in valid_state_drawers)
84 raise exceptions.QiskitUserConfigError(
85 f"'{state_drawer}' is not a valid state drawer backend. "
86 f"Choose from: '{valid_choices_string}'"
87 )
88 self.settings["state_drawer"] = state_drawer
89
90 # Parse circuit_mpl_style
91 circuit_mpl_style = self.config_parser.get(
92 "default", "circuit_mpl_style", fallback=None
93 )
94 if circuit_mpl_style:
95 if not isinstance(circuit_mpl_style, str):
96 warn(
97 "%s is not a valid mpl circuit style. Must be "
98 "a text string. Will not load style." % circuit_mpl_style,
99 UserWarning,
100 2,
101 )
102 self.settings["circuit_mpl_style"] = circuit_mpl_style
103
104 # Parse circuit_mpl_style_path
105 circuit_mpl_style_path = self.config_parser.get(
106 "default", "circuit_mpl_style_path", fallback=None
107 )
108 if circuit_mpl_style_path:
109 cpath_list = circuit_mpl_style_path.split(":")
110 for path in cpath_list:
111 if not os.path.exists(os.path.expanduser(path)):
112 warn(
113 "%s is not a valid circuit mpl style path."
114 " Correct the path in ~/.qiskit/settings.conf." % path,
115 UserWarning,
116 2,
117 )
118 self.settings["circuit_mpl_style_path"] = cpath_list
119
120 # Parse transpile_optimization_level
121 transpile_optimization_level = self.config_parser.getint(
122 "default", "transpile_optimization_level", fallback=-1
123 )
124 if transpile_optimization_level != -1:
125 if transpile_optimization_level < 0 or transpile_optimization_level > 3:
126 raise exceptions.QiskitUserConfigError(
127 "%s is not a valid optimization level. Must be " "0, 1, 2, or 3."
128 )
129 self.settings["transpile_optimization_level"] = transpile_optimization_level
130
131 # Parse parallel
132 parallel_enabled = self.config_parser.getboolean("default", "parallel", fallback=None)
133 if parallel_enabled is not None:
134 self.settings["parallel_enabled"] = parallel_enabled
135
136 # Parse num_processes
137 num_processes = self.config_parser.getint("default", "num_processes", fallback=-1)
138 if num_processes != -1:
139 if num_processes <= 0:
140 raise exceptions.QiskitUserConfigError(
141 "%s is not a valid number of processes. Must be " "greater than 0"
142 )
143 self.settings["num_processes"] = num_processes
144
145
146 def get_config():
147 """Read the config file from the default location or env var
148
149 It will read a config file at either the default location
150 ~/.qiskit/settings.conf or if set the value of the QISKIT_SETTINGS env var.
151
152 It will return the parsed settings dict from the parsed config file.
153 Returns:
154 dict: The settings dict from the parsed config file.
155 """
156 filename = os.getenv("QISKIT_SETTINGS", DEFAULT_FILENAME)
157 if not os.path.isfile(filename):
158 return {}
159 user_config = UserConfig(filename)
160 user_config.read_config_file()
161 return user_config.settings
162
[end of qiskit/user_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qiskit/user_config.py b/qiskit/user_config.py
--- a/qiskit/user_config.py
+++ b/qiskit/user_config.py
@@ -143,6 +143,70 @@
self.settings["num_processes"] = num_processes
+def set_config(key, value, section=None, file_path=None):
+ """Adds or modifies a user configuration
+
+ It will add configuration to the currently configured location
+ or the value of file argument.
+
+ Only valid user config can be set in 'default' section. Custom
+ user config can be added in any other sections.
+
+ Changes to the existing config file will not be reflected in
+ the current session since the config file is parsed at import time.
+
+ Args:
+ key (str): name of the config
+ value (obj): value of the config
+ section (str, optional): if not specified, adds it to the
+ `default` section of the config file.
+ file_path (str, optional): the file to which config is added.
+ If not specified, adds it to the default config file or
+ if set, the value of `QISKIT_SETTINGS` env variable.
+
+ Raises:
+ QiskitUserConfigError: if the config is invalid
+ """
+ filename = file_path or os.getenv("QISKIT_SETTINGS", DEFAULT_FILENAME)
+ section = "default" if section is None else section
+
+ if not isinstance(key, str):
+ raise exceptions.QiskitUserConfigError("Key must be string type")
+
+ valid_config = {
+ "circuit_drawer",
+ "circuit_mpl_style",
+ "circuit_mpl_style_path",
+ "transpile_optimization_level",
+ "parallel",
+ "num_processes",
+ }
+
+ if section in [None, "default"]:
+ if key not in valid_config:
+ raise exceptions.QiskitUserConfigError("{} is not a valid user config.".format(key))
+
+ config = configparser.ConfigParser()
+ config.read(filename)
+
+ if section not in config.sections():
+ config.add_section(section)
+
+ config.set(section, key, str(value))
+
+ try:
+ with open(filename, "w") as cfgfile:
+ config.write(cfgfile)
+ except OSError as ex:
+ raise exceptions.QiskitUserConfigError(
+ "Unable to load the config file {}. Error: '{}'".format(filename, str(ex))
+ )
+
+ # validates config
+ user_config = UserConfig(filename)
+ user_config.read_config_file()
+
+
def get_config():
"""Read the config file from the default location or env var
|
{"golden_diff": "diff --git a/qiskit/user_config.py b/qiskit/user_config.py\n--- a/qiskit/user_config.py\n+++ b/qiskit/user_config.py\n@@ -143,6 +143,70 @@\n self.settings[\"num_processes\"] = num_processes\n \n \n+def set_config(key, value, section=None, file_path=None):\n+ \"\"\"Adds or modifies a user configuration\n+\n+ It will add configuration to the currently configured location\n+ or the value of file argument.\n+\n+ Only valid user config can be set in 'default' section. Custom\n+ user config can be added in any other sections.\n+\n+ Changes to the existing config file will not be reflected in\n+ the current session since the config file is parsed at import time.\n+\n+ Args:\n+ key (str): name of the config\n+ value (obj): value of the config\n+ section (str, optional): if not specified, adds it to the\n+ `default` section of the config file.\n+ file_path (str, optional): the file to which config is added.\n+ If not specified, adds it to the default config file or\n+ if set, the value of `QISKIT_SETTINGS` env variable.\n+\n+ Raises:\n+ QiskitUserConfigError: if the config is invalid\n+ \"\"\"\n+ filename = file_path or os.getenv(\"QISKIT_SETTINGS\", DEFAULT_FILENAME)\n+ section = \"default\" if section is None else section\n+\n+ if not isinstance(key, str):\n+ raise exceptions.QiskitUserConfigError(\"Key must be string type\")\n+\n+ valid_config = {\n+ \"circuit_drawer\",\n+ \"circuit_mpl_style\",\n+ \"circuit_mpl_style_path\",\n+ \"transpile_optimization_level\",\n+ \"parallel\",\n+ \"num_processes\",\n+ }\n+\n+ if section in [None, \"default\"]:\n+ if key not in valid_config:\n+ raise exceptions.QiskitUserConfigError(\"{} is not a valid user config.\".format(key))\n+\n+ config = configparser.ConfigParser()\n+ config.read(filename)\n+\n+ if section not in config.sections():\n+ config.add_section(section)\n+\n+ config.set(section, key, str(value))\n+\n+ try:\n+ with open(filename, \"w\") as cfgfile:\n+ config.write(cfgfile)\n+ except OSError as ex:\n+ raise exceptions.QiskitUserConfigError(\n+ \"Unable to load the config file {}. Error: '{}'\".format(filename, str(ex))\n+ )\n+\n+ # validates config\n+ user_config = UserConfig(filename)\n+ user_config.read_config_file()\n+\n+\n def get_config():\n \"\"\"Read the config file from the default location or env var\n", "issue": "Edit user config file programatically\nIt would be great to be able to modify the user config file from Qiskit. Something like `user_config['default']['option'] = True`.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utils for reading a user preference config files.\"\"\"\n\nimport configparser\nimport os\nfrom warnings import warn\n\nfrom qiskit import exceptions\n\nDEFAULT_FILENAME = os.path.join(os.path.expanduser(\"~\"), \".qiskit\", \"settings.conf\")\n\n\nclass UserConfig:\n \"\"\"Class representing a user config file\n\n The config file format should look like:\n\n [default]\n circuit_drawer = mpl\n circuit_mpl_style = default\n circuit_mpl_style_path = ~/.qiskit:<default location>\n transpile_optimization_level = 1\n parallel = False\n num_processes = 4\n\n \"\"\"\n\n def __init__(self, filename=None):\n \"\"\"Create a UserConfig\n\n Args:\n filename (str): The path to the user config file. If one isn't\n specified, ~/.qiskit/settings.conf is used.\n \"\"\"\n if filename is None:\n self.filename = DEFAULT_FILENAME\n else:\n self.filename = filename\n self.settings = {}\n self.config_parser = configparser.ConfigParser()\n\n def read_config_file(self):\n \"\"\"Read config file and parse the contents into the settings attr.\"\"\"\n if not os.path.isfile(self.filename):\n return\n self.config_parser.read(self.filename)\n if \"default\" in self.config_parser.sections():\n # Parse circuit_drawer\n circuit_drawer = self.config_parser.get(\"default\", \"circuit_drawer\", fallback=None)\n if circuit_drawer:\n if circuit_drawer not in [\"text\", \"mpl\", \"latex\", \"latex_source\", \"auto\"]:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid circuit drawer backend. Must be \"\n \"either 'text', 'mpl', 'latex', 'latex_source', or \"\n \"'auto'.\" % circuit_drawer\n )\n self.settings[\"circuit_drawer\"] = circuit_drawer\n\n # Parse state_drawer\n state_drawer = self.config_parser.get(\"default\", \"state_drawer\", fallback=None)\n if state_drawer:\n valid_state_drawers = [\n \"repr\",\n \"text\",\n \"latex\",\n \"latex_source\",\n \"qsphere\",\n \"hinton\",\n \"bloch\",\n ]\n if state_drawer not in valid_state_drawers:\n valid_choices_string = \"', '\".join(c for c in valid_state_drawers)\n raise exceptions.QiskitUserConfigError(\n f\"'{state_drawer}' is not a valid state drawer backend. \"\n f\"Choose from: '{valid_choices_string}'\"\n )\n self.settings[\"state_drawer\"] = state_drawer\n\n # Parse circuit_mpl_style\n circuit_mpl_style = self.config_parser.get(\n \"default\", \"circuit_mpl_style\", fallback=None\n )\n if circuit_mpl_style:\n if not isinstance(circuit_mpl_style, str):\n warn(\n \"%s is not a valid mpl circuit style. Must be \"\n \"a text string. 
Will not load style.\" % circuit_mpl_style,\n UserWarning,\n 2,\n )\n self.settings[\"circuit_mpl_style\"] = circuit_mpl_style\n\n # Parse circuit_mpl_style_path\n circuit_mpl_style_path = self.config_parser.get(\n \"default\", \"circuit_mpl_style_path\", fallback=None\n )\n if circuit_mpl_style_path:\n cpath_list = circuit_mpl_style_path.split(\":\")\n for path in cpath_list:\n if not os.path.exists(os.path.expanduser(path)):\n warn(\n \"%s is not a valid circuit mpl style path.\"\n \" Correct the path in ~/.qiskit/settings.conf.\" % path,\n UserWarning,\n 2,\n )\n self.settings[\"circuit_mpl_style_path\"] = cpath_list\n\n # Parse transpile_optimization_level\n transpile_optimization_level = self.config_parser.getint(\n \"default\", \"transpile_optimization_level\", fallback=-1\n )\n if transpile_optimization_level != -1:\n if transpile_optimization_level < 0 or transpile_optimization_level > 3:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid optimization level. Must be \" \"0, 1, 2, or 3.\"\n )\n self.settings[\"transpile_optimization_level\"] = transpile_optimization_level\n\n # Parse parallel\n parallel_enabled = self.config_parser.getboolean(\"default\", \"parallel\", fallback=None)\n if parallel_enabled is not None:\n self.settings[\"parallel_enabled\"] = parallel_enabled\n\n # Parse num_processes\n num_processes = self.config_parser.getint(\"default\", \"num_processes\", fallback=-1)\n if num_processes != -1:\n if num_processes <= 0:\n raise exceptions.QiskitUserConfigError(\n \"%s is not a valid number of processes. Must be \" \"greater than 0\"\n )\n self.settings[\"num_processes\"] = num_processes\n\n\ndef get_config():\n \"\"\"Read the config file from the default location or env var\n\n It will read a config file at either the default location\n ~/.qiskit/settings.conf or if set the value of the QISKIT_SETTINGS env var.\n\n It will return the parsed settings dict from the parsed config file.\n Returns:\n dict: The settings dict from the parsed config file.\n \"\"\"\n filename = os.getenv(\"QISKIT_SETTINGS\", DEFAULT_FILENAME)\n if not os.path.isfile(filename):\n return {}\n user_config = UserConfig(filename)\n user_config.read_config_file()\n return user_config.settings\n", "path": "qiskit/user_config.py"}]}
| 2,263 | 608 |
gh_patches_debug_5778
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-2099
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'lines__product' is an invalid parameter to prefetch_related()
### What I'm trying to achieve
See order details page in the storefront.
### Steps to reproduce the problem
1. Create a digital product in the dashboard (no shipping required).
2. Log in as a customer and buy this product.
3. Once the order is created, go to order details page in the storefront. I've accessed it from my order confirmation email.
### What I expected to happen
See order details page in the storefront.
### What happened instead/how it failed
The following error happened:
```
Cannot find 'product' on OrderLine object, 'lines__product' is an invalid parameter to prefetch_related()
```
I don't know why it works for other orders that I have in the database (created by `populatedb`).
It seems that the problem is this line: https://github.com/mirumee/saleor/blob/master/saleor/order/views.py#L29
</issue>
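For context, the traceback points at the prefetch path rather than the view logic; a minimal sketch of the likely correction (assuming `OrderLine` exposes a `variant` relation, as the `payment` view's prefetch in the same file suggests) is:

```python
from django.shortcuts import get_object_or_404

from saleor.order.models import Order


def get_order_for_details(token):
    """Sketch of the details() queryset with the prefetch path corrected."""
    orders = Order.objects.confirmed().prefetch_related(
        'lines__variant',  # was 'lines__product'; OrderLine has no 'product' relation
        'fulfillments',
        'fulfillments__lines',
        'fulfillments__lines__order_line')
    orders = orders.select_related(
        'billing_address', 'shipping_address', 'user')
    return get_object_or_404(orders, token=token)
```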
<code>
[start of saleor/order/views.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import auth, messages
5 from django.contrib.auth.decorators import login_required
6 from django.db import transaction
7 from django.http import Http404, HttpResponseForbidden
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.urls import reverse
11 from django.utils.translation import pgettext_lazy
12 from django.views.decorators.csrf import csrf_exempt
13 from payments import PaymentStatus, RedirectNeeded
14
15 from . import FulfillmentStatus, OrderStatus
16 from ..account.forms import LoginForm
17 from ..account.models import User
18 from ..core.utils import get_client_ip
19 from .forms import (
20 OrderNoteForm, PasswordForm, PaymentDeleteForm, PaymentMethodsForm)
21 from .models import Order, OrderNote, Payment
22 from .utils import attach_order_to_user, check_order_status
23
24 logger = logging.getLogger(__name__)
25
26
27 def details(request, token):
28 orders = Order.objects.confirmed().prefetch_related(
29 'lines__product', 'fulfillments', 'fulfillments__lines',
30 'fulfillments__lines__order_line')
31 orders = orders.select_related(
32 'billing_address', 'shipping_address', 'user')
33 order = get_object_or_404(orders, token=token)
34 notes = order.notes.filter(is_public=True)
35 ctx = {'order': order, 'notes': notes}
36 if order.status == OrderStatus.UNFULFILLED:
37 user = request.user if request.user.is_authenticated else None
38 note = OrderNote(order=order, user=user)
39 note_form = OrderNoteForm(request.POST or None, instance=note)
40 ctx.update({'note_form': note_form})
41 if request.method == 'POST':
42 if note_form.is_valid():
43 note_form.save()
44 return redirect('order:details', token=order.token)
45 fulfillments = order.fulfillments.filter(
46 status=FulfillmentStatus.FULFILLED)
47 ctx.update({'fulfillments': fulfillments})
48 return TemplateResponse(request, 'order/details.html', ctx)
49
50
51 def payment(request, token):
52 orders = Order.objects.confirmed().filter(billing_address__isnull=False)
53 orders = Order.objects.prefetch_related('lines__variant')
54 orders = orders.select_related(
55 'billing_address', 'shipping_address', 'user')
56 order = get_object_or_404(orders, token=token)
57 payments = order.payments.all()
58 form_data = request.POST or None
59 try:
60 waiting_payment = order.payments.get(status=PaymentStatus.WAITING)
61 except Payment.DoesNotExist:
62 waiting_payment = None
63 waiting_payment_form = None
64 else:
65 form_data = None
66 waiting_payment_form = PaymentDeleteForm(
67 None, order=order, initial={'payment_id': waiting_payment.id})
68 if order.is_fully_paid() or not order.billing_address:
69 form_data = None
70 payment_form = None
71 if not order.is_pre_authorized():
72 payment_form = PaymentMethodsForm(form_data)
73 # FIXME: redirect if there is only one payment method
74 if payment_form.is_valid():
75 payment_method = payment_form.cleaned_data['method']
76 return redirect(
77 'order:payment', token=order.token, variant=payment_method)
78 ctx = {
79 'order': order, 'payment_form': payment_form, 'payments': payments,
80 'waiting_payment': waiting_payment,
81 'waiting_payment_form': waiting_payment_form}
82 return TemplateResponse(request, 'order/payment.html', ctx)
83
84
85 @check_order_status
86 def start_payment(request, order, variant):
87 waiting_payments = order.payments.filter(
88 status=PaymentStatus.WAITING).exists()
89 if waiting_payments:
90 return redirect('order:payment', token=order.token)
91 billing = order.billing_address
92 total = order.total
93 defaults = {
94 'total': total.gross.amount,
95 'tax': total.tax.amount,
96 'currency': total.currency,
97 'delivery': order.shipping_price.gross.amount,
98 'billing_first_name': billing.first_name,
99 'billing_last_name': billing.last_name,
100 'billing_address_1': billing.street_address_1,
101 'billing_address_2': billing.street_address_2,
102 'billing_city': billing.city,
103 'billing_postcode': billing.postal_code,
104 'billing_country_code': billing.country.code,
105 'billing_email': order.user_email,
106 'description': pgettext_lazy(
107 'Payment description', 'Order %(order_number)s') % {
108 'order_number': order},
109 'billing_country_area': billing.country_area,
110 'customer_ip_address': get_client_ip(request)}
111 variant_choices = settings.CHECKOUT_PAYMENT_CHOICES
112 if variant not in [code for code, dummy_name in variant_choices]:
113 raise Http404('%r is not a valid payment variant' % (variant,))
114 with transaction.atomic():
115 payment, dummy_created = Payment.objects.get_or_create(
116 variant=variant, status=PaymentStatus.WAITING, order=order,
117 defaults=defaults)
118 try:
119 form = payment.get_form(data=request.POST or None)
120 except RedirectNeeded as redirect_to:
121 return redirect(str(redirect_to))
122 except Exception:
123 logger.exception('Error communicating with the payment gateway')
124 msg = pgettext_lazy(
125 'Payment gateway error',
126 'Oops, it looks like we were unable to contact the selected '
127 'payment service')
128 messages.error(request, msg)
129 payment.change_status(PaymentStatus.ERROR)
130 return redirect('order:payment', token=order.token)
131 template = 'order/payment/%s.html' % variant
132 ctx = {'form': form, 'payment': payment}
133 return TemplateResponse(
134 request, [template, 'order/payment/default.html'], ctx)
135
136
137 @check_order_status
138 def cancel_payment(request, order):
139 form = PaymentDeleteForm(request.POST or None, order=order)
140 if form.is_valid():
141 with transaction.atomic():
142 form.save()
143 return redirect('order:payment', token=order.token)
144 return HttpResponseForbidden()
145
146
147 @csrf_exempt
148 def payment_success(request, token):
149 """Receive request from payment gateway after paying for an order.
150
151 Redirects user to payment success.
152 All post data and query strings are dropped.
153 """
154 url = reverse('order:checkout-success', kwargs={'token': token})
155 return redirect(url)
156
157
158 def checkout_success(request, token):
159 """Redirect user after placing an order.
160
161 Anonymous users are redirected to the checkout success page.
162 Registered users are redirected to order details page and the order
163 is attached to their account.
164 """
165 order = get_object_or_404(Order, token=token)
166 email = order.user_email
167 ctx = {'email': email, 'order': order}
168 if request.user.is_authenticated:
169 return TemplateResponse(request, 'order/checkout_success.html', ctx)
170 form_data = request.POST.copy()
171 if form_data:
172 form_data.update({'email': email})
173 register_form = PasswordForm(form_data or None)
174 if register_form.is_valid():
175 register_form.save()
176 password = register_form.cleaned_data.get('password')
177 user = auth.authenticate(
178 request=request, email=email, password=password)
179 auth.login(request, user)
180 attach_order_to_user(order, user)
181 return redirect('order:details', token=token)
182 user_exists = User.objects.filter(email=email).exists()
183 login_form = LoginForm(
184 initial={'username': email}) if user_exists else None
185 ctx.update({'form': register_form, 'login_form': login_form})
186 return TemplateResponse(
187 request, 'order/checkout_success_anonymous.html', ctx)
188
189
190 @login_required
191 def connect_order_with_user(request, token):
192 """Connect newly created order to an authenticated user."""
193 try:
194 order = Order.objects.get(user_email=request.user.email, token=token)
195 except Order.DoesNotExist:
196 order = None
197 if not order:
198 msg = pgettext_lazy(
199 'Connect order with user warning message',
200 "We couldn't assign the order to your account as the email"
201 " addresses don't match")
202 messages.warning(request, msg)
203 return redirect('account:details')
204 attach_order_to_user(order, request.user)
205 msg = pgettext_lazy(
206 'storefront message',
207 'The order is now assigned to your account')
208 messages.success(request, msg)
209 return redirect('order:details', token=order.token)
210
[end of saleor/order/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/order/views.py b/saleor/order/views.py
--- a/saleor/order/views.py
+++ b/saleor/order/views.py
@@ -26,7 +26,7 @@
def details(request, token):
orders = Order.objects.confirmed().prefetch_related(
- 'lines__product', 'fulfillments', 'fulfillments__lines',
+ 'lines__variant', 'fulfillments', 'fulfillments__lines',
'fulfillments__lines__order_line')
orders = orders.select_related(
'billing_address', 'shipping_address', 'user')
|
{"golden_diff": "diff --git a/saleor/order/views.py b/saleor/order/views.py\n--- a/saleor/order/views.py\n+++ b/saleor/order/views.py\n@@ -26,7 +26,7 @@\n \n def details(request, token):\n orders = Order.objects.confirmed().prefetch_related(\n- 'lines__product', 'fulfillments', 'fulfillments__lines',\n+ 'lines__variant', 'fulfillments', 'fulfillments__lines',\n 'fulfillments__lines__order_line')\n orders = orders.select_related(\n 'billing_address', 'shipping_address', 'user')\n", "issue": "'lines__product' is an invalid parameter to prefetch_related()\n### What I'm trying to achieve\r\nSee order details page in the storefront.\r\n\r\n### Steps to reproduce the problem\r\n1. Create a digital product in the dashboard (no shipping required).\r\n2. Log in as a customer and buy this product.\r\n3. Once the order is created, go to order details page in the storefront. I've accessed it from my order confirmation email.\r\n\r\n### What I expected to happen\r\nSee order details page in the storefront.\r\n\r\n### What happened instead/how it failed\r\nThe following error happened:\r\n\r\n```\r\nCannot find 'product' on OrderLine object, 'lines__product' is an invalid parameter to prefetch_related()\r\n```\r\n\r\nI don't know why why it works for other orders that I have in database (created by `populatedb`).\r\n\r\nSeems that the problem is this line : https://github.com/mirumee/saleor/blob/master/saleor/order/views.py#L29\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import transaction\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext_lazy\nfrom django.views.decorators.csrf import csrf_exempt\nfrom payments import PaymentStatus, RedirectNeeded\n\nfrom . 
import FulfillmentStatus, OrderStatus\nfrom ..account.forms import LoginForm\nfrom ..account.models import User\nfrom ..core.utils import get_client_ip\nfrom .forms import (\n OrderNoteForm, PasswordForm, PaymentDeleteForm, PaymentMethodsForm)\nfrom .models import Order, OrderNote, Payment\nfrom .utils import attach_order_to_user, check_order_status\n\nlogger = logging.getLogger(__name__)\n\n\ndef details(request, token):\n orders = Order.objects.confirmed().prefetch_related(\n 'lines__product', 'fulfillments', 'fulfillments__lines',\n 'fulfillments__lines__order_line')\n orders = orders.select_related(\n 'billing_address', 'shipping_address', 'user')\n order = get_object_or_404(orders, token=token)\n notes = order.notes.filter(is_public=True)\n ctx = {'order': order, 'notes': notes}\n if order.status == OrderStatus.UNFULFILLED:\n user = request.user if request.user.is_authenticated else None\n note = OrderNote(order=order, user=user)\n note_form = OrderNoteForm(request.POST or None, instance=note)\n ctx.update({'note_form': note_form})\n if request.method == 'POST':\n if note_form.is_valid():\n note_form.save()\n return redirect('order:details', token=order.token)\n fulfillments = order.fulfillments.filter(\n status=FulfillmentStatus.FULFILLED)\n ctx.update({'fulfillments': fulfillments})\n return TemplateResponse(request, 'order/details.html', ctx)\n\n\ndef payment(request, token):\n orders = Order.objects.confirmed().filter(billing_address__isnull=False)\n orders = Order.objects.prefetch_related('lines__variant')\n orders = orders.select_related(\n 'billing_address', 'shipping_address', 'user')\n order = get_object_or_404(orders, token=token)\n payments = order.payments.all()\n form_data = request.POST or None\n try:\n waiting_payment = order.payments.get(status=PaymentStatus.WAITING)\n except Payment.DoesNotExist:\n waiting_payment = None\n waiting_payment_form = None\n else:\n form_data = None\n waiting_payment_form = PaymentDeleteForm(\n None, order=order, initial={'payment_id': waiting_payment.id})\n if order.is_fully_paid() or not order.billing_address:\n form_data = None\n payment_form = None\n if not order.is_pre_authorized():\n payment_form = PaymentMethodsForm(form_data)\n # FIXME: redirect if there is only one payment method\n if payment_form.is_valid():\n payment_method = payment_form.cleaned_data['method']\n return redirect(\n 'order:payment', token=order.token, variant=payment_method)\n ctx = {\n 'order': order, 'payment_form': payment_form, 'payments': payments,\n 'waiting_payment': waiting_payment,\n 'waiting_payment_form': waiting_payment_form}\n return TemplateResponse(request, 'order/payment.html', ctx)\n\n\n@check_order_status\ndef start_payment(request, order, variant):\n waiting_payments = order.payments.filter(\n status=PaymentStatus.WAITING).exists()\n if waiting_payments:\n return redirect('order:payment', token=order.token)\n billing = order.billing_address\n total = order.total\n defaults = {\n 'total': total.gross.amount,\n 'tax': total.tax.amount,\n 'currency': total.currency,\n 'delivery': order.shipping_price.gross.amount,\n 'billing_first_name': billing.first_name,\n 'billing_last_name': billing.last_name,\n 'billing_address_1': billing.street_address_1,\n 'billing_address_2': billing.street_address_2,\n 'billing_city': billing.city,\n 'billing_postcode': billing.postal_code,\n 'billing_country_code': billing.country.code,\n 'billing_email': order.user_email,\n 'description': pgettext_lazy(\n 'Payment description', 'Order %(order_number)s') % {\n 
'order_number': order},\n 'billing_country_area': billing.country_area,\n 'customer_ip_address': get_client_ip(request)}\n variant_choices = settings.CHECKOUT_PAYMENT_CHOICES\n if variant not in [code for code, dummy_name in variant_choices]:\n raise Http404('%r is not a valid payment variant' % (variant,))\n with transaction.atomic():\n payment, dummy_created = Payment.objects.get_or_create(\n variant=variant, status=PaymentStatus.WAITING, order=order,\n defaults=defaults)\n try:\n form = payment.get_form(data=request.POST or None)\n except RedirectNeeded as redirect_to:\n return redirect(str(redirect_to))\n except Exception:\n logger.exception('Error communicating with the payment gateway')\n msg = pgettext_lazy(\n 'Payment gateway error',\n 'Oops, it looks like we were unable to contact the selected '\n 'payment service')\n messages.error(request, msg)\n payment.change_status(PaymentStatus.ERROR)\n return redirect('order:payment', token=order.token)\n template = 'order/payment/%s.html' % variant\n ctx = {'form': form, 'payment': payment}\n return TemplateResponse(\n request, [template, 'order/payment/default.html'], ctx)\n\n\n@check_order_status\ndef cancel_payment(request, order):\n form = PaymentDeleteForm(request.POST or None, order=order)\n if form.is_valid():\n with transaction.atomic():\n form.save()\n return redirect('order:payment', token=order.token)\n return HttpResponseForbidden()\n\n\n@csrf_exempt\ndef payment_success(request, token):\n \"\"\"Receive request from payment gateway after paying for an order.\n\n Redirects user to payment success.\n All post data and query strings are dropped.\n \"\"\"\n url = reverse('order:checkout-success', kwargs={'token': token})\n return redirect(url)\n\n\ndef checkout_success(request, token):\n \"\"\"Redirect user after placing an order.\n\n Anonymous users are redirected to the checkout success page.\n Registered users are redirected to order details page and the order\n is attached to their account.\n \"\"\"\n order = get_object_or_404(Order, token=token)\n email = order.user_email\n ctx = {'email': email, 'order': order}\n if request.user.is_authenticated:\n return TemplateResponse(request, 'order/checkout_success.html', ctx)\n form_data = request.POST.copy()\n if form_data:\n form_data.update({'email': email})\n register_form = PasswordForm(form_data or None)\n if register_form.is_valid():\n register_form.save()\n password = register_form.cleaned_data.get('password')\n user = auth.authenticate(\n request=request, email=email, password=password)\n auth.login(request, user)\n attach_order_to_user(order, user)\n return redirect('order:details', token=token)\n user_exists = User.objects.filter(email=email).exists()\n login_form = LoginForm(\n initial={'username': email}) if user_exists else None\n ctx.update({'form': register_form, 'login_form': login_form})\n return TemplateResponse(\n request, 'order/checkout_success_anonymous.html', ctx)\n\n\n@login_required\ndef connect_order_with_user(request, token):\n \"\"\"Connect newly created order to an authenticated user.\"\"\"\n try:\n order = Order.objects.get(user_email=request.user.email, token=token)\n except Order.DoesNotExist:\n order = None\n if not order:\n msg = pgettext_lazy(\n 'Connect order with user warning message',\n \"We couldn't assign the order to your account as the email\"\n \" addresses don't match\")\n messages.warning(request, msg)\n return redirect('account:details')\n attach_order_to_user(order, request.user)\n msg = pgettext_lazy(\n 'storefront message',\n 'The order 
is now assigned to your account')\n messages.success(request, msg)\n return redirect('order:details', token=order.token)\n", "path": "saleor/order/views.py"}]}
| 3,061 | 135 |
gh_patches_debug_65358
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-696
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyOpenSSL support doesn't work with TLSv1.1 and TLSv1.2
`docker-py` tries to use the latest protocol version available in the ssl module, so it ends up requesting ssl.PROTOCOL_TLSv1_2. Unfortunately, urllib3.contrib.pyopenssl only maps SSLv23 and TLSv1, so it breaks:
https://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py#L68
</issue>
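For context: the breakage is a plain `KeyError`, because `ssl_wrap_socket()` resolves the requested protocol through the `_openssl_versions` dict in the module below, and the newer TLS constants are simply missing from it. A minimal sketch of the failure mode, assuming an interpreter that exposes `ssl.PROTOCOL_TLSv1_2` (Python 2.7.9+/3.4+); the dict values here are string stand-ins for the `OpenSSL.SSL` method constants, so pyOpenSSL is not needed to run it:

```python
# Illustration only: mirrors the lookup done in ssl_wrap_socket() below.
import ssl

_openssl_versions = {
    ssl.PROTOCOL_SSLv23: 'SSLv23_METHOD',  # stand-in for OpenSSL.SSL.SSLv23_METHOD
    ssl.PROTOCOL_TLSv1: 'TLSv1_METHOD',    # stand-in for OpenSSL.SSL.TLSv1_METHOD
}

try:
    _openssl_versions[ssl.PROTOCOL_TLSv1_2]  # what docker-py effectively asks for
except KeyError:
    print('PROTOCOL_TLSv1_2 is not in the map, so the PyOpenSSL backend fails')
```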
<code>
[start of urllib3/contrib/pyopenssl.py]
1 '''SSL with SNI_-support for Python 2. Follow these instructions if you would
2 like to verify SSL certificates in Python 2. Note, the default libraries do
3 *not* do certificate checking; you need to do additional work to validate
4 certificates yourself.
5
6 This needs the following packages installed:
7
8 * pyOpenSSL (tested with 0.13)
9 * ndg-httpsclient (tested with 0.3.2)
10 * pyasn1 (tested with 0.1.6)
11
12 You can install them with the following command:
13
14 pip install pyopenssl ndg-httpsclient pyasn1
15
16 To activate certificate checking, call
17 :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
18 before you begin making HTTP requests. This can be done in a ``sitecustomize``
19 module, or at any other time before your application begins using ``urllib3``,
20 like this::
21
22 try:
23 import urllib3.contrib.pyopenssl
24 urllib3.contrib.pyopenssl.inject_into_urllib3()
25 except ImportError:
26 pass
27
28 Now you can use :mod:`urllib3` as you normally would, and it will support SNI
29 when the required modules are installed.
30
31 Activating this module also has the positive side effect of disabling SSL/TLS
32 compression in Python 2 (see `CRIME attack`_).
33
34 If you want to configure the default list of supported cipher suites, you can
35 set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
36
37 Module Variables
38 ----------------
39
40 :var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
41
42 .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
43 .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
44
45 '''
46
47 try:
48 from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
49 from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
50 except SyntaxError as e:
51 raise ImportError(e)
52
53 import OpenSSL.SSL
54 from pyasn1.codec.der import decoder as der_decoder
55 from pyasn1.type import univ, constraint
56 from socket import _fileobject, timeout
57 import ssl
58 import select
59
60 from .. import connection
61 from .. import util
62
63 __all__ = ['inject_into_urllib3', 'extract_from_urllib3']
64
65 # SNI only *really* works if we can read the subjectAltName of certificates.
66 HAS_SNI = SUBJ_ALT_NAME_SUPPORT
67
68 # Map from urllib3 to PyOpenSSL compatible parameter-values.
69 _openssl_versions = {
70 ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
71 ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
72 }
73
74 try:
75 _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
76 except AttributeError:
77 pass
78
79 _openssl_verify = {
80 ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
81 ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
82 ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
83 + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
84 }
85
86 DEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS
87
88 # OpenSSL will only write 16K at a time
89 SSL_WRITE_BLOCKSIZE = 16384
90
91 try:
92 _ = memoryview
93 has_memoryview = True
94 except NameError:
95 has_memoryview = False
96
97 orig_util_HAS_SNI = util.HAS_SNI
98 orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
99
100
101 def inject_into_urllib3():
102 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
103
104 connection.ssl_wrap_socket = ssl_wrap_socket
105 util.HAS_SNI = HAS_SNI
106
107
108 def extract_from_urllib3():
109 'Undo monkey-patching by :func:`inject_into_urllib3`.'
110
111 connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
112 util.HAS_SNI = orig_util_HAS_SNI
113
114
115 ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
116 class SubjectAltName(BaseSubjectAltName):
117 '''ASN.1 implementation for subjectAltNames support'''
118
119 # There is no limit to how many SAN certificates a certificate may have,
120 # however this needs to have some limit so we'll set an arbitrarily high
121 # limit.
122 sizeSpec = univ.SequenceOf.sizeSpec + \
123 constraint.ValueSizeConstraint(1, 1024)
124
125
126 ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
127 def get_subj_alt_name(peer_cert):
128 # Search through extensions
129 dns_name = []
130 if not SUBJ_ALT_NAME_SUPPORT:
131 return dns_name
132
133 general_names = SubjectAltName()
134 for i in range(peer_cert.get_extension_count()):
135 ext = peer_cert.get_extension(i)
136 ext_name = ext.get_short_name()
137 if ext_name != 'subjectAltName':
138 continue
139
140 # PyOpenSSL returns extension data in ASN.1 encoded form
141 ext_dat = ext.get_data()
142 decoded_dat = der_decoder.decode(ext_dat,
143 asn1Spec=general_names)
144
145 for name in decoded_dat:
146 if not isinstance(name, SubjectAltName):
147 continue
148 for entry in range(len(name)):
149 component = name.getComponentByPosition(entry)
150 if component.getName() != 'dNSName':
151 continue
152 dns_name.append(str(component.getComponent()))
153
154 return dns_name
155
156
157 class WrappedSocket(object):
158 '''API-compatibility wrapper for Python OpenSSL's Connection-class.
159
160 Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
161 collector of pypy.
162 '''
163
164 def __init__(self, connection, socket, suppress_ragged_eofs=True):
165 self.connection = connection
166 self.socket = socket
167 self.suppress_ragged_eofs = suppress_ragged_eofs
168 self._makefile_refs = 0
169
170 def fileno(self):
171 return self.socket.fileno()
172
173 def makefile(self, mode, bufsize=-1):
174 self._makefile_refs += 1
175 return _fileobject(self, mode, bufsize, close=True)
176
177 def recv(self, *args, **kwargs):
178 try:
179 data = self.connection.recv(*args, **kwargs)
180 except OpenSSL.SSL.SysCallError as e:
181 if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
182 return b''
183 else:
184 raise
185 except OpenSSL.SSL.ZeroReturnError as e:
186 if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
187 return b''
188 else:
189 raise
190 except OpenSSL.SSL.WantReadError:
191 rd, wd, ed = select.select(
192 [self.socket], [], [], self.socket.gettimeout())
193 if not rd:
194 raise timeout('The read operation timed out')
195 else:
196 return self.recv(*args, **kwargs)
197 else:
198 return data
199
200 def settimeout(self, timeout):
201 return self.socket.settimeout(timeout)
202
203 def _send_until_done(self, data):
204 while True:
205 try:
206 return self.connection.send(data)
207 except OpenSSL.SSL.WantWriteError:
208 _, wlist, _ = select.select([], [self.socket], [],
209 self.socket.gettimeout())
210 if not wlist:
211 raise timeout()
212 continue
213
214 def sendall(self, data):
215 if has_memoryview and not isinstance(data, memoryview):
216 data = memoryview(data)
217
218 total_sent = 0
219 while total_sent < len(data):
220 sent = self._send_until_done(data[total_sent:total_sent+SSL_WRITE_BLOCKSIZE])
221 total_sent += sent
222
223 def shutdown(self):
224 # FIXME rethrow compatible exceptions should we ever use this
225 self.connection.shutdown()
226
227 def close(self):
228 if self._makefile_refs < 1:
229 return self.connection.close()
230 else:
231 self._makefile_refs -= 1
232
233 def getpeercert(self, binary_form=False):
234 x509 = self.connection.get_peer_certificate()
235
236 if not x509:
237 return x509
238
239 if binary_form:
240 return OpenSSL.crypto.dump_certificate(
241 OpenSSL.crypto.FILETYPE_ASN1,
242 x509)
243
244 return {
245 'subject': (
246 (('commonName', x509.get_subject().CN),),
247 ),
248 'subjectAltName': [
249 ('DNS', value)
250 for value in get_subj_alt_name(x509)
251 ]
252 }
253
254 def _reuse(self):
255 self._makefile_refs += 1
256
257 def _drop(self):
258 if self._makefile_refs < 1:
259 self.close()
260 else:
261 self._makefile_refs -= 1
262
263
264 def _verify_callback(cnx, x509, err_no, err_depth, return_code):
265 return err_no == 0
266
267
268 def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
269 ca_certs=None, server_hostname=None,
270 ssl_version=None):
271 ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
272 if certfile:
273 keyfile = keyfile or certfile # Match behaviour of the normal python ssl library
274 ctx.use_certificate_file(certfile)
275 if keyfile:
276 ctx.use_privatekey_file(keyfile)
277 if cert_reqs != ssl.CERT_NONE:
278 ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
279 if ca_certs:
280 try:
281 ctx.load_verify_locations(ca_certs, None)
282 except OpenSSL.SSL.Error as e:
283 raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
284 else:
285 ctx.set_default_verify_paths()
286
287 # Disable TLS compression to migitate CRIME attack (issue #309)
288 OP_NO_COMPRESSION = 0x20000
289 ctx.set_options(OP_NO_COMPRESSION)
290
291 # Set list of supported ciphersuites.
292 ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
293
294 cnx = OpenSSL.SSL.Connection(ctx, sock)
295 cnx.set_tlsext_host_name(server_hostname)
296 cnx.set_connect_state()
297 while True:
298 try:
299 cnx.do_handshake()
300 except OpenSSL.SSL.WantReadError:
301 rd, _, _ = select.select([sock], [], [], sock.gettimeout())
302 if not rd:
303 raise timeout('select timed out')
304 continue
305 except OpenSSL.SSL.Error as e:
306 raise ssl.SSLError('bad handshake: %r' % e)
307 break
308
309 return WrappedSocket(cnx, sock)
310
[end of urllib3/contrib/pyopenssl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/urllib3/contrib/pyopenssl.py b/urllib3/contrib/pyopenssl.py
--- a/urllib3/contrib/pyopenssl.py
+++ b/urllib3/contrib/pyopenssl.py
@@ -71,6 +71,12 @@
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
|
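The patch above guards each newer protocol with `hasattr()` checks on both `ssl` and `OpenSSL.SSL`, so the extra mappings are only registered when the interpreter and the local pyOpenSSL/OpenSSL build actually provide them. A hypothetical usage sketch of what this enables (the host and pool settings are illustrative, and TLSv1.2 support still depends on the local OpenSSL build):

```python
import ssl
import urllib3
import urllib3.contrib.pyopenssl

urllib3.contrib.pyopenssl.inject_into_urllib3()

# With the patch applied, requesting TLSv1.2 no longer raises KeyError,
# provided ssl.PROTOCOL_TLSv1_2 and OpenSSL.SSL.TLSv1_2_METHOD both exist.
pool = urllib3.HTTPSConnectionPool('example.org', port=443,
                                   ssl_version=ssl.PROTOCOL_TLSv1_2)
```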
{"golden_diff": "diff --git a/urllib3/contrib/pyopenssl.py b/urllib3/contrib/pyopenssl.py\n--- a/urllib3/contrib/pyopenssl.py\n+++ b/urllib3/contrib/pyopenssl.py\n@@ -71,6 +71,12 @@\n ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,\n }\n \n+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):\n+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD\n+\n+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):\n+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD\n+\n try:\n _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})\n except AttributeError:\n", "issue": "PyOpenSSL support doesn't work with TLSv1.1 and TLSv1.2\n`docker-py` tries to use latest protocol version in the ssl module, so use ssl.PROTOCOL_TLSv1_2. Unfortunately urllib3.contrib.pyopenssl only has SSLv2_3 and TLSv1 so it breaks:\n\nhttps://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py#L68\n\n", "before_files": [{"content": "'''SSL with SNI_-support for Python 2. Follow these instructions if you would\nlike to verify SSL certificates in Python 2. Note, the default libraries do\n*not* do certificate checking; you need to do additional work to validate\ncertificates yourself.\n\nThis needs the following packages installed:\n\n* pyOpenSSL (tested with 0.13)\n* ndg-httpsclient (tested with 0.3.2)\n* pyasn1 (tested with 0.1.6)\n\nYou can install them with the following command:\n\n pip install pyopenssl ndg-httpsclient pyasn1\n\nTo activate certificate checking, call\n:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code\nbefore you begin making HTTP requests. This can be done in a ``sitecustomize``\nmodule, or at any other time before your application begins using ``urllib3``,\nlike this::\n\n try:\n import urllib3.contrib.pyopenssl\n urllib3.contrib.pyopenssl.inject_into_urllib3()\n except ImportError:\n pass\n\nNow you can use :mod:`urllib3` as you normally would, and it will support SNI\nwhen the required modules are installed.\n\nActivating this module also has the positive side effect of disabling SSL/TLS\ncompression in Python 2 (see `CRIME attack`_).\n\nIf you want to configure the default list of supported cipher suites, you can\nset the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.\n\nModule Variables\n----------------\n\n:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.\n\n.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication\n.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)\n\n'''\n\ntry:\n from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT\n from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName\nexcept SyntaxError as e:\n raise ImportError(e)\n\nimport OpenSSL.SSL\nfrom pyasn1.codec.der import decoder as der_decoder\nfrom pyasn1.type import univ, constraint\nfrom socket import _fileobject, timeout\nimport ssl\nimport select\n\nfrom .. import connection\nfrom .. 
import util\n\n__all__ = ['inject_into_urllib3', 'extract_from_urllib3']\n\n# SNI only *really* works if we can read the subjectAltName of certificates.\nHAS_SNI = SUBJ_ALT_NAME_SUPPORT\n\n# Map from urllib3 to PyOpenSSL compatible parameter-values.\n_openssl_versions = {\n ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,\n ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,\n}\n\ntry:\n _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})\nexcept AttributeError:\n pass\n\n_openssl_verify = {\n ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,\n ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,\n ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER\n + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n}\n\nDEFAULT_SSL_CIPHER_LIST = util.ssl_.DEFAULT_CIPHERS\n\n# OpenSSL will only write 16K at a time\nSSL_WRITE_BLOCKSIZE = 16384\n\ntry:\n _ = memoryview\n has_memoryview = True\nexcept NameError:\n has_memoryview = False\n\norig_util_HAS_SNI = util.HAS_SNI\norig_connection_ssl_wrap_socket = connection.ssl_wrap_socket\n\n\ndef inject_into_urllib3():\n 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'\n\n connection.ssl_wrap_socket = ssl_wrap_socket\n util.HAS_SNI = HAS_SNI\n\n\ndef extract_from_urllib3():\n 'Undo monkey-patching by :func:`inject_into_urllib3`.'\n\n connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket\n util.HAS_SNI = orig_util_HAS_SNI\n\n\n### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.\nclass SubjectAltName(BaseSubjectAltName):\n '''ASN.1 implementation for subjectAltNames support'''\n\n # There is no limit to how many SAN certificates a certificate may have,\n # however this needs to have some limit so we'll set an arbitrarily high\n # limit.\n sizeSpec = univ.SequenceOf.sizeSpec + \\\n constraint.ValueSizeConstraint(1, 1024)\n\n\n### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.\ndef get_subj_alt_name(peer_cert):\n # Search through extensions\n dns_name = []\n if not SUBJ_ALT_NAME_SUPPORT:\n return dns_name\n\n general_names = SubjectAltName()\n for i in range(peer_cert.get_extension_count()):\n ext = peer_cert.get_extension(i)\n ext_name = ext.get_short_name()\n if ext_name != 'subjectAltName':\n continue\n\n # PyOpenSSL returns extension data in ASN.1 encoded form\n ext_dat = ext.get_data()\n decoded_dat = der_decoder.decode(ext_dat,\n asn1Spec=general_names)\n\n for name in decoded_dat:\n if not isinstance(name, SubjectAltName):\n continue\n for entry in range(len(name)):\n component = name.getComponentByPosition(entry)\n if component.getName() != 'dNSName':\n continue\n dns_name.append(str(component.getComponent()))\n\n return dns_name\n\n\nclass WrappedSocket(object):\n '''API-compatibility wrapper for Python OpenSSL's Connection-class.\n\n Note: _makefile_refs, _drop() and _reuse() are needed for the garbage\n collector of pypy.\n '''\n\n def __init__(self, connection, socket, suppress_ragged_eofs=True):\n self.connection = connection\n self.socket = socket\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self._makefile_refs = 0\n\n def fileno(self):\n return self.socket.fileno()\n\n def makefile(self, mode, bufsize=-1):\n self._makefile_refs += 1\n return _fileobject(self, mode, bufsize, close=True)\n\n def recv(self, *args, **kwargs):\n try:\n data = self.connection.recv(*args, **kwargs)\n except OpenSSL.SSL.SysCallError as e:\n if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):\n return b''\n else:\n raise\n except OpenSSL.SSL.ZeroReturnError as e:\n if self.connection.get_shutdown() == 
OpenSSL.SSL.RECEIVED_SHUTDOWN:\n return b''\n else:\n raise\n except OpenSSL.SSL.WantReadError:\n rd, wd, ed = select.select(\n [self.socket], [], [], self.socket.gettimeout())\n if not rd:\n raise timeout('The read operation timed out')\n else:\n return self.recv(*args, **kwargs)\n else:\n return data\n\n def settimeout(self, timeout):\n return self.socket.settimeout(timeout)\n\n def _send_until_done(self, data):\n while True:\n try:\n return self.connection.send(data)\n except OpenSSL.SSL.WantWriteError:\n _, wlist, _ = select.select([], [self.socket], [],\n self.socket.gettimeout())\n if not wlist:\n raise timeout()\n continue\n\n def sendall(self, data):\n if has_memoryview and not isinstance(data, memoryview):\n data = memoryview(data)\n\n total_sent = 0\n while total_sent < len(data):\n sent = self._send_until_done(data[total_sent:total_sent+SSL_WRITE_BLOCKSIZE])\n total_sent += sent\n\n def shutdown(self):\n # FIXME rethrow compatible exceptions should we ever use this\n self.connection.shutdown()\n\n def close(self):\n if self._makefile_refs < 1:\n return self.connection.close()\n else:\n self._makefile_refs -= 1\n\n def getpeercert(self, binary_form=False):\n x509 = self.connection.get_peer_certificate()\n\n if not x509:\n return x509\n\n if binary_form:\n return OpenSSL.crypto.dump_certificate(\n OpenSSL.crypto.FILETYPE_ASN1,\n x509)\n\n return {\n 'subject': (\n (('commonName', x509.get_subject().CN),),\n ),\n 'subjectAltName': [\n ('DNS', value)\n for value in get_subj_alt_name(x509)\n ]\n }\n\n def _reuse(self):\n self._makefile_refs += 1\n\n def _drop(self):\n if self._makefile_refs < 1:\n self.close()\n else:\n self._makefile_refs -= 1\n\n\ndef _verify_callback(cnx, x509, err_no, err_depth, return_code):\n return err_no == 0\n\n\ndef ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,\n ca_certs=None, server_hostname=None,\n ssl_version=None):\n ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])\n if certfile:\n keyfile = keyfile or certfile # Match behaviour of the normal python ssl library\n ctx.use_certificate_file(certfile)\n if keyfile:\n ctx.use_privatekey_file(keyfile)\n if cert_reqs != ssl.CERT_NONE:\n ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)\n if ca_certs:\n try:\n ctx.load_verify_locations(ca_certs, None)\n except OpenSSL.SSL.Error as e:\n raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)\n else:\n ctx.set_default_verify_paths()\n\n # Disable TLS compression to migitate CRIME attack (issue #309)\n OP_NO_COMPRESSION = 0x20000\n ctx.set_options(OP_NO_COMPRESSION)\n\n # Set list of supported ciphersuites.\n ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)\n\n cnx = OpenSSL.SSL.Connection(ctx, sock)\n cnx.set_tlsext_host_name(server_hostname)\n cnx.set_connect_state()\n while True:\n try:\n cnx.do_handshake()\n except OpenSSL.SSL.WantReadError:\n rd, _, _ = select.select([sock], [], [], sock.gettimeout())\n if not rd:\n raise timeout('select timed out')\n continue\n except OpenSSL.SSL.Error as e:\n raise ssl.SSLError('bad handshake: %r' % e)\n break\n\n return WrappedSocket(cnx, sock)\n", "path": "urllib3/contrib/pyopenssl.py"}]}
| 3,792 | 225 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.