| column | type | observed values |
|---|---|---|
| problem_id | string | lengths 18 to 22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13 to 58 |
| prompt | string | lengths 1.71k to 18.9k |
| golden_diff | string | lengths 145 to 5.13k |
| verification_info | string | lengths 465 to 23.6k |
| num_tokens_prompt | int64 | 556 to 4.1k |
| num_tokens_diff | int64 | 47 to 1.02k |
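
A minimal sketch of loading these rows with the Hugging Face `datasets` library; the dataset id is taken from the `source` column of the rows below, and both it and the split name are assumptions about the hosted dataset:

```python
from datasets import load_dataset

# "rasdani/github-patches" comes from the `source` column shown in the rows
# below; treat it and the "train" split name as assumptions.
ds = load_dataset("rasdani/github-patches", split="train")

print(ds.column_names)        # problem_id, source, task_type, in_source_id, ...
row = ds[0]
print(row["problem_id"])      # e.g. gh_patches_debug_4242
print(row["num_tokens_diff"])  # token count of the golden diff
```
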
gh_patches_debug_4242 | rasdani/github-patches | git_diff | kivy__python-for-android-1995 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TestGetSystemPythonExecutable.test_virtualenv test fail
The `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden.
Error was:
```
ModuleNotFoundError: No module named 'pytoml'
```
This can be reproduced locally via:
```sh
pytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv
```
</issue>
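
The failing import traces back to `pythonpackage.py`, whose helpers rely on the `pep517`, `pytoml` and `virtualenv` requirements declared in the `setup.py` quoted below. A minimal sketch of a version pin, assuming a newer `pep517` release stopped depending on `pytoml` (the boundary version is an assumption; the accepted change is in the golden diff after the code):

```python
# Sketch only: pin pep517 below the release assumed to have dropped its
# pytoml dependency, so the virtualenv-based tests can still import pytoml.
install_reqs = [
    'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
    'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
    'pep517<0.7.0',  # boundary version is an assumption
    'pytoml', 'virtualenv',
]
```
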
<code>
[start of setup.py]
1
2 import glob
3 from io import open # for open(..,encoding=...) parameter in python 2
4 from os import walk
5 from os.path import join, dirname, sep
6 import os
7 import re
8 from setuptools import setup, find_packages
9
10 # NOTE: All package data should also be set in MANIFEST.in
11
12 packages = find_packages()
13
14 package_data = {'': ['*.tmpl',
15 '*.patch', ], }
16
17 data_files = []
18
19
20
21 # must be a single statement since buildozer is currently parsing it, refs:
22 # https://github.com/kivy/buildozer/issues/722
23 install_reqs = [
24 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
25 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
26 'pep517', 'pytoml', 'virtualenv'
27 ]
28 # (pep517, pytoml and virtualenv are used by pythonpackage.py)
29
30 # By specifying every file manually, package_data will be able to
31 # include them in binary distributions. Note that we have to add
32 # everything as a 'pythonforandroid' rule, using '' apparently doesn't
33 # work.
34 def recursively_include(results, directory, patterns):
35 for root, subfolders, files in walk(directory):
36 for fn in files:
37 if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):
38 continue
39 filename = join(root, fn)
40 directory = 'pythonforandroid'
41 if directory not in results:
42 results[directory] = []
43 results[directory].append(join(*filename.split(sep)[1:]))
44
45 recursively_include(package_data, 'pythonforandroid/recipes',
46 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
47 '*.mk', '*.jam', ])
48 recursively_include(package_data, 'pythonforandroid/bootstraps',
49 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
50 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',
51 '*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ])
52 recursively_include(package_data, 'pythonforandroid/bootstraps',
53 ['sdl-config', ])
54 recursively_include(package_data, 'pythonforandroid/bootstraps/webview',
55 ['*.html', ])
56 recursively_include(package_data, 'pythonforandroid',
57 ['liblink', 'biglink', 'liblink.sh'])
58
59 with open(join(dirname(__file__), 'README.md'),
60 encoding="utf-8",
61 errors="replace",
62 ) as fileh:
63 long_description = fileh.read()
64
65 init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')
66 version = None
67 try:
68 with open(init_filen,
69 encoding="utf-8",
70 errors="replace"
71 ) as fileh:
72 lines = fileh.readlines()
73 except IOError:
74 pass
75 else:
76 for line in lines:
77 line = line.strip()
78 if line.startswith('__version__ = '):
79 matches = re.findall(r'["\'].+["\']', line)
80 if matches:
81 version = matches[0].strip("'").strip('"')
82 break
83 if version is None:
84 raise Exception('Error: version could not be loaded from {}'.format(init_filen))
85
86 setup(name='python-for-android',
87 version=version,
88 description='Android APK packager for Python scripts and apps',
89 long_description=long_description,
90 long_description_content_type='text/markdown',
91 author='The Kivy team',
92 author_email='[email protected]',
93 url='https://github.com/kivy/python-for-android',
94 license='MIT',
95 install_requires=install_reqs,
96 entry_points={
97 'console_scripts': [
98 'python-for-android = pythonforandroid.entrypoints:main',
99 'p4a = pythonforandroid.entrypoints:main',
100 ],
101 'distutils.commands': [
102 'apk = pythonforandroid.bdistapk:BdistAPK',
103 ],
104 },
105 classifiers = [
106 'Development Status :: 5 - Production/Stable',
107 'Intended Audience :: Developers',
108 'License :: OSI Approved :: MIT License',
109 'Operating System :: Microsoft :: Windows',
110 'Operating System :: OS Independent',
111 'Operating System :: POSIX :: Linux',
112 'Operating System :: MacOS :: MacOS X',
113 'Operating System :: Android',
114 'Programming Language :: C',
115 'Programming Language :: Python :: 3',
116 'Topic :: Software Development',
117 'Topic :: Utilities',
118 ],
119 packages=packages,
120 package_data=package_data,
121 )
122
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
install_reqs = [
'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
- 'pep517', 'pytoml', 'virtualenv'
+ 'pep517<0.7.0"', 'pytoml', 'virtualenv'
]
# (pep517, pytoml and virtualenv are used by pythonpackage.py)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n install_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n- 'pep517', 'pytoml', 'virtualenv'\n+ 'pep517<0.7.0\"', 'pytoml', 'virtualenv'\n ]\n # (pep517, pytoml and virtualenv are used by pythonpackage.py)\n", "issue": "TestGetSystemPythonExecutable.test_virtualenv test fail\nThe `TestGetSystemPythonExecutable.test_virtualenv` and `TestGetSystemPythonExecutable.test_venv` tests started failing all of a sudden.\r\nError was:\r\n```\r\nModuleNotFoundError: No module named \\'pytoml\\'\\n'\r\n```\r\nThis ca be reproduced in local via:\r\n```sh\r\npytest tests/test_pythonpackage_basic.py::TestGetSystemPythonExecutable::test_virtualenv\r\n```\r\n\r\n\n", "before_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517', 'pytoml', 'virtualenv'\n]\n# (pep517, pytoml and virtualenv are used by pythonpackage.py)\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any([glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns]):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from 
{}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n ],\n },\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]} | 1,898 | 150 |
gh_patches_debug_23758 | rasdani/github-patches | git_diff | holoviz__panel-705 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken image link in Django user guide
The link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ?
</issue>
<code>
[start of panel/util.py]
1 """
2 Various general utilities used in the panel codebase.
3 """
4 from __future__ import absolute_import, division, unicode_literals
5
6 import re
7 import sys
8 import inspect
9 import numbers
10 import datetime as dt
11
12 from datetime import datetime
13 from six import string_types
14 from collections import defaultdict, OrderedDict
15 try:
16 from collections.abc import MutableSequence, MutableMapping
17 except ImportError: # support for python>3.8
18 from collections import MutableSequence, MutableMapping
19
20 import param
21 import numpy as np
22
23 datetime_types = (np.datetime64, dt.datetime, dt.date)
24
25 if sys.version_info.major > 2:
26 unicode = str
27
28
29 def hashable(x):
30 if isinstance(x, MutableSequence):
31 return tuple(x)
32 elif isinstance(x, MutableMapping):
33 return tuple([(k,v) for k,v in x.items()])
34 else:
35 return x
36
37
38 def isIn(obj, objs):
39 """
40 Checks if the object is in the list of objects safely.
41 """
42 for o in objs:
43 if o is obj:
44 return True
45 try:
46 if o == obj:
47 return True
48 except:
49 pass
50 return False
51
52
53 def indexOf(obj, objs):
54 """
55 Returns the index of an object in a list of objects. Unlike the
56 list.index method this function only checks for identity not
57 equality.
58 """
59 for i, o in enumerate(objs):
60 if o is obj:
61 return i
62 try:
63 if o == obj:
64 return i
65 except:
66 pass
67 raise ValueError('%s not in list' % obj)
68
69
70 def as_unicode(obj):
71 """
72 Safely casts any object to unicode including regular string
73 (i.e. bytes) types in python 2.
74 """
75 if sys.version_info.major < 3 and isinstance(obj, str):
76 obj = obj.decode('utf-8')
77 return unicode(obj)
78
79
80 def param_name(name):
81 """
82 Removes the integer id from a Parameterized class name.
83 """
84 match = re.match(r'(.)+(\d){5}', name)
85 return name[:-5] if match else name
86
87
88 def unicode_repr(obj):
89 """
90 Returns a repr without the unicode prefix.
91 """
92 if sys.version_info.major == 2 and isinstance(obj, unicode):
93 return repr(obj)[1:]
94 return repr(obj)
95
96
97 def abbreviated_repr(value, max_length=25, natural_breaks=(',', ' ')):
98 """
99 Returns an abbreviated repr for the supplied object. Attempts to
100 find a natural break point while adhering to the maximum length.
101 """
102 vrepr = repr(value)
103 if len(vrepr) > max_length:
104 # Attempt to find natural cutoff point
105 abbrev = vrepr[max_length//2:]
106 natural_break = None
107 for brk in natural_breaks:
108 if brk in abbrev:
109 natural_break = abbrev.index(brk) + max_length//2
110 break
111 if natural_break and natural_break < max_length:
112 max_length = natural_break + 1
113
114 end_char = ''
115 if isinstance(value, list):
116 end_char = ']'
117 elif isinstance(value, OrderedDict):
118 end_char = '])'
119 elif isinstance(value, (dict, set)):
120 end_char = '}'
121 return vrepr[:max_length+1] + '...' + end_char
122 return vrepr
123
124
125 def param_reprs(parameterized, skip=None):
126 """
127 Returns a list of reprs for parameters on the parameterized object.
128 Skips default and empty values.
129 """
130 cls = type(parameterized).__name__
131 param_reprs = []
132 for p, v in sorted(parameterized.get_param_values()):
133 if v is parameterized.param[p].default: continue
134 elif v is None: continue
135 elif isinstance(v, string_types) and v == '': continue
136 elif isinstance(v, list) and v == []: continue
137 elif isinstance(v, dict) and v == {}: continue
138 elif (skip and p in skip) or (p == 'name' and v.startswith(cls)): continue
139 param_reprs.append('%s=%s' % (p, abbreviated_repr(v)))
140 return param_reprs
141
142
143 def full_groupby(l, key=lambda x: x):
144 """
145 Groupby implementation which does not require a prior sort
146 """
147 d = defaultdict(list)
148 for item in l:
149 d[key(item)].append(item)
150 return d.items()
151
152
153 def get_method_owner(meth):
154 """
155 Returns the instance owning the supplied instancemethod or
156 the class owning the supplied classmethod.
157 """
158 if inspect.ismethod(meth):
159 if sys.version_info < (3,0):
160 return meth.im_class if meth.im_self is None else meth.im_self
161 else:
162 return meth.__self__
163
164
165 def is_parameterized(obj):
166 """
167 Whether an object is a Parameterized class or instance.
168 """
169 return (isinstance(obj, param.Parameterized) or
170 (isinstance(obj, type) and issubclass(obj, param.Parameterized)))
171
172
173 def isdatetime(value):
174 """
175 Whether the array or scalar is recognized datetime type.
176 """
177 if isinstance(value, np.ndarray):
178 return (value.dtype.kind == "M" or
179 (value.dtype.kind == "O" and len(value) and
180 isinstance(value[0], datetime_types)))
181 elif isinstance(value, list):
182 return all(isinstance(d, datetime_types) for d in value)
183 else:
184 return isinstance(value, datetime_types)
185
186 def value_as_datetime(value):
187 """
188 Retrieve the value tuple as a tuple of datetime objects.
189 """
190 if isinstance(value, numbers.Number):
191 value = datetime.utcfromtimestamp(value / 1000)
192 return value
193
194
195 def value_as_date(value):
196 if isinstance(value, numbers.Number):
197 value = datetime.utcfromtimestamp(value / 1000).date()
198 elif isinstance(value, datetime):
199 value = value.date()
200 return value
201
[end of panel/util.py]
[start of doc/conf.py]
1 # -*- coding: utf-8 -*-
2
3 from nbsite.shared_conf import *
4
5 project = u'Panel'
6 authors = u'Panel contributors'
7 copyright = u'2019 ' + authors
8 description = 'High-level dashboarding for python visualization libraries'
9
10 import panel
11 version = release = str(panel.__version__)
12
13 html_static_path += ['_static']
14 html_theme = 'sphinx_ioam_theme'
15 html_theme_options = {
16 'logo': 'logo_horizontal.png',
17 'favicon': 'favicon.ico',
18 'css': 'site.css'
19 }
20
21 extensions += ['nbsite.gallery']
22
23 nbsite_gallery_conf = {
24 'github_org': 'pyviz',
25 'github_project': 'panel',
26 'galleries': {
27 'gallery': {
28 'title': 'Gallery',
29 'sections': [
30 {'path': 'demos',
31 'title': 'Demos',
32 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'},
33 {'path': 'simple',
34 'title': 'Simple Apps',
35 'description': 'Simple example apps meant to provide a quick introduction to Panel.'},
36 {'path': 'apis',
37 'title': 'APIs',
38 'description': ('Examples meant to demonstrate the usage of different Panel APIs '
39 'such as interact and reactive functions.')},
40 {'path': 'layout',
41 'title': 'Layouts',
42 'description': 'How to leverage Panel layout components to achieve complex layouts.'},
43 {'path': 'dynamic',
44 'title': 'Dynamic UIs',
45 'description': ('Examples demonstrating how to build dynamic UIs with components that'
46 'are added or removed interactively.')},
47 {'path': 'param',
48 'title': 'Param based apps',
49 'description': 'Using the Param library to express UIs independently of Panel.'},
50 {'path': 'links',
51 'title': 'Linking',
52 'description': ('Using Javascript based links to define interactivity without '
53 'without requiring a live kernel.')},
54 {'path': 'external',
55 'title': 'External libraries',
56 'description': 'Wrapping external libraries with Panel.'}
57 ]
58 },
59 'reference': {
60 'title': 'Reference Gallery',
61 'sections': [
62 'panes',
63 'layouts',
64 'widgets'
65 ]
66 }
67 },
68 'thumbnail_url': 'https://assets.holoviews.org/panel/thumbnails',
69 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'
70 }
71
72 _NAV = (
73 ('Getting started', 'getting_started/index'),
74 ('User Guide', 'user_guide/index'),
75 ('Gallery', 'gallery/index'),
76 ('Reference Gallery', 'reference/index'),
77 ('Developer Guide', 'developer_guide/index'),
78 ('FAQ', 'FAQ'),
79 ('About', 'about')
80 )
81
82 templates_path = ['_templates']
83
84 html_context.update({
85 'PROJECT': project,
86 'DESCRIPTION': description,
87 'AUTHOR': authors,
88 'VERSION': version,
89 'WEBSITE_URL': 'https://panel.pyviz.org',
90 'WEBSITE_SERVER': 'https://panel.pyviz.org',
91 'VERSION': version,
92 'NAV': _NAV,
93 'LINKS': _NAV,
94 'SOCIAL': (
95 ('Gitter', '//gitter.im/pyviz/pyviz'),
96 ('Github', '//github.com/pyviz/panel'),
97 )
98 })
99
100 nbbuild_patterns_to_take_along = ["simple.html"]
101
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -69,7 +69,7 @@
'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'
}
-_NAV = (
+_NAV = (
('Getting started', 'getting_started/index'),
('User Guide', 'user_guide/index'),
('Gallery', 'gallery/index'),
@@ -88,7 +88,6 @@
'VERSION': version,
'WEBSITE_URL': 'https://panel.pyviz.org',
'WEBSITE_SERVER': 'https://panel.pyviz.org',
- 'VERSION': version,
'NAV': _NAV,
'LINKS': _NAV,
'SOCIAL': (
diff --git a/panel/util.py b/panel/util.py
--- a/panel/util.py
+++ b/panel/util.py
@@ -12,9 +12,9 @@
from datetime import datetime
from six import string_types
from collections import defaultdict, OrderedDict
-try:
+try: # python >= 3.3
from collections.abc import MutableSequence, MutableMapping
-except ImportError: # support for python>3.8
+except ImportError:
from collections import MutableSequence, MutableMapping
import param
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -69,7 +69,7 @@\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n }\n \n-_NAV = (\n+_NAV = (\n ('Getting started', 'getting_started/index'),\n ('User Guide', 'user_guide/index'),\n ('Gallery', 'gallery/index'),\n@@ -88,7 +88,6 @@\n 'VERSION': version,\n 'WEBSITE_URL': 'https://panel.pyviz.org',\n 'WEBSITE_SERVER': 'https://panel.pyviz.org',\n- 'VERSION': version,\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\ndiff --git a/panel/util.py b/panel/util.py\n--- a/panel/util.py\n+++ b/panel/util.py\n@@ -12,9 +12,9 @@\n from datetime import datetime\n from six import string_types\n from collections import defaultdict, OrderedDict\n-try:\n+try: # python >= 3.3\n from collections.abc import MutableSequence, MutableMapping\n-except ImportError: # support for python>3.8\n+except ImportError:\n from collections import MutableSequence, MutableMapping\n \n import param\n", "issue": "Broken image link in Django user guide\nThe link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ?\nBroken image link in Django user guide\nThe link https://panel.pyviz.org/apps/django2/sliders.png in https://panel.pyviz.org/user_guide/Django_Apps.html doesn't seem to go anywhere; was that meant to point to https://parambokeh.pyviz.org/assets/sliders.png ?\n", "before_files": [{"content": "\"\"\"\nVarious general utilities used in the panel codebase.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport re\nimport sys\nimport inspect\nimport numbers\nimport datetime as dt\n\nfrom datetime import datetime\nfrom six import string_types\nfrom collections import defaultdict, OrderedDict\ntry:\n from collections.abc import MutableSequence, MutableMapping\nexcept ImportError: # support for python>3.8\n from collections import MutableSequence, MutableMapping\n\nimport param\nimport numpy as np\n\ndatetime_types = (np.datetime64, dt.datetime, dt.date)\n\nif sys.version_info.major > 2:\n unicode = str\n\n\ndef hashable(x):\n if isinstance(x, MutableSequence):\n return tuple(x)\n elif isinstance(x, MutableMapping):\n return tuple([(k,v) for k,v in x.items()])\n else:\n return x\n\n\ndef isIn(obj, objs):\n \"\"\"\n Checks if the object is in the list of objects safely.\n \"\"\"\n for o in objs:\n if o is obj:\n return True\n try:\n if o == obj:\n return True\n except:\n pass\n return False\n\n\ndef indexOf(obj, objs):\n \"\"\"\n Returns the index of an object in a list of objects. Unlike the\n list.index method this function only checks for identity not\n equality.\n \"\"\"\n for i, o in enumerate(objs):\n if o is obj:\n return i\n try:\n if o == obj:\n return i\n except:\n pass\n raise ValueError('%s not in list' % obj)\n\n\ndef as_unicode(obj):\n \"\"\"\n Safely casts any object to unicode including regular string\n (i.e. 
bytes) types in python 2.\n \"\"\"\n if sys.version_info.major < 3 and isinstance(obj, str):\n obj = obj.decode('utf-8')\n return unicode(obj)\n\n\ndef param_name(name):\n \"\"\"\n Removes the integer id from a Parameterized class name.\n \"\"\"\n match = re.match(r'(.)+(\\d){5}', name)\n return name[:-5] if match else name\n\n\ndef unicode_repr(obj):\n \"\"\"\n Returns a repr without the unicode prefix.\n \"\"\"\n if sys.version_info.major == 2 and isinstance(obj, unicode):\n return repr(obj)[1:]\n return repr(obj)\n\n\ndef abbreviated_repr(value, max_length=25, natural_breaks=(',', ' ')):\n \"\"\"\n Returns an abbreviated repr for the supplied object. Attempts to\n find a natural break point while adhering to the maximum length.\n \"\"\"\n vrepr = repr(value)\n if len(vrepr) > max_length:\n # Attempt to find natural cutoff point\n abbrev = vrepr[max_length//2:]\n natural_break = None\n for brk in natural_breaks:\n if brk in abbrev:\n natural_break = abbrev.index(brk) + max_length//2\n break\n if natural_break and natural_break < max_length:\n max_length = natural_break + 1\n\n end_char = ''\n if isinstance(value, list):\n end_char = ']'\n elif isinstance(value, OrderedDict):\n end_char = '])'\n elif isinstance(value, (dict, set)):\n end_char = '}'\n return vrepr[:max_length+1] + '...' + end_char\n return vrepr\n\n\ndef param_reprs(parameterized, skip=None):\n \"\"\"\n Returns a list of reprs for parameters on the parameterized object.\n Skips default and empty values.\n \"\"\"\n cls = type(parameterized).__name__\n param_reprs = []\n for p, v in sorted(parameterized.get_param_values()):\n if v is parameterized.param[p].default: continue\n elif v is None: continue\n elif isinstance(v, string_types) and v == '': continue\n elif isinstance(v, list) and v == []: continue\n elif isinstance(v, dict) and v == {}: continue\n elif (skip and p in skip) or (p == 'name' and v.startswith(cls)): continue\n param_reprs.append('%s=%s' % (p, abbreviated_repr(v)))\n return param_reprs\n\n\ndef full_groupby(l, key=lambda x: x):\n \"\"\"\n Groupby implementation which does not require a prior sort\n \"\"\"\n d = defaultdict(list)\n for item in l:\n d[key(item)].append(item)\n return d.items()\n\n\ndef get_method_owner(meth):\n \"\"\"\n Returns the instance owning the supplied instancemethod or\n the class owning the supplied classmethod.\n \"\"\"\n if inspect.ismethod(meth):\n if sys.version_info < (3,0):\n return meth.im_class if meth.im_self is None else meth.im_self\n else:\n return meth.__self__\n\n\ndef is_parameterized(obj):\n \"\"\"\n Whether an object is a Parameterized class or instance.\n \"\"\"\n return (isinstance(obj, param.Parameterized) or\n (isinstance(obj, type) and issubclass(obj, param.Parameterized)))\n\n\ndef isdatetime(value):\n \"\"\"\n Whether the array or scalar is recognized datetime type.\n \"\"\"\n if isinstance(value, np.ndarray):\n return (value.dtype.kind == \"M\" or\n (value.dtype.kind == \"O\" and len(value) and\n isinstance(value[0], datetime_types)))\n elif isinstance(value, list):\n return all(isinstance(d, datetime_types) for d in value)\n else:\n return isinstance(value, datetime_types)\n\ndef value_as_datetime(value):\n \"\"\"\n Retrieve the value tuple as a tuple of datetime objects.\n \"\"\"\n if isinstance(value, numbers.Number):\n value = datetime.utcfromtimestamp(value / 1000)\n return value\n\n\ndef value_as_date(value):\n if isinstance(value, numbers.Number):\n value = datetime.utcfromtimestamp(value / 1000).date()\n elif isinstance(value, datetime):\n value = 
value.date()\n return value\n", "path": "panel/util.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom nbsite.shared_conf import *\n\nproject = u'Panel'\nauthors = u'Panel contributors'\ncopyright = u'2019 ' + authors\ndescription = 'High-level dashboarding for python visualization libraries'\n\nimport panel\nversion = release = str(panel.__version__)\n\nhtml_static_path += ['_static']\nhtml_theme = 'sphinx_ioam_theme'\nhtml_theme_options = {\n 'logo': 'logo_horizontal.png',\n 'favicon': 'favicon.ico',\n 'css': 'site.css' \n}\n\nextensions += ['nbsite.gallery']\n\nnbsite_gallery_conf = {\n 'github_org': 'pyviz',\n 'github_project': 'panel',\n 'galleries': {\n 'gallery': {\n 'title': 'Gallery',\n 'sections': [\n {'path': 'demos',\n 'title': 'Demos',\n 'description': 'A set of sophisticated apps built to demonstrate the features of Panel.'},\n {'path': 'simple',\n 'title': 'Simple Apps',\n 'description': 'Simple example apps meant to provide a quick introduction to Panel.'},\n {'path': 'apis',\n 'title': 'APIs',\n 'description': ('Examples meant to demonstrate the usage of different Panel APIs '\n 'such as interact and reactive functions.')},\n {'path': 'layout',\n 'title': 'Layouts',\n 'description': 'How to leverage Panel layout components to achieve complex layouts.'},\n {'path': 'dynamic',\n 'title': 'Dynamic UIs',\n 'description': ('Examples demonstrating how to build dynamic UIs with components that'\n 'are added or removed interactively.')},\n {'path': 'param',\n 'title': 'Param based apps',\n 'description': 'Using the Param library to express UIs independently of Panel.'},\n {'path': 'links',\n 'title': 'Linking',\n 'description': ('Using Javascript based links to define interactivity without '\n 'without requiring a live kernel.')},\n {'path': 'external',\n 'title': 'External libraries',\n 'description': 'Wrapping external libraries with Panel.'}\n ]\n },\n 'reference': {\n 'title': 'Reference Gallery',\n 'sections': [\n 'panes',\n 'layouts',\n 'widgets'\n ]\n }\n },\n 'thumbnail_url': 'https://assets.holoviews.org/panel/thumbnails',\n 'deployment_url': 'https://panel-gallery.pyviz.demo.anaconda.com/'\n}\n\n_NAV = (\n ('Getting started', 'getting_started/index'),\n ('User Guide', 'user_guide/index'),\n ('Gallery', 'gallery/index'),\n ('Reference Gallery', 'reference/index'),\n ('Developer Guide', 'developer_guide/index'),\n ('FAQ', 'FAQ'),\n ('About', 'about')\n)\n\ntemplates_path = ['_templates']\n\nhtml_context.update({\n 'PROJECT': project,\n 'DESCRIPTION': description,\n 'AUTHOR': authors,\n 'VERSION': version,\n 'WEBSITE_URL': 'https://panel.pyviz.org',\n 'WEBSITE_SERVER': 'https://panel.pyviz.org',\n 'VERSION': version,\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\n ('Gitter', '//gitter.im/pyviz/pyviz'),\n ('Github', '//github.com/pyviz/panel'),\n )\n})\n\nnbbuild_patterns_to_take_along = [\"simple.html\"]\n", "path": "doc/conf.py"}]} | 3,392 | 283 |
gh_patches_debug_11585 | rasdani/github-patches | git_diff | ibis-project__ibis-4602 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: `.visualize(label_edges=True)`: for `ops.NodeList` we get `ValueError: tuple.index(x): x not in tuple`
Hi,
There's still one small thing that needs fixing in the great `.visualize(label_edges=True)` feature 😄 :
When running...
```py
import ibis
t = ibis.table((("a", "int32"), ("b", "string")))
expr = t[(t["a"] == 1) & (t["b"] == "x")]
expr.visualize(label_edges=True)
```
...I get:
```
Exception has occurred: ValueError - tuple.index(x): x not in tuple
```
at the following line:
https://github.com/ibis-project/ibis/blob/2c9cfea15fc4d5f61e9099c3b270ea61498b5e45/ibis/expr/visualize.py#L117
This is happening when `v` is an `ops.NodeList` and thus its `v.args` is a tuple `of tuples` and NOT a tuple of nodes.
Given that on the next line we have special logic for `ops.NodeList`, maybe one quick fix could be to use the `.values`/`.args[0]` in such cases:

Thank you.
</issue>
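
A minimal, self-contained sketch of the special case the issue suggests, using a hypothetical helper name; the accepted patch for this issue appears in the golden diff further down in this row:

```python
import ibis.expr.operations as ops


def edge_label(parent, child):
    """Return the ``<.arg_name>`` edge label for an edge child -> parent.

    Sketch only: for an ``ops.NodeList`` the children live in
    ``parent.values``, while ``parent.args`` is a tuple of tuples there.
    """
    if isinstance(parent, ops.NodeList):
        index = parent.values.index(child)
        arg_name = f"values[{index}]"
    else:
        index = parent.args.index(child)
        arg_name = parent.argnames[index]
    return f"<.{arg_name}>"
```
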
<code>
[start of ibis/expr/visualize.py]
1 import sys
2 import tempfile
3 from html import escape
4
5 import graphviz as gv
6
7 import ibis
8 import ibis.common.exceptions as com
9 import ibis.expr.operations as ops
10 from ibis.common.graph import Graph
11
12
13 def get_type(node):
14 try:
15 return str(node.output_dtype)
16 except (AttributeError, NotImplementedError):
17 pass
18
19 try:
20 schema = node.schema
21 except (AttributeError, NotImplementedError):
22 # TODO(kszucs): this branch should be removed
23 try:
24 # As a last resort try get the name of the output_type class
25 return node.output_type.__name__
26 except (AttributeError, NotImplementedError):
27 return '\u2205' # empty set character
28 except com.IbisError:
29 assert isinstance(node, ops.Join)
30 left_table_name = getattr(node.left, 'name', None) or ops.genname()
31 left_schema = node.left.schema
32 right_table_name = getattr(node.right, 'name', None) or ops.genname()
33 right_schema = node.right.schema
34 pairs = [
35 (f'{left_table_name}.{left_column}', type)
36 for left_column, type in left_schema.items()
37 ] + [
38 (f'{right_table_name}.{right_column}', type)
39 for right_column, type in right_schema.items()
40 ]
41 schema = ibis.schema(pairs)
42
43 return (
44 ''.join(
45 '<BR ALIGN="LEFT" /> <I>{}</I>: {}'.format(
46 escape(name), escape(str(type))
47 )
48 for name, type in zip(schema.names, schema.types)
49 )
50 + '<BR ALIGN="LEFT" />'
51 )
52
53
54 def get_label(node):
55 typename = get_type(node) # Already an escaped string
56 name = type(node).__name__
57 nodename = (
58 node.name
59 if isinstance(
60 node, (ops.Literal, ops.TableColumn, ops.Alias, ops.PhysicalTable)
61 )
62 else None
63 )
64 if nodename is not None:
65 if isinstance(node, ops.TableNode):
66 label_fmt = '<<I>{}</I>: <B>{}</B>{}>'
67 else:
68 label_fmt = '<<I>{}</I>: <B>{}</B><BR ALIGN="LEFT" />:: {}>'
69 label = label_fmt.format(escape(nodename), escape(name), typename)
70 else:
71 if isinstance(node, ops.TableNode):
72 label_fmt = '<<B>{}</B>{}>'
73 else:
74 label_fmt = '<<B>{}</B><BR ALIGN="LEFT" />:: {}>'
75 label = label_fmt.format(escape(name), typename)
76 return label
77
78
79 DEFAULT_NODE_ATTRS = {'shape': 'box', 'fontname': 'Deja Vu Sans Mono'}
80 DEFAULT_EDGE_ATTRS = {'fontname': 'Deja Vu Sans Mono'}
81
82
83 def to_graph(expr, node_attr=None, edge_attr=None, label_edges: bool = False):
84 graph = Graph.from_bfs(expr.op())
85
86 g = gv.Digraph(
87 node_attr=node_attr or DEFAULT_NODE_ATTRS,
88 edge_attr=edge_attr or DEFAULT_EDGE_ATTRS,
89 )
90
91 g.attr(rankdir='BT')
92
93 seen = set()
94 edges = set()
95
96 for v, us in graph.items():
97 if isinstance(v, ops.NodeList) and not v:
98 continue
99
100 vhash = str(hash(v))
101 if v not in seen:
102 g.node(vhash, label=get_label(v))
103 seen.add(v)
104
105 for u in us:
106 if isinstance(u, ops.NodeList) and not u:
107 continue
108
109 uhash = str(hash(u))
110 if u not in seen:
111 g.node(uhash, label=get_label(u))
112 seen.add(u)
113 if (edge := (u, v)) not in edges:
114 if not label_edges:
115 label = None
116 else:
117 index = v.args.index(u)
118 if isinstance(v, ops.NodeList):
119 arg_name = f"values[{index}]"
120 else:
121 arg_name = v.argnames[index]
122 label = f"<.{arg_name}>"
123
124 g.edge(uhash, vhash, label=label)
125 edges.add(edge)
126 return g
127
128
129 def draw(graph, path=None, format='png', verbose: bool = False):
130 if verbose:
131 print(graph.source, file=sys.stderr)
132
133 piped_source = graph.pipe(format=format)
134
135 if path is None:
136 with tempfile.NamedTemporaryFile(
137 delete=False, suffix=f'.{format}', mode='wb'
138 ) as f:
139 f.write(piped_source)
140 return f.name
141 else:
142 with open(path, mode='wb') as f:
143 f.write(piped_source)
144 return path
145
146
147 if __name__ == '__main__':
148 from argparse import ArgumentParser
149
150 from ibis import _
151
152 p = ArgumentParser(
153 description="Render a GraphViz SVG of an example ibis expression."
154 )
155
156 p.add_argument(
157 "-v",
158 "--verbose",
159 action="count",
160 default=0,
161 help="Print GraphViz DOT code to stderr.",
162 )
163 p.add_argument(
164 "-l",
165 "--label-edges",
166 action="store_true",
167 help="Show operation inputs as edge labels.",
168 )
169
170 args = p.parse_args()
171
172 left = ibis.table(dict(a="int64", b="string"), name="left")
173 right = ibis.table(dict(b="string", c="int64", d="string"), name="right")
174 expr = (
175 left.inner_join(right, "b")
176 .select(left.a, b=right.c, c=right.d)
177 .filter((_.a + _.b * 2 * _.b / _.b**3 > 4) & (_.b > 5))
178 .groupby(_.c)
179 .having(_.a.mean() > 0.0)
180 .aggregate(a_mean=_.a.mean(), b_sum=_.b.sum())
181 )
182 expr.visualize(verbose=args.verbose > 0, label_edges=args.label_edges)
183
[end of ibis/expr/visualize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/expr/visualize.py b/ibis/expr/visualize.py
--- a/ibis/expr/visualize.py
+++ b/ibis/expr/visualize.py
@@ -114,10 +114,11 @@
if not label_edges:
label = None
else:
- index = v.args.index(u)
if isinstance(v, ops.NodeList):
+ index = v.values.index(u)
arg_name = f"values[{index}]"
else:
+ index = v.args.index(u)
arg_name = v.argnames[index]
label = f"<.{arg_name}>"
| {"golden_diff": "diff --git a/ibis/expr/visualize.py b/ibis/expr/visualize.py\n--- a/ibis/expr/visualize.py\n+++ b/ibis/expr/visualize.py\n@@ -114,10 +114,11 @@\n if not label_edges:\n label = None\n else:\n- index = v.args.index(u)\n if isinstance(v, ops.NodeList):\n+ index = v.values.index(u)\n arg_name = f\"values[{index}]\"\n else:\n+ index = v.args.index(u)\n arg_name = v.argnames[index]\n label = f\"<.{arg_name}>\"\n", "issue": "bug: `.visualize(label_edges=True)` case ops.NodeList we get ValueError tuple.index(x): x not in tuple\nHi,\r\n\r\nThere's still one small thing that needs fixing in the great `.visualize(label_edges=True)` feature \ud83d\ude04 :\r\n\r\nWhen running...\r\n```py\r\nimport ibis\r\nt = ibis.table(((\"a\", \"int32\"), (\"b\", \"string\")))\r\nexpr = t[(t[\"a\"] == 1) & (t[\"b\"] == \"x\")]\r\n\r\nexpr.visualize(label_edges=True)\r\n```\r\n...I get:\r\n```\r\nException has occurred: ValueError - tuple.index(x): x not in tuple\r\n```\r\nat the following line:\r\nhttps://github.com/ibis-project/ibis/blob/2c9cfea15fc4d5f61e9099c3b270ea61498b5e45/ibis/expr/visualize.py#L117\r\n\r\nThis is happening when `v` is an `ops.NodeList` and thus its `v.args` is a tuple `of tuples` and NOT a tuple of nodes.\r\n\r\nGiven that on the next line we have special logic for `ops.NodeList`, maybe one quick fix could be to use the `.values`/`.args[0]` in such cases:\r\n\r\n\r\n\r\nThank you.\n", "before_files": [{"content": "import sys\nimport tempfile\nfrom html import escape\n\nimport graphviz as gv\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.operations as ops\nfrom ibis.common.graph import Graph\n\n\ndef get_type(node):\n try:\n return str(node.output_dtype)\n except (AttributeError, NotImplementedError):\n pass\n\n try:\n schema = node.schema\n except (AttributeError, NotImplementedError):\n # TODO(kszucs): this branch should be removed\n try:\n # As a last resort try get the name of the output_type class\n return node.output_type.__name__\n except (AttributeError, NotImplementedError):\n return '\\u2205' # empty set character\n except com.IbisError:\n assert isinstance(node, ops.Join)\n left_table_name = getattr(node.left, 'name', None) or ops.genname()\n left_schema = node.left.schema\n right_table_name = getattr(node.right, 'name', None) or ops.genname()\n right_schema = node.right.schema\n pairs = [\n (f'{left_table_name}.{left_column}', type)\n for left_column, type in left_schema.items()\n ] + [\n (f'{right_table_name}.{right_column}', type)\n for right_column, type in right_schema.items()\n ]\n schema = ibis.schema(pairs)\n\n return (\n ''.join(\n '<BR ALIGN=\"LEFT\" /> <I>{}</I>: {}'.format(\n escape(name), escape(str(type))\n )\n for name, type in zip(schema.names, schema.types)\n )\n + '<BR ALIGN=\"LEFT\" />'\n )\n\n\ndef get_label(node):\n typename = get_type(node) # Already an escaped string\n name = type(node).__name__\n nodename = (\n node.name\n if isinstance(\n node, (ops.Literal, ops.TableColumn, ops.Alias, ops.PhysicalTable)\n )\n else None\n )\n if nodename is not None:\n if isinstance(node, ops.TableNode):\n label_fmt = '<<I>{}</I>: <B>{}</B>{}>'\n else:\n label_fmt = '<<I>{}</I>: <B>{}</B><BR ALIGN=\"LEFT\" />:: {}>'\n label = label_fmt.format(escape(nodename), escape(name), typename)\n else:\n if isinstance(node, ops.TableNode):\n label_fmt = '<<B>{}</B>{}>'\n else:\n label_fmt = '<<B>{}</B><BR ALIGN=\"LEFT\" />:: {}>'\n label = label_fmt.format(escape(name), typename)\n return label\n\n\nDEFAULT_NODE_ATTRS = {'shape': 'box', 
'fontname': 'Deja Vu Sans Mono'}\nDEFAULT_EDGE_ATTRS = {'fontname': 'Deja Vu Sans Mono'}\n\n\ndef to_graph(expr, node_attr=None, edge_attr=None, label_edges: bool = False):\n graph = Graph.from_bfs(expr.op())\n\n g = gv.Digraph(\n node_attr=node_attr or DEFAULT_NODE_ATTRS,\n edge_attr=edge_attr or DEFAULT_EDGE_ATTRS,\n )\n\n g.attr(rankdir='BT')\n\n seen = set()\n edges = set()\n\n for v, us in graph.items():\n if isinstance(v, ops.NodeList) and not v:\n continue\n\n vhash = str(hash(v))\n if v not in seen:\n g.node(vhash, label=get_label(v))\n seen.add(v)\n\n for u in us:\n if isinstance(u, ops.NodeList) and not u:\n continue\n\n uhash = str(hash(u))\n if u not in seen:\n g.node(uhash, label=get_label(u))\n seen.add(u)\n if (edge := (u, v)) not in edges:\n if not label_edges:\n label = None\n else:\n index = v.args.index(u)\n if isinstance(v, ops.NodeList):\n arg_name = f\"values[{index}]\"\n else:\n arg_name = v.argnames[index]\n label = f\"<.{arg_name}>\"\n\n g.edge(uhash, vhash, label=label)\n edges.add(edge)\n return g\n\n\ndef draw(graph, path=None, format='png', verbose: bool = False):\n if verbose:\n print(graph.source, file=sys.stderr)\n\n piped_source = graph.pipe(format=format)\n\n if path is None:\n with tempfile.NamedTemporaryFile(\n delete=False, suffix=f'.{format}', mode='wb'\n ) as f:\n f.write(piped_source)\n return f.name\n else:\n with open(path, mode='wb') as f:\n f.write(piped_source)\n return path\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n from ibis import _\n\n p = ArgumentParser(\n description=\"Render a GraphViz SVG of an example ibis expression.\"\n )\n\n p.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Print GraphViz DOT code to stderr.\",\n )\n p.add_argument(\n \"-l\",\n \"--label-edges\",\n action=\"store_true\",\n help=\"Show operation inputs as edge labels.\",\n )\n\n args = p.parse_args()\n\n left = ibis.table(dict(a=\"int64\", b=\"string\"), name=\"left\")\n right = ibis.table(dict(b=\"string\", c=\"int64\", d=\"string\"), name=\"right\")\n expr = (\n left.inner_join(right, \"b\")\n .select(left.a, b=right.c, c=right.d)\n .filter((_.a + _.b * 2 * _.b / _.b**3 > 4) & (_.b > 5))\n .groupby(_.c)\n .having(_.a.mean() > 0.0)\n .aggregate(a_mean=_.a.mean(), b_sum=_.b.sum())\n )\n expr.visualize(verbose=args.verbose > 0, label_edges=args.label_edges)\n", "path": "ibis/expr/visualize.py"}]} | 2,641 | 147 |
gh_patches_debug_37107 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Telegram connector needs update for event message
# Description
The telegram connector uses message.target['id'] instead of message.target. This leads to problems when trying to use default_target per normal configuration and usage of the event flow logic.
## Steps to Reproduce
Setup telegram as the default connector... call the telegram connector.send with a target...
```
await self.opsdroid.send(Message(text='hello', target='<useridhere>'))
```
The default_target is also always None.
```
await self.opsdroid.send(Message(text='hello'))
```
You can hack around it with..
```
sillytarget = { 'id': <useridhere> }
await self.opsdroid.send(Message(text='hello', target=sillytarget))
```
## Expected Functionality
message.target should work like the other core connectors
## Experienced Functionality
Errors out.
```
opsdroid | ERROR opsdroid.connector.telegram.send_message(): Unable to respond.
```
## Versions
- opsdroid: latest/stable
- python 3.7.6
- docker image: opsdroid/opsdroid:latest
## Configuration File
```yaml
connectors:
## Telegram (core)
telegram:
token: "......"
# optional
update-interval: 0.5 # Interval between checking for messages
whitelisted-users: # List of users who can speak to the bot, if not set anyone can speak
- ......
```
## Additional Details
None
</issue>
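
A minimal sketch of the direction such a fix could take, condensed from the golden diff further down in this row; the class and method names here are hypothetical and the bodies are trimmed to the relevant lines:

```python
class ConnectorTelegramSketch:
    """Trimmed sketch of the three places the fix touches."""

    def __init__(self, config):
        self.default_user = config.get("default-user", None)
        # Fall back to the configured user so opsdroid.send(Message(...))
        # works without an explicit target.
        self.default_target = self.default_user

    @staticmethod
    def target_from_update(result):
        # Keep the chat id itself as the target, not the whole chat dict.
        return result["message"]["chat"]["id"]

    @staticmethod
    def payload_for(message):
        # message.target is now the chat id, so it can be used directly.
        return {"chat_id": message.target, "text": message.text}
```
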
<code>
[start of opsdroid/connector/telegram/__init__.py]
1 """A connector for Telegram."""
2 import asyncio
3 import logging
4 import aiohttp
5 from voluptuous import Required
6
7 from opsdroid.connector import Connector, register_event
8 from opsdroid.events import Message, Image
9
10
11 _LOGGER = logging.getLogger(__name__)
12 CONFIG_SCHEMA = {
13 Required("token"): str,
14 "update-interval": float,
15 "default-user": str,
16 "whitelisted-users": list,
17 }
18
19
20 class ConnectorTelegram(Connector):
21 """A connector the the char service Telegram."""
22
23 def __init__(self, config, opsdroid=None):
24 """Create the connector.
25
26 Args:
27 config (dict): configuration settings from the
28 file config.yaml.
29 opsdroid (OpsDroid): An instance of opsdroid.core.
30
31 """
32 _LOGGER.debug(_("Loaded Telegram Connector"))
33 super().__init__(config, opsdroid=opsdroid)
34 self.name = "telegram"
35 self.opsdroid = opsdroid
36 self.latest_update = None
37 self.default_target = None
38 self.listening = True
39 self.default_user = config.get("default-user", None)
40 self.whitelisted_users = config.get("whitelisted-users", None)
41 self.update_interval = config.get("update-interval", 1)
42 self.session = None
43 self._closing = asyncio.Event()
44 self.loop = asyncio.get_event_loop()
45
46 try:
47 self.token = config["token"]
48 except (KeyError, AttributeError):
49 _LOGGER.error(
50 _(
51 "Unable to login: Access token is missing. Telegram connector will be unavailable."
52 )
53 )
54
55 @staticmethod
56 def get_user(response):
57 """Get user from response.
58
59 The API response is different depending on how
60 the bot is set up and where the message is coming
61 from. This method was created to keep if/else
62 statements to a minium on _parse_message.
63
64 Args:
65 response (dict): Response returned by aiohttp.ClientSession.
66
67 """
68 user = None
69 user_id = None
70
71 if "username" in response["message"]["from"]:
72 user = response["message"]["from"]["username"]
73
74 elif "first_name" in response["message"]["from"]:
75 user = response["message"]["from"]["first_name"]
76 user_id = response["message"]["from"]["id"]
77
78 return user, user_id
79
80 def handle_user_permission(self, response, user):
81 """Handle user permissions.
82
83 This will check if the user that tried to talk with
84 the bot is allowed to do so. It will also work with
85 userid to improve security.
86
87 """
88 user_id = response["message"]["from"]["id"]
89
90 if (
91 not self.whitelisted_users
92 or user in self.whitelisted_users
93 or user_id in self.whitelisted_users
94 ):
95 return True
96
97 return False
98
99 def build_url(self, method):
100 """Build the url to connect to the API.
101
102 Args:
103 method (string): API call end point.
104
105 Return:
106 String that represents the full API url.
107
108 """
109 return "https://api.telegram.org/bot{}/{}".format(self.token, method)
110
111 async def delete_webhook(self):
112 """Delete Telegram webhook.
113
114 The Telegram api will thrown an 409 error when an webhook is
115 active and a call to getUpdates is made. This method will
116 try to request the deletion of the webhook to make the getUpdate
117 request possible.
118
119 """
120 _LOGGER.debug(_("Sending deleteWebhook request to Telegram..."))
121 resp = await self.session.get(self.build_url("deleteWebhook"))
122
123 if resp.status == 200:
124 _LOGGER.debug(_("Telegram webhook deleted successfully."))
125 else:
126 _LOGGER.debug(_("Unable to delete webhook."))
127
128 async def connect(self):
129 """Connect to Telegram.
130
131 This method is not an authorization call. It basically
132 checks if the API token was provided and makes an API
133 call to Telegram and evaluates the status of the call.
134
135 """
136
137 _LOGGER.debug(_("Connecting to Telegram."))
138 self.session = aiohttp.ClientSession()
139
140 resp = await self.session.get(self.build_url("getMe"))
141
142 if resp.status != 200:
143 _LOGGER.error(_("Unable to connect."))
144 _LOGGER.error(_("Telegram error %s, %s."), resp.status, resp.text)
145 else:
146 json = await resp.json()
147 _LOGGER.debug(json)
148 _LOGGER.debug(_("Connected to Telegram as %s."), json["result"]["username"])
149
150 async def _parse_message(self, response):
151 """Handle logic to parse a received message.
152
153 Since everyone can send a private message to any user/bot
154 in Telegram, this method allows to set a list of whitelisted
155 users that can interact with the bot. If any other user tries
156 to interact with the bot the command is not parsed and instead
157 the bot will inform that user that he is not allowed to talk
158 with the bot.
159
160 We also set self.latest_update to +1 in order to get the next
161 available message (or an empty {} if no message has been received
162 yet) with the method self._get_messages().
163
164 Args:
165 response (dict): Response returned by aiohttp.ClientSession.
166
167 """
168 for result in response["result"]:
169 _LOGGER.debug(result)
170 if result.get("edited_message", None):
171 result["message"] = result.pop("edited_message")
172 if "channel" in result["message"]["chat"]["type"]:
173 _LOGGER.debug(
174 _("Channel message parsing not supported " "- Ignoring message.")
175 )
176 elif "message" in result and "text" in result["message"]:
177 user, user_id = self.get_user(result)
178 message = Message(
179 text=result["message"]["text"],
180 user=user,
181 user_id=user_id,
182 target=result["message"]["chat"],
183 connector=self,
184 )
185
186 if self.handle_user_permission(result, user):
187 await self.opsdroid.parse(message)
188 else:
189 message.text = (
190 "Sorry, you're not allowed " "to speak with this bot."
191 )
192 await self.send(message)
193 self.latest_update = result["update_id"] + 1
194 elif (
195 "message" in result
196 and "sticker" in result["message"]
197 and "emoji" in result["message"]["sticker"]
198 ):
199 self.latest_update = result["update_id"] + 1
200 _LOGGER.debug(
201 _("Emoji message parsing not supported - Ignoring message.")
202 )
203 else:
204 _LOGGER.error(_("Unable to parse the message."))
205
206 async def _get_messages(self):
207 """Connect to the Telegram API.
208
209 Uses an aiohttp ClientSession to connect to Telegram API
210 and get the latest messages from the chat service.
211
212 The data["offset"] is used to consume every new message, the API
213 returns an int - "update_id" value. In order to get the next
214 message this value needs to be increased by 1 the next time
215 the API is called. If no new messages exists the API will just
216 return an empty {}.
217
218 """
219 data = {}
220 if self.latest_update is not None:
221 data["offset"] = self.latest_update
222
223 await asyncio.sleep(self.update_interval)
224 resp = await self.session.get(self.build_url("getUpdates"), params=data)
225
226 if resp.status == 409:
227 _LOGGER.info(
228 _(
229 "Can't get updates because previous webhook is still active. Will try to delete webhook."
230 )
231 )
232 await self.delete_webhook()
233
234 if resp.status != 200:
235 _LOGGER.error(_("Telegram error %s, %s."), resp.status, resp.text)
236 self.listening = False
237 else:
238 json = await resp.json()
239
240 await self._parse_message(json)
241
242 async def get_messages_loop(self):
243 """Listen for and parse new messages.
244
245 The bot will always listen to all opened chat windows,
246 as long as opsdroid is running. Since anyone can start
247 a new chat with the bot is recommended that a list of
248 users to be whitelisted be provided in config.yaml.
249
250 The method will sleep asynchronously at the end of
251 every loop. The time can either be specified in the
252 config.yaml with the param update-interval - this
253 defaults to 1 second.
254
255 """
256 while self.listening:
257 await self._get_messages()
258
259 async def listen(self):
260 """Listen method of the connector.
261
262 Every connector has to implement the listen method. When an
263 infinite loop is running, it becomes hard to cancel this task.
264 So we are creating a task and set it on a variable so we can
265 cancel the task.
266
267 """
268 message_getter = self.loop.create_task(await self.get_messages_loop())
269 await self._closing.wait()
270 message_getter.cancel()
271
272 @register_event(Message)
273 async def send_message(self, message):
274 """Respond with a message.
275
276 Args:
277 message (object): An instance of Message.
278
279 """
280 _LOGGER.debug(_("Responding with: %s."), message.text)
281
282 data = dict()
283 data["chat_id"] = message.target["id"]
284 data["text"] = message.text
285 resp = await self.session.post(self.build_url("sendMessage"), data=data)
286 if resp.status == 200:
287 _LOGGER.debug(_("Successfully responded."))
288 else:
289 _LOGGER.error(_("Unable to respond."))
290
291 @register_event(Image)
292 async def send_image(self, file_event):
293 """Send Image to Telegram.
294
295 Gets the chat id from the channel and then
296 sends the bytes of the image as multipart/form-data.
297
298 """
299 data = aiohttp.FormData()
300 data.add_field(
301 "chat_id", str(file_event.target["id"]), content_type="multipart/form-data"
302 )
303 data.add_field(
304 "photo",
305 await file_event.get_file_bytes(),
306 content_type="multipart/form-data",
307 )
308
309 resp = await self.session.post(self.build_url("sendPhoto"), data=data)
310 if resp.status == 200:
311 _LOGGER.debug(_("Sent %s image successfully."), file_event.name)
312 else:
313 _LOGGER.debug(_("Unable to send image - Status Code %s."), resp.status)
314
315 async def disconnect(self):
316 """Disconnect from Telegram.
317
318 Stops the infinite loop found in self._listen(), closes
319 aiohttp session.
320
321 """
322 self.listening = False
323 self._closing.set()
324 await self.session.close()
325
[end of opsdroid/connector/telegram/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/connector/telegram/__init__.py b/opsdroid/connector/telegram/__init__.py
--- a/opsdroid/connector/telegram/__init__.py
+++ b/opsdroid/connector/telegram/__init__.py
@@ -34,9 +34,9 @@
self.name = "telegram"
self.opsdroid = opsdroid
self.latest_update = None
- self.default_target = None
self.listening = True
self.default_user = config.get("default-user", None)
+ self.default_target = self.default_user
self.whitelisted_users = config.get("whitelisted-users", None)
self.update_interval = config.get("update-interval", 1)
self.session = None
@@ -167,9 +167,13 @@
"""
for result in response["result"]:
_LOGGER.debug(result)
+
if result.get("edited_message", None):
result["message"] = result.pop("edited_message")
- if "channel" in result["message"]["chat"]["type"]:
+ if result.get("channel_post", None) or result.get(
+ "edited_channel_post", None
+ ):
+ self.latest_update = result["update_id"] + 1
_LOGGER.debug(
_("Channel message parsing not supported " "- Ignoring message.")
)
@@ -179,7 +183,7 @@
text=result["message"]["text"],
user=user,
user_id=user_id,
- target=result["message"]["chat"],
+ target=result["message"]["chat"]["id"],
connector=self,
)
@@ -277,10 +281,12 @@
message (object): An instance of Message.
"""
- _LOGGER.debug(_("Responding with: %s."), message.text)
+ _LOGGER.debug(
+ _("Responding with: '%s' at target: '%s'"), message.text, message.target
+ )
data = dict()
- data["chat_id"] = message.target["id"]
+ data["chat_id"] = message.target
data["text"] = message.text
resp = await self.session.post(self.build_url("sendMessage"), data=data)
if resp.status == 200:
| {"golden_diff": "diff --git a/opsdroid/connector/telegram/__init__.py b/opsdroid/connector/telegram/__init__.py\n--- a/opsdroid/connector/telegram/__init__.py\n+++ b/opsdroid/connector/telegram/__init__.py\n@@ -34,9 +34,9 @@\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n- self.default_target = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n+ self.default_target = self.default_user\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n@@ -167,9 +167,13 @@\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n+\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n- if \"channel\" in result[\"message\"][\"chat\"][\"type\"]:\n+ if result.get(\"channel_post\", None) or result.get(\n+ \"edited_channel_post\", None\n+ ):\n+ self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n@@ -179,7 +183,7 @@\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n- target=result[\"message\"][\"chat\"],\n+ target=result[\"message\"][\"chat\"][\"id\"],\n connector=self,\n )\n \n@@ -277,10 +281,12 @@\n message (object): An instance of Message.\n \n \"\"\"\n- _LOGGER.debug(_(\"Responding with: %s.\"), message.text)\n+ _LOGGER.debug(\n+ _(\"Responding with: '%s' at target: '%s'\"), message.text, message.target\n+ )\n \n data = dict()\n- data[\"chat_id\"] = message.target[\"id\"]\n+ data[\"chat_id\"] = message.target\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n", "issue": "Telegram connector needs update for event message\n# Description\r\nThe telegram connector uses message.target['id'] instead of message.target. This leads to problems when trying to use default_target per normal configuration and usage of the event flow logic.\r\n\r\n## Steps to Reproduce\r\nSetup telegram as the default connector... call the telegram connector.send with a target... \r\n```\r\nawait self.opsdroid.send(Message(text='hello', target='<useridhere>') \r\n```\r\nThe default_target is also always None.\r\n```\r\nawait self.opsdroid.send(Message(text='hello') \r\n```\r\nYou can hack around it with..\r\n```\r\nsillytarget = { 'id': <useridhere> }\r\nawait self.opsdroid.send(Message(text='hello', target=sillytarget) \r\n```\r\n## Expected Functionality\r\nmessage.target should work like the other core connectors\r\n\r\n## Experienced Functionality\r\nErrors out.\r\n```\r\nopsdroid | ERROR opsdroid.connector.telegram.send_message(): Unable to respond. \r\n```\r\n## Versions\r\n\r\n- opsdroid: latest/stable\r\n- python 3.7.6\r\n- docker image: opsdroid/opsdroid:latest\r\n\r\n## Configuration File\r\n```yaml\r\nconnectors:\r\n## Telegram (core)\r\n telegram:\r\n token: \"......\"\r\n # optional\r\n update-interval: 0.5 # Interval between checking for messages\r\n whitelisted-users: # List of users who can speak to the bot, if not set anyone can speak\r\n - ......\r\n```\r\n## Additional Details\r\nNone\r\n\r\n<!-- Love opsdroid? 
Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A connector for Telegram.\"\"\"\nimport asyncio\nimport logging\nimport aiohttp\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Image\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"update-interval\": float,\n \"default-user\": str,\n \"whitelisted-users\": list,\n}\n\n\nclass ConnectorTelegram(Connector):\n \"\"\"A connector the the char service Telegram.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\n\n Args:\n config (dict): configuration settings from the\n file config.yaml.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n _LOGGER.debug(_(\"Loaded Telegram Connector\"))\n super().__init__(config, opsdroid=opsdroid)\n self.name = \"telegram\"\n self.opsdroid = opsdroid\n self.latest_update = None\n self.default_target = None\n self.listening = True\n self.default_user = config.get(\"default-user\", None)\n self.whitelisted_users = config.get(\"whitelisted-users\", None)\n self.update_interval = config.get(\"update-interval\", 1)\n self.session = None\n self._closing = asyncio.Event()\n self.loop = asyncio.get_event_loop()\n\n try:\n self.token = config[\"token\"]\n except (KeyError, AttributeError):\n _LOGGER.error(\n _(\n \"Unable to login: Access token is missing. Telegram connector will be unavailable.\"\n )\n )\n\n @staticmethod\n def get_user(response):\n \"\"\"Get user from response.\n\n The API response is different depending on how\n the bot is set up and where the message is coming\n from. This method was created to keep if/else\n statements to a minium on _parse_message.\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n user = None\n user_id = None\n\n if \"username\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"username\"]\n\n elif \"first_name\" in response[\"message\"][\"from\"]:\n user = response[\"message\"][\"from\"][\"first_name\"]\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n return user, user_id\n\n def handle_user_permission(self, response, user):\n \"\"\"Handle user permissions.\n\n This will check if the user that tried to talk with\n the bot is allowed to do so. It will also work with\n userid to improve security.\n\n \"\"\"\n user_id = response[\"message\"][\"from\"][\"id\"]\n\n if (\n not self.whitelisted_users\n or user in self.whitelisted_users\n or user_id in self.whitelisted_users\n ):\n return True\n\n return False\n\n def build_url(self, method):\n \"\"\"Build the url to connect to the API.\n\n Args:\n method (string): API call end point.\n\n Return:\n String that represents the full API url.\n\n \"\"\"\n return \"https://api.telegram.org/bot{}/{}\".format(self.token, method)\n\n async def delete_webhook(self):\n \"\"\"Delete Telegram webhook.\n\n The Telegram api will thrown an 409 error when an webhook is\n active and a call to getUpdates is made. 
This method will\n try to request the deletion of the webhook to make the getUpdate\n request possible.\n\n \"\"\"\n _LOGGER.debug(_(\"Sending deleteWebhook request to Telegram...\"))\n resp = await self.session.get(self.build_url(\"deleteWebhook\"))\n\n if resp.status == 200:\n _LOGGER.debug(_(\"Telegram webhook deleted successfully.\"))\n else:\n _LOGGER.debug(_(\"Unable to delete webhook.\"))\n\n async def connect(self):\n \"\"\"Connect to Telegram.\n\n This method is not an authorization call. It basically\n checks if the API token was provided and makes an API\n call to Telegram and evaluates the status of the call.\n\n \"\"\"\n\n _LOGGER.debug(_(\"Connecting to Telegram.\"))\n self.session = aiohttp.ClientSession()\n\n resp = await self.session.get(self.build_url(\"getMe\"))\n\n if resp.status != 200:\n _LOGGER.error(_(\"Unable to connect.\"))\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n else:\n json = await resp.json()\n _LOGGER.debug(json)\n _LOGGER.debug(_(\"Connected to Telegram as %s.\"), json[\"result\"][\"username\"])\n\n async def _parse_message(self, response):\n \"\"\"Handle logic to parse a received message.\n\n Since everyone can send a private message to any user/bot\n in Telegram, this method allows to set a list of whitelisted\n users that can interact with the bot. If any other user tries\n to interact with the bot the command is not parsed and instead\n the bot will inform that user that he is not allowed to talk\n with the bot.\n\n We also set self.latest_update to +1 in order to get the next\n available message (or an empty {} if no message has been received\n yet) with the method self._get_messages().\n\n Args:\n response (dict): Response returned by aiohttp.ClientSession.\n\n \"\"\"\n for result in response[\"result\"]:\n _LOGGER.debug(result)\n if result.get(\"edited_message\", None):\n result[\"message\"] = result.pop(\"edited_message\")\n if \"channel\" in result[\"message\"][\"chat\"][\"type\"]:\n _LOGGER.debug(\n _(\"Channel message parsing not supported \" \"- Ignoring message.\")\n )\n elif \"message\" in result and \"text\" in result[\"message\"]:\n user, user_id = self.get_user(result)\n message = Message(\n text=result[\"message\"][\"text\"],\n user=user,\n user_id=user_id,\n target=result[\"message\"][\"chat\"],\n connector=self,\n )\n\n if self.handle_user_permission(result, user):\n await self.opsdroid.parse(message)\n else:\n message.text = (\n \"Sorry, you're not allowed \" \"to speak with this bot.\"\n )\n await self.send(message)\n self.latest_update = result[\"update_id\"] + 1\n elif (\n \"message\" in result\n and \"sticker\" in result[\"message\"]\n and \"emoji\" in result[\"message\"][\"sticker\"]\n ):\n self.latest_update = result[\"update_id\"] + 1\n _LOGGER.debug(\n _(\"Emoji message parsing not supported - Ignoring message.\")\n )\n else:\n _LOGGER.error(_(\"Unable to parse the message.\"))\n\n async def _get_messages(self):\n \"\"\"Connect to the Telegram API.\n\n Uses an aiohttp ClientSession to connect to Telegram API\n and get the latest messages from the chat service.\n\n The data[\"offset\"] is used to consume every new message, the API\n returns an int - \"update_id\" value. In order to get the next\n message this value needs to be increased by 1 the next time\n the API is called. 
If no new messages exists the API will just\n return an empty {}.\n\n \"\"\"\n data = {}\n if self.latest_update is not None:\n data[\"offset\"] = self.latest_update\n\n await asyncio.sleep(self.update_interval)\n resp = await self.session.get(self.build_url(\"getUpdates\"), params=data)\n\n if resp.status == 409:\n _LOGGER.info(\n _(\n \"Can't get updates because previous webhook is still active. Will try to delete webhook.\"\n )\n )\n await self.delete_webhook()\n\n if resp.status != 200:\n _LOGGER.error(_(\"Telegram error %s, %s.\"), resp.status, resp.text)\n self.listening = False\n else:\n json = await resp.json()\n\n await self._parse_message(json)\n\n async def get_messages_loop(self):\n \"\"\"Listen for and parse new messages.\n\n The bot will always listen to all opened chat windows,\n as long as opsdroid is running. Since anyone can start\n a new chat with the bot is recommended that a list of\n users to be whitelisted be provided in config.yaml.\n\n The method will sleep asynchronously at the end of\n every loop. The time can either be specified in the\n config.yaml with the param update-interval - this\n defaults to 1 second.\n\n \"\"\"\n while self.listening:\n await self._get_messages()\n\n async def listen(self):\n \"\"\"Listen method of the connector.\n\n Every connector has to implement the listen method. When an\n infinite loop is running, it becomes hard to cancel this task.\n So we are creating a task and set it on a variable so we can\n cancel the task.\n\n \"\"\"\n message_getter = self.loop.create_task(await self.get_messages_loop())\n await self._closing.wait()\n message_getter.cancel()\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\n\n Args:\n message (object): An instance of Message.\n\n \"\"\"\n _LOGGER.debug(_(\"Responding with: %s.\"), message.text)\n\n data = dict()\n data[\"chat_id\"] = message.target[\"id\"]\n data[\"text\"] = message.text\n resp = await self.session.post(self.build_url(\"sendMessage\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Successfully responded.\"))\n else:\n _LOGGER.error(_(\"Unable to respond.\"))\n\n @register_event(Image)\n async def send_image(self, file_event):\n \"\"\"Send Image to Telegram.\n\n Gets the chat id from the channel and then\n sends the bytes of the image as multipart/form-data.\n\n \"\"\"\n data = aiohttp.FormData()\n data.add_field(\n \"chat_id\", str(file_event.target[\"id\"]), content_type=\"multipart/form-data\"\n )\n data.add_field(\n \"photo\",\n await file_event.get_file_bytes(),\n content_type=\"multipart/form-data\",\n )\n\n resp = await self.session.post(self.build_url(\"sendPhoto\"), data=data)\n if resp.status == 200:\n _LOGGER.debug(_(\"Sent %s image successfully.\"), file_event.name)\n else:\n _LOGGER.debug(_(\"Unable to send image - Status Code %s.\"), resp.status)\n\n async def disconnect(self):\n \"\"\"Disconnect from Telegram.\n\n Stops the infinite loop found in self._listen(), closes\n aiohttp session.\n\n \"\"\"\n self.listening = False\n self._closing.set()\n await self.session.close()\n", "path": "opsdroid/connector/telegram/__init__.py"}]} | 4,071 | 504 |
gh_patches_debug_19267 | rasdani/github-patches | git_diff | pyodide__pyodide-2935 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 0.21
I went through the issues and tagged things for the 0.21 release: https://github.com/pyodide/pyodide/milestone/11
Mostly naming-related subjects need to be discussed/addressed before the release, IMO, since once we release they will be harder to change.
</issue>
<code>
[start of pyodide-build/pyodide_build/__init__.py]
1 __version__ = "0.21.0a3"
2
[end of pyodide-build/pyodide_build/__init__.py]
[start of src/py/pyodide/__init__.py]
1 # When the pyodide package is imported, both the js and the pyodide_js modules
2 # will be available to import from. Not all functions in pyodide_js will work
3 # until after pyodide is first imported, imported functions from pyodide_js
4 # should not be used at import time. It is fine to use js functions at import
5 # time.
6 #
7 # All pure Python code that does not require js or pyodide_js should go in
8 # the _pyodide package.
9 #
10 # This package is imported by the test suite as well, and currently we don't use
11 # pytest mocks for js or pyodide_js, so make sure to test "if IN_BROWSER" before
12 # importing from these.
13 __version__ = "0.21.0a3"
14
15 __all__ = ["__version__"]
16
17 from typing import Any
18
19 from . import _state # noqa: F401
20 from .code import CodeRunner # noqa: F401
21 from .code import eval_code # noqa: F401
22 from .code import eval_code_async # noqa: F401
23 from .code import find_imports # noqa: F401
24 from .code import should_quiet # noqa: F401
25 from .ffi import ConversionError # noqa: F401
26 from .ffi import JsException # noqa: F401
27 from .ffi import JsProxy # noqa: F401
28 from .ffi import create_once_callable # noqa: F401
29 from .ffi import create_proxy # noqa: F401
30 from .ffi import destroy_proxies # noqa: F401
31 from .ffi import register_js_module # noqa: F401
32 from .ffi import to_js # noqa: F401
33 from .ffi import unregister_js_module # noqa: F401
34 from .http import open_url # noqa: F401
35
36 DEPRECATED_LIST = {
37 "CodeRunner": "code",
38 "eval_code": "code",
39 "eval_code_async": "code",
40 "find_imports": "code",
41 "should_quiet": "code",
42 "open_url": "http",
43 "ConversionError": "ffi",
44 "JsException": "ffi",
45 "JsProxy": "ffi",
46 "create_once_callable": "ffi",
47 "create_proxy": "ffi",
48 "destroy_proxies": "ffi",
49 "to_js": "ffi",
50 "register_js_module": "ffi",
51 "unregister_js_module": "ffi",
52 }
53
54
55 from .webloop import _initialize_event_loop
56
57 _initialize_event_loop()
58 del _initialize_event_loop
59
60
61 def __dir__() -> list[str]:
62 return __all__
63
64
65 for name in DEPRECATED_LIST:
66 globals()[f"_deprecated_{name}"] = globals()[name]
67 del globals()[name]
68
69
70 def __getattr__(name: str) -> Any:
71 if name in DEPRECATED_LIST:
72 from warnings import warn
73
74 warn(
75 f"pyodide.{name} has been moved to pyodide.{DEPRECATED_LIST[name]}.{name} "
76 "Accessing it through the pyodide module is deprecated.",
77 FutureWarning,
78 )
79 # Put the name back so we won't warn next time this name is accessed
80 globals()[name] = globals()[f"_deprecated_{name}"]
81 return globals()[name]
82 raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
83
[end of src/py/pyodide/__init__.py]
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2
3 # -- Path setup --------------------------------------------------------------
4
5 import atexit
6 import os
7 import shutil
8 import subprocess
9 import sys
10 from pathlib import Path
11 from typing import Any
12 from unittest import mock
13
14 # -- Project information -----------------------------------------------------
15
16 project = "Pyodide"
17 copyright = "2019-2022, Pyodide contributors and Mozilla"
18 pyodide_version = "0.21.0a3"
19
20 if ".dev" in pyodide_version or os.environ.get("READTHEDOCS_VERSION") == "latest":
21 CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/"
22 else:
23 CDN_URL = f"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/"
24
25 # -- General configuration ---------------------------------------------------
26
27 # If your documentation needs a minimal Sphinx version, state it here.
28 #
29 # needs_sphinx = '1.0'
30
31 extensions = [
32 "sphinx.ext.autodoc",
33 "sphinx.ext.autosummary",
34 "sphinxcontrib.napoleon",
35 "myst_parser",
36 "sphinx_js",
37 "autodocsumm",
38 "sphinx_panels",
39 "sphinx_pyodide",
40 "sphinx_argparse_cli",
41 "versionwarning.extension",
42 "sphinx_issues",
43 ]
44
45 myst_enable_extensions = ["substitution"]
46
47 js_language = "typescript"
48 jsdoc_config_path = "../src/js/tsconfig.json"
49 root_for_relative_js_paths = "../src/"
50 issues_github_path = "pyodide/pyodide"
51
52 versionwarning_messages = {
53 "latest": (
54 "This is the development version of the documentation. "
55 'See <a href="https://pyodide.org/">here</a> for latest stable '
56 "documentation. Please do not use Pyodide with non "
57 "versioned (`dev`) URLs from the CDN for deployed applications!"
58 )
59 }
60 versionwarning_body_selector = "#main-content > div"
61
62 autosummary_generate = True
63 autodoc_default_flags = ["members", "inherited-members"]
64
65 # Add modules to be mocked.
66 mock_modules = ["ruamel.yaml", "tomli"]
67
68 # Add any paths that contain templates here, relative to this directory.
69 templates_path = ["_templates"]
70
71 # The suffix(es) of source filenames.
72 source_suffix = [".rst", ".md"]
73
74 # The master toctree document.
75 master_doc = "index"
76
77 # The language for content autogenerated by Sphinx.
78 language = None
79
80 # List of patterns, relative to source directory, that match files and
81 # directories to ignore when looking for source files.
82 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md"]
83
84 # The name of the Pygments (syntax highlighting) style to use.
85 pygments_style = None
86
87 # -- Options for HTML output -------------------------------------------------
88
89 # The theme to use for HTML and HTML Help pages. See the documentation for
90 # a list of builtin themes.
91 #
92 html_theme = "sphinx_book_theme"
93 html_logo = "_static/img/pyodide-logo.png"
94
95 # theme-specific options
96 html_theme_options: dict[str, Any] = {}
97
98 # paths that contain custom static files (such as style sheets)
99 html_static_path = ["_static"]
100
101
102 html_css_files = [
103 "css/pyodide.css",
104 ]
105
106 # Custom sidebar templates, must be a dictionary that maps document names
107 # to template names.
108 # html_sidebars = {}
109
110 # -- Options for HTMLHelp output ---------------------------------------------
111
112 # Output file base name for HTML help builder.
113 htmlhelp_basename = "Pyodidedoc"
114
115 # A list of files that should not be packed into the epub file.
116 epub_exclude_files = ["search.html"]
117
118
119 def delete_attrs(cls):
120 for name in dir(cls):
121 if not name.startswith("_"):
122 try:
123 delattr(cls, name)
124 except Exception:
125 pass
126
127
128 # Try not to cause side effects if we are imported incidentally.
129
130 try:
131 import sphinx
132
133 IN_SPHINX = hasattr(sphinx, "application")
134 except ImportError:
135 IN_SPHINX = False
136
137 IN_READTHEDOCS = "READTHEDOCS" in os.environ
138
139 if IN_READTHEDOCS:
140 env = {"PYODIDE_BASE_URL": CDN_URL}
141 os.makedirs("_build/html", exist_ok=True)
142 res = subprocess.check_output(
143 ["make", "-C", "..", "docs/_build/html/console.html"],
144 env=env,
145 stderr=subprocess.STDOUT,
146 encoding="utf-8",
147 )
148 print(res)
149
150 if IN_SPHINX:
151 # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10
152 import collections
153 from typing import Callable, Mapping
154
155 collections.Mapping = Mapping # type: ignore[attr-defined]
156 collections.Callable = Callable # type: ignore[attr-defined]
157
158 base_dir = Path(__file__).resolve().parent.parent
159 path_dirs = [
160 str(base_dir),
161 str(base_dir / "pyodide-build"),
162 str(base_dir / "docs/sphinx_pyodide"),
163 str(base_dir / "src/py"),
164 str(base_dir / "packages/micropip/src"),
165 ]
166 sys.path = path_dirs + sys.path
167
168 import micropip # noqa: F401
169 import pyodide
170
171 # We hacked it so that autodoc will look for submodules, but only if we import
172 # them here. TODO: look these up in the source directory?
173 import pyodide.code
174 import pyodide.console
175 import pyodide.ffi.wrappers
176 import pyodide.http
177 import pyodide.webloop
178
179 # The full version, including alpha/beta/rc tags.
180 release = version = pyodide.__version__
181 html_title = f"Version {version}"
182
183 shutil.copy("../src/core/pyproxy.ts", "../src/js/pyproxy.gen.ts")
184 shutil.copy("../src/core/error_handling.ts", "../src/js/error_handling.gen.ts")
185 js_source_path = [str(x) for x in Path("../src/js").glob("*.ts")]
186
187 def remove_pyproxy_gen_ts():
188 Path("../src/js/pyproxy.gen.ts").unlink(missing_ok=True)
189
190 atexit.register(remove_pyproxy_gen_ts)
191
192 os.environ["PATH"] += f':{str(Path("../src/js/node_modules/.bin").resolve())}'
193 print(os.environ["PATH"])
194 if IN_READTHEDOCS:
195 subprocess.run(["npm", "ci"], cwd="../src/js")
196 elif not shutil.which("typedoc"):
197 raise Exception(
198 "Before building the Pyodide docs you must run 'npm install' in 'src/js'."
199 )
200
201 # Prevent API docs for webloop methods: they are the same as for base event loop
202 # and it clutters api docs too much
203 delete_attrs(pyodide.webloop.WebLoop)
204 delete_attrs(pyodide.webloop.WebLoopPolicy)
205 delete_attrs(pyodide.console.PyodideConsole)
206
207 for module in mock_modules:
208 sys.modules[module] = mock.Mock()
209
210
211 # https://github.com/sphinx-doc/sphinx/issues/4054
212 def globalReplace(app, docname, source):
213 result = source[0]
214 for key in app.config.global_replacements:
215 result = result.replace(key, app.config.global_replacements[key])
216 source[0] = result
217
218
219 global_replacements = {"{{PYODIDE_CDN_URL}}": CDN_URL}
220
221
222 def setup(app):
223 app.add_config_value("global_replacements", {}, True)
224 app.connect("source-read", globalReplace)
225
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -15,7 +15,7 @@
project = "Pyodide"
copyright = "2019-2022, Pyodide contributors and Mozilla"
-pyodide_version = "0.21.0a3"
+pyodide_version = "0.21.0"
if ".dev" in pyodide_version or os.environ.get("READTHEDOCS_VERSION") == "latest":
CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/"
diff --git a/pyodide-build/pyodide_build/__init__.py b/pyodide-build/pyodide_build/__init__.py
--- a/pyodide-build/pyodide_build/__init__.py
+++ b/pyodide-build/pyodide_build/__init__.py
@@ -1 +1 @@
-__version__ = "0.21.0a3"
+__version__ = "0.21.0"
diff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py
--- a/src/py/pyodide/__init__.py
+++ b/src/py/pyodide/__init__.py
@@ -10,7 +10,7 @@
# This package is imported by the test suite as well, and currently we don't use
# pytest mocks for js or pyodide_js, so make sure to test "if IN_BROWSER" before
# importing from these.
-__version__ = "0.21.0a3"
+__version__ = "0.21.0"
__all__ = ["__version__"]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -15,7 +15,7 @@\n \n project = \"Pyodide\"\n copyright = \"2019-2022, Pyodide contributors and Mozilla\"\n-pyodide_version = \"0.21.0a3\"\n+pyodide_version = \"0.21.0\"\n \n if \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\ndiff --git a/pyodide-build/pyodide_build/__init__.py b/pyodide-build/pyodide_build/__init__.py\n--- a/pyodide-build/pyodide_build/__init__.py\n+++ b/pyodide-build/pyodide_build/__init__.py\n@@ -1 +1 @@\n-__version__ = \"0.21.0a3\"\n+__version__ = \"0.21.0\"\ndiff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py\n--- a/src/py/pyodide/__init__.py\n+++ b/src/py/pyodide/__init__.py\n@@ -10,7 +10,7 @@\n # This package is imported by the test suite as well, and currently we don't use\n # pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n # importing from these.\n-__version__ = \"0.21.0a3\"\n+__version__ = \"0.21.0\"\n \n __all__ = [\"__version__\"]\n", "issue": "Release 0.21\nI went through issues and tagged things for the 0.21 release https://github.com/pyodide/pyodide/milestone/11 \r\n\r\nMostly naming related subjects need to be discussed/addressed before the release IMO, since once we release they will be harder to change.\n", "before_files": [{"content": "__version__ = \"0.21.0a3\"\n", "path": "pyodide-build/pyodide_build/__init__.py"}, {"content": "# When the pyodide package is imported, both the js and the pyodide_js modules\n# will be available to import from. Not all functions in pyodide_js will work\n# until after pyodide is first imported, imported functions from pyodide_js\n# should not be used at import time. It is fine to use js functions at import\n# time.\n#\n# All pure Python code that does not require js or pyodide_js should go in\n# the _pyodide package.\n#\n# This package is imported by the test suite as well, and currently we don't use\n# pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n# importing from these.\n__version__ = \"0.21.0a3\"\n\n__all__ = [\"__version__\"]\n\nfrom typing import Any\n\nfrom . 
import _state # noqa: F401\nfrom .code import CodeRunner # noqa: F401\nfrom .code import eval_code # noqa: F401\nfrom .code import eval_code_async # noqa: F401\nfrom .code import find_imports # noqa: F401\nfrom .code import should_quiet # noqa: F401\nfrom .ffi import ConversionError # noqa: F401\nfrom .ffi import JsException # noqa: F401\nfrom .ffi import JsProxy # noqa: F401\nfrom .ffi import create_once_callable # noqa: F401\nfrom .ffi import create_proxy # noqa: F401\nfrom .ffi import destroy_proxies # noqa: F401\nfrom .ffi import register_js_module # noqa: F401\nfrom .ffi import to_js # noqa: F401\nfrom .ffi import unregister_js_module # noqa: F401\nfrom .http import open_url # noqa: F401\n\nDEPRECATED_LIST = {\n \"CodeRunner\": \"code\",\n \"eval_code\": \"code\",\n \"eval_code_async\": \"code\",\n \"find_imports\": \"code\",\n \"should_quiet\": \"code\",\n \"open_url\": \"http\",\n \"ConversionError\": \"ffi\",\n \"JsException\": \"ffi\",\n \"JsProxy\": \"ffi\",\n \"create_once_callable\": \"ffi\",\n \"create_proxy\": \"ffi\",\n \"destroy_proxies\": \"ffi\",\n \"to_js\": \"ffi\",\n \"register_js_module\": \"ffi\",\n \"unregister_js_module\": \"ffi\",\n}\n\n\nfrom .webloop import _initialize_event_loop\n\n_initialize_event_loop()\ndel _initialize_event_loop\n\n\ndef __dir__() -> list[str]:\n return __all__\n\n\nfor name in DEPRECATED_LIST:\n globals()[f\"_deprecated_{name}\"] = globals()[name]\n del globals()[name]\n\n\ndef __getattr__(name: str) -> Any:\n if name in DEPRECATED_LIST:\n from warnings import warn\n\n warn(\n f\"pyodide.{name} has been moved to pyodide.{DEPRECATED_LIST[name]}.{name} \"\n \"Accessing it through the pyodide module is deprecated.\",\n FutureWarning,\n )\n # Put the name back so we won't warn next time this name is accessed\n globals()[name] = globals()[f\"_deprecated_{name}\"]\n return globals()[name]\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "path": "src/py/pyodide/__init__.py"}, {"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.21.0a3\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. 
\"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. 
TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py"}]} | 3,831 | 375 |
gh_patches_debug_36038 | rasdani/github-patches | git_diff | scverse__scanpy-260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes`
I just tried
```python
import scanpy.api as sc
sc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism')
```
I would expect scanpy to complain that it does not know `'strange_organism'`, but instead I get the error
```python
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-13-6a41b361ab41> in <module>()
1 import scanpy.api as sc
----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio')
~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org)
34 s.add_attribute_to_xml('mgi_symbol')
35 else:
---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
37 return None
38 s.add_attribute_to_xml('chromosome_name')
NameError: name 'logg' is not defined
```
It seems to me that `queries/__init__.py` is missing a `from .. import logging as logg` statement.
Would it maybe also make sense to show the message that an organism is not available at verbosity level 1 instead of 4?
</issue>
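
To make the suggestion above concrete, here is a minimal sketch of what the reporter proposes: adding the missing import at the top of `scanpy/queries/__init__.py`. The condensed `if org not in (...)` branch is only an abbreviation of the module's existing `if/elif/else` chain (shown in full in the listing below), and the verbosity level is left at 4; this is an illustration of the suggestion, not necessarily the final patch.

```python
# Top of scanpy/queries/__init__.py with the missing import added;
# everything else in the module stays as in the listing below.
from .. import logging as logg  # resolves the NameError in the traceback above


def mitochondrial_genes(host, org):
    # ... BioMart query construction for 'hsapiens' / 'mmusculus' elided ...
    if org not in ('hsapiens', 'mmusculus'):
        # Unknown organisms are now logged and skipped instead of raising NameError.
        logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
        return None
```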
<code>
[start of scanpy/queries/__init__.py]
1 import pandas as pd
2
3
4 def mitochondrial_genes(host, org):
5 """Mitochondrial gene symbols for specific organism through BioMart.
6
7 Parameters
8 ----------
9 host : {{'www.ensembl.org', ...}}
10 A valid BioMart host URL.
11 org : {{'hsapiens', 'mmusculus'}}
12 Organism to query. Currently available are human ('hsapiens') and mouse
13 ('mmusculus').
14
15 Returns
16 -------
17 A `pd.Index` containing mitochondrial gene symbols.
18 """
19 try:
20 from bioservices import biomart
21 except ImportError:
22 raise ImportError(
23 'You need to install the `bioservices` module.')
24 from io import StringIO
25 s = biomart.BioMart(host=host)
26
27 # building query
28 s.new_query()
29 if org == 'hsapiens':
30 s.add_dataset_to_xml('hsapiens_gene_ensembl')
31 s.add_attribute_to_xml('hgnc_symbol')
32 elif org == 'mmusculus':
33 s.add_dataset_to_xml('mmusculus_gene_ensembl')
34 s.add_attribute_to_xml('mgi_symbol')
35 else:
36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
37 return None
38 s.add_attribute_to_xml('chromosome_name')
39 xml = s.get_xml()
40
41 # parsing mitochondrial gene symbols
42 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
43 res.columns = ['symbol', 'chromosome_name']
44 res = res.dropna()
45 res = res[res['chromosome_name'] == 'MT']
46 res = res.set_index('symbol')
47 res = res[~res.index.duplicated(keep='first')]
48
49 return res.index
50
51
52 def gene_coordinates(host, org, gene, chr_exclude=[]):
53 """Retrieve gene coordinates for specific organism through BioMart.
54 Parameters
55 ----------
56 host : {{'www.ensembl.org', ...}}
57 A valid BioMart host URL. Can be used to control genome build.
58 org : {{'hsapiens', 'mmusculus'}}
59 Organism to query. Currently available are human ('hsapiens') and mouse
60 ('mmusculus').
61 gene :
62 The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve
63 coordinates.
64 chr_exclude :
65 A list of chromosomes to exclude from query.
66 Returns
67 -------
68 A `pd.DataFrame` containing gene coordinates for the specified gene symbol.
69 """
70 try:
71 from bioservices import biomart
72 except ImportError:
73 raise ImportError(
74 'You need to install the `bioservices` module.')
75 from io import StringIO
76 s = biomart.BioMart(host=host)
77
78 # building query
79 s.new_query()
80 if org == 'hsapiens':
81 s.add_dataset_to_xml('hsapiens_gene_ensembl')
82 s.add_attribute_to_xml('hgnc_symbol')
83 elif org == 'mmusculus':
84 s.add_dataset_to_xml('mmusculus_gene_ensembl')
85 s.add_attribute_to_xml('mgi_symbol')
86 else:
87 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
88 return None
89 s.add_attribute_to_xml('chromosome_name')
90 s.add_attribute_to_xml('start_position')
91 s.add_attribute_to_xml('end_position')
92 xml = s.get_xml()
93
94 # parsing gene coordinates
95 res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
96 res.columns = ['symbol', 'chromosome_name', 'start', 'end']
97 res = res.dropna()
98 res = res[~res['chromosome_name'].isin(chr_exclude)]
99 res = res.set_index('symbol')
100
101 return res.loc[[gene], :]
102
[end of scanpy/queries/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py
--- a/scanpy/queries/__init__.py
+++ b/scanpy/queries/__init__.py
@@ -1,4 +1,5 @@
import pandas as pd
+from .. import logging as logg
def mitochondrial_genes(host, org):
@@ -8,9 +9,9 @@
----------
host : {{'www.ensembl.org', ...}}
A valid BioMart host URL.
- org : {{'hsapiens', 'mmusculus'}}
- Organism to query. Currently available are human ('hsapiens') and mouse
- ('mmusculus').
+ org : {{'hsapiens', 'mmusculus', 'drerio'}}
+ Organism to query. Currently available are human ('hsapiens'), mouse
+ ('mmusculus') and zebrafish ('drerio').
Returns
-------
@@ -32,6 +33,9 @@
elif org == 'mmusculus':
s.add_dataset_to_xml('mmusculus_gene_ensembl')
s.add_attribute_to_xml('mgi_symbol')
+ elif org == 'drerio':
+ s.add_dataset_to_xml('drerio_gene_ensembl')
+ s.add_attribute_to_xml('zfin_id_symbol')
else:
logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
return None
@@ -55,9 +59,9 @@
----------
host : {{'www.ensembl.org', ...}}
A valid BioMart host URL. Can be used to control genome build.
- org : {{'hsapiens', 'mmusculus'}}
- Organism to query. Currently available are human ('hsapiens') and mouse
- ('mmusculus').
+ org : {{'hsapiens', 'mmusculus', 'drerio'}}
+ Organism to query. Currently available are human ('hsapiens'), mouse
+ ('mmusculus') and zebrafish ('drerio').
gene :
The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve
coordinates.
@@ -83,6 +87,9 @@
elif org == 'mmusculus':
s.add_dataset_to_xml('mmusculus_gene_ensembl')
s.add_attribute_to_xml('mgi_symbol')
+ elif org == 'drerio':
+ s.add_dataset_to_xml('drerio_gene_ensembl')
+ s.add_attribute_to_xml('zfin_id_symbol')
else:
logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
return None
| {"golden_diff": "diff --git a/scanpy/queries/__init__.py b/scanpy/queries/__init__.py\n--- a/scanpy/queries/__init__.py\n+++ b/scanpy/queries/__init__.py\n@@ -1,4 +1,5 @@\n import pandas as pd\n+from .. import logging as logg\n \n \n def mitochondrial_genes(host, org):\n@@ -8,9 +9,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n \n Returns\n -------\n@@ -32,6 +33,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n@@ -55,9 +59,9 @@\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n- org : {{'hsapiens', 'mmusculus'}}\n- Organism to query. Currently available are human ('hsapiens') and mouse\n- ('mmusculus').\n+ org : {{'hsapiens', 'mmusculus', 'drerio'}}\n+ Organism to query. Currently available are human ('hsapiens'), mouse\n+ ('mmusculus') and zebrafish ('drerio').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n@@ -83,6 +87,9 @@\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n+ elif org == 'drerio':\n+ s.add_dataset_to_xml('drerio_gene_ensembl')\n+ s.add_attribute_to_xml('zfin_id_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n", "issue": "`NameError: name 'logg' is not defined` when using `sc.queries.mitochondrial_genes`\nI just tried\r\n```python\r\nimport scanpy.api as sc\r\nsc.queries.mitochondrial_genes('www.ensembl.org', 'strange_organism')\r\n```\r\nI would expect scanpy complains that it does not know `'strange_organism'`, but I get the error \r\n```python\r\n---------------------------------------------------------------------------\r\nNameError Traceback (most recent call last)\r\n<ipython-input-13-6a41b361ab41> in <module>()\r\n 1 import scanpy.api as sc\r\n----> 2 sc.queries.mitochondrial_genes('www.ensembl.org', 'drerio')\r\n\r\n~/software/scanpy/scanpy/queries/__init__.py in mitochondrial_genes(host, org)\r\n 34 s.add_attribute_to_xml('mgi_symbol')\r\n 35 else:\r\n---> 36 logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\r\n 37 return None\r\n 38 s.add_attribute_to_xml('chromosome_name')\r\n\r\nNameError: name 'logg' is not defined\r\n```\r\nIt seems to me like `queries/__init__.py` misses an `from .. import logging as logg` statement.\r\n\r\nWould maybe also make sense to show the the message that an organism is not available at verbosity level 1 instead of 4?\n", "before_files": [{"content": "import pandas as pd\n\n\ndef mitochondrial_genes(host, org):\n \"\"\"Mitochondrial gene symbols for specific organism through BioMart.\n\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. 
Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n\n Returns\n -------\n A `pd.Index` containing mitochondrial gene symbols.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n xml = s.get_xml()\n\n # parsing mitochondrial gene symbols\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name']\n res = res.dropna()\n res = res[res['chromosome_name'] == 'MT']\n res = res.set_index('symbol')\n res = res[~res.index.duplicated(keep='first')]\n\n return res.index\n\n\ndef gene_coordinates(host, org, gene, chr_exclude=[]):\n \"\"\"Retrieve gene coordinates for specific organism through BioMart.\n Parameters\n ----------\n host : {{'www.ensembl.org', ...}}\n A valid BioMart host URL. Can be used to control genome build.\n org : {{'hsapiens', 'mmusculus'}}\n Organism to query. Currently available are human ('hsapiens') and mouse\n ('mmusculus').\n gene :\n The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve\n coordinates.\n chr_exclude :\n A list of chromosomes to exclude from query.\n Returns\n -------\n A `pd.DataFrame` containing gene coordinates for the specified gene symbol.\n \"\"\"\n try:\n from bioservices import biomart\n except ImportError:\n raise ImportError(\n 'You need to install the `bioservices` module.')\n from io import StringIO\n s = biomart.BioMart(host=host)\n\n # building query\n s.new_query()\n if org == 'hsapiens':\n s.add_dataset_to_xml('hsapiens_gene_ensembl')\n s.add_attribute_to_xml('hgnc_symbol')\n elif org == 'mmusculus':\n s.add_dataset_to_xml('mmusculus_gene_ensembl')\n s.add_attribute_to_xml('mgi_symbol')\n else:\n logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)\n return None\n s.add_attribute_to_xml('chromosome_name')\n s.add_attribute_to_xml('start_position')\n s.add_attribute_to_xml('end_position')\n xml = s.get_xml()\n\n # parsing gene coordinates\n res = pd.read_csv(StringIO(s.query(xml)), sep='\\t', header=None)\n res.columns = ['symbol', 'chromosome_name', 'start', 'end']\n res = res.dropna()\n res = res[~res['chromosome_name'].isin(chr_exclude)]\n res = res.set_index('symbol')\n\n return res.loc[[gene], :]\n", "path": "scanpy/queries/__init__.py"}]} | 1,883 | 613 |
gh_patches_debug_3479 | rasdani/github-patches | git_diff | docker__docker-py-1393 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception retrieving untagged images on api >= 1.24
Docker API >= 1.24 will return a null object if image tags do not exist, instead of omitting the field from the response. This makes the `dict.get` default fail to catch the null case and causes the list comprehension to iterate over a non-iterable.
```
File "<stdin>", line 1, in <module>
File "docker/models/images.py", line 16, in __repr__
return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
File "docker/models/images.py", line 34, in tags
tag for tag in self.attrs.get('RepoTags', [])
TypeError: 'NoneType' object is not iterable
```
This is similar to an issue seen in [salt](https://github.com/saltstack/salt/pull/35447/commits/b833b5f9587534d3b843a026ef91abc4ec929d0f)
Was able to get things working with a pretty quick change:
```
diff --git a/docker/models/images.py b/docker/models/images.py
index 32068e6..39a640d 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -30,9 +30,11 @@ class Image(Model):
"""
The image's tags.
"""
+ tags = self.attrs.get('RepoTags', [])
+ if tags is None:
+ return []
return [
- tag for tag in self.attrs.get('RepoTags', [])
- if tag != '<none>:<none>'
+ tag for tag in tags if tag != '<none>:<none>'
]
def history(self):
```
</issue>
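
To illustrate the failure mode and the guard from the quick change above, here is a small standalone sketch. The bare `Image` class holding only `attrs` is a simplified stand-in for the real `docker.models.images.Image` (a `Model` subclass), used purely to demonstrate the `None` guard; the property body follows the diff in the issue.

```python
# Standalone sketch of the guarded `tags` property from the change above.
class Image:
    def __init__(self, attrs):
        self.attrs = attrs

    @property
    def tags(self):
        tags = self.attrs.get('RepoTags', [])
        if tags is None:  # API >= 1.24 reports untagged images as null
            return []
        return [tag for tag in tags if tag != '<none>:<none>']


# With the guard in place an untagged image no longer raises TypeError:
assert Image({'RepoTags': None}).tags == []
assert Image({'RepoTags': ['busybox:latest']}).tags == ['busybox:latest']
```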
<code>
[start of docker/models/images.py]
1 import re
2
3 import six
4
5 from ..api import APIClient
6 from ..errors import BuildError
7 from ..utils.json_stream import json_stream
8 from .resource import Collection, Model
9
10
11 class Image(Model):
12 """
13 An image on the server.
14 """
15 def __repr__(self):
16 return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
17
18 @property
19 def short_id(self):
20 """
21 The ID of the image truncated to 10 characters, plus the ``sha256:``
22 prefix.
23 """
24 if self.id.startswith('sha256:'):
25 return self.id[:17]
26 return self.id[:10]
27
28 @property
29 def tags(self):
30 """
31 The image's tags.
32 """
33 return [
34 tag for tag in self.attrs.get('RepoTags', [])
35 if tag != '<none>:<none>'
36 ]
37
38 def history(self):
39 """
40 Show the history of an image.
41
42 Returns:
43 (str): The history of the image.
44
45 Raises:
46 :py:class:`docker.errors.APIError`
47 If the server returns an error.
48 """
49 return self.client.api.history(self.id)
50
51 def save(self):
52 """
53 Get a tarball of an image. Similar to the ``docker save`` command.
54
55 Returns:
56 (urllib3.response.HTTPResponse object): The response from the
57 daemon.
58
59 Raises:
60 :py:class:`docker.errors.APIError`
61 If the server returns an error.
62
63 Example:
64
65 >>> image = cli.get("fedora:latest")
66 >>> resp = image.save()
67 >>> f = open('/tmp/fedora-latest.tar', 'w')
68 >>> f.write(resp.data)
69 >>> f.close()
70 """
71 return self.client.api.get_image(self.id)
72
73 def tag(self, repository, tag=None, **kwargs):
74 """
75 Tag this image into a repository. Similar to the ``docker tag``
76 command.
77
78 Args:
79 repository (str): The repository to set for the tag
80 tag (str): The tag name
81 force (bool): Force
82
83 Raises:
84 :py:class:`docker.errors.APIError`
85 If the server returns an error.
86
87 Returns:
88 (bool): ``True`` if successful
89 """
90 self.client.api.tag(self.id, repository, tag=tag, **kwargs)
91
92
93 class ImageCollection(Collection):
94 model = Image
95
96 def build(self, **kwargs):
97 """
98 Build an image and return it. Similar to the ``docker build``
99 command. Either ``path`` or ``fileobj`` must be set.
100
101 If you have a tar file for the Docker build context (including a
102 Dockerfile) already, pass a readable file-like object to ``fileobj``
103 and also pass ``custom_context=True``. If the stream is compressed
104 also, set ``encoding`` to the correct value (e.g ``gzip``).
105
106 If you want to get the raw output of the build, use the
107 :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
108 low-level API.
109
110 Args:
111 path (str): Path to the directory containing the Dockerfile
112 fileobj: A file object to use as the Dockerfile. (Or a file-like
113 object)
114 tag (str): A tag to add to the final image
115 quiet (bool): Whether to return the status
116 nocache (bool): Don't use the cache when set to ``True``
117 rm (bool): Remove intermediate containers. The ``docker build``
118 command now defaults to ``--rm=true``, but we have kept the old
119 default of `False` to preserve backward compatibility
120 stream (bool): *Deprecated for API version > 1.8 (always True)*.
121 Return a blocking generator you can iterate over to retrieve
122 build output as it happens
123 timeout (int): HTTP timeout
124 custom_context (bool): Optional if using ``fileobj``
125 encoding (str): The encoding for a stream. Set to ``gzip`` for
126 compressing
127 pull (bool): Downloads any updates to the FROM image in Dockerfiles
128 forcerm (bool): Always remove intermediate containers, even after
129 unsuccessful builds
130 dockerfile (str): path within the build context to the Dockerfile
131 buildargs (dict): A dictionary of build arguments
132 container_limits (dict): A dictionary of limits applied to each
133 container created by the build process. Valid keys:
134
135 - memory (int): set memory limit for build
136 - memswap (int): Total memory (memory + swap), -1 to disable
137 swap
138 - cpushares (int): CPU shares (relative weight)
139 - cpusetcpus (str): CPUs in which to allow execution, e.g.,
140 ``"0-3"``, ``"0,1"``
141 decode (bool): If set to ``True``, the returned stream will be
142 decoded into dicts on the fly. Default ``False``.
143
144 Returns:
145 (:py:class:`Image`): The built image.
146
147 Raises:
148 :py:class:`docker.errors.BuildError`
149 If there is an error during the build.
150 :py:class:`docker.errors.APIError`
151 If the server returns any other error.
152 ``TypeError``
153 If neither ``path`` nor ``fileobj`` is specified.
154 """
155 resp = self.client.api.build(**kwargs)
156 if isinstance(resp, six.string_types):
157 return self.get(resp)
158 events = list(json_stream(resp))
159 if not events:
160 return BuildError('Unknown')
161 event = events[-1]
162 if 'stream' in event:
163 match = re.search(r'Successfully built ([0-9a-f]+)',
164 event.get('stream', ''))
165 if match:
166 image_id = match.group(1)
167 return self.get(image_id)
168
169 raise BuildError(event.get('error') or event)
170
171 def get(self, name):
172 """
173 Gets an image.
174
175 Args:
176 name (str): The name of the image.
177
178 Returns:
179 (:py:class:`Image`): The image.
180
181 Raises:
182 :py:class:`docker.errors.ImageNotFound` If the image does not
183 exist.
184 :py:class:`docker.errors.APIError`
185 If the server returns an error.
186 """
187 return self.prepare_model(self.client.api.inspect_image(name))
188
189 def list(self, name=None, all=False, filters=None):
190 """
191 List images on the server.
192
193 Args:
194 name (str): Only show images belonging to the repository ``name``
195 all (bool): Show intermediate image layers. By default, these are
196 filtered out.
197 filters (dict): Filters to be processed on the image list.
198 Available filters:
199 - ``dangling`` (bool)
200 - ``label`` (str): format either ``key`` or ``key=value``
201
202 Returns:
203 (list of :py:class:`Image`): The images.
204
205 Raises:
206 :py:class:`docker.errors.APIError`
207 If the server returns an error.
208 """
209 resp = self.client.api.images(name=name, all=all, filters=filters)
210 return [self.prepare_model(r) for r in resp]
211
212 def load(self, data):
213 """
214 Load an image that was previously saved using
215 :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
216 Similar to ``docker load``.
217
218 Args:
219 data (binary): Image data to be loaded.
220
221 Raises:
222 :py:class:`docker.errors.APIError`
223 If the server returns an error.
224 """
225 return self.client.api.load_image(data)
226
227 def pull(self, name, **kwargs):
228 """
229 Pull an image of the given name and return it. Similar to the
230 ``docker pull`` command.
231
232 If you want to get the raw pull output, use the
233 :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
234 low-level API.
235
236 Args:
237 repository (str): The repository to pull
238 tag (str): The tag to pull
239 insecure_registry (bool): Use an insecure registry
240 auth_config (dict): Override the credentials that
241 :py:meth:`~docker.client.DockerClient.login` has set for
242 this request. ``auth_config`` should contain the ``username``
243 and ``password`` keys to be valid.
244
245 Returns:
246 (:py:class:`Image`): The image that has been pulled.
247
248 Raises:
249 :py:class:`docker.errors.APIError`
250 If the server returns an error.
251
252 Example:
253
254 >>> image = client.images.pull('busybox')
255 """
256 self.client.api.pull(name, **kwargs)
257 return self.get(name)
258
259 def push(self, repository, tag=None, **kwargs):
260 return self.client.api.push(repository, tag=tag, **kwargs)
261 push.__doc__ = APIClient.push.__doc__
262
263 def remove(self, *args, **kwargs):
264 self.client.api.remove_image(*args, **kwargs)
265 remove.__doc__ = APIClient.remove_image.__doc__
266
267 def search(self, *args, **kwargs):
268 return self.client.api.search(*args, **kwargs)
269 search.__doc__ = APIClient.search.__doc__
270
[end of docker/models/images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/models/images.py b/docker/models/images.py
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -30,10 +30,10 @@
"""
The image's tags.
"""
- return [
- tag for tag in self.attrs.get('RepoTags', [])
- if tag != '<none>:<none>'
- ]
+ tags = self.attrs.get('RepoTags')
+ if tags is None:
+ tags = []
+ return [tag for tag in tags if tag != '<none>:<none>']
def history(self):
"""
| {"golden_diff": "diff --git a/docker/models/images.py b/docker/models/images.py\n--- a/docker/models/images.py\n+++ b/docker/models/images.py\n@@ -30,10 +30,10 @@\n \"\"\"\n The image's tags.\n \"\"\"\n- return [\n- tag for tag in self.attrs.get('RepoTags', [])\n- if tag != '<none>:<none>'\n- ]\n+ tags = self.attrs.get('RepoTags')\n+ if tags is None:\n+ tags = []\n+ return [tag for tag in tags if tag != '<none>:<none>']\n \n def history(self):\n \"\"\"\n", "issue": "Exception retrieving untagged images on api >= 1.24\nDocker API >= 1.24 will return a null object if image tags DNE instead of not including it in the response. This makes the dict.get fail to catch the null case and the list comprehension to iterate over a non-iterable.\r\n\r\n```\r\n File \"<stdin>\", line 1, in <module>\r\n File \"docker/models/images.py\", line 16, in __repr__\r\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\r\n File \"docker/models/images.py\", line 34, in tags\r\n tag for tag in self.attrs.get('RepoTags', [])\r\nTypeError: 'NoneType' object is not iterable\r\n```\r\n\r\nThis is similar to an issue seen in [salt](https://github.com/saltstack/salt/pull/35447/commits/b833b5f9587534d3b843a026ef91abc4ec929d0f)\r\n\r\nWas able to get things working with a pretty quick change:\r\n```\r\ndiff --git a/docker/models/images.py b/docker/models/images.py\r\nindex 32068e6..39a640d 100644\r\n--- a/docker/models/images.py\r\n+++ b/docker/models/images.py\r\n@@ -30,9 +30,11 @@ class Image(Model):\r\n \"\"\"\r\n The image's tags.\r\n \"\"\"\r\n+ tags = self.attrs.get('RepoTags', [])\r\n+ if tags is None:\r\n+ return []\r\n return [\r\n- tag for tag in self.attrs.get('RepoTags', [])\r\n- if tag != '<none>:<none>'\r\n+ tag for tag in tags if tag != '<none>:<none>'\r\n ]\r\n \r\n def history(self):\r\n```\n", "before_files": [{"content": "import re\n\nimport six\n\nfrom ..api import APIClient\nfrom ..errors import BuildError\nfrom ..utils.json_stream import json_stream\nfrom .resource import Collection, Model\n\n\nclass Image(Model):\n \"\"\"\n An image on the server.\n \"\"\"\n def __repr__(self):\n return \"<%s: '%s'>\" % (self.__class__.__name__, \"', '\".join(self.tags))\n\n @property\n def short_id(self):\n \"\"\"\n The ID of the image truncated to 10 characters, plus the ``sha256:``\n prefix.\n \"\"\"\n if self.id.startswith('sha256:'):\n return self.id[:17]\n return self.id[:10]\n\n @property\n def tags(self):\n \"\"\"\n The image's tags.\n \"\"\"\n return [\n tag for tag in self.attrs.get('RepoTags', [])\n if tag != '<none>:<none>'\n ]\n\n def history(self):\n \"\"\"\n Show the history of an image.\n\n Returns:\n (str): The history of the image.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.history(self.id)\n\n def save(self):\n \"\"\"\n Get a tarball of an image. Similar to the ``docker save`` command.\n\n Returns:\n (urllib3.response.HTTPResponse object): The response from the\n daemon.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = cli.get(\"fedora:latest\")\n >>> resp = image.save()\n >>> f = open('/tmp/fedora-latest.tar', 'w')\n >>> f.write(resp.data)\n >>> f.close()\n \"\"\"\n return self.client.api.get_image(self.id)\n\n def tag(self, repository, tag=None, **kwargs):\n \"\"\"\n Tag this image into a repository. 
Similar to the ``docker tag``\n command.\n\n Args:\n repository (str): The repository to set for the tag\n tag (str): The tag name\n force (bool): Force\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Returns:\n (bool): ``True`` if successful\n \"\"\"\n self.client.api.tag(self.id, repository, tag=tag, **kwargs)\n\n\nclass ImageCollection(Collection):\n model = Image\n\n def build(self, **kwargs):\n \"\"\"\n Build an image and return it. Similar to the ``docker build``\n command. Either ``path`` or ``fileobj`` must be set.\n\n If you have a tar file for the Docker build context (including a\n Dockerfile) already, pass a readable file-like object to ``fileobj``\n and also pass ``custom_context=True``. If the stream is compressed\n also, set ``encoding`` to the correct value (e.g ``gzip``).\n\n If you want to get the raw output of the build, use the\n :py:meth:`~docker.api.build.BuildApiMixin.build` method in the\n low-level API.\n\n Args:\n path (str): Path to the directory containing the Dockerfile\n fileobj: A file object to use as the Dockerfile. (Or a file-like\n object)\n tag (str): A tag to add to the final image\n quiet (bool): Whether to return the status\n nocache (bool): Don't use the cache when set to ``True``\n rm (bool): Remove intermediate containers. The ``docker build``\n command now defaults to ``--rm=true``, but we have kept the old\n default of `False` to preserve backward compatibility\n stream (bool): *Deprecated for API version > 1.8 (always True)*.\n Return a blocking generator you can iterate over to retrieve\n build output as it happens\n timeout (int): HTTP timeout\n custom_context (bool): Optional if using ``fileobj``\n encoding (str): The encoding for a stream. Set to ``gzip`` for\n compressing\n pull (bool): Downloads any updates to the FROM image in Dockerfiles\n forcerm (bool): Always remove intermediate containers, even after\n unsuccessful builds\n dockerfile (str): path within the build context to the Dockerfile\n buildargs (dict): A dictionary of build arguments\n container_limits (dict): A dictionary of limits applied to each\n container created by the build process. Valid keys:\n\n - memory (int): set memory limit for build\n - memswap (int): Total memory (memory + swap), -1 to disable\n swap\n - cpushares (int): CPU shares (relative weight)\n - cpusetcpus (str): CPUs in which to allow execution, e.g.,\n ``\"0-3\"``, ``\"0,1\"``\n decode (bool): If set to ``True``, the returned stream will be\n decoded into dicts on the fly. 
Default ``False``.\n\n Returns:\n (:py:class:`Image`): The built image.\n\n Raises:\n :py:class:`docker.errors.BuildError`\n If there is an error during the build.\n :py:class:`docker.errors.APIError`\n If the server returns any other error.\n ``TypeError``\n If neither ``path`` nor ``fileobj`` is specified.\n \"\"\"\n resp = self.client.api.build(**kwargs)\n if isinstance(resp, six.string_types):\n return self.get(resp)\n events = list(json_stream(resp))\n if not events:\n return BuildError('Unknown')\n event = events[-1]\n if 'stream' in event:\n match = re.search(r'Successfully built ([0-9a-f]+)',\n event.get('stream', ''))\n if match:\n image_id = match.group(1)\n return self.get(image_id)\n\n raise BuildError(event.get('error') or event)\n\n def get(self, name):\n \"\"\"\n Gets an image.\n\n Args:\n name (str): The name of the image.\n\n Returns:\n (:py:class:`Image`): The image.\n\n Raises:\n :py:class:`docker.errors.ImageNotFound` If the image does not\n exist.\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.prepare_model(self.client.api.inspect_image(name))\n\n def list(self, name=None, all=False, filters=None):\n \"\"\"\n List images on the server.\n\n Args:\n name (str): Only show images belonging to the repository ``name``\n all (bool): Show intermediate image layers. By default, these are\n filtered out.\n filters (dict): Filters to be processed on the image list.\n Available filters:\n - ``dangling`` (bool)\n - ``label`` (str): format either ``key`` or ``key=value``\n\n Returns:\n (list of :py:class:`Image`): The images.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n resp = self.client.api.images(name=name, all=all, filters=filters)\n return [self.prepare_model(r) for r in resp]\n\n def load(self, data):\n \"\"\"\n Load an image that was previously saved using\n :py:meth:`~docker.models.images.Image.save` (or ``docker save``).\n Similar to ``docker load``.\n\n Args:\n data (binary): Image data to be loaded.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n return self.client.api.load_image(data)\n\n def pull(self, name, **kwargs):\n \"\"\"\n Pull an image of the given name and return it. Similar to the\n ``docker pull`` command.\n\n If you want to get the raw pull output, use the\n :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the\n low-level API.\n\n Args:\n repository (str): The repository to pull\n tag (str): The tag to pull\n insecure_registry (bool): Use an insecure registry\n auth_config (dict): Override the credentials that\n :py:meth:`~docker.client.DockerClient.login` has set for\n this request. 
``auth_config`` should contain the ``username``\n and ``password`` keys to be valid.\n\n Returns:\n (:py:class:`Image`): The image that has been pulled.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> image = client.images.pull('busybox')\n \"\"\"\n self.client.api.pull(name, **kwargs)\n return self.get(name)\n\n def push(self, repository, tag=None, **kwargs):\n return self.client.api.push(repository, tag=tag, **kwargs)\n push.__doc__ = APIClient.push.__doc__\n\n def remove(self, *args, **kwargs):\n self.client.api.remove_image(*args, **kwargs)\n remove.__doc__ = APIClient.remove_image.__doc__\n\n def search(self, *args, **kwargs):\n return self.client.api.search(*args, **kwargs)\n search.__doc__ = APIClient.search.__doc__\n", "path": "docker/models/images.py"}]} | 3,656 | 134 |
gh_patches_debug_23640 | rasdani/github-patches | git_diff | ipython__ipython-7454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When kernel is missing, notebook fails to load
Using master as of 0f92b92748b0dc07dd, I tried loading a notebook with a kernel I don't have configured (Python 3). Instead of getting the message indicating I should install it and at least being able to read the notebook, the popup said
```
Notebook failed to load
The error was:
TypeError: Cannot read property 'resources' of undefined
```
And the notebook didn't load at all.
This is obviously a hard blocker for release.
</issue>
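The front-end code that raises the `resources` error is JavaScript and not part of this listing, but the server-side lookup it depends on is. A rough sketch of the fallback a caller can take when the requested spec is absent, written against the Python API shown in the file below (the hard-coded kernel name is a placeholder, not a real spec):

```python
from IPython.kernel.kernelspec import (
    KernelSpecManager, NoSuchKernel, NATIVE_KERNEL_NAME,
)

def resolve_spec(name):
    """Return the named KernelSpec, degrading to the native kernel if missing."""
    manager = KernelSpecManager()
    try:
        return manager.get_kernel_spec(name)
    except NoSuchKernel as err:
        # This is the point where the UI should say "install kernel X"
        # instead of refusing to load the notebook at all.
        print("Kernel %r is not installed; falling back to %s"
              % (err.name, NATIVE_KERNEL_NAME))
        return manager.get_kernel_spec(NATIVE_KERNEL_NAME)

spec = resolve_spec('some-missing-kernel')
print(spec.display_name, spec.argv)
```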
<code>
[start of IPython/kernel/kernelspec.py]
1 import io
2 import json
3 import os
4 import shutil
5 import sys
6
7 pjoin = os.path.join
8
9 from IPython.utils.path import get_ipython_dir
10 from IPython.utils.py3compat import PY3
11 from IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Any
12 from .launcher import make_ipkernel_cmd
13
14 if os.name == 'nt':
15 programdata = os.environ.get('PROGRAMDATA', None)
16 if programdata:
17 SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')]
18 else: # PROGRAMDATA is not defined by default on XP.
19 SYSTEM_KERNEL_DIRS = []
20 else:
21 SYSTEM_KERNEL_DIRS = ["/usr/share/jupyter/kernels",
22 "/usr/local/share/jupyter/kernels",
23 ]
24
25 NATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2'
26
27 def _pythonfirst(s):
28 "Sort key function that will put strings starting with 'python' first."
29 if s == NATIVE_KERNEL_NAME:
30 return ' ' + s # Two spaces to sort this first of all
31 elif s.startswith('python'):
32 # Space is not valid in kernel names, so this should sort first
33 return ' ' + s
34 return s
35
36 class KernelSpec(HasTraits):
37 argv = List()
38 display_name = Unicode()
39 env = Dict()
40 resource_dir = Unicode()
41
42 @classmethod
43 def from_resource_dir(cls, resource_dir):
44 """Create a KernelSpec object by reading kernel.json
45
46 Pass the path to the *directory* containing kernel.json.
47 """
48 kernel_file = pjoin(resource_dir, 'kernel.json')
49 with io.open(kernel_file, 'r', encoding='utf-8') as f:
50 kernel_dict = json.load(f)
51 return cls(resource_dir=resource_dir, **kernel_dict)
52
53 def to_dict(self):
54 d = dict(argv=self.argv,
55 env=self.env,
56 display_name=self.display_name,
57 )
58
59 return d
60
61 def to_json(self):
62 return json.dumps(self.to_dict())
63
64 def _is_kernel_dir(path):
65 """Is ``path`` a kernel directory?"""
66 return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json'))
67
68 def _list_kernels_in(dir):
69 """Return a mapping of kernel names to resource directories from dir.
70
71 If dir is None or does not exist, returns an empty dict.
72 """
73 if dir is None or not os.path.isdir(dir):
74 return {}
75 return {f.lower(): pjoin(dir, f) for f in os.listdir(dir)
76 if _is_kernel_dir(pjoin(dir, f))}
77
78 class NoSuchKernel(KeyError):
79 def __init__(self, name):
80 self.name = name
81
82 class KernelSpecManager(HasTraits):
83 ipython_dir = Unicode()
84 def _ipython_dir_default(self):
85 return get_ipython_dir()
86
87 user_kernel_dir = Unicode()
88 def _user_kernel_dir_default(self):
89 return pjoin(self.ipython_dir, 'kernels')
90
91 @property
92 def env_kernel_dir(self):
93 return pjoin(sys.prefix, 'share', 'jupyter', 'kernels')
94
95 kernel_dirs = List(
96 help="List of kernel directories to search. Later ones take priority over earlier."
97 )
98 def _kernel_dirs_default(self):
99 dirs = SYSTEM_KERNEL_DIRS[:]
100 if self.env_kernel_dir not in dirs:
101 dirs.append(self.env_kernel_dir)
102 dirs.append(self.user_kernel_dir)
103 return dirs
104
105 @property
106 def _native_kernel_dict(self):
107 """Makes a kernel directory for the native kernel.
108
109 The native kernel is the kernel using the same Python runtime as this
110 process. This will put its information in the user kernels directory.
111 """
112 return {'argv': make_ipkernel_cmd(),
113 'display_name': 'Python %i' % (3 if PY3 else 2),
114 }
115
116 @property
117 def _native_kernel_resource_dir(self):
118 return pjoin(os.path.dirname(__file__), 'resources')
119
120 def find_kernel_specs(self):
121 """Returns a dict mapping kernel names to resource directories."""
122 d = {}
123 for kernel_dir in self.kernel_dirs:
124 d.update(_list_kernels_in(kernel_dir))
125
126 d[NATIVE_KERNEL_NAME] = self._native_kernel_resource_dir
127 return d
128 # TODO: Caching?
129
130 def get_kernel_spec(self, kernel_name):
131 """Returns a :class:`KernelSpec` instance for the given kernel_name.
132
133 Raises :exc:`NoSuchKernel` if the given kernel name is not found.
134 """
135 if kernel_name in {'python', NATIVE_KERNEL_NAME}:
136 return KernelSpec(resource_dir=self._native_kernel_resource_dir,
137 **self._native_kernel_dict)
138
139 d = self.find_kernel_specs()
140 try:
141 resource_dir = d[kernel_name.lower()]
142 except KeyError:
143 raise NoSuchKernel(kernel_name)
144 return KernelSpec.from_resource_dir(resource_dir)
145
146 def _get_destination_dir(self, kernel_name, user=False):
147 if user:
148 return os.path.join(self.user_kernel_dir, kernel_name)
149 else:
150 if SYSTEM_KERNEL_DIRS:
151 return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name)
152 else:
153 raise EnvironmentError("No system kernel directory is available")
154
155
156 def install_kernel_spec(self, source_dir, kernel_name=None, user=False,
157 replace=False):
158 """Install a kernel spec by copying its directory.
159
160 If ``kernel_name`` is not given, the basename of ``source_dir`` will
161 be used.
162
163 If ``user`` is False, it will attempt to install into the systemwide
164 kernel registry. If the process does not have appropriate permissions,
165 an :exc:`OSError` will be raised.
166
167 If ``replace`` is True, this will replace an existing kernel of the same
168 name. Otherwise, if the destination already exists, an :exc:`OSError`
169 will be raised.
170 """
171 if not kernel_name:
172 kernel_name = os.path.basename(source_dir)
173 kernel_name = kernel_name.lower()
174
175 destination = self._get_destination_dir(kernel_name, user=user)
176
177 if replace and os.path.isdir(destination):
178 shutil.rmtree(destination)
179
180 shutil.copytree(source_dir, destination)
181
182 def install_native_kernel_spec(self, user=False):
183 """Install the native kernel spec to the filesystem
184
185 This allows a Python 3 frontend to use a Python 2 kernel, or vice versa.
186 The kernelspec will be written pointing to the Python executable on
187 which this is run.
188
189 If ``user`` is False, it will attempt to install into the systemwide
190 kernel registry. If the process does not have appropriate permissions,
191 an :exc:`OSError` will be raised.
192 """
193 path = self._get_destination_dir(NATIVE_KERNEL_NAME, user=user)
194 os.makedirs(path, mode=0o755)
195 with open(pjoin(path, 'kernel.json'), 'w') as f:
196 json.dump(self._native_kernel_dict, f, indent=1)
197 copy_from = self._native_kernel_resource_dir
198 for file in os.listdir(copy_from):
199 shutil.copy(pjoin(copy_from, file), path)
200 return path
201
202 def find_kernel_specs():
203 """Returns a dict mapping kernel names to resource directories."""
204 return KernelSpecManager().find_kernel_specs()
205
206 def get_kernel_spec(kernel_name):
207 """Returns a :class:`KernelSpec` instance for the given kernel_name.
208
209 Raises KeyError if the given kernel name is not found.
210 """
211 return KernelSpecManager().get_kernel_spec(kernel_name)
212
213 def install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False):
214 return KernelSpecManager().install_kernel_spec(source_dir, kernel_name,
215 user, replace)
216
217 install_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__
218
219 def install_native_kernel_spec(user=False):
220 return KernelSpecManager().install_native_kernel_spec(user=user)
221
222 install_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__
223
[end of IPython/kernel/kernelspec.py]
</code>
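For a quick sanity check of the module-level helpers at the bottom of the listing (assuming a checkout where this import path resolves), something like the following is enough to see which kernels are registered:

```python
from IPython.kernel.kernelspec import find_kernel_specs, get_kernel_spec, NoSuchKernel

# Kernel name -> resource directory; the native kernel is always injected.
for name, resource_dir in sorted(find_kernel_specs().items()):
    print(name, '->', resource_dir)

try:
    spec = get_kernel_spec('python3')
    print(spec.to_json())
except NoSuchKernel as err:
    print('missing kernel:', err.name)
```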
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/kernel/kernelspec.py b/IPython/kernel/kernelspec.py
--- a/IPython/kernel/kernelspec.py
+++ b/IPython/kernel/kernelspec.py
@@ -36,6 +36,7 @@
class KernelSpec(HasTraits):
argv = List()
display_name = Unicode()
+ language = Unicode()
env = Dict()
resource_dir = Unicode()
@@ -54,6 +55,7 @@
d = dict(argv=self.argv,
env=self.env,
display_name=self.display_name,
+ language=self.language,
)
return d
@@ -109,8 +111,10 @@
The native kernel is the kernel using the same Python runtime as this
process. This will put its information in the user kernels directory.
"""
- return {'argv': make_ipkernel_cmd(),
+ return {
+ 'argv': make_ipkernel_cmd(),
'display_name': 'Python %i' % (3 if PY3 else 2),
+ 'language': 'python',
}
@property
| {"golden_diff": "diff --git a/IPython/kernel/kernelspec.py b/IPython/kernel/kernelspec.py\n--- a/IPython/kernel/kernelspec.py\n+++ b/IPython/kernel/kernelspec.py\n@@ -36,6 +36,7 @@\n class KernelSpec(HasTraits):\n argv = List()\n display_name = Unicode()\n+ language = Unicode()\n env = Dict()\n resource_dir = Unicode()\n \n@@ -54,6 +55,7 @@\n d = dict(argv=self.argv,\n env=self.env,\n display_name=self.display_name,\n+ language=self.language,\n )\n \n return d\n@@ -109,8 +111,10 @@\n The native kernel is the kernel using the same Python runtime as this\n process. This will put its information in the user kernels directory.\n \"\"\"\n- return {'argv': make_ipkernel_cmd(),\n+ return {\n+ 'argv': make_ipkernel_cmd(),\n 'display_name': 'Python %i' % (3 if PY3 else 2),\n+ 'language': 'python',\n }\n \n @property\n", "issue": "When kernel is missing, notebook fails to load\nUsing master as of 0f92b92748b0dc07dd, I tried loading a notebook with a kernel I don't have configured (Python 3). Instead of getting the message indicating I should install it and at least being able to read the notebook, the popup said\n\n```\nNotebook failed to load\nThe error was:\nTypeError: Cannot read property 'resources' of undefined\n```\n\nAnd the notebook didn't load at all.\n\nThis is obviously a hard blocker for release.\n\n", "before_files": [{"content": "import io\nimport json\nimport os\nimport shutil\nimport sys\n\npjoin = os.path.join\n\nfrom IPython.utils.path import get_ipython_dir\nfrom IPython.utils.py3compat import PY3\nfrom IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Any\nfrom .launcher import make_ipkernel_cmd\n\nif os.name == 'nt':\n programdata = os.environ.get('PROGRAMDATA', None)\n if programdata:\n SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')]\n else: # PROGRAMDATA is not defined by default on XP.\n SYSTEM_KERNEL_DIRS = []\nelse:\n SYSTEM_KERNEL_DIRS = [\"/usr/share/jupyter/kernels\",\n \"/usr/local/share/jupyter/kernels\",\n ]\n \nNATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2'\n\ndef _pythonfirst(s):\n \"Sort key function that will put strings starting with 'python' first.\"\n if s == NATIVE_KERNEL_NAME:\n return ' ' + s # Two spaces to sort this first of all\n elif s.startswith('python'):\n # Space is not valid in kernel names, so this should sort first\n return ' ' + s\n return s\n\nclass KernelSpec(HasTraits):\n argv = List()\n display_name = Unicode()\n env = Dict()\n resource_dir = Unicode()\n \n @classmethod\n def from_resource_dir(cls, resource_dir):\n \"\"\"Create a KernelSpec object by reading kernel.json\n \n Pass the path to the *directory* containing kernel.json.\n \"\"\"\n kernel_file = pjoin(resource_dir, 'kernel.json')\n with io.open(kernel_file, 'r', encoding='utf-8') as f:\n kernel_dict = json.load(f)\n return cls(resource_dir=resource_dir, **kernel_dict)\n \n def to_dict(self):\n d = dict(argv=self.argv,\n env=self.env,\n display_name=self.display_name,\n )\n\n return d\n\n def to_json(self):\n return json.dumps(self.to_dict())\n\ndef _is_kernel_dir(path):\n \"\"\"Is ``path`` a kernel directory?\"\"\"\n return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json'))\n\ndef _list_kernels_in(dir):\n \"\"\"Return a mapping of kernel names to resource directories from dir.\n \n If dir is None or does not exist, returns an empty dict.\n \"\"\"\n if dir is None or not os.path.isdir(dir):\n return {}\n return {f.lower(): pjoin(dir, f) for f in os.listdir(dir)\n if _is_kernel_dir(pjoin(dir, f))}\n\nclass NoSuchKernel(KeyError):\n 
def __init__(self, name):\n self.name = name\n\nclass KernelSpecManager(HasTraits):\n ipython_dir = Unicode()\n def _ipython_dir_default(self):\n return get_ipython_dir()\n\n user_kernel_dir = Unicode()\n def _user_kernel_dir_default(self):\n return pjoin(self.ipython_dir, 'kernels')\n\n @property\n def env_kernel_dir(self):\n return pjoin(sys.prefix, 'share', 'jupyter', 'kernels')\n \n kernel_dirs = List(\n help=\"List of kernel directories to search. Later ones take priority over earlier.\" \n ) \n def _kernel_dirs_default(self):\n dirs = SYSTEM_KERNEL_DIRS[:]\n if self.env_kernel_dir not in dirs:\n dirs.append(self.env_kernel_dir)\n dirs.append(self.user_kernel_dir)\n return dirs\n\n @property\n def _native_kernel_dict(self):\n \"\"\"Makes a kernel directory for the native kernel.\n \n The native kernel is the kernel using the same Python runtime as this\n process. This will put its information in the user kernels directory.\n \"\"\"\n return {'argv': make_ipkernel_cmd(),\n 'display_name': 'Python %i' % (3 if PY3 else 2),\n }\n\n @property\n def _native_kernel_resource_dir(self):\n return pjoin(os.path.dirname(__file__), 'resources')\n\n def find_kernel_specs(self):\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n d = {}\n for kernel_dir in self.kernel_dirs:\n d.update(_list_kernels_in(kernel_dir))\n\n d[NATIVE_KERNEL_NAME] = self._native_kernel_resource_dir\n return d\n # TODO: Caching?\n\n def get_kernel_spec(self, kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n \n Raises :exc:`NoSuchKernel` if the given kernel name is not found.\n \"\"\"\n if kernel_name in {'python', NATIVE_KERNEL_NAME}:\n return KernelSpec(resource_dir=self._native_kernel_resource_dir,\n **self._native_kernel_dict)\n\n d = self.find_kernel_specs()\n try:\n resource_dir = d[kernel_name.lower()]\n except KeyError:\n raise NoSuchKernel(kernel_name)\n return KernelSpec.from_resource_dir(resource_dir)\n \n def _get_destination_dir(self, kernel_name, user=False):\n if user:\n return os.path.join(self.user_kernel_dir, kernel_name)\n else:\n if SYSTEM_KERNEL_DIRS:\n return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name)\n else:\n raise EnvironmentError(\"No system kernel directory is available\")\n\n\n def install_kernel_spec(self, source_dir, kernel_name=None, user=False,\n replace=False):\n \"\"\"Install a kernel spec by copying its directory.\n \n If ``kernel_name`` is not given, the basename of ``source_dir`` will\n be used.\n \n If ``user`` is False, it will attempt to install into the systemwide\n kernel registry. If the process does not have appropriate permissions,\n an :exc:`OSError` will be raised.\n \n If ``replace`` is True, this will replace an existing kernel of the same\n name. Otherwise, if the destination already exists, an :exc:`OSError`\n will be raised.\n \"\"\"\n if not kernel_name:\n kernel_name = os.path.basename(source_dir)\n kernel_name = kernel_name.lower()\n \n destination = self._get_destination_dir(kernel_name, user=user)\n\n if replace and os.path.isdir(destination):\n shutil.rmtree(destination)\n\n shutil.copytree(source_dir, destination)\n\n def install_native_kernel_spec(self, user=False):\n \"\"\"Install the native kernel spec to the filesystem\n \n This allows a Python 3 frontend to use a Python 2 kernel, or vice versa.\n The kernelspec will be written pointing to the Python executable on\n which this is run.\n \n If ``user`` is False, it will attempt to install into the systemwide\n kernel registry. 
If the process does not have appropriate permissions, \n an :exc:`OSError` will be raised.\n \"\"\"\n path = self._get_destination_dir(NATIVE_KERNEL_NAME, user=user)\n os.makedirs(path, mode=0o755)\n with open(pjoin(path, 'kernel.json'), 'w') as f:\n json.dump(self._native_kernel_dict, f, indent=1)\n copy_from = self._native_kernel_resource_dir\n for file in os.listdir(copy_from):\n shutil.copy(pjoin(copy_from, file), path)\n return path\n\ndef find_kernel_specs():\n \"\"\"Returns a dict mapping kernel names to resource directories.\"\"\"\n return KernelSpecManager().find_kernel_specs()\n\ndef get_kernel_spec(kernel_name):\n \"\"\"Returns a :class:`KernelSpec` instance for the given kernel_name.\n \n Raises KeyError if the given kernel name is not found.\n \"\"\"\n return KernelSpecManager().get_kernel_spec(kernel_name)\n\ndef install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False):\n return KernelSpecManager().install_kernel_spec(source_dir, kernel_name,\n user, replace)\n\ninstall_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__\n\ndef install_native_kernel_spec(user=False):\n return KernelSpecManager().install_native_kernel_spec(user=user)\n\ninstall_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__\n", "path": "IPython/kernel/kernelspec.py"}]} | 2,953 | 239 |
gh_patches_debug_38704 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider wellstar is broken
During the global build at 2021-10-20-14-42-48, spider **wellstar** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/wellstar.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson))
</issue>
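The log above only says the spider produced nothing, which for a scraper like this usually means the JSON shape coming back from the endpoint changed. One symptom of that is the address field no longer always splitting into four comma-separated pieces; a stand-alone sketch of a defensive split (the sample addresses are made up, not real API output):

```python
def get_address_attributes(address):
    """Pull city/state/postcode out of 'street, city, state, zip', tolerating short forms."""
    parts = [part.strip() for part in address.split(",")]
    attributes = {}
    if len(parts) > 1:
        attributes['city'] = parts[1]
    if len(parts) > 2:
        attributes['state'] = parts[2]
    if len(parts) > 3:
        attributes['postcode'] = parts[3]
    return attributes

print(get_address_attributes("793 Sample Road, Marietta, GA, 30062"))
print(get_address_attributes("793 Sample Road"))   # short form -> {} instead of IndexError
```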
<code>
[start of locations/spiders/wellstar.py]
1 import json
2 import scrapy
3 from locations.items import GeojsonPointItem
4 from locations.hours import OpeningHours
5
6 DAYS_NAME = {
7 'Monday': 'Mo',
8 'Tuesday': 'Tu',
9 'Wednesday': 'We',
10 'Wedsenday': 'We',
11 'Thursday': 'Th',
12 'Friday': 'Fr',
13 'Saturday': 'Sa',
14 'Sunday': 'Su'
15 }
16
17 class WellStarSpider(scrapy.Spider):
18 name = "wellstar"
19 item_attributes = {'brand': "WellStar Health System"}
20 allowed_domains = ["www.wellstar.org/"]
21 start_urls = ('https://www.wellstar.org/locations',)
22
23 def start_requests(self):
24 url = 'https://www.wellstar.org/api/LocationSearchApi/GetLocations'
25
26 headers = {
27 "authority": "www.wellstar.org",
28 "sec-ch-ua": "\"Chromium\";v=\"88\", \"Google Chrome\";v=\"88\", \";Not A Brand\";v=\"99\"",
29 "accept": "application/json, text/javascript, */*; q=0.01",
30 "sec-ch-ua-mobile": "?0",
31 "__requestverificationtoken": "Y0cuJinQRzvtT-kjKKyPZOlZPu0JU48XSyIvEl1p__yzZF4621s-0YUAkImeXkjGgLB7GyZlv8rYby0uII9LChhFlrKiWiDb2Va5skX-TmM1",
32 "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
33 "content-type": "application/json",
34 "origin": "https://www.wellstar.org",
35 "sec-fetch-site": "same-origin",
36 "sec-fetch-mode": "cors",
37 "sec-fetch-dest": "empty",
38 "referer": "https://www.wellstar.org/locations",
39 "accept-language": "en-US,en;q=0.9"
40 }
41
42 cookies = {
43 "ASP.NET_SessionId": "24p11pgzrg42p4ghtqwi40gl",
44 "__RequestVerificationToken": "3agF8JY4gLGMg_K1j7eXWtME4IsQmLPf1snFPF0d132SBNJG2JsIHpVgCAJ1cVysztrjgXuRrCmeMYp9kJjHDYNZlqIzc_ZYwq-TmGIeBTk1",
45 "sxa_site": "WellStarOrg",
46 "ARRAffinity": "e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5",
47 "ARRAffinitySameSite": "e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5",
48 "_gid": "GA1.2.1358908497.1614958557",
49 "_fbp": "fb.1.1614958557337.1409275872",
50 "_gcl_au": "1.1.1318529675.1614958557",
51 "SC_ANALYTICS_GLOBAL_COOKIE": "da85aa2fb9ad429eb899c581fcb2376d|True",
52 "fs_uid": "rs.fullstory.com#10J51H#5729266957271040:6498357093580800/1646494557",
53 "isIUnderstand": "true",
54 "searchedlatitude": "33.7489954",
55 "searchedlongitude": "-84.3879824",
56 "latitude": "30.259263699999998",
57 "longitude": "-97.7393472",
58 "_gat_UA-9373927-15": "1",
59 "_gat_UA-9373927-14": "1",
60 "_ga_BM192ND27H": "GS1.1.1614983138.7.1.1614983410.0",
61 "_ga": "GA1.1.688932849.1614958557"
62 }
63
64 body = '{"searchTerm":"","searchFilter":""}'
65
66 yield scrapy.Request(
67 url=url,
68 method='POST',
69 dont_filter=True,
70 cookies=cookies,
71 headers=headers,
72 body=body,
73 callback=self.parse
74 )
75
76 def parse_hours(self, hours):
77
78 opening_hours = OpeningHours()
79
80 if hours:
81 for dt in hours:
82 try:
83 day = DAYS_NAME[dt.split(':')[0]]
84 time = "".join(dt.split(':')[1:4])
85 open_time, close_time = time.split('-')
86 opening_hours.add_range(day=day,
87 open_time=open_time.strip(),
88 close_time=close_time.strip(),
89 time_format="%H%M"
90 )
91 except:
92 continue
93
94 return opening_hours.as_opening_hours()
95
96 def parse(self, response):
97 hdata = json.loads(response.text)
98
99 hdata = hdata["SearchResults"]
100
101 for row in hdata:
102
103 properties = {
104 'ref': row['LocationID'],
105 'name': row['Name'],
106 'addr_full': " ".join([row["Address"].split(",")[0], row.get('Address2',"") or ""]).strip(),
107 'city': row["Address"].split(",")[1].strip(),
108 'state': row["Address"].split(",")[2].strip(),
109 'postcode': row["Address"].split(",")[3].strip(),
110 'lat': row['Latitude'],
111 'lon': row['Longitude'],
112 'phone': row['LocationContactPhone']
113 }
114
115 hours = self.parse_hours(row['WorkingHours'])
116 properties['opening_hours'] = hours
117
118 yield GeojsonPointItem(**properties)
119
[end of locations/spiders/wellstar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/wellstar.py b/locations/spiders/wellstar.py
--- a/locations/spiders/wellstar.py
+++ b/locations/spiders/wellstar.py
@@ -81,11 +81,10 @@
for dt in hours:
try:
day = DAYS_NAME[dt.split(':')[0]]
- time = "".join(dt.split(':')[1:4])
- open_time, close_time = time.split('-')
+ open_time, close_time = hours.get(dt).split('-')
opening_hours.add_range(day=day,
- open_time=open_time.strip(),
- close_time=close_time.strip(),
+ open_time=open_time.replace(':', '').strip(),
+ close_time=close_time.replace(':', '').strip(),
time_format="%H%M"
)
except:
@@ -93,26 +92,38 @@
return opening_hours.as_opening_hours()
+ def get_address_attributes(self, address):
+ address_parts = address.split(",")
+ address_attributes = {}
+ if len(address_parts) > 1:
+ address_attributes['city'] = address_parts[1].strip()
+ if len(address_parts) > 2:
+ address_attributes['state'] = address_parts[2].strip()
+ if len(address_parts) > 3:
+ address_attributes['postcode'] = address_parts[3].strip()
+
+ return address_attributes
+
def parse(self, response):
hdata = json.loads(response.text)
- hdata = hdata["SearchResults"]
+ hdata = hdata["matchingItems"]
for row in hdata:
-
+ address_attributes = self.get_address_attributes(row.get("Address"))
properties = {
- 'ref': row['LocationID'],
- 'name': row['Name'],
- 'addr_full': " ".join([row["Address"].split(",")[0], row.get('Address2',"") or ""]).strip(),
- 'city': row["Address"].split(",")[1].strip(),
- 'state': row["Address"].split(",")[2].strip(),
- 'postcode': row["Address"].split(",")[3].strip(),
- 'lat': row['Latitude'],
- 'lon': row['Longitude'],
- 'phone': row['LocationContactPhone']
+ 'ref': row.get('LocationID'),
+ 'name': row.get('Name'),
+ 'addr_full': " ".join([row.get("Address").split(",")[0], row.get('Address2',"") or ""]).strip(),
+ 'city': address_attributes.get('city'),
+ 'state': address_attributes.get('state'),
+ 'postcode': address_attributes.get('postcode'),
+ 'lat': row.get('Latitude'),
+ 'lon': row.get('Longitude'),
+ 'phone': row.get('LocationContactPhone')
}
- hours = self.parse_hours(row['WorkingHours'])
+ hours = self.parse_hours(row.get('Hours'))
properties['opening_hours'] = hours
- yield GeojsonPointItem(**properties)
+ yield GeojsonPointItem(**properties)
\ No newline at end of file
| {"golden_diff": "diff --git a/locations/spiders/wellstar.py b/locations/spiders/wellstar.py\n--- a/locations/spiders/wellstar.py\n+++ b/locations/spiders/wellstar.py\n@@ -81,11 +81,10 @@\n for dt in hours:\n try:\n day = DAYS_NAME[dt.split(':')[0]]\n- time = \"\".join(dt.split(':')[1:4])\n- open_time, close_time = time.split('-')\n+ open_time, close_time = hours.get(dt).split('-')\n opening_hours.add_range(day=day,\n- open_time=open_time.strip(),\n- close_time=close_time.strip(),\n+ open_time=open_time.replace(':', '').strip(),\n+ close_time=close_time.replace(':', '').strip(),\n time_format=\"%H%M\"\n )\n except:\n@@ -93,26 +92,38 @@\n \n return opening_hours.as_opening_hours()\n \n+ def get_address_attributes(self, address):\n+ address_parts = address.split(\",\")\n+ address_attributes = {}\n+ if len(address_parts) > 1:\n+ address_attributes['city'] = address_parts[1].strip()\n+ if len(address_parts) > 2:\n+ address_attributes['state'] = address_parts[2].strip()\n+ if len(address_parts) > 3:\n+ address_attributes['postcode'] = address_parts[3].strip()\n+\n+ return address_attributes\n+\n def parse(self, response):\n hdata = json.loads(response.text)\n \n- hdata = hdata[\"SearchResults\"]\n+ hdata = hdata[\"matchingItems\"]\n \n for row in hdata:\n-\n+ address_attributes = self.get_address_attributes(row.get(\"Address\"))\n properties = {\n- 'ref': row['LocationID'],\n- 'name': row['Name'],\n- 'addr_full': \" \".join([row[\"Address\"].split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n- 'city': row[\"Address\"].split(\",\")[1].strip(),\n- 'state': row[\"Address\"].split(\",\")[2].strip(),\n- 'postcode': row[\"Address\"].split(\",\")[3].strip(),\n- 'lat': row['Latitude'],\n- 'lon': row['Longitude'],\n- 'phone': row['LocationContactPhone']\n+ 'ref': row.get('LocationID'),\n+ 'name': row.get('Name'),\n+ 'addr_full': \" \".join([row.get(\"Address\").split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n+ 'city': address_attributes.get('city'),\n+ 'state': address_attributes.get('state'),\n+ 'postcode': address_attributes.get('postcode'),\n+ 'lat': row.get('Latitude'),\n+ 'lon': row.get('Longitude'),\n+ 'phone': row.get('LocationContactPhone')\n }\n \n- hours = self.parse_hours(row['WorkingHours'])\n+ hours = self.parse_hours(row.get('Hours'))\n properties['opening_hours'] = hours\n \n- yield GeojsonPointItem(**properties)\n+ yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n", "issue": "Spider wellstar is broken\nDuring the global build at 2021-10-20-14-42-48, spider **wellstar** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/wellstar.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/wellstar.geojson))\n", "before_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAYS_NAME = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Wedsenday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\nclass WellStarSpider(scrapy.Spider):\n name = \"wellstar\"\n item_attributes = {'brand': \"WellStar Health System\"}\n allowed_domains = [\"www.wellstar.org/\"]\n start_urls = ('https://www.wellstar.org/locations',)\n\n def start_requests(self):\n url = 
'https://www.wellstar.org/api/LocationSearchApi/GetLocations'\n\n headers = {\n \"authority\": \"www.wellstar.org\",\n \"sec-ch-ua\": \"\\\"Chromium\\\";v=\\\"88\\\", \\\"Google Chrome\\\";v=\\\"88\\\", \\\";Not A Brand\\\";v=\\\"99\\\"\",\n \"accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"sec-ch-ua-mobile\": \"?0\",\n \"__requestverificationtoken\": \"Y0cuJinQRzvtT-kjKKyPZOlZPu0JU48XSyIvEl1p__yzZF4621s-0YUAkImeXkjGgLB7GyZlv8rYby0uII9LChhFlrKiWiDb2Va5skX-TmM1\",\n \"user-agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36\",\n \"content-type\": \"application/json\",\n \"origin\": \"https://www.wellstar.org\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-dest\": \"empty\",\n \"referer\": \"https://www.wellstar.org/locations\",\n \"accept-language\": \"en-US,en;q=0.9\"\n }\n\n cookies = {\n \"ASP.NET_SessionId\": \"24p11pgzrg42p4ghtqwi40gl\",\n \"__RequestVerificationToken\": \"3agF8JY4gLGMg_K1j7eXWtME4IsQmLPf1snFPF0d132SBNJG2JsIHpVgCAJ1cVysztrjgXuRrCmeMYp9kJjHDYNZlqIzc_ZYwq-TmGIeBTk1\",\n \"sxa_site\": \"WellStarOrg\",\n \"ARRAffinity\": \"e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5\",\n \"ARRAffinitySameSite\": \"e29b803b013c7e5c76a254365bc4ef36ddc3a862faece1b8869fa46de635b9c5\",\n \"_gid\": \"GA1.2.1358908497.1614958557\",\n \"_fbp\": \"fb.1.1614958557337.1409275872\",\n \"_gcl_au\": \"1.1.1318529675.1614958557\",\n \"SC_ANALYTICS_GLOBAL_COOKIE\": \"da85aa2fb9ad429eb899c581fcb2376d|True\",\n \"fs_uid\": \"rs.fullstory.com#10J51H#5729266957271040:6498357093580800/1646494557\",\n \"isIUnderstand\": \"true\",\n \"searchedlatitude\": \"33.7489954\",\n \"searchedlongitude\": \"-84.3879824\",\n \"latitude\": \"30.259263699999998\",\n \"longitude\": \"-97.7393472\",\n \"_gat_UA-9373927-15\": \"1\",\n \"_gat_UA-9373927-14\": \"1\",\n \"_ga_BM192ND27H\": \"GS1.1.1614983138.7.1.1614983410.0\",\n \"_ga\": \"GA1.1.688932849.1614958557\"\n }\n\n body = '{\"searchTerm\":\"\",\"searchFilter\":\"\"}'\n\n yield scrapy.Request(\n url=url,\n method='POST',\n dont_filter=True,\n cookies=cookies,\n headers=headers,\n body=body,\n callback=self.parse\n )\n\n def parse_hours(self, hours):\n\n opening_hours = OpeningHours()\n\n if hours:\n for dt in hours:\n try:\n day = DAYS_NAME[dt.split(':')[0]]\n time = \"\".join(dt.split(':')[1:4])\n open_time, close_time = time.split('-')\n opening_hours.add_range(day=day,\n open_time=open_time.strip(),\n close_time=close_time.strip(),\n time_format=\"%H%M\"\n )\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n hdata = json.loads(response.text)\n\n hdata = hdata[\"SearchResults\"]\n\n for row in hdata:\n\n properties = {\n 'ref': row['LocationID'],\n 'name': row['Name'],\n 'addr_full': \" \".join([row[\"Address\"].split(\",\")[0], row.get('Address2',\"\") or \"\"]).strip(),\n 'city': row[\"Address\"].split(\",\")[1].strip(),\n 'state': row[\"Address\"].split(\",\")[2].strip(),\n 'postcode': row[\"Address\"].split(\",\")[3].strip(),\n 'lat': row['Latitude'],\n 'lon': row['Longitude'],\n 'phone': row['LocationContactPhone']\n }\n\n hours = self.parse_hours(row['WorkingHours'])\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wellstar.py"}]} | 2,498 | 689 |
gh_patches_debug_28469 | rasdani/github-patches | git_diff | fossasia__open-event-server-2390 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show image and square crop option (like in wizard) for speakers and ensure it shows up after import

As the above screenshot shows, the image of the speaker does not show up as expected. In the wizard step 1 it is already implemented in that way.
Compare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/
</issue>
<code>
[start of app/views/admin/models_views/speakers.py]
1 import json
2
3 from flask.ext.admin import BaseView
4 from flask.ext.restplus import abort
5 from flask_admin import expose
6 from flask.ext import login
7 from flask import request, url_for, redirect, flash
8 from ....helpers.data import delete_from_db, save_to_db
9 from ....helpers.data_getter import DataGetter
10 from ....helpers.storage import upload, UPLOAD_PATHS
11
12
13 def get_speaker_or_throw(speaker_id):
14 session = DataGetter.get_speaker(speaker_id)
15 if not session:
16 abort(404)
17 return session
18
19
20 class SpeakersView(BaseView):
21
22 def is_accessible(self):
23 return login.current_user.is_authenticated
24
25 def _handle_view(self, name, **kwargs):
26 if not self.is_accessible():
27 return redirect(url_for('admin.login_view', next=request.url))
28 event = DataGetter.get_event(kwargs['event_id'])
29 if not event.has_session_speakers:
30 return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)
31
32 @expose('/')
33 def index_view(self, event_id):
34 speakers = DataGetter.get_speakers(event_id)
35 event = DataGetter.get_event(event_id)
36 return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',
37 speakers=speakers, event_id=event_id, event=event)
38
39 @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))
40 def edit_view(self, event_id, speaker_id):
41 speaker = get_speaker_or_throw(speaker_id)
42 event = DataGetter.get_event(event_id)
43 form_elems = DataGetter.get_custom_form_elements(event_id)
44 if not form_elems:
45 flash("Speaker form has been incorrectly configured for this event. Editing has been disabled", "danger")
46 return redirect(url_for('.index_view', event_id=event_id))
47 speaker_form = json.loads(form_elems.speaker_form)
48 if request.method == 'GET':
49 return self.render('/gentelella/admin/event/speakers/edit.html',
50 speaker=speaker, event_id=event_id,
51 event=event, speaker_form=speaker_form)
52 if request.method == 'POST':
53 # set photo
54 if 'photo' in request.files and request.files['photo'].filename != '':
55 speaker_img_file = request.files['photo']
56 speaker_img = upload(
57 speaker_img_file,
58 UPLOAD_PATHS['speakers']['photo'].format(
59 event_id=int(event_id), id=int(speaker.id)
60 ))
61 speaker.photo = speaker_img
62 # set other fields
63 speaker.name = request.form.get('name', None)
64 speaker.short_biography = request.form.get('short_biography', None)
65 speaker.long_biography = request.form.get('long_biography', None)
66 speaker.email = request.form.get('email', None)
67 speaker.mobile = request.form.get('mobile', None)
68 speaker.website = request.form.get('website', None)
69 speaker.twitter = request.form.get('twitter', None)
70 speaker.facebook = request.form.get('facebook', None)
71 speaker.github = request.form.get('github', None)
72 speaker.linkedin = request.form.get('linkedin', None)
73 speaker.organisation = request.form.get('organisation', None)
74 speaker.featured = True if request.form.get('featured', 'false') == 'true' else False
75 speaker.position = request.form.get('position', None)
76 speaker.country = request.form.get('country', None)
77 save_to_db(speaker, "Speaker has been updated")
78 flash("Speaker has been saved", "success")
79
80 return redirect(url_for('.index_view', event_id=event_id))
81
82 @expose('/<int:speaker_id>/delete', methods=('GET',))
83 def delete(self, event_id, speaker_id):
84 speaker = get_speaker_or_throw(speaker_id)
85 delete_from_db(speaker, 'Speaker Rejected')
86 flash("The speaker has been deleted", "danger")
87 return redirect(url_for('.index_view', event_id=event_id))
88
[end of app/views/admin/models_views/speakers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py
--- a/app/views/admin/models_views/speakers.py
+++ b/app/views/admin/models_views/speakers.py
@@ -4,10 +4,11 @@
from flask.ext.restplus import abort
from flask_admin import expose
from flask.ext import login
-from flask import request, url_for, redirect, flash
+from flask import request, url_for, redirect, flash, jsonify
from ....helpers.data import delete_from_db, save_to_db
from ....helpers.data_getter import DataGetter
from ....helpers.storage import upload, UPLOAD_PATHS
+from app.helpers.helpers import uploaded_file
def get_speaker_or_throw(speaker_id):
@@ -85,3 +86,23 @@
delete_from_db(speaker, 'Speaker Rejected')
flash("The speaker has been deleted", "danger")
return redirect(url_for('.index_view', event_id=event_id))
+
+ @expose('/<int:speaker_id>/photo_upload', methods=('POST',))
+ def photo_upload(self, event_id, speaker_id):
+ speaker = get_speaker_or_throw(speaker_id)
+ event = DataGetter.get_event(event_id)
+ photo = request.form['photo']
+ if photo:
+ photo_file = uploaded_file(file_content=photo)
+ photo = upload(
+ photo_file,
+ UPLOAD_PATHS['speakers']['photo'].format(
+ event_id=int(event_id), id=int(speaker.id)
+ ))
+ speaker.photo = photo
+ save_to_db(speaker)
+ return jsonify({'status': 'ok', 'photo': photo})
+ else:
+ speaker.photo = None
+ save_to_db(speaker)
+ return jsonify({'status': 'Removed'})
| {"golden_diff": "diff --git a/app/views/admin/models_views/speakers.py b/app/views/admin/models_views/speakers.py\n--- a/app/views/admin/models_views/speakers.py\n+++ b/app/views/admin/models_views/speakers.py\n@@ -4,10 +4,11 @@\n from flask.ext.restplus import abort\n from flask_admin import expose\n from flask.ext import login\n-from flask import request, url_for, redirect, flash\n+from flask import request, url_for, redirect, flash, jsonify\n from ....helpers.data import delete_from_db, save_to_db\n from ....helpers.data_getter import DataGetter\n from ....helpers.storage import upload, UPLOAD_PATHS\n+from app.helpers.helpers import uploaded_file\n \n \n def get_speaker_or_throw(speaker_id):\n@@ -85,3 +86,23 @@\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n+\n+ @expose('/<int:speaker_id>/photo_upload', methods=('POST',))\n+ def photo_upload(self, event_id, speaker_id):\n+ speaker = get_speaker_or_throw(speaker_id)\n+ event = DataGetter.get_event(event_id)\n+ photo = request.form['photo']\n+ if photo:\n+ photo_file = uploaded_file(file_content=photo)\n+ photo = upload(\n+ photo_file,\n+ UPLOAD_PATHS['speakers']['photo'].format(\n+ event_id=int(event_id), id=int(speaker.id)\n+ ))\n+ speaker.photo = photo\n+ save_to_db(speaker)\n+ return jsonify({'status': 'ok', 'photo': photo})\n+ else:\n+ speaker.photo = None\n+ save_to_db(speaker)\n+ return jsonify({'status': 'Removed'})\n", "issue": "Show image and square crop option (like in wizard) for speakers and ensure it shows up after import\n\n\nAs the above screenshot shows, the image of the speaker does not show up as expected. In the wizard step 1 it is already implemented in that way.\n\nCompare: http://open-event-dev.herokuapp.com/events/132/speakers/882/edit/\n\n", "before_files": [{"content": "import json\n\nfrom flask.ext.admin import BaseView\nfrom flask.ext.restplus import abort\nfrom flask_admin import expose\nfrom flask.ext import login\nfrom flask import request, url_for, redirect, flash\nfrom ....helpers.data import delete_from_db, save_to_db\nfrom ....helpers.data_getter import DataGetter\nfrom ....helpers.storage import upload, UPLOAD_PATHS\n\n\ndef get_speaker_or_throw(speaker_id):\n session = DataGetter.get_speaker(speaker_id)\n if not session:\n abort(404)\n return session\n\n\nclass SpeakersView(BaseView):\n\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n return redirect(url_for('admin.login_view', next=request.url))\n event = DataGetter.get_event(kwargs['event_id'])\n if not event.has_session_speakers:\n return self.render('/gentelella/admin/event/info/enable_module.html', active_page='speakers', title='Speakers', event=event)\n\n @expose('/')\n def index_view(self, event_id):\n speakers = DataGetter.get_speakers(event_id)\n event = DataGetter.get_event(event_id)\n return self.render('/gentelella/admin/event/speakers/base_speaker_table.html',\n speakers=speakers, event_id=event_id, event=event)\n\n @expose('/<int:speaker_id>/edit/', methods=('GET', 'POST'))\n def edit_view(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n event = DataGetter.get_event(event_id)\n form_elems = DataGetter.get_custom_form_elements(event_id)\n if not form_elems:\n flash(\"Speaker form has been incorrectly configured for this event. 
Editing has been disabled\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n speaker_form = json.loads(form_elems.speaker_form)\n if request.method == 'GET':\n return self.render('/gentelella/admin/event/speakers/edit.html',\n speaker=speaker, event_id=event_id,\n event=event, speaker_form=speaker_form)\n if request.method == 'POST':\n # set photo\n if 'photo' in request.files and request.files['photo'].filename != '':\n speaker_img_file = request.files['photo']\n speaker_img = upload(\n speaker_img_file,\n UPLOAD_PATHS['speakers']['photo'].format(\n event_id=int(event_id), id=int(speaker.id)\n ))\n speaker.photo = speaker_img\n # set other fields\n speaker.name = request.form.get('name', None)\n speaker.short_biography = request.form.get('short_biography', None)\n speaker.long_biography = request.form.get('long_biography', None)\n speaker.email = request.form.get('email', None)\n speaker.mobile = request.form.get('mobile', None)\n speaker.website = request.form.get('website', None)\n speaker.twitter = request.form.get('twitter', None)\n speaker.facebook = request.form.get('facebook', None)\n speaker.github = request.form.get('github', None)\n speaker.linkedin = request.form.get('linkedin', None)\n speaker.organisation = request.form.get('organisation', None)\n speaker.featured = True if request.form.get('featured', 'false') == 'true' else False\n speaker.position = request.form.get('position', None)\n speaker.country = request.form.get('country', None)\n save_to_db(speaker, \"Speaker has been updated\")\n flash(\"Speaker has been saved\", \"success\")\n\n return redirect(url_for('.index_view', event_id=event_id))\n\n @expose('/<int:speaker_id>/delete', methods=('GET',))\n def delete(self, event_id, speaker_id):\n speaker = get_speaker_or_throw(speaker_id)\n delete_from_db(speaker, 'Speaker Rejected')\n flash(\"The speaker has been deleted\", \"danger\")\n return redirect(url_for('.index_view', event_id=event_id))\n", "path": "app/views/admin/models_views/speakers.py"}]} | 1,730 | 396 |
gh_patches_debug_26592 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-2902 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cisco ASA_ACL "NameError: global name 'candidate' is not defined"
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
asa_acl
##### ANSIBLE VERSION
```
root@osboxes:~/asa# ansible-playbook -i inventory acl.yml
PLAY [Test ASA] ****************************************************************
TASK [Create object to ACL-ANSIBLE] ********************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: global name 'candidate' is not defined
fatal: [asa]: FAILED! => {"changed": false, "failed": true, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\", line 202, in <module>\n main()\n File \"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\", line 184, in main\n commands = candidate.difference(config)\nNameError: global name 'candidate' is not defined\n", "module_stdout": "", "msg": "MODULE FAILURE"}
PLAY RECAP *********************************************************************
asa : ok=0 changed=0 unreachable=0 failed=1
root@osboxes:~/asa#
```
##### CONFIGURATION
##### OS / ENVIRONMENT
Ubuntu
##### SUMMARY
Attempting to run asa_acl using the ansible dev version results in the above errors. asa_command and asa_config work with no issues.
##### STEPS TO REPRODUCE
```
---
- name: Test ASA
hosts: asa
connection: local
gather_facts: false
tasks:
- name: Create object to ACL-ANSIBLE
asa_acl:
lines:
- "access-list ACL-ANSIBLE extended permit tcp any any eq 74"
username: "ntc"
host: "{{ inventory_hostname }}"
authorize: true
auth_pass: "\n"
```
```
ansible-playbook -i inventory acl.yml
```
##### EXPECTED RESULTS
ACL to be updated
##### ACTUAL RESULTS
```
root@osboxes:~/asa# ansible-playbook -i inventory acl.yml -vvvv
Using /etc/ansible/ansible.cfg as config file
Loading callback plugin default of type stdout, v2.0 from /tmp/ansible/lib/ansible/plugins/callback/__init__.pyc
PLAYBOOK: acl.yml **************************************************************
1 plays in acl.yml
PLAY [Test ASA] ****************************************************************
TASK [Create object to ACL-ANSIBLE] ********************************************
task path: /root/asa/acl.yml:10
Using module file /tmp/ansible/lib/ansible/modules/extras/network/asa/asa_acl.py
<asa> ESTABLISH LOCAL CONNECTION FOR USER: root
<asa> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `" && echo ansible-tmp-1473449999.57-34289925616872="` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `" ) && sleep 0'
<asa> PUT /tmp/tmpu2hszs TO /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py
<asa> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/ /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py && sleep 0'
<asa> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py; rm -rf "/root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/" > /dev/null 2>&1 && sleep 0'
An exception occurred during task execution. The full traceback is:
Traceback (most recent call last):
File "/tmp/ansible_o9grpZ/ansible_module_asa_acl.py", line 202, in <module>
main()
File "/tmp/ansible_o9grpZ/ansible_module_asa_acl.py", line 184, in main
commands = candidate.difference(config)
NameError: global name 'candidate' is not defined
fatal: [asa]: FAILED! => {
"changed": false,
"failed": true,
"invocation": {
"module_name": "asa_acl"
},
"module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 202, in <module>\n main()\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 184, in main\n commands = candidate.difference(config)\nNameError: global name 'candidate' is not defined\n",
"module_stdout": "",
"msg": "MODULE FAILURE"
}
PLAY RECAP *********************************************************************
asa : ok=0 changed=0 unreachable=0 failed=1
```
</issue>
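The traceback reduces to `main()` calling `candidate.difference(config)` without ever building a `candidate` object or loading a running `config`. As a rough sketch of the missing plumbing (assuming the `NetworkConfig` and `dumps` helpers from `ansible.module_utils.netcfg` behave the way the related config modules use them, and using a made-up helper name), the repair has roughly this shape; the golden diff further down in this entry performs the same steps inline in `main()`:

```python
# Illustrative sketch only -- the helper name and layout are assumptions.
from ansible.module_utils.netcfg import NetworkConfig, dumps

def build_commands(module, running_config):
    candidate = NetworkConfig(indent=1)
    candidate.add(module.params['lines'])      # ACL lines supplied by the task

    if module.params['force']:
        return str(candidate).split('\n')      # push everything, skip the diff

    config = NetworkConfig(indent=1, contents=running_config)
    commands = candidate.difference(config)    # only the ACEs not already present
    return dumps(commands, 'commands').split('\n')
```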
<code>
[start of network/asa/asa_acl.py]
1 #!/usr/bin/python
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 #
18
19 DOCUMENTATION = """
20 ---
21 module: asa_acl
22 version_added: "2.2"
23 author: "Patrick Ogenstad (@ogenstad)"
24 short_description: Manage access-lists on a Cisco ASA
25 description:
26 - This module allows you to work with access-lists on a Cisco ASA device.
27 extends_documentation_fragment: asa
28 options:
29 lines:
30 description:
31 - The ordered set of commands that should be configured in the
32 section. The commands must be the exact same commands as found
33 in the device running-config. Be sure to note the configuration
34         command syntax as some commands are automatically modified by the
35 device config parser.
36 required: true
37 before:
38 description:
39 - The ordered set of commands to push on to the command stack if
40 a change needs to be made. This allows the playbook designer
41 the opportunity to perform configuration commands prior to pushing
42 any changes without affecting how the set of commands are matched
43 against the system
44 required: false
45 default: null
46 after:
47 description:
48 - The ordered set of commands to append to the end of the command
49 stack if a changed needs to be made. Just like with I(before) this
50 allows the playbook designer to append a set of commands to be
51 executed after the command set.
52 required: false
53 default: null
54 match:
55 description:
56 - Instructs the module on the way to perform the matching of
57 the set of commands against the current device config. If
58 match is set to I(line), commands are matched line by line. If
59 match is set to I(strict), command lines are matched with respect
60 to position. Finally if match is set to I(exact), command lines
61 must be an equal match.
62 required: false
63 default: line
64 choices: ['line', 'strict', 'exact']
65 replace:
66 description:
67 - Instructs the module on the way to perform the configuration
68 on the device. If the replace argument is set to I(line) then
69 the modified lines are pushed to the device in configuration
70 mode. If the replace argument is set to I(block) then the entire
71 command block is pushed to the device in configuration mode if any
72 line is not correct
73 required: false
74 default: line
75 choices: ['line', 'block']
76 force:
77 description:
78 - The force argument instructs the module to not consider the
79 current devices running-config. When set to true, this will
80 cause the module to push the contents of I(src) into the device
81 without first checking if already configured.
82 required: false
83 default: false
84 choices: ['yes', 'no']
85 config:
86 description:
87 - The module, by default, will connect to the remote device and
88 retrieve the current running-config to use as a base for comparing
89 against the contents of source. There are times when it is not
90 desirable to have the task get the current running-config for
91 every task in a playbook. The I(config) argument allows the
92         implementer to pass in the configuration to use as the base
93         config for comparison.
94 required: false
95 default: null
96 """
97
98 EXAMPLES = """
99
100 - asa_acl:
101 lines:
102 - access-list ACL-ANSIBLE extended permit tcp any any eq 82
103 - access-list ACL-ANSIBLE extended permit tcp any any eq www
104 - access-list ACL-ANSIBLE extended permit tcp any any eq 97
105 - access-list ACL-ANSIBLE extended permit tcp any any eq 98
106 - access-list ACL-ANSIBLE extended permit tcp any any eq 99
107 before: clear configure access-list ACL-ANSIBLE
108 match: strict
109 replace: block
110
111 - asa_acl:
112 lines:
113 - access-list ACL-OUTSIDE extended permit tcp any any eq www
114 - access-list ACL-OUTSIDE extended permit tcp any any eq https
115 context: customer_a
116 """
117
118 RETURN = """
119 updates:
120 description: The set of commands that will be pushed to the remote device
121 returned: always
122 type: list
123 sample: ['...', '...']
124
125 responses:
126 description: The set of responses from issuing the commands on the device
127   returned: when not check_mode
128 type: list
129 sample: ['...', '...']
130 """
131 from ansible.module_utils.netcfg import NetworkConfig
132 from ansible.module_utils.asa import NetworkModule
133
134
135 def get_config(module):
136 config = module.params['config'] or dict()
137 if not config and not module.params['force']:
138 config = module.config
139 return config
140
141
142 def check_input_acl(lines, module):
143 first_line = True
144 for line in lines:
145 ace = line.split()
146 if ace[0] != 'access-list':
147 module.fail_json(msg='All lines/commands must begin with "access-list" %s is not permitted' % ace[0])
148 if len(ace) <= 1:
149             module.fail_json(msg='All lines/commands must contain the name of the access-list')
150 if first_line:
151 acl_name = ace[1]
152 else:
153 if acl_name != ace[1]:
154 module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % (ace[1], acl_name))
155 first_line = False
156
157 return 'access-list %s' % acl_name
158
159 def main():
160
161 argument_spec = dict(
162 lines=dict(aliases=['commands'], required=True, type='list'),
163 before=dict(type='list'),
164 after=dict(type='list'),
165 match=dict(default='line', choices=['line', 'strict', 'exact']),
166 replace=dict(default='line', choices=['line', 'block']),
167 force=dict(default=False, type='bool'),
168 config=dict()
169 )
170
171 module = NetworkModule(argument_spec=argument_spec,
172 supports_check_mode=True)
173
174 lines = module.params['lines']
175
176 before = module.params['before']
177 after = module.params['after']
178
179 match = module.params['match']
180 replace = module.params['replace']
181
182 module.filter = check_input_acl(lines, module)
183 if not module.params['force']:
184 commands = candidate.difference(config)
185 commands = dumps(commands, 'commands').split('\n')
186 commands = [str(c) for c in commands if c]
187 else:
188 commands = str(candidate).split('\n')
189
190 if commands:
191 if not module.check_mode:
192 response = module.config(commands)
193 result['responses'] = response
194 result['changed'] = True
195
196 result['updates'] = commands
197
198 module.exit_json(**result)
199
200
201 if __name__ == '__main__':
202 main()
203
[end of network/asa/asa_acl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/network/asa/asa_acl.py b/network/asa/asa_acl.py
--- a/network/asa/asa_acl.py
+++ b/network/asa/asa_acl.py
@@ -128,8 +128,10 @@
type: list
sample: ['...', '...']
"""
+import ansible.module_utils.asa
+
from ansible.module_utils.netcfg import NetworkConfig
-from ansible.module_utils.asa import NetworkModule
+from ansible.module_utils.network import NetworkModule
def get_config(module):
@@ -179,16 +181,22 @@
match = module.params['match']
replace = module.params['replace']
+ candidate = NetworkConfig(indent=1)
+ candidate.add(lines)
+
module.filter = check_input_acl(lines, module)
+
if not module.params['force']:
+ contents = get_config(module)
+ config = NetworkConfig(indent=1, contents=contents)
commands = candidate.difference(config)
commands = dumps(commands, 'commands').split('\n')
- commands = [str(c) for c in commands if c]
else:
commands = str(candidate).split('\n')
if commands:
if not module.check_mode:
+ commands = [str(c) for c in commands if c]
response = module.config(commands)
result['responses'] = response
result['changed'] = True
| {"golden_diff": "diff --git a/network/asa/asa_acl.py b/network/asa/asa_acl.py\n--- a/network/asa/asa_acl.py\n+++ b/network/asa/asa_acl.py\n@@ -128,8 +128,10 @@\n type: list\n sample: ['...', '...']\n \"\"\"\n+import ansible.module_utils.asa\n+\n from ansible.module_utils.netcfg import NetworkConfig\n-from ansible.module_utils.asa import NetworkModule\n+from ansible.module_utils.network import NetworkModule\n \n \n def get_config(module):\n@@ -179,16 +181,22 @@\n match = module.params['match']\n replace = module.params['replace']\n \n+ candidate = NetworkConfig(indent=1)\n+ candidate.add(lines)\n+\n module.filter = check_input_acl(lines, module)\n+\n if not module.params['force']:\n+ contents = get_config(module)\n+ config = NetworkConfig(indent=1, contents=contents)\n commands = candidate.difference(config)\n commands = dumps(commands, 'commands').split('\\n')\n- commands = [str(c) for c in commands if c]\n else:\n commands = str(candidate).split('\\n')\n \n if commands:\n if not module.check_mode:\n+ commands = [str(c) for c in commands if c]\n response = module.config(commands)\n result['responses'] = response\n result['changed'] = True\n", "issue": "Cisco ASA_ACL \"NameError: global name 'candidate' is not defined\"\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nasa_acl\n##### ANSIBLE VERSION\n\n```\nroot@osboxes:~/asa# ansible-playbook -i inventory acl.yml\n\nPLAY [Test ASA] ****************************************************************\n\nTASK [Create object to ACL-ANSIBLE] ********************************************\nAn exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: global name 'candidate' is not defined\nfatal: [asa]: FAILED! => {\"changed\": false, \"failed\": true, \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\\\", line 202, in <module>\\n main()\\n File \\\"/tmp/ansible_JUM9ib/ansible_module_asa_acl.py\\\", line 184, in main\\n commands = candidate.difference(config)\\nNameError: global name 'candidate' is not defined\\n\", \"module_stdout\": \"\", \"msg\": \"MODULE FAILURE\"}\n\nPLAY RECAP *********************************************************************\nasa : ok=0 changed=0 unreachable=0 failed=1\n\nroot@osboxes:~/asa#\n\n```\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n\nUbuntu\n##### SUMMARY\n\nAttempting to run asa_acl using ansible dev version results in above errors. asa_command and asa_config work with no issues. 
\n##### STEPS TO REPRODUCE\n\n```\n\n---\n\n- name: Test ASA\n hosts: asa\n connection: local\n gather_facts: false\n\n tasks:\n\n - name: Create object to ACL-ANSIBLE\n asa_acl:\n lines:\n - \"access-list ACL-ANSIBLE extended permit tcp any any eq 74\"\n username: \"ntc\"\n host: \"{{ inventory_hostname }}\"\n authorize: true\n auth_pass: \"\\n\"\n```\n\n```\nansible-playbook -i inventory acl.yml\n```\n##### EXPECTED RESULTS\n\nACL to be updated \n##### ACTUAL RESULTS\n\n```\nroot@osboxes:~/asa# ansible-playbook -i inventory acl.yml -vvvv\nUsing /etc/ansible/ansible.cfg as config file\nLoading callback plugin default of type stdout, v2.0 from /tmp/ansible/lib/ansible/plugins/callback/__init__.pyc\n\nPLAYBOOK: acl.yml **************************************************************\n1 plays in acl.yml\n\nPLAY [Test ASA] ****************************************************************\n\nTASK [Create object to ACL-ANSIBLE] ********************************************\ntask path: /root/asa/acl.yml:10\nUsing module file /tmp/ansible/lib/ansible/modules/extras/network/asa/asa_acl.py\n<asa> ESTABLISH LOCAL CONNECTION FOR USER: root\n<asa> EXEC /bin/sh -c '( umask 77 && mkdir -p \"` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `\" && echo ansible-tmp-1473449999.57-34289925616872=\"` echo $HOME/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872 `\" ) && sleep 0'\n<asa> PUT /tmp/tmpu2hszs TO /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py\n<asa> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/ /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py && sleep 0'\n<asa> EXEC /bin/sh -c '/usr/bin/python /root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/asa_acl.py; rm -rf \"/root/.ansible/tmp/ansible-tmp-1473449999.57-34289925616872/\" > /dev/null 2>&1 && sleep 0'\nAn exception occurred during task execution. The full traceback is:\nTraceback (most recent call last):\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 202, in <module>\n main()\n File \"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\", line 184, in main\n commands = candidate.difference(config)\nNameError: global name 'candidate' is not defined\n\nfatal: [asa]: FAILED! => {\n \"changed\": false,\n \"failed\": true,\n \"invocation\": {\n \"module_name\": \"asa_acl\"\n },\n \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\\\", line 202, in <module>\\n main()\\n File \\\"/tmp/ansible_o9grpZ/ansible_module_asa_acl.py\\\", line 184, in main\\n commands = candidate.difference(config)\\nNameError: global name 'candidate' is not defined\\n\",\n \"module_stdout\": \"\",\n \"msg\": \"MODULE FAILURE\"\n}\n\nPLAY RECAP *********************************************************************\nasa : ok=0 changed=0 unreachable=0 failed=1\n\n\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: asa_acl\nversion_added: \"2.2\"\nauthor: \"Patrick Ogenstad (@ogenstad)\"\nshort_description: Manage access-lists on a Cisco ASA\ndescription:\n - This module allows you to work with access-lists on a Cisco ASA device.\nextends_documentation_fragment: asa\noptions:\n lines:\n description:\n - The ordered set of commands that should be configured in the\n section. The commands must be the exact same commands as found\n in the device running-config. Be sure to note the configuration\n command syntanx as some commands are automatically modified by the\n device config parser.\n required: true\n before:\n description:\n - The ordered set of commands to push on to the command stack if\n a change needs to be made. This allows the playbook designer\n the opportunity to perform configuration commands prior to pushing\n any changes without affecting how the set of commands are matched\n against the system\n required: false\n default: null\n after:\n description:\n - The ordered set of commands to append to the end of the command\n stack if a changed needs to be made. Just like with I(before) this\n allows the playbook designer to append a set of commands to be\n executed after the command set.\n required: false\n default: null\n match:\n description:\n - Instructs the module on the way to perform the matching of\n the set of commands against the current device config. If\n match is set to I(line), commands are matched line by line. If\n match is set to I(strict), command lines are matched with respect\n to position. Finally if match is set to I(exact), command lines\n must be an equal match.\n required: false\n default: line\n choices: ['line', 'strict', 'exact']\n replace:\n description:\n - Instructs the module on the way to perform the configuration\n on the device. If the replace argument is set to I(line) then\n the modified lines are pushed to the device in configuration\n mode. If the replace argument is set to I(block) then the entire\n command block is pushed to the device in configuration mode if any\n line is not correct\n required: false\n default: line\n choices: ['line', 'block']\n force:\n description:\n - The force argument instructs the module to not consider the\n current devices running-config. When set to true, this will\n cause the module to push the contents of I(src) into the device\n without first checking if already configured.\n required: false\n default: false\n choices: ['yes', 'no']\n config:\n description:\n - The module, by default, will connect to the remote device and\n retrieve the current running-config to use as a base for comparing\n against the contents of source. There are times when it is not\n desirable to have the task get the current running-config for\n every task in a playbook. 
The I(config) argument allows the\n implementer to pass in the configuruation to use as the base\n config for comparision.\n required: false\n default: null\n\"\"\"\n\nEXAMPLES = \"\"\"\n\n- asa_acl:\n lines:\n - access-list ACL-ANSIBLE extended permit tcp any any eq 82\n - access-list ACL-ANSIBLE extended permit tcp any any eq www\n - access-list ACL-ANSIBLE extended permit tcp any any eq 97\n - access-list ACL-ANSIBLE extended permit tcp any any eq 98\n - access-list ACL-ANSIBLE extended permit tcp any any eq 99\n before: clear configure access-list ACL-ANSIBLE\n match: strict\n replace: block\n\n- asa_acl:\n lines:\n - access-list ACL-OUTSIDE extended permit tcp any any eq www\n - access-list ACL-OUTSIDE extended permit tcp any any eq https\n context: customer_a\n\"\"\"\n\nRETURN = \"\"\"\nupdates:\n description: The set of commands that will be pushed to the remote device\n returned: always\n type: list\n sample: ['...', '...']\n\nresponses:\n description: The set of responses from issuing the commands on the device\n retured: when not check_mode\n type: list\n sample: ['...', '...']\n\"\"\"\nfrom ansible.module_utils.netcfg import NetworkConfig\nfrom ansible.module_utils.asa import NetworkModule\n\n\ndef get_config(module):\n config = module.params['config'] or dict()\n if not config and not module.params['force']:\n config = module.config\n return config\n\n\ndef check_input_acl(lines, module):\n first_line = True\n for line in lines:\n ace = line.split()\n if ace[0] != 'access-list':\n module.fail_json(msg='All lines/commands must begin with \"access-list\" %s is not permitted' % ace[0])\n if len(ace) <= 1:\n module.fail_json(msg='All lines/commainds must contain the name of the access-list')\n if first_line:\n acl_name = ace[1]\n else:\n if acl_name != ace[1]:\n module.fail_json(msg='All lines/commands must use the same access-list %s is not %s' % (ace[1], acl_name))\n first_line = False\n\n return 'access-list %s' % acl_name\n\ndef main():\n\n argument_spec = dict(\n lines=dict(aliases=['commands'], required=True, type='list'),\n before=dict(type='list'),\n after=dict(type='list'),\n match=dict(default='line', choices=['line', 'strict', 'exact']),\n replace=dict(default='line', choices=['line', 'block']),\n force=dict(default=False, type='bool'),\n config=dict()\n )\n\n module = NetworkModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n lines = module.params['lines']\n\n before = module.params['before']\n after = module.params['after']\n\n match = module.params['match']\n replace = module.params['replace']\n\n module.filter = check_input_acl(lines, module)\n if not module.params['force']:\n commands = candidate.difference(config)\n commands = dumps(commands, 'commands').split('\\n')\n commands = [str(c) for c in commands if c]\n else:\n commands = str(candidate).split('\\n')\n\n if commands:\n if not module.check_mode:\n response = module.config(commands)\n result['responses'] = response\n result['changed'] = True\n\n result['updates'] = commands\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "network/asa/asa_acl.py"}]} | 3,949 | 305 |
gh_patches_debug_36013 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UTF8 characters on version slugging -- or slugging in general
This ticket came up as part of #1407. We should make sure version slugging handles UTF-8 characters in a sane way.
</issue>
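To see the concern concretely, the snippet below (not part of the codebase) replays the regex-based `slugify` from the listing that follows on a couple of tag names. Anything outside `[-._a-z0-9]` becomes a dash, and leading punctuation is then stripped, so a fully non-ASCII prefix simply disappears:

```python
import re

# Same patterns as VersionSlugField in the listing below.
invalid_chars_re = re.compile('[^-._a-z0-9]')
leading_punctuation_re = re.compile('^[-._]+')

def regex_slugify(content, placeholder='-', fallback='unknown'):
    slugified = invalid_chars_re.sub(placeholder, content.lower())
    slugified = leading_punctuation_re.sub('', slugified)
    return slugified or fallback

print(regex_slugify('release/1.0'))   # 'release-1.0'
print(regex_slugify('версия-1.0'))    # '1.0' -- the Cyrillic prefix is lost entirely
```

The fix that eventually landed (see the golden diff further down) swaps this logic for the `unicode-slugify` package, which is meant to handle Unicode input properly.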
<code>
[start of readthedocs/builds/version_slug.py]
1 # -*- coding: utf-8 -*-
2
3 """
4 Contains logic for handling version slugs.
5
6 Handling slugs for versions is not too straightforward. We need to allow some
7 characters which are uncommon in usual slugs. They are dots and underscores.
8 Usually we want the slug to be the name of the tag or branch of the corresponding VCS
9 version. However we need to strip url-destroying characters like slashes.
10
11 So the syntax for version slugs should be:
12
13 * Start with a lowercase ascii char or a digit.
14 * All other characters must be lowercase ascii chars, digits or dots.
15
16 If uniqueness is not met for a slug in a project, we append an underscore and a letter
17 starting with ``a``. We keep increasing that letter until we have a unique
18 slug. This is used since using numbers in tags is too common and appending
19 another number would be confusing.
20 """
21
22 import math
23 import re
24 import string
25 from operator import truediv
26
27 from django.db import models
28 from django.utils.encoding import force_text
29
30
31 def get_fields_with_model(cls):
32 """
33 Replace deprecated function of the same name in Model._meta.
34
35 This replaces deprecated function (as of Django 1.10) in Model._meta as
36     prescribed in the Django docs.
37 https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api
38 """
39 return [(f, f.model if f.model != cls else None)
40 for f in cls._meta.get_fields()
41 if not f.is_relation or f.one_to_one or
42 (f.many_to_one and f.related_model)]
43
44
45 # Regex breakdown:
46 # [a-z0-9] -- start with alphanumeric value
47 # [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii
48 # *? -- allow multiple of those, but be not greedy about the matching
49 # (?: ... ) -- wrap everything so that the pattern cannot escape when used in
50 # regexes.
51 VERSION_SLUG_REGEX = '(?:[a-z0-9A-Z][-._a-z0-9A-Z]*?)'
52
53
54 class VersionSlugField(models.CharField):
55
56 """Inspired by ``django_extensions.db.fields.AutoSlugField``."""
57
58 invalid_chars_re = re.compile('[^-._a-z0-9]')
59 leading_punctuation_re = re.compile('^[-._]+')
60 placeholder = '-'
61 fallback_slug = 'unknown'
62 test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))
63
64 def __init__(self, *args, **kwargs):
65 kwargs.setdefault('db_index', True)
66
67 populate_from = kwargs.pop('populate_from', None)
68 if populate_from is None:
69 raise ValueError("missing 'populate_from' argument")
70 else:
71 self._populate_from = populate_from
72 super().__init__(*args, **kwargs)
73
74 def get_queryset(self, model_cls, slug_field):
75 # pylint: disable=protected-access
76 for field, model in get_fields_with_model(model_cls):
77 if model and field == slug_field:
78 return model._default_manager.all()
79 return model_cls._default_manager.all()
80
81 def slugify(self, content):
82 if not content:
83 return ''
84
85 slugified = content.lower()
86 slugified = self.invalid_chars_re.sub(self.placeholder, slugified)
87 slugified = self.leading_punctuation_re.sub('', slugified)
88
89 if not slugified:
90 return self.fallback_slug
91 return slugified
92
93 def uniquifying_suffix(self, iteration):
94 """
95 Create a unique suffix.
96
97 This creates a suffix based on the number given as ``iteration``. It
98 will return a value encoded as lowercase ascii letter. So we have an
99 alphabet of 26 letters. The returned suffix will be for example ``_yh``
100 where ``yh`` is the encoding of ``iteration``. The length of it will be
101 ``math.log(iteration, 26)``.
102
103 Examples::
104
105 uniquifying_suffix(0) == '_a'
106 uniquifying_suffix(25) == '_z'
107 uniquifying_suffix(26) == '_ba'
108 uniquifying_suffix(52) == '_ca'
109 """
110 alphabet = string.ascii_lowercase
111 length = len(alphabet)
112 if iteration == 0:
113 power = 0
114 else:
115 power = int(math.log(iteration, length))
116 current = iteration
117 suffix = ''
118 for exp in reversed(list(range(0, power + 1))):
119 digit = int(truediv(current, length ** exp))
120 suffix += alphabet[digit]
121 current = current % length ** exp
122 return '_{suffix}'.format(suffix=suffix)
123
124 def create_slug(self, model_instance):
125 """Generate a unique slug for a model instance."""
126 # pylint: disable=protected-access
127
128 # get fields to populate from and slug field to set
129 slug_field = model_instance._meta.get_field(self.attname)
130
131 slug = self.slugify(getattr(model_instance, self._populate_from))
132 count = 0
133
134 # strip slug depending on max_length attribute of the slug field
135 # and clean-up
136 slug_len = slug_field.max_length
137 if slug_len:
138 slug = slug[:slug_len]
139 original_slug = slug
140
141 # exclude the current model instance from the queryset used in finding
142 # the next valid slug
143 queryset = self.get_queryset(model_instance.__class__, slug_field)
144 if model_instance.pk:
145 queryset = queryset.exclude(pk=model_instance.pk)
146
147 # form a kwarg dict used to implement any unique_together constraints
148 kwargs = {}
149 for params in model_instance._meta.unique_together:
150 if self.attname in params:
151 for param in params:
152 kwargs[param] = getattr(model_instance, param, None)
153 kwargs[self.attname] = slug
154
155 # increases the number while searching for the next valid slug
156 # depending on the given slug, clean-up
157 while not slug or queryset.filter(**kwargs).exists():
158 slug = original_slug
159 end = self.uniquifying_suffix(count)
160 end_len = len(end)
161 if slug_len and len(slug) + end_len > slug_len:
162 slug = slug[:slug_len - end_len]
163 slug = slug + end
164 kwargs[self.attname] = slug
165 count += 1
166
167 assert self.test_pattern.match(slug), (
168 'Invalid generated slug: {slug}'.format(slug=slug)
169 )
170 return slug
171
172 def pre_save(self, model_instance, add):
173 value = getattr(model_instance, self.attname)
174 # We only create a new slug if none was set yet.
175 if not value and add:
176 value = force_text(self.create_slug(model_instance))
177 setattr(model_instance, self.attname, value)
178 return value
179
180 def deconstruct(self):
181 name, path, args, kwargs = super().deconstruct()
182 kwargs['populate_from'] = self._populate_from
183 return name, path, args, kwargs
184
[end of readthedocs/builds/version_slug.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/builds/version_slug.py b/readthedocs/builds/version_slug.py
--- a/readthedocs/builds/version_slug.py
+++ b/readthedocs/builds/version_slug.py
@@ -26,6 +26,7 @@
from django.db import models
from django.utils.encoding import force_text
+from slugify import slugify as unicode_slugify
def get_fields_with_model(cls):
@@ -53,13 +54,15 @@
class VersionSlugField(models.CharField):
- """Inspired by ``django_extensions.db.fields.AutoSlugField``."""
+ """
+ Inspired by ``django_extensions.db.fields.AutoSlugField``.
- invalid_chars_re = re.compile('[^-._a-z0-9]')
- leading_punctuation_re = re.compile('^[-._]+')
- placeholder = '-'
- fallback_slug = 'unknown'
+ Uses ``unicode-slugify`` to generate the slug.
+ """
+
+ ok_chars = '-._' # dash, dot, underscore
test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))
+ fallback_slug = 'unknown'
def __init__(self, *args, **kwargs):
kwargs.setdefault('db_index', True)
@@ -78,13 +81,42 @@
return model._default_manager.all()
return model_cls._default_manager.all()
+ def _normalize(self, content):
+ """
+ Normalize some invalid characters (/, %, !, ?) to become a dash (``-``).
+
+ .. note::
+
+ We replace these characters to a dash to keep compatibility with the
+ old behavior and also because it makes this more readable.
+
+ For example, ``release/1.0`` will become ``release-1.0``.
+ """
+ return re.sub('[/%!?]', '-', content)
+
def slugify(self, content):
+ """
+ Make ``content`` a valid slug.
+
+ It uses ``unicode-slugify`` behind the scenes which works properly with
+ Unicode characters.
+ """
if not content:
return ''
- slugified = content.lower()
- slugified = self.invalid_chars_re.sub(self.placeholder, slugified)
- slugified = self.leading_punctuation_re.sub('', slugified)
+ normalized = self._normalize(content)
+ slugified = unicode_slugify(
+ normalized,
+ only_ascii=True,
+ spaces=False,
+ lower=True,
+ ok=self.ok_chars,
+ space_replacement='-',
+ )
+
+ # Remove first character wile it's an invalid character for the
+ # beginning of the slug
+ slugified = slugified.lstrip(self.ok_chars)
if not slugified:
return self.fallback_slug
| {"golden_diff": "diff --git a/readthedocs/builds/version_slug.py b/readthedocs/builds/version_slug.py\n--- a/readthedocs/builds/version_slug.py\n+++ b/readthedocs/builds/version_slug.py\n@@ -26,6 +26,7 @@\n \n from django.db import models\n from django.utils.encoding import force_text\n+from slugify import slugify as unicode_slugify\n \n \n def get_fields_with_model(cls):\n@@ -53,13 +54,15 @@\n \n class VersionSlugField(models.CharField):\n \n- \"\"\"Inspired by ``django_extensions.db.fields.AutoSlugField``.\"\"\"\n+ \"\"\"\n+ Inspired by ``django_extensions.db.fields.AutoSlugField``.\n \n- invalid_chars_re = re.compile('[^-._a-z0-9]')\n- leading_punctuation_re = re.compile('^[-._]+')\n- placeholder = '-'\n- fallback_slug = 'unknown'\n+ Uses ``unicode-slugify`` to generate the slug.\n+ \"\"\"\n+\n+ ok_chars = '-._' # dash, dot, underscore\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n+ fallback_slug = 'unknown'\n \n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n@@ -78,13 +81,42 @@\n return model._default_manager.all()\n return model_cls._default_manager.all()\n \n+ def _normalize(self, content):\n+ \"\"\"\n+ Normalize some invalid characters (/, %, !, ?) to become a dash (``-``).\n+\n+ .. note::\n+\n+ We replace these characters to a dash to keep compatibility with the\n+ old behavior and also because it makes this more readable.\n+\n+ For example, ``release/1.0`` will become ``release-1.0``.\n+ \"\"\"\n+ return re.sub('[/%!?]', '-', content)\n+\n def slugify(self, content):\n+ \"\"\"\n+ Make ``content`` a valid slug.\n+\n+ It uses ``unicode-slugify`` behind the scenes which works properly with\n+ Unicode characters.\n+ \"\"\"\n if not content:\n return ''\n \n- slugified = content.lower()\n- slugified = self.invalid_chars_re.sub(self.placeholder, slugified)\n- slugified = self.leading_punctuation_re.sub('', slugified)\n+ normalized = self._normalize(content)\n+ slugified = unicode_slugify(\n+ normalized,\n+ only_ascii=True,\n+ spaces=False,\n+ lower=True,\n+ ok=self.ok_chars,\n+ space_replacement='-',\n+ )\n+\n+ # Remove first character wile it's an invalid character for the\n+ # beginning of the slug\n+ slugified = slugified.lstrip(self.ok_chars)\n \n if not slugified:\n return self.fallback_slug\n", "issue": "UTF8 characters on version slugging -- or slugging in general\nThis ticket came up as part of #1407. We should make sure version slugging is handling UTF8 characters in a sane way.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nContains logic for handling version slugs.\n\nHandling slugs for versions is not too straightforward. We need to allow some\ncharacters which are uncommon in usual slugs. They are dots and underscores.\nUsually we want the slug to be the name of the tag or branch corresponding VCS\nversion. However we need to strip url-destroying characters like slashes.\n\nSo the syntax for version slugs should be:\n\n* Start with a lowercase ascii char or a digit.\n* All other characters must be lowercase ascii chars, digits or dots.\n\nIf uniqueness is not met for a slug in a project, we append a dash and a letter\nstarting with ``a``. We keep increasing that letter until we have a unique\nslug. 
This is used since using numbers in tags is too common and appending\nanother number would be confusing.\n\"\"\"\n\nimport math\nimport re\nimport string\nfrom operator import truediv\n\nfrom django.db import models\nfrom django.utils.encoding import force_text\n\n\ndef get_fields_with_model(cls):\n \"\"\"\n Replace deprecated function of the same name in Model._meta.\n\n This replaces deprecated function (as of Django 1.10) in Model._meta as\n prescrived in the Django docs.\n https://docs.djangoproject.com/en/1.11/ref/models/meta/#migrating-from-the-old-api\n \"\"\"\n return [(f, f.model if f.model != cls else None)\n for f in cls._meta.get_fields()\n if not f.is_relation or f.one_to_one or\n (f.many_to_one and f.related_model)]\n\n\n# Regex breakdown:\n# [a-z0-9] -- start with alphanumeric value\n# [-._a-z0-9] -- allow dash, dot, underscore, digit, lowercase ascii\n# *? -- allow multiple of those, but be not greedy about the matching\n# (?: ... ) -- wrap everything so that the pattern cannot escape when used in\n# regexes.\nVERSION_SLUG_REGEX = '(?:[a-z0-9A-Z][-._a-z0-9A-Z]*?)'\n\n\nclass VersionSlugField(models.CharField):\n\n \"\"\"Inspired by ``django_extensions.db.fields.AutoSlugField``.\"\"\"\n\n invalid_chars_re = re.compile('[^-._a-z0-9]')\n leading_punctuation_re = re.compile('^[-._]+')\n placeholder = '-'\n fallback_slug = 'unknown'\n test_pattern = re.compile('^{pattern}$'.format(pattern=VERSION_SLUG_REGEX))\n\n def __init__(self, *args, **kwargs):\n kwargs.setdefault('db_index', True)\n\n populate_from = kwargs.pop('populate_from', None)\n if populate_from is None:\n raise ValueError(\"missing 'populate_from' argument\")\n else:\n self._populate_from = populate_from\n super().__init__(*args, **kwargs)\n\n def get_queryset(self, model_cls, slug_field):\n # pylint: disable=protected-access\n for field, model in get_fields_with_model(model_cls):\n if model and field == slug_field:\n return model._default_manager.all()\n return model_cls._default_manager.all()\n\n def slugify(self, content):\n if not content:\n return ''\n\n slugified = content.lower()\n slugified = self.invalid_chars_re.sub(self.placeholder, slugified)\n slugified = self.leading_punctuation_re.sub('', slugified)\n\n if not slugified:\n return self.fallback_slug\n return slugified\n\n def uniquifying_suffix(self, iteration):\n \"\"\"\n Create a unique suffix.\n\n This creates a suffix based on the number given as ``iteration``. It\n will return a value encoded as lowercase ascii letter. So we have an\n alphabet of 26 letters. The returned suffix will be for example ``_yh``\n where ``yh`` is the encoding of ``iteration``. 
The length of it will be\n ``math.log(iteration, 26)``.\n\n Examples::\n\n uniquifying_suffix(0) == '_a'\n uniquifying_suffix(25) == '_z'\n uniquifying_suffix(26) == '_ba'\n uniquifying_suffix(52) == '_ca'\n \"\"\"\n alphabet = string.ascii_lowercase\n length = len(alphabet)\n if iteration == 0:\n power = 0\n else:\n power = int(math.log(iteration, length))\n current = iteration\n suffix = ''\n for exp in reversed(list(range(0, power + 1))):\n digit = int(truediv(current, length ** exp))\n suffix += alphabet[digit]\n current = current % length ** exp\n return '_{suffix}'.format(suffix=suffix)\n\n def create_slug(self, model_instance):\n \"\"\"Generate a unique slug for a model instance.\"\"\"\n # pylint: disable=protected-access\n\n # get fields to populate from and slug field to set\n slug_field = model_instance._meta.get_field(self.attname)\n\n slug = self.slugify(getattr(model_instance, self._populate_from))\n count = 0\n\n # strip slug depending on max_length attribute of the slug field\n # and clean-up\n slug_len = slug_field.max_length\n if slug_len:\n slug = slug[:slug_len]\n original_slug = slug\n\n # exclude the current model instance from the queryset used in finding\n # the next valid slug\n queryset = self.get_queryset(model_instance.__class__, slug_field)\n if model_instance.pk:\n queryset = queryset.exclude(pk=model_instance.pk)\n\n # form a kwarg dict used to implement any unique_together constraints\n kwargs = {}\n for params in model_instance._meta.unique_together:\n if self.attname in params:\n for param in params:\n kwargs[param] = getattr(model_instance, param, None)\n kwargs[self.attname] = slug\n\n # increases the number while searching for the next valid slug\n # depending on the given slug, clean-up\n while not slug or queryset.filter(**kwargs).exists():\n slug = original_slug\n end = self.uniquifying_suffix(count)\n end_len = len(end)\n if slug_len and len(slug) + end_len > slug_len:\n slug = slug[:slug_len - end_len]\n slug = slug + end\n kwargs[self.attname] = slug\n count += 1\n\n assert self.test_pattern.match(slug), (\n 'Invalid generated slug: {slug}'.format(slug=slug)\n )\n return slug\n\n def pre_save(self, model_instance, add):\n value = getattr(model_instance, self.attname)\n # We only create a new slug if none was set yet.\n if not value and add:\n value = force_text(self.create_slug(model_instance))\n setattr(model_instance, self.attname, value)\n return value\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n kwargs['populate_from'] = self._populate_from\n return name, path, args, kwargs\n", "path": "readthedocs/builds/version_slug.py"}]} | 2,547 | 621 |
gh_patches_debug_4890 | rasdani/github-patches | git_diff | gratipay__gratipay.com-2162 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
500 from credit card page
Reticketing from #2155.
https://app.getsentry.com/gittip/gittip/group/16333701/ (login required)
```
TypeError: 'NoneType' object has no attribute '__getitem__'
Stacktrace (most recent call last):
File "site-packages/algorithm.py", line 288, in run
new_state = function(**deps.as_kwargs)
File "aspen/algorithms/website.py", line 88, in get_response_for_resource
return {'response': resource.respond(request)}
File "aspen/resources/dynamic_resource.py", line 68, in respond
response = self.get_response(context)
File "aspen/resources/negotiated_resource.py", line 98, in get_response
response.body = render(context)
File "aspen/renderers/__init__.py", line 99, in __call__
return self.render_content(context)
File "site-packages/aspen_jinja2_renderer.py", line 66, in render_content
return self.compiled.render(context).encode(charset)
File "jinja2/environment.py", line 969, in render
return self.environment.handle_exception(exc_info, True)
File "jinja2/environment.py", line 742, in handle_exception
reraise(exc_type, exc_value, tb)
File "/app/www/credit-card.html.spt", line 1, in top-level template code
import traceback
File "/app/templates/base.html", line 54, in top-level template code
{% block killbox %}
File "/app/templates/base.html", line 56, in block "killbox"
{% block box %}{% endblock %}
File "/app/www/credit-card.html.spt", line 56, in block "box"
Gittip.payments.cc.init("{{ balanced.Marketplace.my_marketplace.uri }}", "{{ user.participant.username }}");
```
</issue>
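The `TypeError` means something subscripted `None` while the credit-card page was rendering. The golden diff at the end of this entry points at `BalancedThing.__getitem__`: when the attribute walk over the Balanced objects comes up empty it returns `None`, which downstream template code apparently then indexes. A stand-alone sketch of the defensive pattern the fix adopts (hypothetical helper, not Gittip's actual API):

```python
def walk_attrs(obj, dotted_path, default=''):
    """Follow a dotted path through attributes/dict keys, never handing back None."""
    out = obj
    for part in dotted_path.split('.'):
        out = out.get(part) if isinstance(out, dict) else getattr(out, part, None)
        if out is None:
            return default      # mirror the fix: fall back to '' instead of None
    return out
```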
<code>
[start of gittip/billing/__init__.py]
1 """This module encapsulates billing logic and db access.
2
3 There are three pieces of information for each participant related to billing:
4
5 balanced_customer_href
6 * NULL - This participant has never been billed.
7 * 'deadbeef' - This participant has had a Balanced account created for
8 them, either by adding a credit card or a bank account.
9 last_bill_result
10 * NULL - This participant has not had their credit card charged yet.
11 * '' - This participant has a working card.
12 * <message> - An error message.
13 last_ach_result
14 * NULL - This participant has not wired up a bank account yet.
15 * '' - This participant has a working bank account.
16 * <message> - An error message.
17
18 """
19 from __future__ import unicode_literals
20
21 import balanced
22 import stripe
23 from aspen.utils import typecheck
24
25
26 def get_balanced_account(db, username, balanced_customer_href):
27 """Find or create a balanced.Account.
28 """
29 typecheck( username, unicode
30 , balanced_customer_href, (unicode, None)
31 )
32
33 if balanced_customer_href is None:
34 customer = balanced.Customer(meta={
35 'username': username,
36 }).save()
37 BALANCED_ACCOUNT = """\
38
39 UPDATE participants
40 SET balanced_customer_href=%s
41 WHERE username=%s
42
43 """
44 db.run(BALANCED_ACCOUNT, (customer.href, username))
45 else:
46 customer = balanced.Customer.fetch(balanced_customer_href)
47 return customer
48
49
50 def associate(db, thing, username, balanced_customer_href, balanced_thing_uri):
51 """Given four unicodes, return a unicode.
52
53 This function attempts to associate the credit card or bank account details
54 referenced by balanced_thing_uri with a Balanced Account. If it fails we
55 log and return a unicode describing the failure. Even for failure we keep
56 balanced_customer_href; we don't reset it to None/NULL. It's useful for
57 loading the previous (bad) info from Balanced in order to prepopulate the
58 form.
59
60 """
61 typecheck( username, unicode
62 , balanced_customer_href, (unicode, None, balanced.Customer)
63 , balanced_thing_uri, unicode
64 , thing, unicode
65 )
66
67 if isinstance(balanced_customer_href, balanced.Customer):
68 balanced_account = balanced_customer_href
69 else:
70 balanced_account = get_balanced_account( db
71 , username
72 , balanced_customer_href
73 )
74 invalidate_on_balanced(thing, balanced_account.href)
75 SQL = "UPDATE participants SET last_%s_result=%%s WHERE username=%%s"
76 try:
77 if thing == "credit card":
78 SQL %= "bill"
79 obj = balanced.Card.fetch(balanced_thing_uri)
80 #add = balanced_account.add_card
81
82 else:
83 assert thing == "bank account", thing # sanity check
84 SQL %= "ach"
85 obj = balanced.BankAccount.fetch(balanced_thing_uri)
86 #add = balanced_account.add_bank_account
87
88 obj.associate_to_customer(balanced_account)
89 except balanced.exc.HTTPError as err:
90 error = err.message.message.decode('UTF-8') # XXX UTF-8?
91 else:
92 error = ''
93 typecheck(error, unicode)
94
95 db.run(SQL, (error, username))
96 return error
97
98
99 def invalidate_on_balanced(thing, balanced_customer_href):
100 """XXX Things in balanced cannot be deleted at the moment.
101
102     Instead we mark all valid cards as invalid, which prevents anyone from
103     issuing charges against them in the future.
104
105 See: https://github.com/balanced/balanced-api/issues/22
106
107 """
108 assert thing in ("credit card", "bank account")
109 typecheck(balanced_customer_href, (str, unicode))
110
111 customer = balanced.Customer.fetch(balanced_customer_href)
112 things = customer.cards if thing == "credit card" else customer.bank_accounts
113
114 for _thing in things:
115 _thing.unstore()
116
117
118 def clear(db, thing, username, balanced_customer_href):
119 typecheck( thing, unicode
120 , username, unicode
121 , balanced_customer_href, (unicode, str)
122 )
123 assert thing in ("credit card", "bank account"), thing
124 invalidate_on_balanced(thing, balanced_customer_href)
125 CLEAR = """\
126
127 UPDATE participants
128 SET last_%s_result=NULL
129 WHERE username=%%s
130
131 """ % ("bill" if thing == "credit card" else "ach")
132 db.run(CLEAR, (username,))
133
134
135 def store_error(db, thing, username, msg):
136 typecheck(thing, unicode, username, unicode, msg, unicode)
137 assert thing in ("credit card", "bank account"), thing
138 ERROR = """\
139
140 UPDATE participants
141 SET last_%s_result=%%s
142 WHERE username=%%s
143
144 """ % ("bill" if thing == "credit card" else "ach")
145 db.run(ERROR, (msg, username))
146
147
148 # Card
149 # ====
150 # While we're migrating data we need to support loading data from both Stripe
151 # and Balanced.
152
153 class StripeCard(object):
154 """This is a dict-like wrapper around a Stripe PaymentMethod.
155 """
156
157 _customer = None # underlying stripe.Customer object
158
159 def __init__(self, stripe_customer_id):
160 """Given a Stripe customer id, load data from Stripe.
161 """
162 if stripe_customer_id is not None:
163 self._customer = stripe.Customer.retrieve(stripe_customer_id)
164
165 def _get(self, name, default=""):
166 """Given a name, return a string.
167 """
168 out = ""
169 if self._customer is not None:
170 out = self._customer.get('active_card', {}).get(name, "")
171 if out is None:
172 out = default
173 return out
174
175 def __getitem__(self, name):
176 """Given a name, return a string.
177 """
178 if name == 'id':
179 out = self._customer.id if self._customer is not None else None
180 elif name == 'last4':
181 out = self._get('last4')
182 if out:
183 out = "************" + out
184 else:
185 name = { 'address_1': 'address_line1'
186 , 'address_2': 'address_line2'
187 , 'state': 'address_state'
188 , 'zip': 'address_zip'
189 }.get(name, name)
190 out = self._get(name)
191 return out
192
193
194 class BalancedThing(object):
195 """Represent either a credit card or a bank account.
196 """
197
198 thing_type = None # either 'card' or 'bank_account'
199 keys_to_attr_paths = None # set to a mapping in subclasses
200
201 _customer = None # underlying balanced.Customer object
202 _thing = None # underlying balanced.{BankAccount,Card} object
203
204 def __getitem__(self, key):
205 """Given a name, return a unicode.
206
207 Allow subclasses to provide a flat set of keys, which, under the hood,
208 might be nested attributes and/or keys. The traversal path is relative
209 to _thing (not self!).
210
211 """
212 attr_path = self.keys_to_attr_paths.get(key, key)
213
214 out = None
215 if self._customer is not None and self._thing is not None:
216 out = self._thing
217 for val in attr_path.split('.'):
218 if type(out) is dict:
219 # this lets us reach into the meta dict
220 out = out.get(val)
221 else:
222 try:
223 out = getattr(out, val)
224 except AttributeError:
225 raise KeyError("{} not found".format(val))
226 if out is None:
227 break
228 return out
229
230 def __init__(self, balanced_customer_href):
231 """Given a Balanced account_uri, load data from Balanced.
232 """
233 if balanced_customer_href is None:
234 return
235
236         # XXX Indexing is broken. See:
237 # https://github.com/balanced/balanced-python/issues/10
238
239 self._customer = balanced.Customer.fetch(balanced_customer_href)
240
241 things = getattr(self._customer, self.thing_type+'s')\
242 .filter(is_valid=True).all()
243 nvalid = len(things)
244
245 if nvalid == 0:
246 self._thing = None
247 elif nvalid == 1:
248 self._thing = things[0]
249 else:
250 msg = "%s has %d valid %ss"
251 msg %= (balanced_customer_href, len(things), self.thing_type)
252 raise RuntimeError(msg)
253
254 @property
255 def is_setup(self):
256 return self._thing is not None
257
258
259 class BalancedCard(BalancedThing):
260 """This is a dict-like wrapper around a Balanced credit card.
261 """
262
263 thing_type = 'card'
264
265 keys_to_attr_paths = {
266 'id': 'customer.href',
267 'address_1': 'address.line1',
268 'address_2': 'meta.address_2',
269 'country': 'meta.country',
270 'city_town': 'meta.city_town',
271 'zip': 'address.postal_code',
272 # gittip is saving the state in the meta field
273 # for compatibility with legacy customers
274 'state': 'meta.region',
275 'last4': 'number',
276 'last_four': 'number',
277 }
278
279
280 class BalancedBankAccount(BalancedThing):
281 """This is a dict-like wrapper around a Balanced bank account.
282 """
283
284 thing_type = 'bank_account'
285
286 keys_to_attr_paths = {
287 'customer_href': 'customer.href',
288 }
289
[end of gittip/billing/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py
--- a/gittip/billing/__init__.py
+++ b/gittip/billing/__init__.py
@@ -225,6 +225,11 @@
raise KeyError("{} not found".format(val))
if out is None:
break
+
+ if out is None:
+ # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.
+ out = ''
+
return out
def __init__(self, balanced_customer_href):
| {"golden_diff": "diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py\n--- a/gittip/billing/__init__.py\n+++ b/gittip/billing/__init__.py\n@@ -225,6 +225,11 @@\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n+\n+ if out is None:\n+ # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n+ out = ''\n+\n return out\n \n def __init__(self, balanced_customer_href):\n", "issue": "500 from credit card page\nReticketing from #2155.\n\nhttps://app.getsentry.com/gittip/gittip/group/16333701/ (login required)\n\n```\nTypeError: 'NoneType' object has no attribute '__getitem__'\n\nStacktrace (most recent call last):\n\n File \"site-packages/algorithm.py\", line 288, in run\n new_state = function(**deps.as_kwargs)\n File \"aspen/algorithms/website.py\", line 88, in get_response_for_resource\n return {'response': resource.respond(request)}\n File \"aspen/resources/dynamic_resource.py\", line 68, in respond\n response = self.get_response(context)\n File \"aspen/resources/negotiated_resource.py\", line 98, in get_response\n response.body = render(context)\n File \"aspen/renderers/__init__.py\", line 99, in __call__\n return self.render_content(context)\n File \"site-packages/aspen_jinja2_renderer.py\", line 66, in render_content\n return self.compiled.render(context).encode(charset)\n File \"jinja2/environment.py\", line 969, in render\n return self.environment.handle_exception(exc_info, True)\n File \"jinja2/environment.py\", line 742, in handle_exception\n reraise(exc_type, exc_value, tb)\n File \"/app/www/credit-card.html.spt\", line 1, in top-level template code\n import traceback\n File \"/app/templates/base.html\", line 54, in top-level template code\n {% block killbox %}\n File \"/app/templates/base.html\", line 56, in block \"killbox\"\n {% block box %}{% endblock %}\n File \"/app/www/credit-card.html.spt\", line 56, in block \"box\"\n Gittip.payments.cc.init(\"{{ balanced.Marketplace.my_marketplace.uri }}\", \"{{ user.participant.username }}\");\n```\n\n", "before_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nimport stripe\nfrom aspen.utils import typecheck\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n 
\"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n\n\ndef store_error(db, thing, username, msg):\n typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n\n\n# Card\n# ====\n# While we're migrating data we need to support loading data from both Stripe\n# and Balanced.\n\nclass StripeCard(object):\n \"\"\"This is a dict-like wrapper around a Stripe PaymentMethod.\n \"\"\"\n\n _customer = None # underlying stripe.Customer object\n\n def __init__(self, stripe_customer_id):\n \"\"\"Given a Stripe customer id, load data from Stripe.\n \"\"\"\n if stripe_customer_id is not None:\n self._customer = stripe.Customer.retrieve(stripe_customer_id)\n\n def _get(self, name, default=\"\"):\n \"\"\"Given a name, return a string.\n \"\"\"\n out = \"\"\n if 
self._customer is not None:\n out = self._customer.get('active_card', {}).get(name, \"\")\n if out is None:\n out = default\n return out\n\n def __getitem__(self, name):\n \"\"\"Given a name, return a string.\n \"\"\"\n if name == 'id':\n out = self._customer.id if self._customer is not None else None\n elif name == 'last4':\n out = self._get('last4')\n if out:\n out = \"************\" + out\n else:\n name = { 'address_1': 'address_line1'\n , 'address_2': 'address_line2'\n , 'state': 'address_state'\n , 'zip': 'address_zip'\n }.get(name, name)\n out = self._get(name)\n return out\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}]} | 3,819 | 137 |
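
The patch above makes `BalancedThing.__getitem__` fall back to an empty string when an attribute path resolves to `None`. Below is a minimal, self-contained sketch of that behaviour; the `Thing` class and its nested-dict data are hypothetical stand-ins for `BalancedThing` and the Balanced API objects, not code from the gittip repository.

```python
# Hypothetical stand-in for BalancedThing.__getitem__: walk a dotted attribute
# path through nested dicts and default a missing/None result to '' so that
# callers and templates never receive None (mirrors the intent of the diff above).
class Thing:
    keys_to_attr_paths = {'last4': 'card.number'}

    def __init__(self, data):
        self._data = data  # nested dicts standing in for Balanced objects

    def __getitem__(self, key):
        attr_path = self.keys_to_attr_paths.get(key, key)
        out = self._data
        for val in attr_path.split('.'):
            out = out.get(val) if isinstance(out, dict) else None
            if out is None:
                break
        if out is None:
            out = ''  # default to '' instead of returning None
        return out


if __name__ == '__main__':
    t = Thing({'card': {}})   # customer with no stored card number
    assert t['last4'] == ''   # previously this lookup yielded None
    print('last4 ->', repr(t['last4']))
```
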
gh_patches_debug_16542 | rasdani/github-patches | git_diff | Kinto__kinto-1637 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash in batch view deserialization
```
AttributeError: 'bytes' object has no attribute 'get'
File "pyramid/tweens.py", line 39, in excview_tween
response = handler(request)
File "kinto/core/events.py", line 76, in tween
response = handler(request)
File "pyramid/router.py", line 156, in handle_request
view_name
File "pyramid/view.py", line 642, in _call_view
response = view_callable(context, request)
File "pyramid/config/views.py", line 181, in __call__
return view(context, request)
File "pyramid/viewderivers.py", line 390, in attr_view
return view(context, request)
File "pyramid/viewderivers.py", line 368, in predicate_wrapper
return view(context, request)
File "pyramid/viewderivers.py", line 439, in rendered_view
result = view(context, request)
File "pyramid/viewderivers.py", line 148, in _requestonly_view
response = view(request)
File "cornice/service.py", line 487, in wrapper
validator(request, **args)
File "cornice/validators/_colander.py", line 113, in validator
deserialized = schema.deserialize(cstruct)
File "colander/__init__.py", line 2073, in deserialize
appstruct = self.typ.deserialize(self, cstruct)
File "colander/__init__.py", line 724, in deserialize
return self._impl(node, cstruct, callback)
File "colander/__init__.py", line 683, in _impl
sub_result = callback(subnode, subval)
File "colander/__init__.py", line 722, in callback
return subnode.deserialize(subcstruct)
File "kinto/core/views/batch.py", line 68, in deserialize
defaults = cstruct.get('defaults')
```
</issue>
<code>
[start of kinto/core/views/batch.py]
1 import logging
2
3 import colander
4 from cornice.validators import colander_validator
5 from pyramid import httpexceptions
6 from pyramid.security import NO_PERMISSION_REQUIRED
7
8 from kinto.core import errors
9 from kinto.core import Service
10 from kinto.core.errors import ErrorSchema
11 from kinto.core.utils import merge_dicts, build_request, build_response
12
13
14 subrequest_logger = logging.getLogger('subrequest.summary')
15
16 valid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',
17 'POST', 'PUT', 'PATCH'))
18
19
20 def string_values(node, cstruct):
21 """Validate that a ``colander.Mapping`` only has strings in its values.
22
23 .. warning::
24
25 Should be associated to a ``colander.Mapping`` schema node.
26 """
27 are_strings = [isinstance(v, str) for v in cstruct.values()]
28 if not all(are_strings):
29 error_msg = '{} contains non string value'.format(cstruct)
30 raise colander.Invalid(node, error_msg)
31
32
33 class BatchRequestSchema(colander.MappingSchema):
34 method = colander.SchemaNode(colander.String(),
35 validator=valid_http_method,
36 missing=colander.drop)
37 path = colander.SchemaNode(colander.String(),
38 validator=colander.Regex('^/'))
39 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
40 validator=string_values,
41 missing=colander.drop)
42 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
43 missing=colander.drop)
44
45 @staticmethod
46 def schema_type():
47 return colander.Mapping(unknown='raise')
48
49
50 class BatchPayloadSchema(colander.MappingSchema):
51 defaults = BatchRequestSchema(missing=colander.drop).clone()
52 requests = colander.SchemaNode(colander.Sequence(),
53 BatchRequestSchema())
54
55 @staticmethod
56 def schema_type():
57 return colander.Mapping(unknown='raise')
58
59 def __init__(self, *args, **kwargs):
60 super().__init__(*args, **kwargs)
61 # On defaults, path is not mandatory.
62 self.get('defaults').get('path').missing = colander.drop
63
64 def deserialize(self, cstruct=colander.null):
65 """Preprocess received data to carefully merge defaults.
66 """
67 if cstruct is not colander.null:
68 defaults = cstruct.get('defaults')
69 requests = cstruct.get('requests')
70 if isinstance(defaults, dict) and isinstance(requests, list):
71 for request in requests:
72 if isinstance(request, dict):
73 merge_dicts(request, defaults)
74 return super().deserialize(cstruct)
75
76
77 class BatchRequest(colander.MappingSchema):
78 body = BatchPayloadSchema()
79
80
81 class BatchResponseSchema(colander.MappingSchema):
82 status = colander.SchemaNode(colander.Integer())
83 path = colander.SchemaNode(colander.String())
84 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
85 validator=string_values,
86 missing=colander.drop)
87 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
88 missing=colander.drop)
89
90
91 class BatchResponseBodySchema(colander.MappingSchema):
92 responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))
93
94
95 class BatchResponse(colander.MappingSchema):
96 body = BatchResponseBodySchema()
97
98
99 class ErrorResponseSchema(colander.MappingSchema):
100 body = ErrorSchema()
101
102
103 batch_responses = {
104 '200': BatchResponse(description='Return a list of operation responses.'),
105 '400': ErrorResponseSchema(description='The request was badly formatted.'),
106 'default': ErrorResponseSchema(description='an unknown error occurred.')
107 }
108
109 batch = Service(name='batch', path='/batch',
110 description='Batch operations')
111
112
113 @batch.post(schema=BatchRequest,
114 validators=(colander_validator,),
115 permission=NO_PERMISSION_REQUIRED,
116 tags=['Batch'], operation_id='batch',
117 response_schemas=batch_responses)
118 def post_batch(request):
119 requests = request.validated['body']['requests']
120
121 request.log_context(batch_size=len(requests))
122
123 limit = request.registry.settings['batch_max_requests']
124 if limit and len(requests) > int(limit):
125 error_msg = 'Number of requests is limited to {}'.format(limit)
126 request.errors.add('body', 'requests', error_msg)
127 return
128
129 if any([batch.path in req['path'] for req in requests]):
130 error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)
131 request.errors.add('body', 'requests', error_msg)
132 return
133
134 responses = []
135
136 for subrequest_spec in requests:
137 subrequest = build_request(request, subrequest_spec)
138
139 log_context = {**request.log_context(),
140 'path': subrequest.path,
141 'method': subrequest.method}
142 try:
143 # Invoke subrequest without individual transaction.
144 resp, subrequest = request.follow_subrequest(subrequest,
145 use_tweens=False)
146 except httpexceptions.HTTPException as e:
147 # Since some request in the batch failed, we need to stop the parent request
148 # through Pyramid's transaction manager. 5XX errors are already caught by
149 # pyramid_tm's commit_veto
150 # https://github.com/Kinto/kinto/issues/624
151 if e.status_code == 409:
152 request.tm.abort()
153
154 if e.content_type == 'application/json':
155 resp = e
156 else:
157 # JSONify raw Pyramid errors.
158 resp = errors.http_error(e)
159
160 subrequest_logger.info('subrequest.summary', extra=log_context)
161
162 dict_resp = build_response(resp, subrequest)
163 responses.append(dict_resp)
164
165 return {
166 'responses': responses
167 }
168
[end of kinto/core/views/batch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py
--- a/kinto/core/views/batch.py
+++ b/kinto/core/views/batch.py
@@ -9,6 +9,7 @@
from kinto.core import Service
from kinto.core.errors import ErrorSchema
from kinto.core.utils import merge_dicts, build_request, build_response
+from kinto.core.resource.viewset import CONTENT_TYPES
subrequest_logger = logging.getLogger('subrequest.summary')
@@ -112,6 +113,7 @@
@batch.post(schema=BatchRequest,
validators=(colander_validator,),
+ content_type=CONTENT_TYPES,
permission=NO_PERMISSION_REQUIRED,
tags=['Batch'], operation_id='batch',
response_schemas=batch_responses)
| {"golden_diff": "diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py\n--- a/kinto/core/views/batch.py\n+++ b/kinto/core/views/batch.py\n@@ -9,6 +9,7 @@\n from kinto.core import Service\n from kinto.core.errors import ErrorSchema\n from kinto.core.utils import merge_dicts, build_request, build_response\n+from kinto.core.resource.viewset import CONTENT_TYPES\n \n \n subrequest_logger = logging.getLogger('subrequest.summary')\n@@ -112,6 +113,7 @@\n \n @batch.post(schema=BatchRequest,\n validators=(colander_validator,),\n+ content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\n", "issue": "Crash in batch view deserialization\n```\r\nAttributeError: 'bytes' object has no attribute 'get'\r\n File \"pyramid/tweens.py\", line 39, in excview_tween\r\n response = handler(request)\r\n File \"kinto/core/events.py\", line 76, in tween\r\n response = handler(request)\r\n File \"pyramid/router.py\", line 156, in handle_request\r\n view_name\r\n File \"pyramid/view.py\", line 642, in _call_view\r\n response = view_callable(context, request)\r\n File \"pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 390, in attr_view\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 368, in predicate_wrapper\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 439, in rendered_view\r\n result = view(context, request)\r\n File \"pyramid/viewderivers.py\", line 148, in _requestonly_view\r\n response = view(request)\r\n File \"cornice/service.py\", line 487, in wrapper\r\n validator(request, **args)\r\n File \"cornice/validators/_colander.py\", line 113, in validator\r\n deserialized = schema.deserialize(cstruct)\r\n File \"colander/__init__.py\", line 2073, in deserialize\r\n appstruct = self.typ.deserialize(self, cstruct)\r\n File \"colander/__init__.py\", line 724, in deserialize\r\n return self._impl(node, cstruct, callback)\r\n File \"colander/__init__.py\", line 683, in _impl\r\n sub_result = callback(subnode, subval)\r\n File \"colander/__init__.py\", line 722, in callback\r\n return subnode.deserialize(subcstruct)\r\n File \"kinto/core/views/batch.py\", line 68, in deserialize\r\n defaults = cstruct.get('defaults')\r\n```\nCrash in batch view deserialization\n```\r\nAttributeError: 'bytes' object has no attribute 'get'\r\n File \"pyramid/tweens.py\", line 39, in excview_tween\r\n response = handler(request)\r\n File \"kinto/core/events.py\", line 76, in tween\r\n response = handler(request)\r\n File \"pyramid/router.py\", line 156, in handle_request\r\n view_name\r\n File \"pyramid/view.py\", line 642, in _call_view\r\n response = view_callable(context, request)\r\n File \"pyramid/config/views.py\", line 181, in __call__\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 390, in attr_view\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 368, in predicate_wrapper\r\n return view(context, request)\r\n File \"pyramid/viewderivers.py\", line 439, in rendered_view\r\n result = view(context, request)\r\n File \"pyramid/viewderivers.py\", line 148, in _requestonly_view\r\n response = view(request)\r\n File \"cornice/service.py\", line 487, in wrapper\r\n validator(request, **args)\r\n File \"cornice/validators/_colander.py\", line 113, in validator\r\n deserialized = schema.deserialize(cstruct)\r\n File \"colander/__init__.py\", line 2073, in 
deserialize\r\n appstruct = self.typ.deserialize(self, cstruct)\r\n File \"colander/__init__.py\", line 724, in deserialize\r\n return self._impl(node, cstruct, callback)\r\n File \"colander/__init__.py\", line 683, in _impl\r\n sub_result = callback(subnode, subval)\r\n File \"colander/__init__.py\", line 722, in callback\r\n return subnode.deserialize(subcstruct)\r\n File \"kinto/core/views/batch.py\", line 68, in deserialize\r\n defaults = cstruct.get('defaults')\r\n```\n", "before_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass 
ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}]} | 3,070 | 167 |
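
The traceback in this row comes from `BatchPayloadSchema.deserialize` assuming `cstruct` is a mapping, while a request whose body was not parsed as JSON can hand the validator raw `bytes`. The merged fix restricts the service's accepted content types; the sketch below shows an alternative, purely defensive guard at the schema level. It is illustrative only — the simplified schema and the inline `merge_dicts` helper are assumptions, not Kinto's shipped code.

```python
# Defensive variant of BatchPayloadSchema.deserialize: only attempt the
# defaults/requests merge when cstruct is actually a dict, and let colander
# reject anything else (e.g. a bytes body) with a validation error instead of
# crashing with AttributeError. Simplified sketch, not the fix Kinto merged.
import colander


def merge_dicts(dst, defaults):
    # Minimal stand-in for kinto.core.utils.merge_dicts.
    for key, value in defaults.items():
        dst.setdefault(key, value)


class BatchPayloadSchema(colander.MappingSchema):
    @staticmethod
    def schema_type():
        return colander.Mapping(unknown='preserve')

    def deserialize(self, cstruct=colander.null):
        if isinstance(cstruct, dict):  # guard: bytes/str bodies fall through
            defaults = cstruct.get('defaults')
            requests = cstruct.get('requests')
            if isinstance(defaults, dict) and isinstance(requests, list):
                for request in requests:
                    if isinstance(request, dict):
                        merge_dicts(request, defaults)
        return super().deserialize(cstruct)


if __name__ == '__main__':
    schema = BatchPayloadSchema()
    try:
        schema.deserialize(b'not json')        # no AttributeError any more
    except colander.Invalid as exc:
        print('rejected cleanly:', exc.asdict())
```
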
gh_patches_debug_8658 | rasdani/github-patches | git_diff | benoitc__gunicorn-931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Connection reset by peer" in handle_request isn't handled (or ignored)
We see a lot of these errors with HAProxy 1.5 in front of gunicorn:
```
2014-10-23 17:17:24,366 DEBUG OPTIONS /haproxy_check
2014-10-23 17:17:24,384 DEBUG Ignoring connection reset
2014-10-23 17:17:24,700 DEBUG OPTIONS /haproxy_check
2014-10-23 17:17:24,719 DEBUG Ignoring connection reset
2014-10-23 17:17:24,753 DEBUG OPTIONS /haproxy_check
2014-10-23 17:17:24,770 ERROR Error handling request
Traceback (most recent call last):
File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/workers/async.py", line 108, in handle_request
resp.write(item)
File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/http/wsgi.py", line 344, in write
util.write(self.sock, arg, self.chunked)
File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py", line 300, in write
return write_chunk(sock, data)
File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py", line 295, in write_chunk
sock.sendall(chunk)
File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py", line 458, in sendall
data_sent += self.send(_get_memory(data, data_sent), flags)
File "/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py", line 435, in send
return sock.send(data, flags)
error: [Errno 104] Connection reset by peer
2014-10-23 17:17:24,770 DEBUG Closing connection.
```
It seems that in the async worker, the error is sometimes caught and ignored in `handle`, but it also sometimes happens in `handle_request`. It should probably be ignored there as well?
</issue>
<code>
[start of gunicorn/workers/async.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import errno
8 import socket
9 import ssl
10 import sys
11
12 import gunicorn.http as http
13 import gunicorn.http.wsgi as wsgi
14 import gunicorn.util as util
15 import gunicorn.workers.base as base
16 from gunicorn import six
17
18 ALREADY_HANDLED = object()
19
20
21 class AsyncWorker(base.Worker):
22
23 def __init__(self, *args, **kwargs):
24 super(AsyncWorker, self).__init__(*args, **kwargs)
25 self.worker_connections = self.cfg.worker_connections
26
27 def timeout_ctx(self):
28 raise NotImplementedError()
29
30 def handle(self, listener, client, addr):
31 req = None
32 try:
33 parser = http.RequestParser(self.cfg, client)
34 try:
35 listener_name = listener.getsockname()
36 if not self.cfg.keepalive:
37 req = six.next(parser)
38 self.handle_request(listener_name, req, client, addr)
39 else:
40 # keepalive loop
41 proxy_protocol_info = {}
42 while True:
43 req = None
44 with self.timeout_ctx():
45 req = six.next(parser)
46 if not req:
47 break
48 if req.proxy_protocol_info:
49 proxy_protocol_info = req.proxy_protocol_info
50 else:
51 req.proxy_protocol_info = proxy_protocol_info
52 self.handle_request(listener_name, req, client, addr)
53 except http.errors.NoMoreData as e:
54 self.log.debug("Ignored premature client disconnection. %s", e)
55 except StopIteration as e:
56 self.log.debug("Closing connection. %s", e)
57 except ssl.SSLError:
58 exc_info = sys.exc_info()
59 # pass to next try-except level
60 six.reraise(exc_info[0], exc_info[1], exc_info[2])
61 except socket.error:
62 exc_info = sys.exc_info()
63 # pass to next try-except level
64 six.reraise(exc_info[0], exc_info[1], exc_info[2])
65 except Exception as e:
66 self.handle_error(req, client, addr, e)
67 except ssl.SSLError as e:
68 if e.args[0] == ssl.SSL_ERROR_EOF:
69 self.log.debug("ssl connection closed")
70 client.close()
71 else:
72 self.log.debug("Error processing SSL request.")
73 self.handle_error(req, client, addr, e)
74 except socket.error as e:
75 if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
76 self.log.exception("Socket error processing request.")
77 else:
78 if e.args[0] == errno.ECONNRESET:
79 self.log.debug("Ignoring connection reset")
80 else:
81 self.log.debug("Ignoring EPIPE")
82 except Exception as e:
83 self.handle_error(req, client, addr, e)
84 finally:
85 util.close(client)
86
87 def handle_request(self, listener_name, req, sock, addr):
88 request_start = datetime.now()
89 environ = {}
90 resp = None
91 try:
92 self.cfg.pre_request(self, req)
93 resp, environ = wsgi.create(req, sock, addr,
94 listener_name, self.cfg)
95 environ["wsgi.multithread"] = True
96 self.nr += 1
97 if self.alive and self.nr >= self.max_requests:
98 self.log.info("Autorestarting worker after current request.")
99 resp.force_close()
100 self.alive = False
101
102 if not self.cfg.keepalive:
103 resp.force_close()
104
105 respiter = self.wsgi(environ, resp.start_response)
106 if respiter == ALREADY_HANDLED:
107 return False
108 try:
109 if isinstance(respiter, environ['wsgi.file_wrapper']):
110 resp.write_file(respiter)
111 else:
112 for item in respiter:
113 resp.write(item)
114 resp.close()
115 request_time = datetime.now() - request_start
116 self.log.access(resp, req, environ, request_time)
117 finally:
118 if hasattr(respiter, "close"):
119 respiter.close()
120 if resp.should_close():
121 raise StopIteration()
122 except StopIteration:
123 raise
124 except Exception:
125 if resp and resp.headers_sent:
126 # If the requests have already been sent, we should close the
127 # connection to indicate the error.
128 self.log.exception("Error handling request")
129 try:
130 sock.shutdown(socket.SHUT_RDWR)
131 sock.close()
132 except socket.error:
133 pass
134 raise StopIteration()
135 raise
136 finally:
137 try:
138 self.cfg.post_request(self, req, environ, resp)
139 except Exception:
140 self.log.exception("Exception in post_request hook")
141 return True
142
[end of gunicorn/workers/async.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py
--- a/gunicorn/workers/async.py
+++ b/gunicorn/workers/async.py
@@ -118,6 +118,10 @@
raise StopIteration()
except StopIteration:
raise
+ except socket.error:
+ # If the original exception was a socket.error we delegate
+ # handling it to the caller (where handle() might ignore it
+ six.reraise(*sys.exc_info())
except Exception:
if resp and resp.headers_sent:
# If the requests have already been sent, we should close the
| {"golden_diff": "diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py\n--- a/gunicorn/workers/async.py\n+++ b/gunicorn/workers/async.py\n@@ -118,6 +118,10 @@\n raise StopIteration()\n except StopIteration:\n raise\n+ except socket.error:\n+ # If the original exception was a socket.error we delegate\n+ # handling it to the caller (where handle() might ignore it \n+ six.reraise(*sys.exc_info())\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n", "issue": "\"Connection reset by peer\" in handle_request isn't handled (or ignored)\nWe see a lot of these errors with HAProxy 1.5 in front of gunicorn:\n\n```\n2014-10-23 17:17:24,366 DEBUG OPTIONS /haproxy_check\n2014-10-23 17:17:24,384 DEBUG Ignoring connection reset\n2014-10-23 17:17:24,700 DEBUG OPTIONS /haproxy_check\n2014-10-23 17:17:24,719 DEBUG Ignoring connection reset\n2014-10-23 17:17:24,753 DEBUG OPTIONS /haproxy_check\n2014-10-23 17:17:24,770 ERROR Error handling request\nTraceback (most recent call last):\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/workers/async.py\", line 108, in handle_request\n resp.write(item)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/http/wsgi.py\", line 344, in write\n util.write(self.sock, arg, self.chunked)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py\", line 300, in write\n return write_chunk(sock, data)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gunicorn/util.py\", line 295, in write_chunk\n sock.sendall(chunk)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py\", line 458, in sendall\n data_sent += self.send(_get_memory(data, data_sent), flags)\n File \"/opt/lp-apps/webshell/lib/python2.7/site-packages/gevent/socket.py\", line 435, in send\n return sock.send(data, flags)\nerror: [Errno 104] Connection reset by peer\n2014-10-23 17:17:24,770 DEBUG Closing connection.\n```\n\nIt seems that in the async worker, the error is sometimes caught and ignored in `handle`, but it also sometimes happens in `handle_request`. 
It should probably be ignored there as well?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport errno\nimport socket\nimport ssl\nimport sys\n\nimport gunicorn.http as http\nimport gunicorn.http.wsgi as wsgi\nimport gunicorn.util as util\nimport gunicorn.workers.base as base\nfrom gunicorn import six\n\nALREADY_HANDLED = object()\n\n\nclass AsyncWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super(AsyncWorker, self).__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n\n def timeout_ctx(self):\n raise NotImplementedError()\n\n def handle(self, listener, client, addr):\n req = None\n try:\n parser = http.RequestParser(self.cfg, client)\n try:\n listener_name = listener.getsockname()\n if not self.cfg.keepalive:\n req = six.next(parser)\n self.handle_request(listener_name, req, client, addr)\n else:\n # keepalive loop\n proxy_protocol_info = {}\n while True:\n req = None\n with self.timeout_ctx():\n req = six.next(parser)\n if not req:\n break\n if req.proxy_protocol_info:\n proxy_protocol_info = req.proxy_protocol_info\n else:\n req.proxy_protocol_info = proxy_protocol_info\n self.handle_request(listener_name, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n except StopIteration as e:\n self.log.debug(\"Closing connection. %s\", e)\n except ssl.SSLError:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except socket.error:\n exc_info = sys.exc_info()\n # pass to next try-except level\n six.reraise(exc_info[0], exc_info[1], exc_info[2])\n except Exception as e:\n self.handle_error(req, client, addr, e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n client.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.args[0] == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n else:\n self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n util.close(client)\n\n def handle_request(self, listener_name, req, sock, addr):\n request_start = datetime.now()\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n resp, environ = wsgi.create(req, sock, addr,\n listener_name, self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.log.info(\"Autorestarting worker after current request.\")\n resp.force_close()\n self.alive = False\n\n if not self.cfg.keepalive:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n if respiter == ALREADY_HANDLED:\n return False\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n resp.close()\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n finally:\n if hasattr(respiter, \"close\"):\n respiter.close()\n if resp.should_close():\n raise StopIteration()\n except StopIteration:\n raise\n except Exception:\n if resp and resp.headers_sent:\n # If 
the requests have already been sent, we should close the\n # connection to indicate the error.\n self.log.exception(\"Error handling request\")\n try:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n except socket.error:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n return True\n", "path": "gunicorn/workers/async.py"}]} | 2,421 | 142 |
gh_patches_debug_10477 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
1.8.0 translation feedback: say replies are saved, not stored
## Description
From [AO](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=a6f950dfa57047f3): "Your reply has been saved." might be better than "Your reply has been stored."
</issue>
<code>
[start of securedrop/journalist_app/main.py]
1 # -*- coding: utf-8 -*-
2 from datetime import datetime
3 from typing import Union
4
5 import werkzeug
6 from flask import (Blueprint, request, current_app, session, url_for, redirect,
7 render_template, g, flash, abort, Markup, escape)
8 from flask_babel import gettext
9 from sqlalchemy.orm import joinedload
10 from sqlalchemy.sql import func
11
12 import store
13
14 from db import db
15 from models import SeenReply, Source, SourceStar, Submission, Reply
16 from journalist_app.forms import ReplyForm
17 from journalist_app.utils import (validate_user, bulk_delete, download,
18 confirm_bulk_delete, get_source)
19 from sdconfig import SDConfig
20
21
22 def make_blueprint(config: SDConfig) -> Blueprint:
23 view = Blueprint('main', __name__)
24
25 @view.route('/login', methods=('GET', 'POST'))
26 def login() -> Union[str, werkzeug.Response]:
27 if request.method == 'POST':
28 user = validate_user(request.form['username'],
29 request.form['password'],
30 request.form['token'])
31 if user:
32 current_app.logger.info("'{}' logged in with the two-factor code {}"
33 .format(request.form['username'],
34 request.form['token']))
35
36 # Update access metadata
37 user.last_access = datetime.utcnow()
38 db.session.add(user)
39 db.session.commit()
40
41 session['uid'] = user.id
42 session['nonce'] = user.session_nonce
43 return redirect(url_for('main.index'))
44
45 return render_template("login.html")
46
47 @view.route('/logout')
48 def logout() -> werkzeug.Response:
49 session.pop('uid', None)
50 session.pop('expires', None)
51 session.pop('nonce', None)
52 return redirect(url_for('main.index'))
53
54 @view.route("/")
55 def index() -> str:
56 # Gather the count of unread submissions for each source
57 # ID. This query will be joined in the queries for starred and
58 # unstarred sources below, and the unread counts added to
59 # their result sets as an extra column.
60 unread_stmt = (
61 db.session.query(Submission.source_id, func.count("*").label("num_unread"))
62 .filter_by(seen_files=None, seen_messages=None)
63 .group_by(Submission.source_id)
64 .subquery()
65 )
66
67 # Query for starred sources, along with their unread
68 # submission counts.
69 starred = (
70 db.session.query(Source, unread_stmt.c.num_unread)
71 .filter_by(pending=False, deleted_at=None)
72 .filter(Source.last_updated.isnot(None))
73 .filter(SourceStar.starred.is_(True))
74 .outerjoin(SourceStar)
75 .options(joinedload(Source.submissions))
76 .options(joinedload(Source.star))
77 .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)
78 .order_by(Source.last_updated.desc())
79 .all()
80 )
81
82 # Now, add "num_unread" attributes to the source entities.
83 for source, num_unread in starred:
84 source.num_unread = num_unread or 0
85 starred = [source for source, num_unread in starred]
86
87 # Query for sources without stars, along with their unread
88 # submission counts.
89 unstarred = (
90 db.session.query(Source, unread_stmt.c.num_unread)
91 .filter_by(pending=False, deleted_at=None)
92 .filter(Source.last_updated.isnot(None))
93 .filter(~Source.star.has(SourceStar.starred.is_(True)))
94 .options(joinedload(Source.submissions))
95 .options(joinedload(Source.star))
96 .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)
97 .order_by(Source.last_updated.desc())
98 .all()
99 )
100
101 # Again, add "num_unread" attributes to the source entities.
102 for source, num_unread in unstarred:
103 source.num_unread = num_unread or 0
104 unstarred = [source for source, num_unread in unstarred]
105
106 response = render_template("index.html", unstarred=unstarred, starred=starred)
107 return response
108
109 @view.route('/reply', methods=('POST',))
110 def reply() -> werkzeug.Response:
111 """Attempt to send a Reply from a Journalist to a Source. Empty
112 messages are rejected, and an informative error message is flashed
113 on the client. In the case of unexpected errors involving database
114 transactions (potentially caused by racing request threads that
115 modify the same the database object) logging is done in such a way
116 so as not to write potentially sensitive information to disk, and a
117 generic error message is flashed on the client.
118
119 Returns:
120 flask.Response: The user is redirected to the same Source
121 collection view, regardless if the Reply is created
122 successfully.
123 """
124 form = ReplyForm()
125 if not form.validate_on_submit():
126 for error in form.message.errors:
127 flash(error, "error")
128 return redirect(url_for('col.col', filesystem_id=g.filesystem_id))
129
130 g.source.interaction_count += 1
131 filename = "{0}-{1}-reply.gpg".format(g.source.interaction_count,
132 g.source.journalist_filename)
133 current_app.crypto_util.encrypt(
134 form.message.data,
135 [current_app.crypto_util.get_fingerprint(g.filesystem_id),
136 config.JOURNALIST_KEY],
137 output=current_app.storage.path(g.filesystem_id, filename),
138 )
139
140 try:
141 reply = Reply(g.user, g.source, filename)
142 db.session.add(reply)
143 db.session.flush()
144 seen_reply = SeenReply(reply_id=reply.id, journalist_id=g.user.id)
145 db.session.add(seen_reply)
146 db.session.commit()
147 store.async_add_checksum_for_file(reply)
148 except Exception as exc:
149 flash(gettext(
150 "An unexpected error occurred! Please "
151 "inform your admin."), "error")
152 # We take a cautious approach to logging here because we're dealing
153 # with responses to sources. It's possible the exception message
154 # could contain information we don't want to write to disk.
155 current_app.logger.error(
156 "Reply from '{}' (ID {}) failed: {}!".format(g.user.username,
157 g.user.id,
158 exc.__class__))
159 else:
160
161 flash(
162 Markup(
163 "<b>{}</b> {}".format(
164 # Translators: Precedes a message confirming the success of an operation.
165 escape(gettext("Success!")),
166 escape(gettext("Your reply has been stored."))
167 )
168 ), 'success')
169 finally:
170 return redirect(url_for('col.col', filesystem_id=g.filesystem_id))
171
172 @view.route('/bulk', methods=('POST',))
173 def bulk() -> Union[str, werkzeug.Response]:
174 action = request.form['action']
175 error_redirect = url_for('col.col', filesystem_id=g.filesystem_id)
176 doc_names_selected = request.form.getlist('doc_names_selected')
177 selected_docs = [doc for doc in g.source.collection
178 if doc.filename in doc_names_selected]
179 if selected_docs == []:
180 if action == 'download':
181 flash(
182 Markup(
183 "<b>{}</b> {}".format(
184 # Translators: Error shown when a user has not selected items to act on.
185 escape(gettext("Nothing Selected")),
186 escape(gettext("You must select one or more items for download"))
187 )
188 ), 'error')
189 elif action in ('delete', 'confirm_delete'):
190 flash(
191 Markup(
192 "<b>{}</b> {}".format(
193 # Translators: Error shown when a user has not selected items to act on.
194 escape(gettext("Nothing Selected")),
195 escape(gettext("You must select one or more items for deletion"))
196 )
197 ), 'error')
198
199 return redirect(error_redirect)
200
201 if action == 'download':
202 source = get_source(g.filesystem_id)
203 return download(
204 source.journalist_filename, selected_docs, on_error_redirect=error_redirect
205 )
206 elif action == 'delete':
207 return bulk_delete(g.filesystem_id, selected_docs)
208 elif action == 'confirm_delete':
209 return confirm_bulk_delete(g.filesystem_id, selected_docs)
210 else:
211 abort(400)
212
213 @view.route('/download_unread/<filesystem_id>')
214 def download_unread_filesystem_id(filesystem_id: str) -> werkzeug.Response:
215 unseen_submissions = (
216 Submission.query.join(Source)
217 .filter(
218 Source.deleted_at.is_(None),
219 Source.filesystem_id == filesystem_id
220 )
221 .filter(~Submission.seen_files.any(), ~Submission.seen_messages.any())
222 .all()
223 )
224 if len(unseen_submissions) == 0:
225 flash(gettext("No unread submissions for this source."), "error")
226 return redirect(url_for('col.col', filesystem_id=filesystem_id))
227 source = get_source(filesystem_id)
228 return download(source.journalist_filename, unseen_submissions)
229
230 return view
231
[end of securedrop/journalist_app/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/journalist_app/main.py b/securedrop/journalist_app/main.py
--- a/securedrop/journalist_app/main.py
+++ b/securedrop/journalist_app/main.py
@@ -163,7 +163,8 @@
"<b>{}</b> {}".format(
# Translators: Precedes a message confirming the success of an operation.
escape(gettext("Success!")),
- escape(gettext("Your reply has been stored."))
+ escape(gettext("The source will receive your reply "
+ "next time they log in."))
)
), 'success')
finally:
| {"golden_diff": "diff --git a/securedrop/journalist_app/main.py b/securedrop/journalist_app/main.py\n--- a/securedrop/journalist_app/main.py\n+++ b/securedrop/journalist_app/main.py\n@@ -163,7 +163,8 @@\n \"<b>{}</b> {}\".format(\n # Translators: Precedes a message confirming the success of an operation.\n escape(gettext(\"Success!\")),\n- escape(gettext(\"Your reply has been stored.\"))\n+ escape(gettext(\"The source will receive your reply \"\n+ \"next time they log in.\"))\n )\n ), 'success')\n finally:\n", "issue": "1.8.0 translation feedback: say replies are saved, not stored\n## Description\r\n\r\nFrom [AO](https://weblate.securedrop.org/translate/securedrop/securedrop/en/?checksum=a6f950dfa57047f3): \"Your reply has been saved.\" might be better than \"Your reply has been stored.\"\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom typing import Union\n\nimport werkzeug\nfrom flask import (Blueprint, request, current_app, session, url_for, redirect,\n render_template, g, flash, abort, Markup, escape)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.sql import func\n\nimport store\n\nfrom db import db\nfrom models import SeenReply, Source, SourceStar, Submission, Reply\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (validate_user, bulk_delete, download,\n confirm_bulk_delete, get_source)\nfrom sdconfig import SDConfig\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('main', __name__)\n\n @view.route('/login', methods=('GET', 'POST'))\n def login() -> Union[str, werkzeug.Response]:\n if request.method == 'POST':\n user = validate_user(request.form['username'],\n request.form['password'],\n request.form['token'])\n if user:\n current_app.logger.info(\"'{}' logged in with the two-factor code {}\"\n .format(request.form['username'],\n request.form['token']))\n\n # Update access metadata\n user.last_access = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n\n session['uid'] = user.id\n session['nonce'] = user.session_nonce\n return redirect(url_for('main.index'))\n\n return render_template(\"login.html\")\n\n @view.route('/logout')\n def logout() -> werkzeug.Response:\n session.pop('uid', None)\n session.pop('expires', None)\n session.pop('nonce', None)\n return redirect(url_for('main.index'))\n\n @view.route(\"/\")\n def index() -> str:\n # Gather the count of unread submissions for each source\n # ID. 
This query will be joined in the queries for starred and\n # unstarred sources below, and the unread counts added to\n # their result sets as an extra column.\n unread_stmt = (\n db.session.query(Submission.source_id, func.count(\"*\").label(\"num_unread\"))\n .filter_by(seen_files=None, seen_messages=None)\n .group_by(Submission.source_id)\n .subquery()\n )\n\n # Query for starred sources, along with their unread\n # submission counts.\n starred = (\n db.session.query(Source, unread_stmt.c.num_unread)\n .filter_by(pending=False, deleted_at=None)\n .filter(Source.last_updated.isnot(None))\n .filter(SourceStar.starred.is_(True))\n .outerjoin(SourceStar)\n .options(joinedload(Source.submissions))\n .options(joinedload(Source.star))\n .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)\n .order_by(Source.last_updated.desc())\n .all()\n )\n\n # Now, add \"num_unread\" attributes to the source entities.\n for source, num_unread in starred:\n source.num_unread = num_unread or 0\n starred = [source for source, num_unread in starred]\n\n # Query for sources without stars, along with their unread\n # submission counts.\n unstarred = (\n db.session.query(Source, unread_stmt.c.num_unread)\n .filter_by(pending=False, deleted_at=None)\n .filter(Source.last_updated.isnot(None))\n .filter(~Source.star.has(SourceStar.starred.is_(True)))\n .options(joinedload(Source.submissions))\n .options(joinedload(Source.star))\n .outerjoin(unread_stmt, Source.id == unread_stmt.c.source_id)\n .order_by(Source.last_updated.desc())\n .all()\n )\n\n # Again, add \"num_unread\" attributes to the source entities.\n for source, num_unread in unstarred:\n source.num_unread = num_unread or 0\n unstarred = [source for source, num_unread in unstarred]\n\n response = render_template(\"index.html\", unstarred=unstarred, starred=starred)\n return response\n\n @view.route('/reply', methods=('POST',))\n def reply() -> werkzeug.Response:\n \"\"\"Attempt to send a Reply from a Journalist to a Source. Empty\n messages are rejected, and an informative error message is flashed\n on the client. In the case of unexpected errors involving database\n transactions (potentially caused by racing request threads that\n modify the same the database object) logging is done in such a way\n so as not to write potentially sensitive information to disk, and a\n generic error message is flashed on the client.\n\n Returns:\n flask.Response: The user is redirected to the same Source\n collection view, regardless if the Reply is created\n successfully.\n \"\"\"\n form = ReplyForm()\n if not form.validate_on_submit():\n for error in form.message.errors:\n flash(error, \"error\")\n return redirect(url_for('col.col', filesystem_id=g.filesystem_id))\n\n g.source.interaction_count += 1\n filename = \"{0}-{1}-reply.gpg\".format(g.source.interaction_count,\n g.source.journalist_filename)\n current_app.crypto_util.encrypt(\n form.message.data,\n [current_app.crypto_util.get_fingerprint(g.filesystem_id),\n config.JOURNALIST_KEY],\n output=current_app.storage.path(g.filesystem_id, filename),\n )\n\n try:\n reply = Reply(g.user, g.source, filename)\n db.session.add(reply)\n db.session.flush()\n seen_reply = SeenReply(reply_id=reply.id, journalist_id=g.user.id)\n db.session.add(seen_reply)\n db.session.commit()\n store.async_add_checksum_for_file(reply)\n except Exception as exc:\n flash(gettext(\n \"An unexpected error occurred! 
Please \"\n \"inform your admin.\"), \"error\")\n # We take a cautious approach to logging here because we're dealing\n # with responses to sources. It's possible the exception message\n # could contain information we don't want to write to disk.\n current_app.logger.error(\n \"Reply from '{}' (ID {}) failed: {}!\".format(g.user.username,\n g.user.id,\n exc.__class__))\n else:\n\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Precedes a message confirming the success of an operation.\n escape(gettext(\"Success!\")),\n escape(gettext(\"Your reply has been stored.\"))\n )\n ), 'success')\n finally:\n return redirect(url_for('col.col', filesystem_id=g.filesystem_id))\n\n @view.route('/bulk', methods=('POST',))\n def bulk() -> Union[str, werkzeug.Response]:\n action = request.form['action']\n error_redirect = url_for('col.col', filesystem_id=g.filesystem_id)\n doc_names_selected = request.form.getlist('doc_names_selected')\n selected_docs = [doc for doc in g.source.collection\n if doc.filename in doc_names_selected]\n if selected_docs == []:\n if action == 'download':\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Error shown when a user has not selected items to act on.\n escape(gettext(\"Nothing Selected\")),\n escape(gettext(\"You must select one or more items for download\"))\n )\n ), 'error')\n elif action in ('delete', 'confirm_delete'):\n flash(\n Markup(\n \"<b>{}</b> {}\".format(\n # Translators: Error shown when a user has not selected items to act on.\n escape(gettext(\"Nothing Selected\")),\n escape(gettext(\"You must select one or more items for deletion\"))\n )\n ), 'error')\n\n return redirect(error_redirect)\n\n if action == 'download':\n source = get_source(g.filesystem_id)\n return download(\n source.journalist_filename, selected_docs, on_error_redirect=error_redirect\n )\n elif action == 'delete':\n return bulk_delete(g.filesystem_id, selected_docs)\n elif action == 'confirm_delete':\n return confirm_bulk_delete(g.filesystem_id, selected_docs)\n else:\n abort(400)\n\n @view.route('/download_unread/<filesystem_id>')\n def download_unread_filesystem_id(filesystem_id: str) -> werkzeug.Response:\n unseen_submissions = (\n Submission.query.join(Source)\n .filter(\n Source.deleted_at.is_(None),\n Source.filesystem_id == filesystem_id\n )\n .filter(~Submission.seen_files.any(), ~Submission.seen_messages.any())\n .all()\n )\n if len(unseen_submissions) == 0:\n flash(gettext(\"No unread submissions for this source.\"), \"error\")\n return redirect(url_for('col.col', filesystem_id=filesystem_id))\n source = get_source(filesystem_id)\n return download(source.journalist_filename, unseen_submissions)\n\n return view\n", "path": "securedrop/journalist_app/main.py"}]} | 3,117 | 143 |
gh_patches_debug_9587 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2475 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sessions that expire do not display a localized logout message.
# Bug
## Description
Like #2391, if a source has their session expire, they will not be shown a localized message when they log out.
## Steps to Reproduce
Set the session expiry to 30 seconds. Log in. Set the locale to something other than English. Wait 30 seconds. Refresh. Observe that the flashed message is not localized.
## Expected Behavior
The logout message is localized.
## Actual Behavior
It is not.
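
A minimal, framework-free sketch of the ordering problem. All names below are illustrative rather than SecureDrop's actual API; the assumption (consistent with the patch shown further down) is that the locale is resolved from session state, so clearing the session before rendering drops the source's chosen language.

```python
# Illustrative only: "session" is a plain dict standing in for Flask's session.
translations = {"fr": "Votre session a expiré.", "en": "Your session expired."}

def render_timeout_message(session: dict) -> str:
    # Falls back to English once the locale is no longer in the session.
    return translations.get(session.get("locale", "en"), translations["en"])

def expire_buggy(session: dict) -> str:
    session.clear()                          # locale discarded too early
    return render_timeout_message(session)   # always English

def expire_fixed(session: dict) -> str:
    msg = render_timeout_message(session)    # still localized
    session.clear()
    return msg

assert expire_buggy({"locale": "fr"}) == "Your session expired."
assert expire_fixed({"locale": "fr"}) == "Votre session a expiré."
```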
</issue>
<code>
[start of securedrop/source_app/__init__.py]
1 from datetime import datetime, timedelta
2 from flask import (Flask, render_template, flash, Markup, request, g, session,
3 url_for, redirect)
4 from flask_babel import gettext
5 from flask_assets import Environment
6 from flask_wtf.csrf import CSRFProtect
7 from jinja2 import evalcontextfilter
8 from os import path
9 from sqlalchemy.orm.exc import NoResultFound
10
11 import crypto_util
12 import i18n
13 import store
14 import template_filters
15 import version
16
17 from db import Source, db_session
18 from request_that_secures_file_uploads import RequestThatSecuresFileUploads
19 from source_app import main, info, api
20 from source_app.decorators import ignore_static
21 from source_app.utils import logged_in
22
23
24 def create_app(config):
25 app = Flask(__name__,
26 template_folder=config.SOURCE_TEMPLATES_DIR,
27 static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
28 app.request_class = RequestThatSecuresFileUploads
29 app.config.from_object(config.SourceInterfaceFlaskConfig)
30
31 # The default CSRF token expiration is 1 hour. Since large uploads can
32 # take longer than an hour over Tor, we increase the valid window to 24h.
33 app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24
34 CSRFProtect(app)
35
36 assets = Environment(app)
37 app.config['assets'] = assets
38
39 i18n.setup_app(app)
40
41 app.jinja_env.trim_blocks = True
42 app.jinja_env.lstrip_blocks = True
43 app.jinja_env.globals['version'] = version.__version__
44 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
45 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
46 app.jinja_env.globals['use_custom_header_image'] = True
47 else:
48 app.jinja_env.globals['header_image'] = 'logo.png'
49 app.jinja_env.globals['use_custom_header_image'] = False
50
51 app.jinja_env.filters['rel_datetime_format'] = \
52 template_filters.rel_datetime_format
53 app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)
54 app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
55
56 for module in [main, info, api]:
57 app.register_blueprint(module.make_blueprint(config))
58
59 @app.before_request
60 @ignore_static
61 def check_tor2web():
62 # ignore_static here so we only flash a single message warning
63 # about Tor2Web, corresponding to the initial page load.
64 if 'X-tor2web' in request.headers:
65 flash(Markup(gettext(
66 '<strong>WARNING:</strong> You appear to be using Tor2Web. '
67 'This <strong>does not</strong> provide anonymity. '
68 '<a href="{url}">Why is this dangerous?</a>')
69 .format(url=url_for('info.tor2web_warning'))),
70 "banner-warning")
71
72 @app.before_request
73 @ignore_static
74 def setup_g():
75 """Store commonly used values in Flask's special g object"""
76 g.locale = i18n.get_locale()
77 g.text_direction = i18n.get_text_direction(g.locale)
78 g.html_lang = i18n.locale_to_rfc_5646(g.locale)
79 g.locales = i18n.get_locale2name()
80
81 if 'expires' in session and datetime.utcnow() >= session['expires']:
82 session.clear()
83 msg = render_template('session_timeout.html')
84 flash(Markup(msg), "important")
85
86 session['expires'] = datetime.utcnow() + \
87 timedelta(minutes=getattr(config,
88 'SESSION_EXPIRATION_MINUTES',
89 30))
90
91 # ignore_static here because `crypto_util.hash_codename` is scrypt
92 # (very time consuming), and we don't need to waste time running if
93 # we're just serving a static resource that won't need to access
94 # these common values.
95 if logged_in():
96 g.codename = session['codename']
97 g.filesystem_id = crypto_util.hash_codename(g.codename)
98 try:
99 g.source = Source.query \
100 .filter(Source.filesystem_id == g.filesystem_id) \
101 .one()
102 except NoResultFound as e:
103 app.logger.error(
104 "Found no Sources when one was expected: %s" %
105 (e,))
106 del session['logged_in']
107 del session['codename']
108 return redirect(url_for('main.index'))
109 g.loc = store.path(g.filesystem_id)
110
111 @app.teardown_appcontext
112 def shutdown_session(exception=None):
113 """Automatically remove database sessions at the end of the request, or
114 when the application shuts down"""
115 db_session.remove()
116
117 @app.errorhandler(404)
118 def page_not_found(error):
119 return render_template('notfound.html'), 404
120
121 @app.errorhandler(500)
122 def internal_error(error):
123 return render_template('error.html'), 500
124
125 return app
126
[end of securedrop/source_app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py
--- a/securedrop/source_app/__init__.py
+++ b/securedrop/source_app/__init__.py
@@ -79,8 +79,11 @@
g.locales = i18n.get_locale2name()
if 'expires' in session and datetime.utcnow() >= session['expires']:
- session.clear()
msg = render_template('session_timeout.html')
+
+ # clear the session after we render the message so it's localized
+ session.clear()
+
flash(Markup(msg), "important")
session['expires'] = datetime.utcnow() + \
| {"golden_diff": "diff --git a/securedrop/source_app/__init__.py b/securedrop/source_app/__init__.py\n--- a/securedrop/source_app/__init__.py\n+++ b/securedrop/source_app/__init__.py\n@@ -79,8 +79,11 @@\n g.locales = i18n.get_locale2name()\n \n if 'expires' in session and datetime.utcnow() >= session['expires']:\n- session.clear()\n msg = render_template('session_timeout.html')\n+\n+ # clear the session after we render the message so it's localized\n+ session.clear()\n+\n flash(Markup(msg), \"important\")\n \n session['expires'] = datetime.utcnow() + \\\n", "issue": "Session expiring do not display a localized logout message.\n# Bug\r\n\r\n## Description\r\n\r\nLike #2391, if a source has their session expire, they will not be shown a localized message when they log out.\r\n\r\n## Steps to Reproduce\r\n\r\nSet session expire to 30 seconds. Log in. Set locale to not-english. Wait 30 seconds. Refresh. See no-localized flashed message.\r\n\r\n## Expected Behavior\r\n\r\nThe logout message is localized.\r\n\r\n## Actual Behavior\r\n\r\nIt is not.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nfrom flask import (Flask, render_template, flash, Markup, request, g, session,\n url_for, redirect)\nfrom flask_babel import gettext\nfrom flask_assets import Environment\nfrom flask_wtf.csrf import CSRFProtect\nfrom jinja2 import evalcontextfilter\nfrom os import path\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport crypto_util\nimport i18n\nimport store\nimport template_filters\nimport version\n\nfrom db import Source, db_session\nfrom request_that_secures_file_uploads import RequestThatSecuresFileUploads\nfrom source_app import main, info, api\nfrom source_app.decorators import ignore_static\nfrom source_app.utils import logged_in\n\n\ndef create_app(config):\n app = Flask(__name__,\n template_folder=config.SOURCE_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n app.request_class = RequestThatSecuresFileUploads\n app.config.from_object(config.SourceInterfaceFlaskConfig)\n\n # The default CSRF token expiration is 1 hour. Since large uploads can\n # take longer than an hour over Tor, we increase the valid window to 24h.\n app.config['WTF_CSRF_TIME_LIMIT'] = 60 * 60 * 24\n CSRFProtect(app)\n\n assets = Environment(app)\n app.config['assets'] = assets\n\n i18n.setup_app(app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n if getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\n else:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n for module in [main, info, api]:\n app.register_blueprint(module.make_blueprint(config))\n\n @app.before_request\n @ignore_static\n def check_tor2web():\n # ignore_static here so we only flash a single message warning\n # about Tor2Web, corresponding to the initial page load.\n if 'X-tor2web' in request.headers:\n flash(Markup(gettext(\n '<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"{url}\">Why is this dangerous?</a>')\n .format(url=url_for('info.tor2web_warning'))),\n \"banner-warning\")\n\n @app.before_request\n @ignore_static\n def setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n g.locale = i18n.get_locale()\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n session.clear()\n msg = render_template('session_timeout.html')\n flash(Markup(msg), \"important\")\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 30))\n\n # ignore_static here because `crypto_util.hash_codename` is scrypt\n # (very time consuming), and we don't need to waste time running if\n # we're just serving a static resource that won't need to access\n # these common values.\n if logged_in():\n g.codename = session['codename']\n g.filesystem_id = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query \\\n .filter(Source.filesystem_id == g.filesystem_id) \\\n .one()\n except NoResultFound as e:\n app.logger.error(\n \"Found no Sources when one was expected: %s\" %\n (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('main.index'))\n g.loc = store.path(g.filesystem_id)\n\n @app.teardown_appcontext\n def shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n @app.errorhandler(404)\n def page_not_found(error):\n return render_template('notfound.html'), 404\n\n @app.errorhandler(500)\n def internal_error(error):\n return render_template('error.html'), 500\n\n return app\n", "path": "securedrop/source_app/__init__.py"}]} | 2,015 | 152 |
gh_patches_debug_29478 | rasdani/github-patches | git_diff | vyperlang__vyper-3409 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Immutable variables can be read before assignment
### Version Information
* vyper Version (output of `vyper --version`): 0.3.6+commit.4a2124d
* OS: osx
* Python Version: 3.9.13
### What's your issue about?
Immutable variables can be read before assignment: the read accesses their memory location during constructor execution, but that memory has not been initialized yet. Its value is not necessarily `empty(type)`, since memory could have been written to ephemerally.
This should not compile:
```python
# @version 0.3.6
a:immutable(uint256)
b:uint256
@payable
@external
def __init__(s:address):
self.b = a
a = 12
```
A more interesting example, where transient memory written during the execution of `create_copy_of()` ends up in the storage variable `b`:
```python
# @version 0.3.6
a:immutable(uint256)
b:uint256
@payable
@external
def __init__():
s:address = 0x9D0464996170c6B9e75eED71c68B99dDEDf279e8 #random contract containing code that is copied to memory before redeploying
c:address = create_copy_of(s)
self.b = a
a = 12
```
### How can it be fixed?
Check that immutable variables are assigned before they are used.
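
A rough illustration of that check, written against Python's standard `ast` module rather than Vyper's real analysis passes (which have their own AST and module layout): walk the constructor body in source order and reject any load of an immutable name that has not been stored yet.

```python
import ast

IMMUTABLES = {"a"}   # immutable names declared in the contract (assumed input)

def check_init(init_src: str, immutables: set) -> None:
    """Reject a read of an immutable name that happens before its assignment."""
    fn = ast.parse(init_src).body[0]          # the __init__ FunctionDef
    assigned = set()
    for stmt in fn.body:                      # statements in source order
        for node in ast.walk(stmt):           # reads are checked first ...
            if (isinstance(node, ast.Name) and node.id in immutables
                    and isinstance(node.ctx, ast.Load) and node.id not in assigned):
                raise SyntaxError(f"immutable '{node.id}' read before assignment")
        for node in ast.walk(stmt):           # ... then stores take effect
            if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
                assigned.add(node.id)

try:                                          # first example from this issue
    check_init("def __init__(s):\n    self.b = a\n    a = 12", IMMUTABLES)
except SyntaxError as exc:
    print(exc)                                # immutable 'a' read before assignment
```

Note that the patch which eventually landed (see the diff further down) takes a different route: rather than a front-end check, it forces `msize` past the end of the immutables section during deployment, so builtins that use `msize` for memory allocation cannot clobber uninitialized immutables.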
</issue>
<code>
[start of vyper/codegen/module.py]
1 # a contract.vy -- all functions and constructor
2
3 from typing import Any, List, Optional
4
5 from vyper import ast as vy_ast
6 from vyper.codegen.core import shr
7 from vyper.codegen.function_definitions import FuncIRInfo, generate_ir_for_function
8 from vyper.codegen.global_context import GlobalContext
9 from vyper.codegen.ir_node import IRnode
10 from vyper.exceptions import CompilerPanic
11
12
13 def _topsort_helper(functions, lookup):
14 # single pass to get a global topological sort of functions (so that each
15 # function comes after each of its callees). may have duplicates, which get
16 # filtered out in _topsort()
17
18 ret = []
19 for f in functions:
20 # called_functions is a list of ContractFunctions, need to map
21 # back to FunctionDefs.
22 callees = [lookup[t.name] for t in f._metadata["type"].called_functions]
23 ret.extend(_topsort_helper(callees, lookup))
24 ret.append(f)
25
26 return ret
27
28
29 def _topsort(functions):
30 lookup = {f.name: f for f in functions}
31 # strip duplicates
32 return list(dict.fromkeys(_topsort_helper(functions, lookup)))
33
34
35 def _is_constructor(func_ast):
36 return func_ast._metadata["type"].is_constructor
37
38
39 def _is_fallback(func_ast):
40 return func_ast._metadata["type"].is_fallback
41
42
43 def _is_internal(func_ast):
44 return func_ast._metadata["type"].is_internal
45
46
47 def _is_payable(func_ast):
48 return func_ast._metadata["type"].is_payable
49
50
51 # codegen for all runtime functions + callvalue/calldata checks + method selector routines
52 def _runtime_ir(runtime_functions, global_ctx):
53 # categorize the runtime functions because we will organize the runtime
54 # code into the following sections:
55 # payable functions, nonpayable functions, fallback function, internal_functions
56 internal_functions = [f for f in runtime_functions if _is_internal(f)]
57
58 external_functions = [f for f in runtime_functions if not _is_internal(f)]
59 default_function = next((f for f in external_functions if _is_fallback(f)), None)
60
61 # functions that need to go exposed in the selector section
62 regular_functions = [f for f in external_functions if not _is_fallback(f)]
63 payables = [f for f in regular_functions if _is_payable(f)]
64 nonpayables = [f for f in regular_functions if not _is_payable(f)]
65
66 # create a map of the IR functions since they might live in both
67 # runtime and deploy code (if init function calls them)
68 internal_functions_ir: list[IRnode] = []
69
70 for func_ast in internal_functions:
71 func_ir = generate_ir_for_function(func_ast, global_ctx, False)
72 internal_functions_ir.append(func_ir)
73
74 # for some reason, somebody may want to deploy a contract with no
75 # external functions, or more likely, a "pure data" contract which
76 # contains immutables
77 if len(external_functions) == 0:
78 # TODO: prune internal functions in this case? dead code eliminator
79 # might not eliminate them, since internal function jumpdest is at the
80 # first instruction in the contract.
81 runtime = ["seq"] + internal_functions_ir
82 return runtime
83
84 # note: if the user does not provide one, the default fallback function
85 # reverts anyway. so it does not hurt to batch the payable check.
86 default_is_nonpayable = default_function is None or not _is_payable(default_function)
87
88 # when a contract has a nonpayable default function,
89 # we can do a single check for all nonpayable functions
90 batch_payable_check = len(nonpayables) > 0 and default_is_nonpayable
91 skip_nonpayable_check = batch_payable_check
92
93 selector_section = ["seq"]
94
95 for func_ast in payables:
96 func_ir = generate_ir_for_function(func_ast, global_ctx, False)
97 selector_section.append(func_ir)
98
99 if batch_payable_check:
100 selector_section.append(["assert", ["iszero", "callvalue"]])
101
102 for func_ast in nonpayables:
103 func_ir = generate_ir_for_function(func_ast, global_ctx, skip_nonpayable_check)
104 selector_section.append(func_ir)
105
106 if default_function:
107 fallback_ir = generate_ir_for_function(default_function, global_ctx, skip_nonpayable_check)
108 else:
109 fallback_ir = IRnode.from_list(
110 ["revert", 0, 0], annotation="Default function", error_msg="fallback function"
111 )
112
113 # ensure the external jumptable section gets closed out
114 # (for basic block hygiene and also for zksync interpreter)
115 # NOTE: this jump gets optimized out in assembly since the
116 # fallback label is the immediate next instruction,
117 close_selector_section = ["goto", "fallback"]
118
119 runtime = [
120 "seq",
121 ["with", "_calldata_method_id", shr(224, ["calldataload", 0]), selector_section],
122 close_selector_section,
123 ["label", "fallback", ["var_list"], fallback_ir],
124 ]
125
126 # note: dead code eliminator will clean dead functions
127 runtime.extend(internal_functions_ir)
128
129 return runtime
130
131
132 # take a GlobalContext, and generate the runtime and deploy IR
133 def generate_ir_for_module(global_ctx: GlobalContext) -> tuple[IRnode, IRnode]:
134 # order functions so that each function comes after all of its callees
135 function_defs = _topsort(global_ctx.functions)
136
137 init_function: Optional[vy_ast.FunctionDef] = None
138
139 # generate all FuncIRInfos
140 for f in function_defs:
141 func_t = f._metadata["type"]
142 func_t._ir_info = FuncIRInfo(func_t)
143
144 runtime_functions = [f for f in function_defs if not _is_constructor(f)]
145 init_function = next((f for f in function_defs if _is_constructor(f)), None)
146
147 runtime = _runtime_ir(runtime_functions, global_ctx)
148
149 deploy_code: List[Any] = ["seq"]
150 immutables_len = global_ctx.immutable_section_bytes
151 if init_function:
152 # TODO might be cleaner to separate this into an _init_ir helper func
153 init_func_ir = generate_ir_for_function(
154 init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True
155 )
156 deploy_code.append(init_func_ir)
157
158 # pass the amount of memory allocated for the init function
159 # so that deployment does not clobber while preparing immutables
160 # note: (deploy mem_ofst, code, extra_padding)
161 init_mem_used = init_function._metadata["type"]._ir_info.frame_info.mem_used
162 deploy_code.append(["deploy", init_mem_used, runtime, immutables_len])
163
164 # internal functions come after everything else
165 internal_functions = [f for f in runtime_functions if _is_internal(f)]
166 for f in internal_functions:
167 func_ir = generate_ir_for_function(
168 f, global_ctx, skip_nonpayable_check=False, is_ctor_context=True
169 )
170 # note: we depend on dead code eliminator to clean dead function defs
171 deploy_code.append(func_ir)
172
173 else:
174 if immutables_len != 0:
175 raise CompilerPanic("unreachable")
176 deploy_code.append(["deploy", 0, runtime, 0])
177
178 return IRnode.from_list(deploy_code), IRnode.from_list(runtime)
179
[end of vyper/codegen/module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/codegen/module.py b/vyper/codegen/module.py
--- a/vyper/codegen/module.py
+++ b/vyper/codegen/module.py
@@ -153,12 +153,31 @@
init_func_ir = generate_ir_for_function(
init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True
)
- deploy_code.append(init_func_ir)
# pass the amount of memory allocated for the init function
# so that deployment does not clobber while preparing immutables
# note: (deploy mem_ofst, code, extra_padding)
init_mem_used = init_function._metadata["type"]._ir_info.frame_info.mem_used
+
+ # force msize to be initialized past the end of immutables section
+ # so that builtins which use `msize` for "dynamic" memory
+ # allocation do not clobber uninitialized immutables.
+ # cf. GH issue 3101.
+ # note mload/iload X touches bytes from X to X+32, and msize rounds up
+ # to the nearest 32, so `iload`ing `immutables_len - 32` guarantees
+ # that `msize` will refer to a memory location of at least
+ # `<immutables_start> + immutables_len` (where <immutables_start> ==
+ # `_mem_deploy_end` as defined in the assembler).
+ # note:
+ # mload 32 => msize == 64
+ # mload 33 => msize == 96
+ # assumption in general: (mload X) => msize == ceil32(X + 32)
+ # see py-evm extend_memory: after_size = ceil32(start_position + size)
+ if immutables_len > 0:
+ deploy_code.append(["iload", max(0, immutables_len - 32)])
+
+ deploy_code.append(init_func_ir)
+
deploy_code.append(["deploy", init_mem_used, runtime, immutables_len])
# internal functions come after everything else
| {"golden_diff": "diff --git a/vyper/codegen/module.py b/vyper/codegen/module.py\n--- a/vyper/codegen/module.py\n+++ b/vyper/codegen/module.py\n@@ -153,12 +153,31 @@\n init_func_ir = generate_ir_for_function(\n init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n- deploy_code.append(init_func_ir)\n \n # pass the amount of memory allocated for the init function\n # so that deployment does not clobber while preparing immutables\n # note: (deploy mem_ofst, code, extra_padding)\n init_mem_used = init_function._metadata[\"type\"]._ir_info.frame_info.mem_used\n+\n+ # force msize to be initialized past the end of immutables section\n+ # so that builtins which use `msize` for \"dynamic\" memory\n+ # allocation do not clobber uninitialized immutables.\n+ # cf. GH issue 3101.\n+ # note mload/iload X touches bytes from X to X+32, and msize rounds up\n+ # to the nearest 32, so `iload`ing `immutables_len - 32` guarantees\n+ # that `msize` will refer to a memory location of at least\n+ # `<immutables_start> + immutables_len` (where <immutables_start> ==\n+ # `_mem_deploy_end` as defined in the assembler).\n+ # note:\n+ # mload 32 => msize == 64\n+ # mload 33 => msize == 96\n+ # assumption in general: (mload X) => msize == ceil32(X + 32)\n+ # see py-evm extend_memory: after_size = ceil32(start_position + size)\n+ if immutables_len > 0:\n+ deploy_code.append([\"iload\", max(0, immutables_len - 32)])\n+\n+ deploy_code.append(init_func_ir)\n+\n deploy_code.append([\"deploy\", init_mem_used, runtime, immutables_len])\n \n # internal functions come after everything else\n", "issue": "Immutable variables can be read before assignment\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.6+commit.4a2124d\r\n* OS: osx\r\n* Python Version: 3.9.13\r\n \r\n### What's your issue about?\r\n\r\nImmutable variables can be read before assignment, their memory location during constructor execution is accessed, but that memory is not initialized yet. Its value is not necessarily `empty(type)` since memory could have been written to ephemerally.\r\n\r\nThis should not compile:\r\n\r\n```python\r\n# @version 0.3.6\r\n\r\na:immutable(uint256)\r\nb:uint256\r\n\r\n@payable\r\n@external\r\ndef __init__(s:address):\r\n self.b = a\r\n a = 12\r\n```\r\n\r\nA more interesting example where transient memory written during execution of `create_copy_of()` is written into the storage variable `b`:\r\n\r\n```python\r\n# @version 0.3.6\r\n\r\na:immutable(uint256)\r\nb:uint256\r\n\r\n@payable\r\n@external\r\ndef __init__():\r\n s:address = 0x9D0464996170c6B9e75eED71c68B99dDEDf279e8 #random contract containing code that is copied to memory before redeploying\r\n c:address = create_copy_of(s)\r\n self.b = a\r\n a = 12\r\n```\r\n\r\n### How can it be fixed?\r\n\r\ncheck that immutable variables are assigned before usage\r\n\n", "before_files": [{"content": "# a contract.vy -- all functions and constructor\n\nfrom typing import Any, List, Optional\n\nfrom vyper import ast as vy_ast\nfrom vyper.codegen.core import shr\nfrom vyper.codegen.function_definitions import FuncIRInfo, generate_ir_for_function\nfrom vyper.codegen.global_context import GlobalContext\nfrom vyper.codegen.ir_node import IRnode\nfrom vyper.exceptions import CompilerPanic\n\n\ndef _topsort_helper(functions, lookup):\n # single pass to get a global topological sort of functions (so that each\n # function comes after each of its callees). 
may have duplicates, which get\n # filtered out in _topsort()\n\n ret = []\n for f in functions:\n # called_functions is a list of ContractFunctions, need to map\n # back to FunctionDefs.\n callees = [lookup[t.name] for t in f._metadata[\"type\"].called_functions]\n ret.extend(_topsort_helper(callees, lookup))\n ret.append(f)\n\n return ret\n\n\ndef _topsort(functions):\n lookup = {f.name: f for f in functions}\n # strip duplicates\n return list(dict.fromkeys(_topsort_helper(functions, lookup)))\n\n\ndef _is_constructor(func_ast):\n return func_ast._metadata[\"type\"].is_constructor\n\n\ndef _is_fallback(func_ast):\n return func_ast._metadata[\"type\"].is_fallback\n\n\ndef _is_internal(func_ast):\n return func_ast._metadata[\"type\"].is_internal\n\n\ndef _is_payable(func_ast):\n return func_ast._metadata[\"type\"].is_payable\n\n\n# codegen for all runtime functions + callvalue/calldata checks + method selector routines\ndef _runtime_ir(runtime_functions, global_ctx):\n # categorize the runtime functions because we will organize the runtime\n # code into the following sections:\n # payable functions, nonpayable functions, fallback function, internal_functions\n internal_functions = [f for f in runtime_functions if _is_internal(f)]\n\n external_functions = [f for f in runtime_functions if not _is_internal(f)]\n default_function = next((f for f in external_functions if _is_fallback(f)), None)\n\n # functions that need to go exposed in the selector section\n regular_functions = [f for f in external_functions if not _is_fallback(f)]\n payables = [f for f in regular_functions if _is_payable(f)]\n nonpayables = [f for f in regular_functions if not _is_payable(f)]\n\n # create a map of the IR functions since they might live in both\n # runtime and deploy code (if init function calls them)\n internal_functions_ir: list[IRnode] = []\n\n for func_ast in internal_functions:\n func_ir = generate_ir_for_function(func_ast, global_ctx, False)\n internal_functions_ir.append(func_ir)\n\n # for some reason, somebody may want to deploy a contract with no\n # external functions, or more likely, a \"pure data\" contract which\n # contains immutables\n if len(external_functions) == 0:\n # TODO: prune internal functions in this case? dead code eliminator\n # might not eliminate them, since internal function jumpdest is at the\n # first instruction in the contract.\n runtime = [\"seq\"] + internal_functions_ir\n return runtime\n\n # note: if the user does not provide one, the default fallback function\n # reverts anyway. 
so it does not hurt to batch the payable check.\n default_is_nonpayable = default_function is None or not _is_payable(default_function)\n\n # when a contract has a nonpayable default function,\n # we can do a single check for all nonpayable functions\n batch_payable_check = len(nonpayables) > 0 and default_is_nonpayable\n skip_nonpayable_check = batch_payable_check\n\n selector_section = [\"seq\"]\n\n for func_ast in payables:\n func_ir = generate_ir_for_function(func_ast, global_ctx, False)\n selector_section.append(func_ir)\n\n if batch_payable_check:\n selector_section.append([\"assert\", [\"iszero\", \"callvalue\"]])\n\n for func_ast in nonpayables:\n func_ir = generate_ir_for_function(func_ast, global_ctx, skip_nonpayable_check)\n selector_section.append(func_ir)\n\n if default_function:\n fallback_ir = generate_ir_for_function(default_function, global_ctx, skip_nonpayable_check)\n else:\n fallback_ir = IRnode.from_list(\n [\"revert\", 0, 0], annotation=\"Default function\", error_msg=\"fallback function\"\n )\n\n # ensure the external jumptable section gets closed out\n # (for basic block hygiene and also for zksync interpreter)\n # NOTE: this jump gets optimized out in assembly since the\n # fallback label is the immediate next instruction,\n close_selector_section = [\"goto\", \"fallback\"]\n\n runtime = [\n \"seq\",\n [\"with\", \"_calldata_method_id\", shr(224, [\"calldataload\", 0]), selector_section],\n close_selector_section,\n [\"label\", \"fallback\", [\"var_list\"], fallback_ir],\n ]\n\n # note: dead code eliminator will clean dead functions\n runtime.extend(internal_functions_ir)\n\n return runtime\n\n\n# take a GlobalContext, and generate the runtime and deploy IR\ndef generate_ir_for_module(global_ctx: GlobalContext) -> tuple[IRnode, IRnode]:\n # order functions so that each function comes after all of its callees\n function_defs = _topsort(global_ctx.functions)\n\n init_function: Optional[vy_ast.FunctionDef] = None\n\n # generate all FuncIRInfos\n for f in function_defs:\n func_t = f._metadata[\"type\"]\n func_t._ir_info = FuncIRInfo(func_t)\n\n runtime_functions = [f for f in function_defs if not _is_constructor(f)]\n init_function = next((f for f in function_defs if _is_constructor(f)), None)\n\n runtime = _runtime_ir(runtime_functions, global_ctx)\n\n deploy_code: List[Any] = [\"seq\"]\n immutables_len = global_ctx.immutable_section_bytes\n if init_function:\n # TODO might be cleaner to separate this into an _init_ir helper func\n init_func_ir = generate_ir_for_function(\n init_function, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n deploy_code.append(init_func_ir)\n\n # pass the amount of memory allocated for the init function\n # so that deployment does not clobber while preparing immutables\n # note: (deploy mem_ofst, code, extra_padding)\n init_mem_used = init_function._metadata[\"type\"]._ir_info.frame_info.mem_used\n deploy_code.append([\"deploy\", init_mem_used, runtime, immutables_len])\n\n # internal functions come after everything else\n internal_functions = [f for f in runtime_functions if _is_internal(f)]\n for f in internal_functions:\n func_ir = generate_ir_for_function(\n f, global_ctx, skip_nonpayable_check=False, is_ctor_context=True\n )\n # note: we depend on dead code eliminator to clean dead function defs\n deploy_code.append(func_ir)\n\n else:\n if immutables_len != 0:\n raise CompilerPanic(\"unreachable\")\n deploy_code.append([\"deploy\", 0, runtime, 0])\n\n return IRnode.from_list(deploy_code), 
IRnode.from_list(runtime)\n", "path": "vyper/codegen/module.py"}]} | 2,951 | 486 |
gh_patches_debug_10365 | rasdani/github-patches | git_diff | acl-org__acl-anthology-2399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ingestion request: NEJLT vol 7 & 8
This is a new (to the anthology) venue.
* **Venue name:** Northern European Journal of Language Technology (NEJLT)
* **Website:** [nejlt.org](https://www.nejlt.org/)
* Papers at https://doi.org/10.3384/nejlt.2000-1533.8.1 and https://doi.org/10.3384/nejlt.2000-1533.7.1
I propose:
* volume identifier `nejlt`
* volume titles in the format _Northern European Journal of Language Technology, Volume n_
We're ready to send over two volumes, for 2021 and 2022. If and when this is OK with you, I'm happy to assemble ACLPUB-format volumes and send them.
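
For concreteness, a sketch of what the journal-title mapping in `bin/anthology/data.py` (shown below) might return for the proposed `nejlt` identifier; the import path and the choice to extend `get_journal_info` are assumptions, and the maintainers may prefer a data-driven mapping instead.

```python
from anthology.data import match_volume_and_issue   # helper from data.py below (assumed path)

def get_nejlt_info(volume_title: str):
    """Journal title / volume / issue for the proposed 'nejlt' collection id."""
    journal_title = "Northern European Journal of Language Technology"
    volume_no, _ = match_volume_and_issue(volume_title)   # NEJLT has no issue numbers
    return journal_title, volume_no, None

# get_nejlt_info("Northern European Journal of Language Technology, Volume 8")
# -> ("Northern European Journal of Language Technology", "8", None)
```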
</issue>
<code>
[start of bin/anthology/data.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Marcel Bollmann <[email protected]>
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 ################################################################################
18 # This file contains all constants and functions that have hardcoded data (such
19 # as URLs or journal titles) which does not come from the XML. This is to
20 # provide a single file where such hardcoded data can be looked up and/or
21 # changed.
22 ################################################################################
23
24 import os
25 import re
26
27 from typing import Tuple
28
29
30 # this is the canonical URL. In contrast to all other
31 # URL templates, it always links to the official anthology.
32 CANONICAL_URL_TEMPLATE = "https://aclanthology.org/{}"
33
34 # the prefix is used in different programs and we need to set it everywhere
35 # We use a environment variable to set this and not have to forward the value
36 # through all the programs. If this does not look like the best idea, keep in mind
37 # that the structure is historically grown -- from 2019 to 2020 :-)
38 try:
39 ANTHOLOGY_PREFIX = os.environ["ANTHOLOGY_PREFIX"]
40 except:
41 ANTHOLOGY_PREFIX = "https://aclanthology.org"
42
43 ATTACHMENT_PREFIX = ANTHOLOGY_PREFIX + "/attachments"
44 ATTACHMENT_TEMPLATE = ATTACHMENT_PREFIX + "/{}"
45
46 PDF_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + "/{}.pdf"
47 PDF_THUMBNAIL_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + "/thumb/{}.jpg"
48
49 VIDEO_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + "/{}"
50
51 # Regular expression matching full Anthology IDs
52 ANTHOLOGY_ID_REGEX = r"[A-Z]\d{2}-\d{4}"
53
54 # Anthology file location on server
55 # Defaults to ~/anthology-files
56 ANTHOLOGY_FILE_DIR = os.environ.get(
57 "ANTHOLOGY_FILES", os.path.join(os.environ["HOME"], "anthology-files")
58 )
59
60 # Names of XML elements that may appear multiple times
61 LIST_ELEMENTS = (
62 "attachment",
63 "author",
64 "editor",
65 "video",
66 "revision",
67 "erratum",
68 "award",
69 "pwcdataset",
70 "video",
71 "venue",
72 "colocated",
73 )
74
75 # New-style IDs that should be handled as journals
76 JOURNAL_IDS = ("cl", "tacl", "tal", "lilt")
77
78 # Constants associated with DOI assignation
79 DOI_URL_PREFIX = "https://dx.doi.org/"
80 DOI_PREFIX = "10.18653/v1/"
81
82 # Default ingestion date (= unknown)
83 UNKNOWN_INGEST_DATE = "1900-01-01"
84
85 # The venue format must match this pattern
86 VENUE_FORMAT = r"^[a-z\d]+$"
87
88
89 def match_volume_and_issue(booktitle) -> Tuple[str, str]:
90 """Parses a volume name and issue name from a title.
91
92 Examples:
93 - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>
94 - <booktitle>Traitement Automatique des Langues 2011 Volume 52 Numéro 1</booktitle>
95 - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>
96
97 :param booktitle: The booktitle
98 :return: the volume and issue numbers
99 """
100 volume_no = re.search(r"Volume\s*(\d+)", booktitle, flags=re.IGNORECASE)
101 if volume_no is not None:
102 volume_no = volume_no.group(1)
103
104 issue_no = re.search(
105 r"(Number|Numéro|Issue)\s*(\d+-?\d*)", booktitle, flags=re.IGNORECASE
106 )
107 if issue_no is not None:
108 issue_no = issue_no.group(2)
109
110 return volume_no, issue_no
111
112
113 def get_journal_info(top_level_id, volume_title) -> Tuple[str, str, str]:
114 """Returns info about the journal: title, volume no., and issue no.
115 Currently (Feb 2023), this information is parsed from the <booktitle> tag!
116 We should move instead to an explicit representation. See
117
118 https://github.com/acl-org/acl-anthology/issues/2379
119
120 :param top_level_id: The collection ID
121 :param volume_title: The text from the <booktitle> tag
122 :return: The journal title, volume number, and issue number
123 """
124
125 # TODO: consider moving this from code to data (perhaps
126 # under <booktitle> in the volume metadata
127
128 top_level_id = top_level_id.split(".")[-1] # for new-style IDs; is a no-op otherwise
129
130 journal_title = None
131 volume_no = None
132 issue_no = None
133
134 if top_level_id == "cl":
135 # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>
136 journal_title = "Computational Linguistics"
137 volume_no, issue_no = match_volume_and_issue(volume_title)
138
139 elif top_level_id == "lilt":
140 # <booktitle>Linguistic Issues in Language Technology, Volume 10, 2015</booktitle>
141 journal_title = "Linguistic Issues in Language Technology"
142 volume_no, _ = match_volume_and_issue(volume_title)
143
144 elif top_level_id == "tal":
145 # <booktitle>Traitement Automatique des Langues 2011 Volume 52 Numéro 1</booktitle>
146 journal_title = "Traitement Automatique des Langues"
147 volume_no, issue_no = match_volume_and_issue(volume_title)
148
149 elif top_level_id[0] == "J":
150 # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>
151 year = int(top_level_id[1:3])
152 if year >= 65 and year <= 83:
153 journal_title = "American Journal of Computational Linguistics"
154 else:
155 journal_title = "Computational Linguistics"
156
157 volume_no, issue_no = match_volume_and_issue(volume_title)
158
159 elif top_level_id[0] == "Q" or top_level_id == "tacl":
160 journal_title = "Transactions of the Association for Computational Linguistics"
161 volume_no, _ = match_volume_and_issue(volume_title)
162
163 else:
164 journal_title = volume_title
165
166 return journal_title, volume_no, issue_no
167
[end of bin/anthology/data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/anthology/data.py b/bin/anthology/data.py
--- a/bin/anthology/data.py
+++ b/bin/anthology/data.py
@@ -146,6 +146,10 @@
journal_title = "Traitement Automatique des Langues"
volume_no, issue_no = match_volume_and_issue(volume_title)
+ elif top_level_id == "nejlt":
+ journal_title = "Northern European Journal of Language Technology"
+ volume_no, _ = match_volume_and_issue(volume_title)
+
elif top_level_id[0] == "J":
# <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>
year = int(top_level_id[1:3])
| {"golden_diff": "diff --git a/bin/anthology/data.py b/bin/anthology/data.py\n--- a/bin/anthology/data.py\n+++ b/bin/anthology/data.py\n@@ -146,6 +146,10 @@\n journal_title = \"Traitement Automatique des Langues\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n \n+ elif top_level_id == \"nejlt\":\n+ journal_title = \"Northern European Journal of Language Technology\"\n+ volume_no, _ = match_volume_and_issue(volume_title)\n+\n elif top_level_id[0] == \"J\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n year = int(top_level_id[1:3])\n", "issue": "Ingestion request: NEJLT vol 7 & 8\nThis is a new (to the anthology) venue.\r\n\r\n* **Venue name:** Northern European Journal of Language Technology (NEJLT)\r\n* **Website:** [nejlt.org](https://www.nejlt.org/)\r\n* Papers at https://doi.org/10.3384/nejlt.2000-1533.8.1 and https://doi.org/10.3384/nejlt.2000-1533.7.1\r\n\r\nI propose:\r\n* volume identifier `nejlt`\r\n* volume titles in the format _Northern European Journal of Language Technology, Volume n_\r\n\r\nWe're ready to send over two volumes, for 2021 and 2022. Iff and when this is OK with you, I'm happy to go assemble ACLPUB format volumes and send them.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n################################################################################\n# This file contains all constants and functions that have hardcoded data (such\n# as URLs or journal titles) which does not come from the XML. This is to\n# provide a single file where such hardcoded data can be looked up and/or\n# changed.\n################################################################################\n\nimport os\nimport re\n\nfrom typing import Tuple\n\n\n# this is the canonical URL. In contrast to all other\n# URL templates, it always links to the official anthology.\nCANONICAL_URL_TEMPLATE = \"https://aclanthology.org/{}\"\n\n# the prefix is used in different programs and we need to set it everywhere\n# We use a environment variable to set this and not have to forward the value\n# through all the programs. 
If this does not look like the best idea, keep in mind\n# that the structure is historically grown -- from 2019 to 2020 :-)\ntry:\n ANTHOLOGY_PREFIX = os.environ[\"ANTHOLOGY_PREFIX\"]\nexcept:\n ANTHOLOGY_PREFIX = \"https://aclanthology.org\"\n\nATTACHMENT_PREFIX = ANTHOLOGY_PREFIX + \"/attachments\"\nATTACHMENT_TEMPLATE = ATTACHMENT_PREFIX + \"/{}\"\n\nPDF_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/{}.pdf\"\nPDF_THUMBNAIL_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/thumb/{}.jpg\"\n\nVIDEO_LOCATION_TEMPLATE = ANTHOLOGY_PREFIX + \"/{}\"\n\n# Regular expression matching full Anthology IDs\nANTHOLOGY_ID_REGEX = r\"[A-Z]\\d{2}-\\d{4}\"\n\n# Anthology file location on server\n# Defaults to ~/anthology-files\nANTHOLOGY_FILE_DIR = os.environ.get(\n \"ANTHOLOGY_FILES\", os.path.join(os.environ[\"HOME\"], \"anthology-files\")\n)\n\n# Names of XML elements that may appear multiple times\nLIST_ELEMENTS = (\n \"attachment\",\n \"author\",\n \"editor\",\n \"video\",\n \"revision\",\n \"erratum\",\n \"award\",\n \"pwcdataset\",\n \"video\",\n \"venue\",\n \"colocated\",\n)\n\n# New-style IDs that should be handled as journals\nJOURNAL_IDS = (\"cl\", \"tacl\", \"tal\", \"lilt\")\n\n# Constants associated with DOI assignation\nDOI_URL_PREFIX = \"https://dx.doi.org/\"\nDOI_PREFIX = \"10.18653/v1/\"\n\n# Default ingestion date (= unknown)\nUNKNOWN_INGEST_DATE = \"1900-01-01\"\n\n# The venue format must match this pattern\nVENUE_FORMAT = r\"^[a-z\\d]+$\"\n\n\ndef match_volume_and_issue(booktitle) -> Tuple[str, str]:\n \"\"\"Parses a volume name and issue name from a title.\n\n Examples:\n - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n - <booktitle>Traitement Automatique des Langues 2011 Volume 52 Num\u00e9ro 1</booktitle>\n - <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n\n :param booktitle: The booktitle\n :return: the volume and issue numbers\n \"\"\"\n volume_no = re.search(r\"Volume\\s*(\\d+)\", booktitle, flags=re.IGNORECASE)\n if volume_no is not None:\n volume_no = volume_no.group(1)\n\n issue_no = re.search(\n r\"(Number|Num\u00e9ro|Issue)\\s*(\\d+-?\\d*)\", booktitle, flags=re.IGNORECASE\n )\n if issue_no is not None:\n issue_no = issue_no.group(2)\n\n return volume_no, issue_no\n\n\ndef get_journal_info(top_level_id, volume_title) -> Tuple[str, str, str]:\n \"\"\"Returns info about the journal: title, volume no., and issue no.\n Currently (Feb 2023), this information is parsed from the <booktitle> tag!\n We should move instead to an explicit representation. 
See\n\n https://github.com/acl-org/acl-anthology/issues/2379\n\n :param top_level_id: The collection ID\n :param volume_title: The text from the <booktitle> tag\n :return: The journal title, volume number, and issue number\n \"\"\"\n\n # TODO: consider moving this from code to data (perhaps\n # under <booktitle> in the volume metadata\n\n top_level_id = top_level_id.split(\".\")[-1] # for new-style IDs; is a no-op otherwise\n\n journal_title = None\n volume_no = None\n issue_no = None\n\n if top_level_id == \"cl\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n journal_title = \"Computational Linguistics\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"lilt\":\n # <booktitle>Linguistic Issues in Language Technology, Volume 10, 2015</booktitle>\n journal_title = \"Linguistic Issues in Language Technology\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n elif top_level_id == \"tal\":\n # <booktitle>Traitement Automatique des Langues 2011 Volume 52 Num\u00e9ro 1</booktitle>\n journal_title = \"Traitement Automatique des Langues\"\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id[0] == \"J\":\n # <booktitle>Computational Linguistics, Volume 26, Number 1, March 2000</booktitle>\n year = int(top_level_id[1:3])\n if year >= 65 and year <= 83:\n journal_title = \"American Journal of Computational Linguistics\"\n else:\n journal_title = \"Computational Linguistics\"\n\n volume_no, issue_no = match_volume_and_issue(volume_title)\n\n elif top_level_id[0] == \"Q\" or top_level_id == \"tacl\":\n journal_title = \"Transactions of the Association for Computational Linguistics\"\n volume_no, _ = match_volume_and_issue(volume_title)\n\n else:\n journal_title = volume_title\n\n return journal_title, volume_no, issue_no\n", "path": "bin/anthology/data.py"}]} | 2,682 | 173 |
gh_patches_debug_16444 | rasdani/github-patches | git_diff | bentoml__BentoML-3636 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feature: `client.health`
### Feature request
The client should expose a health-check function:
```python
client.health()
await client.async_health()
```
### Motivation
For HTTP, this would probably just need to invoke `/readyz`; for gRPC, it would invoke the `Health` RPC from `grpc.health.v1.HealthServicer`.
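
For the HTTP side, a rough sketch (not BentoML's actual API) of what this could look like, reusing the `aiohttp` base-URL pattern already present in `src/bentoml/_internal/client/http.py` below; the gRPC client would analogously call the standard `Check` RPC of the `grpc.health.v1.Health` service.

```python
import asyncio
import aiohttp

class HealthCheckSketch:
    """Illustrative only; method names follow the request above."""

    def __init__(self, server_url: str) -> None:
        self.server_url = server_url          # e.g. "http://127.0.0.1:3000"

    async def async_health(self) -> bool:
        async with aiohttp.ClientSession(self.server_url) as sess:
            async with sess.get("/readyz") as resp:
                return resp.status == 200     # 200 => server is ready

    def health(self) -> bool:
        return asyncio.run(self.async_health())
```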
### Other
_No response_
</issue>
<code>
[start of src/bentoml/_internal/client/http.py]
1 from __future__ import annotations
2
3 import json
4 import time
5 import socket
6 import typing as t
7 import logging
8 import urllib.error
9 import urllib.request
10 from http.client import HTTPConnection
11 from urllib.parse import urlparse
12
13 import aiohttp
14 import starlette.requests
15 import starlette.datastructures
16
17 from . import Client
18 from .. import io_descriptors as io
19 from ..service import Service
20 from ...exceptions import RemoteException
21 from ...exceptions import BentoMLException
22 from ..configuration import get_debug_mode
23 from ..service.inference_api import InferenceAPI
24
25 logger = logging.getLogger(__name__)
26
27
28 class HTTPClient(Client):
29 @staticmethod
30 def wait_until_server_ready(
31 host: str,
32 port: int,
33 timeout: int = 30,
34 check_interval: int = 1,
35 # set kwargs here to omit gRPC kwargs
36 **kwargs: t.Any,
37 ) -> None:
38 start_time = time.time()
39 status = None
40
41 logger.debug("Waiting for host %s to be ready.", f"{host}:{port}")
42 while time.time() - start_time < timeout:
43 try:
44 conn = HTTPConnection(host, port)
45 conn.request("GET", "/readyz")
46 status = conn.getresponse().status
47 if status == 200:
48 break
49 else:
50 time.sleep(check_interval)
51 except (
52 ConnectionError,
53 urllib.error.URLError,
54 socket.timeout,
55 ConnectionRefusedError,
56 ):
57 logger.debug("Server is not ready. Retrying...")
58 time.sleep(check_interval)
59
60 # try to connect one more time and raise exception.
61 try:
62 conn = HTTPConnection(host, port)
63 conn.request("GET", "/readyz")
64 status = conn.getresponse().status
65 if status != 200:
66 raise TimeoutError(
67 f"Timed out waiting {timeout} seconds for server at '{host}:{port}' to be ready."
68 )
69 except (
70 ConnectionError,
71 urllib.error.URLError,
72 socket.timeout,
73 ConnectionRefusedError,
74 TimeoutError,
75 ) as err:
76 logger.error("Caught exception while connecting to %s:%s:", host, port)
77 logger.error(err)
78 raise
79
80 @classmethod
81 def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:
82 server_url = server_url if "://" in server_url else "http://" + server_url
83 url_parts = urlparse(server_url)
84
85 # TODO: SSL support
86 conn = HTTPConnection(url_parts.netloc)
87 conn.set_debuglevel(logging.DEBUG if get_debug_mode() else 0)
88 conn.request("GET", url_parts.path + "/docs.json")
89 resp = conn.getresponse()
90 if resp.status != 200:
91 raise RemoteException(
92 f"Failed to get OpenAPI schema from the server: {resp.status} {resp.reason}:\n{resp.read()}"
93 )
94 openapi_spec = json.load(resp)
95 conn.close()
96
97 dummy_service = Service(openapi_spec["info"]["title"])
98
99 for route, spec in openapi_spec["paths"].items():
100 for meth_spec in spec.values():
101 if "tags" in meth_spec and "Service APIs" in meth_spec["tags"]:
102 if "x-bentoml-io-descriptor" not in meth_spec["requestBody"]:
103 # TODO: better message stating min version for from_url to work
104 raise BentoMLException(
105 f"Malformed BentoML spec received from BentoML server {server_url}"
106 )
107 if "x-bentoml-io-descriptor" not in meth_spec["responses"]["200"]:
108 raise BentoMLException(
109 f"Malformed BentoML spec received from BentoML server {server_url}"
110 )
111 if "x-bentoml-name" not in meth_spec:
112 raise BentoMLException(
113 f"Malformed BentoML spec received from BentoML server {server_url}"
114 )
115 try:
116 api = InferenceAPI(
117 None,
118 io.from_spec(
119 meth_spec["requestBody"]["x-bentoml-io-descriptor"]
120 ),
121 io.from_spec(
122 meth_spec["responses"]["200"]["x-bentoml-io-descriptor"]
123 ),
124 name=meth_spec["x-bentoml-name"],
125 doc=meth_spec["description"],
126 route=route.lstrip("/"),
127 )
128 dummy_service.apis[meth_spec["x-bentoml-name"]] = api
129 except BentoMLException as e:
130 logger.error(
131 "Failed to instantiate client for API %s: ",
132 meth_spec["x-bentoml-name"],
133 e,
134 )
135
136 return cls(dummy_service, server_url)
137
138 async def _call(
139 self, inp: t.Any = None, *, _bentoml_api: InferenceAPI, **kwargs: t.Any
140 ) -> t.Any:
141 # All gRPC kwargs should be poped out.
142 kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_grpc_")}
143 api = _bentoml_api
144
145 if api.multi_input:
146 if inp is not None:
147 raise BentoMLException(
148 f"'{api.name}' takes multiple inputs; all inputs must be passed as keyword arguments."
149 )
150 fake_resp = await api.input.to_http_response(kwargs, None)
151 else:
152 fake_resp = await api.input.to_http_response(inp, None)
153 req_body = fake_resp.body
154
155 async with aiohttp.ClientSession(self.server_url) as sess:
156 async with sess.post(
157 "/" + api.route,
158 data=req_body,
159 headers={"content-type": fake_resp.headers["content-type"]},
160 ) as resp:
161 if resp.status != 200:
162 raise BentoMLException(
163 f"Error making request: {resp.status}: {str(await resp.read())}"
164 )
165
166 fake_req = starlette.requests.Request(scope={"type": "http"})
167 headers = starlette.datastructures.Headers(headers=resp.headers)
168 fake_req._body = await resp.read()
169 # Request.headers sets a _headers variable. We will need to set this
170 # value to our fake request object.
171 fake_req._headers = headers # type: ignore (request._headers is property)
172
173 return await api.output.from_http_request(fake_req)
174
[end of src/bentoml/_internal/client/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/bentoml/_internal/client/http.py b/src/bentoml/_internal/client/http.py
--- a/src/bentoml/_internal/client/http.py
+++ b/src/bentoml/_internal/client/http.py
@@ -4,6 +4,7 @@
import time
import socket
import typing as t
+import asyncio
import logging
import urllib.error
import urllib.request
@@ -77,6 +78,14 @@
logger.error(err)
raise
+ async def async_health(self) -> t.Any:
+ async with aiohttp.ClientSession(self.server_url) as sess:
+ async with sess.get("/readyz") as resp:
+ return resp
+
+ def health(self) -> t.Any:
+ return asyncio.run(self.async_health())
+
@classmethod
def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:
server_url = server_url if "://" in server_url else "http://" + server_url
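
A short usage sketch of the two methods added by the patch above, reusing the `from_url` constructor already present in the quoted file; the server address is an assumption for illustration.

```python
# Hypothetical usage of the new health helpers (server URL is assumed).
from bentoml._internal.client.http import HTTPClient

client = HTTPClient.from_url("http://localhost:3000")
resp = client.health()            # wraps asyncio.run(self.async_health())
print(resp.status)                # aiohttp response status; 200 means /readyz succeeded
```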
| {"golden_diff": "diff --git a/src/bentoml/_internal/client/http.py b/src/bentoml/_internal/client/http.py\n--- a/src/bentoml/_internal/client/http.py\n+++ b/src/bentoml/_internal/client/http.py\n@@ -4,6 +4,7 @@\n import time\n import socket\n import typing as t\n+import asyncio\n import logging\n import urllib.error\n import urllib.request\n@@ -77,6 +78,14 @@\n logger.error(err)\n raise\n \n+ async def async_health(self) -> t.Any:\n+ async with aiohttp.ClientSession(self.server_url) as sess:\n+ async with sess.get(\"/readyz\") as resp:\n+ return resp\n+\n+ def health(self) -> t.Any:\n+ return asyncio.run(self.async_health())\n+\n @classmethod\n def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:\n server_url = server_url if \"://\" in server_url else \"http://\" + server_url\n", "issue": "feature: `client.health`\n### Feature request\n\nclient to have a health function:\r\n```python\r\nclient.health()\r\nawait client.async_health()\r\n```\n\n### Motivation\n\nFor HTTP, would probably just need to invoke `/readyz`, and for gRPC is to invoke the `Health` rpc from `grpc.health.v1.HealthServicer`.\n\n### Other\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nimport time\nimport socket\nimport typing as t\nimport logging\nimport urllib.error\nimport urllib.request\nfrom http.client import HTTPConnection\nfrom urllib.parse import urlparse\n\nimport aiohttp\nimport starlette.requests\nimport starlette.datastructures\n\nfrom . import Client\nfrom .. import io_descriptors as io\nfrom ..service import Service\nfrom ...exceptions import RemoteException\nfrom ...exceptions import BentoMLException\nfrom ..configuration import get_debug_mode\nfrom ..service.inference_api import InferenceAPI\n\nlogger = logging.getLogger(__name__)\n\n\nclass HTTPClient(Client):\n @staticmethod\n def wait_until_server_ready(\n host: str,\n port: int,\n timeout: int = 30,\n check_interval: int = 1,\n # set kwargs here to omit gRPC kwargs\n **kwargs: t.Any,\n ) -> None:\n start_time = time.time()\n status = None\n\n logger.debug(\"Waiting for host %s to be ready.\", f\"{host}:{port}\")\n while time.time() - start_time < timeout:\n try:\n conn = HTTPConnection(host, port)\n conn.request(\"GET\", \"/readyz\")\n status = conn.getresponse().status\n if status == 200:\n break\n else:\n time.sleep(check_interval)\n except (\n ConnectionError,\n urllib.error.URLError,\n socket.timeout,\n ConnectionRefusedError,\n ):\n logger.debug(\"Server is not ready. 
Retrying...\")\n time.sleep(check_interval)\n\n # try to connect one more time and raise exception.\n try:\n conn = HTTPConnection(host, port)\n conn.request(\"GET\", \"/readyz\")\n status = conn.getresponse().status\n if status != 200:\n raise TimeoutError(\n f\"Timed out waiting {timeout} seconds for server at '{host}:{port}' to be ready.\"\n )\n except (\n ConnectionError,\n urllib.error.URLError,\n socket.timeout,\n ConnectionRefusedError,\n TimeoutError,\n ) as err:\n logger.error(\"Caught exception while connecting to %s:%s:\", host, port)\n logger.error(err)\n raise\n\n @classmethod\n def from_url(cls, server_url: str, **kwargs: t.Any) -> HTTPClient:\n server_url = server_url if \"://\" in server_url else \"http://\" + server_url\n url_parts = urlparse(server_url)\n\n # TODO: SSL support\n conn = HTTPConnection(url_parts.netloc)\n conn.set_debuglevel(logging.DEBUG if get_debug_mode() else 0)\n conn.request(\"GET\", url_parts.path + \"/docs.json\")\n resp = conn.getresponse()\n if resp.status != 200:\n raise RemoteException(\n f\"Failed to get OpenAPI schema from the server: {resp.status} {resp.reason}:\\n{resp.read()}\"\n )\n openapi_spec = json.load(resp)\n conn.close()\n\n dummy_service = Service(openapi_spec[\"info\"][\"title\"])\n\n for route, spec in openapi_spec[\"paths\"].items():\n for meth_spec in spec.values():\n if \"tags\" in meth_spec and \"Service APIs\" in meth_spec[\"tags\"]:\n if \"x-bentoml-io-descriptor\" not in meth_spec[\"requestBody\"]:\n # TODO: better message stating min version for from_url to work\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n if \"x-bentoml-io-descriptor\" not in meth_spec[\"responses\"][\"200\"]:\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n if \"x-bentoml-name\" not in meth_spec:\n raise BentoMLException(\n f\"Malformed BentoML spec received from BentoML server {server_url}\"\n )\n try:\n api = InferenceAPI(\n None,\n io.from_spec(\n meth_spec[\"requestBody\"][\"x-bentoml-io-descriptor\"]\n ),\n io.from_spec(\n meth_spec[\"responses\"][\"200\"][\"x-bentoml-io-descriptor\"]\n ),\n name=meth_spec[\"x-bentoml-name\"],\n doc=meth_spec[\"description\"],\n route=route.lstrip(\"/\"),\n )\n dummy_service.apis[meth_spec[\"x-bentoml-name\"]] = api\n except BentoMLException as e:\n logger.error(\n \"Failed to instantiate client for API %s: \",\n meth_spec[\"x-bentoml-name\"],\n e,\n )\n\n return cls(dummy_service, server_url)\n\n async def _call(\n self, inp: t.Any = None, *, _bentoml_api: InferenceAPI, **kwargs: t.Any\n ) -> t.Any:\n # All gRPC kwargs should be poped out.\n kwargs = {k: v for k, v in kwargs.items() if not k.startswith(\"_grpc_\")}\n api = _bentoml_api\n\n if api.multi_input:\n if inp is not None:\n raise BentoMLException(\n f\"'{api.name}' takes multiple inputs; all inputs must be passed as keyword arguments.\"\n )\n fake_resp = await api.input.to_http_response(kwargs, None)\n else:\n fake_resp = await api.input.to_http_response(inp, None)\n req_body = fake_resp.body\n\n async with aiohttp.ClientSession(self.server_url) as sess:\n async with sess.post(\n \"/\" + api.route,\n data=req_body,\n headers={\"content-type\": fake_resp.headers[\"content-type\"]},\n ) as resp:\n if resp.status != 200:\n raise BentoMLException(\n f\"Error making request: {resp.status}: {str(await resp.read())}\"\n )\n\n fake_req = starlette.requests.Request(scope={\"type\": \"http\"})\n headers = 
starlette.datastructures.Headers(headers=resp.headers)\n fake_req._body = await resp.read()\n # Request.headers sets a _headers variable. We will need to set this\n # value to our fake request object.\n fake_req._headers = headers # type: ignore (request._headers is property)\n\n return await api.output.from_http_request(fake_req)\n", "path": "src/bentoml/_internal/client/http.py"}]} | 2,401 | 220 |
gh_patches_debug_1179 | rasdani/github-patches | git_diff | locustio__locust-1395 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update flask version
Our minimum required Flask version is too old (we saw at least one person hitting an issue because of it: https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py)

Version 0.12.5 (https://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5) would be the bare minimum, but we should probably go to 1.x right away.
I can do the PR
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 import ast
3 import os
4 import re
5 import sys
6
7 from setuptools import find_packages, setup
8
9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
10
11 # parse version from locust/__init__.py
12 _version_re = re.compile(r'__version__\s+=\s+(.*)')
13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py")
14 with open(_init_file, 'rb') as f:
15 version = str(ast.literal_eval(_version_re.search(
16 f.read().decode('utf-8')).group(1)))
17
18 setup(
19 name='locust',
20 version=version,
21 install_requires=[
22 "gevent>=1.5.0",
23 "flask>=0.10.1",
24 "requests>=2.9.1",
25 "msgpack>=0.6.2",
26 "pyzmq>=16.0.2",
27 "geventhttpclient>=1.4.2",
28 "ConfigArgParse>=1.0",
29 "psutil>=5.6.7",
30 "Flask-BasicAuth>=0.2.0"
31 ],
32 test_suite="locust.test",
33 tests_require=[
34 'cryptography',
35 'mock',
36 'pyquery',
37 ],
38 )
39
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
version=version,
install_requires=[
"gevent>=1.5.0",
- "flask>=0.10.1",
+ "flask>=1.1.2",
"requests>=2.9.1",
"msgpack>=0.6.2",
"pyzmq>=16.0.2",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n- \"flask>=0.10.1\", \n+ \"flask>=1.1.2\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\",\n", "issue": "Update flask version\nOur minimum required flask version is too old (saw at least one person having an issue https://stackoverflow.com/questions/61969924/typeerror-when-i-run-a-locustfile-py)\r\n\r\nhttps://flask.palletsprojects.com/en/1.1.x/changelog/#version-0-12-5 is a minimum, but we should probably go to 1.x right away.\r\n\r\nI can do the PR\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\nsetup(\n name='locust',\n version=version,\n install_requires=[\n \"gevent>=1.5.0\",\n \"flask>=0.10.1\", \n \"requests>=2.9.1\", \n \"msgpack>=0.6.2\", \n \"pyzmq>=16.0.2\", \n \"geventhttpclient>=1.4.2\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\"\n ],\n test_suite=\"locust.test\",\n tests_require=[\n 'cryptography',\n 'mock',\n 'pyquery',\n ], \n)\n", "path": "setup.py"}]} | 985 | 115 |
gh_patches_debug_18495 | rasdani/github-patches | git_diff | apache__airflow-8230 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Airflow webserver not starting with SQLAlchemy==1.3.16
**Apache Airflow version**: 1.10.9
**Environment**: Ubuntu 18.04 LTS
- **OS** (e.g. from /etc/os-release): Ubuntu 18.04 LTS
**What happened**: `airflow webserver` fails with an error:

```console
airflow@airflow:~$ airflow webserver
[2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
[2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor
[2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py", line 75, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py", line 900, in webserver
app = cached_app_rbac(None) if settings.RBAC else cached_app(None)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 233, in cached_app
app = create_app(config, testing)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py", line 103, in create_app
models.Chart, Session, name="Charts", category="Data Profiling"))
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 330, in __init__
menu_icon_value=menu_icon_value)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 818, in __init__
self._refresh_cache()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py", line 913, in _refresh_cache
self._search_supported = self.init_search()
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py", line 581, in init_search
if tools.is_hybrid_property(self.model, name):
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 209, in is_hybrid_property
return last_name in get_hybrid_properties(last_model)
File "/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py", line 190, in get_hybrid_properties
for key, prop in inspect(model).all_orm_descriptors.items()
File "/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py", line 72, in inspect
"available for object of type %s" % type_
sqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>
```
**What you expected to happen**: the webserver should start normally.

**How to reproduce it**:
Install Airflow with pip3, together with PostgreSQL 10 from the Ubuntu repositories.
</issue>
<code>
[start of airflow/models/chart.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Licensed to the Apache Software Foundation (ASF) under one
4 # or more contributor license agreements. See the NOTICE file
5 # distributed with this work for additional information
6 # regarding copyright ownership. The ASF licenses this file
7 # to you under the Apache License, Version 2.0 (the
8 # "License"); you may not use this file except in compliance
9 # with the License. You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing,
14 # software distributed under the License is distributed on an
15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 # KIND, either express or implied. See the License for the
17 # specific language governing permissions and limitations
18 # under the License.
19
20 from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text
21 from sqlalchemy.orm import relationship
22
23 from airflow.models.base import Base, ID_LEN
24 from airflow.utils.sqlalchemy import UtcDateTime
25 from airflow.utils import timezone
26
27
28 class Chart(Base):
29 __tablename__ = "chart"
30
31 id = Column(Integer, primary_key=True)
32 label = Column(String(200))
33 conn_id = Column(String(ID_LEN), nullable=False)
34 user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)
35 chart_type = Column(String(100), default="line")
36 sql_layout = Column(String(50), default="series")
37 sql = Column(Text, default="SELECT series, x, y FROM table")
38 y_log_scale = Column(Boolean)
39 show_datatable = Column(Boolean)
40 show_sql = Column(Boolean, default=True)
41 height = Column(Integer, default=600)
42 default_params = Column(String(5000), default="{}")
43 owner = relationship(
44 "User", cascade=False, cascade_backrefs=False, backref='charts')
45 x_is_date = Column(Boolean, default=True)
46 iteration_no = Column(Integer, default=0)
47 last_modified = Column(UtcDateTime, default=timezone.utcnow)
48
49 def __repr__(self):
50 return self.label
51
[end of airflow/models/chart.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/models/chart.py b/airflow/models/chart.py
--- a/airflow/models/chart.py
+++ b/airflow/models/chart.py
@@ -21,6 +21,7 @@
from sqlalchemy.orm import relationship
from airflow.models.base import Base, ID_LEN
+from airflow.models.user import User
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils import timezone
@@ -41,7 +42,7 @@
height = Column(Integer, default=600)
default_params = Column(String(5000), default="{}")
owner = relationship(
- "User", cascade=False, cascade_backrefs=False, backref='charts')
+ User, cascade=False, cascade_backrefs=False, backref='charts')
x_is_date = Column(Boolean, default=True)
iteration_no = Column(Integer, default=0)
last_modified = Column(UtcDateTime, default=timezone.utcnow)
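
A standalone sketch of the pattern the patch above moves to: passing the mapped class object itself to `relationship()` instead of its string name. This is not Airflow code; the model names simply mirror the ones in `chart.py`, and the claim that this sidesteps the flask-admin/SQLAlchemy 1.3.16 inspection failure is based on the traceback in the issue.

```python
# Minimal declarative models illustrating relationship(User, ...) vs relationship("User", ...).
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)


class Chart(Base):
    __tablename__ = "chart"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    # Pass the class object, as the patch does, rather than the string "User".
    owner = relationship(User, backref="charts")
```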
| {"golden_diff": "diff --git a/airflow/models/chart.py b/airflow/models/chart.py\n--- a/airflow/models/chart.py\n+++ b/airflow/models/chart.py\n@@ -21,6 +21,7 @@\n from sqlalchemy.orm import relationship\n \n from airflow.models.base import Base, ID_LEN\n+from airflow.models.user import User\n from airflow.utils.sqlalchemy import UtcDateTime\n from airflow.utils import timezone\n \n@@ -41,7 +42,7 @@\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n- \"User\", cascade=False, cascade_backrefs=False, backref='charts')\n+ User, cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n", "issue": "Airflow webserver not starting with SQLAlchemy==1.3.16\n\r\n**Apache Airflow version**: 1.10.9\r\n**Environment**: Ubuntu 18.04 LTS\r\n\r\n- **Cloud provider or hardware configuration**:\r\n- **OS** (e.g. from /etc/os-release):Ubuntu 18.04 LTS\r\n\r\n**What happened**: airflow webserver error\r\n\r\nairflow@airflow:~$ airflow webserver\r\n[2020-04-08 09:45:49,843] {settings.py:253} INFO - settings.configure_orm(): Using pool settings. pool_size=5, max_overflow=10, pool_recycle=1800, pid=30494\r\n ____________ _____________\r\n ____ |__( )_________ __/__ /________ __\r\n____ /| |_ /__ ___/_ /_ __ /_ __ \\_ | /| / /\r\n___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /\r\n _/_/ |_/_/ /_/ /_/ /_/ \\____/____/|__/\r\n[2020-04-08 09:45:50,462] {__init__.py:51} INFO - Using executor LocalExecutor\r\n[2020-04-08 09:45:50,463] {dagbag.py:403} INFO - Filling up the DagBag from /home/airflow/airflow/dags\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/cli.py\", line 75, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/bin/cli.py\", line 900, in webserver\r\n app = cached_app_rbac(None) if settings.RBAC else cached_app(None)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py\", line 233, in cached_app\r\n app = create_app(config, testing)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/www/app.py\", line 103, in create_app\r\n models.Chart, Session, name=\"Charts\", category=\"Data Profiling\"))\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py\", line 330, in __init__\r\n menu_icon_value=menu_icon_value)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py\", line 818, in __init__\r\n self._refresh_cache()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/model/base.py\", line 913, in _refresh_cache\r\n self._search_supported = self.init_search()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/view.py\", line 581, in init_search\r\n if tools.is_hybrid_property(self.model, name):\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py\", line 209, in is_hybrid_property\r\n return last_name in get_hybrid_properties(last_model)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/flask_admin/contrib/sqla/tools.py\", line 190, in get_hybrid_properties\r\n for key, prop in inspect(model).all_orm_descriptors.items()\r\n File 
\"/home/airflow/.local/lib/python3.6/site-packages/sqlalchemy/inspection.py\", line 72, in inspect\r\n \"available for object of type %s\" % type_\r\nsqlalchemy.exc.NoInspectionAvailable: No inspection system is available for object of type <class 'method'>\r\n\r\n**What you expected to happen**: to start\r\n\r\n<!-- What do you think went wrong? -->\r\n\r\n**How to reproduce it**:\r\nInstall airflow with pip3 and postgres from ubuntu which is 10.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Text\nfrom sqlalchemy.orm import relationship\n\nfrom airflow.models.base import Base, ID_LEN\nfrom airflow.utils.sqlalchemy import UtcDateTime\nfrom airflow.utils import timezone\n\n\nclass Chart(Base):\n __tablename__ = \"chart\"\n\n id = Column(Integer, primary_key=True)\n label = Column(String(200))\n conn_id = Column(String(ID_LEN), nullable=False)\n user_id = Column(Integer(), ForeignKey('users.id'), nullable=True)\n chart_type = Column(String(100), default=\"line\")\n sql_layout = Column(String(50), default=\"series\")\n sql = Column(Text, default=\"SELECT series, x, y FROM table\")\n y_log_scale = Column(Boolean)\n show_datatable = Column(Boolean)\n show_sql = Column(Boolean, default=True)\n height = Column(Integer, default=600)\n default_params = Column(String(5000), default=\"{}\")\n owner = relationship(\n \"User\", cascade=False, cascade_backrefs=False, backref='charts')\n x_is_date = Column(Boolean, default=True)\n iteration_no = Column(Integer, default=0)\n last_modified = Column(UtcDateTime, default=timezone.utcnow)\n\n def __repr__(self):\n return self.label\n", "path": "airflow/models/chart.py"}]} | 2,066 | 204 |
gh_patches_debug_21535 | rasdani/github-patches | git_diff | ansible-collections__community.general-3263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
apache2_module: Relies on a2enmod / misleading error on CentOS
### Summary
The Ansible `apache2_module` module relies on Debian-style binaries (sometimes called "Debianisms") such as `a2enmod` for managing `httpd`/`apache` modules. CentOS does not ship these binaries, so the module cannot manage modules there.
On CentOS this module emits a misleading error:
````
None not found. Perhaps this system does not use None to manage apache
````
Responsible code:
https://github.com/ansible-collections/community.general/blob/da11a98cb734e99cc57f4ae6ec09d9199875c39b/plugins/modules/web_infrastructure/apache2_module.py#L207
### Issue Type
Bug Report
### Component Name
apache2_module
### Ansible Version
```console
$ ansible --version
ansible 2.9.10
config file = None
configured module search path = ['/home/build/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
executable location = /usr/local/bin/ansible
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
```
### Configuration
```console
$ ansible-config dump --only-changed
(Empty)
```
### OS / Environment
WSL 2/Ubuntu 20.04 LTS (ansible client); CentOS 7.x (ansible target).
### Steps to Reproduce
```yaml
- name: Enable mod_substitute
apache2_module:
name: mod_substitute
state: present
```
### Expected Results
The `apache2_module` Ansible module manages the module without errors.
### Actual Results
```console
None not found. Perhaps this system does not use None to manage apache
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/web_infrastructure/apache2_module.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013-2014, Christian Berendt <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10
11 DOCUMENTATION = '''
12 ---
13 module: apache2_module
14 author:
15 - Christian Berendt (@berendt)
16 - Ralf Hertel (@n0trax)
17 - Robin Roth (@robinro)
18 short_description: Enables/disables a module of the Apache2 webserver.
19 description:
20 - Enables or disables a specified module of the Apache2 webserver.
21 options:
22 name:
23 type: str
24 description:
25 - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
26 required: true
27 identifier:
28 type: str
29 description:
30 - Identifier of the module as listed by C(apache2ctl -M).
31 This is optional and usually determined automatically by the common convention of
32 appending C(_module) to I(name) as well as custom exception for popular modules.
33 required: False
34 force:
35 description:
36 - Force disabling of default modules and override Debian warnings.
37 required: false
38 type: bool
39 default: False
40 state:
41 type: str
42 description:
43 - Desired state of the module.
44 choices: ['present', 'absent']
45 default: present
46 ignore_configcheck:
47 description:
48 - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
49 type: bool
50 default: False
51 requirements: ["a2enmod","a2dismod"]
52 notes:
53 - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.
54 Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not.
55 '''
56
57 EXAMPLES = '''
58 - name: Enable the Apache2 module wsgi
59 community.general.apache2_module:
60 state: present
61 name: wsgi
62
63 - name: Disables the Apache2 module wsgi
64 community.general.apache2_module:
65 state: absent
66 name: wsgi
67
68 - name: Disable default modules for Debian
69 community.general.apache2_module:
70 state: absent
71 name: autoindex
72 force: True
73
74 - name: Disable mpm_worker and ignore warnings about missing mpm module
75 community.general.apache2_module:
76 state: absent
77 name: mpm_worker
78 ignore_configcheck: True
79
80 - name: Enable dump_io module, which is identified as dumpio_module inside apache2
81 community.general.apache2_module:
82 state: present
83 name: dump_io
84 identifier: dumpio_module
85 '''
86
87 RETURN = '''
88 result:
89 description: message about action taken
90 returned: always
91 type: str
92 warnings:
93 description: list of warning messages
94 returned: when needed
95 type: list
96 rc:
97 description: return code of underlying command
98 returned: failed
99 type: int
100 stdout:
101 description: stdout of underlying command
102 returned: failed
103 type: str
104 stderr:
105 description: stderr of underlying command
106 returned: failed
107 type: str
108 '''
109
110 import re
111
112 # import module snippets
113 from ansible.module_utils.basic import AnsibleModule
114
115 _re_threaded = re.compile(r'threaded: *yes')
116
117
118 def _run_threaded(module):
119 control_binary = _get_ctl_binary(module)
120 result, stdout, stderr = module.run_command([control_binary, "-V"])
121
122 return bool(_re_threaded.search(stdout))
123
124
125 def _get_ctl_binary(module):
126 for command in ['apache2ctl', 'apachectl']:
127 ctl_binary = module.get_bin_path(command)
128 if ctl_binary is not None:
129 return ctl_binary
130
131 module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.")
132
133
134 def _module_is_enabled(module):
135 control_binary = _get_ctl_binary(module)
136 result, stdout, stderr = module.run_command([control_binary, "-M"])
137
138 if result != 0:
139 error_msg = "Error executing %s: %s" % (control_binary, stderr)
140 if module.params['ignore_configcheck']:
141 if 'AH00534' in stderr and 'mpm_' in module.params['name']:
142 module.warnings.append(
143 "No MPM module loaded! apache2 reload AND other module actions"
144 " will fail if no MPM module is loaded immediately."
145 )
146 else:
147 module.warnings.append(error_msg)
148 return False
149 else:
150 module.fail_json(msg=error_msg)
151
152 searchstring = ' ' + module.params['identifier']
153 return searchstring in stdout
154
155
156 def create_apache_identifier(name):
157 """
158 By convention if a module is loaded via name, it appears in apache2ctl -M as
159 name_module.
160
161 Some modules don't follow this convention and we use replacements for those."""
162
163 # a2enmod name replacement to apache2ctl -M names
164 text_workarounds = [
165 ('shib', 'mod_shib'),
166 ('shib2', 'mod_shib'),
167 ('evasive', 'evasive20_module'),
168 ]
169
170 # re expressions to extract subparts of names
171 re_workarounds = [
172 ('php', re.compile(r'^(php\d)\.')),
173 ]
174
175 for a2enmod_spelling, module_name in text_workarounds:
176 if a2enmod_spelling in name:
177 return module_name
178
179 for search, reexpr in re_workarounds:
180 if search in name:
181 try:
182 rematch = reexpr.search(name)
183 return rematch.group(1) + '_module'
184 except AttributeError:
185 pass
186
187 return name + '_module'
188
189
190 def _set_state(module, state):
191 name = module.params['name']
192 force = module.params['force']
193
194 want_enabled = state == 'present'
195 state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
196 a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
197 success_msg = "Module %s %s" % (name, state_string)
198
199 if _module_is_enabled(module) != want_enabled:
200 if module.check_mode:
201 module.exit_json(changed=True,
202 result=success_msg,
203 warnings=module.warnings)
204
205 a2mod_binary = [module.get_bin_path(a2mod_binary)]
206 if a2mod_binary is None:
207 module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
208
209 if not want_enabled and force:
210 # force exists only for a2dismod on debian
211 a2mod_binary.append('-f')
212
213 result, stdout, stderr = module.run_command(a2mod_binary + [name])
214
215 if _module_is_enabled(module) == want_enabled:
216 module.exit_json(changed=True,
217 result=success_msg,
218 warnings=module.warnings)
219 else:
220 msg = (
221 'Failed to set module {name} to {state}:\n'
222 '{stdout}\n'
223 'Maybe the module identifier ({identifier}) was guessed incorrectly.'
224 'Consider setting the "identifier" option.'
225 ).format(
226 name=name,
227 state=state_string,
228 stdout=stdout,
229 identifier=module.params['identifier']
230 )
231 module.fail_json(msg=msg,
232 rc=result,
233 stdout=stdout,
234 stderr=stderr)
235 else:
236 module.exit_json(changed=False,
237 result=success_msg,
238 warnings=module.warnings)
239
240
241 def main():
242 module = AnsibleModule(
243 argument_spec=dict(
244 name=dict(required=True),
245 identifier=dict(type='str'),
246 force=dict(type='bool', default=False),
247 state=dict(default='present', choices=['absent', 'present']),
248 ignore_configcheck=dict(type='bool', default=False),
249 ),
250 supports_check_mode=True,
251 )
252
253 module.warnings = []
254
255 name = module.params['name']
256 if name == 'cgi' and _run_threaded(module):
257 module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")
258
259 if not module.params['identifier']:
260 module.params['identifier'] = create_apache_identifier(module.params['name'])
261
262 if module.params['state'] in ['present', 'absent']:
263 _set_state(module, module.params['state'])
264
265
266 if __name__ == '__main__':
267 main()
268
[end of plugins/modules/web_infrastructure/apache2_module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py
--- a/plugins/modules/web_infrastructure/apache2_module.py
+++ b/plugins/modules/web_infrastructure/apache2_module.py
@@ -202,15 +202,17 @@
result=success_msg,
warnings=module.warnings)
- a2mod_binary = [module.get_bin_path(a2mod_binary)]
- if a2mod_binary is None:
+ a2mod_binary_path = module.get_bin_path(a2mod_binary)
+ if a2mod_binary_path is None:
module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
+ a2mod_binary_cmd = [a2mod_binary_path]
+
if not want_enabled and force:
# force exists only for a2dismod on debian
- a2mod_binary.append('-f')
+ a2mod_binary_cmd.append('-f')
- result, stdout, stderr = module.run_command(a2mod_binary + [name])
+ result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])
if _module_is_enabled(module) == want_enabled:
module.exit_json(changed=True,
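
A reduced sketch of why the pre-patch check could never fire and what the patch above does instead. The snippet is illustrative only: `module` stands in for an `AnsibleModule` instance, and the binary name is just an example.

```python
# Old behaviour (illustrative): wrapping the lookup result in a list first
# means the None check is comparing a list, so it never triggers.
binary = None            # what get_bin_path() yields when a2enmod is absent
wrapped = [binary]
print(wrapped is None)   # False -> fail_json() with a useful message is skipped

# Patched pattern: check the raw lookup result before building the command list.
a2mod_binary_path = module.get_bin_path("a2enmod")
if a2mod_binary_path is None:
    module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache")
a2mod_binary_cmd = [a2mod_binary_path]
```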
| {"golden_diff": "diff --git a/plugins/modules/web_infrastructure/apache2_module.py b/plugins/modules/web_infrastructure/apache2_module.py\n--- a/plugins/modules/web_infrastructure/apache2_module.py\n+++ b/plugins/modules/web_infrastructure/apache2_module.py\n@@ -202,15 +202,17 @@\n result=success_msg,\n warnings=module.warnings)\n \n- a2mod_binary = [module.get_bin_path(a2mod_binary)]\n- if a2mod_binary is None:\n+ a2mod_binary_path = module.get_bin_path(a2mod_binary)\n+ if a2mod_binary_path is None:\n module.fail_json(msg=\"%s not found. Perhaps this system does not use %s to manage apache\" % (a2mod_binary, a2mod_binary))\n \n+ a2mod_binary_cmd = [a2mod_binary_path]\n+\n if not want_enabled and force:\n # force exists only for a2dismod on debian\n- a2mod_binary.append('-f')\n+ a2mod_binary_cmd.append('-f')\n \n- result, stdout, stderr = module.run_command(a2mod_binary + [name])\n+ result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])\n \n if _module_is_enabled(module) == want_enabled:\n module.exit_json(changed=True,\n", "issue": "apache2_module: Relies on a2enmod / misleading error on CentOS\n### Summary\n\nThe ansible `apache2_module` module relies on Debian-style (also sometimes called \"Debianisms\") binaries as `a2enmod` for managing `httpd`/`apache` modules. On CentOS this is not the case as this distribution doesn't use these binaries. \r\n\r\nOn CentOS this module emits a misleading error:\r\n````\r\nNone not found. Perhaps this system does not use None to manage apache\r\n````\r\n\r\nResponsible code:\r\nhttps://github.com/ansible-collections/community.general/blob/da11a98cb734e99cc57f4ae6ec09d9199875c39b/plugins/modules/web_infrastructure/apache2_module.py#L207\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\napache2_module\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\n\r\nansible 2.9.10\r\n config file = None\r\n configured module search path = ['/home/build/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n(Empty)\r\n```\r\n\n\n### OS / Environment\n\nWSL 2/Ubuntu 20.04 LTS (ansible client); CentOS 7.x (ansible target).\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n - name: Enable mod_substitute\r\n apache2_module:\r\n name: mod_substitute\r\n state: present\r\n```\r\n\n\n### Expected Results\n\n`apache_module` ansible module manages the modules without errors.\n\n### Actual Results\n\n```console (paste below)\r\nNone not found. 
Perhaps this system does not use None to manage apache\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013-2014, Christian Berendt <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: apache2_module\nauthor:\n - Christian Berendt (@berendt)\n - Ralf Hertel (@n0trax)\n - Robin Roth (@robinro)\nshort_description: Enables/disables a module of the Apache2 webserver.\ndescription:\n - Enables or disables a specified module of the Apache2 webserver.\noptions:\n name:\n type: str\n description:\n - Name of the module to enable/disable as given to C(a2enmod/a2dismod).\n required: true\n identifier:\n type: str\n description:\n - Identifier of the module as listed by C(apache2ctl -M).\n This is optional and usually determined automatically by the common convention of\n appending C(_module) to I(name) as well as custom exception for popular modules.\n required: False\n force:\n description:\n - Force disabling of default modules and override Debian warnings.\n required: false\n type: bool\n default: False\n state:\n type: str\n description:\n - Desired state of the module.\n choices: ['present', 'absent']\n default: present\n ignore_configcheck:\n description:\n - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.\n type: bool\n default: False\nrequirements: [\"a2enmod\",\"a2dismod\"]\nnotes:\n - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.\n Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not.\n'''\n\nEXAMPLES = '''\n- name: Enable the Apache2 module wsgi\n community.general.apache2_module:\n state: present\n name: wsgi\n\n- name: Disables the Apache2 module wsgi\n community.general.apache2_module:\n state: absent\n name: wsgi\n\n- name: Disable default modules for Debian\n community.general.apache2_module:\n state: absent\n name: autoindex\n force: True\n\n- name: Disable mpm_worker and ignore warnings about missing mpm module\n community.general.apache2_module:\n state: absent\n name: mpm_worker\n ignore_configcheck: True\n\n- name: Enable dump_io module, which is identified as dumpio_module inside apache2\n community.general.apache2_module:\n state: present\n name: dump_io\n identifier: dumpio_module\n'''\n\nRETURN = '''\nresult:\n description: message about action taken\n returned: always\n type: str\nwarnings:\n description: list of warning messages\n returned: when needed\n type: list\nrc:\n description: return code of underlying command\n returned: failed\n type: int\nstdout:\n description: stdout of underlying command\n returned: failed\n type: str\nstderr:\n description: stderr of underlying command\n returned: failed\n type: str\n'''\n\nimport re\n\n# import module snippets\nfrom ansible.module_utils.basic import AnsibleModule\n\n_re_threaded = re.compile(r'threaded: *yes')\n\n\ndef _run_threaded(module):\n control_binary = _get_ctl_binary(module)\n result, stdout, stderr = module.run_command([control_binary, \"-V\"])\n\n return bool(_re_threaded.search(stdout))\n\n\ndef _get_ctl_binary(module):\n for command in ['apache2ctl', 'apachectl']:\n ctl_binary = module.get_bin_path(command)\n if ctl_binary is not None:\n return 
ctl_binary\n\n module.fail_json(msg=\"Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.\")\n\n\ndef _module_is_enabled(module):\n control_binary = _get_ctl_binary(module)\n result, stdout, stderr = module.run_command([control_binary, \"-M\"])\n\n if result != 0:\n error_msg = \"Error executing %s: %s\" % (control_binary, stderr)\n if module.params['ignore_configcheck']:\n if 'AH00534' in stderr and 'mpm_' in module.params['name']:\n module.warnings.append(\n \"No MPM module loaded! apache2 reload AND other module actions\"\n \" will fail if no MPM module is loaded immediately.\"\n )\n else:\n module.warnings.append(error_msg)\n return False\n else:\n module.fail_json(msg=error_msg)\n\n searchstring = ' ' + module.params['identifier']\n return searchstring in stdout\n\n\ndef create_apache_identifier(name):\n \"\"\"\n By convention if a module is loaded via name, it appears in apache2ctl -M as\n name_module.\n\n Some modules don't follow this convention and we use replacements for those.\"\"\"\n\n # a2enmod name replacement to apache2ctl -M names\n text_workarounds = [\n ('shib', 'mod_shib'),\n ('shib2', 'mod_shib'),\n ('evasive', 'evasive20_module'),\n ]\n\n # re expressions to extract subparts of names\n re_workarounds = [\n ('php', re.compile(r'^(php\\d)\\.')),\n ]\n\n for a2enmod_spelling, module_name in text_workarounds:\n if a2enmod_spelling in name:\n return module_name\n\n for search, reexpr in re_workarounds:\n if search in name:\n try:\n rematch = reexpr.search(name)\n return rematch.group(1) + '_module'\n except AttributeError:\n pass\n\n return name + '_module'\n\n\ndef _set_state(module, state):\n name = module.params['name']\n force = module.params['force']\n\n want_enabled = state == 'present'\n state_string = {'present': 'enabled', 'absent': 'disabled'}[state]\n a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]\n success_msg = \"Module %s %s\" % (name, state_string)\n\n if _module_is_enabled(module) != want_enabled:\n if module.check_mode:\n module.exit_json(changed=True,\n result=success_msg,\n warnings=module.warnings)\n\n a2mod_binary = [module.get_bin_path(a2mod_binary)]\n if a2mod_binary is None:\n module.fail_json(msg=\"%s not found. 
Perhaps this system does not use %s to manage apache\" % (a2mod_binary, a2mod_binary))\n\n if not want_enabled and force:\n # force exists only for a2dismod on debian\n a2mod_binary.append('-f')\n\n result, stdout, stderr = module.run_command(a2mod_binary + [name])\n\n if _module_is_enabled(module) == want_enabled:\n module.exit_json(changed=True,\n result=success_msg,\n warnings=module.warnings)\n else:\n msg = (\n 'Failed to set module {name} to {state}:\\n'\n '{stdout}\\n'\n 'Maybe the module identifier ({identifier}) was guessed incorrectly.'\n 'Consider setting the \"identifier\" option.'\n ).format(\n name=name,\n state=state_string,\n stdout=stdout,\n identifier=module.params['identifier']\n )\n module.fail_json(msg=msg,\n rc=result,\n stdout=stdout,\n stderr=stderr)\n else:\n module.exit_json(changed=False,\n result=success_msg,\n warnings=module.warnings)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(required=True),\n identifier=dict(type='str'),\n force=dict(type='bool', default=False),\n state=dict(default='present', choices=['absent', 'present']),\n ignore_configcheck=dict(type='bool', default=False),\n ),\n supports_check_mode=True,\n )\n\n module.warnings = []\n\n name = module.params['name']\n if name == 'cgi' and _run_threaded(module):\n module.fail_json(msg=\"Your MPM seems to be threaded. No automatic actions on module cgi possible.\")\n\n if not module.params['identifier']:\n module.params['identifier'] = create_apache_identifier(module.params['name'])\n\n if module.params['state'] in ['present', 'absent']:\n _set_state(module, module.params['state'])\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/web_infrastructure/apache2_module.py"}]} | 3,656 | 288 |
gh_patches_debug_17420 | rasdani/github-patches | git_diff | pytorch__ignite-2676 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scheduled workflow failed
Oh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**.
Please look into it:
https://github.com/pytorch/ignite/actions/runs/2923090334
Feel free to close this if this was just a one-off error.
</issue>
<code>
[start of ignite/metrics/gan/utils.py]
1 from typing import Callable, Optional, Union
2
3 import torch
4 from packaging.version import Version
5
6 from ignite.metrics.metric import Metric
7
8
9 class InceptionModel(torch.nn.Module):
10 r"""Inception Model pre-trained on the ImageNet Dataset.
11
12 Args:
13 return_features: set it to `True` if you want the model to return features from the last pooling
14 layer instead of prediction probabilities.
15 device: specifies which device updates are accumulated on. Setting the
16 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
17 non-blocking. By default, CPU.
18 """
19
20 def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None:
21 try:
22 from torchvision import models
23 except ImportError:
24 raise RuntimeError("This module requires torchvision to be installed.")
25 super(InceptionModel, self).__init__()
26 self._device = device
27 if Version(torch.__version__) <= Version("1.7.0"):
28 model_kwargs = {"pretrained": True}
29 else:
30 model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
31
32 self.model = models.inception_v3(**model_kwargs).to(self._device)
33
34 if return_features:
35 self.model.fc = torch.nn.Identity()
36 else:
37 self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))
38 self.model.eval()
39
40 @torch.no_grad()
41 def forward(self, data: torch.Tensor) -> torch.Tensor:
42 if data.dim() != 4:
43 raise ValueError(f"Inputs should be a tensor of dim 4, got {data.dim()}")
44 if data.shape[1] != 3:
45 raise ValueError(f"Inputs should be a tensor with 3 channels, got {data.shape}")
46 if data.device != torch.device(self._device):
47 data = data.to(self._device)
48 return self.model(data)
49
50
51 class _BaseInceptionMetric(Metric):
52 def __init__(
53 self,
54 num_features: Optional[int],
55 feature_extractor: Optional[torch.nn.Module],
56 output_transform: Callable = lambda x: x,
57 device: Union[str, torch.device] = torch.device("cpu"),
58 ) -> None:
59
60 if num_features is None:
61 raise ValueError("Argument num_features must be provided, if feature_extractor is specified.")
62
63 if feature_extractor is None:
64 feature_extractor = torch.nn.Identity()
65
66 if num_features <= 0:
67 raise ValueError(f"Argument num_features must be greater to zero, got: {num_features}")
68
69 if not isinstance(feature_extractor, torch.nn.Module):
70 raise TypeError(
71 f"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}"
72 )
73
74 self._num_features = num_features
75 self._feature_extractor = feature_extractor.to(device)
76
77 super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)
78
79 def _check_feature_shapes(self, samples: torch.Tensor) -> None:
80
81 if samples.dim() != 2:
82 raise ValueError(f"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}")
83
84 if samples.shape[0] == 0:
85 raise ValueError(f"Batch size should be greater than one, got: {samples.shape[0]}")
86
87 if samples.shape[1] != self._num_features:
88 raise ValueError(
89 f"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}"
90 )
91
92 def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:
93
94 inputs = inputs.detach()
95
96 if inputs.device != torch.device(self._device):
97 inputs = inputs.to(self._device)
98
99 with torch.no_grad():
100 outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)
101 self._check_feature_shapes(outputs)
102
103 return outputs
104
[end of ignite/metrics/gan/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py
--- a/ignite/metrics/gan/utils.py
+++ b/ignite/metrics/gan/utils.py
@@ -19,12 +19,13 @@
def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None:
try:
+ import torchvision
from torchvision import models
except ImportError:
raise RuntimeError("This module requires torchvision to be installed.")
super(InceptionModel, self).__init__()
self._device = device
- if Version(torch.__version__) <= Version("1.7.0"):
+ if Version(torchvision.__version__) < Version("0.13.0"):
model_kwargs = {"pretrained": True}
else:
model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
| {"golden_diff": "diff --git a/ignite/metrics/gan/utils.py b/ignite/metrics/gan/utils.py\n--- a/ignite/metrics/gan/utils.py\n+++ b/ignite/metrics/gan/utils.py\n@@ -19,12 +19,13 @@\n \n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n+ import torchvision\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n- if Version(torch.__version__) <= Version(\"1.7.0\"):\n+ if Version(torchvision.__version__) < Version(\"0.13.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n", "issue": "Scheduled workflow failed\nOh no, something went wrong in the scheduled workflow **PyTorch version tests with commit 98844bf82b963a429d22b09f650cb0af2023bf20**.\nPlease look into it:\n\nhttps://github.com/pytorch/ignite/actions/runs/2923090334\n\nFeel free to close this if this was just a one-off error.\n\n", "before_files": [{"content": "from typing import Callable, Optional, Union\n\nimport torch\nfrom packaging.version import Version\n\nfrom ignite.metrics.metric import Metric\n\n\nclass InceptionModel(torch.nn.Module):\n r\"\"\"Inception Model pre-trained on the ImageNet Dataset.\n\n Args:\n return_features: set it to `True` if you want the model to return features from the last pooling\n layer instead of prediction probabilities.\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \"\"\"\n\n def __init__(self, return_features: bool, device: Union[str, torch.device] = \"cpu\") -> None:\n try:\n from torchvision import models\n except ImportError:\n raise RuntimeError(\"This module requires torchvision to be installed.\")\n super(InceptionModel, self).__init__()\n self._device = device\n if Version(torch.__version__) <= Version(\"1.7.0\"):\n model_kwargs = {\"pretrained\": True}\n else:\n model_kwargs = {\"weights\": models.Inception_V3_Weights.DEFAULT}\n\n self.model = models.inception_v3(**model_kwargs).to(self._device)\n\n if return_features:\n self.model.fc = torch.nn.Identity()\n else:\n self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))\n self.model.eval()\n\n @torch.no_grad()\n def forward(self, data: torch.Tensor) -> torch.Tensor:\n if data.dim() != 4:\n raise ValueError(f\"Inputs should be a tensor of dim 4, got {data.dim()}\")\n if data.shape[1] != 3:\n raise ValueError(f\"Inputs should be a tensor with 3 channels, got {data.shape}\")\n if data.device != torch.device(self._device):\n data = data.to(self._device)\n return self.model(data)\n\n\nclass _BaseInceptionMetric(Metric):\n def __init__(\n self,\n num_features: Optional[int],\n feature_extractor: Optional[torch.nn.Module],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if num_features is None:\n raise ValueError(\"Argument num_features must be provided, if feature_extractor is specified.\")\n\n if feature_extractor is None:\n feature_extractor = torch.nn.Identity()\n\n if num_features <= 0:\n raise ValueError(f\"Argument num_features must be greater to zero, got: {num_features}\")\n\n if not isinstance(feature_extractor, torch.nn.Module):\n raise TypeError(\n f\"Argument feature_extractor must be of type torch.nn.Module, got 
{type(self._feature_extractor)}\"\n )\n\n self._num_features = num_features\n self._feature_extractor = feature_extractor.to(device)\n\n super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)\n\n def _check_feature_shapes(self, samples: torch.Tensor) -> None:\n\n if samples.dim() != 2:\n raise ValueError(f\"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}\")\n\n if samples.shape[0] == 0:\n raise ValueError(f\"Batch size should be greater than one, got: {samples.shape[0]}\")\n\n if samples.shape[1] != self._num_features:\n raise ValueError(\n f\"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}\"\n )\n\n def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:\n\n inputs = inputs.detach()\n\n if inputs.device != torch.device(self._device):\n inputs = inputs.to(self._device)\n\n with torch.no_grad():\n outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)\n self._check_feature_shapes(outputs)\n\n return outputs\n", "path": "ignite/metrics/gan/utils.py"}]} | 1,698 | 198 |
gh_patches_debug_11941 | rasdani/github-patches | git_diff | SeldonIO__MLServer-1104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mlserver >= 1.3 doesn't work with MLflow
After the release of `mlserver==1.3.1`, one of our tests in `https://github.com/mlflow/mlflow` started failing with the following error. We're investigating the cause.
https://github.com/mlflow/mlflow/actions/runs/4716614587/jobs/8364498235?pr=8241#step:7:8138
```
2023-04-17 03:00:01,885 [mlserver.parallel] DEBUG - Starting response processing loop...
2023-04-17 03:00:01,894 [mlserver.rest] INFO - HTTP server running on http://0.0.0.0:8080/
2023-04-17 03:00:01,943 [mlserver.metrics] INFO - Metrics server running on http://0.0.0.0:8082/
2023-04-17 03:00:01,944 [mlserver.metrics] INFO - Prometheus scraping endpoint can be accessed on http://0.0.0.0:8082/metrics
2023-04-17 03:00:01,963 [mlserver.grpc] INFO - gRPC server running on http://0.0.0.0:8081/
INFO: 172.17.0.1:59636 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59644 - "GET /ping HTTP/1.1" 404 Not Found
2023-04-17 03:00:03,801 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.
2023-04-17 03:00:03,802 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.
2023-04-17 03:00:03,804 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.
INFO: 172.17.0.1:59646 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59660 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59674 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59690 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59702 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59708 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59712 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:59718 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42498 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42512 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42528 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42530 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42536 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42544 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42546 - "GET /ping HTTP/1.1" 404 Not Found
INFO: 172.17.0.1:42552 - "GET /ping HTTP/1.1" 404 Not Found
...
```
The error above indicates that the `/ping` endpoint is not registered.
</issue>
<code>
[start of mlserver/parallel/registry.py]
1 import asyncio
2 import os
3 import shutil
4
5 from typing import Optional, Dict, List
6
7 from ..settings import ModelSettings
8 from ..utils import to_absolute_path
9 from ..model import MLModel
10 from ..settings import Settings
11 from ..env import Environment, compute_hash
12 from ..registry import model_initialiser
13
14 from .errors import EnvironmentNotFound
15 from .logging import logger
16 from .pool import InferencePool, InferencePoolHook
17
18 ENV_HASH_ATTR = "__env_hash__"
19
20
21 def _set_environment_hash(model: MLModel, env_hash: Optional[str]):
22 setattr(model, ENV_HASH_ATTR, env_hash)
23
24
25 def _get_environment_hash(model: MLModel) -> Optional[str]:
26 return getattr(model, ENV_HASH_ATTR, None)
27
28
29 def _get_env_tarball(model: MLModel) -> Optional[str]:
30 model_settings = model.settings
31 if model_settings.parameters is None:
32 return None
33
34 env_tarball = model_settings.parameters.environment_tarball
35 if env_tarball is None:
36 return None
37
38 return to_absolute_path(model_settings, env_tarball)
39
40
41 class InferencePoolRegistry:
42 """
43 Keeps track of the different inference pools loaded in the server.
44 Each inference pool will generally be used to load a different environment.
45 """
46
47 def __init__(
48 self, settings: Settings, on_worker_stop: List[InferencePoolHook] = []
49 ):
50 self._settings = settings
51 self._on_worker_stop = on_worker_stop
52 self._default_pool = InferencePool(
53 self._settings, on_worker_stop=on_worker_stop
54 )
55 self._pools: Dict[str, InferencePool] = {}
56
57 os.makedirs(self._settings.environments_dir, exist_ok=True)
58
59 async def _get_or_create(self, model: MLModel) -> InferencePool:
60 env_tarball = _get_env_tarball(model)
61 if not env_tarball:
62 return self._default_pool
63
64 env_hash = await compute_hash(env_tarball)
65 if env_hash in self._pools:
66 return self._pools[env_hash]
67
68 env = await self._extract_tarball(env_hash, env_tarball)
69 pool = InferencePool(
70 self._settings, env=env, on_worker_stop=self._on_worker_stop
71 )
72 self._pools[env_hash] = pool
73 return pool
74
75 async def _extract_tarball(self, env_hash: str, env_tarball: str) -> Environment:
76 env_path = self._get_env_path(env_hash)
77 if os.path.isdir(env_path):
78 # If env has already been extracted, use that
79 return Environment(env_path, env_hash)
80
81 os.makedirs(env_path)
82 return await Environment.from_tarball(env_tarball, env_path, env_hash)
83
84 def _get_env_path(self, env_hash: str) -> str:
85 return os.path.join(self._settings.environments_dir, env_hash)
86
87 async def _find(self, model: MLModel) -> InferencePool:
88 env_hash = _get_environment_hash(model)
89 if not env_hash:
90 return self._default_pool
91
92 if env_hash not in self._pools:
93 raise EnvironmentNotFound(model, env_hash)
94
95 return self._pools[env_hash]
96
97 def _should_load_model(self, model_settings: ModelSettings):
98 if model_settings.parallel_workers is not None:
99 logger.warning(
100 "DEPRECATED!! The `parallel_workers` setting at the model-level "
101 "has now been deprecated and moved "
102 "to the top-level server "
103 "settings. "
104 "This field will be removed in MLServer 1.2.0. "
105 "To access the new field, you can either update the "
106 "`settings.json` file, or update the `MLSERVER_PARALLEL_WORKERS` "
107 "environment variable. "
108 f"The current value of the server-level's `parallel_workers` field is "
109 f"'{self._settings.parallel_workers}'."
110 )
111
112 # NOTE: This is a remnant from the previous architecture for parallel
113 # workers, where each worker had its own pool.
114 # For backwards compatibility, we will respect when a model disables
115 # parallel inference.
116 if model_settings.parallel_workers <= 0:
117 return False
118
119 if not self._settings.parallel_workers:
120 return False
121
122 return True
123
124 def model_initialiser(self, model_settings: ModelSettings) -> MLModel:
125 """
126 Used to initialise a model object in the ModelRegistry.
127 """
128 if not self._should_load_model(model_settings):
129 # If parallel inference should not be used, instantiate the model
130 # as normal.
131 return model_initialiser(model_settings)
132
133 # Otherwise, return a dummy model for now and wait for the load_model
134 # hook to create the actual thing.
135 # This avoids instantiating the model's actual class within the
136 # main process.
137 return MLModel(model_settings)
138
139 async def load_model(self, model: MLModel) -> MLModel:
140 if not self._should_load_model(model.settings):
141 # Skip load if model has disabled parallel workers
142 return model
143
144 # TODO: If load fails, should we remove pool if empty?
145 pool = await self._get_or_create(model)
146 loaded = await pool.load_model(model)
147 _set_environment_hash(loaded, pool.env_hash)
148 return loaded
149
150 async def reload_model(self, old_model: MLModel, new_model: MLModel) -> MLModel:
151 if not self._should_load_model(new_model.settings):
152 # TODO: What would happen if old_model had parallel inference
153 # enabled and is disabled in new_model (and viceversa)?
154 # Skip reload if model has disabled parallel workers
155 return new_model
156
157 old_hash = _get_environment_hash(old_model)
158 new_pool = await self._get_or_create(new_model)
159
160 loaded = await new_pool.reload_model(old_model, new_model)
161 _set_environment_hash(loaded, new_pool.env_hash)
162 if old_hash != new_pool.env_hash:
163 # Environment has changed in the new version, so unload the old one
164 await self.unload_model(old_model)
165
166 return loaded
167
168 async def unload_model(self, model: MLModel) -> MLModel:
169 if not self._should_load_model(model.settings):
170 # Skip unload if model has disabled parallel workers
171 return model
172
173 pool = await self._find(model)
174 unloaded = await pool.unload_model(model)
175
176 if pool != self._default_pool and pool.empty():
177 logger.info(f"Inference pool with hash '{pool.env_hash}' is now empty")
178 await self._close_pool(pool.env_hash)
179
180 return unloaded
181
182 async def close(self):
183 await asyncio.gather(
184 self._close_pool(None),
185 *[self._close_pool(env_hash) for env_hash in self._pools],
186 )
187
188 async def _close_pool(self, env_hash: Optional[str] = None):
189 pool = self._default_pool
190 pool_name = "default inference pool"
191 if env_hash:
192 pool = self._pools[env_hash]
193 pool_name = f"inference pool with hash '{env_hash}'"
194
195 logger.info(f"Waiting for shutdown of {pool_name}...")
196 await pool.close()
197 logger.info(f"Shutdown of {pool_name} complete")
198
199 if env_hash:
200 del self._pools[env_hash]
201 env_path = self._get_env_path(env_hash)
202 shutil.rmtree(env_path)
203
[end of mlserver/parallel/registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/parallel/registry.py b/mlserver/parallel/registry.py
--- a/mlserver/parallel/registry.py
+++ b/mlserver/parallel/registry.py
@@ -130,6 +130,12 @@
# as normal.
return model_initialiser(model_settings)
+ parameters = model_settings.parameters
+ if not parameters or not parameters.environment_tarball:
+ # If model is not using a custom environment, instantiate the model
+ # as normal.
+ return model_initialiser(model_settings)
+
# Otherwise, return a dummy model for now and wait for the load_model
# hook to create the actual thing.
# This avoids instantiating the model's actual class within the
| {"golden_diff": "diff --git a/mlserver/parallel/registry.py b/mlserver/parallel/registry.py\n--- a/mlserver/parallel/registry.py\n+++ b/mlserver/parallel/registry.py\n@@ -130,6 +130,12 @@\n # as normal.\n return model_initialiser(model_settings)\n \n+ parameters = model_settings.parameters\n+ if not parameters or not parameters.environment_tarball:\n+ # If model is not using a custom environment, instantiate the model\n+ # as normal.\n+ return model_initialiser(model_settings)\n+\n # Otherwise, return a dummy model for now and wait for the load_model\n # hook to create the actual thing.\n # This avoids instantiating the model's actual class within the\n", "issue": "mlserver >= 1.3 doesn't work with MLflow\nAfter the release of `mlserver==1.3.1`, one of our tests in `https://github.com/mlflow/mlflow` started failing with the following error. We're investigating the cause.\r\n\r\nhttps://github.com/mlflow/mlflow/actions/runs/4716614587/jobs/8364498235?pr=8241#step:7:8138\r\n\r\n```\r\n2023-04-17 03:00:01,885 [mlserver.parallel] DEBUG - Starting response processing loop...\r\n2023-04-17 03:00:01,894 [mlserver.rest] INFO - HTTP server running on http://0.0.0.0:8080/\r\n2023-04-17 03:00:01,943 [mlserver.metrics] INFO - Metrics server running on http://0.0.0.0:8082/\r\n2023-04-17 03:00:01,944 [mlserver.metrics] INFO - Prometheus scraping endpoint can be accessed on http://0.0.0.0:8082/metrics\r\n2023-04-17 03:00:01,963 [mlserver.grpc] INFO - gRPC server running on http://0.0.0.0:8081/\r\nINFO: 172.17.0.1:59636 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59644 - \"GET /ping HTTP/1.1\" 404 Not Found\r\n2023-04-17 03:00:03,801 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.\r\n2023-04-17 03:00:03,802 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.\r\n2023-04-17 03:00:03,804 [mlserver] INFO - Loaded model 'mlflow-model' succesfully.\r\nINFO: 172.17.0.1:59646 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59660 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59674 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59690 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59702 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59708 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59712 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:59718 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42498 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42512 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42528 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42530 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42536 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42544 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42546 - \"GET /ping HTTP/1.1\" 404 Not Found\r\nINFO: 172.17.0.1:42552 - \"GET /ping HTTP/1.1\" 404 Not Found\r\n...\r\n```\r\n\r\nThe error above indicates that the `/ping` endpoint is not registered.\n", "before_files": [{"content": "import asyncio\nimport os\nimport shutil\n\nfrom typing import Optional, Dict, List\n\nfrom ..settings import ModelSettings\nfrom ..utils import to_absolute_path\nfrom ..model import MLModel\nfrom ..settings import Settings\nfrom ..env import Environment, compute_hash\nfrom ..registry import model_initialiser\n\nfrom .errors import EnvironmentNotFound\nfrom .logging import logger\nfrom .pool import InferencePool, InferencePoolHook\n\nENV_HASH_ATTR = \"__env_hash__\"\n\n\ndef 
_set_environment_hash(model: MLModel, env_hash: Optional[str]):\n setattr(model, ENV_HASH_ATTR, env_hash)\n\n\ndef _get_environment_hash(model: MLModel) -> Optional[str]:\n return getattr(model, ENV_HASH_ATTR, None)\n\n\ndef _get_env_tarball(model: MLModel) -> Optional[str]:\n model_settings = model.settings\n if model_settings.parameters is None:\n return None\n\n env_tarball = model_settings.parameters.environment_tarball\n if env_tarball is None:\n return None\n\n return to_absolute_path(model_settings, env_tarball)\n\n\nclass InferencePoolRegistry:\n \"\"\"\n Keeps track of the different inference pools loaded in the server.\n Each inference pool will generally be used to load a different environment.\n \"\"\"\n\n def __init__(\n self, settings: Settings, on_worker_stop: List[InferencePoolHook] = []\n ):\n self._settings = settings\n self._on_worker_stop = on_worker_stop\n self._default_pool = InferencePool(\n self._settings, on_worker_stop=on_worker_stop\n )\n self._pools: Dict[str, InferencePool] = {}\n\n os.makedirs(self._settings.environments_dir, exist_ok=True)\n\n async def _get_or_create(self, model: MLModel) -> InferencePool:\n env_tarball = _get_env_tarball(model)\n if not env_tarball:\n return self._default_pool\n\n env_hash = await compute_hash(env_tarball)\n if env_hash in self._pools:\n return self._pools[env_hash]\n\n env = await self._extract_tarball(env_hash, env_tarball)\n pool = InferencePool(\n self._settings, env=env, on_worker_stop=self._on_worker_stop\n )\n self._pools[env_hash] = pool\n return pool\n\n async def _extract_tarball(self, env_hash: str, env_tarball: str) -> Environment:\n env_path = self._get_env_path(env_hash)\n if os.path.isdir(env_path):\n # If env has already been extracted, use that\n return Environment(env_path, env_hash)\n\n os.makedirs(env_path)\n return await Environment.from_tarball(env_tarball, env_path, env_hash)\n\n def _get_env_path(self, env_hash: str) -> str:\n return os.path.join(self._settings.environments_dir, env_hash)\n\n async def _find(self, model: MLModel) -> InferencePool:\n env_hash = _get_environment_hash(model)\n if not env_hash:\n return self._default_pool\n\n if env_hash not in self._pools:\n raise EnvironmentNotFound(model, env_hash)\n\n return self._pools[env_hash]\n\n def _should_load_model(self, model_settings: ModelSettings):\n if model_settings.parallel_workers is not None:\n logger.warning(\n \"DEPRECATED!! The `parallel_workers` setting at the model-level \"\n \"has now been deprecated and moved \"\n \"to the top-level server \"\n \"settings. \"\n \"This field will be removed in MLServer 1.2.0. \"\n \"To access the new field, you can either update the \"\n \"`settings.json` file, or update the `MLSERVER_PARALLEL_WORKERS` \"\n \"environment variable. 
\"\n f\"The current value of the server-level's `parallel_workers` field is \"\n f\"'{self._settings.parallel_workers}'.\"\n )\n\n # NOTE: This is a remnant from the previous architecture for parallel\n # workers, where each worker had its own pool.\n # For backwards compatibility, we will respect when a model disables\n # parallel inference.\n if model_settings.parallel_workers <= 0:\n return False\n\n if not self._settings.parallel_workers:\n return False\n\n return True\n\n def model_initialiser(self, model_settings: ModelSettings) -> MLModel:\n \"\"\"\n Used to initialise a model object in the ModelRegistry.\n \"\"\"\n if not self._should_load_model(model_settings):\n # If parallel inference should not be used, instantiate the model\n # as normal.\n return model_initialiser(model_settings)\n\n # Otherwise, return a dummy model for now and wait for the load_model\n # hook to create the actual thing.\n # This avoids instantiating the model's actual class within the\n # main process.\n return MLModel(model_settings)\n\n async def load_model(self, model: MLModel) -> MLModel:\n if not self._should_load_model(model.settings):\n # Skip load if model has disabled parallel workers\n return model\n\n # TODO: If load fails, should we remove pool if empty?\n pool = await self._get_or_create(model)\n loaded = await pool.load_model(model)\n _set_environment_hash(loaded, pool.env_hash)\n return loaded\n\n async def reload_model(self, old_model: MLModel, new_model: MLModel) -> MLModel:\n if not self._should_load_model(new_model.settings):\n # TODO: What would happen if old_model had parallel inference\n # enabled and is disabled in new_model (and viceversa)?\n # Skip reload if model has disabled parallel workers\n return new_model\n\n old_hash = _get_environment_hash(old_model)\n new_pool = await self._get_or_create(new_model)\n\n loaded = await new_pool.reload_model(old_model, new_model)\n _set_environment_hash(loaded, new_pool.env_hash)\n if old_hash != new_pool.env_hash:\n # Environment has changed in the new version, so unload the old one\n await self.unload_model(old_model)\n\n return loaded\n\n async def unload_model(self, model: MLModel) -> MLModel:\n if not self._should_load_model(model.settings):\n # Skip unload if model has disabled parallel workers\n return model\n\n pool = await self._find(model)\n unloaded = await pool.unload_model(model)\n\n if pool != self._default_pool and pool.empty():\n logger.info(f\"Inference pool with hash '{pool.env_hash}' is now empty\")\n await self._close_pool(pool.env_hash)\n\n return unloaded\n\n async def close(self):\n await asyncio.gather(\n self._close_pool(None),\n *[self._close_pool(env_hash) for env_hash in self._pools],\n )\n\n async def _close_pool(self, env_hash: Optional[str] = None):\n pool = self._default_pool\n pool_name = \"default inference pool\"\n if env_hash:\n pool = self._pools[env_hash]\n pool_name = f\"inference pool with hash '{env_hash}'\"\n\n logger.info(f\"Waiting for shutdown of {pool_name}...\")\n await pool.close()\n logger.info(f\"Shutdown of {pool_name} complete\")\n\n if env_hash:\n del self._pools[env_hash]\n env_path = self._get_env_path(env_hash)\n shutil.rmtree(env_path)\n", "path": "mlserver/parallel/registry.py"}]} | 3,817 | 161 |
gh_patches_debug_34450 | rasdani/github-patches | git_diff | medtagger__MedTagger-88 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not drop tables in functional tests
## Expected Behavior
Functional tests should clean the tables in Postgres and HBase.
## Actual Behavior
Functional tests drop all the available tables and create them again. This may be (and probably is) time-consuming.
## Additional comment
Please compare both implementations before merge.
</issue>
<code>
[start of backend/medtagger/clients/hbase_client.py]
1 """Module responsible for definition of client for HBase database."""
2 import logging
3 from typing import Iterable, List, Mapping, Tuple, Any
4
5 import happybase
6 from retrying import retry
7 from thriftpy.transport import TTransportException
8
9 from medtagger.config import AppConfiguration
10
11 logger = logging.getLogger(__name__)
12
13 configuration = AppConfiguration()
14 host = configuration.get('hbase', 'host', fallback='localhost')
15 port = configuration.getint('hbase', 'port', fallback=9090)
16 size = configuration.getint('hbase', 'connection_pool_size', fallback=10)
17 try:
18 HBASE_CONNECTION_POOL = happybase.ConnectionPool(size, host=host, port=port)
19 except (TTransportException, BrokenPipeError):
20 logger.warning('Could not connect to HBase. Is it down?')
21
22
23 def is_alive() -> bool:
24 """Return boolean information if HBase is alive or not."""
25 try:
26 happybase.ConnectionPool(1, host=host, port=port)
27 return True
28 except (TTransportException, BrokenPipeError):
29 return False
30
31
32 class HBaseClient(object):
33 """Client for HBase.
34
35 How to use this client?
36 -----------------------
37 This is a wrapper for HappyBase Connection. Client uses HappyBase's Connection Pool, so don't worry about closing
38 connection, etc. This client should do everything inside below methods.
39
40 WATCH OUT: Script that migrates HBase schema may not work properly if you want to change column names!
41 In such case please run your migration manually!
42
43 Example:
44
45 >>> hbase_client = HBaseClient()
46 >>> data = hbase_client.get('my_table_name', 'row_key')
47 >>> ...
48
49 """
50
51 ORIGINAL_SLICES_TABLE = 'original_slices'
52 CONVERTED_SLICES_TABLE = 'converted_slices'
53 LABEL_SELECTION_BINARY_MASK_TABLE = 'label_selection_binary_mask'
54
55 HBASE_SCHEMA = {
56 ORIGINAL_SLICES_TABLE: ['image'],
57 CONVERTED_SLICES_TABLE: ['image'],
58 LABEL_SELECTION_BINARY_MASK_TABLE: ['binary_mask'],
59 }
60
61 def __init__(self) -> None:
62 """Initialize client."""
63 pass
64
65 @staticmethod
66 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
67 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
68 def get_all_keys(table_name: str, starts_with: str = None) -> Iterable[str]:
69 """Fetch all keys for given table.
70
71 :param table_name: name of a table
72 :param starts_with: prefix for keys
73 :return: iterator for table keys
74 """
75 with HBASE_CONNECTION_POOL.connection() as connection:
76 row_prefix = str.encode(starts_with) if starts_with else None
77 table = connection.table(table_name)
78 for key, _ in table.scan(row_prefix=row_prefix, filter=str.encode('KeyOnlyFilter()')):
79 yield key.decode('utf-8')
80
81 @staticmethod
82 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
83 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
84 def get_all_rows(table_name: str, columns: List, starts_with: str = None) -> Iterable[Tuple[str, Any]]:
85 """Fetch all rows for given table.
86
87 :param table_name: name of a table
88 :param starts_with: prefix for keys
89 :param columns: list of columns to fetch
90 :return: iterator for table keys
91 """
92 with HBASE_CONNECTION_POOL.connection() as connection:
93 row_prefix = str.encode(starts_with) if starts_with else None
94 table = connection.table(table_name)
95 for key, value in table.scan(row_prefix=row_prefix, columns=columns):
96 yield key.decode('utf-8'), value
97
98 @staticmethod
99 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
100 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
101 def get(table_name: str, key: str, columns: List[str] = None) -> Mapping:
102 """Fetch a single row from HBase table.
103
104 :param table_name: name of a table
105 :param key: key representing a row
106 :param columns: columns which should be loaded (by default all)
107 :return: mapping returned by HBase
108 """
109 hbase_key = str.encode(key)
110 with HBASE_CONNECTION_POOL.connection() as connection:
111 table = connection.table(table_name)
112 return table.row(hbase_key, columns=columns)
113
114 @staticmethod
115 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
116 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
117 def put(table_name: str, key: str, value: Any) -> None:
118 """Add new entry into HBase table.
119
120 :param table_name: name of a table
121 :param key: key under value should be stored
122 :param value: value which should be stored
123 """
124 hbase_key = str.encode(key)
125 with HBASE_CONNECTION_POOL.connection() as connection:
126 table = connection.table(table_name)
127 table.put(hbase_key, value)
128
129 @staticmethod
130 @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
131 retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
132 def check_if_exists(table_name: str, key: str) -> bool:
133 """Scan database and check if given key exists.
134
135 :param table_name: name of a table
136 :param key: HBase key
137 :return: boolean information if such key exists or not
138 """
139 hbase_key = str.encode(key)
140 with HBASE_CONNECTION_POOL.connection() as connection:
141 table = connection.table(table_name)
142 results = table.scan(row_start=hbase_key, row_stop=hbase_key,
143 filter=str.encode('KeyOnlyFilter() AND FirstKeyOnlyFilter()'), limit=1)
144 return next(results, None) is not None
145
[end of backend/medtagger/clients/hbase_client.py]
[start of backend/scripts/utils.py]
1 """Storage for all utility functions."""
2 from starbase import Connection
3 from medtagger.config import AppConfiguration
4
5
6 def get_connection_to_hbase() -> Connection:
7 """Fetch configuration data and create HBase connection.
8
9 :return: connection to HBase using Starbase library
10 """
11 configuration = AppConfiguration()
12 host = configuration.get('hbase', 'host', fallback='localhost')
13 port = configuration.getint('hbase', 'rest_port', fallback=8080)
14 return Connection(host=host, port=port)
15
16
17 def user_agrees(prompt_message: str) -> bool:
18 """Ask user a question and ask him/her for True/False answer (default answer is False).
19
20 :param prompt_message: message that will be prompted to user
21 :return: boolean information if user agrees or not
22 """
23 answer = input(prompt_message + ' [y/N] ')
24 return answer.lower() in ['y', 'yes', 't', 'true']
25
[end of backend/scripts/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/medtagger/clients/hbase_client.py b/backend/medtagger/clients/hbase_client.py
--- a/backend/medtagger/clients/hbase_client.py
+++ b/backend/medtagger/clients/hbase_client.py
@@ -111,6 +111,21 @@
table = connection.table(table_name)
return table.row(hbase_key, columns=columns)
+ @staticmethod
+ @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
+ retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
+ def delete(table_name: str, key: str, columns: List[str] = None) -> None:
+ """Delete a single row (or values from colums in given row) in HBase table.
+
+ :param table_name: name of a table
+ :param key: key representing a row
+ :param columns: columns which should be cleared
+ """
+ hbase_key = str.encode(key)
+ with HBASE_CONNECTION_POOL.connection() as connection:
+ table = connection.table(table_name)
+ table.delete(hbase_key, columns=columns)
+
@staticmethod
@retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,
retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))
diff --git a/backend/scripts/utils.py b/backend/scripts/utils.py
--- a/backend/scripts/utils.py
+++ b/backend/scripts/utils.py
@@ -1,8 +1,13 @@
"""Storage for all utility functions."""
from starbase import Connection
+from retrying import retry
+import requests
+
from medtagger.config import AppConfiguration
+@retry(stop_max_attempt_number=5, wait_random_min=200, wait_random_max=1000,
+ retry_on_exception=lambda ex: isinstance(ex, requests.ConnectionError))
def get_connection_to_hbase() -> Connection:
"""Fetch configuration data and create HBase connection.
@@ -11,7 +16,9 @@
configuration = AppConfiguration()
host = configuration.get('hbase', 'host', fallback='localhost')
port = configuration.getint('hbase', 'rest_port', fallback=8080)
- return Connection(host=host, port=port)
+ connection = Connection(host=host, port=port)
+ connection.tables() # Test if the connection was properly set up
+ return connection
def user_agrees(prompt_message: str) -> bool:
| {"golden_diff": "diff --git a/backend/medtagger/clients/hbase_client.py b/backend/medtagger/clients/hbase_client.py\n--- a/backend/medtagger/clients/hbase_client.py\n+++ b/backend/medtagger/clients/hbase_client.py\n@@ -111,6 +111,21 @@\n table = connection.table(table_name)\n return table.row(hbase_key, columns=columns)\n \n+ @staticmethod\n+ @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n+ retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n+ def delete(table_name: str, key: str, columns: List[str] = None) -> None:\n+ \"\"\"Delete a single row (or values from colums in given row) in HBase table.\n+\n+ :param table_name: name of a table\n+ :param key: key representing a row\n+ :param columns: columns which should be cleared\n+ \"\"\"\n+ hbase_key = str.encode(key)\n+ with HBASE_CONNECTION_POOL.connection() as connection:\n+ table = connection.table(table_name)\n+ table.delete(hbase_key, columns=columns)\n+\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\ndiff --git a/backend/scripts/utils.py b/backend/scripts/utils.py\n--- a/backend/scripts/utils.py\n+++ b/backend/scripts/utils.py\n@@ -1,8 +1,13 @@\n \"\"\"Storage for all utility functions.\"\"\"\n from starbase import Connection\n+from retrying import retry\n+import requests\n+\n from medtagger.config import AppConfiguration\n \n \n+@retry(stop_max_attempt_number=5, wait_random_min=200, wait_random_max=1000,\n+ retry_on_exception=lambda ex: isinstance(ex, requests.ConnectionError))\n def get_connection_to_hbase() -> Connection:\n \"\"\"Fetch configuration data and create HBase connection.\n \n@@ -11,7 +16,9 @@\n configuration = AppConfiguration()\n host = configuration.get('hbase', 'host', fallback='localhost')\n port = configuration.getint('hbase', 'rest_port', fallback=8080)\n- return Connection(host=host, port=port)\n+ connection = Connection(host=host, port=port)\n+ connection.tables() # Test if the connection was properly set up\n+ return connection\n \n \n def user_agrees(prompt_message: str) -> bool:\n", "issue": "Do not drop tables in functional tests\n## Expected Behavior\r\n\r\nFunctional tests should clean the tables in Postgres and HBase.\r\n\r\n## Actual Behavior\r\n\r\nFunctional tests drops all the available tables and create them again. This may be (and probably is) time consuming.\r\n\r\n## Additional comment\r\n\r\nPlease compare both implementations before merge.\n", "before_files": [{"content": "\"\"\"Module responsible for definition of client for HBase database.\"\"\"\nimport logging\nfrom typing import Iterable, List, Mapping, Tuple, Any\n\nimport happybase\nfrom retrying import retry\nfrom thriftpy.transport import TTransportException\n\nfrom medtagger.config import AppConfiguration\n\nlogger = logging.getLogger(__name__)\n\nconfiguration = AppConfiguration()\nhost = configuration.get('hbase', 'host', fallback='localhost')\nport = configuration.getint('hbase', 'port', fallback=9090)\nsize = configuration.getint('hbase', 'connection_pool_size', fallback=10)\ntry:\n HBASE_CONNECTION_POOL = happybase.ConnectionPool(size, host=host, port=port)\nexcept (TTransportException, BrokenPipeError):\n logger.warning('Could not connect to HBase. 
Is it down?')\n\n\ndef is_alive() -> bool:\n \"\"\"Return boolean information if HBase is alive or not.\"\"\"\n try:\n happybase.ConnectionPool(1, host=host, port=port)\n return True\n except (TTransportException, BrokenPipeError):\n return False\n\n\nclass HBaseClient(object):\n \"\"\"Client for HBase.\n\n How to use this client?\n -----------------------\n This is a wrapper for HappyBase Connection. Client uses HappyBase's Connection Pool, so don't worry about closing\n connection, etc. This client should do everything inside below methods.\n\n WATCH OUT: Script that migrates HBase schema may not work properly if you want to change column names!\n In such case please run your migration manually!\n\n Example:\n\n >>> hbase_client = HBaseClient()\n >>> data = hbase_client.get('my_table_name', 'row_key')\n >>> ...\n\n \"\"\"\n\n ORIGINAL_SLICES_TABLE = 'original_slices'\n CONVERTED_SLICES_TABLE = 'converted_slices'\n LABEL_SELECTION_BINARY_MASK_TABLE = 'label_selection_binary_mask'\n\n HBASE_SCHEMA = {\n ORIGINAL_SLICES_TABLE: ['image'],\n CONVERTED_SLICES_TABLE: ['image'],\n LABEL_SELECTION_BINARY_MASK_TABLE: ['binary_mask'],\n }\n\n def __init__(self) -> None:\n \"\"\"Initialize client.\"\"\"\n pass\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get_all_keys(table_name: str, starts_with: str = None) -> Iterable[str]:\n \"\"\"Fetch all keys for given table.\n\n :param table_name: name of a table\n :param starts_with: prefix for keys\n :return: iterator for table keys\n \"\"\"\n with HBASE_CONNECTION_POOL.connection() as connection:\n row_prefix = str.encode(starts_with) if starts_with else None\n table = connection.table(table_name)\n for key, _ in table.scan(row_prefix=row_prefix, filter=str.encode('KeyOnlyFilter()')):\n yield key.decode('utf-8')\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get_all_rows(table_name: str, columns: List, starts_with: str = None) -> Iterable[Tuple[str, Any]]:\n \"\"\"Fetch all rows for given table.\n\n :param table_name: name of a table\n :param starts_with: prefix for keys\n :param columns: list of columns to fetch\n :return: iterator for table keys\n \"\"\"\n with HBASE_CONNECTION_POOL.connection() as connection:\n row_prefix = str.encode(starts_with) if starts_with else None\n table = connection.table(table_name)\n for key, value in table.scan(row_prefix=row_prefix, columns=columns):\n yield key.decode('utf-8'), value\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def get(table_name: str, key: str, columns: List[str] = None) -> Mapping:\n \"\"\"Fetch a single row from HBase table.\n\n :param table_name: name of a table\n :param key: key representing a row\n :param columns: columns which should be loaded (by default all)\n :return: mapping returned by HBase\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n return table.row(hbase_key, columns=columns)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def 
put(table_name: str, key: str, value: Any) -> None:\n \"\"\"Add new entry into HBase table.\n\n :param table_name: name of a table\n :param key: key under value should be stored\n :param value: value which should be stored\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n table.put(hbase_key, value)\n\n @staticmethod\n @retry(stop_max_attempt_number=3, wait_random_min=200, wait_random_max=1000,\n retry_on_exception=lambda ex: isinstance(ex, (TTransportException, BrokenPipeError)))\n def check_if_exists(table_name: str, key: str) -> bool:\n \"\"\"Scan database and check if given key exists.\n\n :param table_name: name of a table\n :param key: HBase key\n :return: boolean information if such key exists or not\n \"\"\"\n hbase_key = str.encode(key)\n with HBASE_CONNECTION_POOL.connection() as connection:\n table = connection.table(table_name)\n results = table.scan(row_start=hbase_key, row_stop=hbase_key,\n filter=str.encode('KeyOnlyFilter() AND FirstKeyOnlyFilter()'), limit=1)\n return next(results, None) is not None\n", "path": "backend/medtagger/clients/hbase_client.py"}, {"content": "\"\"\"Storage for all utility functions.\"\"\"\nfrom starbase import Connection\nfrom medtagger.config import AppConfiguration\n\n\ndef get_connection_to_hbase() -> Connection:\n \"\"\"Fetch configuration data and create HBase connection.\n\n :return: connection to HBase using Starbase library\n \"\"\"\n configuration = AppConfiguration()\n host = configuration.get('hbase', 'host', fallback='localhost')\n port = configuration.getint('hbase', 'rest_port', fallback=8080)\n return Connection(host=host, port=port)\n\n\ndef user_agrees(prompt_message: str) -> bool:\n \"\"\"Ask user a question and ask him/her for True/False answer (default answer is False).\n\n :param prompt_message: message that will be prompted to user\n :return: boolean information if user agrees or not\n \"\"\"\n answer = input(prompt_message + ' [y/N] ')\n return answer.lower() in ['y', 'yes', 't', 'true']\n", "path": "backend/scripts/utils.py"}]} | 2,563 | 576 |
gh_patches_debug_41631 | rasdani/github-patches | git_diff | acl-org__acl-anthology-3045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Compact bibfile
Overleaf has a 50 MB file size limit, and `anthology.bib` is now larger than this. We should create a compact BibTeX export using string substitution [as suggested here](https://twitter.com/daniel_hers/status/1744434842895294496). I'm not sure if this should just replace the current Anthology bib file, or become a new export, say `anthology-compact.bib`:
* The advantage of encompactifying the current file is it would work for everyone without having to change anything.
* The disadvantage is it complicates cutting-and-pasting.
* However, we already have [https://aclanthology.org/anthology+abstracts.bib.gz](https://aclanthology.org/anthology+abstracts.bib.gz). I'm not sure how often people cut-and-paste individual entries from the complete file, anyway; it seems that its main use is in Overleaf.
I'm therefore inclined to simply replace `anthology.bib`.
</issue>
<code>
[start of bin/create_bibtex.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2019 Marcel Bollmann <[email protected]>
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]
19
20 Creates .bib files for all papers in the Hugo directory.
21
22 Options:
23 --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]
24 --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]
25 --debug Output debug-level log messages.
26 -c, --clean Delete existing files in target directory before generation.
27 -h, --help Display this helpful text.
28 """
29
30 from docopt import docopt
31 from tqdm import tqdm
32 import gzip
33 import logging as log
34 import os
35
36 from anthology import Anthology
37 from anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year
38 from create_hugo_pages import check_directory
39
40
41 def volume_sorter(volume_tuple):
42 """
43 Extracts the year so that we can sort by the year and then
44 the collection ID.
45 """
46 volume_id = volume_tuple[0]
47 collection_id, year, _ = deconstruct_anthology_id(volume_id)
48 year = infer_year(collection_id)
49 return year, volume_id
50
51
52 def create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:
53 """Creates .bib files for all papers.
54
55 :param anthology: The Anthology object.
56 :param trgdir: The target directory to write to
57 :param limit: If nonzero, only generate {limit} entries per volume
58 :param clean: Clean the directory first
59 """
60 if not check_directory("{}/papers".format(trgdir), clean=clean):
61 return
62 if not check_directory("{}/volumes".format(trgdir), clean=clean):
63 return
64
65 log.info("Creating BibTeX files for all papers...")
66 with open(
67 "{}/anthology.bib".format(trgdir), "wt", encoding="utf-8"
68 ) as file_anthology_raw, gzip.open(
69 "{}/anthology.bib.gz".format(trgdir), "wt", encoding="utf-8"
70 ) as file_anthology, gzip.open(
71 "{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8"
72 ) as file_anthology_with_abstracts:
73 for volume_id, volume in tqdm(
74 sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)
75 ):
76 volume_dir = trgdir
77 if not os.path.exists(volume_dir):
78 os.makedirs(volume_dir)
79 with open("{}/volumes/{}.bib".format(trgdir, volume_id), "w") as file_volume:
80 for i, paper in enumerate(volume, 1):
81 if limit and i > limit:
82 break
83
84 with open(
85 "{}/{}.bib".format(volume_dir, paper.full_id), "w"
86 ) as file_paper:
87 contents = paper.as_bibtex()
88 print(contents, file=file_paper)
89 print(contents, file=file_anthology_with_abstracts)
90
91 concise_contents = paper.as_bibtex(concise=True)
92 print(concise_contents, file=file_volume)
93 print(concise_contents, file=file_anthology)
94 print(concise_contents, file=file_anthology_raw)
95
96
97 if __name__ == "__main__":
98 args = docopt(__doc__)
99 scriptdir = os.path.dirname(os.path.abspath(__file__))
100 if "{scriptdir}" in args["--importdir"]:
101 args["--importdir"] = os.path.abspath(
102 args["--importdir"].format(scriptdir=scriptdir)
103 )
104 if "{scriptdir}" in args["--exportdir"]:
105 args["--exportdir"] = os.path.abspath(
106 args["--exportdir"].format(scriptdir=scriptdir)
107 )
108
109 log_level = log.DEBUG if args["--debug"] else log.INFO
110 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level)
111 tracker = SeverityTracker()
112 log.getLogger().addHandler(tracker)
113
114 # If NOBIB is set, generate only three bibs per volume
115 limit = 0 if os.environ.get("NOBIB", "false") == "false" else 3
116 log.info(f"NOBIB=true, generating only {limit} BibTEX files per volume")
117
118 anthology = Anthology(importdir=args["--importdir"], fast_load=True)
119 create_bibtex(anthology, args["--exportdir"], limit=limit, clean=args["--clean"])
120
121 if tracker.highest >= log.ERROR:
122 exit(1)
123
[end of bin/create_bibtex.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/create_bibtex.py b/bin/create_bibtex.py
--- a/bin/create_bibtex.py
+++ b/bin/create_bibtex.py
@@ -27,6 +27,7 @@
-h, --help Display this helpful text.
"""
+import re
from docopt import docopt
from tqdm import tqdm
import gzip
@@ -70,9 +71,20 @@
) as file_anthology, gzip.open(
"{}/anthology+abstracts.bib.gz".format(trgdir), "wt", encoding="utf-8"
) as file_anthology_with_abstracts:
+ # Add some shortcuts to the consolidated bib file
+ print(
+ "@string{acl = {Association for Computational Linguistics}}",
+ file=file_anthology_raw,
+ )
+ print("@string{anth = {https://aclanthology.org/}}", file=file_anthology_raw)
+ print(file=file_anthology_raw)
+
for volume_id, volume in tqdm(
sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)
):
+ # reset this each time
+ abbrev = None
+
volume_dir = trgdir
if not os.path.exists(volume_dir):
os.makedirs(volume_dir)
@@ -91,6 +103,53 @@
concise_contents = paper.as_bibtex(concise=True)
print(concise_contents, file=file_volume)
print(concise_contents, file=file_anthology)
+
+ # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)
+ # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf
+
+ concise_contents = concise_contents.replace(
+ 'publisher = "Association for Computational Linguistics",',
+ "publisher = acl,",
+ )
+ concise_contents = re.sub(
+ r'url = "https://aclanthology.org/(.*)"',
+ r"url = anth # {\1}",
+ concise_contents,
+ )
+
+ # Abbreviate the booktitle by extracting it and printing it before
+ # the first entry in each volume
+ if concise_contents.startswith("@proceedings"):
+ # Grab the title string and create the alias
+ abbrev = f"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}"
+ try:
+ booktitle = re.search(
+ r" title = \"(.*)\",", concise_contents
+ ).group(1)
+ print(
+ f"@string{{{abbrev} = {{{booktitle}}}}}",
+ file=file_anthology_raw,
+ )
+ except AttributeError:
+ import sys
+
+ print(
+ f"Could not find title for {volume_id}",
+ file=sys.stderr,
+ )
+ abbrev = None
+
+ if abbrev is not None and "booktitle" in concise_contents:
+ # substitute the alias for the booktitle
+ concise_contents = re.sub(
+ r" booktitle = (\".*\"),",
+ f" booktitle = {abbrev},",
+ concise_contents,
+ )
+
+ # Remove newlines, indentations, and double-spaces around author separators
+ concise_contents = re.sub(r"\s+", " ", concise_contents)
+
print(concise_contents, file=file_anthology_raw)
| {"golden_diff": "diff --git a/bin/create_bibtex.py b/bin/create_bibtex.py\n--- a/bin/create_bibtex.py\n+++ b/bin/create_bibtex.py\n@@ -27,6 +27,7 @@\n -h, --help Display this helpful text.\n \"\"\"\n \n+import re\n from docopt import docopt\n from tqdm import tqdm\n import gzip\n@@ -70,9 +71,20 @@\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n+ # Add some shortcuts to the consolidated bib file\n+ print(\n+ \"@string{acl = {Association for Computational Linguistics}}\",\n+ file=file_anthology_raw,\n+ )\n+ print(\"@string{anth = {https://aclanthology.org/}}\", file=file_anthology_raw)\n+ print(file=file_anthology_raw)\n+\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n+ # reset this each time\n+ abbrev = None\n+\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n@@ -91,6 +103,53 @@\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n+\n+ # Space saver (https://github.com/acl-org/acl-anthology/issues/3016)\n+ # Replace verbose text with abbreviations to get the file under 50 MB for Overleaf\n+\n+ concise_contents = concise_contents.replace(\n+ 'publisher = \"Association for Computational Linguistics\",',\n+ \"publisher = acl,\",\n+ )\n+ concise_contents = re.sub(\n+ r'url = \"https://aclanthology.org/(.*)\"',\n+ r\"url = anth # {\\1}\",\n+ concise_contents,\n+ )\n+\n+ # Abbreviate the booktitle by extracting it and printing it before\n+ # the first entry in each volume\n+ if concise_contents.startswith(\"@proceedings\"):\n+ # Grab the title string and create the alias\n+ abbrev = f\"{volume.get_venues()[0].upper()}:{infer_year(volume.collection_id)}:{volume.volume_id}\"\n+ try:\n+ booktitle = re.search(\n+ r\" title = \\\"(.*)\\\",\", concise_contents\n+ ).group(1)\n+ print(\n+ f\"@string{{{abbrev} = {{{booktitle}}}}}\",\n+ file=file_anthology_raw,\n+ )\n+ except AttributeError:\n+ import sys\n+\n+ print(\n+ f\"Could not find title for {volume_id}\",\n+ file=sys.stderr,\n+ )\n+ abbrev = None\n+\n+ if abbrev is not None and \"booktitle\" in concise_contents:\n+ # substitute the alias for the booktitle\n+ concise_contents = re.sub(\n+ r\" booktitle = (\\\".*\\\"),\",\n+ f\" booktitle = {abbrev},\",\n+ concise_contents,\n+ )\n+\n+ # Remove newlines, indentations, and double-spaces around author separators\n+ concise_contents = re.sub(r\"\\s+\", \" \", concise_contents)\n+\n print(concise_contents, file=file_anthology_raw)\n", "issue": "Compact bibfile\nOverleaf has a 50 MB file size limit, and `anthology.bib` is now larger than this. We should create a compact BibTeX export using string substitution [as suggested here](https://twitter.com/daniel_hers/status/1744434842895294496). I'm not sure if this should just replace the current Anthology bib file, or become a new export, say `anthology-compact.bib`:\r\n\r\n* The advantage of encompactifying the current file is it would work for everyone without having to change anything.\r\n* The disadvantage is it complicates cutting-and-pasting.\r\n* However, we already have [https://aclanthology.org/anthology+abstracts.bib.gz](https://aclanthology.org/anthology+abstracts.bib.gz). 
I'm not sure how often people cut-and-paste individual entries from the complete file, anyway; it seems that it's main use is in Overleaf.\r\n\r\nI'm therefore include to simply replace `anthology.bib`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_bibtex.py [--importdir=DIR] [--exportdir=DIR] [-c] [--debug]\n\nCreates .bib files for all papers in the Hugo directory.\n\nOptions:\n --importdir=DIR Directory to import XML files from. [default: {scriptdir}/../data/]\n --exportdir=DIR Directory to write exported files to. [default: {scriptdir}/../build/data-export/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom tqdm import tqdm\nimport gzip\nimport logging as log\nimport os\n\nfrom anthology import Anthology\nfrom anthology.utils import SeverityTracker, deconstruct_anthology_id, infer_year\nfrom create_hugo_pages import check_directory\n\n\ndef volume_sorter(volume_tuple):\n \"\"\"\n Extracts the year so that we can sort by the year and then\n the collection ID.\n \"\"\"\n volume_id = volume_tuple[0]\n collection_id, year, _ = deconstruct_anthology_id(volume_id)\n year = infer_year(collection_id)\n return year, volume_id\n\n\ndef create_bibtex(anthology, trgdir, limit=0, clean=False) -> None:\n \"\"\"Creates .bib files for all papers.\n\n :param anthology: The Anthology object.\n :param trgdir: The target directory to write to\n :param limit: If nonzero, only generate {limit} entries per volume\n :param clean: Clean the directory first\n \"\"\"\n if not check_directory(\"{}/papers\".format(trgdir), clean=clean):\n return\n if not check_directory(\"{}/volumes\".format(trgdir), clean=clean):\n return\n\n log.info(\"Creating BibTeX files for all papers...\")\n with open(\n \"{}/anthology.bib\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_raw, gzip.open(\n \"{}/anthology.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology, gzip.open(\n \"{}/anthology+abstracts.bib.gz\".format(trgdir), \"wt\", encoding=\"utf-8\"\n ) as file_anthology_with_abstracts:\n for volume_id, volume in tqdm(\n sorted(anthology.volumes.items(), key=volume_sorter, reverse=True)\n ):\n volume_dir = trgdir\n if not os.path.exists(volume_dir):\n os.makedirs(volume_dir)\n with open(\"{}/volumes/{}.bib\".format(trgdir, volume_id), \"w\") as file_volume:\n for i, paper in enumerate(volume, 1):\n if limit and i > limit:\n break\n\n with open(\n \"{}/{}.bib\".format(volume_dir, paper.full_id), \"w\"\n ) as file_paper:\n contents = paper.as_bibtex()\n print(contents, file=file_paper)\n print(contents, file=file_anthology_with_abstracts)\n\n concise_contents = paper.as_bibtex(concise=True)\n print(concise_contents, file=file_volume)\n print(concise_contents, file=file_anthology)\n 
print(concise_contents, file=file_anthology_raw)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--importdir\"]:\n args[\"--importdir\"] = os.path.abspath(\n args[\"--importdir\"].format(scriptdir=scriptdir)\n )\n if \"{scriptdir}\" in args[\"--exportdir\"]:\n args[\"--exportdir\"] = os.path.abspath(\n args[\"--exportdir\"].format(scriptdir=scriptdir)\n )\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n # If NOBIB is set, generate only three bibs per volume\n limit = 0 if os.environ.get(\"NOBIB\", \"false\") == \"false\" else 3\n log.info(f\"NOBIB=true, generating only {limit} BibTEX files per volume\")\n\n anthology = Anthology(importdir=args[\"--importdir\"], fast_load=True)\n create_bibtex(anthology, args[\"--exportdir\"], limit=limit, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_bibtex.py"}]} | 2,195 | 769 |
gh_patches_debug_6629 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-8853 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OSError: File does not exist: staticfiles.json - Dev Install
## Details
* Read the Docs project URL: **N/A**
* Build URL (if applicable): **N/A**
* Read the Docs username (if applicable): **N/A**
## Expected Result
Show the local site at http://community.dev.readthedocs.io/ and allow you to log in and generate docs.
## Actual Result

This error also shows up when you try to go to http://community.dev.readthedocs.io/, but there it takes the form of a nicely formatted Django error page with details about the failure.
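For context, `staticfiles.json` is the manifest that Django's `collectstatic` writes when a manifest-backed static files storage is in use (assumed here for the dev `S3StaticStorage` setup). A minimal, hypothetical sketch of regenerating it from inside the web container (not the project's actual fix) looks like:

```python
# Hypothetical sketch: regenerate the missing static files manifest.
# Assumes DJANGO_SETTINGS_MODULE already points at the Docker dev settings.
import django
from django.core.management import call_command

django.setup()
# Equivalent to `python manage.py collectstatic --noinput`
call_command("collectstatic", interactive=False, verbosity=1)
```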
</issue>
<code>
[start of readthedocs/settings/docker_compose.py]
1 import os
2 import socket
3
4 from .dev import CommunityDevSettings
5
6
7 class DockerBaseSettings(CommunityDevSettings):
8
9 """Settings for local development with Docker"""
10
11 DOCKER_ENABLE = True
12 RTD_DOCKER_COMPOSE = True
13 RTD_DOCKER_COMPOSE_VOLUME = 'community_build-user-builds'
14 RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}'
15 DOCKER_LIMITS = {'memory': '1g', 'time': 900}
16 USE_SUBDOMAIN = True
17
18 PRODUCTION_DOMAIN = 'community.dev.readthedocs.io'
19 PUBLIC_DOMAIN = 'community.dev.readthedocs.io'
20 PUBLIC_API_URL = f'http://{PRODUCTION_DOMAIN}'
21
22 SLUMBER_API_HOST = 'http://web:8000'
23 SLUMBER_USERNAME = 'admin'
24 SLUMBER_PASSWORD = 'admin'
25
26 RTD_EXTERNAL_VERSION_DOMAIN = 'org.dev.readthedocs.build'
27
28 STATIC_URL = '/static/'
29
30 # In the local docker environment, nginx should be trusted to set the host correctly
31 USE_X_FORWARDED_HOST = True
32
33 MULTIPLE_BUILD_SERVERS = ['build']
34
35 # https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host
36 # export HOSTIP=`ip -4 addr show scope global dev wlp4s0 | grep inet | awk '{print \$2}' | cut -d / -f 1`
37 HOSTIP = os.environ.get('HOSTIP')
38
39 # If the host IP is not specified, try to get it from the socket address list
40 _, __, ips = socket.gethostbyname_ex(socket.gethostname())
41 if ips and not HOSTIP:
42 HOSTIP = ips[0][:-1] + "1"
43
44 # Turn this on to test ads
45 USE_PROMOS = False
46 ADSERVER_API_BASE = f'http://{HOSTIP}:5000'
47 # Create a Token for an admin User and set it here.
48 ADSERVER_API_KEY = None
49 ADSERVER_API_TIMEOUT = 2 # seconds - Docker for Mac is very slow
50
51 # New templates
52 @property
53 def RTD_EXT_THEME_DEV_SERVER_ENABLED(self):
54 return os.environ.get('RTD_EXT_THEME_DEV_SERVER_ENABLED') is not None
55
56 @property
57 def RTD_EXT_THEME_DEV_SERVER(self):
58 if self.RTD_EXT_THEME_DEV_SERVER_ENABLED:
59 return "http://assets.community.dev.readthedocs.io:10001"
60
61 # Enable auto syncing elasticsearch documents
62 ELASTICSEARCH_DSL_AUTOSYNC = 'SEARCH' in os.environ
63
64 RTD_CLEAN_AFTER_BUILD = True
65
66 @property
67 def RTD_EMBED_API_EXTERNAL_DOMAINS(self):
68 domains = super().RTD_EMBED_API_EXTERNAL_DOMAINS
69 domains.extend([
70 r'.*\.readthedocs\.io',
71 r'.*\.org\.readthedocs\.build',
72 r'.*\.readthedocs-hosted\.com',
73 r'.*\.com\.readthedocs\.build',
74 ])
75 return domains
76
77 @property
78 def LOGGING(self):
79 logging = super().LOGGING
80 logging['handlers']['console']['formatter'] = 'colored_console'
81 logging['loggers'].update({
82 # Disable Django access requests logging (e.g. GET /path/to/url)
83 # https://github.com/django/django/blob/ca9872905559026af82000e46cde6f7dedc897b6/django/core/servers/basehttp.py#L24
84 'django.server': {
85 'handlers': ['null'],
86 'propagate': False,
87 },
88 # Disable S3 logging
89 'boto3': {
90 'handlers': ['null'],
91 'propagate': False,
92 },
93 'botocore': {
94 'handlers': ['null'],
95 'propagate': False,
96 },
97 's3transfer': {
98 'handlers': ['null'],
99 'propagate': False,
100 },
101 # Disable Docker API logging
102 'urllib3': {
103 'handlers': ['null'],
104 'propagate': False,
105 },
106 # Disable gitpython logging
107 'git.cmd': {
108 'handlers': ['null'],
109 'propagate': False,
110 },
111 })
112 return logging
113
114 @property
115 def DATABASES(self): # noqa
116 return {
117 "default": {
118 "ENGINE": "django.db.backends.postgresql_psycopg2",
119 "NAME": "docs_db",
120 "USER": os.environ.get("DB_USER", "docs_user"),
121 "PASSWORD": os.environ.get("DB_PWD", "docs_pwd"),
122 "HOST": os.environ.get("DB_HOST", "database"),
123 "PORT": "",
124 }
125 }
126
127 def show_debug_toolbar(request):
128 from django.conf import settings
129 return settings.DEBUG
130
131 DEBUG_TOOLBAR_CONFIG = {
132 'SHOW_TOOLBAR_CALLBACK': show_debug_toolbar,
133 }
134
135 ACCOUNT_EMAIL_VERIFICATION = "none"
136 SESSION_COOKIE_DOMAIN = None
137 CACHES = {
138 'default': {
139 'BACKEND': 'redis_cache.RedisCache',
140 'LOCATION': 'cache:6379',
141 }
142 }
143
144 BROKER_URL = "redis://cache:6379/0"
145 CELERY_RESULT_BACKEND = "redis://cache:6379/0"
146 CELERY_RESULT_SERIALIZER = "json"
147 CELERY_ALWAYS_EAGER = False
148 CELERY_TASK_IGNORE_RESULT = False
149
150 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
151
152 RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.s3_storage.S3BuildMediaStorage'
153 # Storage backend for build cached environments
154 RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.storage.s3_storage.S3BuildEnvironmentStorage'
155 # Storage backend for build languages
156 RTD_BUILD_TOOLS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildToolsStorage'
157 # Storage for static files (those collected with `collectstatic`)
158 STATICFILES_STORAGE = 'readthedocs.storage.s3_storage.S3StaticStorage'
159
160 AWS_ACCESS_KEY_ID = 'admin'
161 AWS_SECRET_ACCESS_KEY = 'password'
162 S3_MEDIA_STORAGE_BUCKET = 'media'
163 S3_BUILD_COMMANDS_STORAGE_BUCKET = 'builds'
164 S3_BUILD_ENVIRONMENT_STORAGE_BUCKET = 'envs'
165 S3_BUILD_TOOLS_STORAGE_BUCKET = 'build-tools'
166 S3_STATIC_STORAGE_BUCKET = 'static'
167 S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'
168 S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'
169
170 AWS_AUTO_CREATE_BUCKET = True
171 AWS_DEFAULT_ACL = 'public-read'
172 AWS_BUCKET_ACL = 'public-read'
173 AWS_S3_ENCRYPTION = False
174 AWS_S3_SECURE_URLS = False
175 AWS_S3_USE_SSL = False
176 AWS_S3_ENDPOINT_URL = 'http://storage:9000/'
177 AWS_QUERYSTRING_AUTH = False
178
179 RTD_SAVE_BUILD_COMMANDS_TO_STORAGE = True
180 RTD_BUILD_COMMANDS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildCommandsStorage'
181 BUILD_COLD_STORAGE_URL = 'http://storage:9000/builds'
182
183 STATICFILES_DIRS = [
184 os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'),
185 os.path.join(CommunityDevSettings.SITE_ROOT, 'media'),
186 ]
187
188 # Remove the checks on the number of fields being submitted
189 # This limit is mostly hit on large forms in the Django admin
190 DATA_UPLOAD_MAX_NUMBER_FIELDS = None
191
192 # This allows us to have CORS work well in dev
193 CORS_ORIGIN_ALLOW_ALL = True
194
[end of readthedocs/settings/docker_compose.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/settings/docker_compose.py b/readthedocs/settings/docker_compose.py
--- a/readthedocs/settings/docker_compose.py
+++ b/readthedocs/settings/docker_compose.py
@@ -167,9 +167,6 @@
S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'
S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'
- AWS_AUTO_CREATE_BUCKET = True
- AWS_DEFAULT_ACL = 'public-read'
- AWS_BUCKET_ACL = 'public-read'
AWS_S3_ENCRYPTION = False
AWS_S3_SECURE_URLS = False
AWS_S3_USE_SSL = False
| {"golden_diff": "diff --git a/readthedocs/settings/docker_compose.py b/readthedocs/settings/docker_compose.py\n--- a/readthedocs/settings/docker_compose.py\n+++ b/readthedocs/settings/docker_compose.py\n@@ -167,9 +167,6 @@\n S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n \n- AWS_AUTO_CREATE_BUCKET = True\n- AWS_DEFAULT_ACL = 'public-read'\n- AWS_BUCKET_ACL = 'public-read'\n AWS_S3_ENCRYPTION = False\n AWS_S3_SECURE_URLS = False\n AWS_S3_USE_SSL = False\n", "issue": "OSError: File does not exist: staticfiles.json - Dev Install\n## Details\r\n\r\n* Read the Docs project URL: **N/A**\r\n* Build URL (if applicable): **N/A**\r\n* Read the Docs username (if applicable): **N/A**\r\n\r\n## Expected Result\r\n\r\nShow the http://community.dev.readthedocs.io/ local site and allow you to login and generate docs.\r\n\r\n## Actual Result\r\n\r\n\r\n\r\nThis error also shows up when you try to go to http://community.dev.readthedocs.io/ but in the form of a nicely formatted django error with details about it.\r\n\n", "before_files": [{"content": "import os\nimport socket\n\nfrom .dev import CommunityDevSettings\n\n\nclass DockerBaseSettings(CommunityDevSettings):\n\n \"\"\"Settings for local development with Docker\"\"\"\n\n DOCKER_ENABLE = True\n RTD_DOCKER_COMPOSE = True\n RTD_DOCKER_COMPOSE_VOLUME = 'community_build-user-builds'\n RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}'\n DOCKER_LIMITS = {'memory': '1g', 'time': 900}\n USE_SUBDOMAIN = True\n\n PRODUCTION_DOMAIN = 'community.dev.readthedocs.io'\n PUBLIC_DOMAIN = 'community.dev.readthedocs.io'\n PUBLIC_API_URL = f'http://{PRODUCTION_DOMAIN}'\n\n SLUMBER_API_HOST = 'http://web:8000'\n SLUMBER_USERNAME = 'admin'\n SLUMBER_PASSWORD = 'admin'\n\n RTD_EXTERNAL_VERSION_DOMAIN = 'org.dev.readthedocs.build'\n\n STATIC_URL = '/static/'\n\n # In the local docker environment, nginx should be trusted to set the host correctly\n USE_X_FORWARDED_HOST = True\n\n MULTIPLE_BUILD_SERVERS = ['build']\n\n # https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host\n # export HOSTIP=`ip -4 addr show scope global dev wlp4s0 | grep inet | awk '{print \\$2}' | cut -d / -f 1`\n HOSTIP = os.environ.get('HOSTIP')\n\n # If the host IP is not specified, try to get it from the socket address list\n _, __, ips = socket.gethostbyname_ex(socket.gethostname())\n if ips and not HOSTIP:\n HOSTIP = ips[0][:-1] + \"1\"\n\n # Turn this on to test ads\n USE_PROMOS = False\n ADSERVER_API_BASE = f'http://{HOSTIP}:5000'\n # Create a Token for an admin User and set it here.\n ADSERVER_API_KEY = None\n ADSERVER_API_TIMEOUT = 2 # seconds - Docker for Mac is very slow\n\n # New templates\n @property\n def RTD_EXT_THEME_DEV_SERVER_ENABLED(self):\n return os.environ.get('RTD_EXT_THEME_DEV_SERVER_ENABLED') is not None\n\n @property\n def RTD_EXT_THEME_DEV_SERVER(self):\n if self.RTD_EXT_THEME_DEV_SERVER_ENABLED:\n return \"http://assets.community.dev.readthedocs.io:10001\"\n\n # Enable auto syncing elasticsearch documents\n ELASTICSEARCH_DSL_AUTOSYNC = 'SEARCH' in os.environ\n\n RTD_CLEAN_AFTER_BUILD = True\n\n @property\n def RTD_EMBED_API_EXTERNAL_DOMAINS(self):\n domains = super().RTD_EMBED_API_EXTERNAL_DOMAINS\n domains.extend([\n r'.*\\.readthedocs\\.io',\n r'.*\\.org\\.readthedocs\\.build',\n r'.*\\.readthedocs-hosted\\.com',\n r'.*\\.com\\.readthedocs\\.build',\n ])\n return domains\n\n @property\n def LOGGING(self):\n logging = 
super().LOGGING\n logging['handlers']['console']['formatter'] = 'colored_console'\n logging['loggers'].update({\n # Disable Django access requests logging (e.g. GET /path/to/url)\n # https://github.com/django/django/blob/ca9872905559026af82000e46cde6f7dedc897b6/django/core/servers/basehttp.py#L24\n 'django.server': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable S3 logging\n 'boto3': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'botocore': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 's3transfer': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable Docker API logging\n 'urllib3': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n # Disable gitpython logging\n 'git.cmd': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n })\n return logging\n\n @property\n def DATABASES(self): # noqa\n return {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": \"docs_db\",\n \"USER\": os.environ.get(\"DB_USER\", \"docs_user\"),\n \"PASSWORD\": os.environ.get(\"DB_PWD\", \"docs_pwd\"),\n \"HOST\": os.environ.get(\"DB_HOST\", \"database\"),\n \"PORT\": \"\",\n }\n }\n\n def show_debug_toolbar(request):\n from django.conf import settings\n return settings.DEBUG\n\n DEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': show_debug_toolbar,\n }\n\n ACCOUNT_EMAIL_VERIFICATION = \"none\"\n SESSION_COOKIE_DOMAIN = None\n CACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'cache:6379',\n }\n }\n\n BROKER_URL = \"redis://cache:6379/0\"\n CELERY_RESULT_BACKEND = \"redis://cache:6379/0\"\n CELERY_RESULT_SERIALIZER = \"json\"\n CELERY_ALWAYS_EAGER = False\n CELERY_TASK_IGNORE_RESULT = False\n\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n\n RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.s3_storage.S3BuildMediaStorage'\n # Storage backend for build cached environments\n RTD_BUILD_ENVIRONMENT_STORAGE = 'readthedocs.storage.s3_storage.S3BuildEnvironmentStorage'\n # Storage backend for build languages\n RTD_BUILD_TOOLS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildToolsStorage'\n # Storage for static files (those collected with `collectstatic`)\n STATICFILES_STORAGE = 'readthedocs.storage.s3_storage.S3StaticStorage'\n\n AWS_ACCESS_KEY_ID = 'admin'\n AWS_SECRET_ACCESS_KEY = 'password'\n S3_MEDIA_STORAGE_BUCKET = 'media'\n S3_BUILD_COMMANDS_STORAGE_BUCKET = 'builds'\n S3_BUILD_ENVIRONMENT_STORAGE_BUCKET = 'envs'\n S3_BUILD_TOOLS_STORAGE_BUCKET = 'build-tools'\n S3_STATIC_STORAGE_BUCKET = 'static'\n S3_STATIC_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME = 'community.dev.readthedocs.io'\n\n AWS_AUTO_CREATE_BUCKET = True\n AWS_DEFAULT_ACL = 'public-read'\n AWS_BUCKET_ACL = 'public-read'\n AWS_S3_ENCRYPTION = False\n AWS_S3_SECURE_URLS = False\n AWS_S3_USE_SSL = False\n AWS_S3_ENDPOINT_URL = 'http://storage:9000/'\n AWS_QUERYSTRING_AUTH = False\n\n RTD_SAVE_BUILD_COMMANDS_TO_STORAGE = True\n RTD_BUILD_COMMANDS_STORAGE = 'readthedocs.storage.s3_storage.S3BuildCommandsStorage'\n BUILD_COLD_STORAGE_URL = 'http://storage:9000/builds'\n\n STATICFILES_DIRS = [\n os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'),\n os.path.join(CommunityDevSettings.SITE_ROOT, 'media'),\n ]\n\n # Remove the checks on the number of fields being submitted\n # This limit is mostly hit on large forms in the Django admin\n DATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\n # This allows us to have CORS work well in dev\n CORS_ORIGIN_ALLOW_ALL = True\n", 
"path": "readthedocs/settings/docker_compose.py"}]} | 2,922 | 151 |
gh_patches_debug_17455 | rasdani/github-patches | git_diff | modin-project__modin-7188 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change "master" branch to "main"
We should update all refs to the new default branch.
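One concrete place this shows up is the release script's branch check, which resolves `refs/heads/master` directly. A minimal sketch of a transition-friendly check, using a hypothetical helper name and assuming `pygit2` raises `KeyError` for missing references, could look like:

```python
# Hypothetical sketch: accept either branch name while the rename rolls out.
def is_on_default_branch(repo):
    for ref_name in ("refs/heads/main", "refs/heads/master"):
        try:
            if repo.references[ref_name] == repo.head:
                return True
        except KeyError:
            continue  # this clone does not have that branch
    return False
```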
</issue>
<code>
[start of scripts/release.py]
1 import argparse
2 import atexit
3 import collections
4 import json
5 import re
6 import sys
7 from pathlib import Path
8
9 import github
10 import pygit2
11 from packaging import version
12
13
14 class GithubUserResolver:
15 def __init__(self, email2commit, token):
16 self.__cache_file = Path(__file__).parent / "gh-users-cache.json"
17 self.__cache = (
18 json.loads(self.__cache_file.read_text())
19 if self.__cache_file.exists()
20 else {}
21 )
22 # filter unknown users hoping we'd be able to find them this time
23 self.__cache = {key: value for key, value in self.__cache.items() if value}
24 # using anonymous access if token not specified
25 self.__github = github.Github(token or None)
26 self.__modin_repo = self.__github.get_repo("modin-project/modin")
27 self.__email2commit = email2commit
28 atexit.register(self.__save)
29
30 def __search_commits(self, term):
31 if commit := self.__email2commit.get(term):
32 gh_commit = self.__modin_repo.get_commit(str(commit))
33 return gh_commit.author.login
34 return None
35
36 @staticmethod
37 def __is_email(term):
38 return re.match(r".*@.*\..*", term)
39
40 def __search_github(self, term):
41 search = f"in:email {term}" if self.__is_email(term) else f"fullname:{term}"
42 match = [user.login for user in self.__github.search_users(search)]
43 return match[0] if len(match) == 1 else None
44
45 def __try_user(self, term):
46 if self.__is_email(term):
47 return None
48 try:
49 return self.__github.get_user(term).login
50 except github.GithubException as ex:
51 if ex.status != 404:
52 raise
53 return None
54
55 def __resolve_single(self, term):
56 return (
57 self.__search_commits(term)
58 or self.__search_github(term)
59 or self.__try_user(term)
60 )
61
62 def __resolve_cache(self, name, email):
63 return self.__cache.get(f"{name} <{email}>", None)
64
65 def __register(self, name, email, match):
66 self.__cache[f"{name} <{email}>"] = match
67
68 def resolve(self, people):
69 logins, unknowns = set(), set()
70
71 for name, email in people:
72 if match := self.__resolve_cache(name, email):
73 logins.add(match)
74 elif match := self.__resolve_single(email):
75 self.__register(name, email, match)
76 logins.add(match)
77 else:
78 if match := self.__resolve_single(name):
79 logins.add(match)
80 else:
81 unknowns.add((name, email))
82 self.__register(name, email, match)
83
84 return logins, unknowns
85
86 def resolve_by_reviews(self, unknowns, email2pr):
87 logins, new_unknowns = set(), set()
88 for name, email in unknowns:
89 commit = self.__modin_repo.get_commit(str(email2pr[email]))
90 found = set()
91 for pull in commit.get_pulls():
92 for review in pull.get_reviews():
93 user = review.user
94 if user.name == name and (not user.email or user.email == email):
95 found.add(user.login)
96
97 if len(found) == 1:
98 self.__register(name, email, list(found)[0])
99 logins |= found
100 else:
101 new_unknowns.add((name, email))
102
103 return logins, new_unknowns
104
105 def __save(self):
106 self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True))
107
108
109 class GitWrapper:
110 def __init__(self):
111 self.repo = pygit2.Repository(Path(__file__).parent)
112
113 def is_on_master(self):
114 return self.repo.references["refs/heads/master"] == self.repo.head
115
116 @staticmethod
117 def __get_tag_version(entry):
118 try:
119 return version.parse(entry.lstrip("refs/tags/"))
120 except version.InvalidVersion as ex:
121 return f'<bad version "{entry}": {ex}>'
122
123 def get_previous_release(self, rel_type):
124 tags = [
125 (entry, self.__get_tag_version(entry))
126 for entry in self.repo.references
127 if entry.startswith("refs/tags/")
128 ]
129 # filter away legacy versions (which aren't following the proper naming schema);
130 # also skip pre-releases
131 tags = [
132 (entry, ver)
133 for entry, ver in tags
134 if isinstance(ver, version.Version) and not ver.pre
135 ]
136 if rel_type == "minor":
137 # leave only minor releases
138 tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]
139 else:
140 assert rel_type == "patch"
141 prev_ref, prev_ver = max(tags, key=lambda pair: pair[1])
142 return prev_ref, self.repo.references[prev_ref].peel(), prev_ver
143
144 def get_commits_upto(self, stop_commit):
145 history = []
146 for obj in self.repo.walk(self.repo.head.target):
147 if obj.id == stop_commit.id:
148 break
149 history.append(obj)
150 else:
151 raise ValueError("Current HEAD is not derived from previous release")
152 return history
153
154 def ensure_title_link(self, obj: pygit2.Commit):
155 title = obj.message.splitlines()[0]
156 if not re.match(r".*\(#(\d+)\)$", title):
157 title += f" ({obj.short_id})"
158 return title
159
160
161 def make_notes(args):
162 wrapper = GitWrapper()
163 release_type = "minor" if wrapper.is_on_master() else "patch"
164 sys.stderr.write(f"Detected release type: {release_type}\n")
165
166 prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)
167 sys.stderr.write(f"Previous {release_type} release: {prev_ref}\n")
168
169 next_major, next_minor, next_patch = prev_ver.release
170 if release_type == "minor":
171 next_minor += 1
172 elif release_type == "patch":
173 next_patch += 1
174 else:
175 raise ValueError(f"Unexpected release type: {release_type}")
176 next_ver = version.Version(f"{next_major}.{next_minor}.{next_patch}")
177
178 sys.stderr.write(f"Computing release notes for {prev_ver} -> {next_ver}...\n")
179 try:
180 history = wrapper.get_commits_upto(prev_commit)
181 except ValueError as ex:
182 sys.stderr.write(
183 f"{ex}: did you forget to checkout correct branch or pull tags?"
184 )
185 return 1
186 if not history:
187 sys.stderr.write(f"No commits since {prev_ver} found, nothing to generate!\n")
188 return 1
189
190 titles = collections.defaultdict(list)
191 people = set()
192 email2commit, email2pr = {}, {}
193 for obj in history:
194 title = obj.message.splitlines()[0]
195 titles[title.split("-")[0]].append(obj)
196 new_people = set(
197 re.findall(
198 r"(?:(?:Signed-off-by|Co-authored-by):\s*)([\w\s,]+?)\s*<([^>]+)>",
199 obj.message,
200 )
201 )
202 for _, email in new_people:
203 email2pr[email] = obj.id
204 people |= new_people
205 email2commit[obj.author.email] = obj.id
206 sys.stderr.write(f"Found {len(history)} commit(s) since {prev_ref}\n")
207
208 sys.stderr.write("Resolving contributors...\n")
209 user_resolver = GithubUserResolver(email2commit, args.token)
210 logins, unknowns = user_resolver.resolve(people)
211 new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr)
212 logins |= new_logins
213 sys.stderr.write(f"Found {len(logins)} GitHub usernames.\n")
214 if unknowns:
215 sys.stderr.write(
216 f"Warning! Failed to resolve {len(unknowns)} usernames, please resolve them manually!\n"
217 )
218
219 sections = [
220 ("Stability and Bugfixes", "FIX"),
221 ("Performance enhancements", "PERF"),
222 ("Refactor Codebase", "REFACTOR"),
223 ("Update testing suite", "TEST"),
224 ("Documentation improvements", "DOCS"),
225 ("New Features", "FEAT"),
226 ]
227
228 notes = rf"""Modin {next_ver}
229
230 <Please fill in short release summary>
231
232 Key Features and Updates Since {prev_ver}
233 -------------------------------{'-' * len(str(prev_ver))}
234 """
235
236 def _add_section(section, prs):
237 nonlocal notes
238 if prs:
239 notes += f"* {section}\n"
240 notes += "\n".join(
241 [
242 f" * {wrapper.ensure_title_link(obj)}"
243 for obj in sorted(prs, key=lambda obj: obj.message)
244 ]
245 )
246 notes += "\n"
247
248 for section, key in sections:
249 _add_section(section, titles.pop(key, None))
250
251 uncategorized = sum(titles.values(), [])
252 _add_section("Uncategorized improvements", uncategorized)
253
254 notes += r"""
255 Contributors
256 ------------
257 """
258 notes += "\n".join(f"@{login}" for login in sorted(logins)) + "\n"
259 notes += (
260 "\n".join(
261 f"<unknown-login> {name} <{email}>" for name, email in sorted(unknowns)
262 )
263 + "\n"
264 )
265
266 sys.stdout.write(notes)
267
268
269 def main():
270 parse = argparse.ArgumentParser()
271 parse.add_argument(
272 "--token",
273 type=str,
274 default="",
275 help="GitHub token for queries (optional, bumps up rate limit)",
276 )
277 parse.set_defaults(func=lambda _: parse.print_usage())
278 subparsers = parse.add_subparsers()
279
280 notes = subparsers.add_parser("notes", help="Generate release notes")
281 notes.set_defaults(func=make_notes)
282
283 args = parse.parse_args()
284 sys.exit(args.func(args))
285
286
287 if __name__ == "__main__":
288 main()
289
[end of scripts/release.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/release.py b/scripts/release.py
--- a/scripts/release.py
+++ b/scripts/release.py
@@ -110,8 +110,8 @@
def __init__(self):
self.repo = pygit2.Repository(Path(__file__).parent)
- def is_on_master(self):
- return self.repo.references["refs/heads/master"] == self.repo.head
+ def is_on_main(self):
+ return self.repo.references["refs/heads/main"] == self.repo.head
@staticmethod
def __get_tag_version(entry):
@@ -160,7 +160,7 @@
def make_notes(args):
wrapper = GitWrapper()
- release_type = "minor" if wrapper.is_on_master() else "patch"
+ release_type = "minor" if wrapper.is_on_main() else "patch"
sys.stderr.write(f"Detected release type: {release_type}\n")
prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)
| {"golden_diff": "diff --git a/scripts/release.py b/scripts/release.py\n--- a/scripts/release.py\n+++ b/scripts/release.py\n@@ -110,8 +110,8 @@\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n \n- def is_on_master(self):\n- return self.repo.references[\"refs/heads/master\"] == self.repo.head\n+ def is_on_main(self):\n+ return self.repo.references[\"refs/heads/main\"] == self.repo.head\n \n @staticmethod\n def __get_tag_version(entry):\n@@ -160,7 +160,7 @@\n \n def make_notes(args):\n wrapper = GitWrapper()\n- release_type = \"minor\" if wrapper.is_on_master() else \"patch\"\n+ release_type = \"minor\" if wrapper.is_on_main() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n \n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n", "issue": "Change \"master\" branch to \"main\"\nWe should update all refs to the new default branch.\n", "before_files": [{"content": "import argparse\nimport atexit\nimport collections\nimport json\nimport re\nimport sys\nfrom pathlib import Path\n\nimport github\nimport pygit2\nfrom packaging import version\n\n\nclass GithubUserResolver:\n def __init__(self, email2commit, token):\n self.__cache_file = Path(__file__).parent / \"gh-users-cache.json\"\n self.__cache = (\n json.loads(self.__cache_file.read_text())\n if self.__cache_file.exists()\n else {}\n )\n # filter unknown users hoping we'd be able to find them this time\n self.__cache = {key: value for key, value in self.__cache.items() if value}\n # using anonymous access if token not specified\n self.__github = github.Github(token or None)\n self.__modin_repo = self.__github.get_repo(\"modin-project/modin\")\n self.__email2commit = email2commit\n atexit.register(self.__save)\n\n def __search_commits(self, term):\n if commit := self.__email2commit.get(term):\n gh_commit = self.__modin_repo.get_commit(str(commit))\n return gh_commit.author.login\n return None\n\n @staticmethod\n def __is_email(term):\n return re.match(r\".*@.*\\..*\", term)\n\n def __search_github(self, term):\n search = f\"in:email {term}\" if self.__is_email(term) else f\"fullname:{term}\"\n match = [user.login for user in self.__github.search_users(search)]\n return match[0] if len(match) == 1 else None\n\n def __try_user(self, term):\n if self.__is_email(term):\n return None\n try:\n return self.__github.get_user(term).login\n except github.GithubException as ex:\n if ex.status != 404:\n raise\n return None\n\n def __resolve_single(self, term):\n return (\n self.__search_commits(term)\n or self.__search_github(term)\n or self.__try_user(term)\n )\n\n def __resolve_cache(self, name, email):\n return self.__cache.get(f\"{name} <{email}>\", None)\n\n def __register(self, name, email, match):\n self.__cache[f\"{name} <{email}>\"] = match\n\n def resolve(self, people):\n logins, unknowns = set(), set()\n\n for name, email in people:\n if match := self.__resolve_cache(name, email):\n logins.add(match)\n elif match := self.__resolve_single(email):\n self.__register(name, email, match)\n logins.add(match)\n else:\n if match := self.__resolve_single(name):\n logins.add(match)\n else:\n unknowns.add((name, email))\n self.__register(name, email, match)\n\n return logins, unknowns\n\n def resolve_by_reviews(self, unknowns, email2pr):\n logins, new_unknowns = set(), set()\n for name, email in unknowns:\n commit = self.__modin_repo.get_commit(str(email2pr[email]))\n found = set()\n for pull in commit.get_pulls():\n for review in pull.get_reviews():\n user = 
review.user\n if user.name == name and (not user.email or user.email == email):\n found.add(user.login)\n\n if len(found) == 1:\n self.__register(name, email, list(found)[0])\n logins |= found\n else:\n new_unknowns.add((name, email))\n\n return logins, new_unknowns\n\n def __save(self):\n self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True))\n\n\nclass GitWrapper:\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n\n def is_on_master(self):\n return self.repo.references[\"refs/heads/master\"] == self.repo.head\n\n @staticmethod\n def __get_tag_version(entry):\n try:\n return version.parse(entry.lstrip(\"refs/tags/\"))\n except version.InvalidVersion as ex:\n return f'<bad version \"{entry}\": {ex}>'\n\n def get_previous_release(self, rel_type):\n tags = [\n (entry, self.__get_tag_version(entry))\n for entry in self.repo.references\n if entry.startswith(\"refs/tags/\")\n ]\n # filter away legacy versions (which aren't following the proper naming schema);\n # also skip pre-releases\n tags = [\n (entry, ver)\n for entry, ver in tags\n if isinstance(ver, version.Version) and not ver.pre\n ]\n if rel_type == \"minor\":\n # leave only minor releases\n tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]\n else:\n assert rel_type == \"patch\"\n prev_ref, prev_ver = max(tags, key=lambda pair: pair[1])\n return prev_ref, self.repo.references[prev_ref].peel(), prev_ver\n\n def get_commits_upto(self, stop_commit):\n history = []\n for obj in self.repo.walk(self.repo.head.target):\n if obj.id == stop_commit.id:\n break\n history.append(obj)\n else:\n raise ValueError(\"Current HEAD is not derived from previous release\")\n return history\n\n def ensure_title_link(self, obj: pygit2.Commit):\n title = obj.message.splitlines()[0]\n if not re.match(r\".*\\(#(\\d+)\\)$\", title):\n title += f\" ({obj.short_id})\"\n return title\n\n\ndef make_notes(args):\n wrapper = GitWrapper()\n release_type = \"minor\" if wrapper.is_on_master() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n\n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n sys.stderr.write(f\"Previous {release_type} release: {prev_ref}\\n\")\n\n next_major, next_minor, next_patch = prev_ver.release\n if release_type == \"minor\":\n next_minor += 1\n elif release_type == \"patch\":\n next_patch += 1\n else:\n raise ValueError(f\"Unexpected release type: {release_type}\")\n next_ver = version.Version(f\"{next_major}.{next_minor}.{next_patch}\")\n\n sys.stderr.write(f\"Computing release notes for {prev_ver} -> {next_ver}...\\n\")\n try:\n history = wrapper.get_commits_upto(prev_commit)\n except ValueError as ex:\n sys.stderr.write(\n f\"{ex}: did you forget to checkout correct branch or pull tags?\"\n )\n return 1\n if not history:\n sys.stderr.write(f\"No commits since {prev_ver} found, nothing to generate!\\n\")\n return 1\n\n titles = collections.defaultdict(list)\n people = set()\n email2commit, email2pr = {}, {}\n for obj in history:\n title = obj.message.splitlines()[0]\n titles[title.split(\"-\")[0]].append(obj)\n new_people = set(\n re.findall(\n r\"(?:(?:Signed-off-by|Co-authored-by):\\s*)([\\w\\s,]+?)\\s*<([^>]+)>\",\n obj.message,\n )\n )\n for _, email in new_people:\n email2pr[email] = obj.id\n people |= new_people\n email2commit[obj.author.email] = obj.id\n sys.stderr.write(f\"Found {len(history)} commit(s) since {prev_ref}\\n\")\n\n sys.stderr.write(\"Resolving contributors...\\n\")\n user_resolver = 
GithubUserResolver(email2commit, args.token)\n logins, unknowns = user_resolver.resolve(people)\n new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr)\n logins |= new_logins\n sys.stderr.write(f\"Found {len(logins)} GitHub usernames.\\n\")\n if unknowns:\n sys.stderr.write(\n f\"Warning! Failed to resolve {len(unknowns)} usernames, please resolve them manually!\\n\"\n )\n\n sections = [\n (\"Stability and Bugfixes\", \"FIX\"),\n (\"Performance enhancements\", \"PERF\"),\n (\"Refactor Codebase\", \"REFACTOR\"),\n (\"Update testing suite\", \"TEST\"),\n (\"Documentation improvements\", \"DOCS\"),\n (\"New Features\", \"FEAT\"),\n ]\n\n notes = rf\"\"\"Modin {next_ver}\n\n<Please fill in short release summary>\n\nKey Features and Updates Since {prev_ver}\n-------------------------------{'-' * len(str(prev_ver))}\n\"\"\"\n\n def _add_section(section, prs):\n nonlocal notes\n if prs:\n notes += f\"* {section}\\n\"\n notes += \"\\n\".join(\n [\n f\" * {wrapper.ensure_title_link(obj)}\"\n for obj in sorted(prs, key=lambda obj: obj.message)\n ]\n )\n notes += \"\\n\"\n\n for section, key in sections:\n _add_section(section, titles.pop(key, None))\n\n uncategorized = sum(titles.values(), [])\n _add_section(\"Uncategorized improvements\", uncategorized)\n\n notes += r\"\"\"\nContributors\n------------\n\"\"\"\n notes += \"\\n\".join(f\"@{login}\" for login in sorted(logins)) + \"\\n\"\n notes += (\n \"\\n\".join(\n f\"<unknown-login> {name} <{email}>\" for name, email in sorted(unknowns)\n )\n + \"\\n\"\n )\n\n sys.stdout.write(notes)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument(\n \"--token\",\n type=str,\n default=\"\",\n help=\"GitHub token for queries (optional, bumps up rate limit)\",\n )\n parse.set_defaults(func=lambda _: parse.print_usage())\n subparsers = parse.add_subparsers()\n\n notes = subparsers.add_parser(\"notes\", help=\"Generate release notes\")\n notes.set_defaults(func=make_notes)\n\n args = parse.parse_args()\n sys.exit(args.func(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/release.py"}]} | 3,504 | 220 |
gh_patches_debug_28332 | rasdani/github-patches | git_diff | Kinto__kinto-1076 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_since overflow error: long too big to convert
When `_since` is too large, the PostgreSQL backend fails with an OverflowError: https://sentry.prod.mozaws.net/operations/kinto-prod-1/issues/382250/
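A minimal sketch of bounding these values at the schema level, assuming `colander`'s `Range` validator and treating PostgreSQL's signed 64-bit integer as the upper limit, could look like:

```python
import colander

from kinto.core.schema import QueryField

# PostgreSQL BIGINT is a signed 64-bit integer; anything larger overflows.
POSTGRESQL_MAX_INTEGER_VALUE = 2 ** 63 - 1
positive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE)

# Applied to the timestamp-like querystring fields, e.g.:
_since = QueryField(colander.Integer(), validator=positive_big_integer)
```

With such a bound in place, an oversized `?_since=` value is rejected as an invalid querystring parameter instead of reaching the storage backend.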
</issue>
<code>
[start of kinto/core/resource/schema.py]
1 import warnings
2
3 import colander
4
5 from kinto.core.schema import (Any, HeaderField, QueryField, HeaderQuotedInteger,
6 FieldList, TimeStamp, URL)
7 from kinto.core.utils import native_value
8
9
10 class TimeStamp(TimeStamp):
11     """This schema is deprecated, you should use `kinto.core.schema.TimeStamp` instead."""
12
13 def __init__(self, *args, **kwargs):
14 message = ("`kinto.core.resource.schema.TimeStamp` is deprecated, "
15 "use `kinto.core.schema.TimeStamp` instead.")
16 warnings.warn(message, DeprecationWarning)
17 super(TimeStamp, self).__init__(*args, **kwargs)
18
19
20 class URL(URL):
21     """This schema is deprecated, you should use `kinto.core.schema.URL` instead."""
22
23 def __init__(self, *args, **kwargs):
24 message = ("`kinto.core.resource.schema.URL` is deprecated, "
25 "use `kinto.core.schema.URL` instead.")
26 warnings.warn(message, DeprecationWarning)
27 super(URL, self).__init__(*args, **kwargs)
28
29
30 # Resource related schemas
31
32
33 class ResourceSchema(colander.MappingSchema):
34 """Base resource schema, with *Cliquet* specific built-in options."""
35
36 class Options:
37 """
38 Resource schema options.
39
40         This is meant to be overridden for changing values:
41
42 .. code-block:: python
43
44 class Product(ResourceSchema):
45 reference = colander.SchemaNode(colander.String())
46
47 class Options:
48 readonly_fields = ('reference',)
49 """
50 readonly_fields = tuple()
51 """Fields that cannot be updated. Values for fields will have to be
52 provided either during record creation, through default values using
53 ``missing`` attribute or implementing a custom logic in
54 :meth:`kinto.core.resource.UserResource.process_record`.
55 """
56
57 preserve_unknown = True
58 """Define if unknown fields should be preserved or not.
59
60 The resource is schema-less by default. In other words, any field name
61 will be accepted on records. Set this to ``False`` in order to limit
62 the accepted fields to the ones defined in the schema.
63 """
64
65 @classmethod
66 def get_option(cls, attr):
67 default_value = getattr(ResourceSchema.Options, attr)
68 return getattr(cls.Options, attr, default_value)
69
70 @classmethod
71 def is_readonly(cls, field):
72 """Return True if specified field name is read-only.
73
74 :param str field: the field name in the schema
75 :returns: ``True`` if the specified field is read-only,
76 ``False`` otherwise.
77 :rtype: bool
78 """
79 return field in cls.get_option("readonly_fields")
80
81 def schema_type(self):
82 if self.get_option("preserve_unknown") is True:
83 unknown = 'preserve'
84 else:
85 unknown = 'ignore'
86 return colander.Mapping(unknown=unknown)
87
88
89 class PermissionsSchema(colander.SchemaNode):
90 """A permission mapping defines ACEs.
91
92 It has permission names as keys and principals as values.
93
94 ::
95
96 {
97 "write": ["fxa:af3e077eb9f5444a949ad65aa86e82ff"],
98 "groups:create": ["fxa:70a9335eecfe440fa445ba752a750f3d"]
99 }
100
101 """
102
103 def __init__(self, *args, **kwargs):
104 self.known_perms = kwargs.pop('permissions', tuple())
105 super(PermissionsSchema, self).__init__(*args, **kwargs)
106
107 for perm in self.known_perms:
108 self[perm] = self._get_node_principals(perm)
109
110 def schema_type(self):
111 if self.known_perms:
112 return colander.Mapping(unknown='raise')
113 else:
114 return colander.Mapping(unknown='preserve')
115
116 def deserialize(self, cstruct=colander.null):
117
118 # If permissions are not a mapping (e.g null or invalid), try deserializing
119 if not isinstance(cstruct, dict):
120 return super(PermissionsSchema, self).deserialize(cstruct)
121
122 # If permissions are listed, check fields and produce fancy error messages
123 if self.known_perms:
124 for perm in cstruct:
125 colander.OneOf(choices=self.known_perms)(self, perm)
126 return super(PermissionsSchema, self).deserialize(cstruct)
127
128 # Else deserialize the fields that are not on the schema
129 permissions = {}
130 perm_schema = colander.SequenceSchema(colander.SchemaNode(colander.String()))
131 for perm, principals in cstruct.items():
132 permissions[perm] = perm_schema.deserialize(principals)
133
134 return permissions
135
136 def _get_node_principals(self, perm):
137 principal = colander.SchemaNode(colander.String())
138 return colander.SchemaNode(colander.Sequence(), principal, name=perm,
139 missing=colander.drop)
140
141
142 # Header schemas
143
144
145 class HeaderSchema(colander.MappingSchema):
146 """Base schema used for validating and deserializing request headers. """
147
148 missing = colander.drop
149
150 if_match = HeaderQuotedInteger(name='If-Match')
151 if_none_match = HeaderQuotedInteger(name='If-None-Match')
152
153 @staticmethod
154 def schema_type():
155 return colander.Mapping(unknown='preserve')
156
157
158 class PatchHeaderSchema(HeaderSchema):
159 """Header schema used with PATCH requests."""
160
161 def response_behavior_validator():
162 return colander.OneOf(['full', 'light', 'diff'])
163
164 response_behaviour = HeaderField(colander.String(), name='Response-Behavior',
165 validator=response_behavior_validator())
166
167
168 # Querystring schemas
169
170
171 class QuerySchema(colander.MappingSchema):
172 """
173 Schema used for validating and deserializing querystrings. It will include
174 and try to guess the type of unknown fields (field filters) on deserialization.
175 """
176 missing = colander.drop
177
178 @staticmethod
179 def schema_type():
180 return colander.Mapping(unknown='ignore')
181
182 def deserialize(self, cstruct=colander.null):
183 """
184 Deserialize and validate the QuerySchema fields and try to deserialize and
185 get the native value of additional filds (field filters) that may be present
186 on the cstruct.
187
188 e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True}
189 """
190 values = {}
191
192 schema_values = super(QuerySchema, self).deserialize(cstruct)
193 if schema_values is colander.drop:
194 return schema_values
195
196 # Deserialize querystring field filters (see docstring e.g)
197 for k, v in cstruct.items():
198 # Deserialize lists used on in_ and exclude_ filters
199 if k.startswith('in_') or k.startswith('exclude_'):
200 as_list = FieldList().deserialize(v)
201 if isinstance(as_list, list):
202 values[k] = [native_value(v) for v in as_list]
203 else:
204 values[k] = native_value(v)
205
206 values.update(schema_values)
207 return values
208
209
210 class CollectionQuerySchema(QuerySchema):
211 """Querystring schema used with collections."""
212
213 _limit = QueryField(colander.Integer())
214 _sort = FieldList()
215 _token = QueryField(colander.String())
216 _since = QueryField(colander.Integer())
217 _to = QueryField(colander.Integer())
218 _before = QueryField(colander.Integer())
219 id = QueryField(colander.String())
220 last_modified = QueryField(colander.Integer())
221
222
223 class RecordGetQuerySchema(QuerySchema):
224 """Querystring schema for GET record requests."""
225
226 _fields = FieldList()
227
228
229 class CollectionGetQuerySchema(CollectionQuerySchema):
230 """Querystring schema for GET collection requests."""
231
232 _fields = FieldList()
233
234
235 # Body Schemas
236
237
238 class RecordSchema(colander.MappingSchema):
239
240 @colander.deferred
241 def data(node, kwargs):
242 data = kwargs.get('data')
243 if data:
244 # Check if empty record is allowed.
245 # (e.g every schema fields have defaults)
246 try:
247 data.deserialize({})
248 except colander.Invalid:
249 pass
250 else:
251 data.default = {}
252 data.missing = colander.drop
253 return data
254
255 @colander.deferred
256 def permissions(node, kwargs):
257 def get_perms(node, kwargs):
258 return kwargs.get('permissions')
259 # Set if node is provided, else keep deferred. This allows binding the body
260 # on Resource first and bind permissions later if using SharableResource.
261 return get_perms(node, kwargs) or colander.deferred(get_perms)
262
263 @staticmethod
264 def schema_type():
265 return colander.Mapping(unknown='raise')
266
267
268 class JsonPatchOperationSchema(colander.MappingSchema):
269 """Single JSON Patch Operation."""
270
271 def op_validator():
272 op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy']
273 return colander.OneOf(op_values)
274
275 def path_validator():
276 return colander.Regex('(/\w*)+')
277
278 op = colander.SchemaNode(colander.String(), validator=op_validator())
279 path = colander.SchemaNode(colander.String(), validator=path_validator())
280 from_ = colander.SchemaNode(colander.String(), name='from',
281 validator=path_validator(), missing=colander.drop)
282 value = colander.SchemaNode(Any(), missing=colander.drop)
283
284 @staticmethod
285 def schema_type():
286 return colander.Mapping(unknown='raise')
287
288
289 class JsonPatchBodySchema(colander.SequenceSchema):
290 """Body used with JSON Patch (application/json-patch+json) as in RFC 6902."""
291
292 operations = JsonPatchOperationSchema(missing=colander.drop)
293
294
295 # Request schemas
296
297
298 class RequestSchema(colander.MappingSchema):
299 """Base schema for kinto requests."""
300
301 @colander.deferred
302 def header(node, kwargs):
303 return kwargs.get('header')
304
305 @colander.deferred
306 def querystring(node, kwargs):
307 return kwargs.get('querystring')
308
309 def after_bind(self, node, kw):
310 # Set default bindings
311 if not self.get('header'):
312 self['header'] = HeaderSchema()
313 if not self.get('querystring'):
314 self['querystring'] = QuerySchema()
315
316
317 class PayloadRequestSchema(RequestSchema):
318 """Base schema for methods that use a JSON request body."""
319
320 @colander.deferred
321 def body(node, kwargs):
322 def get_body(node, kwargs):
323 return kwargs.get('body')
324         # Set if node is provided, else keep deferred (and allow binding later)
325 return get_body(node, kwargs) or colander.deferred(get_body)
326
327
328 class JsonPatchRequestSchema(RequestSchema):
329 """JSON Patch (application/json-patch+json) request schema."""
330
331 body = JsonPatchBodySchema()
332 querystring = QuerySchema()
333 header = PatchHeaderSchema()
334
[end of kinto/core/resource/schema.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/resource/schema.py b/kinto/core/resource/schema.py
--- a/kinto/core/resource/schema.py
+++ b/kinto/core/resource/schema.py
@@ -1,3 +1,4 @@
+from __future__ import division
import warnings
import colander
@@ -6,6 +7,10 @@
FieldList, TimeStamp, URL)
from kinto.core.utils import native_value
+POSTGRESQL_MAX_INTEGER_VALUE = 2**64 // 2
+
+positive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE)
+
class TimeStamp(TimeStamp):
"""This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead."""
@@ -210,14 +215,14 @@
class CollectionQuerySchema(QuerySchema):
"""Querystring schema used with collections."""
- _limit = QueryField(colander.Integer())
+ _limit = QueryField(colander.Integer(), validator=positive_big_integer)
_sort = FieldList()
_token = QueryField(colander.String())
- _since = QueryField(colander.Integer())
- _to = QueryField(colander.Integer())
- _before = QueryField(colander.Integer())
+ _since = QueryField(colander.Integer(), validator=positive_big_integer)
+ _to = QueryField(colander.Integer(), validator=positive_big_integer)
+ _before = QueryField(colander.Integer(), validator=positive_big_integer)
id = QueryField(colander.String())
- last_modified = QueryField(colander.Integer())
+ last_modified = QueryField(colander.Integer(), validator=positive_big_integer)
class RecordGetQuerySchema(QuerySchema):
| {"golden_diff": "diff --git a/kinto/core/resource/schema.py b/kinto/core/resource/schema.py\n--- a/kinto/core/resource/schema.py\n+++ b/kinto/core/resource/schema.py\n@@ -1,3 +1,4 @@\n+from __future__ import division\n import warnings\n \n import colander\n@@ -6,6 +7,10 @@\n FieldList, TimeStamp, URL)\n from kinto.core.utils import native_value\n \n+POSTGRESQL_MAX_INTEGER_VALUE = 2**64 // 2\n+\n+positive_big_integer = colander.Range(min=0, max=POSTGRESQL_MAX_INTEGER_VALUE)\n+\n \n class TimeStamp(TimeStamp):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.\"\"\"\n@@ -210,14 +215,14 @@\n class CollectionQuerySchema(QuerySchema):\n \"\"\"Querystring schema used with collections.\"\"\"\n \n- _limit = QueryField(colander.Integer())\n+ _limit = QueryField(colander.Integer(), validator=positive_big_integer)\n _sort = FieldList()\n _token = QueryField(colander.String())\n- _since = QueryField(colander.Integer())\n- _to = QueryField(colander.Integer())\n- _before = QueryField(colander.Integer())\n+ _since = QueryField(colander.Integer(), validator=positive_big_integer)\n+ _to = QueryField(colander.Integer(), validator=positive_big_integer)\n+ _before = QueryField(colander.Integer(), validator=positive_big_integer)\n id = QueryField(colander.String())\n- last_modified = QueryField(colander.Integer())\n+ last_modified = QueryField(colander.Integer(), validator=positive_big_integer)\n \n \n class RecordGetQuerySchema(QuerySchema):\n", "issue": "_since overflow error: long too big to convert\nWhen since is too big with the Postgresql backend it fails with an OverflowError: https://sentry.prod.mozaws.net/operations/kinto-prod-1/issues/382250/\n", "before_files": [{"content": "import warnings\n\nimport colander\n\nfrom kinto.core.schema import (Any, HeaderField, QueryField, HeaderQuotedInteger,\n FieldList, TimeStamp, URL)\nfrom kinto.core.utils import native_value\n\n\nclass TimeStamp(TimeStamp):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.TimeStamp` instead.\"\"\"\n\n def __init__(self, *args, **kwargs):\n message = (\"`kinto.core.resource.schema.TimeStamp` is deprecated, \"\n \"use `kinto.core.schema.TimeStamp` instead.\")\n warnings.warn(message, DeprecationWarning)\n super(TimeStamp, self).__init__(*args, **kwargs)\n\n\nclass URL(URL):\n \"\"\"This schema is deprecated, you shoud use `kinto.core.schema.URL` instead.\"\"\"\n\n def __init__(self, *args, **kwargs):\n message = (\"`kinto.core.resource.schema.URL` is deprecated, \"\n \"use `kinto.core.schema.URL` instead.\")\n warnings.warn(message, DeprecationWarning)\n super(URL, self).__init__(*args, **kwargs)\n\n\n# Resource related schemas\n\n\nclass ResourceSchema(colander.MappingSchema):\n \"\"\"Base resource schema, with *Cliquet* specific built-in options.\"\"\"\n\n class Options:\n \"\"\"\n Resource schema options.\n\n This is meant to be overriden for changing values:\n\n .. code-block:: python\n\n class Product(ResourceSchema):\n reference = colander.SchemaNode(colander.String())\n\n class Options:\n readonly_fields = ('reference',)\n \"\"\"\n readonly_fields = tuple()\n \"\"\"Fields that cannot be updated. Values for fields will have to be\n provided either during record creation, through default values using\n ``missing`` attribute or implementing a custom logic in\n :meth:`kinto.core.resource.UserResource.process_record`.\n \"\"\"\n\n preserve_unknown = True\n \"\"\"Define if unknown fields should be preserved or not.\n\n The resource is schema-less by default. 
In other words, any field name\n will be accepted on records. Set this to ``False`` in order to limit\n the accepted fields to the ones defined in the schema.\n \"\"\"\n\n @classmethod\n def get_option(cls, attr):\n default_value = getattr(ResourceSchema.Options, attr)\n return getattr(cls.Options, attr, default_value)\n\n @classmethod\n def is_readonly(cls, field):\n \"\"\"Return True if specified field name is read-only.\n\n :param str field: the field name in the schema\n :returns: ``True`` if the specified field is read-only,\n ``False`` otherwise.\n :rtype: bool\n \"\"\"\n return field in cls.get_option(\"readonly_fields\")\n\n def schema_type(self):\n if self.get_option(\"preserve_unknown\") is True:\n unknown = 'preserve'\n else:\n unknown = 'ignore'\n return colander.Mapping(unknown=unknown)\n\n\nclass PermissionsSchema(colander.SchemaNode):\n \"\"\"A permission mapping defines ACEs.\n\n It has permission names as keys and principals as values.\n\n ::\n\n {\n \"write\": [\"fxa:af3e077eb9f5444a949ad65aa86e82ff\"],\n \"groups:create\": [\"fxa:70a9335eecfe440fa445ba752a750f3d\"]\n }\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.known_perms = kwargs.pop('permissions', tuple())\n super(PermissionsSchema, self).__init__(*args, **kwargs)\n\n for perm in self.known_perms:\n self[perm] = self._get_node_principals(perm)\n\n def schema_type(self):\n if self.known_perms:\n return colander.Mapping(unknown='raise')\n else:\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n\n # If permissions are not a mapping (e.g null or invalid), try deserializing\n if not isinstance(cstruct, dict):\n return super(PermissionsSchema, self).deserialize(cstruct)\n\n # If permissions are listed, check fields and produce fancy error messages\n if self.known_perms:\n for perm in cstruct:\n colander.OneOf(choices=self.known_perms)(self, perm)\n return super(PermissionsSchema, self).deserialize(cstruct)\n\n # Else deserialize the fields that are not on the schema\n permissions = {}\n perm_schema = colander.SequenceSchema(colander.SchemaNode(colander.String()))\n for perm, principals in cstruct.items():\n permissions[perm] = perm_schema.deserialize(principals)\n\n return permissions\n\n def _get_node_principals(self, perm):\n principal = colander.SchemaNode(colander.String())\n return colander.SchemaNode(colander.Sequence(), principal, name=perm,\n missing=colander.drop)\n\n\n# Header schemas\n\n\nclass HeaderSchema(colander.MappingSchema):\n \"\"\"Base schema used for validating and deserializing request headers. \"\"\"\n\n missing = colander.drop\n\n if_match = HeaderQuotedInteger(name='If-Match')\n if_none_match = HeaderQuotedInteger(name='If-None-Match')\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='preserve')\n\n\nclass PatchHeaderSchema(HeaderSchema):\n \"\"\"Header schema used with PATCH requests.\"\"\"\n\n def response_behavior_validator():\n return colander.OneOf(['full', 'light', 'diff'])\n\n response_behaviour = HeaderField(colander.String(), name='Response-Behavior',\n validator=response_behavior_validator())\n\n\n# Querystring schemas\n\n\nclass QuerySchema(colander.MappingSchema):\n \"\"\"\n Schema used for validating and deserializing querystrings. 
It will include\n and try to guess the type of unknown fields (field filters) on deserialization.\n \"\"\"\n missing = colander.drop\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='ignore')\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"\n Deserialize and validate the QuerySchema fields and try to deserialize and\n get the native value of additional filds (field filters) that may be present\n on the cstruct.\n\n e.g:: ?exclude_id=a,b&deleted=true -> {'exclude_id': ['a', 'b'], deleted: True}\n \"\"\"\n values = {}\n\n schema_values = super(QuerySchema, self).deserialize(cstruct)\n if schema_values is colander.drop:\n return schema_values\n\n # Deserialize querystring field filters (see docstring e.g)\n for k, v in cstruct.items():\n # Deserialize lists used on in_ and exclude_ filters\n if k.startswith('in_') or k.startswith('exclude_'):\n as_list = FieldList().deserialize(v)\n if isinstance(as_list, list):\n values[k] = [native_value(v) for v in as_list]\n else:\n values[k] = native_value(v)\n\n values.update(schema_values)\n return values\n\n\nclass CollectionQuerySchema(QuerySchema):\n \"\"\"Querystring schema used with collections.\"\"\"\n\n _limit = QueryField(colander.Integer())\n _sort = FieldList()\n _token = QueryField(colander.String())\n _since = QueryField(colander.Integer())\n _to = QueryField(colander.Integer())\n _before = QueryField(colander.Integer())\n id = QueryField(colander.String())\n last_modified = QueryField(colander.Integer())\n\n\nclass RecordGetQuerySchema(QuerySchema):\n \"\"\"Querystring schema for GET record requests.\"\"\"\n\n _fields = FieldList()\n\n\nclass CollectionGetQuerySchema(CollectionQuerySchema):\n \"\"\"Querystring schema for GET collection requests.\"\"\"\n\n _fields = FieldList()\n\n\n# Body Schemas\n\n\nclass RecordSchema(colander.MappingSchema):\n\n @colander.deferred\n def data(node, kwargs):\n data = kwargs.get('data')\n if data:\n # Check if empty record is allowed.\n # (e.g every schema fields have defaults)\n try:\n data.deserialize({})\n except colander.Invalid:\n pass\n else:\n data.default = {}\n data.missing = colander.drop\n return data\n\n @colander.deferred\n def permissions(node, kwargs):\n def get_perms(node, kwargs):\n return kwargs.get('permissions')\n # Set if node is provided, else keep deferred. 
This allows binding the body\n # on Resource first and bind permissions later if using SharableResource.\n return get_perms(node, kwargs) or colander.deferred(get_perms)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass JsonPatchOperationSchema(colander.MappingSchema):\n \"\"\"Single JSON Patch Operation.\"\"\"\n\n def op_validator():\n op_values = ['test', 'add', 'remove', 'replace', 'move', 'copy']\n return colander.OneOf(op_values)\n\n def path_validator():\n return colander.Regex('(/\\w*)+')\n\n op = colander.SchemaNode(colander.String(), validator=op_validator())\n path = colander.SchemaNode(colander.String(), validator=path_validator())\n from_ = colander.SchemaNode(colander.String(), name='from',\n validator=path_validator(), missing=colander.drop)\n value = colander.SchemaNode(Any(), missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass JsonPatchBodySchema(colander.SequenceSchema):\n \"\"\"Body used with JSON Patch (application/json-patch+json) as in RFC 6902.\"\"\"\n\n operations = JsonPatchOperationSchema(missing=colander.drop)\n\n\n# Request schemas\n\n\nclass RequestSchema(colander.MappingSchema):\n \"\"\"Base schema for kinto requests.\"\"\"\n\n @colander.deferred\n def header(node, kwargs):\n return kwargs.get('header')\n\n @colander.deferred\n def querystring(node, kwargs):\n return kwargs.get('querystring')\n\n def after_bind(self, node, kw):\n # Set default bindings\n if not self.get('header'):\n self['header'] = HeaderSchema()\n if not self.get('querystring'):\n self['querystring'] = QuerySchema()\n\n\nclass PayloadRequestSchema(RequestSchema):\n \"\"\"Base schema for methods that use a JSON request body.\"\"\"\n\n @colander.deferred\n def body(node, kwargs):\n def get_body(node, kwargs):\n return kwargs.get('body')\n # Set if node is provided, else keep deferred (and allow bindind later)\n return get_body(node, kwargs) or colander.deferred(get_body)\n\n\nclass JsonPatchRequestSchema(RequestSchema):\n \"\"\"JSON Patch (application/json-patch+json) request schema.\"\"\"\n\n body = JsonPatchBodySchema()\n querystring = QuerySchema()\n header = PatchHeaderSchema()\n", "path": "kinto/core/resource/schema.py"}]} | 3,848 | 366 |
gh_patches_debug_30939 | rasdani/github-patches | git_diff | keras-team__keras-nlp-357 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve our continuous testing for model presets
Opening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope.
I would like to propose the following changes to our "network_tests" for presets:
 - We collocate the preset testing within the model directory, and use test annotations to control how they are run (see the sketch after this list).
- We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code.
- We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints.
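
For illustration, the annotation-based control could look roughly like the sketch below. The test names and bodies are placeholders rather than actual keras-nlp tests; only the `large`/`extra_large` marker names come from the proposed change.

```python
# Hypothetical preset tests collocated with the model code; pytest markers
# decide whether they run continuously or only when explicitly requested.
import pytest


@pytest.mark.large  # smallest preset: cheap enough to run continuously on GCP
def test_smallest_preset_output():
    ...  # load the smallest preset and compare outputs with a relaxed tolerance


@pytest.mark.extra_large  # remaining presets: opt-in only
def test_all_presets_load():
    ...  # smoke-test the larger checkpoints
```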
</issue>
<code>
[start of keras_nlp/conftest.py]
1 # Copyright 2022 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import sys
15
16 import pytest
17
18
19 def pytest_addoption(parser):
20 parser.addoption(
21 "--runslow", action="store_true", default=False, help="run slow tests"
22 )
23
24
25 def pytest_configure(config):
26 config.addinivalue_line("markers", "slow: mark test as slow to run")
27
28
29 def pytest_collection_modifyitems(config, items):
30 if config.getoption("--runslow"):
31 # --runslow given in cli: do not skip slow tests
32 return
33 skip_slow = pytest.mark.skip(reason="need --runslow option to run")
34 skip_xla = pytest.mark.skipif(
35 sys.platform == "darwin", reason="XLA unsupported on MacOS."
36 )
37
38 for item in items:
39 if "slow" in item.keywords:
40 item.add_marker(skip_slow)
41 if "jit_compile_true" in item.name:
42 item.add_marker(skip_xla)
43
[end of keras_nlp/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py
--- a/keras_nlp/conftest.py
+++ b/keras_nlp/conftest.py
@@ -18,25 +18,48 @@
def pytest_addoption(parser):
parser.addoption(
- "--runslow", action="store_true", default=False, help="run slow tests"
+ "--run_large",
+ action="store_true",
+ default=False,
+ help="run large tests",
+ )
+ parser.addoption(
+ "--run_extra_large",
+ action="store_true",
+ default=False,
+ help="run extra_large tests",
)
def pytest_configure(config):
- config.addinivalue_line("markers", "slow: mark test as slow to run")
+ config.addinivalue_line(
+ "markers", "large: mark test as being slow or requiring a network"
+ )
+ config.addinivalue_line(
+ "markers",
+ "extra_large: mark test as being too large to run continuously",
+ )
def pytest_collection_modifyitems(config, items):
- if config.getoption("--runslow"):
- # --runslow given in cli: do not skip slow tests
- return
- skip_slow = pytest.mark.skip(reason="need --runslow option to run")
+ run_extra_large_tests = config.getoption("--run_extra_large")
+ # Run large tests for --run_extra_large or --run_large.
+ run_large_tests = config.getoption("--run_large") or run_extra_large_tests
+
+ # Messages to annotate skipped tests with.
skip_xla = pytest.mark.skipif(
sys.platform == "darwin", reason="XLA unsupported on MacOS."
)
-
+ skip_large = pytest.mark.skipif(
+ not run_large_tests, reason="need --run_large option to run"
+ )
+ skip_extra_large = pytest.mark.skipif(
+ not run_extra_large_tests, reason="need --run_extra_large option to run"
+ )
for item in items:
- if "slow" in item.keywords:
- item.add_marker(skip_slow)
if "jit_compile_true" in item.name:
item.add_marker(skip_xla)
+ if "large" in item.keywords:
+ item.add_marker(skip_large)
+ if "extra_large" in item.keywords:
+ item.add_marker(skip_extra_large)
| {"golden_diff": "diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py\n--- a/keras_nlp/conftest.py\n+++ b/keras_nlp/conftest.py\n@@ -18,25 +18,48 @@\n \n def pytest_addoption(parser):\n parser.addoption(\n- \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n+ \"--run_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run large tests\",\n+ )\n+ parser.addoption(\n+ \"--run_extra_large\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"run extra_large tests\",\n )\n \n \n def pytest_configure(config):\n- config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n+ config.addinivalue_line(\n+ \"markers\", \"large: mark test as being slow or requiring a network\"\n+ )\n+ config.addinivalue_line(\n+ \"markers\",\n+ \"extra_large: mark test as being too large to run continuously\",\n+ )\n \n \n def pytest_collection_modifyitems(config, items):\n- if config.getoption(\"--runslow\"):\n- # --runslow given in cli: do not skip slow tests\n- return\n- skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n+ run_extra_large_tests = config.getoption(\"--run_extra_large\")\n+ # Run large tests for --run_extra_large or --run_large.\n+ run_large_tests = config.getoption(\"--run_large\") or run_extra_large_tests\n+\n+ # Messages to annotate skipped tests with.\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n-\n+ skip_large = pytest.mark.skipif(\n+ not run_large_tests, reason=\"need --run_large option to run\"\n+ )\n+ skip_extra_large = pytest.mark.skipif(\n+ not run_extra_large_tests, reason=\"need --run_extra_large option to run\"\n+ )\n for item in items:\n- if \"slow\" in item.keywords:\n- item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n+ if \"large\" in item.keywords:\n+ item.add_marker(skip_large)\n+ if \"extra_large\" in item.keywords:\n+ item.add_marker(skip_extra_large)\n", "issue": "Improve our continuous testing for model presets\nOpening an issue to track the changes proposed on https://github.com/keras-team/keras-nlp/pull/357, as it has gotten slightly larger in scope.\r\n\r\nI would like to propose the following changes to our \"network_tests\" for presets:\r\n\r\n - We collocate the preset testing within the model directory, and use test annotations to control how they are run.\r\n - We run the smallest available preset (per model) continuously on GCP, so we get some automated coverage for our preset code.\r\n - We actually test the output of our smallest available preset (with a relaxed float tolerance), so we can catch code updates that would break our checkpoints.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n\n\ndef pytest_configure(config):\n 
config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n\n\ndef pytest_collection_modifyitems(config, items):\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n skip_xla = pytest.mark.skipif(\n sys.platform == \"darwin\", reason=\"XLA unsupported on MacOS.\"\n )\n\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n if \"jit_compile_true\" in item.name:\n item.add_marker(skip_xla)\n", "path": "keras_nlp/conftest.py"}]} | 1,086 | 540 |
gh_patches_debug_59678 | rasdani/github-patches | git_diff | mozilla__bugbug-31 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create a classifier to detect bugs that need QA
Needed for https://github.com/mozilla/relman-auto-nag/issues/227.
To do this, we'll need to collect some labels.
We can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have the `qe-verify` flag.
We can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set.
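
A rough sketch of that labeling step is below; the Bugzilla field names (`keywords`, `flags`) and the helper name are assumptions for illustration, not the project's actual labeling code.

```python
# Hypothetical helper: a bug counts as a positive "QA needed" example when it
# carries the qawanted keyword or a qe-verify flag.
def is_qa_needed(bug):
    keywords = bug.get("keywords", [])
    flag_names = [flag.get("name") for flag in bug.get("flags", [])]
    return "qawanted" in keywords or "qe-verify" in flag_names


# Only positive labels can be harvested this way; a missing keyword/flag is
# not a reliable negative.
print(is_qa_needed({"keywords": ["qawanted"], "flags": []}))  # True
```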
</issue>
<code>
[start of bugbug/models/qaneeded.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from sklearn.feature_extraction import DictVectorizer
8 from sklearn.pipeline import FeatureUnion
9 from sklearn.pipeline import Pipeline
10
11 from bugbug import bug_features
12 from bugbug import labels
13 from bugbug.model import Model
14 from bugbug.utils import DictSelector
15
16
17 class QANeededModel(Model):
18 def __init__(self, lemmatization=False):
19 Model.__init__(self, lemmatization)
20
21 self.classes = labels.get_qa_needed_labels()
22
23 feature_extractors = [
24 bug_features.has_str(),
25 bug_features.has_regression_range(),
26 bug_features.severity(),
27 bug_features.keywords(),
28 bug_features.is_coverity_issue(),
29 bug_features.has_crash_signature(),
30 bug_features.has_url(),
31 bug_features.has_w3c_url(),
32 bug_features.has_github_url(),
33 bug_features.whiteboard(),
34 bug_features.patches(),
35 bug_features.landings(),
36 bug_features.title(),
37 bug_features.comments(),
38 ]
39
40 self.extraction_pipeline = Pipeline([
41 ('bug_extractor', bug_features.BugExtractor(feature_extractors)),
42 ('union', FeatureUnion(
43 transformer_list=[
44 ('data', Pipeline([
45 ('selector', DictSelector(key='data')),
46 ('vect', DictVectorizer()),
47 ])),
48
49 ('title', Pipeline([
50 ('selector', DictSelector(key='title')),
51 ('tfidf', self.text_vectorizer(stop_words='english')),
52 ])),
53
54 ('comments', Pipeline([
55 ('selector', DictSelector(key='comments')),
56 ('tfidf', self.text_vectorizer(stop_words='english')),
57 ])),
58 ],
59 )),
60 ])
61
62 self.clf = xgboost.XGBClassifier(n_jobs=16)
63
[end of bugbug/models/qaneeded.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py
--- a/bugbug/models/qaneeded.py
+++ b/bugbug/models/qaneeded.py
@@ -24,7 +24,7 @@
bug_features.has_str(),
bug_features.has_regression_range(),
bug_features.severity(),
- bug_features.keywords(),
+ bug_features.keywords(set(['qawanted'])),
bug_features.is_coverity_issue(),
bug_features.has_crash_signature(),
bug_features.has_url(),
| {"golden_diff": "diff --git a/bugbug/models/qaneeded.py b/bugbug/models/qaneeded.py\n--- a/bugbug/models/qaneeded.py\n+++ b/bugbug/models/qaneeded.py\n@@ -24,7 +24,7 @@\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n- bug_features.keywords(),\n+ bug_features.keywords(set(['qawanted'])),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n", "issue": "Create a classifier to detect bugs that need QA\nNeeded for https://github.com/mozilla/relman-auto-nag/issues/227.\r\n\r\nTo do this, we'll need to collect some labels.\r\nWe can automatically create some positive labels by getting bugs that have the `qawanted` keyword or that have `qe-verify` flag.\r\nWe can't automatically create negative labels because we can't be sure that QA was not needed when the keyword/flag was not set.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import FeatureUnion\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import labels\nfrom bugbug.model import Model\nfrom bugbug.utils import DictSelector\n\n\nclass QANeededModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.classes = labels.get_qa_needed_labels()\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.comments(),\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors)),\n ('union', FeatureUnion(\n transformer_list=[\n ('data', Pipeline([\n ('selector', DictSelector(key='data')),\n ('vect', DictVectorizer()),\n ])),\n\n ('title', Pipeline([\n ('selector', DictSelector(key='title')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n\n ('comments', Pipeline([\n ('selector', DictSelector(key='comments')),\n ('tfidf', self.text_vectorizer(stop_words='english')),\n ])),\n ],\n )),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n", "path": "bugbug/models/qaneeded.py"}]} | 1,170 | 115 |
gh_patches_debug_6590 | rasdani/github-patches | git_diff | napalm-automation__napalm-836 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
napalm validate is modifying the source dictionary with pop operations
```python
def napalm_validate(napalm, file=None, source=None):
complies = napalm.compliance_report(validation_file=file, validation_source=source)[
"complies"
]
return complies
```
and I'm using it like this:
```python
def whatever(task):
napalm = task.host.get_connection("napalm")
ping_gw = [
{
"ping": {
"_name": "ping_gw",
"_kwargs": {"destination": "10.0.0.1", "source": "Vlan1000"},
"success": {"packet_loss": 0},
"_mode": "strict",
}
}
]
print("1", task.host, ping_gw)
ping = napalm_validate(napalm, source=ping_gw)
print("2", task.host, ping_gw)
ping = napalm_validate(napalm, source=ping_gw)
```
which results in
```
1 host02 [{'ping': {'_name': 'ping_gw', '_kwargs':
{'destination': '10.0.0.1', 'source': 'Vlan1000'},
'success': {'packet_loss': 0}, '_mode': 'strict'}}]
2 host02 [{'ping': {'success': {'packet_loss': 0}}}]
```
is it because of `kwargs = expected_results.pop('_kwargs', {})` in `compliance_report`?
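
A stripped-down, standalone illustration of that suspicion (not the napalm code itself): popping `_name`/`_kwargs` from the nested dicts mutates the caller's object, which a `copy.deepcopy` on the way in avoids.

```python
import copy


def fake_report(validation_source):
    # Mimics the pop() calls that compliance_report makes on the caller's dicts.
    for check in validation_source:
        for getter, expected in check.items():
            expected.pop("_name", "")
            expected.pop("_kwargs", {})


source = [{"ping": {"_name": "ping_gw",
                    "_kwargs": {"destination": "10.0.0.1", "source": "Vlan1000"},
                    "success": {"packet_loss": 0}, "_mode": "strict"}}]

fake_report(copy.deepcopy(source))  # work on a copy: source keeps all its keys
print(sorted(source[0]["ping"]))    # ['_kwargs', '_mode', '_name', 'success']

fake_report(source)                 # work in place: _name/_kwargs are stripped
print(sorted(source[0]["ping"]))    # ['_mode', 'success']
```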
</issue>
<code>
[start of napalm/base/validate.py]
1 """
2 Validation methods for the NAPALM base.
3
4 See: https://napalm.readthedocs.io/en/latest/validate.html
5 """
6 from __future__ import unicode_literals
7
8 import yaml
9
10 from napalm.base.exceptions import ValidationException
11 from napalm.base.utils import py23_compat
12
13 import copy
14 import re
15
16
17 # We put it here to compile it only once
18 numeric_compare_regex = re.compile(r"^(<|>|<=|>=|==|!=)(\d+(\.\d+){0,1})$")
19
20
21 def _get_validation_file(validation_file):
22 try:
23 with open(validation_file, 'r') as stream:
24 try:
25 validation_source = yaml.safe_load(stream)
26 except yaml.YAMLError as exc:
27 raise ValidationException(exc)
28 except IOError:
29 raise ValidationException("File {0} not found.".format(validation_file))
30 return validation_source
31
32
33 def _mode(mode_string):
34 mode = {'strict': False}
35
36 for m in mode_string.split():
37 if m not in mode.keys():
38 raise ValidationException("mode '{}' not recognized".format(m))
39 mode[m] = True
40 return mode
41
42
43 def _compare_getter_list(src, dst, mode):
44 result = {"complies": True, "present": [], "missing": [], "extra": []}
45 for src_element in src:
46 found = False
47
48 i = 0
49 while True:
50 try:
51 intermediate_match = compare(src_element, dst[i])
52 if isinstance(intermediate_match, dict) and intermediate_match["complies"] or \
53 not isinstance(intermediate_match, dict) and intermediate_match:
54 found = True
55 result["present"].append(src_element)
56 dst.pop(i)
57 break
58 else:
59 i += 1
60 except IndexError:
61 break
62
63 if not found:
64 result["complies"] = False
65 result["missing"].append(src_element)
66
67 if mode["strict"] and dst:
68 result["extra"] = dst
69 result["complies"] = False
70
71 return result
72
73
74 def _compare_getter_dict(src, dst, mode):
75 result = {"complies": True, "present": {}, "missing": [], "extra": []}
76 dst = copy.deepcopy(dst) # Otherwise we are going to modify a "live" object
77
78 for key, src_element in src.items():
79 try:
80 dst_element = dst.pop(key)
81 result["present"][key] = {}
82 intermediate_result = compare(src_element, dst_element)
83
84 if isinstance(intermediate_result, dict):
85 nested = True
86
87 complies = intermediate_result["complies"]
88
89 if not complies:
90 result["present"][key]['diff'] = intermediate_result
91 else:
92 complies = intermediate_result
93 nested = False
94 if not complies:
95 result["present"][key]["expected_value"] = src_element
96 result["present"][key]["actual_value"] = dst_element
97
98 if not complies:
99 result["complies"] = False
100
101 result["present"][key]["complies"] = complies
102 result["present"][key]["nested"] = nested
103 except KeyError:
104 result["missing"].append(key)
105 result["complies"] = False
106
107 if mode["strict"] and dst:
108 result["extra"] = list(dst.keys())
109 result["complies"] = False
110
111 return result
112
113
114 def compare(src, dst):
115 if isinstance(src, py23_compat.string_types):
116 src = py23_compat.text_type(src)
117
118 if isinstance(src, dict):
119 mode = _mode(src.pop('_mode', ''))
120 if 'list' in src.keys():
121 if not isinstance(dst, list):
122 # This can happen with nested lists
123 return False
124
125 return _compare_getter_list(src['list'], dst, mode)
126 return _compare_getter_dict(src, dst, mode)
127
128 elif isinstance(src, py23_compat.string_types):
129 if src.startswith('<') or src.startswith('>'):
130 cmp_result = _compare_numeric(src, dst)
131 return cmp_result
132 else:
133 m = re.search(src, py23_compat.text_type(dst))
134 if m:
135 return bool(m)
136 else:
137 return src == dst
138
139 elif(type(src) == type(dst) == list):
140 pairs = zip(src, dst)
141 diff_lists = [[(k, x[k], y[k])
142 for k in x if not re.search(x[k], y[k])]
143 for x, y in pairs if x != y]
144 return empty_tree(diff_lists)
145
146 else:
147 return src == dst
148
149
150 def _compare_numeric(src_num, dst_num):
151 """Compare numerical values. You can use '<%d','>%d'."""
152 dst_num = float(dst_num)
153
154 match = numeric_compare_regex.match(src_num)
155 if not match:
156 error = "Failed numeric comparison. Collected: {}. Expected: {}".format(dst_num, src_num)
157 raise ValueError(error)
158
159 operand = {
160 "<": "__lt__",
161 ">": "__gt__",
162 ">=": "__ge__",
163 "<=": "__le__",
164 "==": "__eq__",
165 "!=": "__ne__",
166 }
167 return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))
168
169
170 def empty_tree(input_list):
171 """Recursively iterate through values in nested lists."""
172 for item in input_list:
173 if not isinstance(item, list) or not empty_tree(item):
174 return False
175 return True
176
177
178 def compliance_report(cls, validation_file=None, validation_source=None):
179 report = {}
180 if validation_file:
181 validation_source = _get_validation_file(validation_file)
182
183 for validation_check in validation_source:
184 for getter, expected_results in validation_check.items():
185 if getter == "get_config":
186 # TBD
187 pass
188 else:
189 key = expected_results.pop("_name", "") or getter
190
191 try:
192 kwargs = expected_results.pop('_kwargs', {})
193 actual_results = getattr(cls, getter)(**kwargs)
194 report[key] = compare(expected_results, actual_results)
195 except NotImplementedError:
196 report[key] = {"skipped": True, "reason": "NotImplemented"}
197
198 complies = all([e.get("complies", True) for e in report.values()])
199 report["skipped"] = [k for k, v in report.items() if v.get("skipped", False)]
200 report["complies"] = complies
201 return report
202
[end of napalm/base/validate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napalm/base/validate.py b/napalm/base/validate.py
--- a/napalm/base/validate.py
+++ b/napalm/base/validate.py
@@ -180,6 +180,9 @@
if validation_file:
validation_source = _get_validation_file(validation_file)
+ # Otherwise we are going to modify a "live" object
+ validation_source = copy.deepcopy(validation_source)
+
for validation_check in validation_source:
for getter, expected_results in validation_check.items():
if getter == "get_config":
| {"golden_diff": "diff --git a/napalm/base/validate.py b/napalm/base/validate.py\n--- a/napalm/base/validate.py\n+++ b/napalm/base/validate.py\n@@ -180,6 +180,9 @@\n if validation_file:\n validation_source = _get_validation_file(validation_file)\n \n+ # Otherwise we are going to modify a \"live\" object\n+ validation_source = copy.deepcopy(validation_source)\n+\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n", "issue": "napalm validate is modifying the source dictionary with pop operations\n```python\r\ndef napalm_validate(napalm, file=None, source=None):\r\n complies = napalm.compliance_report(validation_file=file, validation_source=source)[\r\n \"complies\"\r\n ]\r\n return complies\r\n```\r\n\r\nand i'm using it like this:\r\n\r\n```python\r\ndef whatever(task):\r\n napalm = task.host.get_connection(\"napalm\")\r\n\r\n ping_gw = [\r\n {\r\n \"ping\": {\r\n \"_name\": \"ping_gw\",\r\n \"_kwargs\": {\"destination\": \"10.0.0.1\", \"source\": \"Vlan1000\"},\r\n \"success\": {\"packet_loss\": 0},\r\n \"_mode\": \"strict\",\r\n }\r\n }\r\n ]\r\n\r\n print(\"1\", task.host, ping_gw)\r\n ping = napalm_validate(napalm, source=ping_gw)\r\n\r\n print(\"2\", task.host, ping_gw)\r\n ping = napalm_validate(napalm, source=ping_gw)\r\n```\r\n\r\nwhich results in\r\n```\r\n1 host02 [{'ping': {'_name': 'ping_gw', '_kwargs': \r\n {'destination': '10.0.0.1', 'source': 'Vlan1000'}, \r\n 'success': {'packet_loss': 0}, '_mode': 'strict'}}]\r\n2 host02 [{'ping': {'success': {'packet_loss': 0}}}]\r\n```\r\n\r\nis it because of `kwargs = expected_results.pop('_kwargs', {})` in `compliance_report`?\n", "before_files": [{"content": "\"\"\"\nValidation methods for the NAPALM base.\n\nSee: https://napalm.readthedocs.io/en/latest/validate.html\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport yaml\n\nfrom napalm.base.exceptions import ValidationException\nfrom napalm.base.utils import py23_compat\n\nimport copy\nimport re\n\n\n# We put it here to compile it only once\nnumeric_compare_regex = re.compile(r\"^(<|>|<=|>=|==|!=)(\\d+(\\.\\d+){0,1})$\")\n\n\ndef _get_validation_file(validation_file):\n try:\n with open(validation_file, 'r') as stream:\n try:\n validation_source = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ValidationException(exc)\n except IOError:\n raise ValidationException(\"File {0} not found.\".format(validation_file))\n return validation_source\n\n\ndef _mode(mode_string):\n mode = {'strict': False}\n\n for m in mode_string.split():\n if m not in mode.keys():\n raise ValidationException(\"mode '{}' not recognized\".format(m))\n mode[m] = True\n return mode\n\n\ndef _compare_getter_list(src, dst, mode):\n result = {\"complies\": True, \"present\": [], \"missing\": [], \"extra\": []}\n for src_element in src:\n found = False\n\n i = 0\n while True:\n try:\n intermediate_match = compare(src_element, dst[i])\n if isinstance(intermediate_match, dict) and intermediate_match[\"complies\"] or \\\n not isinstance(intermediate_match, dict) and intermediate_match:\n found = True\n result[\"present\"].append(src_element)\n dst.pop(i)\n break\n else:\n i += 1\n except IndexError:\n break\n\n if not found:\n result[\"complies\"] = False\n result[\"missing\"].append(src_element)\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = dst\n result[\"complies\"] = False\n\n return result\n\n\ndef _compare_getter_dict(src, dst, mode):\n result = {\"complies\": True, \"present\": {}, \"missing\": [], 
\"extra\": []}\n dst = copy.deepcopy(dst) # Otherwise we are going to modify a \"live\" object\n\n for key, src_element in src.items():\n try:\n dst_element = dst.pop(key)\n result[\"present\"][key] = {}\n intermediate_result = compare(src_element, dst_element)\n\n if isinstance(intermediate_result, dict):\n nested = True\n\n complies = intermediate_result[\"complies\"]\n\n if not complies:\n result[\"present\"][key]['diff'] = intermediate_result\n else:\n complies = intermediate_result\n nested = False\n if not complies:\n result[\"present\"][key][\"expected_value\"] = src_element\n result[\"present\"][key][\"actual_value\"] = dst_element\n\n if not complies:\n result[\"complies\"] = False\n\n result[\"present\"][key][\"complies\"] = complies\n result[\"present\"][key][\"nested\"] = nested\n except KeyError:\n result[\"missing\"].append(key)\n result[\"complies\"] = False\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = list(dst.keys())\n result[\"complies\"] = False\n\n return result\n\n\ndef compare(src, dst):\n if isinstance(src, py23_compat.string_types):\n src = py23_compat.text_type(src)\n\n if isinstance(src, dict):\n mode = _mode(src.pop('_mode', ''))\n if 'list' in src.keys():\n if not isinstance(dst, list):\n # This can happen with nested lists\n return False\n\n return _compare_getter_list(src['list'], dst, mode)\n return _compare_getter_dict(src, dst, mode)\n\n elif isinstance(src, py23_compat.string_types):\n if src.startswith('<') or src.startswith('>'):\n cmp_result = _compare_numeric(src, dst)\n return cmp_result\n else:\n m = re.search(src, py23_compat.text_type(dst))\n if m:\n return bool(m)\n else:\n return src == dst\n\n elif(type(src) == type(dst) == list):\n pairs = zip(src, dst)\n diff_lists = [[(k, x[k], y[k])\n for k in x if not re.search(x[k], y[k])]\n for x, y in pairs if x != y]\n return empty_tree(diff_lists)\n\n else:\n return src == dst\n\n\ndef _compare_numeric(src_num, dst_num):\n \"\"\"Compare numerical values. You can use '<%d','>%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = numeric_compare_regex.match(src_num)\n if not match:\n error = \"Failed numeric comparison. Collected: {}. 
Expected: {}\".format(dst_num, src_num)\n raise ValueError(error)\n\n operand = {\n \"<\": \"__lt__\",\n \">\": \"__gt__\",\n \">=\": \"__ge__\",\n \"<=\": \"__le__\",\n \"==\": \"__eq__\",\n \"!=\": \"__ne__\",\n }\n return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))\n\n\ndef empty_tree(input_list):\n \"\"\"Recursively iterate through values in nested lists.\"\"\"\n for item in input_list:\n if not isinstance(item, list) or not empty_tree(item):\n return False\n return True\n\n\ndef compliance_report(cls, validation_file=None, validation_source=None):\n report = {}\n if validation_file:\n validation_source = _get_validation_file(validation_file)\n\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n # TBD\n pass\n else:\n key = expected_results.pop(\"_name\", \"\") or getter\n\n try:\n kwargs = expected_results.pop('_kwargs', {})\n actual_results = getattr(cls, getter)(**kwargs)\n report[key] = compare(expected_results, actual_results)\n except NotImplementedError:\n report[key] = {\"skipped\": True, \"reason\": \"NotImplemented\"}\n\n complies = all([e.get(\"complies\", True) for e in report.values()])\n report[\"skipped\"] = [k for k, v in report.items() if v.get(\"skipped\", False)]\n report[\"complies\"] = complies\n return report\n", "path": "napalm/base/validate.py"}]} | 2,779 | 124 |
gh_patches_debug_13990 | rasdani/github-patches | git_diff | coala__coala-5853 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Result.py: Add docstring regarding the case that line or column is None
The output generated by coala can have ```line``` being ```None``` or ```column``` being ```None```.
```column = None``` means there is an error with the whole line.
```line = None``` means the whole file. The reasoning is that an unknown line means the issue can occur anywhere in the file.
All combinations with None values and numbers for line and column are allowed, except ```line=None``` and ```column=<some number>```
This should be clarified in the documentation.
(ref: http://api.coala.io/en/latest/coalib.results.html#module-coalib.results.Result)
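
For concreteness, the allowed combinations can be illustrated against the `Result.from_values` signature shown in the listing below (made-up origin/file values; assumes coala is installed):

```python
from coalib.results.Result import Result

Result.from_values("SomeBear", "msg", file="f.py", line=3, column=5)  # a specific column on line 3
Result.from_values("SomeBear", "msg", file="f.py", line=3)            # column=None: the whole line 3
Result.from_values("SomeBear", "msg", file="f.py")                    # line=None: anywhere in the file
# line=None together with column=<some number> is the one rejected combination.
```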
</issue>
<code>
[start of coalib/results/Result.py]
1 import uuid
2 from os.path import relpath
3
4 from coala_utils.decorators import (
5 enforce_signature, generate_ordering, generate_repr, get_public_members)
6 from coalib.bearlib.aspects import aspectbase
7 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
8 from coalib.results.SourceRange import SourceRange
9
10
11 # Omit additional info, debug message and diffs for brevity
12 @generate_repr(('id', hex),
13 'origin',
14 'affected_code',
15 ('severity', RESULT_SEVERITY.reverse.get),
16 'confidence',
17 'message',
18 ('aspect', lambda aspect: type(aspect).__qualname__),
19 'applied_actions')
20 @generate_ordering('affected_code',
21 'severity',
22 'confidence',
23 'origin',
24 'message_base',
25 'message_arguments',
26 'aspect',
27 'additional_info',
28 'diffs',
29 'debug_msg',
30 'applied_actions')
31 class Result:
32 """
33 A result is anything that has an origin and a message.
34
35 Optionally it might affect a file.
36
37 Result messages can also have arguments. The message is python
38 style formatted with these arguments.
39
40 >>> r = Result('origin','{arg1} and {arg2}', \
41 message_arguments={'arg1': 'foo', 'arg2': 'bar'})
42 >>> r.message
43 'foo and bar'
44
45 Message arguments may be changed later. The result message
46 will also reflect these changes.
47
48 >>> r.message_arguments = {'arg1': 'spam', 'arg2': 'eggs'}
49 >>> r.message
50 'spam and eggs'
51
52 """
53
54 @enforce_signature
55 def __init__(self,
56 origin,
57 message: str,
58 affected_code: (tuple, list) = (),
59 severity: int = RESULT_SEVERITY.NORMAL,
60 additional_info: str = '',
61 debug_msg='',
62 diffs: (dict, None) = None,
63 confidence: int = 100,
64 aspect: (aspectbase, None) = None,
65 message_arguments: dict = {},
66 applied_actions: dict = {}):
67 """
68 :param origin:
69 Class name or creator object of this object.
70 :param message:
71 Base message to show with this result.
72 :param affected_code:
73 A tuple of ``SourceRange`` objects pointing to related positions
74 in the source code.
75 :param severity:
76 Severity of this result.
77 :param additional_info:
78 A long description holding additional information about the issue
79 and/or how to fix it. You can use this like a manual entry for a
80 category of issues.
81 :param debug_msg:
82 A message which may help the user find out why this result was
83 yielded.
84 :param diffs:
85 A dictionary with filename as key and ``Diff`` object
86 associated with it as value.
87 :param confidence:
88 A number between 0 and 100 describing the likelihood of this result
89 being a real issue.
90 :param aspect:
91 An aspectclass instance which this result is associated to.
92 Note that this should be a leaf of the aspect tree!
93 (If you have a node, spend some time figuring out which of
94 the leafs exactly your result belongs to.)
95 :param message_arguments:
96 Arguments to be provided to the base message.
97 :param applied_actions:
98 A dictionary that contains the result, file_dict, file_diff_dict and
99 the section for an action.
100 :raises ValueError:
101 Raised when confidence is not between 0 and 100.
102 :raises KeyError:
103 Raised when message_base can not be formatted with
104 message_arguments.
105 """
106 origin = origin or ''
107 if not isinstance(origin, str):
108 origin = origin.__class__.__name__
109 if severity not in RESULT_SEVERITY.reverse:
110 raise ValueError('severity is not a valid RESULT_SEVERITY')
111
112 self.origin = origin
113 self.message_base = message
114 self.message_arguments = message_arguments
115 self.applied_actions = applied_actions
116 if message_arguments:
117 self.message_base.format(**self.message_arguments)
118 self.debug_msg = debug_msg
119 self.additional_info = additional_info
120 # Sorting is important for tuple comparison
121 self.affected_code = tuple(sorted(affected_code))
122 self.severity = severity
123 if confidence < 0 or confidence > 100:
124 raise ValueError('Value of confidence should be between 0 and 100.')
125 self.confidence = confidence
126 self.diffs = diffs
127 self.id = uuid.uuid4().int
128 self.aspect = aspect
129 if self.aspect and not self.additional_info:
130 self.additional_info = '{} {}'.format(
131 aspect.Docs.importance_reason, aspect.Docs.fix_suggestions)
132
133 @property
134 def message(self):
135 if not self.message_arguments:
136 return self.message_base
137 return self.message_base.format(**self.message_arguments)
138
139 @message.setter
140 def message(self, value: str):
141 self.message_base = value
142
143 def set_applied_actions(self, applied_actions):
144 self.applied_actions = applied_actions
145
146 def get_applied_actions(self):
147 return self.applied_actions
148
149 @classmethod
150 @enforce_signature
151 def from_values(cls,
152 origin,
153 message: str,
154 file: str,
155 line: (int, None) = None,
156 column: (int, None) = None,
157 end_line: (int, None) = None,
158 end_column: (int, None) = None,
159 severity: int = RESULT_SEVERITY.NORMAL,
160 additional_info: str = '',
161 debug_msg='',
162 diffs: (dict, None) = None,
163 confidence: int = 100,
164 aspect: (aspectbase, None) = None,
165 message_arguments: dict = {}):
166 """
167 Creates a result with only one SourceRange with the given start and end
168 locations.
169
170 :param origin:
171 Class name or creator object of this object.
172 :param message:
173 Base message to show with this result.
174 :param message_arguments:
175 Arguments to be provided to the base message
176 :param file:
177 The related file.
178 :param line:
179 The first related line in the file. (First line is 1)
180 :param column:
181 The column indicating the first character. (First character is 1)
182 :param end_line:
183 The last related line in the file.
184 :param end_column:
185 The column indicating the last character.
186 :param severity:
187 Severity of this result.
188 :param additional_info:
189 A long description holding additional information about the issue
190 and/or how to fix it. You can use this like a manual entry for a
191 category of issues.
192 :param debug_msg:
193 A message which may help the user find out why this result was
194 yielded.
195 :param diffs:
196 A dictionary with filename as key and ``Diff`` object
197 associated with it as value.
198 :param confidence:
199 A number between 0 and 100 describing the likelihood of this result
200 being a real issue.
201 :param aspect:
202 An Aspect object which this result is associated to. Note that this
203 should be a leaf of the aspect tree! (If you have a node, spend
204 some time figuring out which of the leafs exactly your result
205 belongs to.)
206 """
207 source_range = SourceRange.from_values(file,
208 line,
209 column,
210 end_line,
211 end_column)
212
213 return cls(origin=origin,
214 message=message,
215 affected_code=(source_range,),
216 severity=severity,
217 additional_info=additional_info,
218 debug_msg=debug_msg,
219 diffs=diffs,
220 confidence=confidence,
221 aspect=aspect,
222 message_arguments=message_arguments)
223
224 def to_string_dict(self):
225 """
226 Makes a dictionary which has all keys and values as strings and
227 contains all the data that the base Result has.
228
229 FIXME: diffs are not serialized ATM.
230 FIXME: Only the first SourceRange of affected_code is serialized. If
231 there are more, this data is currently missing.
232
233 :return: Dictionary with keys and values as string.
234 """
235 retval = {}
236
237 members = ['id',
238 'additional_info',
239 'debug_msg',
240 'message',
241 'message_base',
242 'message_arguments',
243 'origin',
244 'confidence']
245
246 for member in members:
247 value = getattr(self, member)
248 retval[member] = '' if value is None else str(value)
249
250 retval['severity'] = str(RESULT_SEVERITY.reverse.get(
251 self.severity, ''))
252 if len(self.affected_code) > 0:
253 retval['file'] = self.affected_code[0].file
254 line = self.affected_code[0].start.line
255 retval['line_nr'] = '' if line is None else str(line)
256 else:
257 retval['file'], retval['line_nr'] = '', ''
258
259 return retval
260
261 @enforce_signature
262 def apply(self, file_dict: dict):
263 """
264 Applies all contained diffs to the given file_dict. This operation will
265 be done in-place.
266
267 :param file_dict: A dictionary containing all files with filename as
268 key and all lines a value. Will be modified.
269 """
270 for filename, diff in self.diffs.items():
271 file_dict[filename] = diff.modified
272
273 def __add__(self, other):
274 """
275 Joins those patches to one patch.
276
277 :param other: The other patch.
278 """
279 assert isinstance(self.diffs, dict)
280 assert isinstance(other.diffs, dict)
281
282 for filename in other.diffs:
283 if filename in self.diffs:
284 self.diffs[filename] += other.diffs[filename]
285 else:
286 self.diffs[filename] = other.diffs[filename]
287
288 return self
289
290 def overlaps(self, ranges):
291 """
292 Determines if the result overlaps with source ranges provided.
293
294 :param ranges: A list SourceRange objects to check for overlap.
295 :return: True if the ranges overlap with the result.
296 """
297 if isinstance(ranges, SourceRange):
298 ranges = [ranges]
299
300 for range in ranges:
301 for self_range in self.affected_code:
302 if range.overlaps(self_range):
303 return True
304
305 return False
306
307 def location_repr(self):
308 """
309 Retrieves a string, that briefly represents
310 the affected code of the result.
311
312 :return: A string containing all of the affected files
313 separated by a comma.
314 """
315
316 if not self.affected_code:
317 return 'the whole project'
318
319 # Set important to exclude duplicate file names
320 range_paths = set(sourcerange.file
321 for sourcerange in self.affected_code)
322
323 return ', '.join(repr(relpath(range_path))
324 for range_path in sorted(range_paths))
325
326 def __json__(self, use_relpath=False):
327 _dict = get_public_members(self)
328 if use_relpath and _dict['diffs']:
329 _dict['diffs'] = {relpath(file): diff
330 for file, diff in _dict['diffs'].items()}
331 _dict['aspect'] = type(self.aspect).__qualname__
332 return _dict
333
[end of coalib/results/Result.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/results/Result.py b/coalib/results/Result.py
--- a/coalib/results/Result.py
+++ b/coalib/results/Result.py
@@ -177,8 +177,12 @@
The related file.
:param line:
The first related line in the file. (First line is 1)
+ line = None means an unknown line occurs anywhere in the file.
:param column:
The column indicating the first character. (First character is 1)
+ column = None means there is an error with the whole line.
+ All combinations of None values are allowed for line and column,
+ Except line = None and column = <some number>
:param end_line:
The last related line in the file.
:param end_column:
| {"golden_diff": "diff --git a/coalib/results/Result.py b/coalib/results/Result.py\n--- a/coalib/results/Result.py\n+++ b/coalib/results/Result.py\n@@ -177,8 +177,12 @@\n The related file.\n :param line:\n The first related line in the file. (First line is 1)\n+ line = None means an unknown line occurs anywhere in the file.\n :param column:\n The column indicating the first character. (First character is 1)\n+ column = None means there is an error with the whole line.\n+ All combinations of None values are allowed for line and column,\n+ Except line = None and column = <some number>\n :param end_line:\n The last related line in the file.\n :param end_column:\n", "issue": "Result.py: Add docstring regarding the case that line or column is None\nThe output generated by coala can have ```line``` being ```None``` or ```column``` being ```None```.\r\n\r\n```column = None``` means there is an error with the whole line.\r\n```line = None``` means the whole file. The reason behind it is that it's an unknown line means it can occur anywhere in the file.\r\n\r\nAll combinations with None values and numbers for line and column are allowed, except ```line=None``` and ```column=<some number>```\r\n\r\nThis should be clarified in the documentation.\r\n\r\n(ref: http://api.coala.io/en/latest/coalib.results.html#module-coalib.results.Result)\n", "before_files": [{"content": "import uuid\nfrom os.path import relpath\n\nfrom coala_utils.decorators import (\n enforce_signature, generate_ordering, generate_repr, get_public_members)\nfrom coalib.bearlib.aspects import aspectbase\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.results.SourceRange import SourceRange\n\n\n# Omit additional info, debug message and diffs for brevity\n@generate_repr(('id', hex),\n 'origin',\n 'affected_code',\n ('severity', RESULT_SEVERITY.reverse.get),\n 'confidence',\n 'message',\n ('aspect', lambda aspect: type(aspect).__qualname__),\n 'applied_actions')\n@generate_ordering('affected_code',\n 'severity',\n 'confidence',\n 'origin',\n 'message_base',\n 'message_arguments',\n 'aspect',\n 'additional_info',\n 'diffs',\n 'debug_msg',\n 'applied_actions')\nclass Result:\n \"\"\"\n A result is anything that has an origin and a message.\n\n Optionally it might affect a file.\n\n Result messages can also have arguments. The message is python\n style formatted with these arguments.\n\n >>> r = Result('origin','{arg1} and {arg2}', \\\n message_arguments={'arg1': 'foo', 'arg2': 'bar'})\n >>> r.message\n 'foo and bar'\n\n Message arguments may be changed later. The result message\n will also reflect these changes.\n\n >>> r.message_arguments = {'arg1': 'spam', 'arg2': 'eggs'}\n >>> r.message\n 'spam and eggs'\n\n \"\"\"\n\n @enforce_signature\n def __init__(self,\n origin,\n message: str,\n affected_code: (tuple, list) = (),\n severity: int = RESULT_SEVERITY.NORMAL,\n additional_info: str = '',\n debug_msg='',\n diffs: (dict, None) = None,\n confidence: int = 100,\n aspect: (aspectbase, None) = None,\n message_arguments: dict = {},\n applied_actions: dict = {}):\n \"\"\"\n :param origin:\n Class name or creator object of this object.\n :param message:\n Base message to show with this result.\n :param affected_code:\n A tuple of ``SourceRange`` objects pointing to related positions\n in the source code.\n :param severity:\n Severity of this result.\n :param additional_info:\n A long description holding additional information about the issue\n and/or how to fix it. 
You can use this like a manual entry for a\n category of issues.\n :param debug_msg:\n A message which may help the user find out why this result was\n yielded.\n :param diffs:\n A dictionary with filename as key and ``Diff`` object\n associated with it as value.\n :param confidence:\n A number between 0 and 100 describing the likelihood of this result\n being a real issue.\n :param aspect:\n An aspectclass instance which this result is associated to.\n Note that this should be a leaf of the aspect tree!\n (If you have a node, spend some time figuring out which of\n the leafs exactly your result belongs to.)\n :param message_arguments:\n Arguments to be provided to the base message.\n :param applied_actions:\n A dictionary that contains the result, file_dict, file_diff_dict and\n the section for an action.\n :raises ValueError:\n Raised when confidence is not between 0 and 100.\n :raises KeyError:\n Raised when message_base can not be formatted with\n message_arguments.\n \"\"\"\n origin = origin or ''\n if not isinstance(origin, str):\n origin = origin.__class__.__name__\n if severity not in RESULT_SEVERITY.reverse:\n raise ValueError('severity is not a valid RESULT_SEVERITY')\n\n self.origin = origin\n self.message_base = message\n self.message_arguments = message_arguments\n self.applied_actions = applied_actions\n if message_arguments:\n self.message_base.format(**self.message_arguments)\n self.debug_msg = debug_msg\n self.additional_info = additional_info\n # Sorting is important for tuple comparison\n self.affected_code = tuple(sorted(affected_code))\n self.severity = severity\n if confidence < 0 or confidence > 100:\n raise ValueError('Value of confidence should be between 0 and 100.')\n self.confidence = confidence\n self.diffs = diffs\n self.id = uuid.uuid4().int\n self.aspect = aspect\n if self.aspect and not self.additional_info:\n self.additional_info = '{} {}'.format(\n aspect.Docs.importance_reason, aspect.Docs.fix_suggestions)\n\n @property\n def message(self):\n if not self.message_arguments:\n return self.message_base\n return self.message_base.format(**self.message_arguments)\n\n @message.setter\n def message(self, value: str):\n self.message_base = value\n\n def set_applied_actions(self, applied_actions):\n self.applied_actions = applied_actions\n\n def get_applied_actions(self):\n return self.applied_actions\n\n @classmethod\n @enforce_signature\n def from_values(cls,\n origin,\n message: str,\n file: str,\n line: (int, None) = None,\n column: (int, None) = None,\n end_line: (int, None) = None,\n end_column: (int, None) = None,\n severity: int = RESULT_SEVERITY.NORMAL,\n additional_info: str = '',\n debug_msg='',\n diffs: (dict, None) = None,\n confidence: int = 100,\n aspect: (aspectbase, None) = None,\n message_arguments: dict = {}):\n \"\"\"\n Creates a result with only one SourceRange with the given start and end\n locations.\n\n :param origin:\n Class name or creator object of this object.\n :param message:\n Base message to show with this result.\n :param message_arguments:\n Arguments to be provided to the base message\n :param file:\n The related file.\n :param line:\n The first related line in the file. (First line is 1)\n :param column:\n The column indicating the first character. 
(First character is 1)\n :param end_line:\n The last related line in the file.\n :param end_column:\n The column indicating the last character.\n :param severity:\n Severity of this result.\n :param additional_info:\n A long description holding additional information about the issue\n and/or how to fix it. You can use this like a manual entry for a\n category of issues.\n :param debug_msg:\n A message which may help the user find out why this result was\n yielded.\n :param diffs:\n A dictionary with filename as key and ``Diff`` object\n associated with it as value.\n :param confidence:\n A number between 0 and 100 describing the likelihood of this result\n being a real issue.\n :param aspect:\n An Aspect object which this result is associated to. Note that this\n should be a leaf of the aspect tree! (If you have a node, spend\n some time figuring out which of the leafs exactly your result\n belongs to.)\n \"\"\"\n source_range = SourceRange.from_values(file,\n line,\n column,\n end_line,\n end_column)\n\n return cls(origin=origin,\n message=message,\n affected_code=(source_range,),\n severity=severity,\n additional_info=additional_info,\n debug_msg=debug_msg,\n diffs=diffs,\n confidence=confidence,\n aspect=aspect,\n message_arguments=message_arguments)\n\n def to_string_dict(self):\n \"\"\"\n Makes a dictionary which has all keys and values as strings and\n contains all the data that the base Result has.\n\n FIXME: diffs are not serialized ATM.\n FIXME: Only the first SourceRange of affected_code is serialized. If\n there are more, this data is currently missing.\n\n :return: Dictionary with keys and values as string.\n \"\"\"\n retval = {}\n\n members = ['id',\n 'additional_info',\n 'debug_msg',\n 'message',\n 'message_base',\n 'message_arguments',\n 'origin',\n 'confidence']\n\n for member in members:\n value = getattr(self, member)\n retval[member] = '' if value is None else str(value)\n\n retval['severity'] = str(RESULT_SEVERITY.reverse.get(\n self.severity, ''))\n if len(self.affected_code) > 0:\n retval['file'] = self.affected_code[0].file\n line = self.affected_code[0].start.line\n retval['line_nr'] = '' if line is None else str(line)\n else:\n retval['file'], retval['line_nr'] = '', ''\n\n return retval\n\n @enforce_signature\n def apply(self, file_dict: dict):\n \"\"\"\n Applies all contained diffs to the given file_dict. This operation will\n be done in-place.\n\n :param file_dict: A dictionary containing all files with filename as\n key and all lines a value. 
Will be modified.\n \"\"\"\n for filename, diff in self.diffs.items():\n file_dict[filename] = diff.modified\n\n def __add__(self, other):\n \"\"\"\n Joins those patches to one patch.\n\n :param other: The other patch.\n \"\"\"\n assert isinstance(self.diffs, dict)\n assert isinstance(other.diffs, dict)\n\n for filename in other.diffs:\n if filename in self.diffs:\n self.diffs[filename] += other.diffs[filename]\n else:\n self.diffs[filename] = other.diffs[filename]\n\n return self\n\n def overlaps(self, ranges):\n \"\"\"\n Determines if the result overlaps with source ranges provided.\n\n :param ranges: A list SourceRange objects to check for overlap.\n :return: True if the ranges overlap with the result.\n \"\"\"\n if isinstance(ranges, SourceRange):\n ranges = [ranges]\n\n for range in ranges:\n for self_range in self.affected_code:\n if range.overlaps(self_range):\n return True\n\n return False\n\n def location_repr(self):\n \"\"\"\n Retrieves a string, that briefly represents\n the affected code of the result.\n\n :return: A string containing all of the affected files\n separated by a comma.\n \"\"\"\n\n if not self.affected_code:\n return 'the whole project'\n\n # Set important to exclude duplicate file names\n range_paths = set(sourcerange.file\n for sourcerange in self.affected_code)\n\n return ', '.join(repr(relpath(range_path))\n for range_path in sorted(range_paths))\n\n def __json__(self, use_relpath=False):\n _dict = get_public_members(self)\n if use_relpath and _dict['diffs']:\n _dict['diffs'] = {relpath(file): diff\n for file, diff in _dict['diffs'].items()}\n _dict['aspect'] = type(self.aspect).__qualname__\n return _dict\n", "path": "coalib/results/Result.py"}]} | 3,998 | 179 |
gh_patches_debug_54595 | rasdani/github-patches | git_diff | zulip__zulip-21726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create collapsible "Private messages" section in left sidebar
At present, private messages are collapsed in the left sidebar, unless the user is in a private message narrow. This has a few down sides:
1. Getting to a PM conversation generally requires multiple clicks.
2. It's not immediately clear who sent you a new private message, which is important for determining whether one needs to read it right away.
3. It can be hard for new users to figure out how to view and send private messages.
In order to address this, we should try making a private messages section in the left sidebar that is open by default. Specifically:
1. Make a Private messages section just above STREAMS in the left sidebar that is open by default.
2. In the new PMs section, use the same algorithm we use for stream topics to decide how many conversations to show.
3. Make the PMs section collapsible, similar to the collapsible sections in #20072. The open/collapsed state should be sticky as the user navigates around Zulip, closes and reopens the window, logs out and in, etc.
Note that this will likely require experimentation for us to get it right. To avoid misdirected effort, please post screenshots in the #design stream on chat.zulip.org for feedback. Also, if (3) can't be implemented quickly, we can test the experience in chat.zulip.org without waiting for it to be completed.
[Prior discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/private.20messages.20UI/near/1159032).
See also #11108.
</issue>
<code>
[start of tools/lib/capitalization.py]
1 import re
2 from typing import List, Match, Tuple
3
4 from bs4 import BeautifulSoup
5
6 # The phrases in this list will be ignored. The longest phrase is
7 # tried first; this removes the chance of smaller phrases changing
8 # the text before longer phrases are tried.
9 # The errors shown by `tools/check-capitalization` can be added to
10 # this list without any modification.
11 IGNORED_PHRASES = [
12 # Proper nouns and acronyms
13 r"API",
14 r"APNS",
15 r"Botserver",
16 r"Cookie Bot",
17 r"DevAuthBackend",
18 r"GCM",
19 r"GitHub",
20 r"Gravatar",
21 r"Help Center",
22 r"HTTP",
23 r"ID",
24 r"IDs",
25 r"IP",
26 r"JSON",
27 r"Kerberos",
28 r"LDAP",
29 r"Markdown",
30 r"OTP",
31 r"Pivotal",
32 r"PM",
33 r"PMs",
34 r"Slack",
35 r"Google",
36 r"Terms of Service",
37 r"Tuesday",
38 r"URL",
39 r"UUID",
40 r"Webathena",
41 r"WordPress",
42 r"Zephyr",
43 r"Zoom",
44 r"Zulip",
45 r"Zulip Server",
46 r"Zulip Account Security",
47 r"Zulip Security",
48 r"Zulip Cloud Standard",
49 r"BigBlueButton",
50 # Code things
51 r"\.zuliprc",
52 # BeautifulSoup will remove <z-user> which is horribly confusing,
53 # so we need more of the sentence.
54 r"<z-user></z-user> will have the same role",
55 # Things using "I"
56 r"I understand",
57 r"I'm",
58 r"I've",
59 # Specific short words
60 r"beta",
61 r"and",
62 r"bot",
63 r"e\.g\.",
64 r"enabled",
65 r"signups",
66 # Placeholders
67 r"keyword",
68 r"streamname",
69 r"user@example\.com",
70 # Fragments of larger strings
71 (r"your subscriptions on your Streams page"),
72 r"Add global time<br />Everyone sees global times in their own time zone\.",
73 r"user",
74 r"an unknown operating system",
75 r"Go to Settings",
76 # SPECIAL CASES
77 # Because topics usually are lower-case, this would look weird if it were capitalized
78 r"more topics",
79 # Used alone in a parenthetical where capitalized looks worse.
80 r"^deprecated$",
81 # Capital 'i' looks weird in reminders popover
82 r"in 1 hour",
83 r"in 20 minutes",
84 r"in 3 hours",
85 # these are used as topics
86 r"^new streams$",
87 r"^stream events$",
88 # These are used as example short names (e.g. an uncapitalized context):
89 r"^marketing$",
90 r"^cookie$",
91 # Used to refer custom time limits
92 r"\bN\b",
93 # Capital c feels obtrusive in clear status option
94 r"clear",
95 r"group private messages with \{recipient\}",
96 r"private messages with \{recipient\}",
97 r"private messages with yourself",
98 r"GIF",
99 # Emoji name placeholder
100 r"leafy green vegetable",
101 # Subdomain placeholder
102 r"your-organization-url",
103 # Used in invite modal
104 r"or",
105 # Used in GIPHY popover.
106 r"GIFs",
107 r"GIPHY",
108 # Used in our case studies
109 r"Technical University of Munich",
110 r"University of California San Diego",
111 # Used in stream creation form
112 r"email hidden",
113 # Use in compose box.
114 r"to send",
115 r"to add a new line",
116 # Used in showing Notification Bot read receipts message
117 "Notification Bot",
118 # Used in presence_enabled setting label
119 r"invisible mode off",
120 # Typeahead suggestions for "Pronouns" custom field type.
121 r"he/him",
122 r"she/her",
123 r"they/them",
124 ]
125
126 # Sort regexes in descending order of their lengths. As a result, the
127 # longer phrases will be ignored first.
128 IGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)
129
130 # Compile regexes to improve performance. This also extracts the
131 # text using BeautifulSoup and then removes extra whitespaces from
132 # it. This step enables us to add HTML in our regexes directly.
133 COMPILED_IGNORED_PHRASES = [
134 re.compile(" ".join(BeautifulSoup(regex, "lxml").text.split())) for regex in IGNORED_PHRASES
135 ]
136
137 SPLIT_BOUNDARY = "?.!" # Used to split string into sentences.
138 SPLIT_BOUNDARY_REGEX = re.compile(rf"[{SPLIT_BOUNDARY}]")
139
140 # Regexes which check capitalization in sentences.
141 DISALLOWED = [
142 r"^[a-z](?!\})", # Checks if the sentence starts with a lower case character.
143 r"^[A-Z][a-z]+[\sa-z0-9]+[A-Z]", # Checks if an upper case character exists
144 # after a lower case character when the first character is in upper case.
145 ]
146 DISALLOWED_REGEX = re.compile(r"|".join(DISALLOWED))
147
148 BANNED_WORDS = {
149 "realm": "The term realm should not appear in user-facing strings. Use organization instead.",
150 }
151
152
153 def get_safe_phrase(phrase: str) -> str:
154 """
155 Safe phrase is in lower case and doesn't contain characters which can
156 conflict with split boundaries. All conflicting characters are replaced
157 with low dash (_).
158 """
159 phrase = SPLIT_BOUNDARY_REGEX.sub("_", phrase)
160 return phrase.lower()
161
162
163 def replace_with_safe_phrase(matchobj: Match[str]) -> str:
164 """
165 The idea is to convert IGNORED_PHRASES into safe phrases, see
166 `get_safe_phrase()` function. The only exception is when the
167 IGNORED_PHRASE is at the start of the text or after a split
168 boundary; in this case, we change the first letter of the phrase
169 to upper case.
170 """
171 ignored_phrase = matchobj.group(0)
172 safe_string = get_safe_phrase(ignored_phrase)
173
174 start_index = matchobj.start()
175 complete_string = matchobj.string
176
177 is_string_start = start_index == 0
178 # We expect that there will be one space between split boundary
179 # and the next word.
180 punctuation = complete_string[max(start_index - 2, 0)]
181 is_after_split_boundary = punctuation in SPLIT_BOUNDARY
182 if is_string_start or is_after_split_boundary:
183 return safe_string.capitalize()
184
185 return safe_string
186
187
188 def get_safe_text(text: str) -> str:
189 """
190 This returns text which is rendered by BeautifulSoup and is in the
191 form that can be split easily and has all IGNORED_PHRASES processed.
192 """
193 soup = BeautifulSoup(text, "lxml")
194 text = " ".join(soup.text.split()) # Remove extra whitespaces.
195 for phrase_regex in COMPILED_IGNORED_PHRASES:
196 text = phrase_regex.sub(replace_with_safe_phrase, text)
197
198 return text
199
200
201 def is_capitalized(safe_text: str) -> bool:
202 sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)
203 return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)
204
205
206 def check_banned_words(text: str) -> List[str]:
207 lower_cased_text = text.lower()
208 errors = []
209 for word, reason in BANNED_WORDS.items():
210 if word in lower_cased_text:
211 # Hack: Should move this into BANNED_WORDS framework; for
212 # now, just hand-code the skips:
213 if "realm_name" in lower_cased_text:
214 continue
215 kwargs = dict(word=word, text=text, reason=reason)
216 msg = "{word} found in '{text}'. {reason}".format(**kwargs)
217 errors.append(msg)
218
219 return errors
220
221
222 def check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:
223 errors = []
224 ignored = []
225 banned_word_errors = []
226 for text in strings:
227 text = " ".join(text.split()) # Remove extra whitespaces.
228 safe_text = get_safe_text(text)
229 has_ignored_phrase = text != safe_text
230 capitalized = is_capitalized(safe_text)
231 if not capitalized:
232 errors.append(text)
233 elif has_ignored_phrase:
234 ignored.append(text)
235
236 banned_word_errors.extend(check_banned_words(text))
237
238 return sorted(errors), sorted(ignored), sorted(banned_word_errors)
239
[end of tools/lib/capitalization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -78,6 +78,9 @@
r"more topics",
# Used alone in a parenthetical where capitalized looks worse.
r"^deprecated$",
+ # We want the similar text in the Private Messages section to have the same capitalization.
+ r"more conversations",
+ r"back to streams",
# Capital 'i' looks weird in reminders popover
r"in 1 hour",
r"in 20 minutes",
| {"golden_diff": "diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py\n--- a/tools/lib/capitalization.py\n+++ b/tools/lib/capitalization.py\n@@ -78,6 +78,9 @@\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n+ # We want the similar text in the Private Messages section to have the same capitalization.\n+ r\"more conversations\",\n+ r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n", "issue": "Create collapsible \"Private messages\" section in left sidebar\nAt present, private messages are collapsed in the left sidebar, unless the user is in a private message narrow. This has a few down sides:\r\n\r\n1. Getting to a PM conversation generally requires multiple clicks.\r\n2. It's not immediately clear who send you a new private message, which is important for determining whether one needs to read it right away.\r\n3. It can be hard for new users to figure out how to view and send private messages.\r\n\r\nIn order to address this, we should try making a private messages section in the left sidebar that is open by default. Specifically:\r\n\r\n1. Make a Private messages section just above STREAMS in the left sidebar that is open by default.\r\n2. In the new PMs section, use the same algorithm we use for stream topics to decide how many conversations to show.\r\n3. Make the PMs section collapsible, similar to the collapsible sections in #20072. The open/collapsed state should be sticky as the user navigates around Zulip, closes and reopens the window, logs out and in, etc.\r\n\r\nNote that this will likely require experimentation for us to get it right. To avoid misdirected effort, please post screenshots in the #design stream on chat.zulip.org for feedback. Also, if (3) can't be implemented quickly, we can test the experience in chat.zulip.org without waiting for it to be completed.\r\n\r\n[Prior discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/private.20messages.20UI/near/1159032).\r\n\r\nSee also #11108.\n", "before_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. 
The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. 
This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. 
{reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}]} | 3,415 | 140 |
gh_patches_debug_9063 | rasdani/github-patches | git_diff | pypa__virtualenv-1886 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`virtualenv --version` prints spurious error as of 20.0.24
**Issue**
When running `virtualenv --version`, a logger error is printed to stderr, though the return code is still 0.
**Environment**
Tested with Python 3.7 and 3.8, virtualenvs managed with pipenv
Ubuntu 18.04 on WSL
```
$ rm -rf tmp && mkdir tmp && cd tmp
$ pipenv install "virtualenv==20.0.23"
$ pipenv run virtualenv --version
virtualenv 20.0.23 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py
$ rm -rf tmp && mkdir tmp && cd tmp
$ pipenv install "virtualenv==20.0.24"
$ pipenv run virtualenv --version
virtualenv 20.0.24 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py
ERROR:root:SystemExit: 0
$ pipenv run virtualenv --version > /dev/null
ERROR:root:SystemExit: 0
$ echo $?
0
```
Nothing else is printed with `-vvv --with-traceback`
</issue>
<code>
[start of src/virtualenv/__main__.py]
1 from __future__ import absolute_import, print_function, unicode_literals
2
3 import logging
4 import os
5 import sys
6 from datetime import datetime
7
8
9 def run(args=None, options=None):
10 start = datetime.now()
11 from virtualenv.util.error import ProcessCallFailed
12 from virtualenv.run import cli_run
13
14 if args is None:
15 args = sys.argv[1:]
16 try:
17 session = cli_run(args, options)
18 logging.warning(LogSession(session, start))
19 except ProcessCallFailed as exception:
20 print("subprocess call failed for {} with code {}".format(exception.cmd, exception.code))
21 print(exception.out, file=sys.stdout, end="")
22 print(exception.err, file=sys.stderr, end="")
23 raise SystemExit(exception.code)
24
25
26 class LogSession(object):
27 def __init__(self, session, start):
28 self.session = session
29 self.start = start
30
31 def __str__(self):
32 from virtualenv.util.six import ensure_text
33
34 spec = self.session.creator.interpreter.spec
35 elapsed = (datetime.now() - self.start).total_seconds() * 1000
36 lines = [
37 "created virtual environment {} in {:.0f}ms".format(spec, elapsed),
38 " creator {}".format(ensure_text(str(self.session.creator))),
39 ]
40 if self.session.seeder.enabled:
41 lines += (
42 " seeder {}".format(ensure_text(str(self.session.seeder))),
43 " added seed packages: {}".format(
44 ", ".join(
45 sorted(
46 "==".join(i.stem.split("-"))
47 for i in self.session.creator.purelib.iterdir()
48 if i.suffix == ".dist-info"
49 ),
50 ),
51 ),
52 )
53 if self.session.activators:
54 lines.append(" activators {}".format(",".join(i.__class__.__name__ for i in self.session.activators)))
55 return os.linesep.join(lines)
56
57
58 def run_with_catch(args=None):
59 from virtualenv.config.cli.parser import VirtualEnvOptions
60
61 options = VirtualEnvOptions()
62 try:
63 run(args, options)
64 except (KeyboardInterrupt, SystemExit, Exception) as exception:
65 try:
66 if getattr(options, "with_traceback", False):
67 raise
68 else:
69 logging.error("%s: %s", type(exception).__name__, exception)
70 code = exception.code if isinstance(exception, SystemExit) else 1
71 sys.exit(code)
72 finally:
73 logging.shutdown() # force flush of log messages before the trace is printed
74
75
76 if __name__ == "__main__": # pragma: no cov
77 run_with_catch() # pragma: no cov
78
[end of src/virtualenv/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/__main__.py b/src/virtualenv/__main__.py
--- a/src/virtualenv/__main__.py
+++ b/src/virtualenv/__main__.py
@@ -66,7 +66,8 @@
if getattr(options, "with_traceback", False):
raise
else:
- logging.error("%s: %s", type(exception).__name__, exception)
+ if not (isinstance(exception, SystemExit) and exception.code == 0):
+ logging.error("%s: %s", type(exception).__name__, exception)
code = exception.code if isinstance(exception, SystemExit) else 1
sys.exit(code)
finally:
| {"golden_diff": "diff --git a/src/virtualenv/__main__.py b/src/virtualenv/__main__.py\n--- a/src/virtualenv/__main__.py\n+++ b/src/virtualenv/__main__.py\n@@ -66,7 +66,8 @@\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n- logging.error(\"%s: %s\", type(exception).__name__, exception)\n+ if not (isinstance(exception, SystemExit) and exception.code == 0):\n+ logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if isinstance(exception, SystemExit) else 1\n sys.exit(code)\n finally:\n", "issue": "`virtualenv --version` prints spurious error as of 20.0.24\n**Issue**\r\n\r\nWhen running `virtualenv --version`, a logger error is printed to stderr, though the return code is still 0.\r\n\r\n**Environment**\r\n\r\nTested with Python 3.7 and 3.8, virtualenvs managed with pipenv\r\nUbuntu 18.04 on WSL\r\n\r\n```\r\n$ rm-rf tmp && mkdir tmp && cd tmp\r\n$ pipenv install \"virtualenv==20.0.23\"\r\n$ pipenv run virtualenv --version\r\nvirtualenv 20.0.23 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py\r\n\r\n$ rm-rf tmp && mkdir tmp && cd tmp\r\n$ pipenv install \"virtualenv==20.0.24\"\r\n$ pipenv run virtualenv --version\r\nvirtualenv 20.0.24 from tmp/.venv/lib/python3.7/site-packages/virtualenv/__init__.py\r\nERROR:root:SystemExit: 0\r\n$ pipenv run virtualenv --version > /dev/null\r\nERROR:root:SystemExit: 0\r\n$ echo $?\r\n0\r\n```\r\n\r\nNothing else is printed with `-vvv --with-traceback`\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\n\n\ndef run(args=None, options=None):\n start = datetime.now()\n from virtualenv.util.error import ProcessCallFailed\n from virtualenv.run import cli_run\n\n if args is None:\n args = sys.argv[1:]\n try:\n session = cli_run(args, options)\n logging.warning(LogSession(session, start))\n except ProcessCallFailed as exception:\n print(\"subprocess call failed for {} with code {}\".format(exception.cmd, exception.code))\n print(exception.out, file=sys.stdout, end=\"\")\n print(exception.err, file=sys.stderr, end=\"\")\n raise SystemExit(exception.code)\n\n\nclass LogSession(object):\n def __init__(self, session, start):\n self.session = session\n self.start = start\n\n def __str__(self):\n from virtualenv.util.six import ensure_text\n\n spec = self.session.creator.interpreter.spec\n elapsed = (datetime.now() - self.start).total_seconds() * 1000\n lines = [\n \"created virtual environment {} in {:.0f}ms\".format(spec, elapsed),\n \" creator {}\".format(ensure_text(str(self.session.creator))),\n ]\n if self.session.seeder.enabled:\n lines += (\n \" seeder {}\".format(ensure_text(str(self.session.seeder))),\n \" added seed packages: {}\".format(\n \", \".join(\n sorted(\n \"==\".join(i.stem.split(\"-\"))\n for i in self.session.creator.purelib.iterdir()\n if i.suffix == \".dist-info\"\n ),\n ),\n ),\n )\n if self.session.activators:\n lines.append(\" activators {}\".format(\",\".join(i.__class__.__name__ for i in self.session.activators)))\n return os.linesep.join(lines)\n\n\ndef run_with_catch(args=None):\n from virtualenv.config.cli.parser import VirtualEnvOptions\n\n options = VirtualEnvOptions()\n try:\n run(args, options)\n except (KeyboardInterrupt, SystemExit, Exception) as exception:\n try:\n if getattr(options, \"with_traceback\", False):\n raise\n else:\n logging.error(\"%s: %s\", type(exception).__name__, exception)\n code = exception.code if 
isinstance(exception, SystemExit) else 1\n sys.exit(code)\n finally:\n logging.shutdown() # force flush of log messages before the trace is printed\n\n\nif __name__ == \"__main__\": # pragma: no cov\n run_with_catch() # pragma: no cov\n", "path": "src/virtualenv/__main__.py"}]} | 1,529 | 151 |
gh_patches_debug_22538 | rasdani/github-patches | git_diff | networkx__networkx-1976 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
all_node_cuts returns too few and incorrect cuts.
This could be a documentation issue, a bug or a user understanding issue. Are these cases pathological for the algorithm?
Given a square graph:
```
a -- b
| |
c -- d
```
Based on a cursory reading of the documentation, I would have expected all_node_cuts() to return:
```
[{'a','d'}, {'c','b'}]
```
I get `[{'a','c'}]`, but if this is a valid node cut, then surely {a,b}, {b,d}, and {c,d} are equally valid, and a function called "all node cuts" should return them.
```
G = nx.Graph([('a','b'), ('a','c'), ('c','d'), ('b','d')])
print( G.nodes() )
print( G.edges() )
print( list(nx.all_node_cuts(G)) )
>>> ['a', 'c', 'b', 'd']
>>> [('a', 'c'), ('a', 'b'), ('c', 'd'), ('b', 'd')]
>>> [{'a', 'c'}]
```
Expanding to a hexagon, we see a similar pattern of node cuts. There are many symmetric node cuts omitted from the results list. Two of the proposed cuts fail to create "two or more connected components" as the documentation suggests.
```
G = nx.Graph([('a','b'), ('b','c'), ('c','d'),('d','e'), ('e','f'),('f','a')])
list(nx.all_node_cuts(G))
>>> [{'a', 'c'}, {'a', 'b'}, {'b', 'c'}]
```
</issue>
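
For reference, the expectation in the report can be confirmed by brute force: enumerate every node subset of size equal to the node connectivity and keep those whose removal disconnects the graph. This is only a sketch for checking the expected answer, not part of NetworkX:

```python
from itertools import combinations

import networkx as nx

G = nx.Graph([("a", "b"), ("a", "c"), ("c", "d"), ("b", "d")])
k = nx.node_connectivity(G)  # 2 for this 4-cycle
cuts = [set(c) for c in combinations(G, k)
        if not nx.is_connected(G.subgraph(set(G) - set(c)))]
print(sorted(sorted(c) for c in cuts))  # [['a', 'd'], ['b', 'c']]
```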
<code>
[start of networkx/algorithms/connectivity/kcutsets.py]
1 # -*- coding: utf-8 -*-
2 """
3 Kanevsky all minimum node k cutsets algorithm.
4 """
5 from operator import itemgetter
6
7 import networkx as nx
8 from .utils import build_auxiliary_node_connectivity
9 from networkx.algorithms.flow import (
10 build_residual_network,
11 edmonds_karp,
12 shortest_augmenting_path,
13 )
14 default_flow_func = edmonds_karp
15
16
17 __author__ = '\n'.join(['Jordi Torrents <[email protected]>'])
18
19 __all__ = ['all_node_cuts']
20
21 def all_node_cuts(G, k=None, flow_func=None):
22 r"""Returns all minimum k cutsets of an undirected graph G.
23
24 This implementation is based on Kanevsky's algorithm [1]_ for finding all
25 minimum-size node cut-sets of an undirected graph G; ie the set (or sets)
26 of nodes of cardinality equal to the node connectivity of G. Thus if
27 removed, would break G into two or more connected components.
28
29 Parameters
30 ----------
31 G : NetworkX graph
32 Undirected graph
33
34 k : Integer
35 Node connectivity of the input graph. If k is None, then it is
36 computed. Default value: None.
37
38 flow_func : function
39 Function to perform the underlying flow computations. Default value
40 edmonds_karp. This function performs better in sparse graphs with
41 right tailed degree distributions. shortest_augmenting_path will
42 perform better in denser graphs.
43
44
45 Returns
46 -------
47 cuts : a generator of node cutsets
48 Each node cutset has cardinality equal to the node connectivity of
49 the input graph.
50
51 Examples
52 --------
53 >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2
54 >>> G = nx.grid_2d_graph(5, 5)
55 >>> cutsets = list(nx.all_node_cuts(G))
56 >>> len(cutsets)
57 4
58 >>> all(2 == len(cutset) for cutset in cutsets)
59 True
60 >>> nx.node_connectivity(G)
61 2
62
63 Notes
64 -----
65 This implementation is based on the sequential algorithm for finding all
66 minimum-size separating vertex sets in a graph [1]_. The main idea is to
67 compute minimum cuts using local maximum flow computations among a set
68 of nodes of highest degree and all other non-adjacent nodes in the Graph.
69 Once we find a minimum cut, we add an edge between the high degree
70 node and the target node of the local maximum flow computation to make
71 sure that we will not find that minimum cut again.
72
73 See also
74 --------
75 node_connectivity
76 edmonds_karp
77 shortest_augmenting_path
78
79 References
80 ----------
81 .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex
82 sets in a graph. Networks 23(6), 533--541.
83 http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract
84
85 """
86 if not nx.is_connected(G):
87 raise nx.NetworkXError('Input graph is disconnected.')
88
89 # Initialize data structures.
90 # Keep track of the cuts already computed so we do not repeat them.
91 seen = []
92 # Even-Tarjan reduction is what we call auxiliary digraph
93 # for node connectivity.
94 H = build_auxiliary_node_connectivity(G)
95 mapping = H.graph['mapping']
96 R = build_residual_network(H, 'capacity')
97 kwargs = dict(capacity='capacity', residual=R)
98 # Define default flow function
99 if flow_func is None:
100 flow_func = default_flow_func
101 if flow_func is shortest_augmenting_path:
102 kwargs['two_phase'] = True
103 # Begin the actual algorithm
104 # step 1: Find node connectivity k of G
105 if k is None:
106 k = nx.node_connectivity(G, flow_func=flow_func)
107 # step 2:
108 # Find k nodes with top degree, call it X:
109 X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}
110 # Check if X is a k-node-cutset
111 if _is_separating_set(G, X):
112 seen.append(X)
113 yield X
114
115 for x in X:
116 # step 3: Compute local connectivity flow of x with all other
117 # non adjacent nodes in G
118 non_adjacent = set(G) - X - set(G[x])
119 for v in non_adjacent:
120 # step 4: compute maximum flow in an Even-Tarjan reduction H of G
121 # and step:5 build the associated residual network R
122 R = flow_func(H, '%sB' % mapping[x], '%sA' % mapping[v], **kwargs)
123 flow_value = R.graph['flow_value']
124
125 if flow_value == k:
126 ## Remove saturated edges form the residual network
127 saturated_edges = [(u, w, d) for (u, w, d) in
128 R.edges(data=True)
129 if d['capacity'] == d['flow']]
130 R.remove_edges_from(saturated_edges)
131 # step 6: shrink the strongly connected components of
132 # residual flow network R and call it L
133 L = nx.condensation(R)
134 cmap = L.graph['mapping']
135 # step 7: Compute antichains of L; they map to closed sets in H
136 # Any edge in H that links a closed set is part of a cutset
137 for antichain in nx.antichains(L):
138 # Nodes in an antichain of the condensation graph of
139 # the residual network map to a closed set of nodes that
140 # define a node partition of the auxiliary digraph H.
141 S = {n for n, scc in cmap.items() if scc in antichain}
142 # Find the cutset that links the node partition (S,~S) in H
143 cutset = set()
144 for u in S:
145 cutset.update((u, w) for w in H[u] if w not in S)
146 # The edges in H that form the cutset are internal edges
147 # (ie edges that represent a node of the original graph G)
148 node_cut = {H.node[n]['id'] for edge in cutset for n in edge}
149
150 if len(node_cut) == k:
151 if node_cut not in seen:
152 yield node_cut
153 seen.append(node_cut)
154 # Add an edge (x, v) to make sure that we do not
155 # find this cutset again. This is equivalent
156 # of adding the edge in the input graph
157 # G.add_edge(x, v) and then regenerate H and R:
158 # Add edges to the auxiliary digraph.
159 H.add_edge('%sB' % mapping[x], '%sA' % mapping[v],
160 capacity=1)
161 H.add_edge('%sB' % mapping[v], '%sA' % mapping[x],
162 capacity=1)
163 # Add edges to the residual network.
164 R.add_edge('%sB' % mapping[x], '%sA' % mapping[v],
165 capacity=1)
166 R.add_edge('%sA' % mapping[v], '%sB' % mapping[x],
167 capacity=1)
168 break
169 # Add again the saturated edges to reuse the residual network
170 R.add_edges_from(saturated_edges)
171
172
173 def _is_separating_set(G, cut):
174 """Assumes that the input graph is connected"""
175 if len(cut) == len(G) - 1:
176 return True
177
178 H = G.copy(with_data=False)
179 H.remove_nodes_from(cut)
180 if nx.is_connected(H):
181 return False
182 return True
183
[end of networkx/algorithms/connectivity/kcutsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/algorithms/connectivity/kcutsets.py b/networkx/algorithms/connectivity/kcutsets.py
--- a/networkx/algorithms/connectivity/kcutsets.py
+++ b/networkx/algorithms/connectivity/kcutsets.py
@@ -3,6 +3,7 @@
Kanevsky all minimum node k cutsets algorithm.
"""
from operator import itemgetter
+from itertools import combinations
import networkx as nx
from .utils import build_auxiliary_node_connectivity
@@ -86,6 +87,22 @@
if not nx.is_connected(G):
raise nx.NetworkXError('Input graph is disconnected.')
+ # Addess some corner cases first.
+ # For cycle graphs
+ if G.order() == G.size():
+ if all(2 == d for n, d in G.degree()):
+ seen = set()
+ for u in G:
+ for v in nx.non_neighbors(G, u):
+ if (u, v) not in seen and (v, u) not in seen:
+ yield {v, u}
+ seen.add((v, u))
+ return
+ # For complete Graphs
+ if nx.density(G) == 1:
+ for cut_set in combinations(G, len(G)-1):
+ yield set(cut_set)
+ return
# Initialize data structures.
# Keep track of the cuts already computed so we do not repeat them.
seen = []
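
With the cycle-graph branch added above, every pair of non-adjacent nodes is yielded directly, which covers both the square and the hexagon cases from the report. A quick check, assuming a NetworkX build that contains this patch:

```python
import networkx as nx

hexagon = nx.cycle_graph(["a", "b", "c", "d", "e", "f"])
cuts = list(nx.all_node_cuts(hexagon))
print(len(cuts))                        # 9: one cut per non-adjacent pair of a 6-cycle
print(all(len(c) == 2 for c in cuts))   # True: each cut has cardinality k = 2
```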
| {"golden_diff": "diff --git a/networkx/algorithms/connectivity/kcutsets.py b/networkx/algorithms/connectivity/kcutsets.py\n--- a/networkx/algorithms/connectivity/kcutsets.py\n+++ b/networkx/algorithms/connectivity/kcutsets.py\n@@ -3,6 +3,7 @@\n Kanevsky all minimum node k cutsets algorithm.\n \"\"\"\n from operator import itemgetter\n+from itertools import combinations\n \n import networkx as nx\n from .utils import build_auxiliary_node_connectivity\n@@ -86,6 +87,22 @@\n if not nx.is_connected(G):\n raise nx.NetworkXError('Input graph is disconnected.')\n \n+ # Addess some corner cases first.\n+ # For cycle graphs\n+ if G.order() == G.size():\n+ if all(2 == d for n, d in G.degree()):\n+ seen = set()\n+ for u in G:\n+ for v in nx.non_neighbors(G, u):\n+ if (u, v) not in seen and (v, u) not in seen:\n+ yield {v, u}\n+ seen.add((v, u))\n+ return\n+ # For complete Graphs\n+ if nx.density(G) == 1:\n+ for cut_set in combinations(G, len(G)-1):\n+ yield set(cut_set)\n+ return\n # Initialize data structures.\n # Keep track of the cuts already computed so we do not repeat them.\n seen = []\n", "issue": "all_node_cuts returns too few and incorrect cuts.\nThis could be a documentation issue, a bug or a user understanding issue. Are these cases pathological for the algorithm?\n\nGiven a square graph:\n\n```\n a -- b\n | |\n c -- d\n```\n\nBased on a cursory reading of the documentation, I would have expected all_node_cuts() to return:\n\n```\n[{'a','d'}, {'c','b'}]\n```\n\nI get `[{'a','c'}]` but if this is a valid node cut then surely {a,b}, {b,d}, {c,d} are also equally valid and a function called \"all node cuts\" should return them. \n\n```\nG = nx.Graph([('a','b'), ('a','c'), ('c','d'), ('b','d')])\nprint( G.nodes() )\nprint( G.edges() )\nprint( list(nx.all_node_cuts(G)) )\n\n>>> ['a', 'c', 'b', 'd']\n>>> [('a', 'c'), ('a', 'b'), ('c', 'd'), ('b', 'd')]\n>>> [{'a', 'c'}]\n```\n\nExpanding to a hexagon, we see similar pattern of node cuts. There are many isometric node cuts omitted from the results list. Two of the proposed cuts fail to create \"two or more connected components\" as the documentation suggests.\n\n```\nG = nx.Graph([('a','b'), ('b','c'), ('c','d'),('d','e'), ('e','f'),('f','a')])\nlist(nx.all_node_cuts(G))\n>>> [{'a', 'c'}, {'a', 'b'}, {'b', 'c'}]\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nKanevsky all minimum node k cutsets algorithm.\n\"\"\"\nfrom operator import itemgetter\n\nimport networkx as nx\nfrom .utils import build_auxiliary_node_connectivity\nfrom networkx.algorithms.flow import (\n build_residual_network,\n edmonds_karp,\n shortest_augmenting_path,\n)\ndefault_flow_func = edmonds_karp\n\n\n__author__ = '\\n'.join(['Jordi Torrents <[email protected]>'])\n\n__all__ = ['all_node_cuts']\n\ndef all_node_cuts(G, k=None, flow_func=None):\n r\"\"\"Returns all minimum k cutsets of an undirected graph G. \n\n This implementation is based on Kanevsky's algorithm [1]_ for finding all\n minimum-size node cut-sets of an undirected graph G; ie the set (or sets) \n of nodes of cardinality equal to the node connectivity of G. Thus if \n removed, would break G into two or more connected components.\n \n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n k : Integer\n Node connectivity of the input graph. If k is None, then it is \n computed. Default value: None.\n\n flow_func : function\n Function to perform the underlying flow computations. Default value\n edmonds_karp. 
This function performs better in sparse graphs with\n right tailed degree distributions. shortest_augmenting_path will\n perform better in denser graphs.\n \n\n Returns\n -------\n cuts : a generator of node cutsets\n Each node cutset has cardinality equal to the node connectivity of\n the input graph.\n\n Examples\n --------\n >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2\n >>> G = nx.grid_2d_graph(5, 5)\n >>> cutsets = list(nx.all_node_cuts(G))\n >>> len(cutsets)\n 4\n >>> all(2 == len(cutset) for cutset in cutsets)\n True\n >>> nx.node_connectivity(G)\n 2\n\n Notes\n -----\n This implementation is based on the sequential algorithm for finding all\n minimum-size separating vertex sets in a graph [1]_. The main idea is to\n compute minimum cuts using local maximum flow computations among a set \n of nodes of highest degree and all other non-adjacent nodes in the Graph.\n Once we find a minimum cut, we add an edge between the high degree\n node and the target node of the local maximum flow computation to make \n sure that we will not find that minimum cut again.\n\n See also\n --------\n node_connectivity\n edmonds_karp\n shortest_augmenting_path\n\n References\n ----------\n .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex \n sets in a graph. Networks 23(6), 533--541.\n http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract\n\n \"\"\"\n if not nx.is_connected(G):\n raise nx.NetworkXError('Input graph is disconnected.')\n\n # Initialize data structures.\n # Keep track of the cuts already computed so we do not repeat them.\n seen = []\n # Even-Tarjan reduction is what we call auxiliary digraph \n # for node connectivity.\n H = build_auxiliary_node_connectivity(G)\n mapping = H.graph['mapping']\n R = build_residual_network(H, 'capacity')\n kwargs = dict(capacity='capacity', residual=R)\n # Define default flow function\n if flow_func is None:\n flow_func = default_flow_func\n if flow_func is shortest_augmenting_path:\n kwargs['two_phase'] = True\n # Begin the actual algorithm\n # step 1: Find node connectivity k of G\n if k is None:\n k = nx.node_connectivity(G, flow_func=flow_func)\n # step 2: \n # Find k nodes with top degree, call it X:\n X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}\n # Check if X is a k-node-cutset\n if _is_separating_set(G, X):\n seen.append(X)\n yield X\n\n for x in X:\n # step 3: Compute local connectivity flow of x with all other\n # non adjacent nodes in G\n non_adjacent = set(G) - X - set(G[x])\n for v in non_adjacent:\n # step 4: compute maximum flow in an Even-Tarjan reduction H of G\n # and step:5 build the associated residual network R\n R = flow_func(H, '%sB' % mapping[x], '%sA' % mapping[v], **kwargs)\n flow_value = R.graph['flow_value']\n\n if flow_value == k:\n ## Remove saturated edges form the residual network\n saturated_edges = [(u, w, d) for (u, w, d) in\n R.edges(data=True)\n if d['capacity'] == d['flow']]\n R.remove_edges_from(saturated_edges)\n # step 6: shrink the strongly connected components of \n # residual flow network R and call it L\n L = nx.condensation(R)\n cmap = L.graph['mapping']\n # step 7: Compute antichains of L; they map to closed sets in H\n # Any edge in H that links a closed set is part of a cutset\n for antichain in nx.antichains(L):\n # Nodes in an antichain of the condensation graph of\n # the residual network map to a closed set of nodes that\n # define a node partition of the auxiliary digraph H.\n S = {n for n, scc in cmap.items() if scc in 
antichain}\n # Find the cutset that links the node partition (S,~S) in H\n cutset = set()\n for u in S:\n cutset.update((u, w) for w in H[u] if w not in S)\n # The edges in H that form the cutset are internal edges\n # (ie edges that represent a node of the original graph G)\n node_cut = {H.node[n]['id'] for edge in cutset for n in edge}\n\n if len(node_cut) == k:\n if node_cut not in seen:\n yield node_cut\n seen.append(node_cut)\n # Add an edge (x, v) to make sure that we do not\n # find this cutset again. This is equivalent\n # of adding the edge in the input graph \n # G.add_edge(x, v) and then regenerate H and R:\n # Add edges to the auxiliary digraph.\n H.add_edge('%sB' % mapping[x], '%sA' % mapping[v],\n capacity=1)\n H.add_edge('%sB' % mapping[v], '%sA' % mapping[x],\n capacity=1)\n # Add edges to the residual network.\n R.add_edge('%sB' % mapping[x], '%sA' % mapping[v],\n capacity=1)\n R.add_edge('%sA' % mapping[v], '%sB' % mapping[x],\n capacity=1)\n break\n # Add again the saturated edges to reuse the residual network\n R.add_edges_from(saturated_edges)\n\n\ndef _is_separating_set(G, cut):\n \"\"\"Assumes that the input graph is connected\"\"\"\n if len(cut) == len(G) - 1:\n return True\n\n H = G.copy(with_data=False)\n H.remove_nodes_from(cut)\n if nx.is_connected(H):\n return False\n return True\n", "path": "networkx/algorithms/connectivity/kcutsets.py"}]} | 3,052 | 319 |
gh_patches_debug_10902 | rasdani/github-patches | git_diff | google__flax-362 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pooling: passing "sequence of `n` `(low, high)` integer pairs" resulting in TypeError
Trying to pass a tuple or list of tuples to a pool operation's padding parameter gives the following errors:
`TypeError: Unknown padding type: (1, 1).`
`TypeError: unhashable type: 'list'`
Sample code for reproducing the bug:
```python3
from flax import nn
from jax import random
class FlaxModel(nn.Module):
def apply(self, x):
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])
return x
rng = random.PRNGKey(0)
model, _ = FlaxModel.init_by_shape(rng, [(1, 100, 100, 1)])
```
</issue>
<code>
[start of flax/nn/pooling.py]
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Pooling modules."""
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as onp
21
22
23 def pool(inputs, init, reduce_fn, window_shape, strides, padding):
24 """Helper function to define pooling functions.
25
26 Pooling functions are implemented using the ReduceWindow XLA op.
27 NOTE: Be aware that pooling is not generally differentiable.
28 That means providing a reduce_fn that is differentiable does not imply
29 that pool is differentiable.
30
31 Args:
32 inputs: input data with dimensions (batch, window dims..., features).
33 init: the initial value for the reduction
34 reduce_fn: a reduce function of the form `(T, T) -> T`.
35 window_shape: a shape tuple defining the window to reduce over.
36 strides: a sequence of `n` integers, representing the inter-window
37 strides.
38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
39 of `n` `(low, high)` integer pairs that give the padding to apply before
40 and after each spatial dimension.
41 Returns:
42 The output of the reduction for each window slice.
43 """
44 strides = strides or (1,) * len(window_shape)
45 strides = (1,) + strides + (1,)
46 dims = (1,) + window_shape + (1,)
47 return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
48
49
50 def avg_pool(inputs, window_shape, strides=None, padding="VALID"):
51 """Pools the input by taking the average over a window.
52
53 Args:
54 inputs: input data with dimensions (batch, window dims..., features).
55 window_shape: a shape tuple defining the window to reduce over.
56 strides: a sequence of `n` integers, representing the inter-window
57 strides (default: `(1, ..., 1)`).
58 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
59 of `n` `(low, high)` integer pairs that give the padding to apply before
60 and after each spatial dimension (default: `'VALID'`).
61 Returns:
62 The average for each window slice.
63 """
64 y = pool(inputs, 0., lax.add, window_shape, strides, padding)
65 y = y / onp.prod(window_shape)
66 return y
67
68
69 def max_pool(inputs, window_shape, strides=None, padding="VALID"):
70 """Pools the input by taking the maximum of a window slice.
71
72 Args:
73 inputs: input data with dimensions (batch, window dims..., features).
74 window_shape: a shape tuple defining the window to reduce over.
75 strides: a sequence of `n` integers, representing the inter-window
76 strides (default: `(1, ..., 1)`).
77 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
78 of `n` `(low, high)` integer pairs that give the padding to apply before
79 and after each spatial dimension (default: `'VALID'`).
80 Returns:
81 The maximum for each window slice.
82 """
83 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
84 return y
85
[end of flax/nn/pooling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/nn/pooling.py b/flax/nn/pooling.py
--- a/flax/nn/pooling.py
+++ b/flax/nn/pooling.py
@@ -44,6 +44,14 @@
strides = strides or (1,) * len(window_shape)
strides = (1,) + strides + (1,)
dims = (1,) + window_shape + (1,)
+ if not isinstance(padding, str):
+ padding = tuple(map(tuple, padding))
+ assert(len(padding) == len(window_shape)), (
+ f"padding {padding} must specify pads for same number of dims as "
+ f"window_shape {window_shape}")
+ assert(all([len(x) == 2 for x in padding])), (
+ f"each entry in padding {padding} must be length 2")
+ padding = ((0,0),) + padding + ((0,0),)
return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
| {"golden_diff": "diff --git a/flax/nn/pooling.py b/flax/nn/pooling.py\n--- a/flax/nn/pooling.py\n+++ b/flax/nn/pooling.py\n@@ -44,6 +44,14 @@\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n+ if not isinstance(padding, str):\n+ padding = tuple(map(tuple, padding))\n+ assert(len(padding) == len(window_shape)), (\n+ f\"padding {padding} must specify pads for same number of dims as \"\n+ f\"window_shape {window_shape}\")\n+ assert(all([len(x) == 2 for x in padding])), (\n+ f\"each entry in padding {padding} must be length 2\")\n+ padding = ((0,0),) + padding + ((0,0),)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n", "issue": "Pooling: passing \"sequence of `n` `(low, high)` integer pairs\" resulting in TypeError\nTrying to pass a tuple or list of tuples to a pool operation's padding parameter gives out the following errors: \r\n`TypeError: Unknown padding type: (1, 1).`\r\n`TypeError : unhashable type: 'list' `\r\n\r\n\r\nSample code for reproducing the bug:\r\n```python3\r\nfrom flax import nn\r\nfrom jax import random\r\n\r\nclass FlaxModel(nn.Module):\r\n def apply(self, x):\r\n x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])\r\n return x\r\n\r\nrng = random.PRNGKey(0)\r\nmodel, _ = FlaxModel.init_by_shape(rng, [(1, 100, 100, 1)])\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as onp\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n return lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n 
strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / onp.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n", "path": "flax/nn/pooling.py"}]} | 1,707 | 225 |
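For readers who want to try the patched pooling behaviour above, here is a minimal usage sketch. It assumes the pre-Linen `flax.nn` API from the listing and the padding handling added in the diff; the input shape and the expected output shape are illustrative assumptions, not part of the original record.

```python
# Minimal sketch: exercises max_pool with explicit (low, high) padding pairs,
# which the patched pool() above converts into a lax.reduce_window padding config.
import jax.numpy as jnp
from flax import nn  # pre-Linen flax.nn API assumed, as in the listing above

x = jnp.ones((1, 100, 100, 1))  # (batch, height, width, features)

# One (low, high) pair per spatial window dimension; batch/feature dims get (0, 0).
y = nn.max_pool(x, window_shape=(3, 3), strides=(2, 2), padding=[(1, 1), (1, 1)])
print(y.shape)  # expected (1, 50, 50, 1) for this window/stride/padding choice
```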
gh_patches_debug_40184 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-1722 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[vec] deal with class imbalances
</issue>
<code>
[start of paddlespeech/vector/utils/vector_utils.py]
1 # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def get_chunks(seg_dur, audio_id, audio_duration):
17 """Get all chunk segments from a utterance
18
19 Args:
20 seg_dur (float): segment chunk duration, seconds
21 audio_id (str): utterance name,
22 audio_duration (float): utterance duration, seconds
23
24 Returns:
25 List: all the chunk segments
26 """
27 num_chunks = int(audio_duration / seg_dur) # all in seconds
28 chunk_lst = [
29 audio_id + "_" + str(i * seg_dur) + "_" + str(i * seg_dur + seg_dur)
30 for i in range(num_chunks)
31 ]
32 return chunk_lst
33
34
35 def Q_from_tokens(token_num):
36 """Get prior model, data from uniform, would support others(guassian) in future
37 """
38 freq = [1] * token_num
39 Q = paddle.to_tensor(freq, dtype='float64')
40 return Q / Q.sum()
41
[end of paddlespeech/vector/utils/vector_utils.py]
[start of paddlespeech/vector/modules/loss.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # This is modified from SpeechBrain
15 # https://github.com/speechbrain/speechbrain/blob/085be635c07f16d42cd1295045bc46c407f1e15b/speechbrain/nnet/losses.py
16 import math
17
18 import paddle
19 import paddle.nn as nn
20 import paddle.nn.functional as F
21
22
23 class AngularMargin(nn.Layer):
24 def __init__(self, margin=0.0, scale=1.0):
25 """An implementation of Angular Margin (AM) proposed in the following
26 paper: '''Margin Matters: Towards More Discriminative Deep Neural Network
27 Embeddings for Speaker Recognition''' (https://arxiv.org/abs/1906.07317)
28
29 Args:
30 margin (float, optional): The margin for cosine similiarity. Defaults to 0.0.
31 scale (float, optional): The scale for cosine similiarity. Defaults to 1.0.
32 """
33 super(AngularMargin, self).__init__()
34 self.margin = margin
35 self.scale = scale
36
37 def forward(self, outputs, targets):
38 outputs = outputs - self.margin * targets
39 return self.scale * outputs
40
41
42 class AdditiveAngularMargin(AngularMargin):
43 def __init__(self, margin=0.0, scale=1.0, easy_margin=False):
44 """The Implementation of Additive Angular Margin (AAM) proposed
45 in the following paper: '''Margin Matters: Towards More Discriminative Deep Neural Network Embeddings for Speaker Recognition'''
46 (https://arxiv.org/abs/1906.07317)
47
48 Args:
49 margin (float, optional): margin factor. Defaults to 0.0.
50 scale (float, optional): scale factor. Defaults to 1.0.
51 easy_margin (bool, optional): easy_margin flag. Defaults to False.
52 """
53 super(AdditiveAngularMargin, self).__init__(margin, scale)
54 self.easy_margin = easy_margin
55
56 self.cos_m = math.cos(self.margin)
57 self.sin_m = math.sin(self.margin)
58 self.th = math.cos(math.pi - self.margin)
59 self.mm = math.sin(math.pi - self.margin) * self.margin
60
61 def forward(self, outputs, targets):
62 cosine = outputs.astype('float32')
63 sine = paddle.sqrt(1.0 - paddle.pow(cosine, 2))
64 phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)
65 if self.easy_margin:
66 phi = paddle.where(cosine > 0, phi, cosine)
67 else:
68 phi = paddle.where(cosine > self.th, phi, cosine - self.mm)
69 outputs = (targets * phi) + ((1.0 - targets) * cosine)
70 return self.scale * outputs
71
72
73 class LogSoftmaxWrapper(nn.Layer):
74 def __init__(self, loss_fn):
75 """Speaker identificatin loss function wrapper
76 including all of compositions of the loss transformation
77 Args:
78 loss_fn (_type_): the loss value of a batch
79 """
80 super(LogSoftmaxWrapper, self).__init__()
81 self.loss_fn = loss_fn
82 self.criterion = paddle.nn.KLDivLoss(reduction="sum")
83
84 def forward(self, outputs, targets, length=None):
85 targets = F.one_hot(targets, outputs.shape[1])
86 try:
87 predictions = self.loss_fn(outputs, targets)
88 except TypeError:
89 predictions = self.loss_fn(outputs)
90
91 predictions = F.log_softmax(predictions, axis=1)
92 loss = self.criterion(predictions, targets) / targets.sum()
93 return loss
94
95
96 class NCELoss(nn.Layer):
97 """Noise Contrastive Estimation loss funtion
98
99 Noise Contrastive Estimation (NCE) is an approximation method that is used to
100 work around the huge computational cost of large softmax layer.
101 The basic idea is to convert the prediction problem into classification problem
102 at training stage. It has been proved that these two criterions converges to
103 the same minimal point as long as noise distribution is close enough to real one.
104
105 NCE bridges the gap between generative models and discriminative models,
106 rather than simply speedup the softmax layer.
107 With NCE, you can turn almost anything into posterior with less effort (I think).
108
109 Refs:
110 NCE:http://www.cs.helsinki.fi/u/ahyvarin/papers/Gutmann10AISTATS.pdf
111 Thanks: https://github.com/mingen-pan/easy-to-use-NCE-RNN-for-Pytorch/blob/master/nce.py
112
113 Examples:
114 Q = Q_from_tokens(output_dim)
115 NCELoss(Q)
116 """
117
118 def __init__(self, Q, noise_ratio=100, Z_offset=9.5):
119 """Noise Contrastive Estimation loss funtion
120
121 Args:
122 Q (tensor): prior model, uniform or guassian
123 noise_ratio (int, optional): noise sampling times. Defaults to 100.
124 Z_offset (float, optional): scale of post processing the score. Defaults to 9.5.
125 """
126 super(NCELoss, self).__init__()
127 assert type(noise_ratio) is int
128 self.Q = paddle.to_tensor(Q, stop_gradient=False)
129 self.N = self.Q.shape[0]
130 self.K = noise_ratio
131 self.Z_offset = Z_offset
132
133 def forward(self, output, target):
134 """Forward inference
135
136 Args:
137 output (tensor): the model output, which is the input of loss function
138 """
139 output = paddle.reshape(output, [-1, self.N])
140 B = output.shape[0]
141 noise_idx = self.get_noise(B)
142 idx = self.get_combined_idx(target, noise_idx)
143 P_target, P_noise = self.get_prob(idx, output, sep_target=True)
144 Q_target, Q_noise = self.get_Q(idx)
145 loss = self.nce_loss(P_target, P_noise, Q_noise, Q_target)
146 return loss.mean()
147
148 def get_Q(self, idx, sep_target=True):
149 """Get prior model of batchsize data
150 """
151 idx_size = idx.size
152 prob_model = paddle.to_tensor(
153 self.Q.numpy()[paddle.reshape(idx, [-1]).numpy()])
154 prob_model = paddle.reshape(prob_model, [idx.shape[0], idx.shape[1]])
155 if sep_target:
156 return prob_model[:, 0], prob_model[:, 1:]
157 else:
158 return prob_model
159
160 def get_prob(self, idx, scores, sep_target=True):
161 """Post processing the score of post model(output of nn) of batchsize data
162 """
163 scores = self.get_scores(idx, scores)
164 scale = paddle.to_tensor([self.Z_offset], dtype='float32')
165 scores = paddle.add(scores, -scale)
166 prob = paddle.exp(scores)
167 if sep_target:
168 return prob[:, 0], prob[:, 1:]
169 else:
170 return prob
171
172 def get_scores(self, idx, scores):
173 """Get the score of post model(output of nn) of batchsize data
174 """
175 B, N = scores.shape
176 K = idx.shape[1]
177 idx_increment = paddle.to_tensor(
178 N * paddle.reshape(paddle.arange(B), [B, 1]) * paddle.ones([1, K]),
179 dtype="int64",
180 stop_gradient=False)
181 new_idx = idx_increment + idx
182 new_scores = paddle.index_select(
183 paddle.reshape(scores, [-1]), paddle.reshape(new_idx, [-1]))
184
185 return paddle.reshape(new_scores, [B, K])
186
187 def get_noise(self, batch_size, uniform=True):
188 """Select noise sample
189 """
190 if uniform:
191 noise = np.random.randint(self.N, size=self.K * batch_size)
192 else:
193 noise = np.random.choice(
194 self.N, self.K * batch_size, replace=True, p=self.Q.data)
195 noise = paddle.to_tensor(noise, dtype='int64', stop_gradient=False)
196 noise_idx = paddle.reshape(noise, [batch_size, self.K])
197 return noise_idx
198
199 def get_combined_idx(self, target_idx, noise_idx):
200 """Combined target and noise
201 """
202 target_idx = paddle.reshape(target_idx, [-1, 1])
203 return paddle.concat((target_idx, noise_idx), 1)
204
205 def nce_loss(self, prob_model, prob_noise_in_model, prob_noise,
206 prob_target_in_noise):
207 """Combined the loss of target and noise
208 """
209
210 def safe_log(tensor):
211 """Safe log
212 """
213 EPSILON = 1e-10
214 return paddle.log(EPSILON + tensor)
215
216 model_loss = safe_log(prob_model /
217 (prob_model + self.K * prob_target_in_noise))
218 model_loss = paddle.reshape(model_loss, [-1])
219
220 noise_loss = paddle.sum(
221 safe_log((self.K * prob_noise) /
222 (prob_noise_in_model + self.K * prob_noise)), -1)
223 noise_loss = paddle.reshape(noise_loss, [-1])
224
225 loss = -(model_loss + noise_loss)
226
227 return loss
228
[end of paddlespeech/vector/modules/loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/paddlespeech/vector/modules/loss.py b/paddlespeech/vector/modules/loss.py
--- a/paddlespeech/vector/modules/loss.py
+++ b/paddlespeech/vector/modules/loss.py
@@ -132,7 +132,7 @@
def forward(self, output, target):
"""Forward inference
-
+
Args:
output (tensor): the model output, which is the input of loss function
"""
@@ -161,7 +161,7 @@
"""Post processing the score of post model(output of nn) of batchsize data
"""
scores = self.get_scores(idx, scores)
- scale = paddle.to_tensor([self.Z_offset], dtype='float32')
+ scale = paddle.to_tensor([self.Z_offset], dtype='float64')
scores = paddle.add(scores, -scale)
prob = paddle.exp(scores)
if sep_target:
@@ -225,3 +225,65 @@
loss = -(model_loss + noise_loss)
return loss
+
+
+class FocalLoss(nn.Layer):
+ """This criterion is a implemenation of Focal Loss, which is proposed in
+ Focal Loss for Dense Object Detection.
+
+ Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
+
+ The losses are averaged across observations for each minibatch.
+
+ Args:
+ alpha(1D Tensor, Variable) : the scalar factor for this criterion
+ gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
+ putting more focus on hard, misclassified examples
+ size_average(bool): By default, the losses are averaged over observations for each minibatch.
+ However, if the field size_average is set to False, the losses are
+ instead summed for each minibatch.
+ """
+
+ def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=-100):
+ super(FocalLoss, self).__init__()
+ self.alpha = alpha
+ self.gamma = gamma
+ self.size_average = size_average
+ self.ce = nn.CrossEntropyLoss(
+ ignore_index=ignore_index, reduction="none")
+
+ def forward(self, outputs, targets):
+ """Forword inference.
+
+ Args:
+ outputs: input tensor
+ target: target label tensor
+ """
+ ce_loss = self.ce(outputs, targets)
+ pt = paddle.exp(-ce_loss)
+ focal_loss = self.alpha * (1 - pt)**self.gamma * ce_loss
+ if self.size_average:
+ return focal_loss.mean()
+ else:
+ return focal_loss.sum()
+
+
+if __name__ == "__main__":
+ import numpy as np
+ from paddlespeech.vector.utils.vector_utils import Q_from_tokens
+ paddle.set_device("cpu")
+
+ input_data = paddle.uniform([5, 100], dtype="float64")
+ label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
+
+ input = paddle.to_tensor(input_data)
+ label = paddle.to_tensor(label_data)
+
+ loss1 = FocalLoss()
+ loss = loss1.forward(input, label)
+ print("loss: %.5f" % (loss))
+
+ Q = Q_from_tokens(100)
+ loss2 = NCELoss(Q)
+ loss = loss2.forward(input, label)
+ print("loss: %.5f" % (loss))
diff --git a/paddlespeech/vector/utils/vector_utils.py b/paddlespeech/vector/utils/vector_utils.py
--- a/paddlespeech/vector/utils/vector_utils.py
+++ b/paddlespeech/vector/utils/vector_utils.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import paddle
def get_chunks(seg_dur, audio_id, audio_duration):
| {"golden_diff": "diff --git a/paddlespeech/vector/modules/loss.py b/paddlespeech/vector/modules/loss.py\n--- a/paddlespeech/vector/modules/loss.py\n+++ b/paddlespeech/vector/modules/loss.py\n@@ -132,7 +132,7 @@\n \n def forward(self, output, target):\n \"\"\"Forward inference\n- \n+\n Args:\n output (tensor): the model output, which is the input of loss function\n \"\"\"\n@@ -161,7 +161,7 @@\n \"\"\"Post processing the score of post model(output of nn) of batchsize data\n \"\"\"\n scores = self.get_scores(idx, scores)\n- scale = paddle.to_tensor([self.Z_offset], dtype='float32')\n+ scale = paddle.to_tensor([self.Z_offset], dtype='float64')\n scores = paddle.add(scores, -scale)\n prob = paddle.exp(scores)\n if sep_target:\n@@ -225,3 +225,65 @@\n loss = -(model_loss + noise_loss)\n \n return loss\n+\n+\n+class FocalLoss(nn.Layer):\n+ \"\"\"This criterion is a implemenation of Focal Loss, which is proposed in \n+ Focal Loss for Dense Object Detection.\n+\n+ Loss(x, class) = - \\alpha (1-softmax(x)[class])^gamma \\log(softmax(x)[class])\n+\n+ The losses are averaged across observations for each minibatch.\n+\n+ Args:\n+ alpha(1D Tensor, Variable) : the scalar factor for this criterion\n+ gamma(float, double) : gamma > 0; reduces the relative loss for well-classi\ufb01ed examples (p > .5), \n+ putting more focus on hard, misclassi\ufb01ed examples\n+ size_average(bool): By default, the losses are averaged over observations for each minibatch.\n+ However, if the field size_average is set to False, the losses are\n+ instead summed for each minibatch.\n+ \"\"\"\n+\n+ def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=-100):\n+ super(FocalLoss, self).__init__()\n+ self.alpha = alpha\n+ self.gamma = gamma\n+ self.size_average = size_average\n+ self.ce = nn.CrossEntropyLoss(\n+ ignore_index=ignore_index, reduction=\"none\")\n+\n+ def forward(self, outputs, targets):\n+ \"\"\"Forword inference.\n+\n+ Args:\n+ outputs: input tensor\n+ target: target label tensor\n+ \"\"\"\n+ ce_loss = self.ce(outputs, targets)\n+ pt = paddle.exp(-ce_loss)\n+ focal_loss = self.alpha * (1 - pt)**self.gamma * ce_loss\n+ if self.size_average:\n+ return focal_loss.mean()\n+ else:\n+ return focal_loss.sum()\n+\n+\n+if __name__ == \"__main__\":\n+ import numpy as np\n+ from paddlespeech.vector.utils.vector_utils import Q_from_tokens\n+ paddle.set_device(\"cpu\")\n+\n+ input_data = paddle.uniform([5, 100], dtype=\"float64\")\n+ label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)\n+\n+ input = paddle.to_tensor(input_data)\n+ label = paddle.to_tensor(label_data)\n+\n+ loss1 = FocalLoss()\n+ loss = loss1.forward(input, label)\n+ print(\"loss: %.5f\" % (loss))\n+\n+ Q = Q_from_tokens(100)\n+ loss2 = NCELoss(Q)\n+ loss = loss2.forward(input, label)\n+ print(\"loss: %.5f\" % (loss))\ndiff --git a/paddlespeech/vector/utils/vector_utils.py b/paddlespeech/vector/utils/vector_utils.py\n--- a/paddlespeech/vector/utils/vector_utils.py\n+++ b/paddlespeech/vector/utils/vector_utils.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import paddle\n \n \n def get_chunks(seg_dur, audio_id, audio_duration):\n", "issue": "[vec] deal with class imbalances\n\n", "before_files": [{"content": "# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef get_chunks(seg_dur, audio_id, audio_duration):\n \"\"\"Get all chunk segments from a utterance\n\n Args:\n seg_dur (float): segment chunk duration, seconds\n audio_id (str): utterance name, \n audio_duration (float): utterance duration, seconds\n\n Returns:\n List: all the chunk segments \n \"\"\"\n num_chunks = int(audio_duration / seg_dur) # all in seconds\n chunk_lst = [\n audio_id + \"_\" + str(i * seg_dur) + \"_\" + str(i * seg_dur + seg_dur)\n for i in range(num_chunks)\n ]\n return chunk_lst\n\n\ndef Q_from_tokens(token_num):\n \"\"\"Get prior model, data from uniform, would support others(guassian) in future\n \"\"\"\n freq = [1] * token_num\n Q = paddle.to_tensor(freq, dtype='float64')\n return Q / Q.sum()\n", "path": "paddlespeech/vector/utils/vector_utils.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# This is modified from SpeechBrain\n# https://github.com/speechbrain/speechbrain/blob/085be635c07f16d42cd1295045bc46c407f1e15b/speechbrain/nnet/losses.py\nimport math\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\n\nclass AngularMargin(nn.Layer):\n def __init__(self, margin=0.0, scale=1.0):\n \"\"\"An implementation of Angular Margin (AM) proposed in the following\n paper: '''Margin Matters: Towards More Discriminative Deep Neural Network\n Embeddings for Speaker Recognition''' (https://arxiv.org/abs/1906.07317)\n\n Args:\n margin (float, optional): The margin for cosine similiarity. Defaults to 0.0.\n scale (float, optional): The scale for cosine similiarity. Defaults to 1.0.\n \"\"\"\n super(AngularMargin, self).__init__()\n self.margin = margin\n self.scale = scale\n\n def forward(self, outputs, targets):\n outputs = outputs - self.margin * targets\n return self.scale * outputs\n\n\nclass AdditiveAngularMargin(AngularMargin):\n def __init__(self, margin=0.0, scale=1.0, easy_margin=False):\n \"\"\"The Implementation of Additive Angular Margin (AAM) proposed\n in the following paper: '''Margin Matters: Towards More Discriminative Deep Neural Network Embeddings for Speaker Recognition'''\n (https://arxiv.org/abs/1906.07317)\n\n Args:\n margin (float, optional): margin factor. Defaults to 0.0.\n scale (float, optional): scale factor. Defaults to 1.0.\n easy_margin (bool, optional): easy_margin flag. 
Defaults to False.\n \"\"\"\n super(AdditiveAngularMargin, self).__init__(margin, scale)\n self.easy_margin = easy_margin\n\n self.cos_m = math.cos(self.margin)\n self.sin_m = math.sin(self.margin)\n self.th = math.cos(math.pi - self.margin)\n self.mm = math.sin(math.pi - self.margin) * self.margin\n\n def forward(self, outputs, targets):\n cosine = outputs.astype('float32')\n sine = paddle.sqrt(1.0 - paddle.pow(cosine, 2))\n phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)\n if self.easy_margin:\n phi = paddle.where(cosine > 0, phi, cosine)\n else:\n phi = paddle.where(cosine > self.th, phi, cosine - self.mm)\n outputs = (targets * phi) + ((1.0 - targets) * cosine)\n return self.scale * outputs\n\n\nclass LogSoftmaxWrapper(nn.Layer):\n def __init__(self, loss_fn):\n \"\"\"Speaker identificatin loss function wrapper \n including all of compositions of the loss transformation\n Args:\n loss_fn (_type_): the loss value of a batch\n \"\"\"\n super(LogSoftmaxWrapper, self).__init__()\n self.loss_fn = loss_fn\n self.criterion = paddle.nn.KLDivLoss(reduction=\"sum\")\n\n def forward(self, outputs, targets, length=None):\n targets = F.one_hot(targets, outputs.shape[1])\n try:\n predictions = self.loss_fn(outputs, targets)\n except TypeError:\n predictions = self.loss_fn(outputs)\n\n predictions = F.log_softmax(predictions, axis=1)\n loss = self.criterion(predictions, targets) / targets.sum()\n return loss\n\n\nclass NCELoss(nn.Layer):\n \"\"\"Noise Contrastive Estimation loss funtion\n\n Noise Contrastive Estimation (NCE) is an approximation method that is used to\n work around the huge computational cost of large softmax layer.\n The basic idea is to convert the prediction problem into classification problem\n at training stage. It has been proved that these two criterions converges to\n the same minimal point as long as noise distribution is close enough to real one.\n\n NCE bridges the gap between generative models and discriminative models,\n rather than simply speedup the softmax layer.\n With NCE, you can turn almost anything into posterior with less effort (I think).\n\n Refs:\n NCE\uff1ahttp://www.cs.helsinki.fi/u/ahyvarin/papers/Gutmann10AISTATS.pdf\n Thanks: https://github.com/mingen-pan/easy-to-use-NCE-RNN-for-Pytorch/blob/master/nce.py\n\n Examples:\n Q = Q_from_tokens(output_dim)\n NCELoss(Q)\n \"\"\"\n\n def __init__(self, Q, noise_ratio=100, Z_offset=9.5):\n \"\"\"Noise Contrastive Estimation loss funtion\n\n Args:\n Q (tensor): prior model, uniform or guassian\n noise_ratio (int, optional): noise sampling times. Defaults to 100.\n Z_offset (float, optional): scale of post processing the score. 
Defaults to 9.5.\n \"\"\"\n super(NCELoss, self).__init__()\n assert type(noise_ratio) is int\n self.Q = paddle.to_tensor(Q, stop_gradient=False)\n self.N = self.Q.shape[0]\n self.K = noise_ratio\n self.Z_offset = Z_offset\n\n def forward(self, output, target):\n \"\"\"Forward inference\n \n Args:\n output (tensor): the model output, which is the input of loss function\n \"\"\"\n output = paddle.reshape(output, [-1, self.N])\n B = output.shape[0]\n noise_idx = self.get_noise(B)\n idx = self.get_combined_idx(target, noise_idx)\n P_target, P_noise = self.get_prob(idx, output, sep_target=True)\n Q_target, Q_noise = self.get_Q(idx)\n loss = self.nce_loss(P_target, P_noise, Q_noise, Q_target)\n return loss.mean()\n\n def get_Q(self, idx, sep_target=True):\n \"\"\"Get prior model of batchsize data\n \"\"\"\n idx_size = idx.size\n prob_model = paddle.to_tensor(\n self.Q.numpy()[paddle.reshape(idx, [-1]).numpy()])\n prob_model = paddle.reshape(prob_model, [idx.shape[0], idx.shape[1]])\n if sep_target:\n return prob_model[:, 0], prob_model[:, 1:]\n else:\n return prob_model\n\n def get_prob(self, idx, scores, sep_target=True):\n \"\"\"Post processing the score of post model(output of nn) of batchsize data\n \"\"\"\n scores = self.get_scores(idx, scores)\n scale = paddle.to_tensor([self.Z_offset], dtype='float32')\n scores = paddle.add(scores, -scale)\n prob = paddle.exp(scores)\n if sep_target:\n return prob[:, 0], prob[:, 1:]\n else:\n return prob\n\n def get_scores(self, idx, scores):\n \"\"\"Get the score of post model(output of nn) of batchsize data\n \"\"\"\n B, N = scores.shape\n K = idx.shape[1]\n idx_increment = paddle.to_tensor(\n N * paddle.reshape(paddle.arange(B), [B, 1]) * paddle.ones([1, K]),\n dtype=\"int64\",\n stop_gradient=False)\n new_idx = idx_increment + idx\n new_scores = paddle.index_select(\n paddle.reshape(scores, [-1]), paddle.reshape(new_idx, [-1]))\n\n return paddle.reshape(new_scores, [B, K])\n\n def get_noise(self, batch_size, uniform=True):\n \"\"\"Select noise sample\n \"\"\"\n if uniform:\n noise = np.random.randint(self.N, size=self.K * batch_size)\n else:\n noise = np.random.choice(\n self.N, self.K * batch_size, replace=True, p=self.Q.data)\n noise = paddle.to_tensor(noise, dtype='int64', stop_gradient=False)\n noise_idx = paddle.reshape(noise, [batch_size, self.K])\n return noise_idx\n\n def get_combined_idx(self, target_idx, noise_idx):\n \"\"\"Combined target and noise\n \"\"\"\n target_idx = paddle.reshape(target_idx, [-1, 1])\n return paddle.concat((target_idx, noise_idx), 1)\n\n def nce_loss(self, prob_model, prob_noise_in_model, prob_noise,\n prob_target_in_noise):\n \"\"\"Combined the loss of target and noise\n \"\"\"\n\n def safe_log(tensor):\n \"\"\"Safe log\n \"\"\"\n EPSILON = 1e-10\n return paddle.log(EPSILON + tensor)\n\n model_loss = safe_log(prob_model /\n (prob_model + self.K * prob_target_in_noise))\n model_loss = paddle.reshape(model_loss, [-1])\n\n noise_loss = paddle.sum(\n safe_log((self.K * prob_noise) /\n (prob_noise_in_model + self.K * prob_noise)), -1)\n noise_loss = paddle.reshape(noise_loss, [-1])\n\n loss = -(model_loss + noise_loss)\n\n return loss\n", "path": "paddlespeech/vector/modules/loss.py"}]} | 3,695 | 930 |
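As a usage note for the record above: the diff adds a `FocalLoss` layer to `paddlespeech/vector/modules/loss.py` to address class imbalance. The sketch below shows how it might be plugged in as a training criterion; the import path follows the patched module, while the batch size, class count and `gamma` value are illustrative assumptions.

```python
# Minimal sketch: FocalLoss as a drop-in criterion (sizes and gamma are illustrative).
import numpy as np
import paddle

from paddlespeech.vector.modules.loss import FocalLoss  # added by the patch above

logits = paddle.uniform([8, 256], dtype="float64")  # (batch, num_speaker_classes)
labels = paddle.to_tensor(
    np.random.randint(0, 256, size=(8, )).astype(np.int64))

criterion = FocalLoss(alpha=1, gamma=2)  # gamma > 0 down-weights easy examples
loss = criterion(logits, labels)         # nn.Layer __call__ dispatches to forward()
print("focal loss: %.5f" % loss)
```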
gh_patches_debug_18242 | rasdani/github-patches | git_diff | Mailu__Mailu-1542 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dovecot does not use redis, so it should be removed from start script
In core/dovecot/start.py, REDIS_ADDRESS is resolved, but Redis is not used by Dovecot. It should be removed from the script.
</issue>
<code>
[start of core/dovecot/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import multiprocessing
6 import logging as log
7 import sys
8
9 from podop import run_server
10 from socrate import system, conf
11
12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
13
14 def start_podop():
15 os.setuid(8)
16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
17 run_server(0, "dovecot", "/tmp/podop.socket", [
18 ("quota", "url", url ),
19 ("auth", "url", url),
20 ("sieve", "url", url),
21 ])
22
23 # Actual startup script
24
25 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
26 os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
27 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
28 os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334")
29 if os.environ["WEBMAIL"] != "none":
30 os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
31
32 for dovecot_file in glob.glob("/conf/*.conf"):
33 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
34
35 os.makedirs("/conf/bin", exist_ok=True)
36 for script_file in glob.glob("/conf/*.script"):
37 out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
38 conf.jinja(script_file, os.environ, out_file)
39 os.chmod(out_file, 0o555)
40
41 # Run Podop, then postfix
42 multiprocessing.Process(target=start_podop).start()
43 os.system("chown mail:mail /mail")
44 os.system("chown -R mail:mail /var/lib/dovecot /conf")
45 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
46
[end of core/dovecot/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -21,13 +21,9 @@
])
# Actual startup script
-
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
-os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_WEBUI_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_WEBUI", "antispam:11334")
-if os.environ["WEBMAIL"] != "none":
- os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
| {"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -21,13 +21,9 @@\n ])\n \n # Actual startup script\n-\n os.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\n-os.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\n os.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\n os.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\n-if os.environ[\"WEBMAIL\"] != \"none\":\n- os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n \n for dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n", "issue": "Dovecot does not use redis, so it should be removed from start script \nIn core/dovecot/start.py REDIS_ADDRESS is resolved but redis is not used on dovecot. It should be removed from the script.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\n\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"REDIS_ADDRESS\"] = system.get_host_address_from_environment(\"REDIS\", \"redis\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_WEBUI_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_WEBUI\", \"antispam:11334\")\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.get_host_address_from_environment(\"WEBMAIL\", \"webmail\")\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\nos.makedirs(\"/conf/bin\", exist_ok=True)\nfor script_file in glob.glob(\"/conf/*.script\"):\n out_file = os.path.join(\"/conf/bin/\", os.path.basename(script_file).replace('.script',''))\n conf.jinja(script_file, os.environ, out_file)\n os.chmod(out_file, 0o555)\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]} | 1,143 | 230 |
gh_patches_debug_3694 | rasdani/github-patches | git_diff | ansible__ansible-22432 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tag attribute in ec2_snapshot_facts module should be dict instead of list
##### ISSUE TYPE
- Documentation Report
##### COMPONENT NAME
ansible-modules-extras/cloud/amazon/ec2_snapshot_facts.py
##### ANSIBLE VERSION
```
$ ansible --version
ansible 2.2.1.0
config file = /home/psd/.ansible.cfg
configured module search path = ['/home/psd/.ansible/library']
```
##### CONFIGURATION
N/A. Not configuration-specific
##### OS / ENVIRONMENT
N/A
##### SUMMARY
The returned snapshots dict has a tag attribute; it was already [converted to dict in the code](https://github.com/ansible/ansible-modules-extras/blob/devel/cloud/amazon/ec2_snapshot_facts.py#L196).
So the expected tag attr in this module is a dict, not a list (which is the return value type in boto3).
##### STEPS TO REPRODUCE
Get snapshot facts; an example output is shown in the next section:
##### ACTUAL RESULTS
```
{
"description": "",
"encrypted": false,
"owner_id": "omitted",
"progress": "100%",
"snapshot_id": "snap-omitted",
"start_time": "2017-03-08T03:52:29+00:00",
"state": "completed",
"tags": {
"Name": "some name",
"creator": "cron on some machine",
"frequency": "hourly"
},
"volume_id": "vol-omitted",
"volume_size": 40
}
```
As you can see, the tags attr is a dict.
</issue>
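To make the report above concrete: boto3 returns snapshot tags as a list of `{Key, Value}` pairs, and the module (see `boto3_tag_list_to_ansible_dict` in the listing below) flattens them into a plain dict before returning facts, which is why the documented type should be `dict`. The snippet below only illustrates that shape change; the tag values are taken from the issue's example output.

```python
# Illustration only: shows the list-of-pairs -> dict shape change described above.
boto3_style_tags = [
    {"Key": "Name", "Value": "some name"},
    {"Key": "creator", "Value": "cron on some machine"},
]

# Equivalent in spirit to what boto3_tag_list_to_ansible_dict returns for this input.
ansible_style_tags = {item["Key"]: item["Value"] for item in boto3_style_tags}
assert ansible_style_tags == {"Name": "some name", "creator": "cron on some machine"}
```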
<code>
[start of lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py]
1 #!/usr/bin/python
2 # This file is part of Ansible
3 #
4 # Ansible is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # Ansible is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
16
17 ANSIBLE_METADATA = {'status': ['preview'],
18 'supported_by': 'community',
19 'version': '1.0'}
20
21 DOCUMENTATION = '''
22 ---
23 module: ec2_snapshot_facts
24 short_description: Gather facts about ec2 volume snapshots in AWS
25 description:
26 - Gather facts about ec2 volume snapshots in AWS
27 version_added: "2.1"
28 author: "Rob White (@wimnat)"
29 options:
30 snapshot_ids:
31 description:
32 - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
33 required: false
34 default: []
35 owner_ids:
36 description:
37 - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \
38 access are returned.
39 required: false
40 default: []
41 restorable_by_user_ids:
42 description:
43 - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \
44 returned.
45 required: false
46 default: []
47 filters:
48 description:
49 - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
50 U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \
51 names and values are case sensitive.
52 required: false
53 default: {}
54 notes:
55 - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \
56 the account use the filter 'owner-id'.
57
58 extends_documentation_fragment:
59 - aws
60 - ec2
61 '''
62
63 EXAMPLES = '''
64 # Note: These examples do not set authentication details, see the AWS Guide for details.
65
66 # Gather facts about all snapshots, including public ones
67 - ec2_snapshot_facts:
68
69 # Gather facts about all snapshots owned by the account 0123456789
70 - ec2_snapshot_facts:
71 filters:
72 owner-id: 0123456789
73
74 # Or alternatively...
75 - ec2_snapshot_facts:
76 owner_ids:
77 - 0123456789
78
79 # Gather facts about a particular snapshot using ID
80 - ec2_snapshot_facts:
81 filters:
82 snapshot-id: snap-00112233
83
84 # Or alternatively...
85 - ec2_snapshot_facts:
86 snapshot_ids:
87 - snap-00112233
88
89 # Gather facts about any snapshot with a tag key Name and value Example
90 - ec2_snapshot_facts:
91 filters:
92 "tag:Name": Example
93
94 # Gather facts about any snapshot with an error status
95 - ec2_snapshot_facts:
96 filters:
97 status: error
98
99 '''
100
101 RETURN = '''
102 snapshot_id:
103 description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
104 type: string
105 sample: snap-01234567
106 volume_id:
107 description: The ID of the volume that was used to create the snapshot.
108 type: string
109 sample: vol-01234567
110 state:
111 description: The snapshot state (completed, pending or error).
112 type: string
113 sample: completed
114 state_message:
115 description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred.
116 type: string
117 sample:
118 start_time:
119 description: The time stamp when the snapshot was initiated.
120 type: datetime
121 sample: 2015-02-12T02:14:02+00:00
122 progress:
123 description: The progress of the snapshot, as a percentage.
124 type: string
125 sample: 100%
126 owner_id:
127 description: The AWS account ID of the EBS snapshot owner.
128 type: string
129 sample: 099720109477
130 description:
131 description: The description for the snapshot.
132 type: string
133 sample: My important backup
134 volume_size:
135 description: The size of the volume, in GiB.
136 type: integer
137 sample: 8
138 owner_alias:
139 description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
140 type: string
141 sample: 033440102211
142 tags:
143 description: Any tags assigned to the snapshot.
144 type: list
145 sample: "{ 'my_tag_key': 'my_tag_value' }"
146 encrypted:
147 description: Indicates whether the snapshot is encrypted.
148 type: boolean
149 sample: True
150 kms_key_id:
151 description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
152 protect the volume encryption key for the parent volume.
153 type: string
154 sample: 74c9742a-a1b2-45cb-b3fe-abcdef123456
155 data_encryption_key_id:
156 description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
157 corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
158 type: string
159 sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
160
161 '''
162
163 try:
164 import boto3
165 from botocore.exceptions import ClientError, NoCredentialsError
166 HAS_BOTO3 = True
167 except ImportError:
168 HAS_BOTO3 = False
169
170 from ansible.module_utils.basic import AnsibleModule
171 from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
172 boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
173 ec2_argument_spec, get_aws_connection_info)
174
175
176 def list_ec2_snapshots(connection, module):
177
178 snapshot_ids = module.params.get("snapshot_ids")
179 owner_ids = map(str, module.params.get("owner_ids"))
180 restorable_by_user_ids = module.params.get("restorable_by_user_ids")
181 filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
182
183 try:
184 snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters)
185 except ClientError as e:
186 module.fail_json(msg=e.message)
187
188 # Turn the boto3 result in to ansible_friendly_snaked_names
189 snaked_snapshots = []
190 for snapshot in snapshots['Snapshots']:
191 snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
192
193 # Turn the boto3 result in to ansible friendly tag dictionary
194 for snapshot in snaked_snapshots:
195 if 'tags' in snapshot:
196 snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'])
197
198 module.exit_json(snapshots=snaked_snapshots)
199
200
201 def main():
202
203 argument_spec = ec2_argument_spec()
204 argument_spec.update(
205 dict(
206 snapshot_ids=dict(default=[], type='list'),
207 owner_ids=dict(default=[], type='list'),
208 restorable_by_user_ids=dict(default=[], type='list'),
209 filters=dict(default={}, type='dict')
210 )
211 )
212
213 module = AnsibleModule(argument_spec=argument_spec,
214 mutually_exclusive=[
215 ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
216 ]
217 )
218
219 if not HAS_BOTO3:
220 module.fail_json(msg='boto3 required for this module')
221
222 region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
223
224 if region:
225 connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
226 else:
227 module.fail_json(msg="region must be specified")
228
229 list_ec2_snapshots(connection, module)
230
231
232 if __name__ == '__main__':
233 main()
234
[end of lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
@@ -141,7 +141,7 @@
sample: 033440102211
tags:
description: Any tags assigned to the snapshot.
- type: list
+ type: dict
sample: "{ 'my_tag_key': 'my_tag_value' }"
encrypted:
description: Indicates whether the snapshot is encrypted.
| {"golden_diff": "diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py\n--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py\n+++ b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py\n@@ -141,7 +141,7 @@\n sample: 033440102211\n tags:\n description: Any tags assigned to the snapshot.\n- type: list\n+ type: dict\n sample: \"{ 'my_tag_key': 'my_tag_value' }\"\n encrypted:\n description: Indicates whether the snapshot is encrypted.\n", "issue": "tag attribute in ec2_snapshot_facts module should be dict instead of list\n##### ISSUE TYPE\r\n - Documentation Report\r\n\r\n##### COMPONENT NAME\r\nansible-modules-extras/cloud/amazon/ec2_snapshot_facts.py\r\n\r\n##### ANSIBLE VERSION\r\n```\r\n$ ansible --version\r\nansible 2.2.1.0\r\n config file = /home/psd/.ansible.cfg\r\n configured module search path = ['/home/psd/.ansible/library']\r\n```\r\n\r\n##### CONFIGURATION\r\nN/A. Not configuration-specific\r\n\r\n##### OS / ENVIRONMENT\r\nN/A\r\n\r\n##### SUMMARY\r\nThe returned snapshots dict have a tag attribute, it was already [converted to dict in the code](https://github.com/ansible/ansible-modules-extras/blob/devel/cloud/amazon/ec2_snapshot_facts.py#L196)\r\n\r\nSo the expected tag attr in this module is a dict, not a list(which is the return value type in boto3)\r\n\r\n##### STEPS TO REPRODUCE\r\nget snapshot_facts, we have an example output in the next section:\r\n\r\n##### ACTUAL RESULTS\r\n\r\n```\r\n {\r\n \"description\": \"\",\r\n \"encrypted\": false,\r\n \"owner_id\": \"omitted\",\r\n \"progress\": \"100%\",\r\n \"snapshot_id\": \"snap-omitted\",\r\n \"start_time\": \"2017-03-08T03:52:29+00:00\",\r\n \"state\": \"completed\",\r\n \"tags\": {\r\n \"Name\": \"some name\",\r\n \"creator\": \"cron on some machine\",\r\n \"frequency\": \"hourly\"\r\n },\r\n \"volume_id\": \"vol-omitted\",\r\n \"volume_size\": 40\r\n }\r\n```\r\nAs you can see, the tags attr is a dict.\n", "before_files": [{"content": "#!/usr/bin/python\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'status': ['preview'],\n 'supported_by': 'community',\n 'version': '1.0'}\n\nDOCUMENTATION = '''\n---\nmodule: ec2_snapshot_facts\nshort_description: Gather facts about ec2 volume snapshots in AWS\ndescription:\n - Gather facts about ec2 volume snapshots in AWS\nversion_added: \"2.1\"\nauthor: \"Rob White (@wimnat)\"\noptions:\n snapshot_ids:\n description:\n - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.\n required: false\n default: []\n owner_ids:\n description:\n - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \\\n access are returned.\n required: false\n default: []\n restorable_by_user_ids:\n description:\n - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \\\n returned.\n required: false\n default: []\n filters:\n description:\n - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \\\n U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \\\n names and values are case sensitive.\n required: false\n default: {}\nnotes:\n - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \\\n the account use the filter 'owner-id'.\n\nextends_documentation_fragment:\n - aws\n - ec2\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Gather facts about all snapshots, including public ones\n- ec2_snapshot_facts:\n\n# Gather facts about all snapshots owned by the account 0123456789\n- ec2_snapshot_facts:\n filters:\n owner-id: 0123456789\n\n# Or alternatively...\n- ec2_snapshot_facts:\n owner_ids:\n - 0123456789\n\n# Gather facts about a particular snapshot using ID\n- ec2_snapshot_facts:\n filters:\n snapshot-id: snap-00112233\n\n# Or alternatively...\n- ec2_snapshot_facts:\n snapshot_ids:\n - snap-00112233\n\n# Gather facts about any snapshot with a tag key Name and value Example\n- ec2_snapshot_facts:\n filters:\n \"tag:Name\": Example\n\n# Gather facts about any snapshot with an error status\n- ec2_snapshot_facts:\n filters:\n status: error\n\n'''\n\nRETURN = '''\nsnapshot_id:\n description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.\n type: string\n sample: snap-01234567\nvolume_id:\n description: The ID of the volume that was used to create the snapshot.\n type: string\n sample: vol-01234567\nstate:\n description: The snapshot state (completed, pending or error).\n type: string\n sample: completed\nstate_message:\n description: Encrypted Amazon EBS snapshots are copied asynchronously. 
If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred.\n type: string\n sample:\nstart_time:\n description: The time stamp when the snapshot was initiated.\n type: datetime\n sample: 2015-02-12T02:14:02+00:00\nprogress:\n description: The progress of the snapshot, as a percentage.\n type: string\n sample: 100%\nowner_id:\n description: The AWS account ID of the EBS snapshot owner.\n type: string\n sample: 099720109477\ndescription:\n description: The description for the snapshot.\n type: string\n sample: My important backup\nvolume_size:\n description: The size of the volume, in GiB.\n type: integer\n sample: 8\nowner_alias:\n description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.\n type: string\n sample: 033440102211\ntags:\n description: Any tags assigned to the snapshot.\n type: list\n sample: \"{ 'my_tag_key': 'my_tag_value' }\"\nencrypted:\n description: Indicates whether the snapshot is encrypted.\n type: boolean\n sample: True\nkms_key_id:\n description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \\\n protect the volume encryption key for the parent volume.\n type: string\n sample: 74c9742a-a1b2-45cb-b3fe-abcdef123456\ndata_encryption_key_id:\n description: The data encryption key identifier for the snapshot. This value is a unique identifier that \\\n corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.\n type: string\n sample: \"arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456\"\n\n'''\n\ntry:\n import boto3\n from botocore.exceptions import ClientError, NoCredentialsError\n HAS_BOTO3 = True\nexcept ImportError:\n HAS_BOTO3 = False\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,\n boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,\n ec2_argument_spec, get_aws_connection_info)\n\n\ndef list_ec2_snapshots(connection, module):\n\n snapshot_ids = module.params.get(\"snapshot_ids\")\n owner_ids = map(str, module.params.get(\"owner_ids\"))\n restorable_by_user_ids = module.params.get(\"restorable_by_user_ids\")\n filters = ansible_dict_to_boto3_filter_list(module.params.get(\"filters\"))\n\n try:\n snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters)\n except ClientError as e:\n module.fail_json(msg=e.message)\n\n # Turn the boto3 result in to ansible_friendly_snaked_names\n snaked_snapshots = []\n for snapshot in snapshots['Snapshots']:\n snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))\n\n # Turn the boto3 result in to ansible friendly tag dictionary\n for snapshot in snaked_snapshots:\n if 'tags' in snapshot:\n snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'])\n\n module.exit_json(snapshots=snaked_snapshots)\n\n\ndef main():\n\n argument_spec = ec2_argument_spec()\n argument_spec.update(\n dict(\n snapshot_ids=dict(default=[], type='list'),\n owner_ids=dict(default=[], type='list'),\n restorable_by_user_ids=dict(default=[], type='list'),\n filters=dict(default={}, type='dict')\n )\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n mutually_exclusive=[\n ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 
'filters']\n ]\n )\n\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 required for this module')\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)\n\n if region:\n connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)\n else:\n module.fail_json(msg=\"region must be specified\")\n\n list_ec2_snapshots(connection, module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py"}]} | 3,479 | 147 |
gh_patches_debug_17431 | rasdani/github-patches | git_diff | translate__pootle-5736 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update_stores' last updated date doesn't trickle up to project overview/language list
When updating against templates, the /projects/projectname/ listing doesn't reflect the **last update**, **unless** the update affected a file in the **toplevel** dir.
Within a language overview (/lang/projectname), changes deep in a directory hierarchy will also affect the parent directory's last-change date.
using pootle 2.8.0b5 (TDF)
Screenshots to clarify: the overview lists the last update as e.g. 3 weeks ago:

Drilling down to the language reveals that the files in xmlsecurity had actually been updated only 8 hours ago (in fact xmlsecurity/uiconfig/ui.po)

(also, sorting by last updated is not working properly; it goes from 10 months to 8 hours to 3 weeks…)
</issue>
<code>
[start of pootle/apps/pootle_data/project_data.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from .utils import RelatedStoresDataTool, RelatedTPsDataTool
10
11
12 class ProjectDataTool(RelatedTPsDataTool):
13 """Retrieves aggregate stats for a Project"""
14
15 cache_key_name = "project"
16
17 def filter_data(self, qs):
18 return qs.filter(tp__project=self.context)
19
20
21 class ProjectResourceDataTool(RelatedStoresDataTool):
22 group_by = ("store__translation_project__language__code", )
23 cache_key_name = "project_resource"
24
25 @property
26 def project_path(self):
27 return (
28 "/%s%s"
29 % (self.project_code, self.tp_path))
30
31 @property
32 def tp_path(self):
33 return (
34 "/%s%s"
35 % (self.dir_path,
36 self.filename))
37
38 def filter_data(self, qs):
39 return (
40 qs.filter(store__translation_project__project__code=self.project_code)
41 .filter(store__tp_path__startswith=self.tp_path))
42
43 @property
44 def context_name(self):
45 return "/projects%s" % self.project_path
46
47
48 class ProjectSetDataTool(RelatedTPsDataTool):
49 group_by = ("tp__project__code", )
50 cache_key_name = "projects"
51
52 def get_root_child_path(self, child):
53 return child[self.group_by[0]]
54
55 @property
56 def context_name(self):
57 return "ALL"
58
[end of pootle/apps/pootle_data/project_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py
--- a/pootle/apps/pootle_data/project_data.py
+++ b/pootle/apps/pootle_data/project_data.py
@@ -6,6 +6,8 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+from pootle.core.delegate import revision
+
from .utils import RelatedStoresDataTool, RelatedTPsDataTool
@@ -17,6 +19,11 @@
def filter_data(self, qs):
return qs.filter(tp__project=self.context)
+ @property
+ def rev_cache_key(self):
+ return revision.get(
+ self.context.__class__)(self.context.directory).get(key="stats")
+
class ProjectResourceDataTool(RelatedStoresDataTool):
group_by = ("store__translation_project__language__code", )
| {"golden_diff": "diff --git a/pootle/apps/pootle_data/project_data.py b/pootle/apps/pootle_data/project_data.py\n--- a/pootle/apps/pootle_data/project_data.py\n+++ b/pootle/apps/pootle_data/project_data.py\n@@ -6,6 +6,8 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+from pootle.core.delegate import revision\n+\n from .utils import RelatedStoresDataTool, RelatedTPsDataTool\n \n \n@@ -17,6 +19,11 @@\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n \n+ @property\n+ def rev_cache_key(self):\n+ return revision.get(\n+ self.context.__class__)(self.context.directory).get(key=\"stats\")\n+\n \n class ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n", "issue": "update_stores' last updated date doesn't tickle up to project overview/language list\nWhen updating against templates, the /projects/projectname/ listing doesn't reflect the **last update**, **unless** the update affected a file in the **toplevel** dir.\r\n\r\nWithin a language overview (/lang/projectname), changes deep in a directory hierarchy will also affect the parent directory's last-change date.\r\n\r\nusing pootle 2.8.0b5 (TDF)\r\n\r\nscreenshots to clarify. overview lists last update as e.g. 3 weeks ago:\r\n\r\n\r\ndrilling down to the language reveals that the files in xmlsecurity actually had been updated only 8 hours ago (in fact xmlsecurity/uiconfig/ui.po)\r\n\r\n\r\n(also sorting by the last updated is not working properly, goes from 10months to 8 hours, to 3weeks\u2026) \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom .utils import RelatedStoresDataTool, RelatedTPsDataTool\n\n\nclass ProjectDataTool(RelatedTPsDataTool):\n \"\"\"Retrieves aggregate stats for a Project\"\"\"\n\n cache_key_name = \"project\"\n\n def filter_data(self, qs):\n return qs.filter(tp__project=self.context)\n\n\nclass ProjectResourceDataTool(RelatedStoresDataTool):\n group_by = (\"store__translation_project__language__code\", )\n cache_key_name = \"project_resource\"\n\n @property\n def project_path(self):\n return (\n \"/%s%s\"\n % (self.project_code, self.tp_path))\n\n @property\n def tp_path(self):\n return (\n \"/%s%s\"\n % (self.dir_path,\n self.filename))\n\n def filter_data(self, qs):\n return (\n qs.filter(store__translation_project__project__code=self.project_code)\n .filter(store__tp_path__startswith=self.tp_path))\n\n @property\n def context_name(self):\n return \"/projects%s\" % self.project_path\n\n\nclass ProjectSetDataTool(RelatedTPsDataTool):\n group_by = (\"tp__project__code\", )\n cache_key_name = \"projects\"\n\n def get_root_child_path(self, child):\n return child[self.group_by[0]]\n\n @property\n def context_name(self):\n return \"ALL\"\n", "path": "pootle/apps/pootle_data/project_data.py"}]} | 1,368 | 219 |
gh_patches_debug_38407 | rasdani/github-patches | git_diff | wagtail__wagtail-556 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search: Make update_index update all backends
Currently, it only updates the default backend. It should update all search backends.
</issue>
<code>
[start of wagtail/wagtailsearch/management/commands/update_index.py]
1 from django.core.management.base import BaseCommand
2 from django.db import models
3
4 from wagtail.wagtailsearch import Indexed, get_search_backend
5
6
7 class Command(BaseCommand):
8 def handle(self, **options):
9 # Print info
10 self.stdout.write("Getting object list")
11
12 # Get list of indexed models
13 indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]
14
15 # Object set
16 object_set = {}
17
18 # Add all objects to object set and detect any duplicates
19 # Duplicates are caused when both a model and a derived model are indexed
20 # Eg, if BlogPost inherits from Page and both of these models are indexed
21 # If we were to add all objects from both models into the index, all the BlogPosts will have two entries
22 for model in indexed_models:
23 # Get toplevel content type
24 toplevel_content_type = model.indexed_get_toplevel_content_type()
25
26 # Loop through objects
27 for obj in model.get_indexed_objects():
28 # Get key for this object
29 key = toplevel_content_type + ':' + str(obj.pk)
30
31 # Check if this key already exists
32 if key in object_set:
33 # Conflict, work out who should get this space
34 # The object with the longest content type string gets the space
35 # Eg, "wagtailcore.Page-myapp.BlogPost" kicks out "wagtailcore.Page"
36 if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):
37 # Take the spot
38 object_set[key] = obj
39 else:
40 # Space free, take it
41 object_set[key] = obj
42
43 # Search backend
44 if 'backend' in options:
45 s = options['backend']
46 else:
47 s = get_search_backend()
48
49 # Reset the index
50 self.stdout.write("Reseting index")
51 s.reset_index()
52
53 # Add types
54 self.stdout.write("Adding types")
55 for model in indexed_models:
56 s.add_type(model)
57
58 # Add objects to index
59 self.stdout.write("Adding objects")
60 for result in s.add_bulk(object_set.values()):
61 self.stdout.write(result[0] + ' ' + str(result[1]))
62
63 # Refresh index
64 self.stdout.write("Refreshing index")
65 s.refresh_index()
66
[end of wagtail/wagtailsearch/management/commands/update_index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailsearch/management/commands/update_index.py b/wagtail/wagtailsearch/management/commands/update_index.py
--- a/wagtail/wagtailsearch/management/commands/update_index.py
+++ b/wagtail/wagtailsearch/management/commands/update_index.py
@@ -1,11 +1,22 @@
+from optparse import make_option
+
from django.core.management.base import BaseCommand
from django.db import models
+from django.conf import settings
from wagtail.wagtailsearch import Indexed, get_search_backend
+def get_search_backends():
+ if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):
+ for backend in settings.WAGTAILSEARCH_BACKENDS.keys():
+ yield backend, get_search_backend(backend)
+ else:
+ yield 'default', get_search_backend('default')
+
+
class Command(BaseCommand):
- def handle(self, **options):
+ def get_object_list(self):
# Print info
self.stdout.write("Getting object list")
@@ -40,26 +51,51 @@
# Space free, take it
object_set[key] = obj
- # Search backend
- if 'backend' in options:
- s = options['backend']
- else:
- s = get_search_backend()
+ return indexed_models, object_set.values()
+
+ def update_backend(self, backend, models, object_list, backend_name=''):
+ # Print info
+ self.stdout.write("Updating backend: " + backend_name)
+
+ # Get backend
+ if backend is None:
+ backend = get_search_backend(backend_name)
# Reset the index
- self.stdout.write("Reseting index")
- s.reset_index()
+ self.stdout.write(backend_name + ": Reseting index")
+ backend.reset_index()
# Add types
- self.stdout.write("Adding types")
- for model in indexed_models:
- s.add_type(model)
+ self.stdout.write(backend_name + ": Adding types")
+ for model in models:
+ backend.add_type(model)
# Add objects to index
- self.stdout.write("Adding objects")
- for result in s.add_bulk(object_set.values()):
+ self.stdout.write(backend_name + ": Adding objects")
+ for result in backend.add_bulk(object_list):
self.stdout.write(result[0] + ' ' + str(result[1]))
# Refresh index
- self.stdout.write("Refreshing index")
- s.refresh_index()
+ self.stdout.write(backend_name + ": Refreshing index")
+ backend.refresh_index()
+
+ option_list = BaseCommand.option_list + (
+ make_option('--backend',
+ action='store',
+ dest='backend_name',
+ default=False,
+ help="Specify a backend to update",
+ ),
+ )
+
+ def handle(self, **options):
+ # Get object list
+ models, object_list = self.get_object_list()
+
+ # Update backends
+ if 'backend_name' in options:
+ backend = dict(get_search_backends())[options['backend_name']]
+ self.update_backend(backend, models, object_list, backend_name=options['backend_name'])
+ else:
+ for backend_name, backend in get_search_backends():
+ self.update_backend(backend, models, object_list, backend_name=backend_name)
| {"golden_diff": "diff --git a/wagtail/wagtailsearch/management/commands/update_index.py b/wagtail/wagtailsearch/management/commands/update_index.py\n--- a/wagtail/wagtailsearch/management/commands/update_index.py\n+++ b/wagtail/wagtailsearch/management/commands/update_index.py\n@@ -1,11 +1,22 @@\n+from optparse import make_option\n+\n from django.core.management.base import BaseCommand\n from django.db import models\n+from django.conf import settings\n \n from wagtail.wagtailsearch import Indexed, get_search_backend\n \n \n+def get_search_backends():\n+ if hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):\n+ for backend in settings.WAGTAILSEARCH_BACKENDS.keys():\n+ yield backend, get_search_backend(backend)\n+ else:\n+ yield 'default', get_search_backend('default')\n+\n+\n class Command(BaseCommand):\n- def handle(self, **options):\n+ def get_object_list(self):\n # Print info\n self.stdout.write(\"Getting object list\")\n \n@@ -40,26 +51,51 @@\n # Space free, take it\n object_set[key] = obj\n \n- # Search backend\n- if 'backend' in options:\n- s = options['backend']\n- else:\n- s = get_search_backend()\n+ return indexed_models, object_set.values()\n+\n+ def update_backend(self, backend, models, object_list, backend_name=''):\n+ # Print info\n+ self.stdout.write(\"Updating backend: \" + backend_name)\n+\n+ # Get backend\n+ if backend is None:\n+ backend = get_search_backend(backend_name)\n \n # Reset the index\n- self.stdout.write(\"Reseting index\")\n- s.reset_index()\n+ self.stdout.write(backend_name + \": Reseting index\")\n+ backend.reset_index()\n \n # Add types\n- self.stdout.write(\"Adding types\")\n- for model in indexed_models:\n- s.add_type(model)\n+ self.stdout.write(backend_name + \": Adding types\")\n+ for model in models:\n+ backend.add_type(model)\n \n # Add objects to index\n- self.stdout.write(\"Adding objects\")\n- for result in s.add_bulk(object_set.values()):\n+ self.stdout.write(backend_name + \": Adding objects\")\n+ for result in backend.add_bulk(object_list):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n \n # Refresh index\n- self.stdout.write(\"Refreshing index\")\n- s.refresh_index()\n+ self.stdout.write(backend_name + \": Refreshing index\")\n+ backend.refresh_index()\n+\n+ option_list = BaseCommand.option_list + (\n+ make_option('--backend',\n+ action='store',\n+ dest='backend_name',\n+ default=False,\n+ help=\"Specify a backend to update\",\n+ ),\n+ )\n+\n+ def handle(self, **options):\n+ # Get object list\n+ models, object_list = self.get_object_list()\n+\n+ # Update backends\n+ if 'backend_name' in options:\n+ backend = dict(get_search_backends())[options['backend_name']]\n+ self.update_backend(backend, models, object_list, backend_name=options['backend_name'])\n+ else:\n+ for backend_name, backend in get_search_backends():\n+ self.update_backend(backend, models, object_list, backend_name=backend_name)\n", "issue": "Search: Make update_index update all backends\nCurrently, it only updates the default backend. 
It should update all search backends.\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.db import models\n\nfrom wagtail.wagtailsearch import Indexed, get_search_backend\n\n\nclass Command(BaseCommand):\n def handle(self, **options):\n # Print info\n self.stdout.write(\"Getting object list\")\n\n # Get list of indexed models\n indexed_models = [model for model in models.get_models() if issubclass(model, Indexed)]\n\n # Object set\n object_set = {}\n\n # Add all objects to object set and detect any duplicates\n # Duplicates are caused when both a model and a derived model are indexed\n # Eg, if BlogPost inherits from Page and both of these models are indexed\n # If we were to add all objects from both models into the index, all the BlogPosts will have two entries\n for model in indexed_models:\n # Get toplevel content type\n toplevel_content_type = model.indexed_get_toplevel_content_type()\n\n # Loop through objects\n for obj in model.get_indexed_objects():\n # Get key for this object\n key = toplevel_content_type + ':' + str(obj.pk)\n\n # Check if this key already exists\n if key in object_set:\n # Conflict, work out who should get this space\n # The object with the longest content type string gets the space\n # Eg, \"wagtailcore.Page-myapp.BlogPost\" kicks out \"wagtailcore.Page\"\n if len(obj.indexed_get_content_type()) > len(object_set[key].indexed_get_content_type()):\n # Take the spot\n object_set[key] = obj\n else:\n # Space free, take it\n object_set[key] = obj\n\n # Search backend\n if 'backend' in options:\n s = options['backend']\n else:\n s = get_search_backend()\n\n # Reset the index\n self.stdout.write(\"Reseting index\")\n s.reset_index()\n\n # Add types\n self.stdout.write(\"Adding types\")\n for model in indexed_models:\n s.add_type(model)\n\n # Add objects to index\n self.stdout.write(\"Adding objects\")\n for result in s.add_bulk(object_set.values()):\n self.stdout.write(result[0] + ' ' + str(result[1]))\n\n # Refresh index\n self.stdout.write(\"Refreshing index\")\n s.refresh_index()\n", "path": "wagtail/wagtailsearch/management/commands/update_index.py"}]} | 1,207 | 747 |
gh_patches_debug_12742 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Speak to legal team about updating our T&Cs for GDPR
</issue>
<code>
[start of portal/admin.py]
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2018, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django.contrib import admin
38 from django.contrib.auth.models import User
39 from django.contrib.auth.admin import UserAdmin
40
41
42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification
43
44
45 class ClassAdmin(admin.ModelAdmin):
46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
47 list_filter = ['teacher']
48 readonly_fields = ['teacher']
49
50
51 class SchoolAdmin(admin.ModelAdmin):
52 search_fields = ['name', 'country', 'postcode', 'town']
53 list_filter = ['postcode', 'country']
54
55
56 class StudentAdmin(admin.ModelAdmin):
57 search_fields = ['new_user__first_name', 'new_user__last_name']
58 list_filter = ['class_field', 'class_field__teacher']
59 readonly_fields = ['user', 'new_user']
60 raw_id_fields = ['class_field', 'pending_class_request']
61
62
63 class TeacherAdmin(admin.ModelAdmin):
64 search_fields = ['new_user__first_name', 'new_user__last_name']
65 list_filter = ['school']
66 readonly_fields = ['user', 'new_user']
67 raw_id_fields = ['school', 'pending_join_request']
68
69
70 class UserProfileAdmin(admin.ModelAdmin):
71 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
72 list_filter = ['user__date_joined']
73 list_display = ['user', 'joined_recently']
74 readonly_fields = ['user']
75
76
77 class EmailVerificationAdmin(admin.ModelAdmin):
78 search_fields = ['new_user']
79
80
81 UserAdmin.list_display += ('date_joined',)
82 UserAdmin.list_filter += ('date_joined',)
83
84
85 admin.site.register(Class, ClassAdmin)
86 admin.site.register(Student, StudentAdmin)
87 admin.site.register(Guardian)
88 admin.site.register(Teacher, TeacherAdmin)
89 admin.site.register(School, SchoolAdmin)
90 admin.site.unregister(User)
91 admin.site.register(User, UserAdmin)
92 admin.site.register(UserProfile, UserProfileAdmin)
93 admin.site.register(FrontPageNews)
94 admin.site.register(EmailVerification, EmailVerificationAdmin)
95
[end of portal/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/portal/admin.py b/portal/admin.py
--- a/portal/admin.py
+++ b/portal/admin.py
@@ -68,14 +68,14 @@
class UserProfileAdmin(admin.ModelAdmin):
- search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']
list_filter = ['user__date_joined']
list_display = ['user', 'joined_recently']
readonly_fields = ['user']
class EmailVerificationAdmin(admin.ModelAdmin):
- search_fields = ['new_user']
+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']
UserAdmin.list_display += ('date_joined',)
| {"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -68,14 +68,14 @@\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n- search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n- search_fields = ['new_user']\n+ search_fields = ['user__first_name', 'user__last_name', 'user__username', 'user__date_joined']\n \n \n UserAdmin.list_display += ('date_joined',)\n", "issue": "Speak to legal team about updating our T&Cs for GDPR\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2018, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n readonly_fields = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['class_field', 'pending_class_request']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['school', 'pending_join_request']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]} | 1,578 | 193 |
gh_patches_debug_22097 | rasdani/github-patches | git_diff | svthalia__concrexit-2199 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add filter/display of members-only value to document admin
### Is your feature request related to a problem? Please describe.
It is not really easy to see which documents are marked as members-only, and it is impossible to easily get a list of documents filtered by that true/false value.
### Describe the solution you'd like
I'd like to see more information about the documents in the admin page so that I do not have to open the detail page.
### Motivation
Easier to manage these files.
### Describe alternatives you've considered
The only alternative is not doing this.
### Additional context
#2084 could have been prevented.
</issue>
<code>
[start of website/documents/admin.py]
1 """Registers admin interfaces for the documents module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4 from django.utils.translation import gettext_lazy as _
5
6 from documents import forms
7 from documents.models import (
8 AnnualDocument,
9 AssociationDocument,
10 EventDocument,
11 GeneralMeeting,
12 Minutes,
13 MiscellaneousDocument,
14 )
15 from documents.services import is_owner
16
17
18 class MinutesInline(admin.StackedInline):
19 """Inline for minutes of a general meeting."""
20
21 model = Minutes
22 form = forms.MinutesForm
23 extra = 0
24
25
26 @admin.register(GeneralMeeting)
27 class GeneralMeetingAdmin(ModelAdmin):
28 """Manage the general meetings."""
29
30 form = forms.GeneralMeetingForm
31 inlines = [
32 MinutesInline,
33 ]
34 list_filter = ("datetime",)
35
36
37 class LectureYearFilter(admin.SimpleListFilter):
38 """Filter the memberships on those started or ended in a lecture year."""
39
40 title = _("lecture year")
41 parameter_name = "lecture_year"
42
43 def lookups(self, request, model_admin):
44 if AnnualDocument.objects.count() > 0:
45 first_year = AnnualDocument.objects.order_by("year").first().year
46 last_year = AnnualDocument.objects.order_by("year").last().year
47
48 return [
49 (year, f"{year}-{year + 1}")
50 for year in range(last_year, first_year - 1, -1)
51 ]
52 return []
53
54 def queryset(self, request, queryset):
55 if not self.value():
56 return queryset
57
58 year = int(self.value())
59
60 return queryset.filter(year=year)
61
62
63 @admin.register(AnnualDocument)
64 class AnnualDocumentAdmin(ModelAdmin):
65 """Manage the annual documents."""
66
67 form = forms.AnnualDocumentForm
68 list_filter = (
69 LectureYearFilter,
70 "created",
71 "last_updated",
72 )
73
74
75 @admin.register(AssociationDocument)
76 class AssociationDocumentAdmin(ModelAdmin):
77 """Manage the association documents."""
78
79 form = forms.AssociationDocumentForm
80 list_filter = (
81 "created",
82 "last_updated",
83 )
84
85
86 @admin.register(EventDocument)
87 class EventDocumentAdmin(ModelAdmin):
88 """Manage the event documents."""
89
90 form = forms.EventDocumentForm
91 list_filter = (
92 "created",
93 "last_updated",
94 )
95
96 def has_change_permission(self, request, obj=None):
97 """Only allow access to the change form if the user is an owner."""
98 if obj is not None and not is_owner(request.member, obj):
99 return False
100 return super().has_change_permission(request, obj)
101
102 def has_delete_permission(self, request, obj=None):
103 """Only allow delete access if the user is an owner."""
104 if obj is not None and not is_owner(request.member, obj):
105 return False
106 return super().has_delete_permission(request, obj)
107
108
109 @admin.register(MiscellaneousDocument)
110 class MiscellaneousDocumentAdmin(ModelAdmin):
111 """Manage the miscellaneous documents."""
112
113 form = forms.MiscellaneousDocumentForm
114 list_filter = (
115 "created",
116 "last_updated",
117 )
118
[end of website/documents/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/documents/admin.py b/website/documents/admin.py
--- a/website/documents/admin.py
+++ b/website/documents/admin.py
@@ -69,6 +69,11 @@
LectureYearFilter,
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
@@ -80,6 +85,11 @@
list_filter = (
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
@@ -91,6 +101,11 @@
list_filter = (
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
def has_change_permission(self, request, obj=None):
@@ -114,4 +129,9 @@
list_filter = (
"created",
"last_updated",
+ "members_only",
+ )
+ list_display = (
+ "__str__",
+ "members_only",
)
| {"golden_diff": "diff --git a/website/documents/admin.py b/website/documents/admin.py\n--- a/website/documents/admin.py\n+++ b/website/documents/admin.py\n@@ -69,6 +69,11 @@\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n \n@@ -80,6 +85,11 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n \n@@ -91,6 +101,11 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n \n def has_change_permission(self, request, obj=None):\n@@ -114,4 +129,9 @@\n list_filter = (\n \"created\",\n \"last_updated\",\n+ \"members_only\",\n+ )\n+ list_display = (\n+ \"__str__\",\n+ \"members_only\",\n )\n", "issue": "Add filter/display of members-only value to document admin\n### Is your feature request related to a problem? Please describe.\r\nIt is not really issue to see which documents are marked as members only. And it is impossible to easily get a list with documents that have a true/false value.\r\n\r\n### Describe the solution you'd like\r\nI'd like to see more information about the documents in the admin page so that I do not have to open the detail page.\r\n\r\n### Motivation\r\nEasier to manage these files.\r\n\r\n### Describe alternatives you've considered\r\nThe only alternative is not doing this.\r\n\r\n### Additional context\r\n#2084 could have been prevented.\r\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the documents module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom documents import forms\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n EventDocument,\n GeneralMeeting,\n Minutes,\n MiscellaneousDocument,\n)\nfrom documents.services import is_owner\n\n\nclass MinutesInline(admin.StackedInline):\n \"\"\"Inline for minutes of a general meeting.\"\"\"\n\n model = Minutes\n form = forms.MinutesForm\n extra = 0\n\n\[email protected](GeneralMeeting)\nclass GeneralMeetingAdmin(ModelAdmin):\n \"\"\"Manage the general meetings.\"\"\"\n\n form = forms.GeneralMeetingForm\n inlines = [\n MinutesInline,\n ]\n list_filter = (\"datetime\",)\n\n\nclass LectureYearFilter(admin.SimpleListFilter):\n \"\"\"Filter the memberships on those started or ended in a lecture year.\"\"\"\n\n title = _(\"lecture year\")\n parameter_name = \"lecture_year\"\n\n def lookups(self, request, model_admin):\n if AnnualDocument.objects.count() > 0:\n first_year = AnnualDocument.objects.order_by(\"year\").first().year\n last_year = AnnualDocument.objects.order_by(\"year\").last().year\n\n return [\n (year, f\"{year}-{year + 1}\")\n for year in range(last_year, first_year - 1, -1)\n ]\n return []\n\n def queryset(self, request, queryset):\n if not self.value():\n return queryset\n\n year = int(self.value())\n\n return queryset.filter(year=year)\n\n\[email protected](AnnualDocument)\nclass AnnualDocumentAdmin(ModelAdmin):\n \"\"\"Manage the annual documents.\"\"\"\n\n form = forms.AnnualDocumentForm\n list_filter = (\n LectureYearFilter,\n \"created\",\n \"last_updated\",\n )\n\n\[email protected](AssociationDocument)\nclass AssociationDocumentAdmin(ModelAdmin):\n \"\"\"Manage the association documents.\"\"\"\n\n form = forms.AssociationDocumentForm\n list_filter = (\n \"created\",\n 
\"last_updated\",\n )\n\n\[email protected](EventDocument)\nclass EventDocumentAdmin(ModelAdmin):\n \"\"\"Manage the event documents.\"\"\"\n\n form = forms.EventDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n )\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_change_permission(request, obj)\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow delete access if the user is an owner.\"\"\"\n if obj is not None and not is_owner(request.member, obj):\n return False\n return super().has_delete_permission(request, obj)\n\n\[email protected](MiscellaneousDocument)\nclass MiscellaneousDocumentAdmin(ModelAdmin):\n \"\"\"Manage the miscellaneous documents.\"\"\"\n\n form = forms.MiscellaneousDocumentForm\n list_filter = (\n \"created\",\n \"last_updated\",\n )\n", "path": "website/documents/admin.py"}]} | 1,569 | 273 |
gh_patches_debug_8038 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-302 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NumberPrompt doesn't accept retry value
## Version
v4.5
## Describe the bug
When you send an invalid number to a `NumberPrompt`, it sends out a retry prompt.
When attempting to send a 2nd response after being reprompted, you get a timeout error.
## To Reproduce
1. Create a `NumberPrompt` object
2. When it prompts you for a number, send in a non-numeric value (e.g. `"hello"`)
* this will trigger a retry prompt (e.g. `"You must enter a number."`)
3. Try sending in another value--no matter what type of value, you get a timeout error


## Expected behavior
To be able to send in a 2nd value when reprompted
## Additional context
```python
async def test_number_prompt_retry(self):
async def exec_test(turn_context: TurnContext) -> None:
dialog_context: DialogContext = await dialogs.create_context(turn_context)
results: DialogTurnResult = await dialog_context.continue_dialog()
if results.status == DialogTurnStatus.Empty:
options = PromptOptions(
prompt=Activity(type=ActivityTypes.message, text="Enter a number."),
retry_prompt=Activity(
type=ActivityTypes.message, text="You must enter a number."
),
)
await dialog_context.prompt("NumberPrompt", options)
elif results.status == DialogTurnStatus.Complete:
number_result = results.result
await turn_context.send_activity(
MessageFactory.text(f"Bot received the number '{number_result}'.")
)
await convo_state.save_changes(turn_context)
adapter = TestAdapter(exec_test)
convo_state = ConversationState(MemoryStorage())
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
number_prompt = NumberPrompt(
dialog_id="NumberPrompt", validator=None, default_locale=Culture.English
)
dialogs.add(number_prompt)
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("Enter a number.")
# TODO: something is breaking in the validators or retry prompt
# where it does not accept the 2nd answer after reprompting the user
# for another value
step3 = await step2.send("hello")
step4 = await step3.assert_reply("You must enter a number.")
step5 = await step4.send("64")
await step5.assert_reply("Bot received the number '64'.")
```
[bug]
</issue>
<code>
[start of libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import Callable, Dict
5
6 from recognizers_number import recognize_number
7 from recognizers_text import Culture, ModelResult
8 from babel.numbers import parse_decimal
9
10 from botbuilder.core.turn_context import TurnContext
11 from botbuilder.schema import ActivityTypes
12
13 from .prompt import Prompt, PromptValidatorContext
14 from .prompt_options import PromptOptions
15 from .prompt_recognizer_result import PromptRecognizerResult
16
17
18 class NumberPrompt(Prompt):
19 # TODO: PromptValidator needs to be fixed
20 # Does not accept answer as intended (times out)
21 def __init__(
22 self,
23 dialog_id: str,
24 validator: Callable[[PromptValidatorContext], bool] = None,
25 default_locale: str = None,
26 ):
27 super(NumberPrompt, self).__init__(dialog_id, validator)
28 self.default_locale = default_locale
29
30 async def on_prompt(
31 self,
32 turn_context: TurnContext,
33 state: Dict[str, object],
34 options: PromptOptions,
35 is_retry: bool,
36 ):
37 if not turn_context:
38 raise TypeError("NumberPrompt.on_prompt(): turn_context cannot be None.")
39 if not options:
40 raise TypeError("NumberPrompt.on_prompt(): options cannot be None.")
41
42 if is_retry and options.retry_prompt is not None:
43 turn_context.send_activity(options.retry_prompt)
44 elif options.prompt is not None:
45 await turn_context.send_activity(options.prompt)
46
47 async def on_recognize(
48 self,
49 turn_context: TurnContext,
50 state: Dict[str, object],
51 options: PromptOptions,
52 ) -> PromptRecognizerResult:
53 if not turn_context:
54 raise TypeError("NumberPrompt.on_recognize(): turn_context cannot be None.")
55
56 result = PromptRecognizerResult()
57 if turn_context.activity.type == ActivityTypes.message:
58 message = turn_context.activity
59 culture = self._get_culture(turn_context)
60 results: [ModelResult] = recognize_number(message.text, culture)
61
62 if results:
63 result.succeeded = True
64 result.value = parse_decimal(
65 results[0].resolution["value"], locale=culture.replace("-", "_")
66 )
67
68 return result
69
70 def _get_culture(self, turn_context: TurnContext):
71 culture = (
72 turn_context.activity.locale
73 if turn_context.activity.locale
74 else self.default_locale
75 )
76
77 if not culture:
78 culture = Culture.English
79
80 return culture
81
[end of libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py
@@ -40,7 +40,7 @@
raise TypeError("NumberPrompt.on_prompt(): options cannot be None.")
if is_retry and options.retry_prompt is not None:
- turn_context.send_activity(options.retry_prompt)
+ await turn_context.send_activity(options.retry_prompt)
elif options.prompt is not None:
await turn_context.send_activity(options.prompt)
| {"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py\n@@ -40,7 +40,7 @@\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n \n if is_retry and options.retry_prompt is not None:\n- turn_context.send_activity(options.retry_prompt)\n+ await turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n", "issue": "NumberPrompt doesn't accept retry value\n## Version\r\nv4.5\r\n\r\n## Describe the bug\r\nWhen you send an invalid number to a `NumberPrompt`, it sends out a retry prompt.\r\nWhen attempting to send a 2nd response after being reprompted, you get a timeout error.\r\n\r\n\r\n\r\n## To Reproduce\r\n1. Create a `NumberPrompt` object\r\n2. When it prompts you for a number, send in a non-numeric value (e.g. `\"hello\"`)\r\n * this will trigger a retry prompt (e.g. `\"You must enter a number.\"`)\r\n3. Try sending in another value--no matter what type of value, you get a timeout error\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n## Expected behavior\r\nTo be able to send in a 2nd value when reprompted\r\n\r\n## Additional context\r\n```python\r\nasync def test_number_prompt_retry(self):\r\n async def exec_test(turn_context: TurnContext) -> None:\r\n dialog_context: DialogContext = await dialogs.create_context(turn_context)\r\n\r\n results: DialogTurnResult = await dialog_context.continue_dialog()\r\n\r\n if results.status == DialogTurnStatus.Empty:\r\n options = PromptOptions(\r\n prompt=Activity(type=ActivityTypes.message, text=\"Enter a number.\"),\r\n retry_prompt=Activity(\r\n type=ActivityTypes.message, text=\"You must enter a number.\"\r\n ),\r\n )\r\n await dialog_context.prompt(\"NumberPrompt\", options)\r\n elif results.status == DialogTurnStatus.Complete:\r\n number_result = results.result\r\n await turn_context.send_activity(\r\n MessageFactory.text(f\"Bot received the number '{number_result}'.\")\r\n )\r\n\r\n await convo_state.save_changes(turn_context)\r\n\r\n adapter = TestAdapter(exec_test)\r\n\r\n convo_state = ConversationState(MemoryStorage())\r\n dialog_state = convo_state.create_property(\"dialogState\")\r\n dialogs = DialogSet(dialog_state)\r\n number_prompt = NumberPrompt(\r\n dialog_id=\"NumberPrompt\", validator=None, default_locale=Culture.English\r\n )\r\n dialogs.add(number_prompt)\r\n\r\n step1 = await adapter.send(\"hello\")\r\n step2 = await step1.assert_reply(\"Enter a number.\")\r\n # TODO: something is breaking in the validators or retry prompt\r\n # where it does not accept the 2nd answer after reprompting the user\r\n # for another value\r\n step3 = await step2.send(\"hello\")\r\n step4 = await step3.assert_reply(\"You must enter a number.\")\r\n step5 = await step4.send(\"64\")\r\n await step5.assert_reply(\"Bot received the number '64'.\")\r\n```\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import Callable, Dict\n\nfrom recognizers_number import recognize_number\nfrom recognizers_text import Culture, ModelResult\nfrom babel.numbers import parse_decimal\n\nfrom botbuilder.core.turn_context import TurnContext\nfrom botbuilder.schema import ActivityTypes\n\nfrom .prompt import Prompt, PromptValidatorContext\nfrom .prompt_options import PromptOptions\nfrom .prompt_recognizer_result import PromptRecognizerResult\n\n\nclass NumberPrompt(Prompt):\n # TODO: PromptValidator needs to be fixed\n # Does not accept answer as intended (times out)\n def __init__(\n self,\n dialog_id: str,\n validator: Callable[[PromptValidatorContext], bool] = None,\n default_locale: str = None,\n ):\n super(NumberPrompt, self).__init__(dialog_id, validator)\n self.default_locale = default_locale\n\n async def on_prompt(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n is_retry: bool,\n ):\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_prompt(): turn_context cannot be None.\")\n if not options:\n raise TypeError(\"NumberPrompt.on_prompt(): options cannot be None.\")\n\n if is_retry and options.retry_prompt is not None:\n turn_context.send_activity(options.retry_prompt)\n elif options.prompt is not None:\n await turn_context.send_activity(options.prompt)\n\n async def on_recognize(\n self,\n turn_context: TurnContext,\n state: Dict[str, object],\n options: PromptOptions,\n ) -> PromptRecognizerResult:\n if not turn_context:\n raise TypeError(\"NumberPrompt.on_recognize(): turn_context cannot be None.\")\n\n result = PromptRecognizerResult()\n if turn_context.activity.type == ActivityTypes.message:\n message = turn_context.activity\n culture = self._get_culture(turn_context)\n results: [ModelResult] = recognize_number(message.text, culture)\n\n if results:\n result.succeeded = True\n result.value = parse_decimal(\n results[0].resolution[\"value\"], locale=culture.replace(\"-\", \"_\")\n )\n\n return result\n\n def _get_culture(self, turn_context: TurnContext):\n culture = (\n turn_context.activity.locale\n if turn_context.activity.locale\n else self.default_locale\n )\n\n if not culture:\n culture = Culture.English\n\n return culture\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/number_prompt.py"}]} | 1,898 | 161 |
gh_patches_debug_24268 | rasdani/github-patches | git_diff | dmlc__gluon-nlp-832 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ATIS/SNIPS datasets and GLUE datasets don't appear in the website API doc
http://gluon-nlp.mxnet.io/api/modules/data.html
does not show the details of ATISDataset/SNIPSDataset and GlueCoLA, GlueSST2, GlueSTSB, GlueQQP, GlueRTE, GlueMNLI, GlueQNLI, GlueWNLI
</issue>
<code>
[start of src/gluonnlp/data/__init__.py]
1 # coding: utf-8
2
3 # Licensed to the Apache Software Foundation (ASF) under one
4 # or more contributor license agreements. See the NOTICE file
5 # distributed with this work for additional information
6 # regarding copyright ownership. The ASF licenses this file
7 # to you under the Apache License, Version 2.0 (the
8 # "License"); you may not use this file except in compliance
9 # with the License. You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing,
14 # software distributed under the License is distributed on an
15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 # KIND, either express or implied. See the License for the
17 # specific language governing permissions and limitations
18 # under the License.
19
20 # pylint: disable=wildcard-import
21 """This module includes common utilities such as data readers and counter."""
22
23 from . import (batchify, candidate_sampler, conll, corpora, dataloader,
24 dataset, question_answering, registry, sampler, sentiment,
25 stream, transforms, translation, utils,
26 word_embedding_evaluation, intent_slot)
27 from .candidate_sampler import *
28 from .conll import *
29 from .glue import *
30 from .corpora import *
31 from .dataloader import *
32 from .dataset import *
33 from .question_answering import *
34 from .registry import *
35 from .sampler import *
36 from .sentiment import *
37 from .stream import *
38 from .transforms import *
39 from .translation import *
40 from .utils import *
41 from .word_embedding_evaluation import *
42 from .intent_slot import *
43
44 __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__
45 + dataset.__all__ + corpora.__all__ + sentiment.__all__ +
46 word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +
47 translation.__all__ + registry.__all__ + question_answering.__all__
48 + dataloader.__all__ + candidate_sampler.__all__)
49
[end of src/gluonnlp/data/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/gluonnlp/data/__init__.py b/src/gluonnlp/data/__init__.py
--- a/src/gluonnlp/data/__init__.py
+++ b/src/gluonnlp/data/__init__.py
@@ -23,7 +23,7 @@
from . import (batchify, candidate_sampler, conll, corpora, dataloader,
dataset, question_answering, registry, sampler, sentiment,
stream, transforms, translation, utils,
- word_embedding_evaluation, intent_slot)
+ word_embedding_evaluation, intent_slot, glue)
from .candidate_sampler import *
from .conll import *
from .glue import *
@@ -42,7 +42,8 @@
from .intent_slot import *
__all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__
- + dataset.__all__ + corpora.__all__ + sentiment.__all__ +
- word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +
- translation.__all__ + registry.__all__ + question_answering.__all__
- + dataloader.__all__ + candidate_sampler.__all__)
+ + dataset.__all__ + corpora.__all__ + sentiment.__all__
+ + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__
+ + translation.__all__ + registry.__all__ + question_answering.__all__
+ + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__
+ + glue.__all__)
| {"golden_diff": "diff --git a/src/gluonnlp/data/__init__.py b/src/gluonnlp/data/__init__.py\n--- a/src/gluonnlp/data/__init__.py\n+++ b/src/gluonnlp/data/__init__.py\n@@ -23,7 +23,7 @@\n from . import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n- word_embedding_evaluation, intent_slot)\n+ word_embedding_evaluation, intent_slot, glue)\n from .candidate_sampler import *\n from .conll import *\n from .glue import *\n@@ -42,7 +42,8 @@\n from .intent_slot import *\n \n __all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n- + dataset.__all__ + corpora.__all__ + sentiment.__all__ +\n- word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +\n- translation.__all__ + registry.__all__ + question_answering.__all__\n- + dataloader.__all__ + candidate_sampler.__all__)\n+ + dataset.__all__ + corpora.__all__ + sentiment.__all__\n+ + word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__\n+ + translation.__all__ + registry.__all__ + question_answering.__all__\n+ + dataloader.__all__ + candidate_sampler.__all__ + intent_slot.__all__\n+ + glue.__all__)\n", "issue": "ATIS/SNIPS datasets and GLUE datasets don't appear in the website API doc \nhttp://gluon-nlp.mxnet.io/api/modules/data.html\r\n\r\ndoes not show the details of ATISDataset/SNIPSDataset and GlueCoLA, GlueSST2, GlueSTSB, GlueQQP, GlueRTE, GlueMNLI, GlueQNLI, GlueWNLI\r\n\n", "before_files": [{"content": "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=wildcard-import\n\"\"\"This module includes common utilities such as data readers and counter.\"\"\"\n\nfrom . import (batchify, candidate_sampler, conll, corpora, dataloader,\n dataset, question_answering, registry, sampler, sentiment,\n stream, transforms, translation, utils,\n word_embedding_evaluation, intent_slot)\nfrom .candidate_sampler import *\nfrom .conll import *\nfrom .glue import *\nfrom .corpora import *\nfrom .dataloader import *\nfrom .dataset import *\nfrom .question_answering import *\nfrom .registry import *\nfrom .sampler import *\nfrom .sentiment import *\nfrom .stream import *\nfrom .transforms import *\nfrom .translation import *\nfrom .utils import *\nfrom .word_embedding_evaluation import *\nfrom .intent_slot import *\n\n__all__ = (['batchify'] + utils.__all__ + transforms.__all__ + sampler.__all__\n + dataset.__all__ + corpora.__all__ + sentiment.__all__ +\n word_embedding_evaluation.__all__ + stream.__all__ + conll.__all__ +\n translation.__all__ + registry.__all__ + question_answering.__all__\n + dataloader.__all__ + candidate_sampler.__all__)\n", "path": "src/gluonnlp/data/__init__.py"}]} | 1,169 | 347 |
gh_patches_debug_29639 | rasdani/github-patches | git_diff | frappe__frappe-2519 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move app installation to background
Long installs time out the installation of the app and lead to broken installs.
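One possible direction, sketched below with helpers that already exist in Frappe (`frappe.utils.background_jobs.enqueue`, `frappe.msgprint`), is to queue the heavy work on a background worker and return to the client immediately; `start_install` is a hypothetical job target here:

```python
# Sketch only: enqueue the long-running install instead of blocking the
# HTTP request until it times out.
import frappe
from frappe import _
from frappe.utils.background_jobs import enqueue


@frappe.whitelist()
def install_app(name):
    enqueue('frappe.desk.page.applications.applications.start_install', name=name)
    frappe.msgprint(_('Queued for install'))
```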
</issue>
<code>
[start of frappe/desk/page/applications/applications.py]
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # MIT License. See license.txt
3
4 from __future__ import unicode_literals
5 import frappe
6 import frappe.utils
7 import frappe.installer
8 import frappe.sessions
9 import subprocess
10 import os
11 import json
12 from frappe import _
13 from distutils.spawn import find_executable
14
15 @frappe.whitelist()
16 def get_app_list():
17 """Get list of all apps with properties, installed, category from hooks and
18 `frappe/data/app_listing/` if an entry exists"""
19 out = {}
20 installed = frappe.get_installed_apps()
21 for app in frappe.get_all_apps(True):
22 app_hooks = frappe.get_hooks(app_name=app)
23
24 if app not in installed and app_hooks.get('hide_in_installer'):
25 continue
26
27 out[app] = {}
28 for key in ("app_name", "app_title", "app_description", "app_icon",
29 "app_publisher", "app_version", "app_url", "app_color"):
30 val = app_hooks.get(key) or []
31 out[app][key] = val[0] if len(val) else ""
32
33 if app in installed:
34 out[app]["installed"] = 1
35
36 for app_from_list in get_app_listing().values():
37 if app_from_list.app_name in out:
38 out[app_from_list.app_name].update(app_from_list)
39 else:
40 if not frappe.conf.disallow_app_listing:
41 out[app_from_list.app_name] = app_from_list
42
43 return out
44
45 def get_app_listing():
46 """Get apps listed in `frappe/data/app_listing/`"""
47 apps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing')
48 out = {}
49 for app in os.listdir(apps_listing_dir):
50 if app.endswith(".json"):
51 with open(os.path.join(apps_listing_dir, app)) as f:
52 out[app[:-5]] = frappe._dict(json.load(f))
53 return out
54
55 @frappe.whitelist()
56 def install_app(name):
57 """Install app, if app is not installed in local environment, install it via git url in
58 `frappe/data/app_listing/`"""
59 frappe.only_for("System Manager")
60
61 if name not in frappe.get_all_apps(True):
62 if not frappe.conf.disallow_app_listing:
63 get_app(name)
64 frappe.cache().delete_value(["app_hooks"])
65 # reload sys.path
66 import site
67 reload(site)
68 else:
69 # will only come via direct API
70 frappe.throw("Listing app not allowed")
71
72 app_hooks = frappe.get_hooks(app_name=name)
73 if app_hooks.get('hide_in_installer'):
74 frappe.throw(_("You cannot install this app"))
75
76 frappe.publish_realtime("install_app_progress", {"status": _("Installing App {0}").format(name)},
77 user=frappe.session.user)
78
79 frappe.installer.install_app(name)
80
81 frappe.publish_realtime("install_app_progress", {"status": _("{0} Installed").format(name)},
82 user=frappe.session.user)
83
84 def get_app(name):
85 """Get app using git clone and install it in bench environment"""
86 app_listing = get_app_listing()
87 if name not in app_listing:
88 frappe.throw(_("Unknown app {0}").format(name))
89 raise frappe.ValidationError
90
91 frappe.publish_realtime("install_app_progress", {"status": _("Downloading App {0}").format(name)},
92 user=frappe.session.user)
93
94 args = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']]
95
96 try:
97 subprocess.check_call(args, cwd=frappe.utils.get_bench_path(),
98 stderr=subprocess.STDOUT)
99 return "okay"
100 except subprocess.CalledProcessError as e:
101 frappe.msgprint("<b>" + " ".join(args) + "</b>")
102 frappe.msgprint(e.output)
103 return e.output
104
[end of frappe/desk/page/applications/applications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/desk/page/applications/applications.py b/frappe/desk/page/applications/applications.py
--- a/frappe/desk/page/applications/applications.py
+++ b/frappe/desk/page/applications/applications.py
@@ -11,6 +11,7 @@
import json
from frappe import _
from distutils.spawn import find_executable
+from frappe.utils.background_jobs import enqueue
@frappe.whitelist()
def get_app_list():
@@ -73,6 +74,12 @@
if app_hooks.get('hide_in_installer'):
frappe.throw(_("You cannot install this app"))
+ enqueue('frappe.desk.page.applications.applications.start_install', name=name)
+
+ frappe.msgprint(_('Queued for install'))
+
+
+def start_install(name):
frappe.publish_realtime("install_app_progress", {"status": _("Installing App {0}").format(name)},
user=frappe.session.user)
@@ -81,6 +88,20 @@
frappe.publish_realtime("install_app_progress", {"status": _("{0} Installed").format(name)},
user=frappe.session.user)
[email protected]()
+def remove_app(name):
+ """Remove installed app"""
+ frappe.only_for("System Manager")
+
+ if name in frappe.get_installed_apps():
+ enqueue('frappe.desk.page.applications.applications.start_remove', name=name)
+
+ frappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name)))
+
+def start_remove(name):
+ frappe.installer.remove_app(app_name=name, yes=True)
+ frappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name)))
+
def get_app(name):
"""Get app using git clone and install it in bench environment"""
app_listing = get_app_listing()
| {"golden_diff": "diff --git a/frappe/desk/page/applications/applications.py b/frappe/desk/page/applications/applications.py\n--- a/frappe/desk/page/applications/applications.py\n+++ b/frappe/desk/page/applications/applications.py\n@@ -11,6 +11,7 @@\n import json\n from frappe import _\n from distutils.spawn import find_executable\n+from frappe.utils.background_jobs import enqueue\n \n @frappe.whitelist()\n def get_app_list():\n@@ -73,6 +74,12 @@\n \tif app_hooks.get('hide_in_installer'):\n \t\tfrappe.throw(_(\"You cannot install this app\"))\n \n+\tenqueue('frappe.desk.page.applications.applications.start_install', name=name)\n+\n+\tfrappe.msgprint(_('Queued for install'))\n+\n+\n+def start_install(name):\n \tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n \t\tuser=frappe.session.user)\n \n@@ -81,6 +88,20 @@\n \tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n \t\tuser=frappe.session.user)\n \[email protected]()\n+def remove_app(name):\n+\t\"\"\"Remove installed app\"\"\"\n+\tfrappe.only_for(\"System Manager\")\n+\n+\tif name in frappe.get_installed_apps():\n+\t\tenqueue('frappe.desk.page.applications.applications.start_remove', name=name)\n+\n+\tfrappe.msgprint(_('Queued for backup and removing {0}').format(frappe.bold(name)))\n+\n+def start_remove(name):\n+\tfrappe.installer.remove_app(app_name=name, yes=True)\n+\tfrappe.publish_realtime('msgprint', _('App {0} removed').format(frappe.bold(name)))\n+\n def get_app(name):\n \t\"\"\"Get app using git clone and install it in bench environment\"\"\"\n \tapp_listing = get_app_listing()\n", "issue": "Move app installation to background\nLong installs timeout the installation of the app and leads to broken installs.\n\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport frappe.utils\nimport frappe.installer\nimport frappe.sessions\nimport subprocess\nimport os\nimport json\nfrom frappe import _\nfrom distutils.spawn import find_executable\n\[email protected]()\ndef get_app_list():\n\t\"\"\"Get list of all apps with properties, installed, category from hooks and\n\t`frappe/data/app_listing/` if an entry exists\"\"\"\n\tout = {}\n\tinstalled = frappe.get_installed_apps()\n\tfor app in frappe.get_all_apps(True):\n\t\tapp_hooks = frappe.get_hooks(app_name=app)\n\n\t\tif app not in installed and app_hooks.get('hide_in_installer'):\n\t\t\tcontinue\n\n\t\tout[app] = {}\n\t\tfor key in (\"app_name\", \"app_title\", \"app_description\", \"app_icon\",\n\t\t\t\"app_publisher\", \"app_version\", \"app_url\", \"app_color\"):\n\t\t\t val = app_hooks.get(key) or []\n\t\t\t out[app][key] = val[0] if len(val) else \"\"\n\n\t\tif app in installed:\n\t\t\tout[app][\"installed\"] = 1\n\n\tfor app_from_list in get_app_listing().values():\n\t\tif app_from_list.app_name in out:\n\t\t\tout[app_from_list.app_name].update(app_from_list)\n\t\telse:\n\t\t\tif not frappe.conf.disallow_app_listing:\n\t\t\t\tout[app_from_list.app_name] = app_from_list\n\n\treturn out\n\ndef get_app_listing():\n\t\"\"\"Get apps listed in `frappe/data/app_listing/`\"\"\"\n\tapps_listing_dir = os.path.join(os.path.dirname(frappe.__file__), 'data', 'app_listing')\n\tout = {}\n\tfor app in os.listdir(apps_listing_dir):\n\t\tif app.endswith(\".json\"):\n\t\t\twith open(os.path.join(apps_listing_dir, app)) as f:\n\t\t\t\tout[app[:-5]] = frappe._dict(json.load(f))\n\treturn out\n\[email protected]()\ndef install_app(name):\n\t\"\"\"Install app, if app is not installed in local environment, install it via git url in\n\t`frappe/data/app_listing/`\"\"\"\n\tfrappe.only_for(\"System Manager\")\n\n\tif name not in frappe.get_all_apps(True):\n\t\tif not frappe.conf.disallow_app_listing:\n\t\t\tget_app(name)\n\t\t\tfrappe.cache().delete_value([\"app_hooks\"])\n\t\t\t# reload sys.path\n\t\t\timport site\n\t\t\treload(site)\n\t\telse:\n\t\t\t# will only come via direct API\n\t\t\tfrappe.throw(\"Listing app not allowed\")\n\n\tapp_hooks = frappe.get_hooks(app_name=name)\n\tif app_hooks.get('hide_in_installer'):\n\t\tfrappe.throw(_(\"You cannot install this app\"))\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Installing App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\tfrappe.installer.install_app(name)\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"{0} Installed\").format(name)},\n\t\tuser=frappe.session.user)\n\ndef get_app(name):\n\t\"\"\"Get app using git clone and install it in bench environment\"\"\"\n\tapp_listing = get_app_listing()\n\tif name not in app_listing:\n\t\tfrappe.throw(_(\"Unknown app {0}\").format(name))\n\t\traise frappe.ValidationError\n\n\tfrappe.publish_realtime(\"install_app_progress\", {\"status\": _(\"Downloading App {0}\").format(name)},\n\t\tuser=frappe.session.user)\n\n\targs = [find_executable('bench'), 'get-app', name, app_listing[name]['repo_url']]\n\n\ttry:\n\t\tsubprocess.check_call(args, cwd=frappe.utils.get_bench_path(),\n\t\t\tstderr=subprocess.STDOUT)\n\t\treturn \"okay\"\n\texcept subprocess.CalledProcessError as e:\n\t\tfrappe.msgprint(\"<b>\" + \" \".join(args) + \"</b>\")\n\t\tfrappe.msgprint(e.output)\n\t\treturn e.output\n", "path": "frappe/desk/page/applications/applications.py"}]} | 1,662 | 421 |
gh_patches_debug_1748 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-750 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typing error in recognize_google() method
In mycroft/stt/\_\_init\_\_.py, line 74:
Replacing the mistyped 's' parameter with self.lang fixed the problem.
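For reference, the corrected line would simply pass the configured language instead of the undefined name:

```python
return self.recognizer.recognize_google(audio, self.token, self.lang)
```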
</issue>
<code>
[start of mycroft/stt/__init__.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17 from abc import ABCMeta, abstractmethod
18
19 from speech_recognition import Recognizer
20
21 from mycroft.api import STTApi
22 from mycroft.configuration import ConfigurationManager
23 from mycroft.util.log import getLogger
24
25 __author__ = "jdorleans"
26
27 LOG = getLogger("STT")
28
29
30 class STT(object):
31 __metaclass__ = ABCMeta
32
33 def __init__(self):
34 config_core = ConfigurationManager.get()
35 self.lang = str(self.init_language(config_core))
36 config_stt = config_core.get("stt", {})
37 self.config = config_stt.get(config_stt.get("module"), {})
38 self.credential = self.config.get("credential", {})
39 self.recognizer = Recognizer()
40
41 @staticmethod
42 def init_language(config_core):
43 langs = config_core.get("lang", "en-US").split("-")
44 return langs[0].lower() + "-" + langs[1].upper()
45
46 @abstractmethod
47 def execute(self, audio, language=None):
48 pass
49
50
51 class TokenSTT(STT):
52 __metaclass__ = ABCMeta
53
54 def __init__(self):
55 super(TokenSTT, self).__init__()
56 self.token = str(self.credential.get("token"))
57
58
59 class BasicSTT(STT):
60 __metaclass__ = ABCMeta
61
62 def __init__(self):
63 super(BasicSTT, self).__init__()
64 self.username = str(self.credential.get("username"))
65 self.password = str(self.credential.get("password"))
66
67
68 class GoogleSTT(TokenSTT):
69 def __init__(self):
70 super(GoogleSTT, self).__init__()
71
72 def execute(self, audio, language=None):
73 self.lang = language or self.lang
74 return self.recognizer.recognize_google(audio, self.token, s)
75
76
77 class WITSTT(TokenSTT):
78 def __init__(self):
79 super(WITSTT, self).__init__()
80
81 def execute(self, audio, language=None):
82 LOG.warn("WITSTT language should be configured at wit.ai settings.")
83 return self.recognizer.recognize_wit(audio, self.token)
84
85
86 class IBMSTT(BasicSTT):
87 def __init__(self):
88 super(IBMSTT, self).__init__()
89
90 def execute(self, audio, language=None):
91 self.lang = language or self.lang
92 return self.recognizer.recognize_ibm(audio, self.username,
93 self.password, self.lang)
94
95
96 class MycroftSTT(STT):
97 def __init__(self):
98 super(MycroftSTT, self).__init__()
99 self.api = STTApi()
100
101 def execute(self, audio, language=None):
102 self.lang = language or self.lang
103 return self.api.stt(audio.get_flac_data(), self.lang, 1)[0]
104
105
106 class STTFactory(object):
107 CLASSES = {
108 "mycroft": MycroftSTT,
109 "google": GoogleSTT,
110 "wit": WITSTT,
111 "ibm": IBMSTT
112 }
113
114 @staticmethod
115 def create():
116 config = ConfigurationManager.get().get("stt", {})
117 module = config.get("module", "mycroft")
118 clazz = STTFactory.CLASSES.get(module)
119 return clazz()
120
[end of mycroft/stt/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/stt/__init__.py b/mycroft/stt/__init__.py
--- a/mycroft/stt/__init__.py
+++ b/mycroft/stt/__init__.py
@@ -71,7 +71,7 @@
def execute(self, audio, language=None):
self.lang = language or self.lang
- return self.recognizer.recognize_google(audio, self.token, s)
+ return self.recognizer.recognize_google(audio, self.token, self.lang)
class WITSTT(TokenSTT):
| {"golden_diff": "diff --git a/mycroft/stt/__init__.py b/mycroft/stt/__init__.py\n--- a/mycroft/stt/__init__.py\n+++ b/mycroft/stt/__init__.py\n@@ -71,7 +71,7 @@\n \n def execute(self, audio, language=None):\n self.lang = language or self.lang\n- return self.recognizer.recognize_google(audio, self.token, s)\n+ return self.recognizer.recognize_google(audio, self.token, self.lang)\n \n \n class WITSTT(TokenSTT):\n", "issue": "Typing error in recognize_google() methode\nIn mycroft/stt/\\_\\_init\\_\\_.py line 74 :\r\nReplacing mistyped 's' parameter by self.lang fixed the problem.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\nfrom abc import ABCMeta, abstractmethod\n\nfrom speech_recognition import Recognizer\n\nfrom mycroft.api import STTApi\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.util.log import getLogger\n\n__author__ = \"jdorleans\"\n\nLOG = getLogger(\"STT\")\n\n\nclass STT(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n config_core = ConfigurationManager.get()\n self.lang = str(self.init_language(config_core))\n config_stt = config_core.get(\"stt\", {})\n self.config = config_stt.get(config_stt.get(\"module\"), {})\n self.credential = self.config.get(\"credential\", {})\n self.recognizer = Recognizer()\n\n @staticmethod\n def init_language(config_core):\n langs = config_core.get(\"lang\", \"en-US\").split(\"-\")\n return langs[0].lower() + \"-\" + langs[1].upper()\n\n @abstractmethod\n def execute(self, audio, language=None):\n pass\n\n\nclass TokenSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(TokenSTT, self).__init__()\n self.token = str(self.credential.get(\"token\"))\n\n\nclass BasicSTT(STT):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n super(BasicSTT, self).__init__()\n self.username = str(self.credential.get(\"username\"))\n self.password = str(self.credential.get(\"password\"))\n\n\nclass GoogleSTT(TokenSTT):\n def __init__(self):\n super(GoogleSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_google(audio, self.token, s)\n\n\nclass WITSTT(TokenSTT):\n def __init__(self):\n super(WITSTT, self).__init__()\n\n def execute(self, audio, language=None):\n LOG.warn(\"WITSTT language should be configured at wit.ai settings.\")\n return self.recognizer.recognize_wit(audio, self.token)\n\n\nclass IBMSTT(BasicSTT):\n def __init__(self):\n super(IBMSTT, self).__init__()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return self.recognizer.recognize_ibm(audio, self.username,\n self.password, self.lang)\n\n\nclass MycroftSTT(STT):\n def __init__(self):\n super(MycroftSTT, self).__init__()\n self.api = STTApi()\n\n def execute(self, audio, language=None):\n self.lang = language or self.lang\n return 
self.api.stt(audio.get_flac_data(), self.lang, 1)[0]\n\n\nclass STTFactory(object):\n CLASSES = {\n \"mycroft\": MycroftSTT,\n \"google\": GoogleSTT,\n \"wit\": WITSTT,\n \"ibm\": IBMSTT\n }\n\n @staticmethod\n def create():\n config = ConfigurationManager.get().get(\"stt\", {})\n module = config.get(\"module\", \"mycroft\")\n clazz = STTFactory.CLASSES.get(module)\n return clazz()\n", "path": "mycroft/stt/__init__.py"}]} | 1,715 | 123 |
gh_patches_debug_6541 | rasdani/github-patches | git_diff | pytorch__vision-5775 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong label mapping in vision/torchvision/datasets/flowers102.py
### 🐛 Describe the bug
From line 70 in the file vision/torchvision/datasets/flowers102.py:
```
self._labels = []
self._image_files = []
for image_id in image_ids:
    self._labels.append(image_id_to_label[image_id]) # Note: the bug is here. Currently the labels range from 1 to 102, but they should range from 0 to 101; consider changing this line to self._labels.append(image_id_to_label[image_id] - 1).
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
```
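A minimal sketch of the adjustment being suggested: either shift each label at append time, or shift the whole mapping once so the labels land in the 0 to 101 range (both assume `labels["labels"]` is the 1-based array loaded from `imagelabels.mat`):

```python
# Option A (as proposed above): subtract 1 per appended label
self._labels.append(image_id_to_label[image_id] - 1)

# Option B: shift once while building the mapping and keep the append as-is
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
```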
### Versions
PyTorch version: 1.11.0
Is debug build: False
CUDA used to build PyTorch: 11.3
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.4 LTS (x86_64)
GCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
Clang version: Could not collect
CMake version: version 3.16.3
Libc version: glibc-2.31
Python version: 3.8.13 (default, Mar 28 2022, 11:38:47) [GCC 7.5.0] (64-bit runtime)
Python platform: Linux-5.4.0-104-generic-x86_64-with-glibc2.17
Is CUDA available: True
CUDA runtime version: Could not collect
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 3090
Nvidia driver version: 510.54
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
Versions of relevant libraries:
[pip3] advertorch==0.2.3
[pip3] numpy==1.21.2
[pip3] torch==1.11.0
[pip3] torchaudio==0.11.0
[pip3] torchvision==0.7.0
[conda] advertorch 0.2.3 pypi_0 pypi
[conda] blas 1.0 mkl https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free
[conda] cudatoolkit 11.3.1 h2bc3f7f_2 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] ffmpeg 4.3 hf484d3e_0 pytorch
[conda] mkl 2021.4.0 h06a4308_640 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] mkl-service 2.4.0 py38h7f8727e_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] mkl_fft 1.3.1 py38hd3c417c_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] mkl_random 1.2.2 py38h51133e4_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] numpy 1.21.2 py38h20f2e39_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] numpy-base 1.21.2 py38h79a1101_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
[conda] pytorch 1.11.0 py3.8_cuda11.3_cudnn8.2.0_0 pytorch
[conda] pytorch-mutex 1.0 cuda pytorch
[conda] torchaudio 0.11.0 py38_cu113 pytorch
[conda] torchvision 0.7.0 pypi_0 pypi
cc @pmeier @YosuaMichael
</issue>
<code>
[start of torchvision/datasets/flowers102.py]
1 from pathlib import Path
2 from typing import Any, Tuple, Callable, Optional
3
4 import PIL.Image
5
6 from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
7 from .vision import VisionDataset
8
9
10 class Flowers102(VisionDataset):
11 """`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
12
13 .. warning::
14
15 This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
16
17 Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
18 flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
19 between 40 and 258 images.
20
21 The images have large scale, pose and light variations. In addition, there are categories that
22 have large variations within the category, and several very similar categories.
23
24 Args:
25 root (string): Root directory of the dataset.
26 split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
27 transform (callable, optional): A function/transform that takes in an PIL image and returns a
28 transformed version. E.g, ``transforms.RandomCrop``.
29 target_transform (callable, optional): A function/transform that takes in the target and transforms it.
30 download (bool, optional): If true, downloads the dataset from the internet and
31 puts it in root directory. If dataset is already downloaded, it is not
32 downloaded again.
33 """
34
35 _download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
36 _file_dict = { # filename, md5
37 "image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
38 "label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
39 "setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
40 }
41 _splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
42
43 def __init__(
44 self,
45 root: str,
46 split: str = "train",
47 transform: Optional[Callable] = None,
48 target_transform: Optional[Callable] = None,
49 download: bool = False,
50 ) -> None:
51 super().__init__(root, transform=transform, target_transform=target_transform)
52 self._split = verify_str_arg(split, "split", ("train", "val", "test"))
53 self._base_folder = Path(self.root) / "flowers-102"
54 self._images_folder = self._base_folder / "jpg"
55
56 if download:
57 self.download()
58
59 if not self._check_integrity():
60 raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
61
62 from scipy.io import loadmat
63
64 set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
65 image_ids = set_ids[self._splits_map[self._split]].tolist()
66
67 labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
68 image_id_to_label = dict(enumerate(labels["labels"].tolist(), 1))
69
70 self._labels = []
71 self._image_files = []
72 for image_id in image_ids:
73 self._labels.append(image_id_to_label[image_id])
74 self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
75
76 def __len__(self) -> int:
77 return len(self._image_files)
78
79 def __getitem__(self, idx) -> Tuple[Any, Any]:
80 image_file, label = self._image_files[idx], self._labels[idx]
81 image = PIL.Image.open(image_file).convert("RGB")
82
83 if self.transform:
84 image = self.transform(image)
85
86 if self.target_transform:
87 label = self.target_transform(label)
88
89 return image, label
90
91 def extra_repr(self) -> str:
92 return f"split={self._split}"
93
94 def _check_integrity(self):
95 if not (self._images_folder.exists() and self._images_folder.is_dir()):
96 return False
97
98 for id in ["label", "setid"]:
99 filename, md5 = self._file_dict[id]
100 if not check_integrity(str(self._base_folder / filename), md5):
101 return False
102 return True
103
104 def download(self):
105 if self._check_integrity():
106 return
107 download_and_extract_archive(
108 f"{self._download_url_prefix}{self._file_dict['image'][0]}",
109 str(self._base_folder),
110 md5=self._file_dict["image"][1],
111 )
112 for id in ["label", "setid"]:
113 filename, md5 = self._file_dict[id]
114 download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
115
[end of torchvision/datasets/flowers102.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/datasets/flowers102.py b/torchvision/datasets/flowers102.py
--- a/torchvision/datasets/flowers102.py
+++ b/torchvision/datasets/flowers102.py
@@ -65,7 +65,7 @@
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
- image_id_to_label = dict(enumerate(labels["labels"].tolist(), 1))
+ image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
| {"golden_diff": "diff --git a/torchvision/datasets/flowers102.py b/torchvision/datasets/flowers102.py\n--- a/torchvision/datasets/flowers102.py\n+++ b/torchvision/datasets/flowers102.py\n@@ -65,7 +65,7 @@\n image_ids = set_ids[self._splits_map[self._split]].tolist()\n \n labels = loadmat(self._base_folder / self._file_dict[\"label\"][0], squeeze_me=True)\n- image_id_to_label = dict(enumerate(labels[\"labels\"].tolist(), 1))\n+ image_id_to_label = dict(enumerate((labels[\"labels\"] - 1).tolist(), 1))\n \n self._labels = []\n self._image_files = []\n", "issue": "Wrong label mapping in vision/torchvision/datasets/flowers102.py\n### \ud83d\udc1b Describe the bug\n\nfrom line 70 in the file vision/torchvision/datasets/flowers102.py\r\n```\r\nself._labels = []\r\nself._image_files = []\r\nfor image_id in image_ids:\r\n self._labels.append(image_id_to_label[image_id]) # Note: the bug is here, current the labels are ranged from 1 to 102, which should be ranged from 0 to 101, consider to change this line into self._labels.append(image_id_to_label[image_id] - 1) ?\r\n self._image_files.append(self._images_folder / f\"image_{image_id:05d}.jpg\")\r\n```\n\n### Versions\n\nPyTorch version: 1.11.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: 11.3\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.4 LTS (x86_64)\r\nGCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\r\nClang version: Could not collect\r\nCMake version: version 3.16.3\r\nLibc version: glibc-2.31\r\n\r\nPython version: 3.8.13 (default, Mar 28 2022, 11:38:47) [GCC 7.5.0] (64-bit runtime)\r\nPython platform: Linux-5.4.0-104-generic-x86_64-with-glibc2.17\r\nIs CUDA available: True\r\nCUDA runtime version: Could not collect\r\nGPU models and configuration: GPU 0: NVIDIA GeForce RTX 3090\r\nNvidia driver version: 510.54\r\ncuDNN version: Could not collect\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nVersions of relevant libraries:\r\n[pip3] advertorch==0.2.3\r\n[pip3] numpy==1.21.2\r\n[pip3] torch==1.11.0\r\n[pip3] torchaudio==0.11.0\r\n[pip3] torchvision==0.7.0\r\n[conda] advertorch 0.2.3 pypi_0 pypi\r\n[conda] blas 1.0 mkl https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free\r\n[conda] cudatoolkit 11.3.1 h2bc3f7f_2 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] ffmpeg 4.3 hf484d3e_0 pytorch\r\n[conda] mkl 2021.4.0 h06a4308_640 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] mkl-service 2.4.0 py38h7f8727e_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] mkl_fft 1.3.1 py38hd3c417c_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] mkl_random 1.2.2 py38h51133e4_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] numpy 1.21.2 py38h20f2e39_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] numpy-base 1.21.2 py38h79a1101_0 https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\r\n[conda] pytorch 1.11.0 py3.8_cuda11.3_cudnn8.2.0_0 pytorch\r\n[conda] pytorch-mutex 1.0 cuda pytorch\r\n[conda] torchaudio 0.11.0 py38_cu113 pytorch\r\n[conda] torchvision 0.7.0 pypi_0 pypi\n\ncc @pmeier @YosuaMichael\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Any, Tuple, Callable, Optional\n\nimport PIL.Image\n\nfrom .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg\nfrom .vision import VisionDataset\n\n\nclass Flowers102(VisionDataset):\n \"\"\"`Oxford 102 Flower 
<https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.\n\n .. warning::\n\n This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.\n\n Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The\n flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of\n between 40 and 258 images.\n\n The images have large scale, pose and light variations. In addition, there are categories that\n have large variations within the category, and several very similar categories.\n\n Args:\n root (string): Root directory of the dataset.\n split (string, optional): The dataset split, supports ``\"train\"`` (default), ``\"val\"``, or ``\"test\"``.\n transform (callable, optional): A function/transform that takes in an PIL image and returns a\n transformed version. E.g, ``transforms.RandomCrop``.\n target_transform (callable, optional): A function/transform that takes in the target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n _download_url_prefix = \"https://www.robots.ox.ac.uk/~vgg/data/flowers/102/\"\n _file_dict = { # filename, md5\n \"image\": (\"102flowers.tgz\", \"52808999861908f626f3c1f4e79d11fa\"),\n \"label\": (\"imagelabels.mat\", \"e0620be6f572b9609742df49c70aed4d\"),\n \"setid\": (\"setid.mat\", \"a5357ecc9cb78c4bef273ce3793fc85c\"),\n }\n _splits_map = {\"train\": \"trnid\", \"val\": \"valid\", \"test\": \"tstid\"}\n\n def __init__(\n self,\n root: str,\n split: str = \"train\",\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n super().__init__(root, transform=transform, target_transform=target_transform)\n self._split = verify_str_arg(split, \"split\", (\"train\", \"val\", \"test\"))\n self._base_folder = Path(self.root) / \"flowers-102\"\n self._images_folder = self._base_folder / \"jpg\"\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\"Dataset not found or corrupted. 
You can use download=True to download it\")\n\n from scipy.io import loadmat\n\n set_ids = loadmat(self._base_folder / self._file_dict[\"setid\"][0], squeeze_me=True)\n image_ids = set_ids[self._splits_map[self._split]].tolist()\n\n labels = loadmat(self._base_folder / self._file_dict[\"label\"][0], squeeze_me=True)\n image_id_to_label = dict(enumerate(labels[\"labels\"].tolist(), 1))\n\n self._labels = []\n self._image_files = []\n for image_id in image_ids:\n self._labels.append(image_id_to_label[image_id])\n self._image_files.append(self._images_folder / f\"image_{image_id:05d}.jpg\")\n\n def __len__(self) -> int:\n return len(self._image_files)\n\n def __getitem__(self, idx) -> Tuple[Any, Any]:\n image_file, label = self._image_files[idx], self._labels[idx]\n image = PIL.Image.open(image_file).convert(\"RGB\")\n\n if self.transform:\n image = self.transform(image)\n\n if self.target_transform:\n label = self.target_transform(label)\n\n return image, label\n\n def extra_repr(self) -> str:\n return f\"split={self._split}\"\n\n def _check_integrity(self):\n if not (self._images_folder.exists() and self._images_folder.is_dir()):\n return False\n\n for id in [\"label\", \"setid\"]:\n filename, md5 = self._file_dict[id]\n if not check_integrity(str(self._base_folder / filename), md5):\n return False\n return True\n\n def download(self):\n if self._check_integrity():\n return\n download_and_extract_archive(\n f\"{self._download_url_prefix}{self._file_dict['image'][0]}\",\n str(self._base_folder),\n md5=self._file_dict[\"image\"][1],\n )\n for id in [\"label\", \"setid\"]:\n filename, md5 = self._file_dict[id]\n download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)\n", "path": "torchvision/datasets/flowers102.py"}]} | 2,996 | 166 |
gh_patches_debug_11370 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-901 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Subscription.pull() fails with KeyError
Hi,
My code below fails with a KeyError coming from the library:
``` python
sub = Subscription(SUBSCRIPTION_NAME, Topic(INCOMING_TOPIC_NAME))
while True:
try:
msgs = sub.pull()
...
except:
msg = 'poll %s: %s' % (SUBSCRIPTION_NAME, traceback.format_exc())
print msg
```
Fails with the message:
```
poll extractors: Traceback (most recent call last):
File "main.py", line 46, in main
msgs = sub.pull()
File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py", line 176, in pull
for info in response['receivedMessages']]
KeyError: 'receivedMessages'
```
This seems like a bug in the pubsub library code.
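A defensive sketch of what a fix inside `Subscription.pull` might look like, treating a missing `receivedMessages` key as an empty batch instead of indexing it directly:

```python
# Sketch: the pull response can omit 'receivedMessages' when nothing is
# available, so fall back to an empty sequence instead of raising KeyError.
response = connection.api_request(method='POST',
                                  path='%s:pull' % self.path,
                                  data=data)
return [(info['ackId'], Message.from_api_repr(info['message']))
        for info in response.get('receivedMessages', ())]
```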
</issue>
<code>
[start of gcloud/pubsub/subscription.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define API Subscriptions."""
16
17 from gcloud.exceptions import NotFound
18 from gcloud.pubsub.message import Message
19 from gcloud.pubsub.topic import Topic
20 from gcloud.pubsub._implicit_environ import _require_connection
21
22
23 class Subscription(object):
24 """Subscriptions receive messages published to their topics.
25
26 See:
27 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions
28
29 :type name: string
30 :param name: the name of the subscription
31
32 :type topic: :class:`gcloud.pubsub.topic.Topic`
33 :param topic: the topic to which the subscription belongs..
34
35 :type ack_deadline: int
36 :param ack_deadline: the deadline (in seconds) by which messages pulled
37 from the back-end must be acknowledged.
38
39 :type push_endpoint: string
40 :param push_endpoint: URL to which messages will be pushed by the back-end.
41 If not set, the application must pull messages.
42 """
43 def __init__(self, name, topic, ack_deadline=None, push_endpoint=None):
44 self.name = name
45 self.topic = topic
46 self.ack_deadline = ack_deadline
47 self.push_endpoint = push_endpoint
48
49 @classmethod
50 def from_api_repr(cls, resource, topics=None):
51 """Factory: construct a topic given its API representation
52
53 :type resource: dict
54 :param resource: topic resource representation returned from the API
55
56 :type topics: dict or None
57 :param topics: A mapping of topic names -> topics. If not passed,
58 the subscription will have a newly-created topic.
59
60 :rtype: :class:`gcloud.pubsub.subscription.Subscription`
61 """
62 if topics is None:
63 topics = {}
64 t_name = resource['topic']
65 topic = topics.get(t_name)
66 if topic is None:
67 topic = topics[t_name] = Topic.from_api_repr({'name': t_name})
68 _, _, _, name = resource['name'].split('/')
69 ack_deadline = resource.get('ackDeadlineSeconds')
70 push_config = resource.get('pushConfig', {})
71 push_endpoint = push_config.get('pushEndpoint')
72 return cls(name, topic, ack_deadline, push_endpoint)
73
74 @property
75 def path(self):
76 """URL path for the subscription's APIs"""
77 project = self.topic.project
78 return '/projects/%s/subscriptions/%s' % (project, self.name)
79
80 def create(self, connection=None):
81 """API call: create the subscription via a PUT request
82
83 See:
84 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/create
85
86 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
87 :param connection: the connection to use. If not passed,
88 falls back to the topic's connection.
89 """
90 data = {'topic': self.topic.full_name}
91
92 if self.ack_deadline is not None:
93 data['ackDeadline'] = self.ack_deadline
94
95 if self.push_endpoint is not None:
96 data['pushConfig'] = {'pushEndpoint': self.push_endpoint}
97
98 connection = _require_connection(connection)
99 connection.api_request(method='PUT', path=self.path, data=data)
100
101 def exists(self, connection=None):
102 """API call: test existence of the subscription via a GET request
103
104 See
105 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get
106
107 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
108 :param connection: the connection to use. If not passed,
109 falls back to the topic's connection.
110 """
111 connection = _require_connection(connection)
112 try:
113 connection.api_request(method='GET', path=self.path)
114 except NotFound:
115 return False
116 else:
117 return True
118
119 def reload(self, connection=None):
120 """API call: sync local subscription configuration via a GET request
121
122 See
123 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get
124
125 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
126 :param connection: the connection to use. If not passed,
127 falls back to the topic's connection.
128 """
129 connection = _require_connection(connection)
130 data = connection.api_request(method='GET', path=self.path)
131 self.ack_deadline = data.get('ackDeadline')
132 push_config = data.get('pushConfig', {})
133 self.push_endpoint = push_config.get('pushEndpoint')
134
135 def modify_push_configuration(self, push_endpoint, connection=None):
136 """API call: update the push endpoint for the subscription.
137
138 See:
139 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/modifyPushConfig
140
141 :type push_endpoint: string
142 :param push_endpoint: URL to which messages will be pushed by the
143 back-end. If None, the application must pull
144 messages.
145
146 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
147 :param connection: the connection to use. If not passed,
148 falls back to the topic's connection.
149 """
150 connection = _require_connection(connection)
151 data = {}
152 config = data['pushConfig'] = {}
153 if push_endpoint is not None:
154 config['pushEndpoint'] = push_endpoint
155 connection.api_request(method='POST',
156 path='%s:modifyPushConfig' % self.path,
157 data=data)
158 self.push_endpoint = push_endpoint
159
160 def pull(self, return_immediately=False, max_messages=1, connection=None):
161 """API call: retrieve messages for the subscription.
162
163 See:
164 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/pull
165
166 :type return_immediately: boolean
167 :param return_immediately: if True, the back-end returns even if no
168 messages are available; if False, the API
169 call blocks until one or more messages are
170 available.
171
172 :type max_messages: int
173 :param max_messages: the maximum number of messages to return.
174
175 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
176 :param connection: the connection to use. If not passed,
177 falls back to the topic's connection.
178
179 :rtype: list of (ack_id, message) tuples
180 :returns: sequence of tuples: ``ack_id`` is the ID to be used in a
181 subsequent call to :meth:`acknowledge`, and ``message``
182 is an instance of :class:`gcloud.pubsub.message.Message`.
183 """
184 connection = _require_connection(connection)
185 data = {'returnImmediately': return_immediately,
186 'maxMessages': max_messages}
187 response = connection.api_request(method='POST',
188 path='%s:pull' % self.path,
189 data=data)
190 return [(info['ackId'], Message.from_api_repr(info['message']))
191 for info in response['receivedMessages']]
192
193 def acknowledge(self, ack_ids, connection=None):
194 """API call: acknowledge retrieved messages for the subscription.
195
196 See:
197 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge
198
199 :type ack_ids: list of string
200 :param ack_ids: ack IDs of messages being acknowledged
201
202 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
203 :param connection: the connection to use. If not passed,
204 falls back to the topic's connection.
205 """
206 connection = _require_connection(connection)
207 data = {'ackIds': ack_ids}
208 connection.api_request(method='POST',
209 path='%s:acknowledge' % self.path,
210 data=data)
211
212 def modify_ack_deadline(self, ack_id, ack_deadline, connection=None):
213 """API call: update acknowledgement deadline for a retrieved message.
214
215 See:
216 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge
217
218 :type ack_id: string
219 :param ack_id: ack ID of message being updated
220
221 :type ack_deadline: int
222 :param ack_deadline: new deadline for the message, in seconds
223
224 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
225 :param connection: the connection to use. If not passed,
226 falls back to the topic's connection.
227 """
228 connection = _require_connection(connection)
229 data = {'ackId': ack_id, 'ackDeadlineSeconds': ack_deadline}
230 connection.api_request(method='POST',
231 path='%s:modifyAckDeadline' % self.path,
232 data=data)
233
234 def delete(self, connection=None):
235 """API call: delete the subscription via a DELETE request.
236
237 See:
238 https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/delete
239
240 :type connection: :class:`gcloud.pubsub.connection.Connection` or None
241 :param connection: the connection to use. If not passed,
242 falls back to the topic's connection.
243 """
244 connection = _require_connection(connection)
245 connection.api_request(method='DELETE', path=self.path)
246
[end of gcloud/pubsub/subscription.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gcloud/pubsub/subscription.py b/gcloud/pubsub/subscription.py
--- a/gcloud/pubsub/subscription.py
+++ b/gcloud/pubsub/subscription.py
@@ -188,7 +188,7 @@
path='%s:pull' % self.path,
data=data)
return [(info['ackId'], Message.from_api_repr(info['message']))
- for info in response['receivedMessages']]
+ for info in response.get('receivedMessages', ())]
def acknowledge(self, ack_ids, connection=None):
"""API call: acknowledge retrieved messages for the subscription.
| {"golden_diff": "diff --git a/gcloud/pubsub/subscription.py b/gcloud/pubsub/subscription.py\n--- a/gcloud/pubsub/subscription.py\n+++ b/gcloud/pubsub/subscription.py\n@@ -188,7 +188,7 @@\n path='%s:pull' % self.path,\n data=data)\n return [(info['ackId'], Message.from_api_repr(info['message']))\n- for info in response['receivedMessages']]\n+ for info in response.get('receivedMessages', ())]\n \n def acknowledge(self, ack_ids, connection=None):\n \"\"\"API call: acknowledge retrieved messages for the subscription.\n", "issue": "Subscription.pull() fails with KeyError\nHi,\n\nMy code below fails with a KeyError coming from the library:\n\n``` python\n sub = Subscription(SUBSCRIPTION_NAME, Topic(INCOMING_TOPIC_NAME))\n\n while True:\n try:\n msgs = sub.pull()\n ...\n except:\n msg = 'poll %s: %s' % (SUBSCRIPTION_NAME, traceback.format_exc())\n print msg\n```\n\nFails with the message:\n\n```\npoll extractors: Traceback (most recent call last):\n File \"main.py\", line 46, in main\n msgs = sub.pull()\n File \"/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py\", line 176, in pull\n for info in response['receivedMessages']]\nKeyError: 'receivedMessages'\n```\n\nThis seems like a bug on the pubsub library code.\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Subscriptions.\"\"\"\n\nfrom gcloud.exceptions import NotFound\nfrom gcloud.pubsub.message import Message\nfrom gcloud.pubsub.topic import Topic\nfrom gcloud.pubsub._implicit_environ import _require_connection\n\n\nclass Subscription(object):\n \"\"\"Subscriptions receive messages published to their topics.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions\n\n :type name: string\n :param name: the name of the subscription\n\n :type topic: :class:`gcloud.pubsub.topic.Topic`\n :param topic: the topic to which the subscription belongs..\n\n :type ack_deadline: int\n :param ack_deadline: the deadline (in seconds) by which messages pulled\n from the back-end must be acknowledged.\n\n :type push_endpoint: string\n :param push_endpoint: URL to which messages will be pushed by the back-end.\n If not set, the application must pull messages.\n \"\"\"\n def __init__(self, name, topic, ack_deadline=None, push_endpoint=None):\n self.name = name\n self.topic = topic\n self.ack_deadline = ack_deadline\n self.push_endpoint = push_endpoint\n\n @classmethod\n def from_api_repr(cls, resource, topics=None):\n \"\"\"Factory: construct a topic given its API representation\n\n :type resource: dict\n :param resource: topic resource representation returned from the API\n\n :type topics: dict or None\n :param topics: A mapping of topic names -> topics. 
If not passed,\n the subscription will have a newly-created topic.\n\n :rtype: :class:`gcloud.pubsub.subscription.Subscription`\n \"\"\"\n if topics is None:\n topics = {}\n t_name = resource['topic']\n topic = topics.get(t_name)\n if topic is None:\n topic = topics[t_name] = Topic.from_api_repr({'name': t_name})\n _, _, _, name = resource['name'].split('/')\n ack_deadline = resource.get('ackDeadlineSeconds')\n push_config = resource.get('pushConfig', {})\n push_endpoint = push_config.get('pushEndpoint')\n return cls(name, topic, ack_deadline, push_endpoint)\n\n @property\n def path(self):\n \"\"\"URL path for the subscription's APIs\"\"\"\n project = self.topic.project\n return '/projects/%s/subscriptions/%s' % (project, self.name)\n\n def create(self, connection=None):\n \"\"\"API call: create the subscription via a PUT request\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/create\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. If not passed,\n falls back to the topic's connection.\n \"\"\"\n data = {'topic': self.topic.full_name}\n\n if self.ack_deadline is not None:\n data['ackDeadline'] = self.ack_deadline\n\n if self.push_endpoint is not None:\n data['pushConfig'] = {'pushEndpoint': self.push_endpoint}\n\n connection = _require_connection(connection)\n connection.api_request(method='PUT', path=self.path, data=data)\n\n def exists(self, connection=None):\n \"\"\"API call: test existence of the subscription via a GET request\n\n See\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. If not passed,\n falls back to the topic's connection.\n \"\"\"\n connection = _require_connection(connection)\n try:\n connection.api_request(method='GET', path=self.path)\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, connection=None):\n \"\"\"API call: sync local subscription configuration via a GET request\n\n See\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/get\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. If not passed,\n falls back to the topic's connection.\n \"\"\"\n connection = _require_connection(connection)\n data = connection.api_request(method='GET', path=self.path)\n self.ack_deadline = data.get('ackDeadline')\n push_config = data.get('pushConfig', {})\n self.push_endpoint = push_config.get('pushEndpoint')\n\n def modify_push_configuration(self, push_endpoint, connection=None):\n \"\"\"API call: update the push endpoint for the subscription.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/modifyPushConfig\n\n :type push_endpoint: string\n :param push_endpoint: URL to which messages will be pushed by the\n back-end. If None, the application must pull\n messages.\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. 
If not passed,\n falls back to the topic's connection.\n \"\"\"\n connection = _require_connection(connection)\n data = {}\n config = data['pushConfig'] = {}\n if push_endpoint is not None:\n config['pushEndpoint'] = push_endpoint\n connection.api_request(method='POST',\n path='%s:modifyPushConfig' % self.path,\n data=data)\n self.push_endpoint = push_endpoint\n\n def pull(self, return_immediately=False, max_messages=1, connection=None):\n \"\"\"API call: retrieve messages for the subscription.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/pull\n\n :type return_immediately: boolean\n :param return_immediately: if True, the back-end returns even if no\n messages are available; if False, the API\n call blocks until one or more messages are\n available.\n\n :type max_messages: int\n :param max_messages: the maximum number of messages to return.\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. If not passed,\n falls back to the topic's connection.\n\n :rtype: list of (ack_id, message) tuples\n :returns: sequence of tuples: ``ack_id`` is the ID to be used in a\n subsequent call to :meth:`acknowledge`, and ``message``\n is an instance of :class:`gcloud.pubsub.message.Message`.\n \"\"\"\n connection = _require_connection(connection)\n data = {'returnImmediately': return_immediately,\n 'maxMessages': max_messages}\n response = connection.api_request(method='POST',\n path='%s:pull' % self.path,\n data=data)\n return [(info['ackId'], Message.from_api_repr(info['message']))\n for info in response['receivedMessages']]\n\n def acknowledge(self, ack_ids, connection=None):\n \"\"\"API call: acknowledge retrieved messages for the subscription.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge\n\n :type ack_ids: list of string\n :param ack_ids: ack IDs of messages being acknowledged\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. If not passed,\n falls back to the topic's connection.\n \"\"\"\n connection = _require_connection(connection)\n data = {'ackIds': ack_ids}\n connection.api_request(method='POST',\n path='%s:acknowledge' % self.path,\n data=data)\n\n def modify_ack_deadline(self, ack_id, ack_deadline, connection=None):\n \"\"\"API call: update acknowledgement deadline for a retrieved message.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/acknowledge\n\n :type ack_id: string\n :param ack_id: ack ID of message being updated\n\n :type ack_deadline: int\n :param ack_deadline: new deadline for the message, in seconds\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. If not passed,\n falls back to the topic's connection.\n \"\"\"\n connection = _require_connection(connection)\n data = {'ackId': ack_id, 'ackDeadlineSeconds': ack_deadline}\n connection.api_request(method='POST',\n path='%s:modifyAckDeadline' % self.path,\n data=data)\n\n def delete(self, connection=None):\n \"\"\"API call: delete the subscription via a DELETE request.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1beta2/projects/subscriptions/delete\n\n :type connection: :class:`gcloud.pubsub.connection.Connection` or None\n :param connection: the connection to use. 
If not passed,\n falls back to the topic's connection.\n \"\"\"\n connection = _require_connection(connection)\n connection.api_request(method='DELETE', path=self.path)\n", "path": "gcloud/pubsub/subscription.py"}]} | 3,474 | 133 |
gh_patches_debug_20733 | rasdani/github-patches | git_diff | pyca__cryptography-3539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove `return True` in OpenSSL ecdsa verify implementation
https://github.com/pyca/cryptography/blob/master/src/cryptography/hazmat/backends/openssl/ec.py#L89
This isn't part of our documented API
</issue>
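For context on the change: the golden diff further down in this record drops the undocumented return value, so callers should treat the absence of an `InvalidSignature` exception as success rather than checking a boolean result. A minimal usage sketch — the `is_signature_valid` helper and the SHA-256 digest choice are illustrative, not part of the library API:

```python
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec


def is_signature_valid(public_key, signature, data):
    # verify() returns None; failure is signalled by raising InvalidSignature.
    try:
        public_key.verify(signature, data, ec.ECDSA(hashes.SHA256()))
        return True
    except InvalidSignature:
        return False
```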
<code>
[start of src/cryptography/hazmat/backends/openssl/ec.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from cryptography import utils
8 from cryptography.exceptions import (
9 InvalidSignature, UnsupportedAlgorithm, _Reasons
10 )
11 from cryptography.hazmat.backends.openssl.utils import (
12 _calculate_digest_and_algorithm
13 )
14 from cryptography.hazmat.primitives import hashes, serialization
15 from cryptography.hazmat.primitives.asymmetric import (
16 AsymmetricSignatureContext, AsymmetricVerificationContext, ec
17 )
18
19
20 def _check_signature_algorithm(signature_algorithm):
21 if not isinstance(signature_algorithm, ec.ECDSA):
22 raise UnsupportedAlgorithm(
23 "Unsupported elliptic curve signature algorithm.",
24 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
25
26
27 def _ec_key_curve_sn(backend, ec_key):
28 group = backend._lib.EC_KEY_get0_group(ec_key)
29 backend.openssl_assert(group != backend._ffi.NULL)
30
31 nid = backend._lib.EC_GROUP_get_curve_name(group)
32 # The following check is to find EC keys with unnamed curves and raise
33 # an error for now.
34 if nid == backend._lib.NID_undef:
35 raise NotImplementedError(
36 "ECDSA certificates with unnamed curves are unsupported "
37 "at this time"
38 )
39
40 curve_name = backend._lib.OBJ_nid2sn(nid)
41 backend.openssl_assert(curve_name != backend._ffi.NULL)
42
43 sn = backend._ffi.string(curve_name).decode('ascii')
44 return sn
45
46
47 def _mark_asn1_named_ec_curve(backend, ec_cdata):
48 """
49 Set the named curve flag on the EC_KEY. This causes OpenSSL to
50 serialize EC keys along with their curve OID which makes
51 deserialization easier.
52 """
53
54 backend._lib.EC_KEY_set_asn1_flag(
55 ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE
56 )
57
58
59 def _sn_to_elliptic_curve(backend, sn):
60 try:
61 return ec._CURVE_TYPES[sn]()
62 except KeyError:
63 raise UnsupportedAlgorithm(
64 "{0} is not a supported elliptic curve".format(sn),
65 _Reasons.UNSUPPORTED_ELLIPTIC_CURVE
66 )
67
68
69 def _ecdsa_sig_sign(backend, private_key, data):
70 max_size = backend._lib.ECDSA_size(private_key._ec_key)
71 backend.openssl_assert(max_size > 0)
72
73 sigbuf = backend._ffi.new("unsigned char[]", max_size)
74 siglen_ptr = backend._ffi.new("unsigned int[]", 1)
75 res = backend._lib.ECDSA_sign(
76 0, data, len(data), sigbuf, siglen_ptr, private_key._ec_key
77 )
78 backend.openssl_assert(res == 1)
79 return backend._ffi.buffer(sigbuf)[:siglen_ptr[0]]
80
81
82 def _ecdsa_sig_verify(backend, public_key, signature, data):
83 res = backend._lib.ECDSA_verify(
84 0, data, len(data), signature, len(signature), public_key._ec_key
85 )
86 if res != 1:
87 backend._consume_errors()
88 raise InvalidSignature
89 return True
90
91
92 @utils.register_interface(AsymmetricSignatureContext)
93 class _ECDSASignatureContext(object):
94 def __init__(self, backend, private_key, algorithm):
95 self._backend = backend
96 self._private_key = private_key
97 self._digest = hashes.Hash(algorithm, backend)
98
99 def update(self, data):
100 self._digest.update(data)
101
102 def finalize(self):
103 digest = self._digest.finalize()
104
105 return _ecdsa_sig_sign(self._backend, self._private_key, digest)
106
107
108 @utils.register_interface(AsymmetricVerificationContext)
109 class _ECDSAVerificationContext(object):
110 def __init__(self, backend, public_key, signature, algorithm):
111 self._backend = backend
112 self._public_key = public_key
113 self._signature = signature
114 self._digest = hashes.Hash(algorithm, backend)
115
116 def update(self, data):
117 self._digest.update(data)
118
119 def verify(self):
120 digest = self._digest.finalize()
121 return _ecdsa_sig_verify(
122 self._backend, self._public_key, self._signature, digest
123 )
124
125
126 @utils.register_interface(ec.EllipticCurvePrivateKeyWithSerialization)
127 class _EllipticCurvePrivateKey(object):
128 def __init__(self, backend, ec_key_cdata, evp_pkey):
129 self._backend = backend
130 _mark_asn1_named_ec_curve(backend, ec_key_cdata)
131 self._ec_key = ec_key_cdata
132 self._evp_pkey = evp_pkey
133
134 sn = _ec_key_curve_sn(backend, ec_key_cdata)
135 self._curve = _sn_to_elliptic_curve(backend, sn)
136
137 curve = utils.read_only_property("_curve")
138
139 def signer(self, signature_algorithm):
140 _check_signature_algorithm(signature_algorithm)
141 return _ECDSASignatureContext(
142 self._backend, self, signature_algorithm.algorithm
143 )
144
145 def exchange(self, algorithm, peer_public_key):
146 if not (
147 self._backend.elliptic_curve_exchange_algorithm_supported(
148 algorithm, self.curve
149 )
150 ):
151 raise UnsupportedAlgorithm(
152 "This backend does not support the ECDH algorithm.",
153 _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
154 )
155
156 if peer_public_key.curve.name != self.curve.name:
157 raise ValueError(
158 "peer_public_key and self are not on the same curve"
159 )
160
161 group = self._backend._lib.EC_KEY_get0_group(self._ec_key)
162 z_len = (self._backend._lib.EC_GROUP_get_degree(group) + 7) // 8
163 self._backend.openssl_assert(z_len > 0)
164 z_buf = self._backend._ffi.new("uint8_t[]", z_len)
165 peer_key = self._backend._lib.EC_KEY_get0_public_key(
166 peer_public_key._ec_key
167 )
168
169 r = self._backend._lib.ECDH_compute_key(
170 z_buf, z_len, peer_key, self._ec_key, self._backend._ffi.NULL
171 )
172 self._backend.openssl_assert(r > 0)
173 return self._backend._ffi.buffer(z_buf)[:z_len]
174
175 def public_key(self):
176 group = self._backend._lib.EC_KEY_get0_group(self._ec_key)
177 self._backend.openssl_assert(group != self._backend._ffi.NULL)
178
179 curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group)
180
181 public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid)
182 self._backend.openssl_assert(public_ec_key != self._backend._ffi.NULL)
183 public_ec_key = self._backend._ffi.gc(
184 public_ec_key, self._backend._lib.EC_KEY_free
185 )
186
187 point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
188 self._backend.openssl_assert(point != self._backend._ffi.NULL)
189
190 res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point)
191 self._backend.openssl_assert(res == 1)
192
193 evp_pkey = self._backend._ec_cdata_to_evp_pkey(public_ec_key)
194
195 return _EllipticCurvePublicKey(self._backend, public_ec_key, evp_pkey)
196
197 def private_numbers(self):
198 bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key)
199 private_value = self._backend._bn_to_int(bn)
200 return ec.EllipticCurvePrivateNumbers(
201 private_value=private_value,
202 public_numbers=self.public_key().public_numbers()
203 )
204
205 def private_bytes(self, encoding, format, encryption_algorithm):
206 return self._backend._private_key_bytes(
207 encoding,
208 format,
209 encryption_algorithm,
210 self._evp_pkey,
211 self._ec_key
212 )
213
214 def sign(self, data, signature_algorithm):
215 _check_signature_algorithm(signature_algorithm)
216 data, algorithm = _calculate_digest_and_algorithm(
217 self._backend, data, signature_algorithm._algorithm
218 )
219 return _ecdsa_sig_sign(self._backend, self, data)
220
221
222 @utils.register_interface(ec.EllipticCurvePublicKeyWithSerialization)
223 class _EllipticCurvePublicKey(object):
224 def __init__(self, backend, ec_key_cdata, evp_pkey):
225 self._backend = backend
226 _mark_asn1_named_ec_curve(backend, ec_key_cdata)
227 self._ec_key = ec_key_cdata
228 self._evp_pkey = evp_pkey
229
230 sn = _ec_key_curve_sn(backend, ec_key_cdata)
231 self._curve = _sn_to_elliptic_curve(backend, sn)
232
233 curve = utils.read_only_property("_curve")
234
235 def verifier(self, signature, signature_algorithm):
236 if not isinstance(signature, bytes):
237 raise TypeError("signature must be bytes.")
238
239 _check_signature_algorithm(signature_algorithm)
240 return _ECDSAVerificationContext(
241 self._backend, self, signature, signature_algorithm.algorithm
242 )
243
244 def public_numbers(self):
245 get_func, group = (
246 self._backend._ec_key_determine_group_get_func(self._ec_key)
247 )
248 point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
249 self._backend.openssl_assert(point != self._backend._ffi.NULL)
250
251 with self._backend._tmp_bn_ctx() as bn_ctx:
252 bn_x = self._backend._lib.BN_CTX_get(bn_ctx)
253 bn_y = self._backend._lib.BN_CTX_get(bn_ctx)
254
255 res = get_func(group, point, bn_x, bn_y, bn_ctx)
256 self._backend.openssl_assert(res == 1)
257
258 x = self._backend._bn_to_int(bn_x)
259 y = self._backend._bn_to_int(bn_y)
260
261 return ec.EllipticCurvePublicNumbers(
262 x=x,
263 y=y,
264 curve=self._curve
265 )
266
267 def public_bytes(self, encoding, format):
268 if format is serialization.PublicFormat.PKCS1:
269 raise ValueError(
270 "EC public keys do not support PKCS1 serialization"
271 )
272
273 return self._backend._public_key_bytes(
274 encoding,
275 format,
276 self,
277 self._evp_pkey,
278 None
279 )
280
281 def verify(self, signature, data, signature_algorithm):
282 _check_signature_algorithm(signature_algorithm)
283 data, algorithm = _calculate_digest_and_algorithm(
284 self._backend, data, signature_algorithm._algorithm
285 )
286 return _ecdsa_sig_verify(self._backend, self, signature, data)
287
[end of src/cryptography/hazmat/backends/openssl/ec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/backends/openssl/ec.py b/src/cryptography/hazmat/backends/openssl/ec.py
--- a/src/cryptography/hazmat/backends/openssl/ec.py
+++ b/src/cryptography/hazmat/backends/openssl/ec.py
@@ -86,7 +86,6 @@
if res != 1:
backend._consume_errors()
raise InvalidSignature
- return True
@utils.register_interface(AsymmetricSignatureContext)
@@ -118,7 +117,7 @@
def verify(self):
digest = self._digest.finalize()
- return _ecdsa_sig_verify(
+ _ecdsa_sig_verify(
self._backend, self._public_key, self._signature, digest
)
@@ -283,4 +282,4 @@
data, algorithm = _calculate_digest_and_algorithm(
self._backend, data, signature_algorithm._algorithm
)
- return _ecdsa_sig_verify(self._backend, self, signature, data)
+ _ecdsa_sig_verify(self._backend, self, signature, data)
| {"golden_diff": "diff --git a/src/cryptography/hazmat/backends/openssl/ec.py b/src/cryptography/hazmat/backends/openssl/ec.py\n--- a/src/cryptography/hazmat/backends/openssl/ec.py\n+++ b/src/cryptography/hazmat/backends/openssl/ec.py\n@@ -86,7 +86,6 @@\n if res != 1:\n backend._consume_errors()\n raise InvalidSignature\n- return True\n \n \n @utils.register_interface(AsymmetricSignatureContext)\n@@ -118,7 +117,7 @@\n \n def verify(self):\n digest = self._digest.finalize()\n- return _ecdsa_sig_verify(\n+ _ecdsa_sig_verify(\n self._backend, self._public_key, self._signature, digest\n )\n \n@@ -283,4 +282,4 @@\n data, algorithm = _calculate_digest_and_algorithm(\n self._backend, data, signature_algorithm._algorithm\n )\n- return _ecdsa_sig_verify(self._backend, self, signature, data)\n+ _ecdsa_sig_verify(self._backend, self, signature, data)\n", "issue": "Remove `return True` in OpenSSL ecdsa verify implementation\nhttps://github.com/pyca/cryptography/blob/master/src/cryptography/hazmat/backends/openssl/ec.py#L89\r\n\r\nThis isn't part of our documented API\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n InvalidSignature, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.openssl.utils import (\n _calculate_digest_and_algorithm\n)\nfrom cryptography.hazmat.primitives import hashes, serialization\nfrom cryptography.hazmat.primitives.asymmetric import (\n AsymmetricSignatureContext, AsymmetricVerificationContext, ec\n)\n\n\ndef _check_signature_algorithm(signature_algorithm):\n if not isinstance(signature_algorithm, ec.ECDSA):\n raise UnsupportedAlgorithm(\n \"Unsupported elliptic curve signature algorithm.\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n\ndef _ec_key_curve_sn(backend, ec_key):\n group = backend._lib.EC_KEY_get0_group(ec_key)\n backend.openssl_assert(group != backend._ffi.NULL)\n\n nid = backend._lib.EC_GROUP_get_curve_name(group)\n # The following check is to find EC keys with unnamed curves and raise\n # an error for now.\n if nid == backend._lib.NID_undef:\n raise NotImplementedError(\n \"ECDSA certificates with unnamed curves are unsupported \"\n \"at this time\"\n )\n\n curve_name = backend._lib.OBJ_nid2sn(nid)\n backend.openssl_assert(curve_name != backend._ffi.NULL)\n\n sn = backend._ffi.string(curve_name).decode('ascii')\n return sn\n\n\ndef _mark_asn1_named_ec_curve(backend, ec_cdata):\n \"\"\"\n Set the named curve flag on the EC_KEY. 
This causes OpenSSL to\n serialize EC keys along with their curve OID which makes\n deserialization easier.\n \"\"\"\n\n backend._lib.EC_KEY_set_asn1_flag(\n ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE\n )\n\n\ndef _sn_to_elliptic_curve(backend, sn):\n try:\n return ec._CURVE_TYPES[sn]()\n except KeyError:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported elliptic curve\".format(sn),\n _Reasons.UNSUPPORTED_ELLIPTIC_CURVE\n )\n\n\ndef _ecdsa_sig_sign(backend, private_key, data):\n max_size = backend._lib.ECDSA_size(private_key._ec_key)\n backend.openssl_assert(max_size > 0)\n\n sigbuf = backend._ffi.new(\"unsigned char[]\", max_size)\n siglen_ptr = backend._ffi.new(\"unsigned int[]\", 1)\n res = backend._lib.ECDSA_sign(\n 0, data, len(data), sigbuf, siglen_ptr, private_key._ec_key\n )\n backend.openssl_assert(res == 1)\n return backend._ffi.buffer(sigbuf)[:siglen_ptr[0]]\n\n\ndef _ecdsa_sig_verify(backend, public_key, signature, data):\n res = backend._lib.ECDSA_verify(\n 0, data, len(data), signature, len(signature), public_key._ec_key\n )\n if res != 1:\n backend._consume_errors()\n raise InvalidSignature\n return True\n\n\[email protected]_interface(AsymmetricSignatureContext)\nclass _ECDSASignatureContext(object):\n def __init__(self, backend, private_key, algorithm):\n self._backend = backend\n self._private_key = private_key\n self._digest = hashes.Hash(algorithm, backend)\n\n def update(self, data):\n self._digest.update(data)\n\n def finalize(self):\n digest = self._digest.finalize()\n\n return _ecdsa_sig_sign(self._backend, self._private_key, digest)\n\n\[email protected]_interface(AsymmetricVerificationContext)\nclass _ECDSAVerificationContext(object):\n def __init__(self, backend, public_key, signature, algorithm):\n self._backend = backend\n self._public_key = public_key\n self._signature = signature\n self._digest = hashes.Hash(algorithm, backend)\n\n def update(self, data):\n self._digest.update(data)\n\n def verify(self):\n digest = self._digest.finalize()\n return _ecdsa_sig_verify(\n self._backend, self._public_key, self._signature, digest\n )\n\n\[email protected]_interface(ec.EllipticCurvePrivateKeyWithSerialization)\nclass _EllipticCurvePrivateKey(object):\n def __init__(self, backend, ec_key_cdata, evp_pkey):\n self._backend = backend\n _mark_asn1_named_ec_curve(backend, ec_key_cdata)\n self._ec_key = ec_key_cdata\n self._evp_pkey = evp_pkey\n\n sn = _ec_key_curve_sn(backend, ec_key_cdata)\n self._curve = _sn_to_elliptic_curve(backend, sn)\n\n curve = utils.read_only_property(\"_curve\")\n\n def signer(self, signature_algorithm):\n _check_signature_algorithm(signature_algorithm)\n return _ECDSASignatureContext(\n self._backend, self, signature_algorithm.algorithm\n )\n\n def exchange(self, algorithm, peer_public_key):\n if not (\n self._backend.elliptic_curve_exchange_algorithm_supported(\n algorithm, self.curve\n )\n ):\n raise UnsupportedAlgorithm(\n \"This backend does not support the ECDH algorithm.\",\n _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM\n )\n\n if peer_public_key.curve.name != self.curve.name:\n raise ValueError(\n \"peer_public_key and self are not on the same curve\"\n )\n\n group = self._backend._lib.EC_KEY_get0_group(self._ec_key)\n z_len = (self._backend._lib.EC_GROUP_get_degree(group) + 7) // 8\n self._backend.openssl_assert(z_len > 0)\n z_buf = self._backend._ffi.new(\"uint8_t[]\", z_len)\n peer_key = self._backend._lib.EC_KEY_get0_public_key(\n peer_public_key._ec_key\n )\n\n r = self._backend._lib.ECDH_compute_key(\n z_buf, z_len, peer_key, 
self._ec_key, self._backend._ffi.NULL\n )\n self._backend.openssl_assert(r > 0)\n return self._backend._ffi.buffer(z_buf)[:z_len]\n\n def public_key(self):\n group = self._backend._lib.EC_KEY_get0_group(self._ec_key)\n self._backend.openssl_assert(group != self._backend._ffi.NULL)\n\n curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group)\n\n public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid)\n self._backend.openssl_assert(public_ec_key != self._backend._ffi.NULL)\n public_ec_key = self._backend._ffi.gc(\n public_ec_key, self._backend._lib.EC_KEY_free\n )\n\n point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)\n self._backend.openssl_assert(point != self._backend._ffi.NULL)\n\n res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point)\n self._backend.openssl_assert(res == 1)\n\n evp_pkey = self._backend._ec_cdata_to_evp_pkey(public_ec_key)\n\n return _EllipticCurvePublicKey(self._backend, public_ec_key, evp_pkey)\n\n def private_numbers(self):\n bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key)\n private_value = self._backend._bn_to_int(bn)\n return ec.EllipticCurvePrivateNumbers(\n private_value=private_value,\n public_numbers=self.public_key().public_numbers()\n )\n\n def private_bytes(self, encoding, format, encryption_algorithm):\n return self._backend._private_key_bytes(\n encoding,\n format,\n encryption_algorithm,\n self._evp_pkey,\n self._ec_key\n )\n\n def sign(self, data, signature_algorithm):\n _check_signature_algorithm(signature_algorithm)\n data, algorithm = _calculate_digest_and_algorithm(\n self._backend, data, signature_algorithm._algorithm\n )\n return _ecdsa_sig_sign(self._backend, self, data)\n\n\[email protected]_interface(ec.EllipticCurvePublicKeyWithSerialization)\nclass _EllipticCurvePublicKey(object):\n def __init__(self, backend, ec_key_cdata, evp_pkey):\n self._backend = backend\n _mark_asn1_named_ec_curve(backend, ec_key_cdata)\n self._ec_key = ec_key_cdata\n self._evp_pkey = evp_pkey\n\n sn = _ec_key_curve_sn(backend, ec_key_cdata)\n self._curve = _sn_to_elliptic_curve(backend, sn)\n\n curve = utils.read_only_property(\"_curve\")\n\n def verifier(self, signature, signature_algorithm):\n if not isinstance(signature, bytes):\n raise TypeError(\"signature must be bytes.\")\n\n _check_signature_algorithm(signature_algorithm)\n return _ECDSAVerificationContext(\n self._backend, self, signature, signature_algorithm.algorithm\n )\n\n def public_numbers(self):\n get_func, group = (\n self._backend._ec_key_determine_group_get_func(self._ec_key)\n )\n point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)\n self._backend.openssl_assert(point != self._backend._ffi.NULL)\n\n with self._backend._tmp_bn_ctx() as bn_ctx:\n bn_x = self._backend._lib.BN_CTX_get(bn_ctx)\n bn_y = self._backend._lib.BN_CTX_get(bn_ctx)\n\n res = get_func(group, point, bn_x, bn_y, bn_ctx)\n self._backend.openssl_assert(res == 1)\n\n x = self._backend._bn_to_int(bn_x)\n y = self._backend._bn_to_int(bn_y)\n\n return ec.EllipticCurvePublicNumbers(\n x=x,\n y=y,\n curve=self._curve\n )\n\n def public_bytes(self, encoding, format):\n if format is serialization.PublicFormat.PKCS1:\n raise ValueError(\n \"EC public keys do not support PKCS1 serialization\"\n )\n\n return self._backend._public_key_bytes(\n encoding,\n format,\n self,\n self._evp_pkey,\n None\n )\n\n def verify(self, signature, data, signature_algorithm):\n _check_signature_algorithm(signature_algorithm)\n data, algorithm = _calculate_digest_and_algorithm(\n 
self._backend, data, signature_algorithm._algorithm\n )\n return _ecdsa_sig_verify(self._backend, self, signature, data)\n", "path": "src/cryptography/hazmat/backends/openssl/ec.py"}]} | 3,731 | 245 |
gh_patches_debug_38147 | rasdani/github-patches | git_diff | WeblateOrg__weblate-8675 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Freezing in certain strings
### Describe the issue
Hi.
We just updated Weblate to 4.15.1, and our instance is hanging when we access certain strings, weird strings (that should be ignored when creating the PO files, I know).
Instance logs sometimes show this:
```
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
```
This is an example of the string that causes the issue:
https://github.com/freebsd/freebsd-doc-translate/blob/main/documentation/content/es/articles/serial-uart/_index.po#L38-L52
```
#. type: Plain text
#: documentation/content/en/articles/serial-uart/_index.adoc:48
msgid "'''"
msgstr "'''"
```
Postgres gets stuck in SELECT queries.
Do you know if there is something we can do here?
Regards.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
Go to any string like this:
```
#. type: Plain text
#: documentation/content/en/articles/serial-uart/_index.adoc:48
msgid "'''"
msgstr "'''"
```
### Expected behavior
_No response_
### Screenshots
_No response_
### Exception traceback
```pytb
Only this:
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040
```
### How do you run Weblate?
weblate.org service
### Weblate versions
`4.15.1`
We have updated docker containers from `4.10.1`.
### Weblate deploy checks
_No response_
### Additional context
_No response_
</issue>
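The golden diff at the end of this record resolves the hang by falling back from trigram (`pg_trgm`) lookups to Django's standard `IContains`/`IExact` lookups whenever the search term contains no alphanumeric characters — consistent with the report above of PostgreSQL being stuck in SELECTs for strings such as `'''`. A standalone simplification of the `needs_fallback` check introduced there:

```python
def needs_fallback(term):
    """Trigram (pg_trgm) search is only useful for terms with alphanumeric characters."""
    return isinstance(term, str) and not any(char.isalnum() for char in term)


assert needs_fallback("'''") is True           # the msgid from the report above
assert needs_fallback("serial-uart") is False  # ordinary terms keep using pg_trgm
```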
<code>
[start of weblate/utils/db.py]
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 """Database specific code to extend Django."""
6
7 from django.db import connection, models
8 from django.db.models import Case, IntegerField, Sum, When
9 from django.db.models.lookups import PatternLookup
10
11 ESCAPED = frozenset(".\\+*?[^]$(){}=!<>|:-")
12
13 PG_TRGM = "CREATE INDEX {0}_{1}_fulltext ON trans_{0} USING GIN ({1} gin_trgm_ops {2})"
14 PG_DROP = "DROP INDEX {0}_{1}_fulltext"
15
16 MY_FTX = "CREATE FULLTEXT INDEX {0}_{1}_fulltext ON trans_{0}({1})"
17 MY_DROP = "ALTER TABLE trans_{0} DROP INDEX {0}_{1}_fulltext"
18
19
20 def conditional_sum(value=1, **cond):
21 """Wrapper to generate SUM on boolean/enum values."""
22 return Sum(Case(When(then=value, **cond), default=0, output_field=IntegerField()))
23
24
25 def using_postgresql():
26 return connection.vendor == "postgresql"
27
28
29 def adjust_similarity_threshold(value: float):
30 """
31 Adjusts pg_trgm.similarity_threshold for the % operator.
32
33 Ideally we would use directly similarity() in the search, but that doesn't seem
34 to use index, while using % does.
35 """
36 if not using_postgresql():
37 return
38 with connection.cursor() as cursor:
39 # The SELECT has to be executed first as othervise the trgm extension
40 # might not yet be loaded and GUC setting not possible.
41 if not hasattr(connection, "weblate_similarity"):
42 cursor.execute("SELECT show_limit()")
43 connection.weblate_similarity = cursor.fetchone()[0]
44 # Change setting only for reasonably big difference
45 if abs(connection.weblate_similarity - value) > 0.01:
46 cursor.execute("SELECT set_limit(%s)", [value])
47 connection.weblate_similarity = value
48
49
50 class PostgreSQLSearchLookup(PatternLookup):
51 lookup_name = "search"
52 param_pattern = "%s"
53
54 def as_sql(self, qn, connection):
55 lhs, lhs_params = self.process_lhs(qn, connection)
56 rhs, rhs_params = self.process_rhs(qn, connection)
57 params = lhs_params + rhs_params
58 return f"{lhs} %% {rhs} = true", params
59
60
61 class MySQLSearchLookup(models.Lookup):
62 lookup_name = "search"
63
64 def as_sql(self, compiler, connection):
65 lhs, lhs_params = self.process_lhs(compiler, connection)
66 rhs, rhs_params = self.process_rhs(compiler, connection)
67 params = lhs_params + rhs_params
68 return f"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)", params
69
70
71 class PostgreSQLSubstringLookup(PatternLookup):
72 """
73 Case insensitive substring lookup.
74
75 This is essentially same as icontains in Django, but utilizes ILIKE
76 operator which can use pg_trgm index.
77 """
78
79 lookup_name = "substring"
80
81 def as_sql(self, compiler, connection):
82 lhs, lhs_params = self.process_lhs(compiler, connection)
83 rhs, rhs_params = self.process_rhs(compiler, connection)
84 params = lhs_params + rhs_params
85 return f"{lhs} ILIKE {rhs}", params
86
87
88 class PostgreSQLILikeLookup(PostgreSQLSubstringLookup):
89 """
90 Case insensitive string lookup.
91
92 This is essentially same as iexact in Django, but utilizes ILIKE
93 operator which can use pg_trgm index.
94 """
95
96 lookup_name = "ilike"
97 param_pattern = "%s"
98
99
100 def re_escape(pattern):
101 """Escape for use in database regexp match.
102
103 This is based on re.escape, but that one escapes too much.
104 """
105 string = list(pattern)
106 for i, char in enumerate(pattern):
107 if char == "\000":
108 string[i] = "\\000"
109 elif char in ESCAPED:
110 string[i] = "\\" + char
111 return "".join(string)
112
[end of weblate/utils/db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/weblate/utils/db.py b/weblate/utils/db.py
--- a/weblate/utils/db.py
+++ b/weblate/utils/db.py
@@ -6,7 +6,7 @@
from django.db import connection, models
from django.db.models import Case, IntegerField, Sum, When
-from django.db.models.lookups import PatternLookup
+from django.db.models.lookups import IContains, IExact, PatternLookup
ESCAPED = frozenset(".\\+*?[^]$(){}=!<>|:-")
@@ -47,13 +47,27 @@
connection.weblate_similarity = value
-class PostgreSQLSearchLookup(PatternLookup):
+class PostgreSQLFallbackLookup(PatternLookup):
+ def __init__(self, lhs, rhs):
+ self.orig_lhs = lhs
+ self.orig_rhs = rhs
+ super().__init__(lhs, rhs)
+
+ def needs_fallback(self):
+ return isinstance(self.orig_rhs, str) and not any(
+ char.isalnum() for char in self.orig_rhs
+ )
+
+
+class PostgreSQLSearchLookup(PostgreSQLFallbackLookup):
lookup_name = "search"
param_pattern = "%s"
- def as_sql(self, qn, connection):
- lhs, lhs_params = self.process_lhs(qn, connection)
- rhs, rhs_params = self.process_rhs(qn, connection)
+ def as_sql(self, compiler, connection):
+ if self.needs_fallback():
+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)
+ lhs, lhs_params = self.process_lhs(compiler, connection)
+ rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
return f"{lhs} %% {rhs} = true", params
@@ -68,7 +82,7 @@
return f"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)", params
-class PostgreSQLSubstringLookup(PatternLookup):
+class PostgreSQLSubstringLookup(PostgreSQLFallbackLookup):
"""
Case insensitive substring lookup.
@@ -79,6 +93,8 @@
lookup_name = "substring"
def as_sql(self, compiler, connection):
+ if self.needs_fallback():
+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = lhs_params + rhs_params
@@ -96,6 +112,11 @@
lookup_name = "ilike"
param_pattern = "%s"
+ def as_sql(self, compiler, connection):
+ if self.needs_fallback():
+ return IExact(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)
+ return super().as_sql(compiler, connection)
+
def re_escape(pattern):
"""Escape for use in database regexp match.
| {"golden_diff": "diff --git a/weblate/utils/db.py b/weblate/utils/db.py\n--- a/weblate/utils/db.py\n+++ b/weblate/utils/db.py\n@@ -6,7 +6,7 @@\n \n from django.db import connection, models\n from django.db.models import Case, IntegerField, Sum, When\n-from django.db.models.lookups import PatternLookup\n+from django.db.models.lookups import IContains, IExact, PatternLookup\n \n ESCAPED = frozenset(\".\\\\+*?[^]$(){}=!<>|:-\")\n \n@@ -47,13 +47,27 @@\n connection.weblate_similarity = value\n \n \n-class PostgreSQLSearchLookup(PatternLookup):\n+class PostgreSQLFallbackLookup(PatternLookup):\n+ def __init__(self, lhs, rhs):\n+ self.orig_lhs = lhs\n+ self.orig_rhs = rhs\n+ super().__init__(lhs, rhs)\n+\n+ def needs_fallback(self):\n+ return isinstance(self.orig_rhs, str) and not any(\n+ char.isalnum() for char in self.orig_rhs\n+ )\n+\n+\n+class PostgreSQLSearchLookup(PostgreSQLFallbackLookup):\n lookup_name = \"search\"\n param_pattern = \"%s\"\n \n- def as_sql(self, qn, connection):\n- lhs, lhs_params = self.process_lhs(qn, connection)\n- rhs, rhs_params = self.process_rhs(qn, connection)\n+ def as_sql(self, compiler, connection):\n+ if self.needs_fallback():\n+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n+ lhs, lhs_params = self.process_lhs(compiler, connection)\n+ rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} %% {rhs} = true\", params\n \n@@ -68,7 +82,7 @@\n return f\"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)\", params\n \n \n-class PostgreSQLSubstringLookup(PatternLookup):\n+class PostgreSQLSubstringLookup(PostgreSQLFallbackLookup):\n \"\"\"\n Case insensitive substring lookup.\n \n@@ -79,6 +93,8 @@\n lookup_name = \"substring\"\n \n def as_sql(self, compiler, connection):\n+ if self.needs_fallback():\n+ return IContains(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n@@ -96,6 +112,11 @@\n lookup_name = \"ilike\"\n param_pattern = \"%s\"\n \n+ def as_sql(self, compiler, connection):\n+ if self.needs_fallback():\n+ return IExact(self.orig_lhs, self.orig_rhs).as_sql(compiler, connection)\n+ return super().as_sql(compiler, connection)\n+\n \n def re_escape(pattern):\n \"\"\"Escape for use in database regexp match.\n", "issue": "Freezing in certain strings\n### Describe the issue\r\n\r\nHi.\r\n\r\nWe just updated Weblate to 4.15.1, and our instance is hanging when we access certain strings, weird strings (that should be ignored when creating the PO files, I know).\r\n\r\nInstance logs, sometimes show this:\r\n```\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n```\r\n\r\nThis is an example of the string that cause the issue:\r\n\r\nhttps://github.com/freebsd/freebsd-doc-translate/blob/main/documentation/content/es/articles/serial-uart/_index.po#L38-L52\r\n\r\n```\r\n#. 
type: Plain text\r\n#: documentation/content/en/articles/serial-uart/_index.adoc:48\r\nmsgid \"'''\"\r\nmsgstr \"'''\"\r\n```\r\n\r\npostgres be stuck in selects.\r\n\r\nDo you know if there is something we can do here?\r\n\r\nRegards.\r\n\r\n### I already tried\r\n\r\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\r\n- [X] I've searched for similar issues in this repository.\r\n\r\n### Steps to reproduce the behavior\r\n\r\nGo to any string like this:\r\n\r\n```\r\n#. type: Plain text\r\n#: documentation/content/en/articles/serial-uart/_index.adoc:48\r\nmsgid \"'''\"\r\nmsgstr \"'''\"\r\n```\r\n\r\n### Expected behavior\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Exception traceback\r\n\r\n```pytb\r\nOnly this:\r\n\r\n\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n[2023-01-24 12:54:51,272: DEBUG/90183] git: failure fatal: bad object 93f0b5592a265aa1ba11131707a710dbdcca0040\r\n```\r\n\r\n\r\n### How do you run Weblate?\r\n\r\nweblate.org service\r\n\r\n### Weblate versions\r\n\r\n`4.15.1`\r\nWe have updated docker containers from `4.10.1`.\r\n\r\n### Weblate deploy checks\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n\"\"\"Database specific code to extend Django.\"\"\"\n\nfrom django.db import connection, models\nfrom django.db.models import Case, IntegerField, Sum, When\nfrom django.db.models.lookups import PatternLookup\n\nESCAPED = frozenset(\".\\\\+*?[^]$(){}=!<>|:-\")\n\nPG_TRGM = \"CREATE INDEX {0}_{1}_fulltext ON trans_{0} USING GIN ({1} gin_trgm_ops {2})\"\nPG_DROP = \"DROP INDEX {0}_{1}_fulltext\"\n\nMY_FTX = \"CREATE FULLTEXT INDEX {0}_{1}_fulltext ON trans_{0}({1})\"\nMY_DROP = \"ALTER TABLE trans_{0} DROP INDEX {0}_{1}_fulltext\"\n\n\ndef conditional_sum(value=1, **cond):\n \"\"\"Wrapper to generate SUM on boolean/enum values.\"\"\"\n return Sum(Case(When(then=value, **cond), default=0, output_field=IntegerField()))\n\n\ndef using_postgresql():\n return connection.vendor == \"postgresql\"\n\n\ndef adjust_similarity_threshold(value: float):\n \"\"\"\n Adjusts pg_trgm.similarity_threshold for the % operator.\n\n Ideally we would use directly similarity() in the search, but that doesn't seem\n to use index, while using % does.\n \"\"\"\n if not using_postgresql():\n return\n with connection.cursor() as cursor:\n # The SELECT has to be executed first as othervise the trgm extension\n # might not yet be loaded and GUC setting not possible.\n if not hasattr(connection, \"weblate_similarity\"):\n cursor.execute(\"SELECT show_limit()\")\n connection.weblate_similarity = cursor.fetchone()[0]\n # Change setting only for reasonably big difference\n if abs(connection.weblate_similarity - value) > 0.01:\n cursor.execute(\"SELECT set_limit(%s)\", [value])\n connection.weblate_similarity = value\n\n\nclass PostgreSQLSearchLookup(PatternLookup):\n lookup_name = \"search\"\n param_pattern = \"%s\"\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} %% {rhs} = true\", params\n\n\nclass MySQLSearchLookup(models.Lookup):\n lookup_name = \"search\"\n\n def as_sql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, 
rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"MATCH ({lhs}) AGAINST ({rhs} IN NATURAL LANGUAGE MODE)\", params\n\n\nclass PostgreSQLSubstringLookup(PatternLookup):\n \"\"\"\n Case insensitive substring lookup.\n\n This is essentially same as icontains in Django, but utilizes ILIKE\n operator which can use pg_trgm index.\n \"\"\"\n\n lookup_name = \"substring\"\n\n def as_sql(self, compiler, connection):\n lhs, lhs_params = self.process_lhs(compiler, connection)\n rhs, rhs_params = self.process_rhs(compiler, connection)\n params = lhs_params + rhs_params\n return f\"{lhs} ILIKE {rhs}\", params\n\n\nclass PostgreSQLILikeLookup(PostgreSQLSubstringLookup):\n \"\"\"\n Case insensitive string lookup.\n\n This is essentially same as iexact in Django, but utilizes ILIKE\n operator which can use pg_trgm index.\n \"\"\"\n\n lookup_name = \"ilike\"\n param_pattern = \"%s\"\n\n\ndef re_escape(pattern):\n \"\"\"Escape for use in database regexp match.\n\n This is based on re.escape, but that one escapes too much.\n \"\"\"\n string = list(pattern)\n for i, char in enumerate(pattern):\n if char == \"\\000\":\n string[i] = \"\\\\000\"\n elif char in ESCAPED:\n string[i] = \"\\\\\" + char\n return \"\".join(string)\n", "path": "weblate/utils/db.py"}]} | 2,311 | 655 |
gh_patches_debug_39507 | rasdani/github-patches | git_diff | Nitrate__Nitrate-1106 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop RPM package build completely
Major reasons and considerations:
- reduce the effort to maintain the builds
- make it easier to pin the dependencies
- make it clear that installation and distribution happen via container images
AC:
- [x] Remove from CI
- [ ] Remove the Fedora Copr project
- [x] Refactor the Containerfile to build images directly from the source tree
- [x] Update README and documentation to remove the content about RPM packages
</issue>
<code>
[start of container/init.py]
1 #!/usr/bin/python3
2
3 import logging
4 import os
5 import time
6
7 logging.basicConfig(
8 level=logging.INFO,
9 format='%(asctime)s %(levelname)s %(name)s: %(message)s'
10 )
11 logger = logging.getLogger('entrypoint')
12
13 import django
14 django.setup()
15
16 from django.contrib.auth.models import User
17 from django.core.management import call_command
18 from django.db import connection
19
20
21 def create_superuser():
22 username = os.environ.get('NITRATE_SUPERUSER_USERNAME')
23 password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')
24 email = os.environ.get('NITRATE_SUPERUSER_EMAIL')
25
26 if not (username and password and email):
27 logger.info(
28 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '
29 'Skip creating a superuser.'
30 )
31 return
32
33 try:
34 if User.objects.filter(username=username, email=email, is_superuser=True).exists():
35 logger.info('Superuser %s has been created.', username)
36 return
37 except: # noqa
38 pass
39
40 try:
41 User.objects.create_superuser(username, email=email, password=password)
42 logger.info('Superuser %s is created successfully.', username)
43 except Exception as e:
44 logger.warning('Failed to create superuser %s: %s', username, e)
45 logger.warning('Please check if the database is initialized properly.')
46
47
48 def set_default_permissions():
49 if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):
50 try:
51 call_command('setdefaultperms')
52 logger.info('Default groups are created and permissions are set to groups properly.')
53 except Exception as e:
54 logger.warning('Failed to run command setdefaultperms: %s', e)
55 logger.warning('Please check if the database is initialized properly.')
56 else:
57 logger.info(
58 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. '
59 'Skip creating default groups and granting permissions to specific group.'
60 )
61
62
63 def migrate_db():
64 if os.environ.get('NITRATE_MIGRATE_DB'):
65 try:
66 call_command('migrate')
67 logger.info('Database is migrated successfully.')
68 except Exception as e:
69 logger.warning('Failed to migrate the database: %s', e)
70 else:
71 logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')
72
73
74 def wait_for_db():
75 while 1:
76 try:
77 connection.cursor()
78 except: # noqa
79 logger.debug('Failed to connect to database. Sleep for a while and try again ...')
80 time.sleep(0.5)
81 else:
82 break
83
84
85 if __name__ == '__main__':
86 wait_for_db()
87 migrate_db()
88 create_superuser()
89 set_default_permissions()
90
[end of container/init.py]
[start of contrib/scripts/make-release.py]
1 #!/usr/bin/env python3
2
3 import re
4 import argparse
5 import subprocess
6 from pathlib import Path
7
8 from datetime import datetime
9 from typing import Tuple
10 from pygit2 import Commit, Repository
11
12
13 def extract_short_log(commit: Commit) -> Tuple[str, None or str]:
14 lines = commit.message.split('\n')
15 subject = lines[0]
16 match = re.search(r'\((#\d+)\)$', subject)
17 return subject, match.groups()[0] if match else None
18
19
20 def generate_changelog(args: argparse.Namespace):
21 repo: Repository = Repository(args.repo or '.')
22 if args.since_version:
23 release_tag = repo.revparse_single(args.since_version)
24 else:
25 release_tag = repo.revparse_single(repo.describe().split('-')[0])
26
27 walker = repo.walk(repo.head.target)
28 walker.hide(release_tag.id)
29 logs = []
30 found_issue_keys = []
31
32 for commit in walker:
33 subject, issue_key = extract_short_log(commit)
34 if issue_key is not None:
35 found_issue_keys.append(issue_key)
36 subject = subject.replace(issue_key, f'`{issue_key}`_')
37 logs.append(f'* {subject}')
38
39 logs.append('')
40 found_issue_keys.sort()
41 for item in found_issue_keys:
42 logs.append(f'.. _{item}: https://github.com/Nitrate/Nitrate/issues/{item[1:]}')
43
44 return '\n'.join(logs)
45
46
47 def validate_version(value):
48 if value.startswith('v'):
49 raise argparse.ArgumentTypeError('Version should not be prefixed with v.')
50 return value
51
52
53 parser = argparse.ArgumentParser()
54 parser.add_argument('--repo', help='Path to git repository.')
55 parser.add_argument('--since-version', required=False,
56 type=validate_version,
57 help='Collect commits since this version.')
58 parser.add_argument('new_version', metavar='NEW_VERSION',
59 type=validate_version,
60 help='The version to be released.')
61
62 args = parser.parse_args()
63 new_version = args.new_version
64
65 Path('VERSION.txt').unlink()
66 Path('VERSION.txt').write_text(new_version, "utf-8")
67
68 template = Path('contrib/scripts/release-notes.tmpl.rst').read_text("utf-8")
69 Path(f'docs/source/releases/{new_version}.rst').write_text(
70 template.format(
71 new_version=new_version,
72 doc_ref=new_version,
73 title_marker=len(new_version) * '=',
74 change_logs=generate_changelog(args),
75 release_date=datetime.now().strftime('%b %d, %Y')
76 ),
77 "utf-8",
78 )
79
80 readme_md = Path('container/README.md')
81 content = readme_md.read_text("utf-8")
82 readme_md.unlink()
83 readme_md.write_text(
84 re.sub(r'quay.io/nitrate/nitrate:\d+\.\d+(\.\d+)?',
85 f'quay.io/nitrate/nitrate:{new_version}',
86 content),
87 "utf-8",
88 )
89
90 subprocess.check_call([
91 'rpmdev-bumpspec',
92 '-n', new_version,
93 '-c', f'Built for version {new_version}',
94 'python-nitrate-tcms.spec'
95 ])
96
[end of contrib/scripts/make-release.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/container/init.py b/container/init.py
deleted file mode 100755
--- a/container/init.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python3
-
-import logging
-import os
-import time
-
-logging.basicConfig(
- level=logging.INFO,
- format='%(asctime)s %(levelname)s %(name)s: %(message)s'
-)
-logger = logging.getLogger('entrypoint')
-
-import django
-django.setup()
-
-from django.contrib.auth.models import User
-from django.core.management import call_command
-from django.db import connection
-
-
-def create_superuser():
- username = os.environ.get('NITRATE_SUPERUSER_USERNAME')
- password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')
- email = os.environ.get('NITRATE_SUPERUSER_EMAIL')
-
- if not (username and password and email):
- logger.info(
- 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '
- 'Skip creating a superuser.'
- )
- return
-
- try:
- if User.objects.filter(username=username, email=email, is_superuser=True).exists():
- logger.info('Superuser %s has been created.', username)
- return
- except: # noqa
- pass
-
- try:
- User.objects.create_superuser(username, email=email, password=password)
- logger.info('Superuser %s is created successfully.', username)
- except Exception as e:
- logger.warning('Failed to create superuser %s: %s', username, e)
- logger.warning('Please check if the database is initialized properly.')
-
-
-def set_default_permissions():
- if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):
- try:
- call_command('setdefaultperms')
- logger.info('Default groups are created and permissions are set to groups properly.')
- except Exception as e:
- logger.warning('Failed to run command setdefaultperms: %s', e)
- logger.warning('Please check if the database is initialized properly.')
- else:
- logger.info(
- 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. '
- 'Skip creating default groups and granting permissions to specific group.'
- )
-
-
-def migrate_db():
- if os.environ.get('NITRATE_MIGRATE_DB'):
- try:
- call_command('migrate')
- logger.info('Database is migrated successfully.')
- except Exception as e:
- logger.warning('Failed to migrate the database: %s', e)
- else:
- logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')
-
-
-def wait_for_db():
- while 1:
- try:
- connection.cursor()
- except: # noqa
- logger.debug('Failed to connect to database. Sleep for a while and try again ...')
- time.sleep(0.5)
- else:
- break
-
-
-if __name__ == '__main__':
- wait_for_db()
- migrate_db()
- create_superuser()
- set_default_permissions()
diff --git a/contrib/scripts/make-release.py b/contrib/scripts/make-release.py
--- a/contrib/scripts/make-release.py
+++ b/contrib/scripts/make-release.py
@@ -2,7 +2,6 @@
import re
import argparse
-import subprocess
from pathlib import Path
from datetime import datetime
@@ -76,20 +75,3 @@
),
"utf-8",
)
-
-readme_md = Path('container/README.md')
-content = readme_md.read_text("utf-8")
-readme_md.unlink()
-readme_md.write_text(
- re.sub(r'quay.io/nitrate/nitrate:\d+\.\d+(\.\d+)?',
- f'quay.io/nitrate/nitrate:{new_version}',
- content),
- "utf-8",
-)
-
-subprocess.check_call([
- 'rpmdev-bumpspec',
- '-n', new_version,
- '-c', f'Built for version {new_version}',
- 'python-nitrate-tcms.spec'
-])
| {"golden_diff": "diff --git a/container/init.py b/container/init.py\ndeleted file mode 100755\n--- a/container/init.py\n+++ /dev/null\n@@ -1,89 +0,0 @@\n-#!/usr/bin/python3\n-\n-import logging\n-import os\n-import time\n-\n-logging.basicConfig(\n- level=logging.INFO,\n- format='%(asctime)s %(levelname)s %(name)s: %(message)s'\n-)\n-logger = logging.getLogger('entrypoint')\n-\n-import django\n-django.setup()\n-\n-from django.contrib.auth.models import User\n-from django.core.management import call_command\n-from django.db import connection\n-\n-\n-def create_superuser():\n- username = os.environ.get('NITRATE_SUPERUSER_USERNAME')\n- password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')\n- email = os.environ.get('NITRATE_SUPERUSER_EMAIL')\n-\n- if not (username and password and email):\n- logger.info(\n- 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '\n- 'Skip creating a superuser.'\n- )\n- return\n-\n- try:\n- if User.objects.filter(username=username, email=email, is_superuser=True).exists():\n- logger.info('Superuser %s has been created.', username)\n- return\n- except: # noqa\n- pass\n-\n- try:\n- User.objects.create_superuser(username, email=email, password=password)\n- logger.info('Superuser %s is created successfully.', username)\n- except Exception as e:\n- logger.warning('Failed to create superuser %s: %s', username, e)\n- logger.warning('Please check if the database is initialized properly.')\n-\n-\n-def set_default_permissions():\n- if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):\n- try:\n- call_command('setdefaultperms')\n- logger.info('Default groups are created and permissions are set to groups properly.')\n- except Exception as e:\n- logger.warning('Failed to run command setdefaultperms: %s', e)\n- logger.warning('Please check if the database is initialized properly.')\n- else:\n- logger.info(\n- 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. '\n- 'Skip creating default groups and granting permissions to specific group.'\n- )\n-\n-\n-def migrate_db():\n- if os.environ.get('NITRATE_MIGRATE_DB'):\n- try:\n- call_command('migrate')\n- logger.info('Database is migrated successfully.')\n- except Exception as e:\n- logger.warning('Failed to migrate the database: %s', e)\n- else:\n- logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')\n-\n-\n-def wait_for_db():\n- while 1:\n- try:\n- connection.cursor()\n- except: # noqa\n- logger.debug('Failed to connect to database. 
Sleep for a while and try again ...')\n- time.sleep(0.5)\n- else:\n- break\n-\n-\n-if __name__ == '__main__':\n- wait_for_db()\n- migrate_db()\n- create_superuser()\n- set_default_permissions()\ndiff --git a/contrib/scripts/make-release.py b/contrib/scripts/make-release.py\n--- a/contrib/scripts/make-release.py\n+++ b/contrib/scripts/make-release.py\n@@ -2,7 +2,6 @@\n \n import re\n import argparse\n-import subprocess\n from pathlib import Path\n \n from datetime import datetime\n@@ -76,20 +75,3 @@\n ),\n \"utf-8\",\n )\n-\n-readme_md = Path('container/README.md')\n-content = readme_md.read_text(\"utf-8\")\n-readme_md.unlink()\n-readme_md.write_text(\n- re.sub(r'quay.io/nitrate/nitrate:\\d+\\.\\d+(\\.\\d+)?',\n- f'quay.io/nitrate/nitrate:{new_version}',\n- content),\n- \"utf-8\",\n-)\n-\n-subprocess.check_call([\n- 'rpmdev-bumpspec',\n- '-n', new_version,\n- '-c', f'Built for version {new_version}',\n- 'python-nitrate-tcms.spec'\n-])\n", "issue": "Drop RPM package build completely\nMajor reason and consideration:\r\n\r\n- reduce the effort to maintain the builds\r\n- easy to pin the dependencies\r\n- make it clear to install and distribute via container images\r\n\r\nAC:\r\n\r\n- [x] Remove from CI\r\n- [ ] Remove the Fedora Copr project\r\n- [x] Refactor the Containerfile to build images directly from the source tree\r\n- [x] Update README and documentation to remove the content about RPM packages\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport logging\nimport os\nimport time\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(name)s: %(message)s'\n)\nlogger = logging.getLogger('entrypoint')\n\nimport django\ndjango.setup()\n\nfrom django.contrib.auth.models import User\nfrom django.core.management import call_command\nfrom django.db import connection\n\n\ndef create_superuser():\n username = os.environ.get('NITRATE_SUPERUSER_USERNAME')\n password = os.environ.get('NITRATE_SUPERUSER_PASSWORD')\n email = os.environ.get('NITRATE_SUPERUSER_EMAIL')\n\n if not (username and password and email):\n logger.info(\n 'NITRATE_SUPERUSER_USERNAME, NITRATE_SUPERUSER_PASSWORD and NITRATE_SUPERUSER_EMAIL are not set. '\n 'Skip creating a superuser.'\n )\n return\n\n try:\n if User.objects.filter(username=username, email=email, is_superuser=True).exists():\n logger.info('Superuser %s has been created.', username)\n return\n except: # noqa\n pass\n\n try:\n User.objects.create_superuser(username, email=email, password=password)\n logger.info('Superuser %s is created successfully.', username)\n except Exception as e:\n logger.warning('Failed to create superuser %s: %s', username, e)\n logger.warning('Please check if the database is initialized properly.')\n\n\ndef set_default_permissions():\n if os.environ.get('NITRATE_SET_DEFAULT_PERMS'):\n try:\n call_command('setdefaultperms')\n logger.info('Default groups are created and permissions are set to groups properly.')\n except Exception as e:\n logger.warning('Failed to run command setdefaultperms: %s', e)\n logger.warning('Please check if the database is initialized properly.')\n else:\n logger.info(\n 'Environment variable NITRATE_SET_DEFAULT_PERMS is not set. 
'\n 'Skip creating default groups and granting permissions to specific group.'\n )\n\n\ndef migrate_db():\n if os.environ.get('NITRATE_MIGRATE_DB'):\n try:\n call_command('migrate')\n logger.info('Database is migrated successfully.')\n except Exception as e:\n logger.warning('Failed to migrate the database: %s', e)\n else:\n logger.info('Environment variable NITRATE_MIGRATE_DB is not set. Skip migrating database.')\n\n\ndef wait_for_db():\n while 1:\n try:\n connection.cursor()\n except: # noqa\n logger.debug('Failed to connect to database. Sleep for a while and try again ...')\n time.sleep(0.5)\n else:\n break\n\n\nif __name__ == '__main__':\n wait_for_db()\n migrate_db()\n create_superuser()\n set_default_permissions()\n", "path": "container/init.py"}, {"content": "#!/usr/bin/env python3\n\nimport re\nimport argparse\nimport subprocess\nfrom pathlib import Path\n\nfrom datetime import datetime\nfrom typing import Tuple\nfrom pygit2 import Commit, Repository\n\n\ndef extract_short_log(commit: Commit) -> Tuple[str, None or str]:\n lines = commit.message.split('\\n')\n subject = lines[0]\n match = re.search(r'\\((#\\d+)\\)$', subject)\n return subject, match.groups()[0] if match else None\n\n\ndef generate_changelog(args: argparse.Namespace):\n repo: Repository = Repository(args.repo or '.')\n if args.since_version:\n release_tag = repo.revparse_single(args.since_version)\n else:\n release_tag = repo.revparse_single(repo.describe().split('-')[0])\n\n walker = repo.walk(repo.head.target)\n walker.hide(release_tag.id)\n logs = []\n found_issue_keys = []\n\n for commit in walker:\n subject, issue_key = extract_short_log(commit)\n if issue_key is not None:\n found_issue_keys.append(issue_key)\n subject = subject.replace(issue_key, f'`{issue_key}`_')\n logs.append(f'* {subject}')\n\n logs.append('')\n found_issue_keys.sort()\n for item in found_issue_keys:\n logs.append(f'.. _{item}: https://github.com/Nitrate/Nitrate/issues/{item[1:]}')\n\n return '\\n'.join(logs)\n\n\ndef validate_version(value):\n if value.startswith('v'):\n raise argparse.ArgumentTypeError('Version should not be prefixed with v.')\n return value\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--repo', help='Path to git repository.')\nparser.add_argument('--since-version', required=False,\n type=validate_version,\n help='Collect commits since this version.')\nparser.add_argument('new_version', metavar='NEW_VERSION',\n type=validate_version,\n help='The version to be released.')\n\nargs = parser.parse_args()\nnew_version = args.new_version\n\nPath('VERSION.txt').unlink()\nPath('VERSION.txt').write_text(new_version, \"utf-8\")\n\ntemplate = Path('contrib/scripts/release-notes.tmpl.rst').read_text(\"utf-8\")\nPath(f'docs/source/releases/{new_version}.rst').write_text(\n template.format(\n new_version=new_version,\n doc_ref=new_version,\n title_marker=len(new_version) * '=',\n change_logs=generate_changelog(args),\n release_date=datetime.now().strftime('%b %d, %Y')\n ),\n \"utf-8\",\n)\n\nreadme_md = Path('container/README.md')\ncontent = readme_md.read_text(\"utf-8\")\nreadme_md.unlink()\nreadme_md.write_text(\n re.sub(r'quay.io/nitrate/nitrate:\\d+\\.\\d+(\\.\\d+)?',\n f'quay.io/nitrate/nitrate:{new_version}',\n content),\n \"utf-8\",\n)\n\nsubprocess.check_call([\n 'rpmdev-bumpspec',\n '-n', new_version,\n '-c', f'Built for version {new_version}',\n 'python-nitrate-tcms.spec'\n])\n", "path": "contrib/scripts/make-release.py"}]} | 2,265 | 931 |
gh_patches_debug_18654 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move dev dependencies from setup.py to Pipfile
The dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.
Move dev dependencies from setup.py to Pipfile
The dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 """ Setup.py """
3
4 import os
5 import sys
6
7 from setuptools import find_packages, setup
8
9 # Add source directory to PATH variable to enable import of version number
10 sys.path.append(os.path.abspath('src'))
11 # pylint: disable=wrong-import-position
12 from backend.settings import VERSION
13
14 setup(
15 name='integreat_cms',
16 version=VERSION,
17 packages=find_packages('src'),
18 package_dir={'': 'src'},
19 include_package_data=True,
20 scripts=['src/integreat-cms-cli'],
21 data_files=[
22 (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])
23 for root, _, files in os.walk('src/cms/templates/')
24 ] + [
25 (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])
26 for root, _, files in os.walk('src/cms/static/')
27 ] + [
28 ('usr/lib/systemd/system/', ['systemd/[email protected]'])
29 ],
30 install_requires=[
31 'cffi',
32 'Django~=2.2.13',
33 'django-cors-headers',
34 'django-filer',
35 'django-mptt',
36 'django-widget-tweaks',
37 'idna',
38 'lxml',
39 'psycopg2-binary',
40 'python-dateutil',
41 'requests',
42 'rules',
43 'six',
44 'webauthn',
45 ],
46 extras_require={
47 'dev': [
48 'django-compressor',
49 'django-compressor-toolkit',
50 'packaging',
51 'pylint',
52 'pylint-django',
53 'pylint_runner',
54 'sphinx',
55 'sphinxcontrib-django',
56 'sphinx_rtd_theme',
57 'coverage',
58 'django_coverage_plugin',
59 ]
60 },
61 author='Integreat App Project',
62 author_email='[email protected]',
63 description='Content Management System for the Integreat App',
64 license='GPL-2.0-or-later',
65 keywords='Django Integreat CMS',
66 url='http://github.com/Integreat/',
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Intended Audience :: Developers',
70 'Programming Language :: Python :: 3.7',
71 ]
72 )
73
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
from backend.settings import VERSION
setup(
- name='integreat_cms',
+ name='integreat-cms',
version=VERSION,
packages=find_packages('src'),
package_dir={'': 'src'},
@@ -43,21 +43,6 @@
'six',
'webauthn',
],
- extras_require={
- 'dev': [
- 'django-compressor',
- 'django-compressor-toolkit',
- 'packaging',
- 'pylint',
- 'pylint-django',
- 'pylint_runner',
- 'sphinx',
- 'sphinxcontrib-django',
- 'sphinx_rtd_theme',
- 'coverage',
- 'django_coverage_plugin',
- ]
- },
author='Integreat App Project',
author_email='[email protected]',
description='Content Management System for the Integreat App',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n from backend.settings import VERSION\n \n setup(\n- name='integreat_cms',\n+ name='integreat-cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n@@ -43,21 +43,6 @@\n 'six',\n 'webauthn',\n ],\n- extras_require={\n- 'dev': [\n- 'django-compressor',\n- 'django-compressor-toolkit',\n- 'packaging',\n- 'pylint',\n- 'pylint-django',\n- 'pylint_runner',\n- 'sphinx',\n- 'sphinxcontrib-django',\n- 'sphinx_rtd_theme',\n- 'coverage',\n- 'django_coverage_plugin',\n- ]\n- },\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n", "issue": "Move dev dependencies from setup.py to Pipfile\nThe dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.\nMove dev dependencies from setup.py to Pipfile\nThe dev extra dependencies in setup.py are not required anymore, because we don't install it with setuptools for local development. Instead, we could move all dependencies in the `extras_require`-section to the Pipfile, which would have the advantage to be able to install new dev dependencies with `pipenv install <new-dependency>`.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\" Setup.py \"\"\"\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Add source directory to PATH variable to enable import of version number\nsys.path.append(os.path.abspath('src'))\n# pylint: disable=wrong-import-position\nfrom backend.settings import VERSION\n\nsetup(\n name='integreat_cms',\n version=VERSION,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n scripts=['src/integreat-cms-cli'],\n data_files=[\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/templates/')\n ] + [\n (f'lib/integreat-{root}', [os.path.join(root, f) for f in files])\n for root, _, files in os.walk('src/cms/static/')\n ] + [\n ('usr/lib/systemd/system/', ['systemd/[email protected]'])\n ],\n install_requires=[\n 'cffi',\n 'Django~=2.2.13',\n 'django-cors-headers',\n 'django-filer',\n 'django-mptt',\n 'django-widget-tweaks',\n 'idna',\n 'lxml',\n 'psycopg2-binary',\n 'python-dateutil',\n 'requests',\n 'rules',\n 'six',\n 'webauthn',\n ],\n extras_require={\n 'dev': [\n 'django-compressor',\n 'django-compressor-toolkit',\n 'packaging',\n 'pylint',\n 'pylint-django',\n 'pylint_runner',\n 'sphinx',\n 'sphinxcontrib-django',\n 'sphinx_rtd_theme',\n 'coverage',\n 'django_coverage_plugin',\n ]\n },\n author='Integreat App Project',\n author_email='[email protected]',\n description='Content Management System for the Integreat App',\n license='GPL-2.0-or-later',\n keywords='Django Integreat CMS',\n url='http://github.com/Integreat/',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.7',\n ]\n)\n", "path": "setup.py"}]} | 1,328 | 235 |
gh_patches_debug_17962 | rasdani/github-patches | git_diff | hylang__hy-2115 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.10 on Windows fails: AttributeError: module 'collections' has no attribute 'Callable'
Thanks to https://github.com/pyreadline/pyreadline/issues/65, Hy on Python 3.10 on Windows no longer starts, and a backtrace similar to the following is the result:
```console
Traceback (most recent call last):
File "c:\python\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\python\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Python\Scripts\hy.exe\__main__.py", line 4, in <module>
File "c:\python\lib\site-packages\hy\cmdline.py", line 37, in <module>
from hy.completer import completion, Completer
File "c:\python\lib\site-packages\hy\completer.py", line 18, in <module>
import readline
File "c:\python\lib\site-packages\readline.py", line 34, in <module>
rl = Readline()
File "c:\python\lib\site-packages\pyreadline\rlmain.py", line 422, in __init__
BaseReadline.__init__(self)
File "c:\python\lib\site-packages\pyreadline\rlmain.py", line 62, in __init__
mode.init_editing_mode(None)
File "c:\python\lib\site-packages\pyreadline\modes\emacs.py", line 633, in init_editing_mode
self._bind_key('space', self.self_insert)
File "c:\python\lib\site-packages\pyreadline\modes\basemode.py", line 162, in _bind_key
if not callable(func):
File "c:\python\lib\site-packages\pyreadline\py3k_compat.py", line 8, in callable
return isinstance(x, collections.Callable)
AttributeError: module 'collections' has no attribute 'Callable'
```
Unfortunately from that bug (and the repository in general), it would appear that `pyreadline` is no longer actively maintained (last update of any kind was in 2015), so for Hy to continue to support Windows on future Python versions will require some amount of workaround (a fork of `pyreadline`, dropping readline support on Windows, etc).
I'm not sure if there's a way to specify that Python 3.10+ and Windows are simply mutually incompatible, but that would be the "simplest" workaround if there's a clean way to specify that. :see_no_evil:
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright 2021 the authors.
3 # This file is part of Hy, which is free software licensed under the Expat
4 # license. See the LICENSE.
5
6 import glob
7 import importlib
8 import inspect
9 import os
10 import sys
11
12 from setuptools import find_packages, setup
13 from setuptools.command.install import install
14 import fastentrypoints # Monkey-patches setuptools.
15
16 from get_version import __version__
17
18 os.chdir(os.path.split(os.path.abspath(__file__))[0])
19
20 PKG = "hy"
21
22 long_description = """Hy is a Python <--> Lisp layer. It helps
23 make things work nicer, and lets Python and the Hy lisp variant play
24 nice together. """
25
26 class Install(install):
27 def __compile_hy_bytecode(self):
28 for path in sorted(glob.iglob('hy/**.hy', recursive=True)):
29 importlib.util.cache_from_source(path, optimize=self.optimize)
30
31 def run(self):
32 # Don't bother messing around with deps if they wouldn't be installed anyway.
33 # Code is based on setuptools's install.py.
34 if not (self.old_and_unmanageable or self.single_version_externally_managed
35 or not self._called_from_setup(inspect.currentframe())):
36 easy_install = self.distribution.get_command_class('easy_install')
37
38 cmd = easy_install(
39 self.distribution, args="x", root=self.root, record=self.record,
40 )
41 cmd.ensure_finalized()
42 cmd.always_copy_from = '.'
43 cmd.package_index.scan(glob.glob('*.egg'))
44
45 cmd.args = self.distribution.install_requires
46
47 # Avoid deprecation warnings on new setuptools versions.
48 if 'show_deprecation' in inspect.signature(cmd.run).parameters:
49 cmd.run(show_deprecation=False)
50 else:
51 cmd.run()
52
53 # Make sure any new packages get picked up.
54 import site
55 importlib.reload(site)
56 importlib.invalidate_caches()
57
58 self.__compile_hy_bytecode()
59
60 # The deps won't be reinstalled because of:
61 # https://github.com/pypa/setuptools/issues/456
62 return install.run(self)
63
64 setup(
65 name=PKG,
66 version=__version__,
67 install_requires=[
68 'rply>=0.7.7',
69 'funcparserlib>=0.3.6',
70 'colorama',
71 'astor>=0.8 ; python_version < "3.9"',
72 'pyreadline>=2.1 ; os_name == "nt"',
73 ],
74 cmdclass=dict(install=Install),
75 entry_points={
76 'console_scripts': [
77 'hy = hy.cmdline:hy_main',
78 'hy3 = hy.cmdline:hy_main',
79 'hyc = hy.cmdline:hyc_main',
80 'hyc3 = hy.cmdline:hyc_main',
81 'hy2py = hy.cmdline:hy2py_main',
82 'hy2py3 = hy.cmdline:hy2py_main',
83 ]
84 },
85 packages=find_packages(exclude=['tests*']),
86 package_data={
87 'hy.contrib': ['*.hy', '__pycache__/*'],
88 'hy.core': ['*.hy', '__pycache__/*'],
89 'hy.extra': ['*.hy', '__pycache__/*'],
90 },
91 data_files=[
92 ('get_version', ['get_version.py'])
93 ],
94 author="Paul Tagliamonte",
95 author_email="[email protected]",
96 long_description=long_description,
97 description='Lisp and Python love each other.',
98 license="Expat",
99 url="http://hylang.org/",
100 platforms=['any'],
101 classifiers=[
102 "Development Status :: 4 - Beta",
103 "Intended Audience :: Developers",
104 "License :: DFSG approved",
105 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
106 "Operating System :: OS Independent",
107 "Programming Language :: Lisp",
108 "Programming Language :: Python",
109 "Programming Language :: Python :: 3",
110 "Programming Language :: Python :: 3.6",
111 "Programming Language :: Python :: 3.7",
112 "Programming Language :: Python :: 3.8",
113 "Programming Language :: Python :: 3.9",
114 "Programming Language :: Python :: 3.10",
115 "Topic :: Software Development :: Code Generators",
116 "Topic :: Software Development :: Compilers",
117 "Topic :: Software Development :: Libraries",
118 ]
119 )
120
[end of setup.py]
[start of hy/completer.py]
1 # Copyright 2021 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 import contextlib
6 import os
7 import re
8 import sys
9 import builtins
10
11 import hy.macros
12 import hy.compiler
13
14
15 docomplete = True
16
17 try:
18 import readline
19 except ImportError:
20 try:
21 import pyreadline.rlmain
22 import pyreadline.unicode_helper # NOQA
23 import readline
24 except ImportError:
25 docomplete = False
26
27 if docomplete:
28 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
29 readline_bind = "bind ^I rl_complete"
30 else:
31 readline_bind = "tab: complete"
32
33
34 class Completer(object):
35
36 def __init__(self, namespace={}):
37 if not isinstance(namespace, dict):
38 raise TypeError('namespace must be a dictionary')
39 self.namespace = namespace
40 self.path = [builtins.__dict__,
41 namespace]
42
43 namespace.setdefault('__macros__', {})
44
45 self.path.append(namespace['__macros__'])
46
47 def attr_matches(self, text):
48 # Borrowed from IPython's completer
49 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
50
51 if m:
52 expr, attr = m.group(1, 3)
53 attr = attr.replace("-", "_")
54 expr = expr.replace("-", "_")
55 else:
56 return []
57
58 try:
59 obj = eval(expr, self.namespace)
60 words = dir(obj)
61 except Exception:
62 return []
63
64 n = len(attr)
65 matches = []
66 for w in words:
67 if w[:n] == attr:
68 matches.append("{}.{}".format(
69 expr.replace("_", "-"), w.replace("_", "-")))
70 return matches
71
72 def global_matches(self, text):
73 matches = []
74 for p in self.path:
75 for k in p.keys():
76 if isinstance(k, str):
77 k = k.replace("_", "-")
78 if k.startswith(text):
79 matches.append(k)
80 return matches
81
82 def complete(self, text, state):
83 if "." in text:
84 matches = self.attr_matches(text)
85 else:
86 matches = self.global_matches(text)
87 try:
88 return matches[state]
89 except IndexError:
90 return None
91
92
93 @contextlib.contextmanager
94 def completion(completer=None):
95 delims = "()[]{} "
96 if not completer:
97 completer = Completer()
98
99 if docomplete:
100 readline.set_completer(completer.complete)
101 readline.set_completer_delims(delims)
102
103 history = os.environ.get(
104 "HY_HISTORY", os.path.expanduser("~/.hy-history"))
105 readline.parse_and_bind("set blink-matching-paren on")
106
107 try:
108 readline.read_history_file(history)
109 except IOError:
110 pass
111
112 readline.parse_and_bind(readline_bind)
113
114 try:
115 yield
116 finally:
117 if docomplete:
118 try:
119 readline.write_history_file(history)
120 except IOError:
121 pass
122
[end of hy/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -16,13 +16,14 @@
try:
import readline
-except ImportError:
- try:
- import pyreadline.rlmain
- import pyreadline.unicode_helper # NOQA
- import readline
- except ImportError:
+except AttributeError as e:
+ # https://github.com/pyreadline/pyreadline/issues/65
+ if "module 'collections' has no attribute 'Callable'" in str(e):
docomplete = False
+ else:
+ raise
+except ImportError:
+ docomplete = False
if docomplete:
if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,7 +69,6 @@
'funcparserlib>=0.3.6',
'colorama',
'astor>=0.8 ; python_version < "3.9"',
- 'pyreadline>=2.1 ; os_name == "nt"',
],
cmdclass=dict(install=Install),
entry_points={
| {"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -16,13 +16,14 @@\n \n try:\n import readline\n-except ImportError:\n- try:\n- import pyreadline.rlmain\n- import pyreadline.unicode_helper # NOQA\n- import readline\n- except ImportError:\n+except AttributeError as e:\n+ # https://github.com/pyreadline/pyreadline/issues/65\n+ if \"module 'collections' has no attribute 'Callable'\" in str(e):\n docomplete = False\n+ else:\n+ raise\n+except ImportError:\n+ docomplete = False\n \n if docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,7 +69,6 @@\n 'funcparserlib>=0.3.6',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n- 'pyreadline>=2.1 ; os_name == \"nt\"',\n ],\n cmdclass=dict(install=Install),\n entry_points={\n", "issue": "Python 3.10 on Windows fails: AttributeError: module 'collections' has no attribute 'Callable'\nThanks to https://github.com/pyreadline/pyreadline/issues/65, Hy on Python 3.10 on Windows no longer starts, and a backtrace similar to the following is the result:\r\n\r\n```console\r\nTraceback (most recent call last):\r\n File \"c:\\python\\lib\\runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"c:\\python\\lib\\runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Python\\Scripts\\hy.exe\\__main__.py\", line 4, in <module>\r\n File \"c:\\python\\lib\\site-packages\\hy\\cmdline.py\", line 37, in <module>\r\n from hy.completer import completion, Completer\r\n File \"c:\\python\\lib\\site-packages\\hy\\completer.py\", line 18, in <module>\r\n import readline\r\n File \"c:\\python\\lib\\site-packages\\readline.py\", line 34, in <module>\r\n rl = Readline()\r\n File \"c:\\python\\lib\\site-packages\\pyreadline\\rlmain.py\", line 422, in __init__\r\n BaseReadline.__init__(self)\r\n File \"c:\\python\\lib\\site-packages\\pyreadline\\rlmain.py\", line 62, in __init__\r\n mode.init_editing_mode(None)\r\n File \"c:\\python\\lib\\site-packages\\pyreadline\\modes\\emacs.py\", line 633, in init_editing_mode\r\n self._bind_key('space', self.self_insert)\r\n File \"c:\\python\\lib\\site-packages\\pyreadline\\modes\\basemode.py\", line 162, in _bind_key\r\n if not callable(func):\r\n File \"c:\\python\\lib\\site-packages\\pyreadline\\py3k_compat.py\", line 8, in callable\r\n return isinstance(x, collections.Callable)\r\nAttributeError: module 'collections' has no attribute 'Callable'\r\n```\r\n\r\nUnfortunately from that bug (and the repository in general), it would appear that `pyreadline` is no longer actively maintained (last update of any kind was in 2015), so for Hy to continue to support Windows on future Python versions will require some amount of workaround (a fork of `pyreadline`, dropping readline support on Windows, etc).\r\n\r\nI'm not sure if there's a way to specify that Python 3.10+ and Windows are simply mutually incompatible, but that would be the \"simplest\" workaround if there's a clean way to specify that. :see_no_evil:\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2021 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. 
See the LICENSE.\n\nimport glob\nimport importlib\nimport inspect\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nimport fastentrypoints # Monkey-patches setuptools.\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\nclass Install(install):\n def __compile_hy_bytecode(self):\n for path in sorted(glob.iglob('hy/**.hy', recursive=True)):\n importlib.util.cache_from_source(path, optimize=self.optimize)\n\n def run(self):\n # Don't bother messing around with deps if they wouldn't be installed anyway.\n # Code is based on setuptools's install.py.\n if not (self.old_and_unmanageable or self.single_version_externally_managed\n or not self._called_from_setup(inspect.currentframe())):\n easy_install = self.distribution.get_command_class('easy_install')\n\n cmd = easy_install(\n self.distribution, args=\"x\", root=self.root, record=self.record,\n )\n cmd.ensure_finalized()\n cmd.always_copy_from = '.'\n cmd.package_index.scan(glob.glob('*.egg'))\n\n cmd.args = self.distribution.install_requires\n\n # Avoid deprecation warnings on new setuptools versions.\n if 'show_deprecation' in inspect.signature(cmd.run).parameters:\n cmd.run(show_deprecation=False)\n else:\n cmd.run()\n\n # Make sure any new packages get picked up.\n import site\n importlib.reload(site)\n importlib.invalidate_caches()\n\n self.__compile_hy_bytecode()\n\n # The deps won't be reinstalled because of:\n # https://github.com/pypa/setuptools/issues/456\n return install.run(self)\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n 'rply>=0.7.7',\n 'funcparserlib>=0.3.6',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n 'pyreadline>=2.1 ; os_name == \"nt\"',\n ],\n cmdclass=dict(install=Install),\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy3 = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hyc3 = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py3 = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". 
Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}, {"content": "# Copyright 2021 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [builtins.__dict__,\n namespace]\n\n namespace.setdefault('__macros__', {})\n\n self.path.append(namespace['__macros__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def complete(self, text, state):\n if \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.environ.get(\n \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n try:\n readline.write_history_file(history)\n except IOError:\n pass\n", "path": "hy/completer.py"}]} | 3,297 | 289 |
gh_patches_debug_9950 | rasdani/github-patches | git_diff | ManimCommunity__manim-684 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add (opengraph) metadata to documentation
Previews to links to the documentation are currently not available due to missing opengraph metadata.
Also, a description meta tag should be added.
</issue>
<code>
[start of docs/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import subprocess
15 import sys
16 from distutils.sysconfig import get_python_lib
17 from pathlib import Path
18
19 sys.path.insert(0, os.path.abspath("."))
20
21
22 if os.environ.get("READTHEDOCS") == "True":
23 site_path = get_python_lib()
24 # bindings for pangocffi, cairocffi, pangocairocffi need to be generated
25 subprocess.run(["python", "pangocffi/ffi_build.py"], cwd=site_path)
26 subprocess.run(["python", "cairocffi/ffi_build.py"], cwd=site_path)
27 subprocess.run(["python", "pangocairocffi/ffi_build.py"], cwd=site_path)
28 # we need to add ffmpeg to the path
29 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries")
30 # the included binary is named ffmpeg-linux..., create a symlink
31 [ffmpeg_bin] = [
32 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-")
33 ]
34 os.symlink(
35 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg")
36 )
37 os.environ["PATH"] += os.pathsep + ffmpeg_path
38
39
40 # -- Project information -----------------------------------------------------
41
42 project = "Manim"
43 copyright = "2020, The Manim Community Dev Team"
44 author = "The Manim Community Dev Team"
45
46
47 # -- General configuration ---------------------------------------------------
48
49 # Add any Sphinx extension module names here, as strings. They can be
50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
51 # ones.
52 extensions = [
53 "sphinx.ext.autodoc",
54 "recommonmark",
55 "sphinx_copybutton",
56 "sphinx.ext.napoleon",
57 "sphinx.ext.autosummary",
58 "sphinx.ext.doctest",
59 "manim_directive",
60 ]
61
62 # Automatically generate stub pages when using the .. autosummary directive
63 autosummary_generate = True
64
65 # controls whether functions documented by the autofunction directive
66 # appear with their full module names
67 add_module_names = False
68
69 # Add any paths that contain templates here, relative to this directory.
70 templates_path = ["_templates"]
71
72 # List of patterns, relative to source directory, that match files and
73 # directories to ignore when looking for source files.
74 # This pattern also affects html_static_path and html_extra_path.
75 exclude_patterns = []
76
77
78 # -- Options for HTML output -------------------------------------------------
79
80 # The theme to use for HTML and HTML Help pages. See the documentation for
81 # a list of builtin themes.
82 #
83 import guzzle_sphinx_theme
84
85 html_theme_path = guzzle_sphinx_theme.html_theme_path()
86 html_theme = "guzzle_sphinx_theme"
87 html_favicon = str(Path("_static/favicon.ico"))
88
89 # There's a standing issue with Sphinx's new-style sidebars. This is a
90 # workaround. Taken from
91 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826
92 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
93
94 # Register the theme as an extension to generate a sitemap.xml
95 extensions.append("guzzle_sphinx_theme")
96
97 # Add any paths that contain custom static files (such as style sheets) here,
98 # relative to this directory. They are copied after the builtin static files,
99 # so a file named "default.css" will overwrite the builtin "default.css".
100 html_static_path = ["_static"]
101
102 # This specifies any additional css files that will override the theme's
103 html_css_files = ["custom.css"]
104
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -56,6 +56,7 @@
"sphinx.ext.napoleon",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
+ "sphinxext.opengraph",
"manim_directive",
]
@@ -101,3 +102,8 @@
# This specifies any additional css files that will override the theme's
html_css_files = ["custom.css"]
+
+# opengraph settings
+ogp_image = "https://www.manim.community/logo.png"
+ogp_site_name = "Manim Community | Documentation"
+ogp_site_url = "https://docs.manim.community/"
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -56,6 +56,7 @@\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n+ \"sphinxext.opengraph\",\n \"manim_directive\",\n ]\n \n@@ -101,3 +102,8 @@\n \n # This specifies any additional css files that will override the theme's\n html_css_files = [\"custom.css\"]\n+\n+# opengraph settings\n+ogp_image = \"https://www.manim.community/logo.png\"\n+ogp_site_name = \"Manim Community | Documentation\"\n+ogp_site_url = \"https://docs.manim.community/\"\n", "issue": "Add (opengraph) metadata to documentation\nPreviews to links to the documentation are currently not available due to missing opengraph metadata.\r\n\r\nAlso, a description meta tag should be added.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n", "path": "docs/source/conf.py"}]} | 1,670 | 171 |
gh_patches_debug_11488 | rasdani/github-patches | git_diff | pytorch__vision-355 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
utils.save_image fails when passing list of images
utils.save_image fails when passing in a list of images, as the code tries to call .cpu on the list.
Passing in a list should be possible according to the function's documentation.
</issue>
<code>
[start of torchvision/utils.py]
1 import torch
2 import math
3 irange = range
4
5
6 def make_grid(tensor, nrow=8, padding=2,
7 normalize=False, range=None, scale_each=False, pad_value=0):
8 """Make a grid of images.
9
10 Args:
11 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
12 or a list of images all of the same size.
13 nrow (int, optional): Number of images displayed in each row of the grid.
14 The Final grid size is (B / nrow, nrow). Default is 8.
15 padding (int, optional): amount of padding. Default is 2.
16 normalize (bool, optional): If True, shift the image to the range (0, 1),
17 by subtracting the minimum and dividing by the maximum pixel value.
18 range (tuple, optional): tuple (min, max) where min and max are numbers,
19 then these numbers are used to normalize the image. By default, min and max
20 are computed from the tensor.
21 scale_each (bool, optional): If True, scale each image in the batch of
22 images separately rather than the (min, max) over all images.
23 pad_value (float, optional): Value for the padded pixels.
24
25 Example:
26 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
27
28 """
29 if not (torch.is_tensor(tensor) or
30 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
31 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
32
33 # if list of tensors, convert to a 4D mini-batch Tensor
34 if isinstance(tensor, list):
35 tensor = torch.stack(tensor, dim=0)
36
37 if tensor.dim() == 2: # single image H x W
38 tensor = tensor.view(1, tensor.size(0), tensor.size(1))
39 if tensor.dim() == 3: # single image
40 if tensor.size(0) == 1: # if single-channel, convert to 3-channel
41 tensor = torch.cat((tensor, tensor, tensor), 0)
42 return tensor
43 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
44 tensor = torch.cat((tensor, tensor, tensor), 1)
45
46 if normalize is True:
47 tensor = tensor.clone() # avoid modifying tensor in-place
48 if range is not None:
49 assert isinstance(range, tuple), \
50 "range has to be a tuple (min, max) if specified. min and max are numbers"
51
52 def norm_ip(img, min, max):
53 img.clamp_(min=min, max=max)
54 img.add_(-min).div_(max - min)
55
56 def norm_range(t, range):
57 if range is not None:
58 norm_ip(t, range[0], range[1])
59 else:
60 norm_ip(t, t.min(), t.max())
61
62 if scale_each is True:
63 for t in tensor: # loop over mini-batch dimension
64 norm_range(t, range)
65 else:
66 norm_range(tensor, range)
67
68 # make the mini-batch of images into a grid
69 nmaps = tensor.size(0)
70 xmaps = min(nrow, nmaps)
71 ymaps = int(math.ceil(float(nmaps) / xmaps))
72 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
73 grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)
74 k = 0
75 for y in irange(ymaps):
76 for x in irange(xmaps):
77 if k >= nmaps:
78 break
79 grid.narrow(1, y * height + padding, height - padding)\
80 .narrow(2, x * width + padding, width - padding)\
81 .copy_(tensor[k])
82 k = k + 1
83 return grid
84
85
86 def save_image(tensor, filename, nrow=8, padding=2,
87 normalize=False, range=None, scale_each=False, pad_value=0):
88 """Save a given Tensor into an image file.
89
90 Args:
91 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
92 saves the tensor as a grid of images by calling ``make_grid``.
93 **kwargs: Other arguments are documented in ``make_grid``.
94 """
95 from PIL import Image
96 tensor = tensor.cpu()
97 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
98 normalize=normalize, range=range, scale_each=scale_each)
99 ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
100 im = Image.fromarray(ndarr)
101 im.save(filename)
102
[end of torchvision/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/utils.py b/torchvision/utils.py
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -93,9 +93,8 @@
**kwargs: Other arguments are documented in ``make_grid``.
"""
from PIL import Image
- tensor = tensor.cpu()
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
- ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()
+ ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
im = Image.fromarray(ndarr)
im.save(filename)
| {"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -93,9 +93,8 @@\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n- tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n- ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()\n+ ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "issue": "utils.save_image fails when passing list of images\nutils.save_image fails when passing in a list of images, as the code tries to call .cpu on the list. \r\nPassing in a list should be possible according to the function's documentation.\n", "before_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The Final grid size is (B / nrow, nrow). Default is 8.\n padding (int, optional): amount of padding. Default is 2.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by subtracting the minimum and dividing by the maximum pixel value.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If True, scale each image in the batch of\n images separately rather than the (min, max) over all images.\n pad_value (float, optional): Value for the padded pixels.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.view(1, tensor.size(0), tensor.size(1))\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n return tensor\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, t.min(), t.max())\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}]} | 1,888 | 198 |
gh_patches_debug_4144 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1676 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Source Request]:
### Municipality / Region
Bedburg
### Collection Calendar Webpage
https://buerger-portal-bedburg.azurewebsites.net/calendar/
### Example Address
Am Schirkerhof 2, Bedburg, Broich
### Collection Data Format
Something different (add to additional information)
### Additional Information
The city of Bedburg changed its waste collection provider, so the old Abfall+ Source doesn't work anymore. The new provider is Drekopf. Unfortunately, they do not use the existing Drekopf API. The webpage provided by Drekopf has multiple outputs: you can download the calendar as PDF, XLS, CSV, or ICS, but I can't find the URL. The underlying API seems to be C-Trace, but when calling this API, credentials are requested. Maybe I am missing something...
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py]
1 import datetime
2 import re
3 from dataclasses import dataclass
4 from typing import List, Literal, Optional, TypedDict, Union
5
6 import requests
7 from waste_collection_schedule import Collection # type: ignore[attr-defined]
8
9 TITLE = "Bürgerportal"
10 URL = "https://www.c-trace.de"
11 DESCRIPTION = "Source for waste collection in multiple service areas."
12
13
14 def EXTRA_INFO():
15 return [{"title": s["title"], "url": s["url"]} for s in SERVICE_MAP]
16
17
18 TEST_CASES = {
19 "Cochem-Zell": {
20 "operator": "cochem_zell",
21 "district": "Bullay",
22 "subdistrict": "Bullay",
23 "street": "Layenweg",
24 "number": 3,
25 },
26 "Alb-Donau": {
27 "operator": "alb_donau",
28 "district": "Blaubeuren",
29 "street": "Alberstraße",
30 "number": 3,
31 },
32 "Biedenkopf": {
33 "operator": "biedenkopf",
34 "district": "Biedenkopf",
35 "subdistrict": "Breidenstein",
36 "street": "Auf dem Hammer",
37 "number": 1,
38 },
39 }
40
41 ICON_MAP = {
42 "mobil": "mdi:truck",
43 "bio": "mdi:leaf",
44 "papier": "mdi:package-variant",
45 "verpackung": "mdi:recycle",
46 "gelb": "mdi:recycle",
47 "lvp": "mdi:recycle",
48 "rest": "mdi:trash-can",
49 "gruen": "mdi:forest",
50 "grün": "mdi:forest",
51 "baum": "mdi:forest",
52 "schnitt": "mdi:forest",
53 "schad": "mdi:biohazard",
54 }
55 API_HEADERS = {
56 "Accept": "application/json, text/plain;q=0.5",
57 "Cache-Control": "no-cache",
58 }
59 Operator = Literal["cochem_zell", "alb_donau", "biedenkopf"]
60
61 SERVICE_MAP = [
62 {
63 "title": "KV Cochem-Zell",
64 "url": "https://www.cochem-zell-online.de/",
65 "api_url": "https://buerger-portal-cochemzell.azurewebsites.net/api",
66 "operator": "cochem_zell",
67 },
68 {
69 "title": "Abfallwirtschaft Alb-Donau-Kreis",
70 "url": "https://www.aw-adk.de/",
71 "api_url": "https://buerger-portal-albdonaukreisabfallwirtschaft.azurewebsites.net/api",
72 "operator": "alb_donau",
73 },
74 {
75 "title": "MZV Biedenkopf",
76 "url": "https://mzv-biedenkopf.de/",
77 "api_url": "https://biedenkopfmzv.buergerportal.digital/api",
78 "operator": "biedenkopf",
79 },
80 ]
81
82
83 # This datalcass is used for adding entries to a set and remove duplicate entries.
84 # The default `Collection` extends the standard dict and thus is not hashable.
85 @dataclass(frozen=True, eq=True)
86 class CollectionEntry:
87 date: datetime.date
88 waste_type: str
89 icon: Optional[str]
90
91 def export(self) -> Collection:
92 return Collection(self.date, self.waste_type, self.icon)
93
94
95 def quote_none(value: Optional[str]) -> str:
96 if value is None:
97 return "null"
98
99 return f"'{value}'"
100
101
102 def get_api_map():
103 return {s["operator"]: s["api_url"] for s in SERVICE_MAP}
104
105
106 class Source:
107 def __init__(
108 self,
109 operator: Operator,
110 district: str,
111 street: str,
112 subdistrict: Optional[str] = None,
113 number: Union[int, str, None] = None,
114 show_volume: bool = False,
115 ):
116 self.api_url = get_api_map()[operator]
117 self.district = district
118 self.subdistrict = subdistrict
119 self.street = street
120 self.number = number
121 self.show_volume = show_volume
122
123 def fetch(self) -> list[Collection]:
124 session = requests.session()
125 session.headers.update(API_HEADERS)
126
127 year = datetime.datetime.now().year
128 entries: set[CollectionEntry] = set()
129
130 district_id = self.fetch_district_id(session)
131 street_id = self.fetch_street_id(session, district_id)
132 # Eventually verify house number in the future
133
134 params = {
135 "$expand": "Abfuhrplan,Abfuhrplan/GefaesstarifArt/Abfallart,Abfuhrplan/GefaesstarifArt/Volumen",
136 "$orderby": "Abfuhrplan/GefaesstarifArt/Abfallart/Name,Abfuhrplan/GefaesstarifArt/Volumen/VolumenWert",
137 "orteId": district_id,
138 "strassenId": street_id,
139 "jahr": year,
140 }
141
142 if self.number:
143 params["hausNr"] = (f"'{self.number}'",)
144
145 res = session.get(
146 f"{self.api_url}/AbfuhrtermineAbJahr",
147 params=params,
148 )
149 res.raise_for_status()
150 payload: CollectionsRes = res.json()
151
152 date_regex = re.compile(r"\d+")
153
154 for collection in payload["d"]:
155 if date_match := re.search(date_regex, collection["Termin"]):
156 timestamp = float(date_match.group())
157 date = datetime.datetime.utcfromtimestamp(timestamp / 1000).date()
158 waste_type = collection["Abfuhrplan"]["GefaesstarifArt"]["Abfallart"][
159 "Name"
160 ]
161 icon = None
162
163 for icon_type, tested_icon in ICON_MAP.items():
164 if icon_type.lower() in waste_type.lower():
165 icon = tested_icon
166
167 if self.show_volume:
168 volume = int(
169 collection["Abfuhrplan"]["GefaesstarifArt"]["Volumen"][
170 "VolumenWert"
171 ]
172 )
173 waste_type = f"{waste_type} ({volume} l)"
174
175 entries.add(CollectionEntry(date, waste_type, icon))
176
177 if len(entries) == 0:
178 raise ValueError(
179 "No collections found! Please verify that your configuration is correct."
180 )
181
182 return [entry.export() for entry in entries]
183
184 def fetch_district_id(self, session: requests.Session) -> int:
185 res = session.get(
186 f"{self.api_url}/OrteMitOrtsteilen",
187 headers=API_HEADERS,
188 )
189 res.raise_for_status()
190 payload: DistrictsRes = res.json()
191
192 try:
193 return next(
194 entry["OrteId"]
195 for entry in payload["d"]
196 if entry["Ortsname"] == self.district
197 and entry["Ortsteilname"] == self.subdistrict
198 )
199 except StopIteration:
200 raise ValueError(
201 "District id cannot be fetched. "
202 "Please make sure that you entered a subdistrict if there is a comma on the website."
203 )
204
205 def fetch_street_id(self, session: requests.Session, district_id: int):
206 res = session.get(
207 f"{self.api_url}/Strassen",
208 params={
209 "$filter": f"Ort/OrteId eq {district_id} and OrtsteilName eq {quote_none(self.subdistrict)}",
210 "$orderby": "Name asc",
211 },
212 headers=API_HEADERS,
213 )
214 res.raise_for_status()
215 payload: StreetsRes = res.json()
216
217 try:
218 return next(
219 entry["StrassenId"]
220 for entry in payload["d"]
221 if entry["Name"] == self.street
222 )
223 except StopIteration:
224 raise ValueError(
225 "Street ID cannot be fetched. Please verify your configuration."
226 )
227
228
229 # Typed dictionaries for the API
230 # Automatically generated using https://pytyper.dev/
231
232
233 class DistrictRes(TypedDict):
234 OrteId: int
235 Ortsname: str
236 Ortsteilname: Optional[str]
237
238
239 class DistrictsRes(TypedDict):
240 d: List[DistrictRes]
241
242
243 class StreetRes(TypedDict):
244 StrassenId: int
245 Name: str
246 Plz: str
247
248
249 class StreetsRes(TypedDict):
250 d: List[StreetRes]
251
252
253 class Capacity(TypedDict):
254 VolumenId: int
255 VolumenWert: str
256
257
258 class WasteType(TypedDict):
259 AbfallartenId: int
260 Code: str
261 Name: str
262 Farbe: str
263 IsBio: bool
264 IsPapier: bool
265 IsRest: bool
266 IsWertstoff: bool
267 Bemerkung: None
268 Aktiv: None
269 IsSchadstoff: None
270
271
272 class ContainerType(TypedDict):
273 GefaesstarifArtenId: int
274 BescheidText: None
275 BescheidTextLeerungsgebuehr: None
276 Bezeichnung: str
277 GefaesstarifArtVerwenden: bool
278 GefaesstarifArtVerwendenAbfallkalender: bool
279 Bemerkung: None
280 Volumen: Capacity
281 Abfallart: WasteType
282 # Abfuhrrhythmus: Abfuhrrhythmus
283
284
285 class CollectionPlan(TypedDict):
286 AbfuhrplaeneId: int
287 Jahr: int
288 GefaesstarifArt: ContainerType
289 # AbfallartenObj: Abfuhrrhythmus
290
291
292 class CollectionRes(TypedDict):
293 AbfuhrtermineId: int
294 Termin: str
295 Abfuhrplan: CollectionPlan
296
297
298 class CollectionsRes(TypedDict):
299 d: List[CollectionRes]
300
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py
@@ -77,6 +77,12 @@
"api_url": "https://biedenkopfmzv.buergerportal.digital/api",
"operator": "biedenkopf",
},
+ {
+ "title": "Bürgerportal Bedburg",
+ "url": "https://www.bedburg.de/",
+ "api_url": "https://buerger-portal-bedburg.azurewebsites.net",
+ "operator": "bedburg",
+ },
]
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py\n@@ -77,6 +77,12 @@\n \"api_url\": \"https://biedenkopfmzv.buergerportal.digital/api\",\n \"operator\": \"biedenkopf\",\n },\n+ {\n+ \"title\": \"B\u00fcrgerportal Bedburg\",\n+ \"url\": \"https://www.bedburg.de/\",\n+ \"api_url\": \"https://buerger-portal-bedburg.azurewebsites.net\",\n+ \"operator\": \"bedburg\",\n+ },\n ]\n", "issue": "[Source Request]: \n### Municipality / Region\n\nBedburg\n\n### Collection Calendar Webpage\n\nhttps://buerger-portal-bedburg.azurewebsites.net/calendar/\n\n### Example Address\n\nAm Schirkerhof 2, Bedburg, Broich\n\n### Collection Data Format\n\nSomething different (add to additional information)\n\n### Additional Information\n\nThe city of Bedburg change its Waste Collection Provider. Thus the old Abfall+ Source doesn't work anymore. The new provider is Drekopf. Unfortunately they do not use the existing Drekopf API. The Webpage provided by Drekopf has multiple outputs. You can download the calendar via PDF, XLS, CSV and ICS, but i can't find the url. The underlying API seems to be C-Trace, but when calling this API, credentials are requested. Maybe i am missing something...\n", "before_files": [{"content": "import datetime\nimport re\nfrom dataclasses import dataclass\nfrom typing import List, Literal, Optional, TypedDict, Union\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"B\u00fcrgerportal\"\nURL = \"https://www.c-trace.de\"\nDESCRIPTION = \"Source for waste collection in multiple service areas.\"\n\n\ndef EXTRA_INFO():\n return [{\"title\": s[\"title\"], \"url\": s[\"url\"]} for s in SERVICE_MAP]\n\n\nTEST_CASES = {\n \"Cochem-Zell\": {\n \"operator\": \"cochem_zell\",\n \"district\": \"Bullay\",\n \"subdistrict\": \"Bullay\",\n \"street\": \"Layenweg\",\n \"number\": 3,\n },\n \"Alb-Donau\": {\n \"operator\": \"alb_donau\",\n \"district\": \"Blaubeuren\",\n \"street\": \"Alberstra\u00dfe\",\n \"number\": 3,\n },\n \"Biedenkopf\": {\n \"operator\": \"biedenkopf\",\n \"district\": \"Biedenkopf\",\n \"subdistrict\": \"Breidenstein\",\n \"street\": \"Auf dem Hammer\",\n \"number\": 1,\n },\n}\n\nICON_MAP = {\n \"mobil\": \"mdi:truck\",\n \"bio\": \"mdi:leaf\",\n \"papier\": \"mdi:package-variant\",\n \"verpackung\": \"mdi:recycle\",\n \"gelb\": \"mdi:recycle\",\n \"lvp\": \"mdi:recycle\",\n \"rest\": \"mdi:trash-can\",\n \"gruen\": \"mdi:forest\",\n \"gr\u00fcn\": \"mdi:forest\",\n \"baum\": \"mdi:forest\",\n \"schnitt\": \"mdi:forest\",\n \"schad\": \"mdi:biohazard\",\n}\nAPI_HEADERS = {\n \"Accept\": \"application/json, text/plain;q=0.5\",\n \"Cache-Control\": \"no-cache\",\n}\nOperator = Literal[\"cochem_zell\", \"alb_donau\", \"biedenkopf\"]\n\nSERVICE_MAP = [\n {\n \"title\": \"KV Cochem-Zell\",\n \"url\": \"https://www.cochem-zell-online.de/\",\n \"api_url\": \"https://buerger-portal-cochemzell.azurewebsites.net/api\",\n \"operator\": \"cochem_zell\",\n },\n {\n \"title\": \"Abfallwirtschaft Alb-Donau-Kreis\",\n \"url\": \"https://www.aw-adk.de/\",\n \"api_url\": \"https://buerger-portal-albdonaukreisabfallwirtschaft.azurewebsites.net/api\",\n 
\"operator\": \"alb_donau\",\n },\n {\n \"title\": \"MZV Biedenkopf\",\n \"url\": \"https://mzv-biedenkopf.de/\",\n \"api_url\": \"https://biedenkopfmzv.buergerportal.digital/api\",\n \"operator\": \"biedenkopf\",\n },\n]\n\n\n# This datalcass is used for adding entries to a set and remove duplicate entries.\n# The default `Collection` extends the standard dict and thus is not hashable.\n@dataclass(frozen=True, eq=True)\nclass CollectionEntry:\n date: datetime.date\n waste_type: str\n icon: Optional[str]\n\n def export(self) -> Collection:\n return Collection(self.date, self.waste_type, self.icon)\n\n\ndef quote_none(value: Optional[str]) -> str:\n if value is None:\n return \"null\"\n\n return f\"'{value}'\"\n\n\ndef get_api_map():\n return {s[\"operator\"]: s[\"api_url\"] for s in SERVICE_MAP}\n\n\nclass Source:\n def __init__(\n self,\n operator: Operator,\n district: str,\n street: str,\n subdistrict: Optional[str] = None,\n number: Union[int, str, None] = None,\n show_volume: bool = False,\n ):\n self.api_url = get_api_map()[operator]\n self.district = district\n self.subdistrict = subdistrict\n self.street = street\n self.number = number\n self.show_volume = show_volume\n\n def fetch(self) -> list[Collection]:\n session = requests.session()\n session.headers.update(API_HEADERS)\n\n year = datetime.datetime.now().year\n entries: set[CollectionEntry] = set()\n\n district_id = self.fetch_district_id(session)\n street_id = self.fetch_street_id(session, district_id)\n # Eventually verify house number in the future\n\n params = {\n \"$expand\": \"Abfuhrplan,Abfuhrplan/GefaesstarifArt/Abfallart,Abfuhrplan/GefaesstarifArt/Volumen\",\n \"$orderby\": \"Abfuhrplan/GefaesstarifArt/Abfallart/Name,Abfuhrplan/GefaesstarifArt/Volumen/VolumenWert\",\n \"orteId\": district_id,\n \"strassenId\": street_id,\n \"jahr\": year,\n }\n\n if self.number:\n params[\"hausNr\"] = (f\"'{self.number}'\",)\n\n res = session.get(\n f\"{self.api_url}/AbfuhrtermineAbJahr\",\n params=params,\n )\n res.raise_for_status()\n payload: CollectionsRes = res.json()\n\n date_regex = re.compile(r\"\\d+\")\n\n for collection in payload[\"d\"]:\n if date_match := re.search(date_regex, collection[\"Termin\"]):\n timestamp = float(date_match.group())\n date = datetime.datetime.utcfromtimestamp(timestamp / 1000).date()\n waste_type = collection[\"Abfuhrplan\"][\"GefaesstarifArt\"][\"Abfallart\"][\n \"Name\"\n ]\n icon = None\n\n for icon_type, tested_icon in ICON_MAP.items():\n if icon_type.lower() in waste_type.lower():\n icon = tested_icon\n\n if self.show_volume:\n volume = int(\n collection[\"Abfuhrplan\"][\"GefaesstarifArt\"][\"Volumen\"][\n \"VolumenWert\"\n ]\n )\n waste_type = f\"{waste_type} ({volume} l)\"\n\n entries.add(CollectionEntry(date, waste_type, icon))\n\n if len(entries) == 0:\n raise ValueError(\n \"No collections found! Please verify that your configuration is correct.\"\n )\n\n return [entry.export() for entry in entries]\n\n def fetch_district_id(self, session: requests.Session) -> int:\n res = session.get(\n f\"{self.api_url}/OrteMitOrtsteilen\",\n headers=API_HEADERS,\n )\n res.raise_for_status()\n payload: DistrictsRes = res.json()\n\n try:\n return next(\n entry[\"OrteId\"]\n for entry in payload[\"d\"]\n if entry[\"Ortsname\"] == self.district\n and entry[\"Ortsteilname\"] == self.subdistrict\n )\n except StopIteration:\n raise ValueError(\n \"District id cannot be fetched. 
\"\n \"Please make sure that you entered a subdistrict if there is a comma on the website.\"\n )\n\n def fetch_street_id(self, session: requests.Session, district_id: int):\n res = session.get(\n f\"{self.api_url}/Strassen\",\n params={\n \"$filter\": f\"Ort/OrteId eq {district_id} and OrtsteilName eq {quote_none(self.subdistrict)}\",\n \"$orderby\": \"Name asc\",\n },\n headers=API_HEADERS,\n )\n res.raise_for_status()\n payload: StreetsRes = res.json()\n\n try:\n return next(\n entry[\"StrassenId\"]\n for entry in payload[\"d\"]\n if entry[\"Name\"] == self.street\n )\n except StopIteration:\n raise ValueError(\n \"Street ID cannot be fetched. Please verify your configuration.\"\n )\n\n\n# Typed dictionaries for the API\n# Automatically generated using https://pytyper.dev/\n\n\nclass DistrictRes(TypedDict):\n OrteId: int\n Ortsname: str\n Ortsteilname: Optional[str]\n\n\nclass DistrictsRes(TypedDict):\n d: List[DistrictRes]\n\n\nclass StreetRes(TypedDict):\n StrassenId: int\n Name: str\n Plz: str\n\n\nclass StreetsRes(TypedDict):\n d: List[StreetRes]\n\n\nclass Capacity(TypedDict):\n VolumenId: int\n VolumenWert: str\n\n\nclass WasteType(TypedDict):\n AbfallartenId: int\n Code: str\n Name: str\n Farbe: str\n IsBio: bool\n IsPapier: bool\n IsRest: bool\n IsWertstoff: bool\n Bemerkung: None\n Aktiv: None\n IsSchadstoff: None\n\n\nclass ContainerType(TypedDict):\n GefaesstarifArtenId: int\n BescheidText: None\n BescheidTextLeerungsgebuehr: None\n Bezeichnung: str\n GefaesstarifArtVerwenden: bool\n GefaesstarifArtVerwendenAbfallkalender: bool\n Bemerkung: None\n Volumen: Capacity\n Abfallart: WasteType\n # Abfuhrrhythmus: Abfuhrrhythmus\n\n\nclass CollectionPlan(TypedDict):\n AbfuhrplaeneId: int\n Jahr: int\n GefaesstarifArt: ContainerType\n # AbfallartenObj: Abfuhrrhythmus\n\n\nclass CollectionRes(TypedDict):\n AbfuhrtermineId: int\n Termin: str\n Abfuhrplan: CollectionPlan\n\n\nclass CollectionsRes(TypedDict):\n d: List[CollectionRes]\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/buergerportal_de.py"}]} | 3,696 | 190 |
gh_patches_debug_35355 | rasdani/github-patches | git_diff | scikit-image__scikit-image-2134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`min_size` is not strictly enforced in the implementation of felzenszwalb
## Description
With `min_size` specified, there are still some segments whose size is less than it. I don't know if this is an inherent flaw of the algorithm.
## Way to reproduce
```
>>> I = skimage.io.imread('dragonbaby.jpg')
>>> fz = felzenszwalb(I, scale=300, sigma=0.8, min_size=80)
>>> (fz==9).sum()
1
```

</issue>
<code>
[start of skimage/segmentation/_felzenszwalb.py]
1 import numpy as np
2
3 from .._shared.utils import warn
4 from ._felzenszwalb_cy import _felzenszwalb_grey
5
6
7 def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):
8 """Computes Felsenszwalb's efficient graph based image segmentation.
9
10 Produces an oversegmentation of a multichannel (i.e. RGB) image
11 using a fast, minimum spanning tree based clustering on the image grid.
12 The parameter ``scale`` sets an observation level. Higher scale means
13 less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,
14 used for smoothing the image prior to segmentation.
15
16 The number of produced segments as well as their size can only be
17 controlled indirectly through ``scale``. Segment size within an image can
18 vary greatly depending on local contrast.
19
20 For RGB images, the algorithm computes a separate segmentation for each
21 channel and then combines these. The combined segmentation is the
22 intersection of the separate segmentations on the color channels.
23
24 Parameters
25 ----------
26 image : (width, height, 3) or (width, height) ndarray
27 Input image.
28 scale : float
29 Free parameter. Higher means larger clusters.
30 sigma : float
31 Width of Gaussian kernel used in preprocessing.
32 min_size : int
33 Minimum component size. Enforced using postprocessing.
34
35 Returns
36 -------
37 segment_mask : (width, height) ndarray
38 Integer mask indicating segment labels.
39
40 References
41 ----------
42 .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and
43 Huttenlocher, D.P. International Journal of Computer Vision, 2004
44
45 Examples
46 --------
47 >>> from skimage.segmentation import felzenszwalb
48 >>> from skimage.data import coffee
49 >>> img = coffee()
50 >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
51 """
52
53 if image.ndim == 2:
54 # assume single channel image
55 return _felzenszwalb_grey(image, scale=scale, sigma=sigma,
56 min_size=min_size)
57
58 elif image.ndim != 3:
59 raise ValueError("Felzenswalb segmentation can only operate on RGB and"
60 " grey images, but input array of ndim %d given."
61 % image.ndim)
62
63 # assume we got 2d image with multiple channels
64 n_channels = image.shape[2]
65 if n_channels != 3:
66 warn("Got image with %d channels. Is that really what you"
67 " wanted?" % image.shape[2])
68 segmentations = []
69 # compute quickshift for each channel
70 for c in range(n_channels):
71 channel = np.ascontiguousarray(image[:, :, c])
72 s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,
73 min_size=min_size)
74 segmentations.append(s)
75
76 # put pixels in same segment only if in the same segment in all images
77 # we do this by combining the channels to one number
78 n0 = segmentations[0].max() + 1
79 n1 = segmentations[1].max() + 1
80 segmentation = (segmentations[0] + segmentations[1] * n0
81 + segmentations[2] * n0 * n1)
82 # make segment labels consecutive numbers starting at 0
83 labels = np.unique(segmentation, return_inverse=True)[1]
84 return labels.reshape(image.shape[:2])
85
[end of skimage/segmentation/_felzenszwalb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/segmentation/_felzenszwalb.py b/skimage/segmentation/_felzenszwalb.py
--- a/skimage/segmentation/_felzenszwalb.py
+++ b/skimage/segmentation/_felzenszwalb.py
@@ -1,7 +1,7 @@
import numpy as np
from .._shared.utils import warn
-from ._felzenszwalb_cy import _felzenszwalb_grey
+from ._felzenszwalb_cy import _felzenszwalb_cython
def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):
@@ -17,9 +17,8 @@
controlled indirectly through ``scale``. Segment size within an image can
vary greatly depending on local contrast.
- For RGB images, the algorithm computes a separate segmentation for each
- channel and then combines these. The combined segmentation is the
- intersection of the separate segmentations on the color channels.
+ For RGB images, the algorithm uses the euclidean distance between pixels in
+ color space.
Parameters
----------
@@ -50,35 +49,6 @@
>>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
"""
- if image.ndim == 2:
- # assume single channel image
- return _felzenszwalb_grey(image, scale=scale, sigma=sigma,
- min_size=min_size)
-
- elif image.ndim != 3:
- raise ValueError("Felzenswalb segmentation can only operate on RGB and"
- " grey images, but input array of ndim %d given."
- % image.ndim)
-
- # assume we got 2d image with multiple channels
- n_channels = image.shape[2]
- if n_channels != 3:
- warn("Got image with %d channels. Is that really what you"
- " wanted?" % image.shape[2])
- segmentations = []
- # compute quickshift for each channel
- for c in range(n_channels):
- channel = np.ascontiguousarray(image[:, :, c])
- s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,
- min_size=min_size)
- segmentations.append(s)
-
- # put pixels in same segment only if in the same segment in all images
- # we do this by combining the channels to one number
- n0 = segmentations[0].max() + 1
- n1 = segmentations[1].max() + 1
- segmentation = (segmentations[0] + segmentations[1] * n0
- + segmentations[2] * n0 * n1)
- # make segment labels consecutive numbers starting at 0
- labels = np.unique(segmentation, return_inverse=True)[1]
- return labels.reshape(image.shape[:2])
+ image = np.atleast_3d(image)
+ return _felzenszwalb_cython(image, scale=scale, sigma=sigma,
+ min_size=min_size)
| {"golden_diff": "diff --git a/skimage/segmentation/_felzenszwalb.py b/skimage/segmentation/_felzenszwalb.py\n--- a/skimage/segmentation/_felzenszwalb.py\n+++ b/skimage/segmentation/_felzenszwalb.py\n@@ -1,7 +1,7 @@\n import numpy as np\n \n from .._shared.utils import warn\n-from ._felzenszwalb_cy import _felzenszwalb_grey\n+from ._felzenszwalb_cy import _felzenszwalb_cython\n \n \n def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n@@ -17,9 +17,8 @@\n controlled indirectly through ``scale``. Segment size within an image can\n vary greatly depending on local contrast.\n \n- For RGB images, the algorithm computes a separate segmentation for each\n- channel and then combines these. The combined segmentation is the\n- intersection of the separate segmentations on the color channels.\n+ For RGB images, the algorithm uses the euclidean distance between pixels in\n+ color space.\n \n Parameters\n ----------\n@@ -50,35 +49,6 @@\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n \n- if image.ndim == 2:\n- # assume single channel image\n- return _felzenszwalb_grey(image, scale=scale, sigma=sigma,\n- min_size=min_size)\n-\n- elif image.ndim != 3:\n- raise ValueError(\"Felzenswalb segmentation can only operate on RGB and\"\n- \" grey images, but input array of ndim %d given.\"\n- % image.ndim)\n-\n- # assume we got 2d image with multiple channels\n- n_channels = image.shape[2]\n- if n_channels != 3:\n- warn(\"Got image with %d channels. Is that really what you\"\n- \" wanted?\" % image.shape[2])\n- segmentations = []\n- # compute quickshift for each channel\n- for c in range(n_channels):\n- channel = np.ascontiguousarray(image[:, :, c])\n- s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,\n- min_size=min_size)\n- segmentations.append(s)\n-\n- # put pixels in same segment only if in the same segment in all images\n- # we do this by combining the channels to one number\n- n0 = segmentations[0].max() + 1\n- n1 = segmentations[1].max() + 1\n- segmentation = (segmentations[0] + segmentations[1] * n0\n- + segmentations[2] * n0 * n1)\n- # make segment labels consecutive numbers starting at 0\n- labels = np.unique(segmentation, return_inverse=True)[1]\n- return labels.reshape(image.shape[:2])\n+ image = np.atleast_3d(image)\n+ return _felzenszwalb_cython(image, scale=scale, sigma=sigma,\n+ min_size=min_size)\n", "issue": "`min_size` is not strictly conformed in the implementation of felzenszwalb\n## Description\n\nWith `min_size` specified, there're still some segments with sizes that less than it. I don't know if it is an inherent flaw of the algorithm.\n## Way to reproduce\n\n```\n>>> I = skimage.io.imread('dragonbaby.jpg')\n>>> fz = felzenszwalb(I, scale=300, sigma=0.8, min_size=80)\n>>> (fz==9).sum()\n1\n```\n\n\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .._shared.utils import warn\nfrom ._felzenszwalb_cy import _felzenszwalb_grey\n\n\ndef felzenszwalb(image, scale=1, sigma=0.8, min_size=20):\n \"\"\"Computes Felsenszwalb's efficient graph based image segmentation.\n\n Produces an oversegmentation of a multichannel (i.e. RGB) image\n using a fast, minimum spanning tree based clustering on the image grid.\n The parameter ``scale`` sets an observation level. Higher scale means\n less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,\n used for smoothing the image prior to segmentation.\n\n The number of produced segments as well as their size can only be\n controlled indirectly through ``scale``. 
Segment size within an image can\n vary greatly depending on local contrast.\n\n For RGB images, the algorithm computes a separate segmentation for each\n channel and then combines these. The combined segmentation is the\n intersection of the separate segmentations on the color channels.\n\n Parameters\n ----------\n image : (width, height, 3) or (width, height) ndarray\n Input image.\n scale : float\n Free parameter. Higher means larger clusters.\n sigma : float\n Width of Gaussian kernel used in preprocessing.\n min_size : int\n Minimum component size. Enforced using postprocessing.\n\n Returns\n -------\n segment_mask : (width, height) ndarray\n Integer mask indicating segment labels.\n\n References\n ----------\n .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and\n Huttenlocher, D.P. International Journal of Computer Vision, 2004\n\n Examples\n --------\n >>> from skimage.segmentation import felzenszwalb\n >>> from skimage.data import coffee\n >>> img = coffee()\n >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)\n \"\"\"\n\n if image.ndim == 2:\n # assume single channel image\n return _felzenszwalb_grey(image, scale=scale, sigma=sigma,\n min_size=min_size)\n\n elif image.ndim != 3:\n raise ValueError(\"Felzenswalb segmentation can only operate on RGB and\"\n \" grey images, but input array of ndim %d given.\"\n % image.ndim)\n\n # assume we got 2d image with multiple channels\n n_channels = image.shape[2]\n if n_channels != 3:\n warn(\"Got image with %d channels. Is that really what you\"\n \" wanted?\" % image.shape[2])\n segmentations = []\n # compute quickshift for each channel\n for c in range(n_channels):\n channel = np.ascontiguousarray(image[:, :, c])\n s = _felzenszwalb_grey(channel, scale=scale, sigma=sigma,\n min_size=min_size)\n segmentations.append(s)\n\n # put pixels in same segment only if in the same segment in all images\n # we do this by combining the channels to one number\n n0 = segmentations[0].max() + 1\n n1 = segmentations[1].max() + 1\n segmentation = (segmentations[0] + segmentations[1] * n0\n + segmentations[2] * n0 * n1)\n # make segment labels consecutive numbers starting at 0\n labels = np.unique(segmentation, return_inverse=True)[1]\n return labels.reshape(image.shape[:2])\n", "path": "skimage/segmentation/_felzenszwalb.py"}]} | 1,669 | 711 |
gh_patches_debug_248 | rasdani/github-patches | git_diff | statsmodels__statsmodels-3976 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The compat modules should use absolute imports
The [statsmodels.compat.collections](https://github.com/statsmodels/statsmodels/blob/a88830efc3a99cfbe0ebc9fbfd77820fe748fc59/statsmodels/compat/collections.py#L7) module imports its standard-library namesake without requesting absolute imports. While it seems to work in many cases, it causes a problem for packages that override `__import__`. See enlnt/pyq#18.
Please consider adding
```python
from __future__ import absolute_import
```
to the compat modules.
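
For completeness, a quick way to list every compat module that still lacks the future import (just an illustrative check, not part of the proposed change) could look like:

```python
import glob
import io

# Assumes it is run from the statsmodels repository root.
for path in sorted(glob.glob("statsmodels/compat/*.py")):
    with io.open(path, encoding="utf-8") as fh:
        source = fh.read()
    if "from __future__ import absolute_import" not in source:
        print("missing absolute_import:", path)
```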
</issue>
<code>
[start of statsmodels/compat/collections.py]
1 '''backported compatibility functions for Python's collections
2
3 '''
4
5 try:
6 #python >= 2.7
7 from collections import OrderedDict
8 except ImportError:
9 #http://code.activestate.com/recipes/576693/
10 #author: Raymond Hettinger
11 from .ordereddict import OrderedDict
12
13 try:
14 #python >= 2.7
15 from collections import Counter
16 except ImportError:
17 #http://code.activestate.com/recipes/576611/
18 #author: Raymond Hettinger
19 from .counter import Counter
20
[end of statsmodels/compat/collections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/statsmodels/compat/collections.py b/statsmodels/compat/collections.py
--- a/statsmodels/compat/collections.py
+++ b/statsmodels/compat/collections.py
@@ -1,6 +1,7 @@
'''backported compatibility functions for Python's collections
'''
+from __future__ import absolute_import
try:
#python >= 2.7
| {"golden_diff": "diff --git a/statsmodels/compat/collections.py b/statsmodels/compat/collections.py\n--- a/statsmodels/compat/collections.py\n+++ b/statsmodels/compat/collections.py\n@@ -1,6 +1,7 @@\n '''backported compatibility functions for Python's collections\n \n '''\n+from __future__ import absolute_import\n \n try:\n #python >= 2.7\n", "issue": "The compat modules should use absolute imports\nThe [statsmodels.compat.collections](https://github.com/statsmodels/statsmodels/blob/a88830efc3a99cfbe0ebc9fbfd77820fe748fc59/statsmodels/compat/collections.py#L7) imports the namesake standard library module without requesting absolute imports. While it seems to work in many cases, it causes a problem to packages that override `__import__`. See enlnt/pyq#18.\r\n\r\nPlease consider adding\r\n\r\n```python\r\nfrom __future__ import absolute_import\r\n```\r\nto the compat modules.\n", "before_files": [{"content": "'''backported compatibility functions for Python's collections\n\n'''\n\ntry:\n #python >= 2.7\n from collections import OrderedDict\nexcept ImportError:\n #http://code.activestate.com/recipes/576693/\n #author: Raymond Hettinger\n from .ordereddict import OrderedDict\n\ntry:\n #python >= 2.7\n from collections import Counter\nexcept ImportError:\n #http://code.activestate.com/recipes/576611/\n #author: Raymond Hettinger\n from .counter import Counter\n", "path": "statsmodels/compat/collections.py"}]} | 828 | 81 |
gh_patches_debug_16439 | rasdani/github-patches | git_diff | streamlit__streamlit-761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warn users when they try to run Streamlit on an `ipynb` file.
# Problem
[Some users](https://discuss.streamlit.io/t/app-loading-problem/199/7?u=adrien_treuille) have tried to run Streamlit on a Jupyter notebook (`.ipynb`) file and are confused when this doesn't work.
# Solution
It would be great if for _any_ non `.py` file, Streamlit behaved as follows:
```bash
$ streamlit run my_msft_word_file.docx
Error: Streamlit requires raw Python (.py) files, not .docx.
For more information, please see https://streamlit.io/docs
$ streamlit run my_notebook.ipynb
Error: Streamlit requires raw Python (.py) files, not .ipynb.
For more information, please see https://streamlit.io/docs
```
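
A minimal sketch of how such a check could be wired into the `run` command is shown below; the accepted-extension list and the exact message wording are placeholders:

```python
import os

import click

ACCEPTED_FILE_EXTENSIONS = ("py", "py3")  # assumed allow-list


def validate_run_target(target):
    # Reject anything that is not a raw Python script before bootstrapping the server.
    _, extension = os.path.splitext(target)
    if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:
        raise click.BadArgumentUsage(
            "Streamlit requires raw Python (.py) files, not %s.\n"
            "For more information, please see https://streamlit.io/docs" % extension
        )
```

With a check like this in place, `streamlit run my_notebook.ipynb` would fail fast with the message above instead of silently misbehaving.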
</issue>
<code>
[start of lib/streamlit/cli.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2018-2019 Streamlit Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """This is a script which is run when the Streamlit package is executed."""
17
18 # Python 2/3 compatibility
19 from __future__ import print_function, division, absolute_import
20
21 # Not importing unicode_literals from __future__ because click doesn't like it.
22 from streamlit.compatibility import setup_2_3_shims
23
24 setup_2_3_shims(globals())
25
26 from streamlit import config as _config
27
28 import os
29 import click
30
31 import streamlit
32 from streamlit.credentials import Credentials, check_credentials
33 from streamlit import version
34 import streamlit.bootstrap as bootstrap
35 from streamlit.case_converters import to_snake_case
36
37 LOG_LEVELS = ["error", "warning", "info", "debug"]
38
39 NEW_VERSION_TEXT = """
40 %(new_version)s
41
42 See what's new at https://discuss.streamlit.io/c/announcements
43
44 Enter the following command to upgrade:
45 %(prompt)s %(command)s
46 """ % {
47 "new_version": click.style(
48 "A new version of Streamlit is available.", fg="blue", bold=True
49 ),
50 "prompt": click.style("$", fg="blue"),
51 "command": click.style("pip install streamlit --upgrade", bold=True),
52 }
53
54
55 def _convert_config_option_to_click_option(config_option):
56 """Composes given config option options as options for click lib."""
57 option = "--{}".format(config_option.key)
58 param = config_option.key.replace(".", "_")
59 description = config_option.description
60 if config_option.deprecated:
61 description += "\n {} - {}".format(
62 config_option.deprecation_text, config_option.deprecation_date
63 )
64 envvar = "STREAMLIT_{}".format(to_snake_case(param).upper())
65
66 return {
67 "param": param,
68 "description": description,
69 "type": config_option.type,
70 "option": option,
71 "envvar": envvar,
72 }
73
74
75 def configurator_options(func):
76 """Decorator that adds config param keys to click dynamically."""
77 for _, value in reversed(_config._config_options.items()):
78 parsed_parameter = _convert_config_option_to_click_option(value)
79 config_option = click.option(
80 parsed_parameter["option"],
81 parsed_parameter["param"],
82 help=parsed_parameter["description"],
83 type=parsed_parameter["type"],
84 show_envvar=True,
85 envvar=parsed_parameter["envvar"],
86 )
87 func = config_option(func)
88 return func
89
90
91 def _apply_config_options_from_cli(kwargs):
92 """The "streamlit run" command supports passing Streamlit's config options
93 as flags.
94
95 This function reads through all config flags, massage them, and
96 pass them to _set_config() overriding default values and values set via
97 config.toml file
98
99 """
100 for config_option in kwargs:
101 if kwargs[config_option] is not None:
102 config_option_def_key = config_option.replace("_", ".")
103
104 _config._set_option(
105 config_option_def_key,
106 kwargs[config_option],
107 "command-line argument or environment variable",
108 )
109
110
111 # Fetch remote file at url_path to script_path
112 def _download_remote(script_path, url_path):
113 import requests
114
115 with open(script_path, "wb") as fp:
116 try:
117 resp = requests.get(url_path)
118 resp.raise_for_status()
119 fp.write(resp.content)
120 except requests.exceptions.RequestException as e:
121 raise click.BadParameter(("Unable to fetch {}.\n{}".format(url_path, e)))
122
123
124 @click.group(context_settings={"auto_envvar_prefix": "STREAMLIT"})
125 @click.option("--log_level", show_default=True, type=click.Choice(LOG_LEVELS))
126 @click.version_option(prog_name="Streamlit")
127 @click.pass_context
128 def main(ctx, log_level="info"):
129 """Try out a demo with:
130
131 $ streamlit hello
132
133 Or use the line below to run your own script:
134
135 $ streamlit run your_script.py
136 """
137
138 if log_level:
139 import streamlit.logger
140
141 streamlit.logger.set_log_level(log_level.upper())
142
143
144 @main.command("help")
145 @click.pass_context
146 def help(ctx):
147 """Print this help message."""
148 # Pretend user typed 'streamlit --help' instead of 'streamlit help'.
149 import sys
150
151 assert len(sys.argv) == 2 # This is always true, but let's assert anyway.
152 sys.argv[1] = "--help"
153 main()
154
155
156 @main.command("version")
157 @click.pass_context
158 def main_version(ctx):
159 """Print Streamlit's version number."""
160 # Pretend user typed 'streamlit --version' instead of 'streamlit version'
161 import sys
162
163 assert len(sys.argv) == 2 # This is always true, but let's assert anyway.
164 sys.argv[1] = "--version"
165 main()
166
167
168 @main.command("docs")
169 def main_docs():
170 """Show help in browser."""
171 print("Showing help page in browser...")
172 from streamlit import util
173
174 util.open_browser("https://streamlit.io/docs")
175
176
177 @main.command("hello")
178 @configurator_options
179 def main_hello(**kwargs):
180 """Runs the Hello World script."""
181 from streamlit.hello import hello
182
183 _apply_config_options_from_cli(kwargs)
184
185 filename = hello.__file__
186
187 # For Python 2 when Streamlit is actually installed (make install rather
188 # than make develop).
189 if filename.endswith(".pyc"):
190 filename = "%s.py" % filename[:-4]
191
192 _main_run(filename)
193
194
195 @main.command("run")
196 @configurator_options
197 @click.argument("target", required=True, envvar="STREAMLIT_RUN_TARGET")
198 @click.argument("args", nargs=-1)
199 def main_run(target, args=None, **kwargs):
200 """Run a Python script, piping stderr to Streamlit.
201
202 The script can be local or it can be an url. In the latter case, Streamlit
203 will download the script to a temporary file and runs this file.
204
205 """
206 from validators import url
207
208 _apply_config_options_from_cli(kwargs)
209
210 if url(target):
211 from streamlit.temporary_directory import TemporaryDirectory
212
213 with TemporaryDirectory() as temp_dir:
214 from urllib.parse import urlparse
215 from streamlit import url_util
216
217 path = urlparse(target).path
218 script_path = os.path.join(temp_dir, path.strip("/").rsplit("/", 1)[-1])
219 # if this is a GitHub/Gist blob url, convert to a raw URL first.
220 target = url_util.process_gitblob_url(target)
221 _download_remote(script_path, target)
222 _main_run(script_path, args)
223 else:
224 if not os.path.exists(target):
225 raise click.BadParameter("File does not exist: {}".format(target))
226 _main_run(target, args)
227
228
229 # Utility function to compute the command line as a string
230 def _get_command_line_as_string():
231 import subprocess
232
233 cmd_line_as_list = [click.get_current_context().parent.command_path]
234 cmd_line_as_list.extend(click.get_os_args())
235 return subprocess.list2cmdline(cmd_line_as_list)
236
237
238 def _main_run(file, args=[]):
239 command_line = _get_command_line_as_string()
240
241 # Set a global flag indicating that we're "within" streamlit.
242 streamlit._is_running_with_streamlit = True
243
244 # Check credentials.
245 check_credentials()
246
247 # Notify if streamlit is out of date.
248 if version.should_show_new_version_notice():
249 click.echo(NEW_VERSION_TEXT)
250
251 bootstrap.run(file, command_line, args)
252
253
254 # DEPRECATED
255
256 # TODO: Remove after 2019-09-01
257 @main.command("clear_cache", deprecated=True, hidden=True)
258 @click.pass_context
259 def main_clear_cache(ctx):
260 """Deprecated."""
261 click.echo(click.style('Use "cache clear" instead.', fg="red"))
262 ctx.invoke(cache_clear)
263
264
265 # TODO: Remove after 2019-09-01
266 @main.command("show_config", deprecated=True, hidden=True)
267 @click.pass_context
268 def main_show_config(ctx):
269 """Deprecated."""
270 click.echo(click.style('Use "config show" instead.', fg="red"))
271 ctx.invoke(config_show)
272
273
274 # SUBCOMMAND: cache
275
276
277 @main.group("cache")
278 def cache():
279 """Manage the Streamlit cache."""
280 pass
281
282
283 @cache.command("clear")
284 def cache_clear():
285 """Clear the Streamlit on-disk cache."""
286 import streamlit.caching
287
288 result = streamlit.caching.clear_cache()
289 cache_path = streamlit.caching.get_cache_path()
290 if result:
291 print("Cleared directory %s." % cache_path)
292 else:
293 print("Nothing to clear at %s." % cache_path)
294
295
296 # SUBCOMMAND: config
297
298
299 @main.group("config")
300 def config():
301 """Manage Streamlit's config settings."""
302 pass
303
304
305 @config.command("show")
306 @configurator_options
307 def config_show(**kwargs):
308 """Show all of Streamlit's config settings."""
309
310 _apply_config_options_from_cli(kwargs)
311
312 _config.show_config()
313
314
315 # SUBCOMMAND: activate
316
317
318 @main.group("activate", invoke_without_command=True)
319 @click.pass_context
320 def activate(ctx):
321 """Activate Streamlit by entering your email."""
322 if not ctx.invoked_subcommand:
323 Credentials.get_current().activate()
324
325
326 @activate.command("reset")
327 def activate_reset():
328 """Reset Activation Credentials."""
329 Credentials.get_current().reset()
330
331
332 if __name__ == "__main__":
333 main()
334
[end of lib/streamlit/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/cli.py b/lib/streamlit/cli.py
--- a/lib/streamlit/cli.py
+++ b/lib/streamlit/cli.py
@@ -34,7 +34,9 @@
import streamlit.bootstrap as bootstrap
from streamlit.case_converters import to_snake_case
-LOG_LEVELS = ["error", "warning", "info", "debug"]
+ACCEPTED_FILE_EXTENSIONS = ("py", "py3")
+
+LOG_LEVELS = ("error", "warning", "info", "debug")
NEW_VERSION_TEXT = """
%(new_version)s
@@ -207,6 +209,10 @@
_apply_config_options_from_cli(kwargs)
+ _, extension = os.path.splitext(target)
+ if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:
+ raise click.BadArgumentUsage("Streamlit requires raw Python (.py) files, not %s.\nFor more information, please see https://streamlit.io/docs" % extension)
+
if url(target):
from streamlit.temporary_directory import TemporaryDirectory
| {"golden_diff": "diff --git a/lib/streamlit/cli.py b/lib/streamlit/cli.py\n--- a/lib/streamlit/cli.py\n+++ b/lib/streamlit/cli.py\n@@ -34,7 +34,9 @@\n import streamlit.bootstrap as bootstrap\n from streamlit.case_converters import to_snake_case\n \n-LOG_LEVELS = [\"error\", \"warning\", \"info\", \"debug\"]\n+ACCEPTED_FILE_EXTENSIONS = (\"py\", \"py3\")\n+\n+LOG_LEVELS = (\"error\", \"warning\", \"info\", \"debug\")\n \n NEW_VERSION_TEXT = \"\"\"\n %(new_version)s\n@@ -207,6 +209,10 @@\n \n _apply_config_options_from_cli(kwargs)\n \n+ _, extension = os.path.splitext(target)\n+ if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:\n+ raise click.BadArgumentUsage(\"Streamlit requires raw Python (.py) files, not %s.\\nFor more information, please see https://streamlit.io/docs\" % extension)\n+\n if url(target):\n from streamlit.temporary_directory import TemporaryDirectory\n", "issue": "Warn users when they try to run Streamlit on an `ipynb` file.\n# Problem\r\n\r\n[Some users](https://discuss.streamlit.io/t/app-loading-problem/199/7?u=adrien_treuille) have tried to run Streamlit on a Jupyter notebook (`.ipnyb`) file and are confused when this doesn't work.\r\n\r\n# Solution\r\n\r\nIt would be great if for _any_ non `.py` file, Streamlit behaved as follows:\r\n\r\n```bash\r\n$ streamlit run my_msft_word_file.docx\r\n\r\nError: Streamlit requires raw Python (.py) files, not .docx.\r\nFor more information, please see https://streamlit.io/docs\r\n\r\n$ streamlit run my_notebook.ipnyb\r\n\r\nError: Streamlit requires raw Python (.py) files, not .ipnyb.\r\nFor more information, please see https://streamlit.io/docs\r\n```\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is a script which is run when the Streamlit package is executed.\"\"\"\n\n# Python 2/3 compatibility\nfrom __future__ import print_function, division, absolute_import\n\n# Not importing unicode_literals from __future__ because click doesn't like it.\nfrom streamlit.compatibility import setup_2_3_shims\n\nsetup_2_3_shims(globals())\n\nfrom streamlit import config as _config\n\nimport os\nimport click\n\nimport streamlit\nfrom streamlit.credentials import Credentials, check_credentials\nfrom streamlit import version\nimport streamlit.bootstrap as bootstrap\nfrom streamlit.case_converters import to_snake_case\n\nLOG_LEVELS = [\"error\", \"warning\", \"info\", \"debug\"]\n\nNEW_VERSION_TEXT = \"\"\"\n %(new_version)s\n\n See what's new at https://discuss.streamlit.io/c/announcements\n\n Enter the following command to upgrade:\n %(prompt)s %(command)s\n\"\"\" % {\n \"new_version\": click.style(\n \"A new version of Streamlit is available.\", fg=\"blue\", bold=True\n ),\n \"prompt\": click.style(\"$\", fg=\"blue\"),\n \"command\": click.style(\"pip install streamlit --upgrade\", bold=True),\n}\n\n\ndef _convert_config_option_to_click_option(config_option):\n \"\"\"Composes given config option options as options for click 
lib.\"\"\"\n option = \"--{}\".format(config_option.key)\n param = config_option.key.replace(\".\", \"_\")\n description = config_option.description\n if config_option.deprecated:\n description += \"\\n {} - {}\".format(\n config_option.deprecation_text, config_option.deprecation_date\n )\n envvar = \"STREAMLIT_{}\".format(to_snake_case(param).upper())\n\n return {\n \"param\": param,\n \"description\": description,\n \"type\": config_option.type,\n \"option\": option,\n \"envvar\": envvar,\n }\n\n\ndef configurator_options(func):\n \"\"\"Decorator that adds config param keys to click dynamically.\"\"\"\n for _, value in reversed(_config._config_options.items()):\n parsed_parameter = _convert_config_option_to_click_option(value)\n config_option = click.option(\n parsed_parameter[\"option\"],\n parsed_parameter[\"param\"],\n help=parsed_parameter[\"description\"],\n type=parsed_parameter[\"type\"],\n show_envvar=True,\n envvar=parsed_parameter[\"envvar\"],\n )\n func = config_option(func)\n return func\n\n\ndef _apply_config_options_from_cli(kwargs):\n \"\"\"The \"streamlit run\" command supports passing Streamlit's config options\n as flags.\n\n This function reads through all config flags, massage them, and\n pass them to _set_config() overriding default values and values set via\n config.toml file\n\n \"\"\"\n for config_option in kwargs:\n if kwargs[config_option] is not None:\n config_option_def_key = config_option.replace(\"_\", \".\")\n\n _config._set_option(\n config_option_def_key,\n kwargs[config_option],\n \"command-line argument or environment variable\",\n )\n\n\n# Fetch remote file at url_path to script_path\ndef _download_remote(script_path, url_path):\n import requests\n\n with open(script_path, \"wb\") as fp:\n try:\n resp = requests.get(url_path)\n resp.raise_for_status()\n fp.write(resp.content)\n except requests.exceptions.RequestException as e:\n raise click.BadParameter((\"Unable to fetch {}.\\n{}\".format(url_path, e)))\n\n\[email protected](context_settings={\"auto_envvar_prefix\": \"STREAMLIT\"})\[email protected](\"--log_level\", show_default=True, type=click.Choice(LOG_LEVELS))\[email protected]_option(prog_name=\"Streamlit\")\[email protected]_context\ndef main(ctx, log_level=\"info\"):\n \"\"\"Try out a demo with:\n\n $ streamlit hello\n\n Or use the line below to run your own script:\n\n $ streamlit run your_script.py\n \"\"\"\n\n if log_level:\n import streamlit.logger\n\n streamlit.logger.set_log_level(log_level.upper())\n\n\[email protected](\"help\")\[email protected]_context\ndef help(ctx):\n \"\"\"Print this help message.\"\"\"\n # Pretend user typed 'streamlit --help' instead of 'streamlit help'.\n import sys\n\n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--help\"\n main()\n\n\[email protected](\"version\")\[email protected]_context\ndef main_version(ctx):\n \"\"\"Print Streamlit's version number.\"\"\"\n # Pretend user typed 'streamlit --version' instead of 'streamlit version'\n import sys\n\n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--version\"\n main()\n\n\[email protected](\"docs\")\ndef main_docs():\n \"\"\"Show help in browser.\"\"\"\n print(\"Showing help page in browser...\")\n from streamlit import util\n\n util.open_browser(\"https://streamlit.io/docs\")\n\n\[email protected](\"hello\")\n@configurator_options\ndef main_hello(**kwargs):\n \"\"\"Runs the Hello World script.\"\"\"\n from streamlit.hello import hello\n\n 
_apply_config_options_from_cli(kwargs)\n\n filename = hello.__file__\n\n # For Python 2 when Streamlit is actually installed (make install rather\n # than make develop).\n if filename.endswith(\".pyc\"):\n filename = \"%s.py\" % filename[:-4]\n\n _main_run(filename)\n\n\[email protected](\"run\")\n@configurator_options\[email protected](\"target\", required=True, envvar=\"STREAMLIT_RUN_TARGET\")\[email protected](\"args\", nargs=-1)\ndef main_run(target, args=None, **kwargs):\n \"\"\"Run a Python script, piping stderr to Streamlit.\n\n The script can be local or it can be an url. In the latter case, Streamlit\n will download the script to a temporary file and runs this file.\n\n \"\"\"\n from validators import url\n\n _apply_config_options_from_cli(kwargs)\n\n if url(target):\n from streamlit.temporary_directory import TemporaryDirectory\n\n with TemporaryDirectory() as temp_dir:\n from urllib.parse import urlparse\n from streamlit import url_util\n\n path = urlparse(target).path\n script_path = os.path.join(temp_dir, path.strip(\"/\").rsplit(\"/\", 1)[-1])\n # if this is a GitHub/Gist blob url, convert to a raw URL first.\n target = url_util.process_gitblob_url(target)\n _download_remote(script_path, target)\n _main_run(script_path, args)\n else:\n if not os.path.exists(target):\n raise click.BadParameter(\"File does not exist: {}\".format(target))\n _main_run(target, args)\n\n\n# Utility function to compute the command line as a string\ndef _get_command_line_as_string():\n import subprocess\n\n cmd_line_as_list = [click.get_current_context().parent.command_path]\n cmd_line_as_list.extend(click.get_os_args())\n return subprocess.list2cmdline(cmd_line_as_list)\n\n\ndef _main_run(file, args=[]):\n command_line = _get_command_line_as_string()\n\n # Set a global flag indicating that we're \"within\" streamlit.\n streamlit._is_running_with_streamlit = True\n\n # Check credentials.\n check_credentials()\n\n # Notify if streamlit is out of date.\n if version.should_show_new_version_notice():\n click.echo(NEW_VERSION_TEXT)\n\n bootstrap.run(file, command_line, args)\n\n\n# DEPRECATED\n\n# TODO: Remove after 2019-09-01\[email protected](\"clear_cache\", deprecated=True, hidden=True)\[email protected]_context\ndef main_clear_cache(ctx):\n \"\"\"Deprecated.\"\"\"\n click.echo(click.style('Use \"cache clear\" instead.', fg=\"red\"))\n ctx.invoke(cache_clear)\n\n\n# TODO: Remove after 2019-09-01\[email protected](\"show_config\", deprecated=True, hidden=True)\[email protected]_context\ndef main_show_config(ctx):\n \"\"\"Deprecated.\"\"\"\n click.echo(click.style('Use \"config show\" instead.', fg=\"red\"))\n ctx.invoke(config_show)\n\n\n# SUBCOMMAND: cache\n\n\[email protected](\"cache\")\ndef cache():\n \"\"\"Manage the Streamlit cache.\"\"\"\n pass\n\n\[email protected](\"clear\")\ndef cache_clear():\n \"\"\"Clear the Streamlit on-disk cache.\"\"\"\n import streamlit.caching\n\n result = streamlit.caching.clear_cache()\n cache_path = streamlit.caching.get_cache_path()\n if result:\n print(\"Cleared directory %s.\" % cache_path)\n else:\n print(\"Nothing to clear at %s.\" % cache_path)\n\n\n# SUBCOMMAND: config\n\n\[email protected](\"config\")\ndef config():\n \"\"\"Manage Streamlit's config settings.\"\"\"\n pass\n\n\[email protected](\"show\")\n@configurator_options\ndef config_show(**kwargs):\n \"\"\"Show all of Streamlit's config settings.\"\"\"\n\n _apply_config_options_from_cli(kwargs)\n\n _config.show_config()\n\n\n# SUBCOMMAND: activate\n\n\[email protected](\"activate\", 
invoke_without_command=True)\[email protected]_context\ndef activate(ctx):\n \"\"\"Activate Streamlit by entering your email.\"\"\"\n if not ctx.invoked_subcommand:\n Credentials.get_current().activate()\n\n\[email protected](\"reset\")\ndef activate_reset():\n \"\"\"Reset Activation Credentials.\"\"\"\n Credentials.get_current().reset()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lib/streamlit/cli.py"}]} | 3,835 | 229 |
gh_patches_debug_2646 | rasdani/github-patches | git_diff | jupyter__docker-stacks-1964 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] - Healthcheck fails when using proxy
### What docker image(s) are you using?
base-notebook
### Host OS system and architecture running docker image
Windows 11 as host and linux/amd64 for docker
### What Docker command are you running?
docker compose up with the following compose file:

```yaml
version: '3.4'
services:
datamining:
container_name: xxxx
image: xxxx
build:
context: .
dockerfile: ./Dockerfile
ports:
- "8888:8888"
volumes:
- xxxx:/home/jovyan/work
environment:
- DOCKER_STACKS_JUPYTER_CMD=lab
restart: on-failure
```
### How to Reproduce the problem?
The precondition is that the machine operates in a corporate environment that uses the company's proxy.
Start the container as above.
Check the state of the container with ```docker container ls```
The container is marked as unhealthy.
### Command output
```bash session
abcdefghijk "tini -g -- start-no…" x hours ago Up x hours (unhealthy) 0.0.0.0:8888->8888/tcp xxxx
```
### Expected behavior
```abcdedfghi abcdefghijk "tini -g -- start-no…" x hours ago Up x hours (healthy) 0.0.0.0:8888->8888/tcp xxxx```
### Actual behavior
After investigating the issue, the problem is that `docker_healthcheck.py` does not run successfully, giving the following error message:
```
Traceback (most recent call last):
File "/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py", line 790, in urlopen
response = self._make_request(
^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py", line 536, in _make_request
response = conn.getresponse()
^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/connection.py", line 461, in getresponse
httplib_response = super().getresponse()
^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/http/client.py", line 1378, in getresponse
response.begin()
File "/opt/conda/lib/python3.11/http/client.py", line 318, in begin
version, status, reason = self._read_status()
^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/http/client.py", line 287, in _read_status
raise RemoteDisconnected("Remote end closed connection without"
http.client.RemoteDisconnected: Remote end closed connection without response
The above exception was the direct cause of the following exception:
urllib3.exceptions.ProxyError: ('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response'))
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.11/site-packages/requests/adapters.py", line 486, in send
resp = conn.urlopen(
^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py", line 844, in urlopen
retries = retries.increment(
^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/urllib3/util/retry.py", line 515, in increment
raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
urllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/etc/jupyter/docker_healthcheck.py", line 19, in <module>
r = requests.get(url, verify=False) # request without SSL verification
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/api.py", line 73, in get
return request("get", url, params=params, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/api.py", line 59, in request
return session.request(method=method, url=url, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/sessions.py", line 589, in request
resp = self.send(prep, **send_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/sessions.py", line 703, in send
r = adapter.send(request, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/conda/lib/python3.11/site-packages/requests/adapters.py", line 513, in send
raise ProxyError(e, request=request)
requests.exceptions.ProxyError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))
```
### Anything else?
After investigating the issue further, I came to the conclusion that going through the proxy was the problem, so I applied the following fix to ```docker_healthcheck.py```:
```python
proxies = {
"http": None,
"https": None,
}
r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification
```
Now the healthcheck works!
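
An equivalent way to keep the container-internal request off the corporate proxy (a minimal sketch, not the project's official fix, with a hypothetical placeholder URL standing in for the one read from the runtime JSON) is to disable `requests`' environment-based proxy detection via `Session.trust_env`:

```python
import requests

# Sketch only: trust_env=False makes requests ignore HTTP_PROXY/HTTPS_PROXY
# (and NO_PROXY) environment variables, so the healthcheck talks to the
# local Jupyter server directly instead of going through the proxy.
url = "http://127.0.0.1:8888/api"  # hypothetical stand-in for the URL from the runtime JSON

session = requests.Session()
session.trust_env = False

r = session.get(url, verify=False)  # same request, just without proxy lookup
r.raise_for_status()
print(r.content)
```

Either variant (an explicit empty `proxies` mapping or `trust_env = False`) avoids routing the loopback request through the proxy.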
### Latest Docker version
- [X] I've updated my Docker version to the latest available, and the issue still persists
</issue>
<code>
[start of base-notebook/docker_healthcheck.py]
1 #!/usr/bin/env python3
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4 import json
5 import os
6 from pathlib import Path
7
8 import requests
9
10 # A number of operations below deliberately don't check for possible errors
11 # As this is a healthcheck, it should succeed or raise an exception on error
12
13 runtime_dir = Path("/home/") / os.environ["NB_USER"] / ".local/share/jupyter/runtime/"
14 json_file = next(runtime_dir.glob("*server-*.json"))
15
16 url = json.loads(json_file.read_bytes())["url"]
17 url = url + "api"
18
19 r = requests.get(url, verify=False) # request without SSL verification
20 r.raise_for_status()
21 print(r.content)
22
[end of base-notebook/docker_healthcheck.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/base-notebook/docker_healthcheck.py b/base-notebook/docker_healthcheck.py
--- a/base-notebook/docker_healthcheck.py
+++ b/base-notebook/docker_healthcheck.py
@@ -16,6 +16,11 @@
url = json.loads(json_file.read_bytes())["url"]
url = url + "api"
-r = requests.get(url, verify=False) # request without SSL verification
+proxies = {
+ "http": "",
+ "https": "",
+}
+
+r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification
r.raise_for_status()
print(r.content)
| {"golden_diff": "diff --git a/base-notebook/docker_healthcheck.py b/base-notebook/docker_healthcheck.py\n--- a/base-notebook/docker_healthcheck.py\n+++ b/base-notebook/docker_healthcheck.py\n@@ -16,6 +16,11 @@\n url = json.loads(json_file.read_bytes())[\"url\"]\n url = url + \"api\"\n \n-r = requests.get(url, verify=False) # request without SSL verification\n+proxies = {\n+ \"http\": \"\",\n+ \"https\": \"\",\n+}\n+\n+r = requests.get(url, proxies=proxies, verify=False) # request without SSL verification\n r.raise_for_status()\n print(r.content)\n", "issue": "[BUG] - Healthcheck fails when using proxy\n### What docker image(s) are you using?\r\n\r\nbase-notebook\r\n\r\n### Host OS system and architecture running docker image\r\n\r\nWindows 11 as host and linux/amd64 for docker\r\n\r\n### What Docker command are you running?\r\n\r\ndocker compose up with the following dockerfile:\r\n\r\n```Dockerfile\r\nversion: '3.4'\r\n\r\nservices:\r\n datamining:\r\n container_name: xxxx\r\n image: xxxx\r\n build:\r\n context: .\r\n dockerfile: ./Dockerfile\r\n ports:\r\n - \"8888:8888\"\r\n volumes:\r\n - xxxx:/home/jovyan/work\r\n environment:\r\n - DOCKER_STACKS_JUPYTER_CMD=lab\r\n restart: on-failure\r\n```\r\n\r\n### How to Reproduce the problem?\r\n\r\nPrecondition is that the machine has to operate in a corporate environment using the companies proxy.\r\nStart the container as above.\r\nCheck the state of the container with ```docker container ls```\r\nThe container is marked as unhealthy.\r\n\r\n### Command output\r\n\r\n```bash session\r\nabcdefghijk \"tini -g -- start-no\u2026\" x hours ago Up x hours (unhealthy) 0.0.0.0:8888->8888/tcp xxxx\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\n```abcdedfghi abcdefghijk \"tini -g -- start-no\u2026\" x hours ago Up x hours (healthy) 0.0.0.0:8888->8888/tcp xxxx```\r\n\r\n### Actual behavior\r\n\r\nAfter investigating the issue the problem is that docker_healthcheck.py does not run successfully giving the following error message:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 790, in urlopen\r\n response = self._make_request(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 536, in _make_request\r\n response = conn.getresponse()\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/connection.py\", line 461, in getresponse\r\n httplib_response = super().getresponse()\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/http/client.py\", line 1378, in getresponse\r\n response.begin()\r\n File \"/opt/conda/lib/python3.11/http/client.py\", line 318, in begin\r\n version, status, reason = self._read_status()\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/http/client.py\", line 287, in _read_status\r\n raise RemoteDisconnected(\"Remote end closed connection without\"\r\nhttp.client.RemoteDisconnected: Remote end closed connection without response\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nurllib3.exceptions.ProxyError: ('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response'))\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/adapters.py\", line 486, in send\r\n resp = conn.urlopen(\r\n ^^^^^^^^^^^^^\r\n File 
\"/opt/conda/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 844, in urlopen\r\n retries = retries.increment(\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/urllib3/util/retry.py\", line 515, in increment\r\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/etc/jupyter/docker_healthcheck.py\", line 19, in <module>\r\n r = requests.get(url, verify=False) # request without SSL verification\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/api.py\", line 73, in get\r\n return request(\"get\", url, params=params, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/api.py\", line 59, in request\r\n return session.request(method=method, url=url, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\r\n resp = self.send(prep, **send_kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\r\n r = adapter.send(request, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/conda/lib/python3.11/site-packages/requests/adapters.py\", line 513, in send\r\n raise ProxyError(e, request=request)\r\nrequests.exceptions.ProxyError: HTTPConnectionPool(host='host.docker.internal', port=9000): Max retries exceeded with url: http://7702f0e1c7d4:8888/api (Caused by ProxyError('Unable to connect to proxy', RemoteDisconnected('Remote end closed connection without response')))\r\n```\r\n\r\n### Anything else?\r\n\r\nAfter investigating the issue further I came to the conclusion that using the proxy will be the problem. So I applied the following fix to ```docker_healthcheck.py```:\r\n```python\r\nproxies = {\r\n \"http\": None,\r\n \"https\": None,\r\n}\r\n\r\nr = requests.get(url, proxies=proxies, verify=False) # request without SSL verification\r\n```\r\nNow the healthcheck works!\r\n\r\n### Latest Docker version\r\n\r\n- [X] I've updated my Docker version to the latest available, and the issue still persists\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport json\nimport os\nfrom pathlib import Path\n\nimport requests\n\n# A number of operations below deliberately don't check for possible errors\n# As this is a healthcheck, it should succeed or raise an exception on error\n\nruntime_dir = Path(\"/home/\") / os.environ[\"NB_USER\"] / \".local/share/jupyter/runtime/\"\njson_file = next(runtime_dir.glob(\"*server-*.json\"))\n\nurl = json.loads(json_file.read_bytes())[\"url\"]\nurl = url + \"api\"\n\nr = requests.get(url, verify=False) # request without SSL verification\nr.raise_for_status()\nprint(r.content)\n", "path": "base-notebook/docker_healthcheck.py"}]} | 2,174 | 139 |
gh_patches_debug_26103 | rasdani/github-patches | git_diff | pytorch__PiPPy-528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Buck run device error
buck run reported the following error:
```
[trainer1]:RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0! (when checking argument for argument weight in method wrapper__native_layer_norm)
```
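
For context, this is PyTorch's generic device-mismatch failure. A minimal sketch that reproduces the same message, assuming at least two visible GPUs and no PiPPy involvement at all:

```python
import torch

# Sketch: the LayerNorm weights live on cuda:0 while the input lives on
# cuda:1, so native_layer_norm raises the same "Expected all tensors to be
# on the same device" RuntimeError seen in the trainer log above.
layer_norm = torch.nn.LayerNorm(8).to("cuda:0")
x = torch.randn(2, 8, device="cuda:1")
layer_norm(x)  # RuntimeError: Expected all tensors to be on the same device ...
```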
</issue>
<code>
[start of pippy/utils.py]
1 # Copyright (c) Meta Platforms, Inc. and affiliates
2 import os
3 import socket
4 import logging
5
6 # Pinning process to a separate GPU if not yet done by launch script
7 # Notes:
8 # 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main
9 # thread
10 # 2. Must be done before `import torch` at which point CUDA context may be created
11 cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')
12 if (cuda_devices_str is None # not set
13 or len(cuda_devices_str.split(',')) > 1): # or set to all devices
14 # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information
15 local_rank_str = os.getenv('LOCAL_RANK')
16 if local_rank_str is not None:
17 os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str
18 print(f"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}")
19
20 import torch
21 import torch.multiprocessing as mp
22 import torch.distributed.rpc as rpc
23
24
25 VERBOSE = bool(int(os.environ.get('VERBOSE', False)))
26
27 if VERBOSE:
28 logging.getLogger().setLevel(logging.DEBUG)
29
30
31 def has_efa() -> bool:
32 try:
33 import subprocess
34 return subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"],
35 stdout=subprocess.DEVNULL,
36 stderr=subprocess.DEVNULL).returncode == 0
37 except FileNotFoundError:
38 return False
39 except PermissionError:
40 return False
41
42
43 def tp_transports():
44 return ["shm", "uv"] if has_efa() else None
45
46
47 def run_pippy(run_master, args, *extra_args):
48 if not hasattr(args, 'world_size'):
49 assert hasattr(args, 'pp_group_size')
50 args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1
51 else:
52 if not hasattr(args, 'dp_group_size'):
53 args.pp_group_size = args.pp_group_size if hasattr(args, 'pp_group_size') else args.world_size
54 assert args.world_size % args.pp_group_size == 0
55 args.dp_group_size = args.world_size // args.pp_group_size
56 elif not hasattr(args, 'pp_group_size'):
57 args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1
58 assert args.world_size % args.dp_group_size == 0
59 args.pp_group_size = args.world_size // args.dp_group_size
60 else:
61 pass
62 # TODO: doesn't work for PiPPyTrainingArguments
63 # assert args.world_size == args.dp_group_size * args.pp_group_size
64
65 actual_world_size = args.dp_group_size * args.pp_group_size
66 print(f'[PiPPy] World size: {actual_world_size}, '
67 f'DP group size: {args.dp_group_size}, '
68 f'PP group size: {args.pp_group_size}')
69
70 if args.rank == -1:
71 mp.spawn(run_worker, args=(run_master, args, *extra_args), nprocs=actual_world_size, join=True)
72 elif args.rank < actual_world_size:
73 run_worker(args.rank, run_master, args, *extra_args)
74 else:
75 print("I'm unused, exiting")
76
77
78 def run_worker(rank, run_master, args, *extra_args):
79 args.rank = rank
80
81 os.environ['MASTER_ADDR'] = args.master_addr
82 os.environ['MASTER_PORT'] = args.master_port
83
84 actual_world_size = args.dp_group_size * args.pp_group_size
85
86 # TODO: Move to training args, blocked by: cannot pickle 'TensorPipeRpcBackendOptions' object
87 # Exclude IB for metadata transport due to lack of EFA support on AWS
88 options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=512,
89 rpc_timeout=1800,
90 _transports=tp_transports())
91 if args.cuda:
92 n_devs = torch.cuda.device_count()
93 if n_devs > 0:
94 dev_id = rank % n_devs
95 for i in range(actual_world_size):
96 options.set_device_map(f"worker{i}", {dev_id: i % n_devs})
97 # Does not seem effective for RPC device pinning. TODO
98 # options.set_devices([f'cuda:{dev_id}'])
99 else:
100 args.cuda = 0
101 print('Warning: no CUDA device found. Running on CPU instead.')
102
103 args.device = f'cuda:{dev_id}' if args.cuda else 'cpu'
104 print(f"rank = {rank} host/pid/device = "
105 f"{socket.gethostname()}/{os.getpid()}/{args.device}")
106
107 # Init DDP process group
108 backend = "nccl" if args.cuda else "gloo"
109 torch.distributed.init_process_group(backend=backend, rank=rank, world_size=actual_world_size)
110
111 rpc.init_rpc(
112 f"worker{rank}",
113 rank=rank,
114 world_size=actual_world_size,
115 rpc_backend_options=options
116 )
117
118 global dp_pg_per_pp_rank
119 dp_ranks_per_pp_rank = torch.arange(actual_world_size).reshape(args.pp_group_size,
120 args.dp_group_size).tolist()
121 dp_pg_per_pp_rank = [torch.distributed.new_group(ranks) for ranks in dp_ranks_per_pp_rank]
122
123 pp_ranks_per_dp_group = [[i * args.dp_group_size + rank for i in range(args.pp_group_size)]
124 for rank in range(args.dp_group_size)]
125
126 args.driver_group = torch.distributed.new_group(list(range(args.dp_group_size)))
127
128 global exclude_master
129 exclude_master = args.exclude_master if hasattr(args, 'exclude_master') else 0
130
131 if rank >= 0 and rank // args.dp_group_size == 0:
132 args.driver_index = rank
133 args.local_driver_index = os.getenv('LOCAL_RANK', rank)
134 run_master(pp_ranks_per_dp_group[rank], args, *extra_args)
135 rpc.shutdown()
136
[end of pippy/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pippy/utils.py b/pippy/utils.py
--- a/pippy/utils.py
+++ b/pippy/utils.py
@@ -8,14 +8,17 @@
# 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main
# thread
# 2. Must be done before `import torch` at which point CUDA context may be created
-cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')
-if (cuda_devices_str is None # not set
- or len(cuda_devices_str.split(',')) > 1): # or set to all devices
- # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information
- local_rank_str = os.getenv('LOCAL_RANK')
- if local_rank_str is not None:
- os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str
- print(f"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}")
+# 3. Currently this is enabled by default (as long as #1 is not implemented in RPC). Users may set `PIPPY_PIN_DEVICE` to
+# 0 to disable the pinning
+if os.getenv('PIPPY_PIN_DEVICE', '1') == '1':
+ cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')
+ if (cuda_devices_str is None # not set
+ or len(cuda_devices_str.split(',')) > 1): # or set to all devices
+ # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information
+ local_rank_str = os.getenv('LOCAL_RANK')
+ if local_rank_str is not None:
+ os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str
+ print(f"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}")
import torch
import torch.multiprocessing as mp
| {"golden_diff": "diff --git a/pippy/utils.py b/pippy/utils.py\n--- a/pippy/utils.py\n+++ b/pippy/utils.py\n@@ -8,14 +8,17 @@\n # 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main\n # thread\n # 2. Must be done before `import torch` at which point CUDA context may be created\n-cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\n-if (cuda_devices_str is None # not set\n- or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n- # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n- local_rank_str = os.getenv('LOCAL_RANK')\n- if local_rank_str is not None:\n- os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n- print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n+# 3. Currently this is enabled by default (as long as #1 is not implemented in RPC). Users may set `PIPPY_PIN_DEVICE` to\n+# 0 to disable the pinning\n+if os.getenv('PIPPY_PIN_DEVICE', '1') == '1':\n+ cuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\n+ if (cuda_devices_str is None # not set\n+ or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n+ # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n+ local_rank_str = os.getenv('LOCAL_RANK')\n+ if local_rank_str is not None:\n+ os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n+ print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n \n import torch\n import torch.multiprocessing as mp\n", "issue": "Buck run device error\nbuck run reported the following error:\r\n```\r\n[trainer1]:RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0! (when checking argument for argument weight in method wrapper__native_layer_norm)\r\n```\n", "before_files": [{"content": "# Copyright (c) Meta Platforms, Inc. and affiliates\nimport os\nimport socket\nimport logging\n\n# Pinning process to a separate GPU if not yet done by launch script\n# Notes:\n# 1. Needed to work around the issue of RPC not automatically pinning spawned worker threads to CUDA device of the main\n# thread\n# 2. 
Must be done before `import torch` at which point CUDA context may be created\ncuda_devices_str = os.getenv('CUDA_VISIBLE_DEVICES')\nif (cuda_devices_str is None # not set\n or len(cuda_devices_str.split(',')) > 1): # or set to all devices\n # If launchers like Torchrun sets `LOCAL_RANK`, we would use this information\n local_rank_str = os.getenv('LOCAL_RANK')\n if local_rank_str is not None:\n os.environ['CUDA_VISIBLE_DEVICES'] = local_rank_str\n print(f\"Pinning local process {local_rank_str} to gpu {os.getenv('CUDA_VISIBLE_DEVICES')}\")\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.distributed.rpc as rpc\n\n\nVERBOSE = bool(int(os.environ.get('VERBOSE', False)))\n\nif VERBOSE:\n logging.getLogger().setLevel(logging.DEBUG)\n\n\ndef has_efa() -> bool:\n try:\n import subprocess\n return subprocess.run([\"fi_info\", \"-p\", \"efa\", \"-t\", \"FI_EP_RDM\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n except FileNotFoundError:\n return False\n except PermissionError:\n return False\n\n\ndef tp_transports():\n return [\"shm\", \"uv\"] if has_efa() else None\n\n\ndef run_pippy(run_master, args, *extra_args):\n if not hasattr(args, 'world_size'):\n assert hasattr(args, 'pp_group_size')\n args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1\n else:\n if not hasattr(args, 'dp_group_size'):\n args.pp_group_size = args.pp_group_size if hasattr(args, 'pp_group_size') else args.world_size\n assert args.world_size % args.pp_group_size == 0\n args.dp_group_size = args.world_size // args.pp_group_size\n elif not hasattr(args, 'pp_group_size'):\n args.dp_group_size = args.dp_group_size if hasattr(args, 'dp_group_size') else 1\n assert args.world_size % args.dp_group_size == 0\n args.pp_group_size = args.world_size // args.dp_group_size\n else:\n pass\n # TODO: doesn't work for PiPPyTrainingArguments\n # assert args.world_size == args.dp_group_size * args.pp_group_size\n\n actual_world_size = args.dp_group_size * args.pp_group_size\n print(f'[PiPPy] World size: {actual_world_size}, '\n f'DP group size: {args.dp_group_size}, '\n f'PP group size: {args.pp_group_size}')\n\n if args.rank == -1:\n mp.spawn(run_worker, args=(run_master, args, *extra_args), nprocs=actual_world_size, join=True)\n elif args.rank < actual_world_size:\n run_worker(args.rank, run_master, args, *extra_args)\n else:\n print(\"I'm unused, exiting\")\n\n\ndef run_worker(rank, run_master, args, *extra_args):\n args.rank = rank\n\n os.environ['MASTER_ADDR'] = args.master_addr\n os.environ['MASTER_PORT'] = args.master_port\n\n actual_world_size = args.dp_group_size * args.pp_group_size\n\n # TODO: Move to training args, blocked by: cannot pickle 'TensorPipeRpcBackendOptions' object\n # Exclude IB for metadata transport due to lack of EFA support on AWS\n options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=512,\n rpc_timeout=1800,\n _transports=tp_transports())\n if args.cuda:\n n_devs = torch.cuda.device_count()\n if n_devs > 0:\n dev_id = rank % n_devs\n for i in range(actual_world_size):\n options.set_device_map(f\"worker{i}\", {dev_id: i % n_devs})\n # Does not seem effective for RPC device pinning. TODO\n # options.set_devices([f'cuda:{dev_id}'])\n else:\n args.cuda = 0\n print('Warning: no CUDA device found. 
Running on CPU instead.')\n\n args.device = f'cuda:{dev_id}' if args.cuda else 'cpu'\n print(f\"rank = {rank} host/pid/device = \"\n f\"{socket.gethostname()}/{os.getpid()}/{args.device}\")\n\n # Init DDP process group\n backend = \"nccl\" if args.cuda else \"gloo\"\n torch.distributed.init_process_group(backend=backend, rank=rank, world_size=actual_world_size)\n\n rpc.init_rpc(\n f\"worker{rank}\",\n rank=rank,\n world_size=actual_world_size,\n rpc_backend_options=options\n )\n\n global dp_pg_per_pp_rank\n dp_ranks_per_pp_rank = torch.arange(actual_world_size).reshape(args.pp_group_size,\n args.dp_group_size).tolist()\n dp_pg_per_pp_rank = [torch.distributed.new_group(ranks) for ranks in dp_ranks_per_pp_rank]\n\n pp_ranks_per_dp_group = [[i * args.dp_group_size + rank for i in range(args.pp_group_size)]\n for rank in range(args.dp_group_size)]\n\n args.driver_group = torch.distributed.new_group(list(range(args.dp_group_size)))\n\n global exclude_master\n exclude_master = args.exclude_master if hasattr(args, 'exclude_master') else 0\n\n if rank >= 0 and rank // args.dp_group_size == 0:\n args.driver_index = rank\n args.local_driver_index = os.getenv('LOCAL_RANK', rank)\n run_master(pp_ranks_per_dp_group[rank], args, *extra_args)\n rpc.shutdown()\n", "path": "pippy/utils.py"}]} | 2,203 | 418 |
gh_patches_debug_34128 | rasdani/github-patches | git_diff | conda__conda-5133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
signal handler can only be used in main thread
Often get these with conda-build tests
https://travis-ci.org/conda/conda/jobs/225296134#L1380
```
Traceback (most recent call last):
File "/home/travis/build/conda/conda/conda-build/conda_build/build.py", line 688, in create_env
execute_actions(actions, index, verbose=config.debug)
File "/home/travis/build/conda/conda/conda/plan.py", line 612, in execute_actions
execute_instructions(plan, index, verbose)
File "/home/travis/build/conda/conda/conda/instructions.py", line 243, in execute_instructions
cmd(state, arg)
File "/home/travis/build/conda/conda/conda/instructions.py", line 98, in PROGRESSIVEFETCHEXTRACT_CMD
progressive_fetch_extract.execute()
File "/home/travis/build/conda/conda/conda/core/package_cache.py", line 491, in execute
with signal_handler(conda_signal_handler):
File "/home/travis/miniconda/lib/python3.6/contextlib.py", line 82, in __enter__
return next(self.gen)
File "/home/travis/build/conda/conda/conda/common/signals.py", line 41, in signal_handler
prev_handler = signal.signal(sig, handler)
File "/home/travis/miniconda/lib/python3.6/signal.py", line 47, in signal
handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
ValueError: signal only works in main thread
```
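
For reference, the underlying CPython restriction is easy to reproduce outside of conda. A minimal sketch, independent of the conda code paths:

```python
import signal
import threading

def install_handler():
    # CPython only allows installing signal handlers from the main thread,
    # so this call raises: ValueError: signal only works in main thread
    signal.signal(signal.SIGINT, signal.SIG_DFL)

t = threading.Thread(target=install_handler)
t.start()
t.join()
```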
</issue>
<code>
[start of conda/exports.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from functools import partial
5 from logging import getLogger
6 from warnings import warn
7
8 log = getLogger(__name__)
9
10 from . import CondaError # NOQA
11 CondaError = CondaError
12
13 from . import compat, plan # NOQA
14 compat, plan = compat, plan
15
16 from .api import get_index # NOQA
17 get_index = get_index
18
19 from .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA
20 from .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA
21 add_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix
22 specs_from_args, spec_from_line = specs_from_args, spec_from_line
23 specs_from_url = specs_from_url
24
25 from .cli.conda_argparse import ArgumentParser # NOQA
26 ArgumentParser = ArgumentParser
27
28 from .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA
29 PY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA
30 from .gateways.connection import CondaSession # NOQA
31 CondaSession = CondaSession
32
33 from .common.toposort import _toposort
34 _toposort = _toposort
35
36 from .gateways.disk.link import lchmod # NOQA
37 lchmod = lchmod
38
39 from .fetch import TmpDownload # NOQA
40 TmpDownload = TmpDownload
41 handle_proxy_407 = lambda x, y: warn("handle_proxy_407 is deprecated. "
42 "Now handled by CondaSession.")
43 from .core.index import dist_str_in_index, fetch_index # NOQA
44 dist_str_in_index, fetch_index = dist_str_in_index, fetch_index
45 from .core.package_cache import download, rm_fetched # NOQA
46 download, rm_fetched = download, rm_fetched
47
48 from .install import package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA
49 package_cache, prefix_placeholder, rm_rf, symlink_conda = package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA
50
51 from .gateways.disk.delete import delete_trash, move_to_trash # NOQA
52 delete_trash, move_to_trash = delete_trash, move_to_trash
53
54 from .core.linked_data import is_linked, linked, linked_data # NOQA
55 is_linked, linked, linked_data = is_linked, linked, linked_data
56
57 from .misc import untracked, walk_prefix # NOQA
58 untracked, walk_prefix = untracked, walk_prefix
59
60 from .resolve import MatchSpec, NoPackagesFound, Resolve, Unsatisfiable, normalized_version # NOQA
61 MatchSpec, NoPackagesFound, Resolve = MatchSpec, NoPackagesFound, Resolve
62 Unsatisfiable, normalized_version = Unsatisfiable, normalized_version
63
64 from .signature import KEYS, KEYS_DIR, hash_file, verify # NOQA
65 KEYS, KEYS_DIR = KEYS, KEYS_DIR
66 hash_file, verify = hash_file, verify
67
68 from .utils import hashsum_file, human_bytes, memoized, unix_path_to_win, win_path_to_unix, url_path # NOQA
69 hashsum_file, human_bytes = hashsum_file, human_bytes
70 memoized, unix_path_to_win = memoized, unix_path_to_win
71 win_path_to_unix, url_path = win_path_to_unix, url_path
72
73 from .gateways.disk.read import compute_md5sum # NOQA
74 md5_file = compute_md5sum
75
76 from .config import sys_rc_path # NOQA
77 sys_rc_path = sys_rc_path
78
79 from .models.version import VersionOrder # NOQA
80 VersionOrder = VersionOrder
81
82 import conda.base.context # NOQA
83 from .base.context import get_prefix as context_get_prefix, non_x86_linux_machines # NOQA
84 non_x86_linux_machines = non_x86_linux_machines
85
86 from ._vendor.auxlib.entity import EntityEncoder # NOQA
87 EntityEncoder = EntityEncoder
88 from .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA
89 DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA
90 get_prefix = partial(context_get_prefix, conda.base.context.context)
91 get_default_urls = lambda: DEFAULT_CHANNELS
92
93 arch_name = conda.base.context.context.arch_name
94 binstar_upload = conda.base.context.context.anaconda_upload
95 bits = conda.base.context.context.bits
96 default_prefix = conda.base.context.context.default_prefix
97 default_python = conda.base.context.context.default_python
98 envs_dirs = conda.base.context.context.envs_dirs
99 pkgs_dirs = conda.base.context.context.pkgs_dirs
100 platform = conda.base.context.context.platform
101 root_dir = conda.base.context.context.root_prefix
102 root_writable = conda.base.context.context.root_writable
103 subdir = conda.base.context.context.subdir
104 from .models.channel import get_conda_build_local_url # NOQA
105 get_rc_urls = lambda: list(conda.base.context.context.channels)
106 get_local_urls = lambda: list(get_conda_build_local_url()) or []
107 load_condarc = lambda fn: conda.base.context.reset_context([fn])
108 from .exceptions import PaddingError # NOQA
109 PaddingError = PaddingError
110 from .gateways.disk.link import CrossPlatformStLink # NOQA
111 CrossPlatformStLink = CrossPlatformStLink
112
113 from .models.enums import FileMode # NOQA
114 FileMode = FileMode
115 from .models.enums import PathType # NOQA
116 PathType = PathType
117
118
119 if PY3:
120 import configparser # NOQA # pragma: py2 no cover
121 else:
122 import ConfigParser as configparser # NOQA # pragma: py3 no cover
123 configparser = configparser
124
125
126 from .compat import TemporaryDirectory # NOQA
127 TemporaryDirectory = TemporaryDirectory
128
129 from .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA
130 ACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call
131
132 from .core.repodata import cache_fn_url # NOQA
133 cache_fn_url = cache_fn_url
134
135
136 class Completer(object):
137 def get_items(self):
138 return self._get_items()
139
140 def __contains__(self, item):
141 return True
142
143 def __iter__(self):
144 return iter(self.get_items())
145
146 class InstalledPackages(object): pass # NOQA
147
[end of conda/exports.py]
[start of conda/common/signals.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from contextlib import contextmanager
5 from logging import getLogger
6 import signal
7
8 from .compat import iteritems
9
10 log = getLogger(__name__)
11
12 INTERRUPT_SIGNALS = (
13 'SIGABRT',
14 'SIGINT',
15 'SIGTERM',
16 'SIGQUIT',
17 'SIGBREAK',
18 )
19
20
21 def get_signal_name(signum):
22 """
23 Examples:
24 >>> from signal import SIGINT
25 >>> get_signal_name(SIGINT)
26 'SIGINT'
27
28 """
29 return next((k for k, v in iteritems(signal.__dict__)
30 if v == signum and k.startswith('SIG') and not k.startswith('SIG_')),
31 None)
32
33
34 @contextmanager
35 def signal_handler(handler):
36 previous_handlers = []
37 for signame in INTERRUPT_SIGNALS:
38 sig = getattr(signal, signame, None)
39 if sig:
40 log.debug("registering handler for %s", signame)
41 prev_handler = signal.signal(sig, handler)
42 previous_handlers.append((sig, prev_handler))
43 try:
44 yield
45 finally:
46 standard_handlers = signal.SIG_IGN, signal.SIG_DFL
47 for sig, previous_handler in previous_handlers:
48 if callable(previous_handler) or previous_handler in standard_handlers:
49 log.debug("de-registering handler for %s", sig)
50 signal.signal(sig, previous_handler)
51
[end of conda/common/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/common/signals.py b/conda/common/signals.py
--- a/conda/common/signals.py
+++ b/conda/common/signals.py
@@ -4,6 +4,7 @@
from contextlib import contextmanager
from logging import getLogger
import signal
+import threading
from .compat import iteritems
@@ -17,7 +18,6 @@
'SIGBREAK',
)
-
def get_signal_name(signum):
"""
Examples:
@@ -33,18 +33,23 @@
@contextmanager
def signal_handler(handler):
- previous_handlers = []
+ _thread_local = threading.local()
+ _thread_local.previous_handlers = []
for signame in INTERRUPT_SIGNALS:
sig = getattr(signal, signame, None)
if sig:
log.debug("registering handler for %s", signame)
- prev_handler = signal.signal(sig, handler)
- previous_handlers.append((sig, prev_handler))
+ try:
+ prev_handler = signal.signal(sig, handler)
+ _thread_local.previous_handlers.append((sig, prev_handler))
+ except ValueError as e:
+ # ValueError: signal only works in main thread
+ log.debug('%r', e)
try:
yield
finally:
standard_handlers = signal.SIG_IGN, signal.SIG_DFL
- for sig, previous_handler in previous_handlers:
+ for sig, previous_handler in _thread_local.previous_handlers:
if callable(previous_handler) or previous_handler in standard_handlers:
log.debug("de-registering handler for %s", sig)
signal.signal(sig, previous_handler)
diff --git a/conda/exports.py b/conda/exports.py
--- a/conda/exports.py
+++ b/conda/exports.py
@@ -30,7 +30,7 @@
from .gateways.connection import CondaSession # NOQA
CondaSession = CondaSession
-from .common.toposort import _toposort
+from .common.toposort import _toposort # NOQA
_toposort = _toposort
from .gateways.disk.link import lchmod # NOQA
| {"golden_diff": "diff --git a/conda/common/signals.py b/conda/common/signals.py\n--- a/conda/common/signals.py\n+++ b/conda/common/signals.py\n@@ -4,6 +4,7 @@\n from contextlib import contextmanager\n from logging import getLogger\n import signal\n+import threading\n \n from .compat import iteritems\n \n@@ -17,7 +18,6 @@\n 'SIGBREAK',\n )\n \n-\n def get_signal_name(signum):\n \"\"\"\n Examples:\n@@ -33,18 +33,23 @@\n \n @contextmanager\n def signal_handler(handler):\n- previous_handlers = []\n+ _thread_local = threading.local()\n+ _thread_local.previous_handlers = []\n for signame in INTERRUPT_SIGNALS:\n sig = getattr(signal, signame, None)\n if sig:\n log.debug(\"registering handler for %s\", signame)\n- prev_handler = signal.signal(sig, handler)\n- previous_handlers.append((sig, prev_handler))\n+ try:\n+ prev_handler = signal.signal(sig, handler)\n+ _thread_local.previous_handlers.append((sig, prev_handler))\n+ except ValueError as e:\n+ # ValueError: signal only works in main thread\n+ log.debug('%r', e)\n try:\n yield\n finally:\n standard_handlers = signal.SIG_IGN, signal.SIG_DFL\n- for sig, previous_handler in previous_handlers:\n+ for sig, previous_handler in _thread_local.previous_handlers:\n if callable(previous_handler) or previous_handler in standard_handlers:\n log.debug(\"de-registering handler for %s\", sig)\n signal.signal(sig, previous_handler)\ndiff --git a/conda/exports.py b/conda/exports.py\n--- a/conda/exports.py\n+++ b/conda/exports.py\n@@ -30,7 +30,7 @@\n from .gateways.connection import CondaSession # NOQA\n CondaSession = CondaSession\n \n-from .common.toposort import _toposort\n+from .common.toposort import _toposort # NOQA\n _toposort = _toposort\n \n from .gateways.disk.link import lchmod # NOQA\n", "issue": "signal handler can only be used in main thread\nOften get these with conda-build tests\r\n\r\nhttps://travis-ci.org/conda/conda/jobs/225296134#L1380\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/travis/build/conda/conda/conda-build/conda_build/build.py\", line 688, in create_env\r\n execute_actions(actions, index, verbose=config.debug)\r\n File \"/home/travis/build/conda/conda/conda/plan.py\", line 612, in execute_actions\r\n execute_instructions(plan, index, verbose)\r\n File \"/home/travis/build/conda/conda/conda/instructions.py\", line 243, in execute_instructions\r\n cmd(state, arg)\r\n File \"/home/travis/build/conda/conda/conda/instructions.py\", line 98, in PROGRESSIVEFETCHEXTRACT_CMD\r\n progressive_fetch_extract.execute()\r\n File \"/home/travis/build/conda/conda/conda/core/package_cache.py\", line 491, in execute\r\n with signal_handler(conda_signal_handler):\r\n File \"/home/travis/miniconda/lib/python3.6/contextlib.py\", line 82, in __enter__\r\n return next(self.gen)\r\n File \"/home/travis/build/conda/conda/conda/common/signals.py\", line 41, in signal_handler\r\n prev_handler = signal.signal(sig, handler)\r\n File \"/home/travis/miniconda/lib/python3.6/signal.py\", line 47, in signal\r\n handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))\r\nValueError: signal only works in main thread\r\n```\nsignal handler can only be used in main thread\nOften get these with conda-build tests\r\n\r\nhttps://travis-ci.org/conda/conda/jobs/225296134#L1380\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/travis/build/conda/conda/conda-build/conda_build/build.py\", line 688, in create_env\r\n execute_actions(actions, index, verbose=config.debug)\r\n File 
\"/home/travis/build/conda/conda/conda/plan.py\", line 612, in execute_actions\r\n execute_instructions(plan, index, verbose)\r\n File \"/home/travis/build/conda/conda/conda/instructions.py\", line 243, in execute_instructions\r\n cmd(state, arg)\r\n File \"/home/travis/build/conda/conda/conda/instructions.py\", line 98, in PROGRESSIVEFETCHEXTRACT_CMD\r\n progressive_fetch_extract.execute()\r\n File \"/home/travis/build/conda/conda/conda/core/package_cache.py\", line 491, in execute\r\n with signal_handler(conda_signal_handler):\r\n File \"/home/travis/miniconda/lib/python3.6/contextlib.py\", line 82, in __enter__\r\n return next(self.gen)\r\n File \"/home/travis/build/conda/conda/conda/common/signals.py\", line 41, in signal_handler\r\n prev_handler = signal.signal(sig, handler)\r\n File \"/home/travis/miniconda/lib/python3.6/signal.py\", line 47, in signal\r\n handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))\r\nValueError: signal only works in main thread\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import partial\nfrom logging import getLogger\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .api import get_index # NOQA\nget_index = get_index\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .fetch import TmpDownload # NOQA\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. 
\"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index # NOQA\ndist_str_in_index, fetch_index = dist_str_in_index, fetch_index\nfrom .core.package_cache import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA\npackage_cache, prefix_placeholder, rm_rf, symlink_conda = package_cache, prefix_placeholder, rm_rf, symlink_conda # NOQA\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.linked_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, NoPackagesFound, Resolve, Unsatisfiable, normalized_version # NOQA\nMatchSpec, NoPackagesFound, Resolve = MatchSpec, NoPackagesFound, Resolve\nUnsatisfiable, normalized_version = Unsatisfiable, normalized_version\n\nfrom .signature import KEYS, KEYS_DIR, hash_file, verify # NOQA\nKEYS, KEYS_DIR = KEYS, KEYS_DIR\nhash_file, verify = hash_file, verify\n\nfrom .utils import hashsum_file, human_bytes, memoized, unix_path_to_win, win_path_to_unix, url_path # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nmemoized, unix_path_to_win = memoized, unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .config import sys_rc_path # NOQA\nsys_rc_path = sys_rc_path\n\nfrom .models.version import VersionOrder # NOQA\nVersionOrder = VersionOrder\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix as context_get_prefix, non_x86_linux_machines # NOQA\nnon_x86_linux_machines = non_x86_linux_machines\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_prefix = partial(context_get_prefix, conda.base.context.context)\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError # NOQA\nPaddingError = PaddingError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\n\nif PY3:\n import configparser # NOQA # pragma: 
py2 no cover\nelse:\n import ConfigParser as configparser # NOQA # pragma: py3 no cover\nconfigparser = configparser\n\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.repodata import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object):\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\nclass InstalledPackages(object): pass # NOQA\n", "path": "conda/exports.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom contextlib import contextmanager\nfrom logging import getLogger\nimport signal\n\nfrom .compat import iteritems\n\nlog = getLogger(__name__)\n\nINTERRUPT_SIGNALS = (\n 'SIGABRT',\n 'SIGINT',\n 'SIGTERM',\n 'SIGQUIT',\n 'SIGBREAK',\n)\n\n\ndef get_signal_name(signum):\n \"\"\"\n Examples:\n >>> from signal import SIGINT\n >>> get_signal_name(SIGINT)\n 'SIGINT'\n\n \"\"\"\n return next((k for k, v in iteritems(signal.__dict__)\n if v == signum and k.startswith('SIG') and not k.startswith('SIG_')),\n None)\n\n\n@contextmanager\ndef signal_handler(handler):\n previous_handlers = []\n for signame in INTERRUPT_SIGNALS:\n sig = getattr(signal, signame, None)\n if sig:\n log.debug(\"registering handler for %s\", signame)\n prev_handler = signal.signal(sig, handler)\n previous_handlers.append((sig, prev_handler))\n try:\n yield\n finally:\n standard_handlers = signal.SIG_IGN, signal.SIG_DFL\n for sig, previous_handler in previous_handlers:\n if callable(previous_handler) or previous_handler in standard_handlers:\n log.debug(\"de-registering handler for %s\", sig)\n signal.signal(sig, previous_handler)\n", "path": "conda/common/signals.py"}]} | 3,482 | 472 |
gh_patches_debug_12321 | rasdani/github-patches | git_diff | conan-io__conan-9073 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] CMakeDeps generator: component and package with the same name
Hi!
During the refactoring in #8971, the behavior of the `tvalue` [macro](https://github.com/conan-io/conan/blob/80e99ca9a7d730e95adbc8bde5aa33da4f4fbb44/conan/tools/cmake/cmakedeps/templates/target_configuration.py#L35) was changed by adding a special case for when `pkg_name` and `comp_name` are the same.
This accidentally breaks that case, because the `tvalue` macro is inlined in the declaration of several variables (see [this](https://github.com/conan-io/conan/blob/80e99ca9a7d730e95adbc8bde5aa33da4f4fbb44/conan/tools/cmake/cmakedeps/templates/target_configuration.py#L98) for example). So these variables get one name (`{pkg_name}_{comp_name}_...`) when they are set and a different name (`{comp_name}_...`) when they are read in `set_property`.
cc @lasote
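
A minimal sketch of the mismatch, rendered with `jinja2` directly and using a hypothetical package whose component has the same name (`zlib`/`zlib`); the variable name written by `set()` and the one `tvalue` expands to no longer line up:

```python
import textwrap

from jinja2 import Template

# Reduced version of the template: the set() line hard-codes the
# "<pkg>_<comp>_" prefix, while tvalue() drops the component part
# whenever comp_name == pkg_name.
snippet = textwrap.dedent("""\
    {%- macro tvalue(pkg_name, comp_name, var, config_suffix) -%}
    {%- if comp_name == pkg_name -%}
    {{'${'+pkg_name+'_'+var+config_suffix+'}'}}
    {%- else -%}
    {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}
    {%- endif -%}
    {%- endmacro -%}
    set:  {{ pkg_name }}_{{ comp_name }}_LIBS{{ config_suffix }}
    read: {{ tvalue(pkg_name, comp_name, 'LIBS', config_suffix) }}
    """)

print(Template(snippet).render(pkg_name="zlib", comp_name="zlib",
                               config_suffix="_RELEASE"))
# set:  zlib_zlib_LIBS_RELEASE
# read: ${zlib_LIBS_RELEASE}
```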
</issue>
<code>
[start of conan/tools/cmake/cmakedeps/templates/target_configuration.py]
1 import textwrap
2
3 from conan.tools.cmake.cmakedeps.templates import CMakeDepsFileTemplate, get_component_alias, \
4 get_target_namespace
5
6 """
7
8 FooTarget-release.cmake
9
10 """
11
12
13 class TargetConfigurationTemplate(CMakeDepsFileTemplate):
14
15 @property
16 def filename(self):
17 return "{}Target-{}.cmake".format(self.file_name,
18 self.cmakedeps.configuration.lower())
19
20 @property
21 def context(self):
22 deps_targets_names = self.get_deps_targets_names() \
23 if not self.conanfile.is_build_context else []
24 return {"pkg_name": self.pkg_name,
25 "target_namespace": self.target_namespace,
26 "config_suffix": self.config_suffix,
27 "deps_targets_names": ";".join(deps_targets_names),
28 "components_names": self.get_required_components_names(),
29 "configuration": self.cmakedeps.configuration}
30
31 @property
32 def template(self):
33 return textwrap.dedent("""\
34
35 {%- macro tvalue(pkg_name, comp_name, var, config_suffix) -%}
36 {%- if comp_name == pkg_name -%}
37 {{'${'+pkg_name+'_'+var+config_suffix+'}'}}
38 {%- else -%}
39 {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}
40 {%- endif -%}
41 {%- endmacro -%}
42
43 ########### VARIABLES #######################################################################
44 #############################################################################################
45
46 set({{ pkg_name }}_COMPILE_OPTIONS{{ config_suffix }}
47 "$<$<COMPILE_LANGUAGE:CXX>{{ ':${' }}{{ pkg_name }}_COMPILE_OPTIONS_CXX{{ config_suffix }}}>"
48 "$<$<COMPILE_LANGUAGE:C>{{ ':${' }}{{ pkg_name }}_COMPILE_OPTIONS_C{{ config_suffix }}}>")
49
50 set({{ pkg_name }}_LINKER_FLAGS{{ config_suffix }}
51 "$<$<STREQUAL{{ ':$' }}<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>{{ ':${' }}{{ pkg_name }}_SHARED_LINK_FLAGS{{ config_suffix }}}>"
52 "$<$<STREQUAL{{ ':$' }}<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>{{ ':${' }}{{ pkg_name }}_SHARED_LINK_FLAGS{{ config_suffix }}}>"
53 "$<$<STREQUAL{{ ':$' }}<TARGET_PROPERTY:TYPE>,EXECUTABLE>{{ ':${' }}{{ pkg_name }}_EXE_LINK_FLAGS{{ config_suffix }}}>")
54
55 set({{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }} "") # Will be filled later
56 conan_find_apple_frameworks({{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }} "{{ '${' }}{{ pkg_name }}_FRAMEWORKS{{ config_suffix }}}" "{{ '${' }}{{ pkg_name }}_FRAMEWORK_DIRS{{ config_suffix }}}")
57
58 # Gather all the libraries that should be linked to the targets (do not touch existing variables)
59 set(_{{ pkg_name }}_DEPENDENCIES{{ config_suffix }} "{{ '${' }}{{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }}} {{ '${' }}{{ pkg_name }}_SYSTEM_LIBS{{ config_suffix }}} {{ deps_targets_names }}")
60
61 set({{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} "") # Will be filled later
62 set({{ pkg_name }}_LIBRARIES{{ config_suffix }} "") # Will be filled later
63 conan_package_library_targets("{{ '${' }}{{ pkg_name }}_LIBS{{ config_suffix }}}" # libraries
64 "{{ '${' }}{{ pkg_name }}_LIB_DIRS{{ config_suffix }}}" # package_libdir
65 "{{ '${' }}_{{ pkg_name }}_DEPENDENCIES{{ config_suffix }}}" # deps
66 {{ pkg_name }}_LIBRARIES{{ config_suffix }} # out_libraries
67 {{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} # out_libraries_targets
68 "{{ config_suffix }}" # config_suffix
69 "{{ pkg_name }}") # package_name
70
71 foreach(_FRAMEWORK {{ '${' }}{{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }}})
72 list(APPEND {{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} ${_FRAMEWORK})
73 list(APPEND {{ pkg_name }}_LIBRARIES{{ config_suffix }} ${_FRAMEWORK})
74 endforeach()
75
76 foreach(_SYSTEM_LIB {{ '${' }}{{ pkg_name }}_SYSTEM_LIBS{{ config_suffix }}})
77 list(APPEND {{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} ${_SYSTEM_LIB})
78 list(APPEND {{ pkg_name }}_LIBRARIES{{ config_suffix }} ${_SYSTEM_LIB})
79 endforeach()
80
81 # We need to add our requirements too
82 set({{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} {{ '"${' }}{{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }}{{ '};' }}{{ deps_targets_names }}")
83 set({{ pkg_name }}_LIBRARIES{{ config_suffix }} {{ '"${' }}{{ pkg_name }}_LIBRARIES{{ config_suffix }}{{ '};' }}{{ deps_targets_names }}")
84
85 # FIXME: What is the result of this for multi-config? All configs adding themselves to path?
86 set(CMAKE_MODULE_PATH {{ '${' }}{{ pkg_name }}_BUILD_DIRS{{ config_suffix }}} {{ '${' }}CMAKE_MODULE_PATH})
87 set(CMAKE_PREFIX_PATH {{ '${' }}{{ pkg_name }}_BUILD_DIRS{{ config_suffix }}} {{ '${' }}CMAKE_PREFIX_PATH})
88
89 {%- for comp_name in components_names %}
90
91 ########## COMPONENT {{ comp_name }} FIND LIBRARIES & FRAMEWORKS / DYNAMIC VARS #############
92
93 set({{ pkg_name }}_{{ comp_name }}_FRAMEWORKS_FOUND{{ config_suffix }} "")
94 conan_find_apple_frameworks({{ pkg_name }}_{{ comp_name }}_FRAMEWORKS_FOUND{{ config_suffix }} "{{ '${'+pkg_name+'_'+comp_name+'_FRAMEWORKS'+config_suffix+'}' }}" "{{ '${'+pkg_name+'_'+comp_name+'_FRAMEWORK_DIRS'+config_suffix+'}' }}")
95
96 set({{ pkg_name }}_{{ comp_name }}_LIB_TARGETS{{ config_suffix }} "")
97 set({{ pkg_name }}_{{ comp_name }}_NOT_USED{{ config_suffix }} "")
98 set({{ pkg_name }}_{{ comp_name }}_LIBS_FRAMEWORKS_DEPS{{ config_suffix }} {{ '${'+pkg_name+'_'+comp_name+'_FRAMEWORKS_FOUND'+config_suffix+'}' }} {{ '${'+pkg_name+'_'+comp_name+'_SYSTEM_LIBS'+config_suffix+'}' }} {{ '${'+pkg_name+'_'+comp_name+'_DEPENDENCIES'+config_suffix+'}' }})
99 conan_package_library_targets("{{ '${'+pkg_name+'_'+comp_name+'_LIBS'+config_suffix+'}' }}"
100 "{{ '${'+pkg_name+'_'+comp_name+'_LIB_DIRS'+config_suffix+'}' }}"
101 "{{ '${'+pkg_name+'_'+comp_name+'_LIBS_FRAMEWORKS_DEPS'+config_suffix+'}' }}"
102 {{ pkg_name }}_{{ comp_name }}_NOT_USED{{ config_suffix }}
103 {{ pkg_name }}_{{ comp_name }}_LIB_TARGETS{{ config_suffix }}
104 "{{ config_suffix }}"
105 "{{ pkg_name }}_{{ comp_name }}")
106
107 set({{ pkg_name }}_{{ comp_name }}_LINK_LIBS{{ config_suffix }} {{ '${'+pkg_name+'_'+comp_name+'_LIB_TARGETS'+config_suffix+'}' }} {{ '${'+pkg_name+'_'+comp_name+'_LIBS_FRAMEWORKS_DEPS'+config_suffix+'}' }})
108 {%- endfor %}
109
110
111
112 ########## GLOBAL TARGET PROPERTIES {{ configuration }} ########################################
113 set_property(TARGET {{target_namespace}}::{{target_namespace}}
114 PROPERTY INTERFACE_LINK_LIBRARIES
115 $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_LIBRARIES_TARGETS{{config_suffix}}}
116 ${{'{'}}{{pkg_name}}_LINKER_FLAGS{{config_suffix}}}> APPEND)
117 set_property(TARGET {{target_namespace}}::{{target_namespace}}
118 PROPERTY INTERFACE_INCLUDE_DIRECTORIES
119 $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_INCLUDE_DIRS{{config_suffix}}}> APPEND)
120 set_property(TARGET {{target_namespace}}::{{target_namespace}}
121 PROPERTY INTERFACE_COMPILE_DEFINITIONS
122 $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_COMPILE_DEFINITIONS{{config_suffix}}}> APPEND)
123 set_property(TARGET {{target_namespace}}::{{target_namespace}}
124 PROPERTY INTERFACE_COMPILE_OPTIONS
125 $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_COMPILE_OPTIONS{{config_suffix}}}> APPEND)
126
127 ########## COMPONENTS TARGET PROPERTIES {{ configuration }} ########################################
128
129 {%- for comp_name in components_names %}
130
131 ########## COMPONENT {{ comp_name }} TARGET PROPERTIES ######################################
132 set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_LINK_LIBRARIES
133 $<$<CONFIG:{{ configuration }}>:{{tvalue(pkg_name, comp_name, 'LINK_LIBS', config_suffix)}}
134 {{tvalue(pkg_name, comp_name, 'LINKER_FLAGS', config_suffix)}}> APPEND)
135 set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_INCLUDE_DIRECTORIES
136 $<$<CONFIG:{{ configuration }}>:{{tvalue(pkg_name, comp_name, 'INCLUDE_DIRS', config_suffix)}}> APPEND)
137 set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_COMPILE_DEFINITIONS
138 $<$<CONFIG:{{ configuration }}>:{{tvalue(pkg_name, comp_name, 'COMPILE_DEFINITIONS', config_suffix)}}> APPEND)
139 set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_COMPILE_OPTIONS
140 $<$<CONFIG:{{ configuration }}>:
141 {{tvalue(pkg_name, comp_name, 'COMPILE_OPTIONS_C', config_suffix)}}
142 {{tvalue(pkg_name, comp_name, 'COMPILE_OPTIONS_CXX', config_suffix)}}> APPEND)
143 set({{ pkg_name }}_{{ comp_name }}_TARGET_PROPERTIES TRUE)
144
145 {%- endfor %}
146
147 """)
148
149 def get_required_components_names(self):
150 """Returns a list of component_name"""
151 ret = []
152 sorted_comps = self.conanfile.new_cpp_info.get_sorted_components()
153 for comp_name, comp in sorted_comps.items():
154 ret.append(get_component_alias(self.conanfile, comp_name))
155 ret.reverse()
156 return ret
157
158 def get_deps_targets_names(self):
159 """
160 - [{foo}::{bar}, ] of the required
161 """
162 ret = []
163
164 # Get a list of dependencies target names
165 # Declared cppinfo.requires or .components[].requires
166 if self.conanfile.new_cpp_info.required_components:
167 for dep_name, component_name in self.conanfile.new_cpp_info.required_components:
168 if not dep_name:
169 # Internal dep (no another component)
170 dep_name = get_target_namespace(self.conanfile)
171 req = self.conanfile
172 else:
173 req = self.conanfile.dependencies.transitive_host_requires[dep_name]
174 dep_name = get_target_namespace(req)
175
176 component_name = get_component_alias(req, component_name)
177 ret.append("{}::{}".format(dep_name, component_name))
178 elif self.conanfile.dependencies.host_requires:
179 # Regular external "conanfile.requires" declared, not cpp_info requires
180 ret = ["{p}::{p}".format(p=get_target_namespace(r))
181 for r in self.conanfile.dependencies.host_requires]
182 return ret
183
[end of conan/tools/cmake/cmakedeps/templates/target_configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conan/tools/cmake/cmakedeps/templates/target_configuration.py b/conan/tools/cmake/cmakedeps/templates/target_configuration.py
--- a/conan/tools/cmake/cmakedeps/templates/target_configuration.py
+++ b/conan/tools/cmake/cmakedeps/templates/target_configuration.py
@@ -33,11 +33,7 @@
return textwrap.dedent("""\
{%- macro tvalue(pkg_name, comp_name, var, config_suffix) -%}
- {%- if comp_name == pkg_name -%}
- {{'${'+pkg_name+'_'+var+config_suffix+'}'}}
- {%- else -%}
- {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}
- {%- endif -%}
+ {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}
{%- endmacro -%}
########### VARIABLES #######################################################################
| {"golden_diff": "diff --git a/conan/tools/cmake/cmakedeps/templates/target_configuration.py b/conan/tools/cmake/cmakedeps/templates/target_configuration.py\n--- a/conan/tools/cmake/cmakedeps/templates/target_configuration.py\n+++ b/conan/tools/cmake/cmakedeps/templates/target_configuration.py\n@@ -33,11 +33,7 @@\n return textwrap.dedent(\"\"\"\\\n \n {%- macro tvalue(pkg_name, comp_name, var, config_suffix) -%}\n- {%- if comp_name == pkg_name -%}\n- {{'${'+pkg_name+'_'+var+config_suffix+'}'}}\n- {%- else -%}\n- {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}\n- {%- endif -%}\n+ {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}\n {%- endmacro -%}\n \n ########### VARIABLES #######################################################################\n", "issue": "[bug] CMakeDeps generator: component and package with the same name\nHi!\r\n\r\nDuring refactoring #8971 behavior of `tvalue` [macro](https://github.com/conan-io/conan/blob/80e99ca9a7d730e95adbc8bde5aa33da4f4fbb44/conan/tools/cmake/cmakedeps/templates/target_configuration.py#L35) was changed by adding some special case when `pkg_name` and `comp_name` are the same. \r\n\r\nIt accidentally break this case, because `tvalue` macro is inlined in declaration of several variables (see [this](https://github.com/conan-io/conan/blob/80e99ca9a7d730e95adbc8bde5aa33da4f4fbb44/conan/tools/cmake/cmakedeps/templates/target_configuration.py#L98) for example). So these variables have one name (`{pkg_name}_{comp_name}_...`) during setting and the other name (`{comp_name}_...`) during using it in `set_property`.\r\n\r\ncc @lasote \n", "before_files": [{"content": "import textwrap\n\nfrom conan.tools.cmake.cmakedeps.templates import CMakeDepsFileTemplate, get_component_alias, \\\n get_target_namespace\n\n\"\"\"\n\nFooTarget-release.cmake\n\n\"\"\"\n\n\nclass TargetConfigurationTemplate(CMakeDepsFileTemplate):\n\n @property\n def filename(self):\n return \"{}Target-{}.cmake\".format(self.file_name,\n self.cmakedeps.configuration.lower())\n\n @property\n def context(self):\n deps_targets_names = self.get_deps_targets_names() \\\n if not self.conanfile.is_build_context else []\n return {\"pkg_name\": self.pkg_name,\n \"target_namespace\": self.target_namespace,\n \"config_suffix\": self.config_suffix,\n \"deps_targets_names\": \";\".join(deps_targets_names),\n \"components_names\": self.get_required_components_names(),\n \"configuration\": self.cmakedeps.configuration}\n\n @property\n def template(self):\n return textwrap.dedent(\"\"\"\\\n\n {%- macro tvalue(pkg_name, comp_name, var, config_suffix) -%}\n {%- if comp_name == pkg_name -%}\n {{'${'+pkg_name+'_'+var+config_suffix+'}'}}\n {%- else -%}\n {{'${'+pkg_name+'_'+comp_name+'_'+var+config_suffix+'}'}}\n {%- endif -%}\n {%- endmacro -%}\n\n ########### VARIABLES #######################################################################\n #############################################################################################\n\n set({{ pkg_name }}_COMPILE_OPTIONS{{ config_suffix }}\n \"$<$<COMPILE_LANGUAGE:CXX>{{ ':${' }}{{ pkg_name }}_COMPILE_OPTIONS_CXX{{ config_suffix }}}>\"\n \"$<$<COMPILE_LANGUAGE:C>{{ ':${' }}{{ pkg_name }}_COMPILE_OPTIONS_C{{ config_suffix }}}>\")\n\n set({{ pkg_name }}_LINKER_FLAGS{{ config_suffix }}\n \"$<$<STREQUAL{{ ':$' }}<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>{{ ':${' }}{{ pkg_name }}_SHARED_LINK_FLAGS{{ config_suffix }}}>\"\n \"$<$<STREQUAL{{ ':$' }}<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>{{ ':${' }}{{ pkg_name }}_SHARED_LINK_FLAGS{{ config_suffix }}}>\"\n 
\"$<$<STREQUAL{{ ':$' }}<TARGET_PROPERTY:TYPE>,EXECUTABLE>{{ ':${' }}{{ pkg_name }}_EXE_LINK_FLAGS{{ config_suffix }}}>\")\n\n set({{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }} \"\") # Will be filled later\n conan_find_apple_frameworks({{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }} \"{{ '${' }}{{ pkg_name }}_FRAMEWORKS{{ config_suffix }}}\" \"{{ '${' }}{{ pkg_name }}_FRAMEWORK_DIRS{{ config_suffix }}}\")\n\n # Gather all the libraries that should be linked to the targets (do not touch existing variables)\n set(_{{ pkg_name }}_DEPENDENCIES{{ config_suffix }} \"{{ '${' }}{{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }}} {{ '${' }}{{ pkg_name }}_SYSTEM_LIBS{{ config_suffix }}} {{ deps_targets_names }}\")\n\n set({{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} \"\") # Will be filled later\n set({{ pkg_name }}_LIBRARIES{{ config_suffix }} \"\") # Will be filled later\n conan_package_library_targets(\"{{ '${' }}{{ pkg_name }}_LIBS{{ config_suffix }}}\" # libraries\n \"{{ '${' }}{{ pkg_name }}_LIB_DIRS{{ config_suffix }}}\" # package_libdir\n \"{{ '${' }}_{{ pkg_name }}_DEPENDENCIES{{ config_suffix }}}\" # deps\n {{ pkg_name }}_LIBRARIES{{ config_suffix }} # out_libraries\n {{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} # out_libraries_targets\n \"{{ config_suffix }}\" # config_suffix\n \"{{ pkg_name }}\") # package_name\n\n foreach(_FRAMEWORK {{ '${' }}{{ pkg_name }}_FRAMEWORKS_FOUND{{ config_suffix }}})\n list(APPEND {{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} ${_FRAMEWORK})\n list(APPEND {{ pkg_name }}_LIBRARIES{{ config_suffix }} ${_FRAMEWORK})\n endforeach()\n\n foreach(_SYSTEM_LIB {{ '${' }}{{ pkg_name }}_SYSTEM_LIBS{{ config_suffix }}})\n list(APPEND {{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} ${_SYSTEM_LIB})\n list(APPEND {{ pkg_name }}_LIBRARIES{{ config_suffix }} ${_SYSTEM_LIB})\n endforeach()\n\n # We need to add our requirements too\n set({{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }} {{ '\"${' }}{{ pkg_name }}_LIBRARIES_TARGETS{{ config_suffix }}{{ '};' }}{{ deps_targets_names }}\")\n set({{ pkg_name }}_LIBRARIES{{ config_suffix }} {{ '\"${' }}{{ pkg_name }}_LIBRARIES{{ config_suffix }}{{ '};' }}{{ deps_targets_names }}\")\n\n # FIXME: What is the result of this for multi-config? 
All configs adding themselves to path?\n set(CMAKE_MODULE_PATH {{ '${' }}{{ pkg_name }}_BUILD_DIRS{{ config_suffix }}} {{ '${' }}CMAKE_MODULE_PATH})\n set(CMAKE_PREFIX_PATH {{ '${' }}{{ pkg_name }}_BUILD_DIRS{{ config_suffix }}} {{ '${' }}CMAKE_PREFIX_PATH})\n\n {%- for comp_name in components_names %}\n\n ########## COMPONENT {{ comp_name }} FIND LIBRARIES & FRAMEWORKS / DYNAMIC VARS #############\n\n set({{ pkg_name }}_{{ comp_name }}_FRAMEWORKS_FOUND{{ config_suffix }} \"\")\n conan_find_apple_frameworks({{ pkg_name }}_{{ comp_name }}_FRAMEWORKS_FOUND{{ config_suffix }} \"{{ '${'+pkg_name+'_'+comp_name+'_FRAMEWORKS'+config_suffix+'}' }}\" \"{{ '${'+pkg_name+'_'+comp_name+'_FRAMEWORK_DIRS'+config_suffix+'}' }}\")\n\n set({{ pkg_name }}_{{ comp_name }}_LIB_TARGETS{{ config_suffix }} \"\")\n set({{ pkg_name }}_{{ comp_name }}_NOT_USED{{ config_suffix }} \"\")\n set({{ pkg_name }}_{{ comp_name }}_LIBS_FRAMEWORKS_DEPS{{ config_suffix }} {{ '${'+pkg_name+'_'+comp_name+'_FRAMEWORKS_FOUND'+config_suffix+'}' }} {{ '${'+pkg_name+'_'+comp_name+'_SYSTEM_LIBS'+config_suffix+'}' }} {{ '${'+pkg_name+'_'+comp_name+'_DEPENDENCIES'+config_suffix+'}' }})\n conan_package_library_targets(\"{{ '${'+pkg_name+'_'+comp_name+'_LIBS'+config_suffix+'}' }}\"\n \"{{ '${'+pkg_name+'_'+comp_name+'_LIB_DIRS'+config_suffix+'}' }}\"\n \"{{ '${'+pkg_name+'_'+comp_name+'_LIBS_FRAMEWORKS_DEPS'+config_suffix+'}' }}\"\n {{ pkg_name }}_{{ comp_name }}_NOT_USED{{ config_suffix }}\n {{ pkg_name }}_{{ comp_name }}_LIB_TARGETS{{ config_suffix }}\n \"{{ config_suffix }}\"\n \"{{ pkg_name }}_{{ comp_name }}\")\n\n set({{ pkg_name }}_{{ comp_name }}_LINK_LIBS{{ config_suffix }} {{ '${'+pkg_name+'_'+comp_name+'_LIB_TARGETS'+config_suffix+'}' }} {{ '${'+pkg_name+'_'+comp_name+'_LIBS_FRAMEWORKS_DEPS'+config_suffix+'}' }})\n {%- endfor %}\n\n\n\n ########## GLOBAL TARGET PROPERTIES {{ configuration }} ########################################\n set_property(TARGET {{target_namespace}}::{{target_namespace}}\n PROPERTY INTERFACE_LINK_LIBRARIES\n $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_LIBRARIES_TARGETS{{config_suffix}}}\n ${{'{'}}{{pkg_name}}_LINKER_FLAGS{{config_suffix}}}> APPEND)\n set_property(TARGET {{target_namespace}}::{{target_namespace}}\n PROPERTY INTERFACE_INCLUDE_DIRECTORIES\n $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_INCLUDE_DIRS{{config_suffix}}}> APPEND)\n set_property(TARGET {{target_namespace}}::{{target_namespace}}\n PROPERTY INTERFACE_COMPILE_DEFINITIONS\n $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_COMPILE_DEFINITIONS{{config_suffix}}}> APPEND)\n set_property(TARGET {{target_namespace}}::{{target_namespace}}\n PROPERTY INTERFACE_COMPILE_OPTIONS\n $<$<CONFIG:{{configuration}}>:${{'{'}}{{pkg_name}}_COMPILE_OPTIONS{{config_suffix}}}> APPEND)\n\n ########## COMPONENTS TARGET PROPERTIES {{ configuration }} ########################################\n\n {%- for comp_name in components_names %}\n\n ########## COMPONENT {{ comp_name }} TARGET PROPERTIES ######################################\n set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_LINK_LIBRARIES\n $<$<CONFIG:{{ configuration }}>:{{tvalue(pkg_name, comp_name, 'LINK_LIBS', config_suffix)}}\n {{tvalue(pkg_name, comp_name, 'LINKER_FLAGS', config_suffix)}}> APPEND)\n set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_INCLUDE_DIRECTORIES\n $<$<CONFIG:{{ configuration }}>:{{tvalue(pkg_name, comp_name, 'INCLUDE_DIRS', config_suffix)}}> APPEND)\n set_property(TARGET {{ target_namespace }}::{{ 
comp_name }} PROPERTY INTERFACE_COMPILE_DEFINITIONS\n $<$<CONFIG:{{ configuration }}>:{{tvalue(pkg_name, comp_name, 'COMPILE_DEFINITIONS', config_suffix)}}> APPEND)\n set_property(TARGET {{ target_namespace }}::{{ comp_name }} PROPERTY INTERFACE_COMPILE_OPTIONS\n $<$<CONFIG:{{ configuration }}>:\n {{tvalue(pkg_name, comp_name, 'COMPILE_OPTIONS_C', config_suffix)}}\n {{tvalue(pkg_name, comp_name, 'COMPILE_OPTIONS_CXX', config_suffix)}}> APPEND)\n set({{ pkg_name }}_{{ comp_name }}_TARGET_PROPERTIES TRUE)\n\n {%- endfor %}\n\n \"\"\")\n\n def get_required_components_names(self):\n \"\"\"Returns a list of component_name\"\"\"\n ret = []\n sorted_comps = self.conanfile.new_cpp_info.get_sorted_components()\n for comp_name, comp in sorted_comps.items():\n ret.append(get_component_alias(self.conanfile, comp_name))\n ret.reverse()\n return ret\n\n def get_deps_targets_names(self):\n \"\"\"\n - [{foo}::{bar}, ] of the required\n \"\"\"\n ret = []\n\n # Get a list of dependencies target names\n # Declared cppinfo.requires or .components[].requires\n if self.conanfile.new_cpp_info.required_components:\n for dep_name, component_name in self.conanfile.new_cpp_info.required_components:\n if not dep_name:\n # Internal dep (no another component)\n dep_name = get_target_namespace(self.conanfile)\n req = self.conanfile\n else:\n req = self.conanfile.dependencies.transitive_host_requires[dep_name]\n dep_name = get_target_namespace(req)\n\n component_name = get_component_alias(req, component_name)\n ret.append(\"{}::{}\".format(dep_name, component_name))\n elif self.conanfile.dependencies.host_requires:\n # Regular external \"conanfile.requires\" declared, not cpp_info requires\n ret = [\"{p}::{p}\".format(p=get_target_namespace(r))\n for r in self.conanfile.dependencies.host_requires]\n return ret\n", "path": "conan/tools/cmake/cmakedeps/templates/target_configuration.py"}]} | 3,707 | 206 |
gh_patches_debug_22807 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-907 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Double in mapping thrown E7001 error
*cfn-lint version: cfn-lint 0.20.1*
*Description of issue.*
When a mapping value is a double (ex. 1.1) it returns the error `E7001:Mapping [map] has invalid property at [property]`
Examples:
With double value:

Changed to Int:

Example CFT: [environment.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3179852/environment.yaml.txt)
</issue>
<code>
[start of src/cfnlint/rules/mappings/Configuration.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import six
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21
22 class Configuration(CloudFormationLintRule):
23 """Check if Mappings are configured correctly"""
24 id = 'E7001'
25 shortdesc = 'Mappings are appropriately configured'
26 description = 'Check if Mappings are properly configured'
27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'
28 tags = ['mappings']
29
30 def match(self, cfn):
31 """Check CloudFormation Parameters"""
32
33 matches = []
34
35 mappings = cfn.template.get('Mappings', {})
36 if mappings:
37 for mapname, mapobj in mappings.items():
38 if not isinstance(mapobj, dict):
39 message = 'Mapping {0} has invalid property'
40 matches.append(RuleMatch(
41 ['Mappings', mapname],
42 message.format(mapname)
43 ))
44 else:
45 for firstkey in mapobj:
46 firstkeyobj = mapobj[firstkey]
47 if not isinstance(firstkeyobj, dict):
48 message = 'Mapping {0} has invalid property at {1}'
49 matches.append(RuleMatch(
50 ['Mappings', mapname, firstkey],
51 message.format(mapname, firstkeyobj)
52 ))
53 else:
54 for secondkey in firstkeyobj:
55 if not isinstance(
56 firstkeyobj[secondkey],
57 (six.string_types, list, six.integer_types)):
58 message = 'Mapping {0} has invalid property at {1}'
59 matches.append(RuleMatch(
60 ['Mappings', mapname, firstkey, secondkey],
61 message.format(mapname, secondkey)
62 ))
63
64 return matches
65
[end of src/cfnlint/rules/mappings/Configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py
--- a/src/cfnlint/rules/mappings/Configuration.py
+++ b/src/cfnlint/rules/mappings/Configuration.py
@@ -32,6 +32,8 @@
matches = []
+ valid_map_types = (six.string_types, list, six.integer_types, float)
+
mappings = cfn.template.get('Mappings', {})
if mappings:
for mapname, mapobj in mappings.items():
@@ -53,8 +55,7 @@
else:
for secondkey in firstkeyobj:
if not isinstance(
- firstkeyobj[secondkey],
- (six.string_types, list, six.integer_types)):
+ firstkeyobj[secondkey], valid_map_types):
message = 'Mapping {0} has invalid property at {1}'
matches.append(RuleMatch(
['Mappings', mapname, firstkey, secondkey],
| {"golden_diff": "diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py\n--- a/src/cfnlint/rules/mappings/Configuration.py\n+++ b/src/cfnlint/rules/mappings/Configuration.py\n@@ -32,6 +32,8 @@\n \n matches = []\n \n+ valid_map_types = (six.string_types, list, six.integer_types, float)\n+\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n@@ -53,8 +55,7 @@\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n- firstkeyobj[secondkey],\n- (six.string_types, list, six.integer_types)):\n+ firstkeyobj[secondkey], valid_map_types):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n", "issue": "Double in mapping thrown E7001 error\n*cfn-lint version: cfn-lint 0.20.1*\r\n\r\n*Description of issue.*\r\nWhen a mapping value is a double (ex. 1.1) it returns the error `E7001:Mapping [map] has invalid property at [property]`\r\n\r\nExamples:\r\nWith double value:\r\n\r\n\r\nChanged to Int:\r\n\r\n\r\nExample CFT: [environment.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3179852/environment.yaml.txt)\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'\n tags = ['mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if not isinstance(\n firstkeyobj[secondkey],\n (six.string_types, list, six.integer_types)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}]} | 1,478 | 215 |
gh_patches_debug_23136 | rasdani/github-patches | git_diff | ivy-llc__ivy-26394 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
index_add
index_add function
</issue>
<code>
[start of ivy/functional/frontends/paddle/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 with_supported_device_and_dtypes,
10 )
11
12
13 @with_unsupported_dtypes({"2.5.2 and below": ("float16", "bfloat16")}, "paddle")
14 @to_ivy_arrays_and_back
15 def abs(x, name=None):
16 return ivy.abs(x)
17
18
19 @with_supported_dtypes(
20 {"2.5.2 and below": ("bool", "float32", "float64", "int32", "int64")},
21 "paddle",
22 )
23 @to_ivy_arrays_and_back
24 def broadcast_to(x, shape, name=None):
25 return ivy.broadcast_to(x, shape)
26
27
28 @with_supported_dtypes(
29 {
30 "2.5.2 and below": (
31 "bool",
32 "float16",
33 "float32",
34 "float64",
35 "int32",
36 "int64",
37 "uint8",
38 )
39 },
40 "paddle",
41 )
42 @to_ivy_arrays_and_back
43 def cast(x, dtype):
44 return ivy.astype(x, dtype)
45
46
47 @with_unsupported_dtypes({"2.5.2 and below": ("int8", "int16")}, "paddle")
48 @to_ivy_arrays_and_back
49 def concat(x, axis, name=None):
50 return ivy.concat(x, axis=axis)
51
52
53 @with_supported_dtypes(
54 {"2.5.2 and below": ("bool", "float32", "float64", "int32", "int64")},
55 "paddle",
56 )
57 @to_ivy_arrays_and_back
58 def expand(x, shape, name=None):
59 return ivy.expand(x, shape)
60
61
62 @with_unsupported_dtypes(
63 {"2.5.2 and below": ("int8", "uint8", "int16", "float16")},
64 "paddle",
65 )
66 @to_ivy_arrays_and_back
67 def flip(x, axis, name=None):
68 return ivy.flip(x, axis=axis)
69
70
71 @with_supported_dtypes(
72 {"2.5.2 and below": ("bool", "float32", "float64", "int32", "int64")},
73 "paddle",
74 )
75 @to_ivy_arrays_and_back
76 def gather(params, indices, axis=-1, batch_dims=0, name=None):
77 return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)
78
79
80 @with_unsupported_dtypes(
81 {"2.5.2 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")},
82 "paddle",
83 )
84 @to_ivy_arrays_and_back
85 def gather_nd(x, index, name=None):
86 return ivy.gather_nd(x, index)
87
88
89 @to_ivy_arrays_and_back
90 def put_along_axis(arr, indices, values, axis, reduce="assign"):
91 result = ivy.put_along_axis(arr, indices, values, axis)
92 return result
93
94
95 @with_supported_dtypes(
96 {"2.5.2 and below": ("int32", "int64", "float32", "float64")},
97 "paddle",
98 )
99 @to_ivy_arrays_and_back
100 def repeat_interleave(x, repeats, axis=None, name=None):
101 return ivy.repeat(x, repeats, axis=axis)
102
103
104 @to_ivy_arrays_and_back
105 def reshape(x, shape, name=None):
106 return ivy.reshape(x, shape)
107
108
109 @with_supported_dtypes(
110 {
111 "2.5.0 and below": (
112 "float32",
113 "float64",
114 "int32",
115 "int64",
116 "complex64",
117 "complex128",
118 )
119 },
120 "paddle",
121 )
122 @to_ivy_arrays_and_back
123 def roll(x, shifts, axis=None, name=None):
124 return ivy.roll(x, shifts, axis=axis)
125
126
127 @with_supported_device_and_dtypes(
128 {
129 "2.5.2 and above": {
130 "cpu": (
131 "bool",
132 "int32",
133 "int64",
134 "float32",
135 "float64",
136 ),
137 "gpu": ("float16",),
138 },
139 },
140 "paddle",
141 )
142 @to_ivy_arrays_and_back
143 def rot90(x, k=1, axes=(0, 1), name=None):
144 return ivy.rot90(x, k=k, axes=axes)
145
146
147 @with_unsupported_dtypes(
148 {"2.5.2 and below": ("int16", "complex64", "complex128")},
149 "paddle",
150 )
151 @to_ivy_arrays_and_back
152 def split(x, num_or_sections, axis=0, name=None):
153 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
154
155
156 @with_unsupported_dtypes(
157 {"2.5.2 and below": ("float16", "bfloat16", "int8", "int16")},
158 "paddle",
159 )
160 @to_ivy_arrays_and_back
161 def squeeze(x, axis=None, name=None):
162 return ivy.squeeze(x, axis=axis)
163
164
165 @to_ivy_arrays_and_back
166 def stack(x, axis=0, name=None):
167 return ivy.stack(x, axis=axis)
168
169
170 def take_along_axis(arr, indices, axis):
171 return ivy.take_along_axis(arr, indices, axis)
172
173
174 @with_unsupported_dtypes(
175 {"2.5.2 and below": ("int8", "uint8", "int16", "float16")},
176 "paddle",
177 )
178 @to_ivy_arrays_and_back
179 def tile(x, repeat_times, name=None):
180 return ivy.tile(x, repeats=repeat_times)
181
182
183 @to_ivy_arrays_and_back
184 def tolist(x):
185 return ivy.to_list(x)
186
187
188 @with_supported_dtypes(
189 {"2.5.2 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
190 "paddle",
191 )
192 @to_ivy_arrays_and_back
193 def unbind(input, axis=0):
194 shape = list(input.shape)
195 num_splits = shape[axis]
196 shape.pop(axis)
197 return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))
198
199
200 @with_supported_dtypes(
201 {"2.5.2 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
202 "paddle",
203 )
204 @to_ivy_arrays_and_back
205 def unique_consecutive(x, axis=0):
206 return ivy.unique_consecutive(x, axis=axis)
207
208
209 @with_supported_dtypes(
210 {
211 "2.5.2 and below": (
212 "float32",
213 "float64",
214 "int32",
215 "int64",
216 )
217 },
218 "paddle",
219 )
220 @to_ivy_arrays_and_back
221 def unstack(x, axis=0, name=None):
222 return ivy.unstack(x, axis=axis)
223
224
225 absolute = abs
226
[end of ivy/functional/frontends/paddle/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py
--- a/ivy/functional/frontends/paddle/manipulation.py
+++ b/ivy/functional/frontends/paddle/manipulation.py
@@ -86,6 +86,37 @@
return ivy.gather_nd(x, index)
+@with_supported_dtypes(
+ {"2.5.1 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def index_add(x, index, axis, value, *, name=None):
+ x = ivy.swapaxes(x, axis, 0)
+ value = ivy.swapaxes(value, axis, 0)
+ _to_adds = []
+ index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))
+ while index:
+ _curr_idx = index[0][0]
+ while len(_to_adds) < _curr_idx:
+ _to_adds.append(ivy.zeros_like(value[0]))
+ _to_add_cum = ivy.get_item(value, index[0][1])
+ while (len(index)) > 1 and (index[0][0] == index[1][0]):
+ _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])
+ index.pop(0)
+ _to_adds.append(_to_add_cum)
+ while len(_to_adds) < x.shape[0]:
+ _to_adds.append(ivy.zeros_like(value[0]))
+ _to_adds = ivy.stack(_to_adds)
+ if len(x.shape) < 2:
+ # Added this line due to the paddle backend treating scalars as 1-d arrays
+ _to_adds = ivy.flatten(_to_adds)
+
+ ret = ivy.add(x, _to_adds)
+ ret = ivy.swapaxes(ret, axis, 0)
+ return ret
+
+
@to_ivy_arrays_and_back
def put_along_axis(arr, indices, values, axis, reduce="assign"):
result = ivy.put_along_axis(arr, indices, values, axis)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py\n--- a/ivy/functional/frontends/paddle/manipulation.py\n+++ b/ivy/functional/frontends/paddle/manipulation.py\n@@ -86,6 +86,37 @@\n return ivy.gather_nd(x, index)\n \n \n+@with_supported_dtypes(\n+ {\"2.5.1 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def index_add(x, index, axis, value, *, name=None):\n+ x = ivy.swapaxes(x, axis, 0)\n+ value = ivy.swapaxes(value, axis, 0)\n+ _to_adds = []\n+ index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))\n+ while index:\n+ _curr_idx = index[0][0]\n+ while len(_to_adds) < _curr_idx:\n+ _to_adds.append(ivy.zeros_like(value[0]))\n+ _to_add_cum = ivy.get_item(value, index[0][1])\n+ while (len(index)) > 1 and (index[0][0] == index[1][0]):\n+ _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])\n+ index.pop(0)\n+ _to_adds.append(_to_add_cum)\n+ while len(_to_adds) < x.shape[0]:\n+ _to_adds.append(ivy.zeros_like(value[0]))\n+ _to_adds = ivy.stack(_to_adds)\n+ if len(x.shape) < 2:\n+ # Added this line due to the paddle backend treating scalars as 1-d arrays\n+ _to_adds = ivy.flatten(_to_adds)\n+\n+ ret = ivy.add(x, _to_adds)\n+ ret = ivy.swapaxes(ret, axis, 0)\n+ return ret\n+\n+\n @to_ivy_arrays_and_back\n def put_along_axis(arr, indices, values, axis, reduce=\"assign\"):\n result = ivy.put_along_axis(arr, indices, values, axis)\n", "issue": "index_add\nindex_add function\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@with_unsupported_dtypes({\"2.5.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_unsupported_dtypes({\"2.5.2 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n 
\"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather_nd(x, index, name=None):\n return ivy.gather_nd(x, index)\n\n\n@to_ivy_arrays_and_back\ndef put_along_axis(arr, indices, values, axis, reduce=\"assign\"):\n result = ivy.put_along_axis(arr, indices, values, axis)\n return result\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef repeat_interleave(x, repeats, axis=None, name=None):\n return ivy.repeat(x, repeats, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape, name=None):\n return ivy.reshape(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.2 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n\n\n@with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.2 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@to_ivy_arrays_and_back\ndef tolist(x):\n return ivy.to_list(x)\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unbind(input, axis=0):\n shape = list(input.shape)\n num_splits = shape[axis]\n shape.pop(axis)\n return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))\n\n\n@with_supported_dtypes(\n {\"2.5.2 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unique_consecutive(x, axis=0):\n return ivy.unique_consecutive(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unstack(x, axis=0, name=None):\n return ivy.unstack(x, axis=axis)\n\n\nabsolute = abs\n", "path": "ivy/functional/frontends/paddle/manipulation.py"}]} | 2,759 | 531 |
gh_patches_debug_29326 | rasdani/github-patches | git_diff | cloudtools__troposphere-1102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prefix is optional for
I assume `True` here means that `Prefix` is mandatory:
```py
class S3DestinationConfiguration(AWSProperty):
props = {
'BucketARN': (basestring, True),
'BufferingHints': (BufferingHints, True),
'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
'CompressionFormat': (basestring, True),
'EncryptionConfiguration': (EncryptionConfiguration, False),
'Prefix': (basestring, True),
'RoleARN': (basestring, True),
}
class ExtendedS3DestinationConfiguration(AWSProperty):
props = {
'BucketARN': (basestring, True),
'BufferingHints': (BufferingHints, True),
'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
'CompressionFormat': (basestring, True),
'EncryptionConfiguration': (EncryptionConfiguration, False),
'Prefix': (basestring, True),
'ProcessingConfiguration': (ProcessingConfiguration, False),
'RoleARN': (basestring, True),
'S3BackupConfiguration': (S3DestinationConfiguration, False),
'S3BackupMode': (s3_backup_mode_extended_s3_validator, False),
}
```
However, [`Prefix` is optional](https://docs.aws.amazon.com/firehose/latest/APIReference/API_S3DestinationConfiguration.html).
</issue>
<code>
[start of troposphere/firehose.py]
1 # Copyright (c) 2016-2017, troposphere project
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import boolean, positive_integer
8
9
10 def processor_type_validator(x):
11 valid_types = ["Lambda"]
12 if x not in valid_types:
13 raise ValueError("Type must be one of: %s" %
14 ", ".join(valid_types))
15 return x
16
17
18 def delivery_stream_type_validator(x):
19 valid_types = ["DirectPut", "KinesisStreamAsSource"]
20 if x not in valid_types:
21 raise ValueError("DeliveryStreamType must be one of: %s" %
22 ", ".join(valid_types))
23 return x
24
25
26 def index_rotation_period_validator(x):
27 valid_types = ["NoRotation", "OneHour", "OneDay", "OneWeek", "OneMonth"]
28 if x not in valid_types:
29 raise ValueError("IndexRotationPeriod must be one of: %s" %
30 ", ".join(valid_types))
31 return x
32
33
34 def s3_backup_mode_elastic_search_validator(x):
35 valid_types = ["FailedDocumentsOnly", "AllDocuments"]
36 if x not in valid_types:
37 raise ValueError("S3BackupMode must be one of: %s" %
38 ", ".join(valid_types))
39 return x
40
41
42 def s3_backup_mode_extended_s3_validator(x):
43 valid_types = ["Disabled", "Enabled"]
44 if x not in valid_types:
45 raise ValueError("S3BackupMode must be one of: %s" %
46 ", ".join(valid_types))
47 return x
48
49
50 class BufferingHints(AWSProperty):
51 props = {
52 'IntervalInSeconds': (positive_integer, True),
53 'SizeInMBs': (positive_integer, True)
54 }
55
56
57 class CloudWatchLoggingOptions(AWSProperty):
58 props = {
59 'Enabled': (boolean, False),
60 'LogGroupName': (basestring, False), # Conditional
61 'LogStreamName': (basestring, False), # Conditional
62 }
63
64
65 class RetryOptions(AWSProperty):
66 props = {
67 'DurationInSeconds': (positive_integer, True),
68 }
69
70
71 class KMSEncryptionConfig(AWSProperty):
72 props = {
73 'AWSKMSKeyARN': (basestring, True),
74 }
75
76
77 class EncryptionConfiguration(AWSProperty):
78 props = {
79 'KMSEncryptionConfig': (KMSEncryptionConfig, False),
80 'NoEncryptionConfig': (basestring, False),
81 }
82
83
84 class S3Configuration(AWSProperty):
85 props = {
86 'BucketARN': (basestring, True),
87 'BufferingHints': (BufferingHints, True),
88 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
89 'CompressionFormat': (basestring, True),
90 'EncryptionConfiguration': (EncryptionConfiguration, False),
91 'Prefix': (basestring, True),
92 'RoleARN': (basestring, True)
93 }
94
95
96 class CopyCommand(AWSProperty):
97 props = {
98 'CopyOptions': (basestring, False),
99 'DataTableColumns': (basestring, False),
100 'DataTableName': (basestring, True),
101 }
102
103
104 class ProcessorParameter(AWSProperty):
105 props = {
106 'ParameterName': (basestring, True),
107 'ParameterValue': (basestring, True),
108 }
109
110
111 class Processor(AWSProperty):
112 props = {
113 'Parameters': ([ProcessorParameter], True),
114 'Type': (processor_type_validator, True),
115 }
116
117
118 class ProcessingConfiguration(AWSProperty):
119 props = {
120 'Enabled': (boolean, True),
121 'Processors': ([Processor], True),
122 }
123
124
125 class ElasticsearchDestinationConfiguration(AWSProperty):
126 props = {
127 'BufferingHints': (BufferingHints, True),
128 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
129 'DomainARN': (basestring, True),
130 'IndexName': (basestring, True),
131 'IndexRotationPeriod': (index_rotation_period_validator, True),
132 'ProcessingConfiguration': (ProcessingConfiguration, False),
133 'RetryOptions': (RetryOptions, False),
134 'RoleARN': (basestring, True),
135 'S3BackupMode': (s3_backup_mode_elastic_search_validator, True),
136 'S3Configuration': (S3Configuration, False),
137 'TypeName': (basestring, True),
138 }
139
140
141 class RedshiftDestinationConfiguration(AWSProperty):
142 props = {
143 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
144 'ClusterJDBCURL': (basestring, True),
145 'CopyCommand': (CopyCommand, True),
146 'Password': (basestring, True),
147 'ProcessingConfiguration': (ProcessingConfiguration, False),
148 'RoleARN': (basestring, True),
149 'S3Configuration': (S3Configuration, True),
150 'Username': (basestring, True),
151 }
152
153
154 class S3DestinationConfiguration(AWSProperty):
155 props = {
156 'BucketARN': (basestring, True),
157 'BufferingHints': (BufferingHints, True),
158 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
159 'CompressionFormat': (basestring, True),
160 'EncryptionConfiguration': (EncryptionConfiguration, False),
161 'Prefix': (basestring, True),
162 'RoleARN': (basestring, True),
163 }
164
165
166 class ExtendedS3DestinationConfiguration(AWSProperty):
167 props = {
168 'BucketARN': (basestring, True),
169 'BufferingHints': (BufferingHints, True),
170 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
171 'CompressionFormat': (basestring, True),
172 'EncryptionConfiguration': (EncryptionConfiguration, False),
173 'Prefix': (basestring, True),
174 'ProcessingConfiguration': (ProcessingConfiguration, False),
175 'RoleARN': (basestring, True),
176 'S3BackupConfiguration': (S3DestinationConfiguration, False),
177 'S3BackupMode': (s3_backup_mode_extended_s3_validator, False),
178 }
179
180
181 class KinesisStreamSourceConfiguration(AWSProperty):
182 props = {
183 'KinesisStreamARN': (basestring, True),
184 'RoleARN': (basestring, True)
185 }
186
187
188 class SplunkRetryOptions(AWSProperty):
189 props = {
190 'DurationInSeconds': (positive_integer, True),
191 }
192
193
194 class SplunkDestinationConfiguration(AWSProperty):
195 props = {
196 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
197 'HECAcknowledgmentTimeoutInSeconds': (positive_integer, False),
198 'HECEndpoint': (basestring, True),
199 'HECEndpointType': (basestring, True),
200 'HECToken': (basestring, True),
201 'ProcessingConfiguration': (ProcessingConfiguration, False),
202 'RetryOptions': (SplunkRetryOptions, False),
203 'S3BackupMode': (basestring, False),
204 'S3Configuration': (S3DestinationConfiguration, True),
205 }
206
207
208 class DeliveryStream(AWSObject):
209 resource_type = "AWS::KinesisFirehose::DeliveryStream"
210
211 props = {
212 'DeliveryStreamName': (basestring, False),
213 'DeliveryStreamType': (delivery_stream_type_validator, False),
214 'ElasticsearchDestinationConfiguration': (ElasticsearchDestinationConfiguration, False), # noqa
215 'ExtendedS3DestinationConfiguration': (ExtendedS3DestinationConfiguration, False), # noqa
216 'KinesisStreamSourceConfiguration': (KinesisStreamSourceConfiguration, False), # noqa
217 'RedshiftDestinationConfiguration': (RedshiftDestinationConfiguration, False), # noqa
218 'S3DestinationConfiguration': (S3DestinationConfiguration, False),
219 'SplunkDestinationConfiguration':
220 (SplunkDestinationConfiguration, False),
221 }
222
[end of troposphere/firehose.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/firehose.py b/troposphere/firehose.py
--- a/troposphere/firehose.py
+++ b/troposphere/firehose.py
@@ -88,7 +88,7 @@
'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
'CompressionFormat': (basestring, True),
'EncryptionConfiguration': (EncryptionConfiguration, False),
- 'Prefix': (basestring, True),
+ 'Prefix': (basestring, False),
'RoleARN': (basestring, True)
}
@@ -158,7 +158,7 @@
'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
'CompressionFormat': (basestring, True),
'EncryptionConfiguration': (EncryptionConfiguration, False),
- 'Prefix': (basestring, True),
+ 'Prefix': (basestring, False),
'RoleARN': (basestring, True),
}
@@ -170,7 +170,7 @@
'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),
'CompressionFormat': (basestring, True),
'EncryptionConfiguration': (EncryptionConfiguration, False),
- 'Prefix': (basestring, True),
+ 'Prefix': (basestring, False),
'ProcessingConfiguration': (ProcessingConfiguration, False),
'RoleARN': (basestring, True),
'S3BackupConfiguration': (S3DestinationConfiguration, False),
| {"golden_diff": "diff --git a/troposphere/firehose.py b/troposphere/firehose.py\n--- a/troposphere/firehose.py\n+++ b/troposphere/firehose.py\n@@ -88,7 +88,7 @@\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'CompressionFormat': (basestring, True),\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\n- 'Prefix': (basestring, True),\n+ 'Prefix': (basestring, False),\n 'RoleARN': (basestring, True)\n }\n \n@@ -158,7 +158,7 @@\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'CompressionFormat': (basestring, True),\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\n- 'Prefix': (basestring, True),\n+ 'Prefix': (basestring, False),\n 'RoleARN': (basestring, True),\n }\n \n@@ -170,7 +170,7 @@\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'CompressionFormat': (basestring, True),\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\n- 'Prefix': (basestring, True),\n+ 'Prefix': (basestring, False),\n 'ProcessingConfiguration': (ProcessingConfiguration, False),\n 'RoleARN': (basestring, True),\n 'S3BackupConfiguration': (S3DestinationConfiguration, False),\n", "issue": "Prefix is optional for \nI assume `True` here means that `Prefix` is mandatory:\r\n\r\n```py\r\nclass S3DestinationConfiguration(AWSProperty):\r\n props = {\r\n 'BucketARN': (basestring, True),\r\n 'BufferingHints': (BufferingHints, True),\r\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\r\n 'CompressionFormat': (basestring, True),\r\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\r\n 'Prefix': (basestring, True),\r\n 'RoleARN': (basestring, True),\r\n }\r\n\r\nclass ExtendedS3DestinationConfiguration(AWSProperty):\r\n props = {\r\n 'BucketARN': (basestring, True),\r\n 'BufferingHints': (BufferingHints, True),\r\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\r\n 'CompressionFormat': (basestring, True),\r\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\r\n 'Prefix': (basestring, True),\r\n 'ProcessingConfiguration': (ProcessingConfiguration, False),\r\n 'RoleARN': (basestring, True),\r\n 'S3BackupConfiguration': (S3DestinationConfiguration, False),\r\n 'S3BackupMode': (s3_backup_mode_extended_s3_validator, False),\r\n }\r\n```\r\nHowever, [`Prefix` is optional](https://docs.aws.amazon.com/firehose/latest/APIReference/API_S3DestinationConfiguration.html).\n", "before_files": [{"content": "# Copyright (c) 2016-2017, troposphere project\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty\nfrom .validators import boolean, positive_integer\n\n\ndef processor_type_validator(x):\n valid_types = [\"Lambda\"]\n if x not in valid_types:\n raise ValueError(\"Type must be one of: %s\" %\n \", \".join(valid_types))\n return x\n\n\ndef delivery_stream_type_validator(x):\n valid_types = [\"DirectPut\", \"KinesisStreamAsSource\"]\n if x not in valid_types:\n raise ValueError(\"DeliveryStreamType must be one of: %s\" %\n \", \".join(valid_types))\n return x\n\n\ndef index_rotation_period_validator(x):\n valid_types = [\"NoRotation\", \"OneHour\", \"OneDay\", \"OneWeek\", \"OneMonth\"]\n if x not in valid_types:\n raise ValueError(\"IndexRotationPeriod must be one of: %s\" %\n \", \".join(valid_types))\n return x\n\n\ndef s3_backup_mode_elastic_search_validator(x):\n valid_types = [\"FailedDocumentsOnly\", \"AllDocuments\"]\n if x not in valid_types:\n raise ValueError(\"S3BackupMode must be one of: %s\" %\n \", \".join(valid_types))\n return x\n\n\ndef s3_backup_mode_extended_s3_validator(x):\n valid_types = [\"Disabled\", \"Enabled\"]\n if x not in valid_types:\n raise ValueError(\"S3BackupMode must be one of: %s\" %\n \", \".join(valid_types))\n return x\n\n\nclass BufferingHints(AWSProperty):\n props = {\n 'IntervalInSeconds': (positive_integer, True),\n 'SizeInMBs': (positive_integer, True)\n }\n\n\nclass CloudWatchLoggingOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'LogGroupName': (basestring, False), # Conditional\n 'LogStreamName': (basestring, False), # Conditional\n }\n\n\nclass RetryOptions(AWSProperty):\n props = {\n 'DurationInSeconds': (positive_integer, True),\n }\n\n\nclass KMSEncryptionConfig(AWSProperty):\n props = {\n 'AWSKMSKeyARN': (basestring, True),\n }\n\n\nclass EncryptionConfiguration(AWSProperty):\n props = {\n 'KMSEncryptionConfig': (KMSEncryptionConfig, False),\n 'NoEncryptionConfig': (basestring, False),\n }\n\n\nclass S3Configuration(AWSProperty):\n props = {\n 'BucketARN': (basestring, True),\n 'BufferingHints': (BufferingHints, True),\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'CompressionFormat': (basestring, True),\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\n 'Prefix': (basestring, True),\n 'RoleARN': (basestring, True)\n }\n\n\nclass CopyCommand(AWSProperty):\n props = {\n 'CopyOptions': (basestring, False),\n 'DataTableColumns': (basestring, False),\n 'DataTableName': (basestring, True),\n }\n\n\nclass ProcessorParameter(AWSProperty):\n props = {\n 'ParameterName': (basestring, True),\n 'ParameterValue': (basestring, True),\n }\n\n\nclass Processor(AWSProperty):\n props = {\n 'Parameters': ([ProcessorParameter], True),\n 'Type': (processor_type_validator, True),\n }\n\n\nclass ProcessingConfiguration(AWSProperty):\n props = {\n 'Enabled': (boolean, True),\n 'Processors': ([Processor], True),\n }\n\n\nclass ElasticsearchDestinationConfiguration(AWSProperty):\n props = {\n 'BufferingHints': (BufferingHints, True),\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'DomainARN': (basestring, True),\n 'IndexName': (basestring, True),\n 'IndexRotationPeriod': (index_rotation_period_validator, True),\n 'ProcessingConfiguration': (ProcessingConfiguration, False),\n 'RetryOptions': (RetryOptions, False),\n 'RoleARN': (basestring, True),\n 'S3BackupMode': (s3_backup_mode_elastic_search_validator, True),\n 'S3Configuration': (S3Configuration, False),\n 'TypeName': (basestring, True),\n }\n\n\nclass RedshiftDestinationConfiguration(AWSProperty):\n props 
= {\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'ClusterJDBCURL': (basestring, True),\n 'CopyCommand': (CopyCommand, True),\n 'Password': (basestring, True),\n 'ProcessingConfiguration': (ProcessingConfiguration, False),\n 'RoleARN': (basestring, True),\n 'S3Configuration': (S3Configuration, True),\n 'Username': (basestring, True),\n }\n\n\nclass S3DestinationConfiguration(AWSProperty):\n props = {\n 'BucketARN': (basestring, True),\n 'BufferingHints': (BufferingHints, True),\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'CompressionFormat': (basestring, True),\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\n 'Prefix': (basestring, True),\n 'RoleARN': (basestring, True),\n }\n\n\nclass ExtendedS3DestinationConfiguration(AWSProperty):\n props = {\n 'BucketARN': (basestring, True),\n 'BufferingHints': (BufferingHints, True),\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'CompressionFormat': (basestring, True),\n 'EncryptionConfiguration': (EncryptionConfiguration, False),\n 'Prefix': (basestring, True),\n 'ProcessingConfiguration': (ProcessingConfiguration, False),\n 'RoleARN': (basestring, True),\n 'S3BackupConfiguration': (S3DestinationConfiguration, False),\n 'S3BackupMode': (s3_backup_mode_extended_s3_validator, False),\n }\n\n\nclass KinesisStreamSourceConfiguration(AWSProperty):\n props = {\n 'KinesisStreamARN': (basestring, True),\n 'RoleARN': (basestring, True)\n }\n\n\nclass SplunkRetryOptions(AWSProperty):\n props = {\n 'DurationInSeconds': (positive_integer, True),\n }\n\n\nclass SplunkDestinationConfiguration(AWSProperty):\n props = {\n 'CloudWatchLoggingOptions': (CloudWatchLoggingOptions, False),\n 'HECAcknowledgmentTimeoutInSeconds': (positive_integer, False),\n 'HECEndpoint': (basestring, True),\n 'HECEndpointType': (basestring, True),\n 'HECToken': (basestring, True),\n 'ProcessingConfiguration': (ProcessingConfiguration, False),\n 'RetryOptions': (SplunkRetryOptions, False),\n 'S3BackupMode': (basestring, False),\n 'S3Configuration': (S3DestinationConfiguration, True),\n }\n\n\nclass DeliveryStream(AWSObject):\n resource_type = \"AWS::KinesisFirehose::DeliveryStream\"\n\n props = {\n 'DeliveryStreamName': (basestring, False),\n 'DeliveryStreamType': (delivery_stream_type_validator, False),\n 'ElasticsearchDestinationConfiguration': (ElasticsearchDestinationConfiguration, False), # noqa\n 'ExtendedS3DestinationConfiguration': (ExtendedS3DestinationConfiguration, False), # noqa\n 'KinesisStreamSourceConfiguration': (KinesisStreamSourceConfiguration, False), # noqa\n 'RedshiftDestinationConfiguration': (RedshiftDestinationConfiguration, False), # noqa\n 'S3DestinationConfiguration': (S3DestinationConfiguration, False),\n 'SplunkDestinationConfiguration':\n (SplunkDestinationConfiguration, False),\n }\n", "path": "troposphere/firehose.py"}]} | 3,074 | 322 |
gh_patches_debug_21935 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Modify domain invitation script to process "friends of the show" first, then incrementally process others
### Issue description
We have a few domain managers that we'll invite to the registrar first. Let's modify the domain invitation script to send to a defined set of domains first.
We also shouldn't blast thousands of emails out to the internet, but incrementally roll them out.
### Acceptance criteria
- [ ] Invitation script works with a product owner-specified list of domains/contacts (before sending to everyone else)
- [ ] Script slow rolls out invitations. Could be percentage-based (1/2/5/10/20/45/75/100) or volume-based (a few hundred at a time)
### Additional context
_No response_
### Links to other issues
🔄 Related to PR #1038
</issue>
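For illustration, a minimal, self-contained sketch of a command-line interface that accepts an optional allow-list of email addresses and a batch-size cap; the argument names and defaults are assumptions made for this sketch, not the project's actual management command:

```python
import argparse

def parse_invitation_args(argv=None):
    # Hypothetical flags, for illustration only.
    parser = argparse.ArgumentParser(description="Send domain invitations in batches")
    parser.add_argument("emails", nargs="*",
                        help="optional allow-list of email addresses to invite first")
    parser.add_argument("--batch-size", type=int, default=100,
                        help="maximum number of invitations to send in one run")
    parser.add_argument("--send-emails", action="store_true",
                        help="actually send emails instead of doing a dry run")
    return parser.parse_args(argv)

if __name__ == "__main__":
    args = parse_invitation_args()
    print(args.emails, args.batch_size, args.send_emails)
```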
<code>
[start of src/registrar/management/commands/send_domain_invitations.py]
1 """Data migration: Send domain invitations once to existing customers."""
2
3 import logging
4 import copy
5
6 from django.core.management import BaseCommand
7 from registrar.models import TransitionDomain
8 from ...utility.email import send_templated_email, EmailSendingError
9 from typing import List
10
11 logger = logging.getLogger(__name__)
12
13
14 class Command(BaseCommand):
15 help = "Send domain invitations once to existing customers."
16
17 # this array is used to store and process the transition_domains
18 transition_domains: List[str] = []
19 # this array is used to store domains with errors, which are not
20 # sent emails; this array is used to update the successful
21 # transition_domains to email_sent=True, and also to report
22 # out errors
23 domains_with_errors: List[str] = []
24 # this array is used to store email_context; each item in the array
25 # contains the context for a single email; single emails may be 1
26 # or more transition_domains, as they are grouped by username
27 emails_to_send: List[str] = []
28
29 def add_arguments(self, parser):
30 """Add command line arguments."""
31 parser.add_argument(
32 "-s",
33 "--send_emails",
34 action="store_true",
35 default=False,
36 dest="send_emails",
37 help="Send emails ",
38 )
39
40 def handle(self, **options):
41 """Process the objects in TransitionDomain."""
42
43 logger.info("checking domains and preparing emails")
44 # Get all TransitionDomain objects
45 self.transition_domains = TransitionDomain.objects.filter(
46 email_sent=False,
47 ).order_by("username")
48 logger.info("Found %d transition domains", len(self.transition_domains))
49
50 self.build_emails_to_send_array()
51 logger.info("Prepared %d emails to send", len(self.emails_to_send))
52
53 if options["send_emails"]:
54 logger.info("about to send emails")
55 self.send_emails()
56 logger.info("done sending emails")
57
58 self.update_domains_as_sent()
59
60 logger.info("done sending emails and updating transition_domains")
61 else:
62 logger.info("not sending emails")
63 for email_context in self.emails_to_send:
64 logger.info(
65 "would send email to %s for %s",
66 email_context["email"],
67 email_context["domains"],
68 )
69
70 def build_emails_to_send_array(self):
71 """this method sends emails to distinct usernames"""
72
73 # data structure to hold email context for a single email;
74 # transition_domains ordered by username, a single email_context
75 # may include information from more than one transition_domain
76 email_context = {"email": ""}
77
78 # loop through all transition_domains; group them by username
79 # into emails_to_send_array
80 for transition_domain in self.transition_domains:
81 # attempt to get the domain from domain objects; if there is
82 # an error getting the domain, skip this domain and add it to
83 # domains_with_errors
84 try:
85 # if prior username does not match current username
86 if not email_context["email"] or email_context["email"] != transition_domain.username:
87 # if not first in list of transition_domains
88 if email_context["email"]:
89 # append the email context to the emails_to_send array
90 self.emails_to_send.append(copy.deepcopy(email_context))
91 email_context["domains"] = []
92 email_context["email"] = transition_domain.username
93 email_context["domains"].append(transition_domain.domain_name)
94 except Exception as err:
95 # error condition if domain not in database
96 self.domains_with_errors.append(copy.deepcopy(transition_domain.domain_name))
97 logger.error(f"error retrieving domain {transition_domain.domain_name}: {err}")
98 # if there are at least one more transition domains than errors,
99 # then append one more item
100 if len(self.transition_domains) > len(self.domains_with_errors):
101 self.emails_to_send.append(email_context)
102
103 def send_emails(self):
104 if len(self.emails_to_send) > 0:
105 for email_data in self.emails_to_send:
106 self.send_email(email_data)
107 else:
108 logger.info("no emails to send")
109
110 def send_email(self, email_data):
111 try:
112 send_templated_email(
113 "emails/transition_domain_invitation.txt",
114 "emails/transition_domain_invitation_subject.txt",
115 to_address=email_data["email"],
116 context={
117 "domains": email_data["domains"],
118 },
119 )
120 # success message is logged
121 logger.info(
122 f"email sent successfully to {email_data['email']} for "
123 f"{[domain for domain in email_data['domains']]}"
124 )
125 except EmailSendingError as err:
126 logger.error(
127 f"email did not send successfully to {email_data['email']} "
128 f"for {[domain for domain in email_data['domains']]}"
129 f": {err}"
130 )
131 # if email failed to send, set error in domains_with_errors for each
132 # domain in the email so that transition domain email_sent is not set
133 # to True
134 for domain in email_data["domains"]:
135 self.domains_with_errors.append(domain)
136
137 def update_domains_as_sent(self):
138 """set email_sent to True in all transition_domains which have
139 been processed successfully"""
140 for transition_domain in self.transition_domains:
141 if transition_domain.domain_name not in self.domains_with_errors:
142 transition_domain.email_sent = True
143 transition_domain.save()
144
[end of src/registrar/management/commands/send_domain_invitations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/management/commands/send_domain_invitations.py b/src/registrar/management/commands/send_domain_invitations.py
--- a/src/registrar/management/commands/send_domain_invitations.py
+++ b/src/registrar/management/commands/send_domain_invitations.py
@@ -37,14 +37,24 @@
help="Send emails ",
)
+ parser.add_argument("emails", nargs="*", help="Email addresses to send invitations to")
+
def handle(self, **options):
"""Process the objects in TransitionDomain."""
logger.info("checking domains and preparing emails")
- # Get all TransitionDomain objects
- self.transition_domains = TransitionDomain.objects.filter(
- email_sent=False,
- ).order_by("username")
+
+ if options["emails"]:
+ # this option is a list of email addresses
+ self.transition_domains = TransitionDomain.objects.filter(
+ username__in=options["emails"],
+ email_sent=False,
+ ).order_by("username")
+ else:
+ # Get all TransitionDomain objects
+ self.transition_domains = TransitionDomain.objects.filter(
+ email_sent=False,
+ ).order_by("username")
logger.info("Found %d transition domains", len(self.transition_domains))
self.build_emails_to_send_array()
| {"golden_diff": "diff --git a/src/registrar/management/commands/send_domain_invitations.py b/src/registrar/management/commands/send_domain_invitations.py\n--- a/src/registrar/management/commands/send_domain_invitations.py\n+++ b/src/registrar/management/commands/send_domain_invitations.py\n@@ -37,14 +37,24 @@\n help=\"Send emails \",\n )\n \n+ parser.add_argument(\"emails\", nargs=\"*\", help=\"Email addresses to send invitations to\")\n+\n def handle(self, **options):\n \"\"\"Process the objects in TransitionDomain.\"\"\"\n \n logger.info(\"checking domains and preparing emails\")\n- # Get all TransitionDomain objects\n- self.transition_domains = TransitionDomain.objects.filter(\n- email_sent=False,\n- ).order_by(\"username\")\n+\n+ if options[\"emails\"]:\n+ # this option is a list of email addresses\n+ self.transition_domains = TransitionDomain.objects.filter(\n+ username__in=options[\"emails\"],\n+ email_sent=False,\n+ ).order_by(\"username\")\n+ else:\n+ # Get all TransitionDomain objects\n+ self.transition_domains = TransitionDomain.objects.filter(\n+ email_sent=False,\n+ ).order_by(\"username\")\n logger.info(\"Found %d transition domains\", len(self.transition_domains))\n \n self.build_emails_to_send_array()\n", "issue": "Modify domain invitation script to process \"friends of the show\" first, then incrementally process others\n### Issue description\n\nWe have a few domain managers that we'll invite to the registrar first. Let's modify the domain invitation script to send to a defined set of domains first. \r\n\r\nWe also shouldn't blast thousands of emails out to the internet, but incrementally roll them out.\n\n### Acceptance criteria\n\n- [ ] Invitation script works with a product owner-specified list of domains/contacts (before sending to everyone else)\r\n- [ ] Script slow rolls out invitations. 
Could be percentage-based (1/2/5/10/20/45/75/100) or volume-based (a few hundred at a time)\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\n\ud83d\udd04 Related to PR #1038\n", "before_files": [{"content": "\"\"\"Data migration: Send domain invitations once to existing customers.\"\"\"\n\nimport logging\nimport copy\n\nfrom django.core.management import BaseCommand\nfrom registrar.models import TransitionDomain\nfrom ...utility.email import send_templated_email, EmailSendingError\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = \"Send domain invitations once to existing customers.\"\n\n # this array is used to store and process the transition_domains\n transition_domains: List[str] = []\n # this array is used to store domains with errors, which are not\n # sent emails; this array is used to update the succesful\n # transition_domains to email_sent=True, and also to report\n # out errors\n domains_with_errors: List[str] = []\n # this array is used to store email_context; each item in the array\n # contains the context for a single email; single emails may be 1\n # or more transition_domains, as they are grouped by username\n emails_to_send: List[str] = []\n\n def add_arguments(self, parser):\n \"\"\"Add command line arguments.\"\"\"\n parser.add_argument(\n \"-s\",\n \"--send_emails\",\n action=\"store_true\",\n default=False,\n dest=\"send_emails\",\n help=\"Send emails \",\n )\n\n def handle(self, **options):\n \"\"\"Process the objects in TransitionDomain.\"\"\"\n\n logger.info(\"checking domains and preparing emails\")\n # Get all TransitionDomain objects\n self.transition_domains = TransitionDomain.objects.filter(\n email_sent=False,\n ).order_by(\"username\")\n logger.info(\"Found %d transition domains\", len(self.transition_domains))\n\n self.build_emails_to_send_array()\n logger.info(\"Prepared %d emails to send\", len(self.emails_to_send))\n\n if options[\"send_emails\"]:\n logger.info(\"about to send emails\")\n self.send_emails()\n logger.info(\"done sending emails\")\n\n self.update_domains_as_sent()\n\n logger.info(\"done sending emails and updating transition_domains\")\n else:\n logger.info(\"not sending emails\")\n for email_context in self.emails_to_send:\n logger.info(\n \"would send email to %s for %s\",\n email_context[\"email\"],\n email_context[\"domains\"],\n )\n\n def build_emails_to_send_array(self):\n \"\"\"this method sends emails to distinct usernames\"\"\"\n\n # data structure to hold email context for a single email;\n # transition_domains ordered by username, a single email_context\n # may include information from more than one transition_domain\n email_context = {\"email\": \"\"}\n\n # loop through all transition_domains; group them by username\n # into emails_to_send_array\n for transition_domain in self.transition_domains:\n # attempt to get the domain from domain objects; if there is\n # an error getting the domain, skip this domain and add it to\n # domains_with_errors\n try:\n # if prior username does not match current username\n if not email_context[\"email\"] or email_context[\"email\"] != transition_domain.username:\n # if not first in list of transition_domains\n if email_context[\"email\"]:\n # append the email context to the emails_to_send array\n self.emails_to_send.append(copy.deepcopy(email_context))\n email_context[\"domains\"] = []\n email_context[\"email\"] = transition_domain.username\n email_context[\"domains\"].append(transition_domain.domain_name)\n 
except Exception as err:\n # error condition if domain not in database\n self.domains_with_errors.append(copy.deepcopy(transition_domain.domain_name))\n logger.error(f\"error retrieving domain {transition_domain.domain_name}: {err}\")\n # if there are at least one more transition domains than errors,\n # then append one more item\n if len(self.transition_domains) > len(self.domains_with_errors):\n self.emails_to_send.append(email_context)\n\n def send_emails(self):\n if len(self.emails_to_send) > 0:\n for email_data in self.emails_to_send:\n self.send_email(email_data)\n else:\n logger.info(\"no emails to send\")\n\n def send_email(self, email_data):\n try:\n send_templated_email(\n \"emails/transition_domain_invitation.txt\",\n \"emails/transition_domain_invitation_subject.txt\",\n to_address=email_data[\"email\"],\n context={\n \"domains\": email_data[\"domains\"],\n },\n )\n # success message is logged\n logger.info(\n f\"email sent successfully to {email_data['email']} for \"\n f\"{[domain for domain in email_data['domains']]}\"\n )\n except EmailSendingError as err:\n logger.error(\n f\"email did not send successfully to {email_data['email']} \"\n f\"for {[domain for domain in email_data['domains']]}\"\n f\": {err}\"\n )\n # if email failed to send, set error in domains_with_errors for each\n # domain in the email so that transition domain email_sent is not set\n # to True\n for domain in email_data[\"domains\"]:\n self.domains_with_errors.append(domain)\n\n def update_domains_as_sent(self):\n \"\"\"set email_sent to True in all transition_domains which have\n been processed successfully\"\"\"\n for transition_domain in self.transition_domains:\n if transition_domain.domain_name not in self.domains_with_errors:\n transition_domain.email_sent = True\n transition_domain.save()\n", "path": "src/registrar/management/commands/send_domain_invitations.py"}]} | 2,198 | 282 |
gh_patches_debug_21002 | rasdani/github-patches | git_diff | fossasia__open-event-server-4664 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Server is not found locally after #4643
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here; instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-server
After #4643, the server cannot be accessed via the URL `localhost:5000` or by any other means. This has led to the Travis build failing and made it impossible to test things locally as well. Revert this behaviour and reopen the original issue.
</issue>
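The behaviour described above is a well-known Flask gotcha rather than something specific to this code base: once `SERVER_NAME` is pinned to a single host:port, requests whose Host header does not match it can fail to route and return 404. A minimal sketch that reproduces the effect (host and port values are placeholders):

```python
from flask import Flask

app = Flask(__name__)

# With SERVER_NAME pinned, a request whose Host header differs
# (e.g. hitting the app via 0.0.0.0:5000 or a LAN address) may 404
# even though the route exists.
app.config["SERVER_NAME"] = "127.0.0.1:5000"

@app.route("/")
def index():
    return "ok"

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```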
<code>
[start of config.py]
1 # -*- coding: utf-8 -*-
2 import os
3 from envparse import env
4
5 env.read_envfile()
6
7 basedir = os.path.abspath(os.path.dirname(__file__))
8
9 VERSION_NAME = '2.0.0-alpha.1'
10
11 LANGUAGES = {
12 'en': 'English',
13 'bn': 'Bengali/Bangla',
14 'zh_Hans': 'Chinese (Simplified)',
15 'zh_Hant': 'Chinese (Traditional)',
16 'fr': 'French',
17 'de': 'German',
18 'id': 'Indonesian',
19 'ko': 'Korean',
20 'pl': 'Polish',
21 'es': 'Spanish',
22 'th': 'Thai',
23 'vi': 'Vietnamese',
24 'hi': 'Hindi',
25 'ja': 'Japanese',
26 'ru': 'Russian',
27 }
28
29
30 class Config(object):
31 """
32 The base configuration option. Contains the defaults.
33 """
34
35 DEBUG = False
36
37 DEVELOPMENT = False
38 STAGING = False
39 PRODUCTION = False
40 TESTING = False
41
42 CACHING = False
43 PROFILE = False
44 SQLALCHEMY_RECORD_QUERIES = False
45
46 FLASK_ADMIN_SWATCH = 'lumen'
47
48 VERSION = VERSION_NAME
49 SQLALCHEMY_TRACK_MODIFICATIONS = True
50 ERROR_404_HELP = False
51 CSRF_ENABLED = True
52 SERVER_NAME = env('SERVER_NAME', default='127.0.0.1:5000')
53 FALLBACK_PORT = 80
54 CORS_HEADERS = 'Content-Type'
55 SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)
56 SERVE_STATIC = env.bool('SERVE_STATIC', default=False)
57 DATABASE_QUERY_TIMEOUT = 0.1
58 SENTRY_DSN = env('SENTRY_DSN', default=None)
59 ENABLE_ELASTICSEARCH = env.bool('ENABLE_ELASTICSEARCH', default=False)
60 ELASTICSEARCH_HOST = env('ELASTICSEARCH_HOST', default='localhost:9200')
61 REDIS_URL = env('REDIS_URL', default='redis://localhost:6379/0')
62
63 # API configs
64 SOFT_DELETE = True
65 PROPOGATE_ERROR = env.bool('PROPOGATE_ERROR', default=False)
66 DASHERIZE_API = True
67 API_PROPOGATE_UNCAUGHT_EXCEPTIONS = env.bool('API_PROPOGATE_UNCAUGHT_EXCEPTIONS', default=True)
68 ETAG = True
69
70 if not SQLALCHEMY_DATABASE_URI:
71 print('`DATABASE_URL` either not exported or empty')
72 exit()
73
74 BASE_DIR = basedir
75 FORCE_SSL = os.getenv('FORCE_SSL', 'no') == 'yes'
76
77 if SERVE_STATIC:
78 UPLOADS_FOLDER = BASE_DIR + '/static/uploads/'
79 TEMP_UPLOADS_FOLDER = BASE_DIR + '/static/uploads/temp/'
80 UPLOAD_FOLDER = UPLOADS_FOLDER
81 STATIC_URL = '/static/'
82 STATIC_ROOT = 'staticfiles'
83 STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
84
85 if FORCE_SSL:
86 PREFERRED_URL_SCHEME = 'https'
87
88
89 class ProductionConfig(Config):
90 """
91 The configuration for a production environment
92 """
93
94 MINIFY_PAGE = True
95 PRODUCTION = True
96 CACHING = True
97
98 # if force on
99
100
101 class StagingConfig(ProductionConfig):
102 """
103 The configuration for a staging environment
104 """
105
106 PRODUCTION = False
107 STAGING = True
108
109
110 class DevelopmentConfig(Config):
111 """
112 The configuration for a development environment
113 """
114
115 DEVELOPMENT = True
116 DEBUG = True
117 CACHING = True
118 PROPOGATE_ERROR = True
119
120 # Test database performance
121 SQLALCHEMY_RECORD_QUERIES = True
122
123
124 class TestingConfig(Config):
125 """
126 The configuration for a test suite
127 """
128 TESTING = True
129 CELERY_ALWAYS_EAGER = True
130 CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
131 SQLALCHEMY_RECORD_QUERIES = True
132 DEBUG_TB_ENABLED = False
133 BROKER_BACKEND = 'memory'
134 SQLALCHEMY_DATABASE_URI = env('TEST_DATABASE_URL', default=None)
135 PROPOGATE_ERROR = True
136
[end of config.py]
[start of manage.py]
1 from __future__ import print_function
2 from flask_script import Server
3 import os
4
5 from app.api.helpers.db import save_to_db
6 from app.models.event import Event, get_new_event_identifier
7 from app import manager
8 from app import current_app as app
9 from app.models import db
10 from app.models.speaker import Speaker
11 from populate_db import populate
12 from flask_migrate import stamp
13 from sqlalchemy.engine import reflection
14
15 from tests.unittests.auth_helper import create_super_admin
16
17
18 @manager.command
19 def list_routes():
20 import urllib
21
22 output = []
23 for rule in app.url_map.iter_rules():
24 methods = ','.join(rule.methods)
25 line = urllib.unquote("{:50s} {:20s} {}".format(
26 rule.endpoint, methods, rule))
27 output.append(line)
28
29 for line in sorted(output):
30 print(line)
31
32
33 @manager.command
34 def add_event_identifier():
35 events = Event.query.all()
36 for event in events:
37 event.identifier = get_new_event_identifier()
38 save_to_db(event)
39
40
41 @manager.option('-e', '--event', help='Event ID. Eg. 1')
42 def fix_speaker_images(event):
43 from app.helpers.sessions_speakers.speakers import speaker_image_sizes
44 from app.helpers.sessions_speakers.speakers import save_resized_photo
45 import urllib
46 from app.helpers.storage import generate_hash
47 event_id = int(event)
48 image_sizes = speaker_image_sizes()
49 speakers = Speaker.query.filter_by(event_id=event_id).all()
50 for speaker in speakers:
51 if speaker.photo and speaker.photo.strip() != '':
52 file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'
53 file_path = app.config['BASE_DIR'] + '/' + file_relative_path
54 urllib.urlretrieve(speaker.photo, file_path)
55 speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)
56 speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)
57 speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)
58 db.session.add(speaker)
59 os.remove(file_path)
60 print("Downloaded " + speaker.photo + " into " + file_relative_path)
61 print("Processed - " + str(speaker.id))
62 db.session.commit()
63
64
65 @manager.option('-c', '--credentials', help='Super admin credentials. Eg. username:password')
66 def initialize_db(credentials):
67 with app.app_context():
68 populate_data = True
69 inspector = reflection.Inspector.from_engine(db.engine)
70 table_name = 'events'
71 table_names = inspector.get_table_names()
72 print("[LOG] Existing tables:")
73 print("[LOG] " + ','.join(table_names))
74 if table_name not in table_names:
75 print("[LOG] Table not found. Attempting creation")
76 try:
77 db.create_all()
78 stamp()
79 except:
80 populate_data = False
81 print("[LOG] Could not create tables. Either database does not exist or tables already created")
82 if populate_data:
83 credentials = credentials.split(":")
84 create_super_admin(credentials[0], credentials[1])
85 populate()
86 else:
87 print("[LOG] Tables already exist. Skipping data population & creation.")
88
89
90 @manager.command
91 def prepare_kubernetes_db():
92 with app.app_context():
93 initialize_db('[email protected]:fossasia')
94
95
96 if __name__ == "__main__":
97 host_port = app.config['SERVER_NAME'].split(":")
98 host = host_port[0]
99 port = int(host_port[1] if len(host_port) > 1 else app.config['FALLBACK_PORT'])
100 manager.add_command("runserver", Server(host=host, port=port))
101 manager.run()
102
[end of manage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -49,8 +49,7 @@
SQLALCHEMY_TRACK_MODIFICATIONS = True
ERROR_404_HELP = False
CSRF_ENABLED = True
- SERVER_NAME = env('SERVER_NAME', default='127.0.0.1:5000')
- FALLBACK_PORT = 80
+ SERVER_NAME = env('SERVER_NAME', default=None)
CORS_HEADERS = 'Content-Type'
SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)
SERVE_STATIC = env.bool('SERVE_STATIC', default=False)
diff --git a/manage.py b/manage.py
--- a/manage.py
+++ b/manage.py
@@ -1,5 +1,4 @@
from __future__ import print_function
-from flask_script import Server
import os
from app.api.helpers.db import save_to_db
@@ -94,8 +93,4 @@
if __name__ == "__main__":
- host_port = app.config['SERVER_NAME'].split(":")
- host = host_port[0]
- port = int(host_port[1] if len(host_port) > 1 else app.config['FALLBACK_PORT'])
- manager.add_command("runserver", Server(host=host, port=port))
manager.run()
| {"golden_diff": "diff --git a/config.py b/config.py\n--- a/config.py\n+++ b/config.py\n@@ -49,8 +49,7 @@\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n ERROR_404_HELP = False\n CSRF_ENABLED = True\n- SERVER_NAME = env('SERVER_NAME', default='127.0.0.1:5000')\n- FALLBACK_PORT = 80\n+ SERVER_NAME = env('SERVER_NAME', default=None)\n CORS_HEADERS = 'Content-Type'\n SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)\n SERVE_STATIC = env.bool('SERVE_STATIC', default=False)\ndiff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -1,5 +1,4 @@\n from __future__ import print_function\n-from flask_script import Server\n import os\n \n from app.api.helpers.db import save_to_db\n@@ -94,8 +93,4 @@\n \n \n if __name__ == \"__main__\":\n- host_port = app.config['SERVER_NAME'].split(\":\")\n- host = host_port[0]\n- port = int(host_port[1] if len(host_port) > 1 else app.config['FALLBACK_PORT'])\n- manager.add_command(\"runserver\", Server(host=host, port=port))\n manager.run()\n", "issue": "Server is not found locally after #4643\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\nAfter #4643, the server cannot be accessed via the URL `localhost:5000` or any other means. This has lead to Travis build failing and inability to test things locally as well. Revert this behaviour and reopen the original issue\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom envparse import env\n\nenv.read_envfile()\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nVERSION_NAME = '2.0.0-alpha.1'\n\nLANGUAGES = {\n 'en': 'English',\n 'bn': 'Bengali/Bangla',\n 'zh_Hans': 'Chinese (Simplified)',\n 'zh_Hant': 'Chinese (Traditional)',\n 'fr': 'French',\n 'de': 'German',\n 'id': 'Indonesian',\n 'ko': 'Korean',\n 'pl': 'Polish',\n 'es': 'Spanish',\n 'th': 'Thai',\n 'vi': 'Vietnamese',\n 'hi': 'Hindi',\n 'ja': 'Japanese',\n 'ru': 'Russian',\n}\n\n\nclass Config(object):\n \"\"\"\n The base configuration option. 
Contains the defaults.\n \"\"\"\n\n DEBUG = False\n\n DEVELOPMENT = False\n STAGING = False\n PRODUCTION = False\n TESTING = False\n\n CACHING = False\n PROFILE = False\n SQLALCHEMY_RECORD_QUERIES = False\n\n FLASK_ADMIN_SWATCH = 'lumen'\n\n VERSION = VERSION_NAME\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n ERROR_404_HELP = False\n CSRF_ENABLED = True\n SERVER_NAME = env('SERVER_NAME', default='127.0.0.1:5000')\n FALLBACK_PORT = 80\n CORS_HEADERS = 'Content-Type'\n SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', default=None)\n SERVE_STATIC = env.bool('SERVE_STATIC', default=False)\n DATABASE_QUERY_TIMEOUT = 0.1\n SENTRY_DSN = env('SENTRY_DSN', default=None)\n ENABLE_ELASTICSEARCH = env.bool('ENABLE_ELASTICSEARCH', default=False)\n ELASTICSEARCH_HOST = env('ELASTICSEARCH_HOST', default='localhost:9200')\n REDIS_URL = env('REDIS_URL', default='redis://localhost:6379/0')\n\n # API configs\n SOFT_DELETE = True\n PROPOGATE_ERROR = env.bool('PROPOGATE_ERROR', default=False)\n DASHERIZE_API = True\n API_PROPOGATE_UNCAUGHT_EXCEPTIONS = env.bool('API_PROPOGATE_UNCAUGHT_EXCEPTIONS', default=True)\n ETAG = True\n\n if not SQLALCHEMY_DATABASE_URI:\n print('`DATABASE_URL` either not exported or empty')\n exit()\n\n BASE_DIR = basedir\n FORCE_SSL = os.getenv('FORCE_SSL', 'no') == 'yes'\n\n if SERVE_STATIC:\n UPLOADS_FOLDER = BASE_DIR + '/static/uploads/'\n TEMP_UPLOADS_FOLDER = BASE_DIR + '/static/uploads/temp/'\n UPLOAD_FOLDER = UPLOADS_FOLDER\n STATIC_URL = '/static/'\n STATIC_ROOT = 'staticfiles'\n STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)\n\n if FORCE_SSL:\n PREFERRED_URL_SCHEME = 'https'\n\n\nclass ProductionConfig(Config):\n \"\"\"\n The configuration for a production environment\n \"\"\"\n\n MINIFY_PAGE = True\n PRODUCTION = True\n CACHING = True\n\n # if force on\n\n\nclass StagingConfig(ProductionConfig):\n \"\"\"\n The configuration for a staging environment\n \"\"\"\n\n PRODUCTION = False\n STAGING = True\n\n\nclass DevelopmentConfig(Config):\n \"\"\"\n The configuration for a development environment\n \"\"\"\n\n DEVELOPMENT = True\n DEBUG = True\n CACHING = True\n PROPOGATE_ERROR = True\n\n # Test database performance\n SQLALCHEMY_RECORD_QUERIES = True\n\n\nclass TestingConfig(Config):\n \"\"\"\n The configuration for a test suit\n \"\"\"\n TESTING = True\n CELERY_ALWAYS_EAGER = True\n CELERY_EAGER_PROPAGATES_EXCEPTIONS = True\n SQLALCHEMY_RECORD_QUERIES = True\n DEBUG_TB_ENABLED = False\n BROKER_BACKEND = 'memory'\n SQLALCHEMY_DATABASE_URI = env('TEST_DATABASE_URL', default=None)\n PROPOGATE_ERROR = True\n", "path": "config.py"}, {"content": "from __future__ import print_function\nfrom flask_script import Server\nimport os\n\nfrom app.api.helpers.db import save_to_db\nfrom app.models.event import Event, get_new_event_identifier\nfrom app import manager\nfrom app import current_app as app\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom populate_db import populate\nfrom flask_migrate import stamp\nfrom sqlalchemy.engine import reflection\n\nfrom tests.unittests.auth_helper import create_super_admin\n\n\[email protected]\ndef list_routes():\n import urllib\n\n output = []\n for rule in app.url_map.iter_rules():\n methods = ','.join(rule.methods)\n line = urllib.unquote(\"{:50s} {:20s} {}\".format(\n rule.endpoint, methods, rule))\n output.append(line)\n\n for line in sorted(output):\n print(line)\n\n\[email protected]\ndef add_event_identifier():\n events = Event.query.all()\n for event in events:\n event.identifier = get_new_event_identifier()\n 
save_to_db(event)\n\n\[email protected]('-e', '--event', help='Event ID. Eg. 1')\ndef fix_speaker_images(event):\n from app.helpers.sessions_speakers.speakers import speaker_image_sizes\n from app.helpers.sessions_speakers.speakers import save_resized_photo\n import urllib\n from app.helpers.storage import generate_hash\n event_id = int(event)\n image_sizes = speaker_image_sizes()\n speakers = Speaker.query.filter_by(event_id=event_id).all()\n for speaker in speakers:\n if speaker.photo and speaker.photo.strip() != '':\n file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'\n file_path = app.config['BASE_DIR'] + '/' + file_relative_path\n urllib.urlretrieve(speaker.photo, file_path)\n speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)\n speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)\n speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)\n db.session.add(speaker)\n os.remove(file_path)\n print(\"Downloaded \" + speaker.photo + \" into \" + file_relative_path)\n print(\"Processed - \" + str(speaker.id))\n db.session.commit()\n\n\[email protected]('-c', '--credentials', help='Super admin credentials. Eg. username:password')\ndef initialize_db(credentials):\n with app.app_context():\n populate_data = True\n inspector = reflection.Inspector.from_engine(db.engine)\n table_name = 'events'\n table_names = inspector.get_table_names()\n print(\"[LOG] Existing tables:\")\n print(\"[LOG] \" + ','.join(table_names))\n if table_name not in table_names:\n print(\"[LOG] Table not found. Attempting creation\")\n try:\n db.create_all()\n stamp()\n except:\n populate_data = False\n print(\"[LOG] Could not create tables. Either database does not exist or tables already created\")\n if populate_data:\n credentials = credentials.split(\":\")\n create_super_admin(credentials[0], credentials[1])\n populate()\n else:\n print(\"[LOG] Tables already exist. Skipping data population & creation.\")\n\n\[email protected]\ndef prepare_kubernetes_db():\n with app.app_context():\n initialize_db('[email protected]:fossasia')\n\n\nif __name__ == \"__main__\":\n host_port = app.config['SERVER_NAME'].split(\":\")\n host = host_port[0]\n port = int(host_port[1] if len(host_port) > 1 else app.config['FALLBACK_PORT'])\n manager.add_command(\"runserver\", Server(host=host, port=port))\n manager.run()\n", "path": "manage.py"}]} | 2,901 | 294 |
gh_patches_debug_11456 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SEARCH_PATH for Geotrek DB user
Since Geotrek 0.28, tables and functions have been moved to different schemas, which is a very good point (https://github.com/makinacorpus/Geotrek/releases/tag/v0.28.0).
Schemas are not mentioned in triggers, which is OK too, as Django sets the search_path in its connection options, so it is not a problem for Geotrek applications.
It becomes a problem when you try to edit or insert data from an external tool (QGIS, Talend...).
You have to change the db_user search_path so that it can find tables and functions outside the public schema as well.
It could be interesting to do this during Geotrek installation for the Geotrek DB user mentioned in settings:
ALTER USER $geotrek_db_user SET
search_path=public,django,geotrek,gestion,rando,zonage,foncier,tourisme;
Of course, if you are using another user to edit data in external tools, you will have to do it manually the first time.
</issue>
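For reference, here is a minimal sketch of applying such a search_path change from Python with psycopg2; the connection parameters, role name, database name and schema list are placeholders to adapt to the actual installation, and the statement is scoped to one database rather than set cluster-wide.

```python
import psycopg2

# Placeholder connection details, role, database and schema list: adjust to your setup.
conn = psycopg2.connect(dbname="geotrekdb", user="postgres", host="localhost")
conn.autocommit = True  # apply the ALTER immediately, without an explicit commit
with conn.cursor() as cur:
    cur.execute(
        "ALTER ROLE geotrek_user IN DATABASE geotrekdb "
        "SET search_path = public,django,geotrek,gestion,rando,zonage,foncier,tourisme;"
    )
conn.close()
```

Scoping the setting with IN DATABASE keeps the altered search_path from leaking into other databases that the same role can access.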
<code>
[start of geotrek/common/utils/postgresql.py]
1 import re
2 import os
3 import logging
4 import traceback
5 from functools import wraps
6
7 from django.db import connection, models
8 from django.conf import settings
9 from django.db.models import get_app, get_models
10
11
12 logger = logging.getLogger(__name__)
13
14
15 def debug_pg_notices(f):
16
17 @wraps(f)
18 def wrapped(*args, **kwargs):
19 before = len(connection.connection.notices) if connection.connection else 0
20 try:
21 r = f(*args, **kwargs)
22 finally:
23 # Show triggers output
24 allnotices = []
25 current = ''
26 if connection.connection:
27 notices = []
28 for notice in connection.connection.notices[before:]:
29 try:
30 notice, context = notice.split('CONTEXT:', 1)
31 context = re.sub("\s+", " ", context)
32 except ValueError:
33 context = ''
34 notices.append((context, notice))
35 if context != current:
36 allnotices.append(notices)
37 notices = []
38 current = context
39 allnotices.append(notices)
40 current = ''
41 for notices in allnotices:
42 for context, notice in notices:
43 if context != current:
44 if context != '':
45 logger.debug('Context %s...:' % context.strip()[:80])
46 current = context
47 notice = notice.replace('NOTICE: ', '')
48 prefix = '' if context == '' else ' '
49 logger.debug('%s%s' % (prefix, notice.strip()))
50 return r
51
52 return wrapped
53
54
55 def load_sql_files(app_label):
56 """
57 Look for SQL files in Django app, and load them into database.
58 We remove RAISE NOTICE instructions from SQL outside unit testing
59 since they lead to interpolation errors of '%' character in python.
60 """
61 app_dir = os.path.dirname(models.get_app(app_label).__file__)
62 sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
63 if not os.path.exists(sql_dir):
64 logger.debug("No SQL folder for %s" % app_label)
65 return
66
67 r = re.compile(r'^.*\.sql$')
68 sql_files = [os.path.join(sql_dir, f)
69 for f in os.listdir(sql_dir)
70 if r.match(f) is not None]
71 sql_files.sort()
72
73 if len(sql_files) == 0:
74 logger.warning("Empty folder %s" % sql_dir)
75
76 cursor = connection.cursor()
77 for sql_file in sql_files:
78 try:
79 logger.info("Loading initial SQL data from '%s'" % sql_file)
80 f = open(sql_file)
81 sql = f.read()
82 f.close()
83 if not settings.TEST:
84 # Remove RAISE NOTICE (/!\ only one-liners)
85 sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
86 # TODO: this is the ugliest driver hack ever
87 sql = sql.replace('%', '%%')
88
89 # Replace curly braces with settings values
90 pattern = re.compile(r'{{\s*(.*)\s*}}')
91 for m in pattern.finditer(sql):
92 value = getattr(settings, m.group(1))
93 sql = sql.replace(m.group(0), unicode(value))
94 cursor.execute(sql)
95 except Exception as e:
96 logger.critical("Failed to install custom SQL file '%s': %s\n" %
97 (sql_file, e))
98 traceback.print_exc()
99 raise
100
101
102 def move_models_to_schemas(app_label):
103 """
104 Move models tables to PostgreSQL schemas.
105
106 Views, functions and triggers will be moved in Geotrek app SQL files.
107 """
108 app = get_app(app_label)
109 default_schema = settings.DATABASE_SCHEMAS.get('default')
110 app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)
111
112 table_schemas = {}
113 for model in get_models(app):
114 model_name = model._meta.module_name
115 table_name = model._meta.db_table
116 model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
117 table_schemas.setdefault(model_schema, []).append(table_name)
118
119 for m2m_field in model._meta.many_to_many:
120 table_name = m2m_field.db_table
121 if table_name:
122 table_schemas[model_schema].append(table_name)
123
124 cursor = connection.cursor()
125
126 for schema_name in table_schemas.keys():
127 try:
128 sql = "CREATE SCHEMA %s;" % model_schema
129 cursor.execute(sql)
130 logger.info("Created schema %s" % model_schema)
131 except Exception:
132 logger.debug("Schema %s already exists." % model_schema)
133
134 for schema_name, tables in table_schemas.items():
135 for table_name in tables:
136 try:
137 sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
138 cursor.execute(sql)
139 logger.info("Moved %s to schema %s" % (table_name, schema_name))
140 except Exception:
141 logger.debug("Table %s already in schema %s" % (table_name, schema_name))
142
143 # For Django, search_path is set in connection options.
144 # But when accessing the database using QGis or ETL, search_path must be
145 # set database level (for all users, and for this database only).
146 if app_label == 'common':
147 dbname = settings.DATABASES['default']['NAME']
148 search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))
149 sql = "ALTER DATABASE %s SET search_path=%s;" % (dbname, search_path)
150 cursor.execute(sql)
151
[end of geotrek/common/utils/postgresql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/common/utils/postgresql.py b/geotrek/common/utils/postgresql.py
--- a/geotrek/common/utils/postgresql.py
+++ b/geotrek/common/utils/postgresql.py
@@ -145,6 +145,7 @@
# set database level (for all users, and for this database only).
if app_label == 'common':
dbname = settings.DATABASES['default']['NAME']
+ dbuser = settings.DATABASES['default']['USER']
search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))
- sql = "ALTER DATABASE %s SET search_path=%s;" % (dbname, search_path)
+ sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (dbuser, dbname, search_path)
cursor.execute(sql)
| {"golden_diff": "diff --git a/geotrek/common/utils/postgresql.py b/geotrek/common/utils/postgresql.py\n--- a/geotrek/common/utils/postgresql.py\n+++ b/geotrek/common/utils/postgresql.py\n@@ -145,6 +145,7 @@\n # set database level (for all users, and for this database only).\n if app_label == 'common':\n dbname = settings.DATABASES['default']['NAME']\n+ dbuser = settings.DATABASES['default']['USER']\n search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))\n- sql = \"ALTER DATABASE %s SET search_path=%s;\" % (dbname, search_path)\n+ sql = \"ALTER ROLE %s IN DATABASE %s SET search_path=%s;\" % (dbuser, dbname, search_path)\n cursor.execute(sql)\n", "issue": "SEARCH_PATH for Geotrek DB user\nSince Geotrek 0.28, tables and functions have be moved to different schemas, which is a very good point (https://github.com/makinacorpus/Geotrek/releases/tag/v0.28.0).\n\nSchemas are not mentionned in triggers which is OK too, as Django is doing it in his connexions so it is not a problem for GEOTREK applications.\n\nIt gets a problem when you try to edit or insert a data from an external tool (QGIS, Talend...). \nYou have to change the db_user search_path so that he can find tables and functions not only in public schemas.\n\nIt could be interesting to do it during GEOTREK installation for the Geotrek DB user mentionned in settings : \n\nALTER USER $geotrek_db_user SET \nsearch_path=public,django,geotrek,gestion,rando,zonage,foncier,tourisme; \n\nOf course if you are using another user to edit datas in external tools, you will have to do it manually the first time. \n\n", "before_files": [{"content": "import re\nimport os\nimport logging\nimport traceback\nfrom functools import wraps\n\nfrom django.db import connection, models\nfrom django.conf import settings\nfrom django.db.models import get_app, get_models\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef debug_pg_notices(f):\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n before = len(connection.connection.notices) if connection.connection else 0\n try:\n r = f(*args, **kwargs)\n finally:\n # Show triggers output\n allnotices = []\n current = ''\n if connection.connection:\n notices = []\n for notice in connection.connection.notices[before:]:\n try:\n notice, context = notice.split('CONTEXT:', 1)\n context = re.sub(\"\\s+\", \" \", context)\n except ValueError:\n context = ''\n notices.append((context, notice))\n if context != current:\n allnotices.append(notices)\n notices = []\n current = context\n allnotices.append(notices)\n current = ''\n for notices in allnotices:\n for context, notice in notices:\n if context != current:\n if context != '':\n logger.debug('Context %s...:' % context.strip()[:80])\n current = context\n notice = notice.replace('NOTICE: ', '')\n prefix = '' if context == '' else ' '\n logger.debug('%s%s' % (prefix, notice.strip()))\n return r\n\n return wrapped\n\n\ndef load_sql_files(app_label):\n \"\"\"\n Look for SQL files in Django app, and load them into database.\n We remove RAISE NOTICE instructions from SQL outside unit testing\n since they lead to interpolation errors of '%' character in python.\n \"\"\"\n app_dir = os.path.dirname(models.get_app(app_label).__file__)\n sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))\n if not os.path.exists(sql_dir):\n logger.debug(\"No SQL folder for %s\" % app_label)\n return\n\n r = re.compile(r'^.*\\.sql$')\n sql_files = [os.path.join(sql_dir, f)\n for f in os.listdir(sql_dir)\n if r.match(f) is not None]\n sql_files.sort()\n\n if len(sql_files) == 
0:\n logger.warning(\"Empty folder %s\" % sql_dir)\n\n cursor = connection.cursor()\n for sql_file in sql_files:\n try:\n logger.info(\"Loading initial SQL data from '%s'\" % sql_file)\n f = open(sql_file)\n sql = f.read()\n f.close()\n if not settings.TEST:\n # Remove RAISE NOTICE (/!\\ only one-liners)\n sql = re.sub(r\"\\n.*RAISE NOTICE.*\\n\", \"\\n\", sql)\n # TODO: this is the ugliest driver hack ever\n sql = sql.replace('%', '%%')\n\n # Replace curly braces with settings values\n pattern = re.compile(r'{{\\s*(.*)\\s*}}')\n for m in pattern.finditer(sql):\n value = getattr(settings, m.group(1))\n sql = sql.replace(m.group(0), unicode(value))\n cursor.execute(sql)\n except Exception as e:\n logger.critical(\"Failed to install custom SQL file '%s': %s\\n\" %\n (sql_file, e))\n traceback.print_exc()\n raise\n\n\ndef move_models_to_schemas(app_label):\n \"\"\"\n Move models tables to PostgreSQL schemas.\n\n Views, functions and triggers will be moved in Geotrek app SQL files.\n \"\"\"\n app = get_app(app_label)\n default_schema = settings.DATABASE_SCHEMAS.get('default')\n app_schema = settings.DATABASE_SCHEMAS.get(app_label, default_schema)\n\n table_schemas = {}\n for model in get_models(app):\n model_name = model._meta.module_name\n table_name = model._meta.db_table\n model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)\n table_schemas.setdefault(model_schema, []).append(table_name)\n\n for m2m_field in model._meta.many_to_many:\n table_name = m2m_field.db_table\n if table_name:\n table_schemas[model_schema].append(table_name)\n\n cursor = connection.cursor()\n\n for schema_name in table_schemas.keys():\n try:\n sql = \"CREATE SCHEMA %s;\" % model_schema\n cursor.execute(sql)\n logger.info(\"Created schema %s\" % model_schema)\n except Exception:\n logger.debug(\"Schema %s already exists.\" % model_schema)\n\n for schema_name, tables in table_schemas.items():\n for table_name in tables:\n try:\n sql = \"ALTER TABLE %s SET SCHEMA %s;\" % (table_name, schema_name)\n cursor.execute(sql)\n logger.info(\"Moved %s to schema %s\" % (table_name, schema_name))\n except Exception:\n logger.debug(\"Table %s already in schema %s\" % (table_name, schema_name))\n\n # For Django, search_path is set in connection options.\n # But when accessing the database using QGis or ETL, search_path must be\n # set database level (for all users, and for this database only).\n if app_label == 'common':\n dbname = settings.DATABASES['default']['NAME']\n search_path = 'public,%s' % ','.join(set(settings.DATABASE_SCHEMAS.values()))\n sql = \"ALTER DATABASE %s SET search_path=%s;\" % (dbname, search_path)\n cursor.execute(sql)\n", "path": "geotrek/common/utils/postgresql.py"}]} | 2,316 | 185 |
gh_patches_debug_40390 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-349 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default shell to bash
Right now, if users are created by jupyterhub with useradd on a stock Ubuntu, there is no default shell. This leads to `/bin/sh` being used rather than bash. When the web terminal is opened, all readline-related actions (tab, up, down) don't work. I know I can configure this in `/etc/default/useradd`, but we just added an option to set the home directory base path. Should we default the shell to bash in the useradd CLI call? Maybe add a config option that defaults to `/bin/bash`. This behavior means that a pretty standard config on Ubuntu will have a (slightly) broken terminal.
I could have sworn that we did something on this previously, when I was debugging things with my Active Directory setup where `SHELL` wasn't being set. But I don't remember what we ended up doing (I may have just set it myself).
Thoughts?
</issue>
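For illustration, a hypothetical sketch of how a configurable default shell could be passed through to `useradd`; the function signature and defaults below are assumptions made for the sketch, not JupyterHub's actual API:

```python
from subprocess import check_call

def add_system_user(name, shell="/bin/bash", home_base=None):
    # Build a useradd invocation; -s sets the login shell so the web
    # terminal gets a readline-capable shell instead of /bin/sh.
    cmd = ["useradd", "-m", "-s", shell]
    if home_base:
        cmd += ["-b", home_base]
    check_call(cmd + [name])
```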
<code>
[start of jupyterhub/auth.py]
1 """Simple PAM authenticator"""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 from grp import getgrnam
7 import pwd
8 from subprocess import check_call, check_output, CalledProcessError
9
10 from tornado import gen
11 import pamela
12
13 from traitlets.config import LoggingConfigurable
14 from traitlets import Bool, Set, Unicode, Any
15
16 from .handlers.login import LoginHandler
17 from .utils import url_path_join
18
19 class Authenticator(LoggingConfigurable):
20 """A class for authentication.
21
22 The API is one method, `authenticate`, a tornado gen.coroutine.
23 """
24
25 db = Any()
26 admin_users = Set(config=True,
27 help="""set of usernames of admin users
28
29 If unspecified, only the user that launches the server will be admin.
30 """
31 )
32 whitelist = Set(config=True,
33 help="""Username whitelist.
34
35 Use this to restrict which users can login.
36 If empty, allow any user to attempt login.
37 """
38 )
39 custom_html = Unicode('',
40 help="""HTML login form for custom handlers.
41 Override in form-based custom authenticators
42 that don't use username+password,
43 or need custom branding.
44 """
45 )
46 login_service = Unicode('',
47 help="""Name of the login service for external
48 login services (e.g. 'GitHub').
49 """
50 )
51
52 @gen.coroutine
53 def authenticate(self, handler, data):
54 """Authenticate a user with login form data.
55
56 This must be a tornado gen.coroutine.
57 It must return the username on successful authentication,
58 and return None on failed authentication.
59 """
60
61 def pre_spawn_start(self, user, spawner):
62 """Hook called before spawning a user's server.
63
64 Can be used to do auth-related startup, e.g. opening PAM sessions.
65 """
66
67 def post_spawn_stop(self, user, spawner):
68 """Hook called after stopping a user container.
69
70 Can be used to do auth-related cleanup, e.g. closing PAM sessions.
71 """
72
73 def check_whitelist(self, user):
74 """
75 Return True if the whitelist is empty or user is in the whitelist.
76 """
77 # Parens aren't necessary here, but they make this easier to parse.
78 return (not self.whitelist) or (user in self.whitelist)
79
80 def add_user(self, user):
81 """Add a new user
82
83 By default, this just adds the user to the whitelist.
84
85 Subclasses may do more extensive things,
86 such as adding actual unix users.
87 """
88 if self.whitelist:
89 self.whitelist.add(user.name)
90
91 def delete_user(self, user):
92 """Triggered when a user is deleted.
93
94 Removes the user from the whitelist.
95 """
96 self.whitelist.discard(user.name)
97
98 def login_url(self, base_url):
99 """Override to register a custom login handler"""
100 return url_path_join(base_url, 'login')
101
102 def logout_url(self, base_url):
103 """Override to register a custom logout handler"""
104 return url_path_join(base_url, 'logout')
105
106 def get_handlers(self, app):
107 """Return any custom handlers the authenticator needs to register
108
109 (e.g. for OAuth)
110 """
111 return [
112 ('/login', LoginHandler),
113 ]
114
115 class LocalAuthenticator(Authenticator):
116 """Base class for Authenticators that work with local *ix users
117
118 Checks for local users, and can attempt to create them if they exist.
119 """
120
121 create_system_users = Bool(False, config=True,
122 help="""If a user is added that doesn't exist on the system,
123 should I try to create the system user?
124 """
125 )
126 system_user_home = Unicode('', config=True,
127 help="""Specify root home directory for users if different from system default.
128
129 Passed to `useradd -b`.
130 If specified, when system users are created their home directories will be created in
131
132 system_user_home/username
133
134 Only has an effect when users are created with `create_system_users=True`.
135 """
136 )
137
138 group_whitelist = Set(
139 config=True,
140 help="Automatically whitelist anyone in this group.",
141 )
142
143 def _group_whitelist_changed(self, name, old, new):
144 if self.whitelist:
145 self.log.warn(
146 "Ignoring username whitelist because group whitelist supplied!"
147 )
148
149 def check_whitelist(self, username):
150 if self.group_whitelist:
151 return self.check_group_whitelist(username)
152 else:
153 return super().check_whitelist(username)
154
155 def check_group_whitelist(self, username):
156 if not self.group_whitelist:
157 return False
158 for grnam in self.group_whitelist:
159 try:
160 group = getgrnam(grnam)
161 except KeyError:
162 self.log.error('No such group: [%s]' % grnam)
163 continue
164 if username in group.gr_mem:
165 return True
166 return False
167
168 @gen.coroutine
169 def add_user(self, user):
170 """Add a new user
171
172 By default, this just adds the user to the whitelist.
173
174 Subclasses may do more extensive things,
175 such as adding actual unix users.
176 """
177 user_exists = yield gen.maybe_future(self.system_user_exists(user))
178 if not user_exists:
179 if self.create_system_users:
180 yield gen.maybe_future(self.add_system_user(user))
181 else:
182 raise KeyError("User %s does not exist." % user.name)
183
184 yield gen.maybe_future(super().add_user(user))
185
186 @staticmethod
187 def system_user_exists(user):
188 """Check if the user exists on the system"""
189 try:
190 pwd.getpwnam(user.name)
191 except KeyError:
192 return False
193 else:
194 return True
195
196 def add_system_user(self, user):
197 """Create a new *ix user on the system. Works on FreeBSD and Linux, at least."""
198 name = user.name
199 for useradd in (
200 ['pw', 'useradd', '-m'],
201 ['useradd', '-m'],
202 ):
203 try:
204 check_output(['which', useradd[0]])
205 except CalledProcessError:
206 continue
207 else:
208 break
209 else:
210 raise RuntimeError("I don't know how to add users on this system.")
211
212 cmd = useradd
213 if self.system_user_home:
214 cmd = cmd + ['-b', self.system_user_home]
215 check_call(cmd + [name])
216
217
218 class PAMAuthenticator(LocalAuthenticator):
219 """Authenticate local *ix users with PAM"""
220 encoding = Unicode('utf8', config=True,
221 help="""The encoding to use for PAM"""
222 )
223 service = Unicode('login', config=True,
224 help="""The PAM service to use for authentication."""
225 )
226
227 @gen.coroutine
228 def authenticate(self, handler, data):
229 """Authenticate with PAM, and return the username if login is successful.
230
231 Return None otherwise.
232 """
233 username = data['username']
234 if not self.check_whitelist(username):
235 return
236 try:
237 pamela.authenticate(username, data['password'], service=self.service)
238 except pamela.PAMError as e:
239 if handler is not None:
240 self.log.warn("PAM Authentication failed (@%s): %s", handler.request.remote_ip, e)
241 else:
242 self.log.warn("PAM Authentication failed: %s", e)
243 else:
244 return username
245
246 def pre_spawn_start(self, user, spawner):
247 """Open PAM session for user"""
248 try:
249 pamela.open_session(user.name, service=self.service)
250 except pamela.PAMError as e:
251 self.log.warn("Failed to open PAM session for %s: %s", user.name, e)
252
253 def post_spawn_stop(self, user, spawner):
254 """Close PAM session for user"""
255 try:
256 pamela.close_session(user.name, service=self.service)
257 except pamela.PAMError as e:
258 self.log.warn("Failed to close PAM session for %s: %s", user.name, e)
259
260
[end of jupyterhub/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/auth.py b/jupyterhub/auth.py
--- a/jupyterhub/auth.py
+++ b/jupyterhub/auth.py
@@ -4,8 +4,11 @@
# Distributed under the terms of the Modified BSD License.
from grp import getgrnam
+import pipes
import pwd
-from subprocess import check_call, check_output, CalledProcessError
+from shutil import which
+import sys
+from subprocess import check_call
from tornado import gen
import pamela
@@ -15,6 +18,7 @@
from .handlers.login import LoginHandler
from .utils import url_path_join
+from .traitlets import Command
class Authenticator(LoggingConfigurable):
"""A class for authentication.
@@ -123,17 +127,36 @@
should I try to create the system user?
"""
)
- system_user_home = Unicode('', config=True,
- help="""Specify root home directory for users if different from system default.
+ add_user_cmd = Command(config=True,
+ help="""The command to use for creating users as a list of strings.
- Passed to `useradd -b`.
- If specified, when system users are created their home directories will be created in
+ For each element in the list, the string USERNAME will be replaced with
+ the user's username. The username will also be appended as the final argument.
- system_user_home/username
+ For Linux, the default value is:
- Only has an effect when users are created with `create_system_users=True`.
+ ['adduser', '-q', '--gecos', '""', '--disabled-password']
+
+ To specify a custom home directory, set this to:
+
+ ['adduser', '-q', '--gecos', '""', '--home', '/customhome/USERNAME', '--disabled-password']
+
+ This will run the command:
+
+ adduser -q --gecos "" --home /customhome/river --disabled-password river
+
+ when the user 'river' is created.
"""
)
+ def _add_user_cmd_default(self):
+ if sys.platform == 'darwin':
+ raise ValueError("I don't know how to create users on OS X")
+ elif which('pw'):
+ # Probably BSD
+ return ['pw', 'useradd', '-m']
+ else:
+ # This appears to be the Linux non-interactive adduser command:
+ return ['adduser', '-q', '--gecos', '""', '--disabled-password']
group_whitelist = Set(
config=True,
@@ -196,23 +219,9 @@
def add_system_user(self, user):
"""Create a new *ix user on the system. Works on FreeBSD and Linux, at least."""
name = user.name
- for useradd in (
- ['pw', 'useradd', '-m'],
- ['useradd', '-m'],
- ):
- try:
- check_output(['which', useradd[0]])
- except CalledProcessError:
- continue
- else:
- break
- else:
- raise RuntimeError("I don't know how to add users on this system.")
-
- cmd = useradd
- if self.system_user_home:
- cmd = cmd + ['-b', self.system_user_home]
- check_call(cmd + [name])
+ cmd = [ arg.replace('USERNAME', name) for arg in self.add_user_cmd ] + [name]
+ self.log.info("Creating user: %s", ' '.join(map(pipes.quote, cmd)))
+ check_call(cmd)
class PAMAuthenticator(LocalAuthenticator):
| {"golden_diff": "diff --git a/jupyterhub/auth.py b/jupyterhub/auth.py\n--- a/jupyterhub/auth.py\n+++ b/jupyterhub/auth.py\n@@ -4,8 +4,11 @@\n # Distributed under the terms of the Modified BSD License.\n \n from grp import getgrnam\n+import pipes\n import pwd\n-from subprocess import check_call, check_output, CalledProcessError\n+from shutil import which\n+import sys\n+from subprocess import check_call\n \n from tornado import gen\n import pamela\n@@ -15,6 +18,7 @@\n \n from .handlers.login import LoginHandler\n from .utils import url_path_join\n+from .traitlets import Command\n \n class Authenticator(LoggingConfigurable):\n \"\"\"A class for authentication.\n@@ -123,17 +127,36 @@\n should I try to create the system user?\n \"\"\"\n )\n- system_user_home = Unicode('', config=True,\n- help=\"\"\"Specify root home directory for users if different from system default.\n+ add_user_cmd = Command(config=True,\n+ help=\"\"\"The command to use for creating users as a list of strings.\n \n- Passed to `useradd -b`.\n- If specified, when system users are created their home directories will be created in\n+ For each element in the list, the string USERNAME will be replaced with\n+ the user's username. The username will also be appended as the final argument.\n \n- system_user_home/username\n+ For Linux, the default value is:\n \n- Only has an effect when users are created with `create_system_users=True`.\n+ ['adduser', '-q', '--gecos', '\"\"', '--disabled-password']\n+ \n+ To specify a custom home directory, set this to:\n+ \n+ ['adduser', '-q', '--gecos', '\"\"', '--home', '/customhome/USERNAME', '--disabled-password']\n+\n+ This will run the command:\n+\n+ adduser -q --gecos \"\" --home /customhome/river --disabled-password river\n+ \n+ when the user 'river' is created.\n \"\"\"\n )\n+ def _add_user_cmd_default(self):\n+ if sys.platform == 'darwin':\n+ raise ValueError(\"I don't know how to create users on OS X\")\n+ elif which('pw'):\n+ # Probably BSD\n+ return ['pw', 'useradd', '-m']\n+ else:\n+ # This appears to be the Linux non-interactive adduser command:\n+ return ['adduser', '-q', '--gecos', '\"\"', '--disabled-password']\n \n group_whitelist = Set(\n config=True,\n@@ -196,23 +219,9 @@\n def add_system_user(self, user):\n \"\"\"Create a new *ix user on the system. Works on FreeBSD and Linux, at least.\"\"\"\n name = user.name\n- for useradd in (\n- ['pw', 'useradd', '-m'],\n- ['useradd', '-m'],\n- ):\n- try:\n- check_output(['which', useradd[0]])\n- except CalledProcessError:\n- continue\n- else:\n- break\n- else:\n- raise RuntimeError(\"I don't know how to add users on this system.\")\n- \n- cmd = useradd\n- if self.system_user_home:\n- cmd = cmd + ['-b', self.system_user_home]\n- check_call(cmd + [name])\n+ cmd = [ arg.replace('USERNAME', name) for arg in self.add_user_cmd ] + [name]\n+ self.log.info(\"Creating user: %s\", ' '.join(map(pipes.quote, cmd)))\n+ check_call(cmd)\n \n \n class PAMAuthenticator(LocalAuthenticator):\n", "issue": "Default shell to bash\nRight now, if users are created by jupyterhub with useradd on a stock Ubuntu, there is no default shell. This leads to `/bin/sh` being used rather than bash. When the web terminal is opened all readline related actions (tab, up, down) don't work. I know I can configure this in `/etc/default/useradd`, but we jut added an option to set the home directory base path. Should we default the shell to bash in the useradd CLI call? Maybe add an config option that defaults to `/bin/bash`. 
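As a usage note on the patch above: the new `add_user_cmd` trait is ordinary configuration, so a deployment that wants custom home directories (the use case `system_user_home` used to cover) can set it in `jupyterhub_config.py`. The sketch below only mirrors the examples given in the patch's help text; it is not a tested recommendation.

```python
# jupyterhub_config.py -- illustrative sketch only; `c` is provided by JupyterHub
# when it loads this file.
c.LocalAuthenticator.create_system_users = True
# USERNAME is substituted with the new user's name, and the username is also
# appended as the final argument by add_system_user().
c.LocalAuthenticator.add_user_cmd = [
    'adduser', '-q', '--gecos', '""',
    '--home', '/customhome/USERNAME',
    '--disabled-password',
]
```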
This behavior means that a pretty standard config on Ubuntu will have a (slightly) broken terminal.\n\nI could have sworn that we did something on this previously, when I was debugging things with my Active Directory setup where `SHELL` wasn't being set. But I don't remember what we ended up doing (I may have just set it myself).\n\nThoughts?\n\n", "before_files": [{"content": "\"\"\"Simple PAM authenticator\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom grp import getgrnam\nimport pwd\nfrom subprocess import check_call, check_output, CalledProcessError\n\nfrom tornado import gen\nimport pamela\n\nfrom traitlets.config import LoggingConfigurable\nfrom traitlets import Bool, Set, Unicode, Any\n\nfrom .handlers.login import LoginHandler\nfrom .utils import url_path_join\n\nclass Authenticator(LoggingConfigurable):\n \"\"\"A class for authentication.\n \n The API is one method, `authenticate`, a tornado gen.coroutine.\n \"\"\"\n \n db = Any()\n admin_users = Set(config=True,\n help=\"\"\"set of usernames of admin users\n\n If unspecified, only the user that launches the server will be admin.\n \"\"\"\n )\n whitelist = Set(config=True,\n help=\"\"\"Username whitelist.\n \n Use this to restrict which users can login.\n If empty, allow any user to attempt login.\n \"\"\"\n )\n custom_html = Unicode('',\n help=\"\"\"HTML login form for custom handlers.\n Override in form-based custom authenticators\n that don't use username+password,\n or need custom branding.\n \"\"\"\n )\n login_service = Unicode('',\n help=\"\"\"Name of the login service for external\n login services (e.g. 'GitHub').\n \"\"\"\n )\n \n @gen.coroutine\n def authenticate(self, handler, data):\n \"\"\"Authenticate a user with login form data.\n \n This must be a tornado gen.coroutine.\n It must return the username on successful authentication,\n and return None on failed authentication.\n \"\"\"\n\n def pre_spawn_start(self, user, spawner):\n \"\"\"Hook called before spawning a user's server.\n \n Can be used to do auth-related startup, e.g. opening PAM sessions.\n \"\"\"\n \n def post_spawn_stop(self, user, spawner):\n \"\"\"Hook called after stopping a user container.\n \n Can be used to do auth-related cleanup, e.g. closing PAM sessions.\n \"\"\"\n \n def check_whitelist(self, user):\n \"\"\"\n Return True if the whitelist is empty or user is in the whitelist.\n \"\"\"\n # Parens aren't necessary here, but they make this easier to parse.\n return (not self.whitelist) or (user in self.whitelist)\n\n def add_user(self, user):\n \"\"\"Add a new user\n \n By default, this just adds the user to the whitelist.\n \n Subclasses may do more extensive things,\n such as adding actual unix users.\n \"\"\"\n if self.whitelist:\n self.whitelist.add(user.name)\n \n def delete_user(self, user):\n \"\"\"Triggered when a user is deleted.\n \n Removes the user from the whitelist.\n \"\"\"\n self.whitelist.discard(user.name)\n \n def login_url(self, base_url):\n \"\"\"Override to register a custom login handler\"\"\"\n return url_path_join(base_url, 'login')\n \n def logout_url(self, base_url):\n \"\"\"Override to register a custom logout handler\"\"\"\n return url_path_join(base_url, 'logout')\n \n def get_handlers(self, app):\n \"\"\"Return any custom handlers the authenticator needs to register\n \n (e.g. 
for OAuth)\n \"\"\"\n return [\n ('/login', LoginHandler),\n ]\n\nclass LocalAuthenticator(Authenticator):\n \"\"\"Base class for Authenticators that work with local *ix users\n \n Checks for local users, and can attempt to create them if they exist.\n \"\"\"\n \n create_system_users = Bool(False, config=True,\n help=\"\"\"If a user is added that doesn't exist on the system,\n should I try to create the system user?\n \"\"\"\n )\n system_user_home = Unicode('', config=True,\n help=\"\"\"Specify root home directory for users if different from system default.\n \n Passed to `useradd -b`.\n If specified, when system users are created their home directories will be created in\n \n system_user_home/username\n \n Only has an effect when users are created with `create_system_users=True`.\n \"\"\"\n )\n\n group_whitelist = Set(\n config=True,\n help=\"Automatically whitelist anyone in this group.\",\n )\n\n def _group_whitelist_changed(self, name, old, new):\n if self.whitelist:\n self.log.warn(\n \"Ignoring username whitelist because group whitelist supplied!\"\n )\n\n def check_whitelist(self, username):\n if self.group_whitelist:\n return self.check_group_whitelist(username)\n else:\n return super().check_whitelist(username)\n\n def check_group_whitelist(self, username):\n if not self.group_whitelist:\n return False\n for grnam in self.group_whitelist:\n try:\n group = getgrnam(grnam)\n except KeyError:\n self.log.error('No such group: [%s]' % grnam)\n continue\n if username in group.gr_mem:\n return True\n return False\n\n @gen.coroutine\n def add_user(self, user):\n \"\"\"Add a new user\n \n By default, this just adds the user to the whitelist.\n \n Subclasses may do more extensive things,\n such as adding actual unix users.\n \"\"\"\n user_exists = yield gen.maybe_future(self.system_user_exists(user))\n if not user_exists:\n if self.create_system_users:\n yield gen.maybe_future(self.add_system_user(user))\n else:\n raise KeyError(\"User %s does not exist.\" % user.name)\n \n yield gen.maybe_future(super().add_user(user))\n \n @staticmethod\n def system_user_exists(user):\n \"\"\"Check if the user exists on the system\"\"\"\n try:\n pwd.getpwnam(user.name)\n except KeyError:\n return False\n else:\n return True\n \n def add_system_user(self, user):\n \"\"\"Create a new *ix user on the system. 
Works on FreeBSD and Linux, at least.\"\"\"\n name = user.name\n for useradd in (\n ['pw', 'useradd', '-m'],\n ['useradd', '-m'],\n ):\n try:\n check_output(['which', useradd[0]])\n except CalledProcessError:\n continue\n else:\n break\n else:\n raise RuntimeError(\"I don't know how to add users on this system.\")\n \n cmd = useradd\n if self.system_user_home:\n cmd = cmd + ['-b', self.system_user_home]\n check_call(cmd + [name])\n\n\nclass PAMAuthenticator(LocalAuthenticator):\n \"\"\"Authenticate local *ix users with PAM\"\"\"\n encoding = Unicode('utf8', config=True,\n help=\"\"\"The encoding to use for PAM\"\"\"\n )\n service = Unicode('login', config=True,\n help=\"\"\"The PAM service to use for authentication.\"\"\"\n )\n \n @gen.coroutine\n def authenticate(self, handler, data):\n \"\"\"Authenticate with PAM, and return the username if login is successful.\n \n Return None otherwise.\n \"\"\"\n username = data['username']\n if not self.check_whitelist(username):\n return\n try:\n pamela.authenticate(username, data['password'], service=self.service)\n except pamela.PAMError as e:\n if handler is not None:\n self.log.warn(\"PAM Authentication failed (@%s): %s\", handler.request.remote_ip, e)\n else:\n self.log.warn(\"PAM Authentication failed: %s\", e)\n else:\n return username\n \n def pre_spawn_start(self, user, spawner):\n \"\"\"Open PAM session for user\"\"\"\n try:\n pamela.open_session(user.name, service=self.service)\n except pamela.PAMError as e:\n self.log.warn(\"Failed to open PAM session for %s: %s\", user.name, e)\n \n def post_spawn_stop(self, user, spawner):\n \"\"\"Close PAM session for user\"\"\"\n try:\n pamela.close_session(user.name, service=self.service)\n except pamela.PAMError as e:\n self.log.warn(\"Failed to close PAM session for %s: %s\", user.name, e)\n \n", "path": "jupyterhub/auth.py"}]} | 3,144 | 816 |
gh_patches_debug_39150 | rasdani/github-patches | git_diff | python-pillow__Pillow-3673 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DDS Plugin cannot handle uncompressed files
Loading a DDS file without compression gives me the following error:
```
File "<SNIP>\PIL\Image.py", line 2676, in open
im = _open_core(fp, filename, prefix)
File "<SNIP>\PIL\Image.py", line 2658, in _open_core
im = factory(fp, filename)
File "<SNIP>\PIL\ImageFile.py", line 103, in __init__
self._open()
File "<SNIP>\PIL\DdsImagePlugin.py", line 158, in _open
(fourcc))
NotImplementedError: Unimplemented pixel format b'\x00\x00\x00\x00'
```
To demonstrate the issue, I created a simple DDS file as demonstration. Calling `Image.open()` on that file will not work.
[uncompressed_dds.zip](https://github.com/python-pillow/Pillow/files/2872238/uncompressed_dds.zip)
Looking over the source code of DdsImagePlugin, it seems that only FourCC DDS files are supported.
Pillow version: 5.4.1
Python version: 3.7.2
</issue>
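For reference while reading the plugin code below: the failure happens at open time, before any pixel data is decoded, so a minimal reproduction needs nothing more than `Image.open()`. The file name here is a placeholder for the sample attached to the issue.

```python
from PIL import Image

# Placeholder path -- assumes the sample from uncompressed_dds.zip was extracted here.
im = Image.open("uncompressed_sample.dds")  # Pillow 5.4.1: NotImplementedError in _open()
```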
<code>
[start of src/PIL/DdsImagePlugin.py]
1 """
2 A Pillow loader for .dds files (S3TC-compressed aka DXTC)
3 Jerome Leclanche <[email protected]>
4
5 Documentation:
6 https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt
7
8 The contents of this file are hereby released in the public domain (CC0)
9 Full text of the CC0 license:
10 https://creativecommons.org/publicdomain/zero/1.0/
11 """
12
13 import struct
14 from io import BytesIO
15 from . import Image, ImageFile
16
17
18 # Magic ("DDS ")
19 DDS_MAGIC = 0x20534444
20
21 # DDS flags
22 DDSD_CAPS = 0x1
23 DDSD_HEIGHT = 0x2
24 DDSD_WIDTH = 0x4
25 DDSD_PITCH = 0x8
26 DDSD_PIXELFORMAT = 0x1000
27 DDSD_MIPMAPCOUNT = 0x20000
28 DDSD_LINEARSIZE = 0x80000
29 DDSD_DEPTH = 0x800000
30
31 # DDS caps
32 DDSCAPS_COMPLEX = 0x8
33 DDSCAPS_TEXTURE = 0x1000
34 DDSCAPS_MIPMAP = 0x400000
35
36 DDSCAPS2_CUBEMAP = 0x200
37 DDSCAPS2_CUBEMAP_POSITIVEX = 0x400
38 DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800
39 DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000
40 DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000
41 DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000
42 DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000
43 DDSCAPS2_VOLUME = 0x200000
44
45 # Pixel Format
46 DDPF_ALPHAPIXELS = 0x1
47 DDPF_ALPHA = 0x2
48 DDPF_FOURCC = 0x4
49 DDPF_PALETTEINDEXED8 = 0x20
50 DDPF_RGB = 0x40
51 DDPF_LUMINANCE = 0x20000
52
53
54 # dds.h
55
56 DDS_FOURCC = DDPF_FOURCC
57 DDS_RGB = DDPF_RGB
58 DDS_RGBA = DDPF_RGB | DDPF_ALPHAPIXELS
59 DDS_LUMINANCE = DDPF_LUMINANCE
60 DDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS
61 DDS_ALPHA = DDPF_ALPHA
62 DDS_PAL8 = DDPF_PALETTEINDEXED8
63
64 DDS_HEADER_FLAGS_TEXTURE = (DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH |
65 DDSD_PIXELFORMAT)
66 DDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT
67 DDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH
68 DDS_HEADER_FLAGS_PITCH = DDSD_PITCH
69 DDS_HEADER_FLAGS_LINEARSIZE = DDSD_LINEARSIZE
70
71 DDS_HEIGHT = DDSD_HEIGHT
72 DDS_WIDTH = DDSD_WIDTH
73
74 DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS_TEXTURE
75 DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS_COMPLEX | DDSCAPS_MIPMAP
76 DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS_COMPLEX
77
78 DDS_CUBEMAP_POSITIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEX
79 DDS_CUBEMAP_NEGATIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEX
80 DDS_CUBEMAP_POSITIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEY
81 DDS_CUBEMAP_NEGATIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEY
82 DDS_CUBEMAP_POSITIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEZ
83 DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEZ
84
85
86 # DXT1
87 DXT1_FOURCC = 0x31545844
88
89 # DXT3
90 DXT3_FOURCC = 0x33545844
91
92 # DXT5
93 DXT5_FOURCC = 0x35545844
94
95
96 # dxgiformat.h
97
98 DXGI_FORMAT_BC7_TYPELESS = 97
99 DXGI_FORMAT_BC7_UNORM = 98
100 DXGI_FORMAT_BC7_UNORM_SRGB = 99
101
102
103 class DdsImageFile(ImageFile.ImageFile):
104 format = "DDS"
105 format_description = "DirectDraw Surface"
106
107 def _open(self):
108 magic, header_size = struct.unpack("<II", self.fp.read(8))
109 if header_size != 124:
110 raise IOError("Unsupported header size %r" % (header_size))
111 header_bytes = self.fp.read(header_size - 4)
112 if len(header_bytes) != 120:
113 raise IOError("Incomplete header: %s bytes" % len(header_bytes))
114 header = BytesIO(header_bytes)
115
116 flags, height, width = struct.unpack("<3I", header.read(12))
117 self._size = (width, height)
118 self.mode = "RGBA"
119
120 pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
121 struct.unpack("<11I", header.read(44)) # reserved
122
123 # pixel format
124 pfsize, pfflags = struct.unpack("<2I", header.read(8))
125 fourcc = header.read(4)
126 bitcount, rmask, gmask, bmask, amask = struct.unpack("<5I",
127 header.read(20))
128
129 data_start = header_size + 4
130 n = 0
131 if fourcc == b"DXT1":
132 self.pixel_format = "DXT1"
133 n = 1
134 elif fourcc == b"DXT3":
135 self.pixel_format = "DXT3"
136 n = 2
137 elif fourcc == b"DXT5":
138 self.pixel_format = "DXT5"
139 n = 3
140 elif fourcc == b"DX10":
141 data_start += 20
142 # ignoring flags which pertain to volume textures and cubemaps
143 dxt10 = BytesIO(self.fp.read(20))
144 dxgi_format, dimension = struct.unpack("<II", dxt10.read(8))
145 if dxgi_format in (DXGI_FORMAT_BC7_TYPELESS,
146 DXGI_FORMAT_BC7_UNORM):
147 self.pixel_format = "BC7"
148 n = 7
149 elif dxgi_format == DXGI_FORMAT_BC7_UNORM_SRGB:
150 self.pixel_format = "BC7"
151 self.im_info["gamma"] = 1/2.2
152 n = 7
153 else:
154 raise NotImplementedError("Unimplemented DXGI format %d" %
155 (dxgi_format))
156 else:
157 raise NotImplementedError("Unimplemented pixel format %r" %
158 (fourcc))
159
160 self.tile = [
161 ("bcn", (0, 0) + self.size, data_start, (n))
162 ]
163
164 def load_seek(self, pos):
165 pass
166
167
168 def _validate(prefix):
169 return prefix[:4] == b"DDS "
170
171
172 Image.register_open(DdsImageFile.format, DdsImageFile, _validate)
173 Image.register_extension(DdsImageFile.format, ".dds")
174
[end of src/PIL/DdsImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/PIL/DdsImagePlugin.py b/src/PIL/DdsImagePlugin.py
--- a/src/PIL/DdsImagePlugin.py
+++ b/src/PIL/DdsImagePlugin.py
@@ -123,43 +123,52 @@
# pixel format
pfsize, pfflags = struct.unpack("<2I", header.read(8))
fourcc = header.read(4)
- bitcount, rmask, gmask, bmask, amask = struct.unpack("<5I",
- header.read(20))
-
- data_start = header_size + 4
- n = 0
- if fourcc == b"DXT1":
- self.pixel_format = "DXT1"
- n = 1
- elif fourcc == b"DXT3":
- self.pixel_format = "DXT3"
- n = 2
- elif fourcc == b"DXT5":
- self.pixel_format = "DXT5"
- n = 3
- elif fourcc == b"DX10":
- data_start += 20
- # ignoring flags which pertain to volume textures and cubemaps
- dxt10 = BytesIO(self.fp.read(20))
- dxgi_format, dimension = struct.unpack("<II", dxt10.read(8))
- if dxgi_format in (DXGI_FORMAT_BC7_TYPELESS,
- DXGI_FORMAT_BC7_UNORM):
- self.pixel_format = "BC7"
- n = 7
- elif dxgi_format == DXGI_FORMAT_BC7_UNORM_SRGB:
- self.pixel_format = "BC7"
- self.im_info["gamma"] = 1/2.2
- n = 7
- else:
- raise NotImplementedError("Unimplemented DXGI format %d" %
- (dxgi_format))
+ bitcount, = struct.unpack("<I", header.read(4))
+ masks = struct.unpack("<4I", header.read(16))
+ if pfflags & 0x40:
+ # DDPF_RGB - Texture contains uncompressed RGB data
+ masks = {mask: ["R", "G", "B", "A"][i] for i, mask in enumerate(masks)}
+ rawmode = ""
+ if bitcount == 32:
+ rawmode += masks[0xff000000]
+ rawmode += masks[0xff0000] + masks[0xff00] + masks[0xff]
+
+ self.tile = [("raw", (0, 0) + self.size, 0, (rawmode, 0, 1))]
else:
- raise NotImplementedError("Unimplemented pixel format %r" %
- (fourcc))
+ data_start = header_size + 4
+ n = 0
+ if fourcc == b"DXT1":
+ self.pixel_format = "DXT1"
+ n = 1
+ elif fourcc == b"DXT3":
+ self.pixel_format = "DXT3"
+ n = 2
+ elif fourcc == b"DXT5":
+ self.pixel_format = "DXT5"
+ n = 3
+ elif fourcc == b"DX10":
+ data_start += 20
+ # ignoring flags which pertain to volume textures and cubemaps
+ dxt10 = BytesIO(self.fp.read(20))
+ dxgi_format, dimension = struct.unpack("<II", dxt10.read(8))
+ if dxgi_format in (DXGI_FORMAT_BC7_TYPELESS,
+ DXGI_FORMAT_BC7_UNORM):
+ self.pixel_format = "BC7"
+ n = 7
+ elif dxgi_format == DXGI_FORMAT_BC7_UNORM_SRGB:
+ self.pixel_format = "BC7"
+ self.im_info["gamma"] = 1/2.2
+ n = 7
+ else:
+ raise NotImplementedError("Unimplemented DXGI format %d" %
+ (dxgi_format))
+ else:
+ raise NotImplementedError("Unimplemented pixel format %r" %
+ (fourcc))
- self.tile = [
- ("bcn", (0, 0) + self.size, data_start, (n))
- ]
+ self.tile = [
+ ("bcn", (0, 0) + self.size, data_start, (n))
+ ]
def load_seek(self, pos):
pass
| {"golden_diff": "diff --git a/src/PIL/DdsImagePlugin.py b/src/PIL/DdsImagePlugin.py\n--- a/src/PIL/DdsImagePlugin.py\n+++ b/src/PIL/DdsImagePlugin.py\n@@ -123,43 +123,52 @@\n # pixel format\n pfsize, pfflags = struct.unpack(\"<2I\", header.read(8))\n fourcc = header.read(4)\n- bitcount, rmask, gmask, bmask, amask = struct.unpack(\"<5I\",\n- header.read(20))\n-\n- data_start = header_size + 4\n- n = 0\n- if fourcc == b\"DXT1\":\n- self.pixel_format = \"DXT1\"\n- n = 1\n- elif fourcc == b\"DXT3\":\n- self.pixel_format = \"DXT3\"\n- n = 2\n- elif fourcc == b\"DXT5\":\n- self.pixel_format = \"DXT5\"\n- n = 3\n- elif fourcc == b\"DX10\":\n- data_start += 20\n- # ignoring flags which pertain to volume textures and cubemaps\n- dxt10 = BytesIO(self.fp.read(20))\n- dxgi_format, dimension = struct.unpack(\"<II\", dxt10.read(8))\n- if dxgi_format in (DXGI_FORMAT_BC7_TYPELESS,\n- DXGI_FORMAT_BC7_UNORM):\n- self.pixel_format = \"BC7\"\n- n = 7\n- elif dxgi_format == DXGI_FORMAT_BC7_UNORM_SRGB:\n- self.pixel_format = \"BC7\"\n- self.im_info[\"gamma\"] = 1/2.2\n- n = 7\n- else:\n- raise NotImplementedError(\"Unimplemented DXGI format %d\" %\n- (dxgi_format))\n+ bitcount, = struct.unpack(\"<I\", header.read(4))\n+ masks = struct.unpack(\"<4I\", header.read(16))\n+ if pfflags & 0x40:\n+ # DDPF_RGB - Texture contains uncompressed RGB data\n+ masks = {mask: [\"R\", \"G\", \"B\", \"A\"][i] for i, mask in enumerate(masks)}\n+ rawmode = \"\"\n+ if bitcount == 32:\n+ rawmode += masks[0xff000000]\n+ rawmode += masks[0xff0000] + masks[0xff00] + masks[0xff]\n+\n+ self.tile = [(\"raw\", (0, 0) + self.size, 0, (rawmode, 0, 1))]\n else:\n- raise NotImplementedError(\"Unimplemented pixel format %r\" %\n- (fourcc))\n+ data_start = header_size + 4\n+ n = 0\n+ if fourcc == b\"DXT1\":\n+ self.pixel_format = \"DXT1\"\n+ n = 1\n+ elif fourcc == b\"DXT3\":\n+ self.pixel_format = \"DXT3\"\n+ n = 2\n+ elif fourcc == b\"DXT5\":\n+ self.pixel_format = \"DXT5\"\n+ n = 3\n+ elif fourcc == b\"DX10\":\n+ data_start += 20\n+ # ignoring flags which pertain to volume textures and cubemaps\n+ dxt10 = BytesIO(self.fp.read(20))\n+ dxgi_format, dimension = struct.unpack(\"<II\", dxt10.read(8))\n+ if dxgi_format in (DXGI_FORMAT_BC7_TYPELESS,\n+ DXGI_FORMAT_BC7_UNORM):\n+ self.pixel_format = \"BC7\"\n+ n = 7\n+ elif dxgi_format == DXGI_FORMAT_BC7_UNORM_SRGB:\n+ self.pixel_format = \"BC7\"\n+ self.im_info[\"gamma\"] = 1/2.2\n+ n = 7\n+ else:\n+ raise NotImplementedError(\"Unimplemented DXGI format %d\" %\n+ (dxgi_format))\n+ else:\n+ raise NotImplementedError(\"Unimplemented pixel format %r\" %\n+ (fourcc))\n \n- self.tile = [\n- (\"bcn\", (0, 0) + self.size, data_start, (n))\n- ]\n+ self.tile = [\n+ (\"bcn\", (0, 0) + self.size, data_start, (n))\n+ ]\n \n def load_seek(self, pos):\n pass\n", "issue": "DDS Plugin cannot handle uncompressed files\nLoading a DDS file without compression gives me the following error:\r\n```\r\n File \"<SNIP>\\PIL\\Image.py\", line 2676, in open\r\n im = _open_core(fp, filename, prefix)\r\n File \"<SNIP>\\PIL\\Image.py\", line 2658, in _open_core\r\n im = factory(fp, filename)\r\n File \"<SNIP>\\PIL\\ImageFile.py\", line 103, in __init__\r\n self._open()\r\n File \"<SNIP>\\PIL\\DdsImagePlugin.py\", line 158, in _open\r\n (fourcc))\r\nNotImplementedError: Unimplemented pixel format b'\\x00\\x00\\x00\\x00'\r\n```\r\n\r\nTo demonstrate the issue, I created a simple DDS file as demonstration. Calling `Image.open()` on that file will not work. 
\r\n[uncompressed_dds.zip](https://github.com/python-pillow/Pillow/files/2872238/uncompressed_dds.zip)\r\n\r\nLooking over the source code of DdsImagePlugin, it seems that only FourCC DDS files are supported.\r\n\r\nPillow version: 5.4.1\r\nPython version: 3.7.2\n", "before_files": [{"content": "\"\"\"\nA Pillow loader for .dds files (S3TC-compressed aka DXTC)\nJerome Leclanche <[email protected]>\n\nDocumentation:\n https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt\n\nThe contents of this file are hereby released in the public domain (CC0)\nFull text of the CC0 license:\n https://creativecommons.org/publicdomain/zero/1.0/\n\"\"\"\n\nimport struct\nfrom io import BytesIO\nfrom . import Image, ImageFile\n\n\n# Magic (\"DDS \")\nDDS_MAGIC = 0x20534444\n\n# DDS flags\nDDSD_CAPS = 0x1\nDDSD_HEIGHT = 0x2\nDDSD_WIDTH = 0x4\nDDSD_PITCH = 0x8\nDDSD_PIXELFORMAT = 0x1000\nDDSD_MIPMAPCOUNT = 0x20000\nDDSD_LINEARSIZE = 0x80000\nDDSD_DEPTH = 0x800000\n\n# DDS caps\nDDSCAPS_COMPLEX = 0x8\nDDSCAPS_TEXTURE = 0x1000\nDDSCAPS_MIPMAP = 0x400000\n\nDDSCAPS2_CUBEMAP = 0x200\nDDSCAPS2_CUBEMAP_POSITIVEX = 0x400\nDDSCAPS2_CUBEMAP_NEGATIVEX = 0x800\nDDSCAPS2_CUBEMAP_POSITIVEY = 0x1000\nDDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000\nDDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000\nDDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000\nDDSCAPS2_VOLUME = 0x200000\n\n# Pixel Format\nDDPF_ALPHAPIXELS = 0x1\nDDPF_ALPHA = 0x2\nDDPF_FOURCC = 0x4\nDDPF_PALETTEINDEXED8 = 0x20\nDDPF_RGB = 0x40\nDDPF_LUMINANCE = 0x20000\n\n\n# dds.h\n\nDDS_FOURCC = DDPF_FOURCC\nDDS_RGB = DDPF_RGB\nDDS_RGBA = DDPF_RGB | DDPF_ALPHAPIXELS\nDDS_LUMINANCE = DDPF_LUMINANCE\nDDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS\nDDS_ALPHA = DDPF_ALPHA\nDDS_PAL8 = DDPF_PALETTEINDEXED8\n\nDDS_HEADER_FLAGS_TEXTURE = (DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH |\n DDSD_PIXELFORMAT)\nDDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT\nDDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH\nDDS_HEADER_FLAGS_PITCH = DDSD_PITCH\nDDS_HEADER_FLAGS_LINEARSIZE = DDSD_LINEARSIZE\n\nDDS_HEIGHT = DDSD_HEIGHT\nDDS_WIDTH = DDSD_WIDTH\n\nDDS_SURFACE_FLAGS_TEXTURE = DDSCAPS_TEXTURE\nDDS_SURFACE_FLAGS_MIPMAP = DDSCAPS_COMPLEX | DDSCAPS_MIPMAP\nDDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS_COMPLEX\n\nDDS_CUBEMAP_POSITIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEX\nDDS_CUBEMAP_NEGATIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEX\nDDS_CUBEMAP_POSITIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEY\nDDS_CUBEMAP_NEGATIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEY\nDDS_CUBEMAP_POSITIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEZ\nDDS_CUBEMAP_NEGATIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEZ\n\n\n# DXT1\nDXT1_FOURCC = 0x31545844\n\n# DXT3\nDXT3_FOURCC = 0x33545844\n\n# DXT5\nDXT5_FOURCC = 0x35545844\n\n\n# dxgiformat.h\n\nDXGI_FORMAT_BC7_TYPELESS = 97\nDXGI_FORMAT_BC7_UNORM = 98\nDXGI_FORMAT_BC7_UNORM_SRGB = 99\n\n\nclass DdsImageFile(ImageFile.ImageFile):\n format = \"DDS\"\n format_description = \"DirectDraw Surface\"\n\n def _open(self):\n magic, header_size = struct.unpack(\"<II\", self.fp.read(8))\n if header_size != 124:\n raise IOError(\"Unsupported header size %r\" % (header_size))\n header_bytes = self.fp.read(header_size - 4)\n if len(header_bytes) != 120:\n raise IOError(\"Incomplete header: %s bytes\" % len(header_bytes))\n header = BytesIO(header_bytes)\n\n flags, height, width = struct.unpack(\"<3I\", header.read(12))\n self._size = (width, height)\n self.mode = \"RGBA\"\n\n pitch, depth, mipmaps = struct.unpack(\"<3I\", header.read(12))\n 
struct.unpack(\"<11I\", header.read(44)) # reserved\n\n # pixel format\n pfsize, pfflags = struct.unpack(\"<2I\", header.read(8))\n fourcc = header.read(4)\n bitcount, rmask, gmask, bmask, amask = struct.unpack(\"<5I\",\n header.read(20))\n\n data_start = header_size + 4\n n = 0\n if fourcc == b\"DXT1\":\n self.pixel_format = \"DXT1\"\n n = 1\n elif fourcc == b\"DXT3\":\n self.pixel_format = \"DXT3\"\n n = 2\n elif fourcc == b\"DXT5\":\n self.pixel_format = \"DXT5\"\n n = 3\n elif fourcc == b\"DX10\":\n data_start += 20\n # ignoring flags which pertain to volume textures and cubemaps\n dxt10 = BytesIO(self.fp.read(20))\n dxgi_format, dimension = struct.unpack(\"<II\", dxt10.read(8))\n if dxgi_format in (DXGI_FORMAT_BC7_TYPELESS,\n DXGI_FORMAT_BC7_UNORM):\n self.pixel_format = \"BC7\"\n n = 7\n elif dxgi_format == DXGI_FORMAT_BC7_UNORM_SRGB:\n self.pixel_format = \"BC7\"\n self.im_info[\"gamma\"] = 1/2.2\n n = 7\n else:\n raise NotImplementedError(\"Unimplemented DXGI format %d\" %\n (dxgi_format))\n else:\n raise NotImplementedError(\"Unimplemented pixel format %r\" %\n (fourcc))\n\n self.tile = [\n (\"bcn\", (0, 0) + self.size, data_start, (n))\n ]\n\n def load_seek(self, pos):\n pass\n\n\ndef _validate(prefix):\n return prefix[:4] == b\"DDS \"\n\n\nImage.register_open(DdsImageFile.format, DdsImageFile, _validate)\nImage.register_extension(DdsImageFile.format, \".dds\")\n", "path": "src/PIL/DdsImagePlugin.py"}]} | 2,948 | 1,014 |
gh_patches_debug_2439 | rasdani/github-patches | git_diff | ephios-dev__ephios-384 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot delete section
As a planner, I cannot delete an existing section from a shift with the section_based signup method
</issue>
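Background that may help when reading the plugin code and patch below: with `formset_factory(..., can_delete=True)` Django only adds a boolean `DELETE` field to each form -- forms marked for deletion still appear when iterating the formset, so any code that rebuilds data from the forms has to skip them itself. A generic Django sketch (not ephios-specific):

```python
def kept_section_data(formset):
    """Return cleaned_data for forms not marked for deletion.

    Assumes `formset` was built with can_delete=True and has been validated.
    Django also exposes the marked forms directly via formset.deleted_forms.
    """
    return [
        form.cleaned_data
        for form in formset
        if form.cleaned_data and not form.cleaned_data.get("DELETE")
    ]
```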
<code>
[start of ephios/plugins/basesignup/signup/section_based.py]
1 import uuid
2 from functools import cached_property
3 from itertools import groupby
4 from operator import itemgetter
5
6 from django import forms
7 from django.contrib import messages
8 from django.core.exceptions import ValidationError
9 from django.shortcuts import redirect
10 from django.template.loader import get_template
11 from django.urls import reverse
12 from django.utils.translation import gettext_lazy as _
13 from django.views.generic import FormView
14 from django_select2.forms import Select2MultipleWidget
15 from dynamic_preferences.registries import global_preferences_registry
16
17 from ephios.core.models import AbstractParticipation, Qualification
18 from ephios.core.signup import (
19 AbstractParticipant,
20 BaseDispositionParticipationForm,
21 BaseSignupMethod,
22 BaseSignupView,
23 ParticipationError,
24 )
25
26
27 def sections_participant_qualifies_for(sections, participant: AbstractParticipant):
28 available_qualification_ids = set(q.id for q in participant.collect_all_qualifications())
29 return [
30 section
31 for section in sections
32 if set(section["qualifications"]) <= available_qualification_ids
33 ]
34
35
36 class SectionBasedDispositionParticipationForm(BaseDispositionParticipationForm):
37 disposition_participation_template = "basesignup/section_based/fragment_participant.html"
38
39 section = forms.ChoiceField(
40 label=_("Section"),
41 required=False, # only required if participation is confirmed
42 widget=forms.Select(
43 attrs={"data-show-for-state": str(AbstractParticipation.States.CONFIRMED)}
44 ),
45 )
46
47 def __init__(self, **kwargs):
48 super().__init__(**kwargs)
49 sections = self.shift.signup_method.configuration.sections
50 qualified_sections = list(
51 sections_participant_qualifies_for(
52 sections,
53 self.instance.participant,
54 )
55 )
56 unqualified_sections = [
57 section for section in sections if section not in qualified_sections
58 ]
59 self.fields["section"].choices = [("", "---")]
60 if qualified_sections:
61 self.fields["section"].choices += [
62 (
63 _("qualified"),
64 [(section["uuid"], section["title"]) for section in qualified_sections],
65 )
66 ]
67 if unqualified_sections:
68 self.fields["section"].choices += [
69 (
70 _("unqualified"),
71 [(section["uuid"], section["title"]) for section in unqualified_sections],
72 )
73 ]
74 if preferred_section_uuid := self.instance.data.get("preferred_section_uuid"):
75 self.fields["section"].initial = preferred_section_uuid
76 self.preferred_section = next(
77 filter(lambda section: section["uuid"] == preferred_section_uuid, sections), None
78 )
79 if initial := self.instance.data.get("dispatched_section_uuid"):
80 self.fields["section"].initial = initial
81
82 def clean(self):
83 super().clean()
84 if (
85 self.cleaned_data["state"] == AbstractParticipation.States.CONFIRMED
86 and not self.cleaned_data["section"]
87 ):
88 self.add_error(
89 "section",
90 ValidationError(_("You must select a section when confirming a participation.")),
91 )
92
93 def save(self, commit=True):
94 self.instance.data["dispatched_section_uuid"] = self.cleaned_data["section"]
95 super().save(commit)
96
97
98 class SectionForm(forms.Form):
99 title = forms.CharField(label=_("Title"), required=True)
100 qualifications = forms.ModelMultipleChoiceField(
101 label=_("Required Qualifications"),
102 queryset=Qualification.objects.all(),
103 widget=Select2MultipleWidget,
104 required=False,
105 )
106 min_count = forms.IntegerField(label=_("min amount"), min_value=0, required=True)
107 uuid = forms.CharField(widget=forms.HiddenInput, required=False)
108
109 def clean_uuid(self):
110 return self.cleaned_data.get("uuid") or uuid.uuid4()
111
112
113 SectionsFormset = forms.formset_factory(
114 SectionForm, can_delete=True, min_num=1, validate_min=1, extra=0
115 )
116
117
118 class SectionBasedConfigurationForm(forms.Form):
119 def __init__(self, data=None, **kwargs):
120 super().__init__(data, **kwargs)
121 self.sections_formset = SectionsFormset(
122 data=data,
123 initial=self.initial.get("sections", list()),
124 prefix="sections",
125 )
126
127 def clean_sections(self):
128 if not self.sections_formset.is_valid():
129 raise ValidationError(_("The sections aren't configured correctly."))
130
131 sections = [
132 {
133 key: form.cleaned_data[key]
134 for key in ("title", "qualifications", "min_count", "uuid")
135 }
136 for form in self.sections_formset
137 ]
138 return sections
139
140
141 class SectionSignupForm(forms.Form):
142 section = forms.ChoiceField(
143 label=_("Preferred Section"),
144 widget=forms.RadioSelect,
145 required=False,
146 # choices are set as (uuid, title) of section
147 )
148
149
150 class SectionBasedSignupView(FormView, BaseSignupView):
151 template_name = "basesignup/section_based/signup.html"
152
153 @cached_property
154 def sections_participant_qualifies_for(self):
155 return sections_participant_qualifies_for(
156 self.method.configuration.sections, self.participant
157 )
158
159 def get_form(self, form_class=None):
160 form = SectionSignupForm(self.request.POST)
161 form.fields["section"].choices = [
162 (section["uuid"], section["title"])
163 for section in self.sections_participant_qualifies_for
164 ]
165 return form
166
167 def get_context_data(self, **kwargs):
168 kwargs.setdefault("shift", self.shift)
169 kwargs.setdefault(
170 "unqualified_sections",
171 [
172 section["title"]
173 for section in self.method.configuration.sections
174 if section not in self.sections_participant_qualifies_for
175 ],
176 )
177 return super().get_context_data(**kwargs)
178
179 def form_valid(self, form):
180 return super().signup_pressed(preferred_section_uuid=form.cleaned_data.get("section"))
181
182 def signup_pressed(self, **kwargs):
183 if not self.method.configuration.choose_preferred_section:
184 # do straight signup if choosing is not enabled
185 return super().signup_pressed(**kwargs)
186
187 if not self.method.can_sign_up(self.participant):
188 # redirect a misled request
189 messages.warning(self.request, _("You can not sign up for this shift."))
190 return redirect(self.participant.reverse_event_detail(self.shift.event))
191
192 # all good, redirect to the form
193 return redirect(self.participant.reverse_signup_action(self.shift))
194
195
196 class SectionBasedSignupMethod(BaseSignupMethod):
197 slug = "section_based"
198 verbose_name = _("Apply for sections")
199 description = _(
200 """This method lets you define sections for which people can choose from.
201 Sections contain qualifications that helpers need to fulfil."""
202 )
203 registration_button_text = _("Request")
204 signup_success_message = _("You have successfully requested a participation for {shift}.")
205 signup_error_message = _("Requesting a participation failed: {error}")
206
207 configuration_form_class = SectionBasedConfigurationForm
208 signup_view_class = SectionBasedSignupView
209
210 disposition_participation_form_class = SectionBasedDispositionParticipationForm
211
212 def get_configuration_fields(self):
213 return {
214 **super().get_configuration_fields(),
215 "choose_preferred_section": {
216 "formfield": forms.BooleanField(
217 label=_("Ask participants for a preferred section"),
218 help_text=_("This only makes sense if you configure multiple sections."),
219 widget=forms.CheckboxInput,
220 required=False,
221 ),
222 "default": False,
223 },
224 "sections": {
225 "formfield": forms.Field(
226 label=_("Structure"),
227 widget=forms.HiddenInput,
228 required=False,
229 ),
230 "default": [],
231 },
232 }
233
234 def get_participant_count_bounds(self):
235 return sum(section.get("min_count") or 0 for section in self.configuration.sections), None
236
237 @staticmethod
238 def check_qualification(method, participant):
239 if not sections_participant_qualifies_for(method.configuration.sections, participant):
240 return ParticipationError(_("You are not qualified."))
241
242 @property
243 def _signup_checkers(self):
244 return super()._signup_checkers + [self.check_qualification]
245
246 # pylint: disable=arguments-differ
247 def _configure_participation(
248 self, participation: AbstractParticipation, preferred_section_uuid=None, **kwargs
249 ) -> AbstractParticipation:
250 participation.data["preferred_section_uuid"] = preferred_section_uuid
251 if preferred_section_uuid:
252 # reset dispatch decision, as that would have overwritten the preferred choice
253 participation.data["dispatched_section_uuid"] = None
254 participation.state = AbstractParticipation.States.REQUESTED
255 return participation
256
257 def render_configuration_form(self, *args, form=None, **kwargs):
258 form = form or self.get_configuration_form(*args, **kwargs)
259 template = get_template("basesignup/section_based/configuration_form.html").render(
260 {"form": form}
261 )
262 return template
263
264 def _get_sections_with_users(self):
265 relevant_qualification_categories = global_preferences_registry.manager()[
266 "general__relevant_qualification_categories"
267 ]
268 section_by_uuid = {section["uuid"]: section for section in self.configuration.sections}
269 # get name and preferred section uuid for confirmed participants
270 # if they have a section assigned and we have that section on record
271 confirmed_participations = [
272 {
273 "name": str(participation.participant),
274 "relevant_qualifications": ", ".join(
275 participation.participant.qualifications.filter(
276 category__in=relevant_qualification_categories
277 ).values_list("abbreviation", flat=True)
278 ),
279 "uuid": dispatched_section_uuid,
280 }
281 for participation in self.shift.participations.filter(
282 state=AbstractParticipation.States.CONFIRMED
283 )
284 if (dispatched_section_uuid := participation.data.get("dispatched_section_uuid"))
285 and dispatched_section_uuid in section_by_uuid
286 ]
287 # group by section and do some stats
288 sections_with_users = [
289 (
290 section_by_uuid.pop(uuid),
291 [[user["name"], user["relevant_qualifications"]] for user in group],
292 )
293 for uuid, group in groupby(
294 sorted(confirmed_participations, key=itemgetter("uuid")), itemgetter("uuid")
295 )
296 ]
297 # add sections without participants
298 sections_with_users += [(section, None) for section in section_by_uuid.values()]
299 return sections_with_users
300
301 def render_shift_state(self, request):
302 return get_template("basesignup/section_based/fragment_state.html").render(
303 {
304 "shift": self.shift,
305 "requested_participations": (
306 self.shift.participations.filter(state=AbstractParticipation.States.REQUESTED)
307 ),
308 "sections_with_users": self._get_sections_with_users(),
309 "disposition_url": (
310 reverse(
311 "core:shift_disposition",
312 kwargs=dict(pk=self.shift.pk),
313 )
314 if request.user.has_perm("core.change_event", obj=self.shift.event)
315 else None
316 ),
317 }
318 )
319
320 def get_participation_display(self):
321 confirmed_sections_with_users = self._get_sections_with_users()
322 participation_display = []
323 for section, users in confirmed_sections_with_users:
324 if users:
325 participation_display += [[user[0], user[1], section["title"]] for user in users]
326 if not users or len(users) < section["min_count"]:
327 required_qualifications = ", ".join(
328 Qualification.objects.filter(pk__in=section["qualifications"]).values_list(
329 "abbreviation", flat=True
330 )
331 )
332 participation_display += [["", required_qualifications, section["title"]]] * (
333 section["min_count"] - (len(users) if users else 0)
334 )
335 return participation_display
336
[end of ephios/plugins/basesignup/signup/section_based.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/plugins/basesignup/signup/section_based.py b/ephios/plugins/basesignup/signup/section_based.py
--- a/ephios/plugins/basesignup/signup/section_based.py
+++ b/ephios/plugins/basesignup/signup/section_based.py
@@ -134,6 +134,7 @@
for key in ("title", "qualifications", "min_count", "uuid")
}
for form in self.sections_formset
+ if not form.cleaned_data.get("DELETE")
]
return sections
| {"golden_diff": "diff --git a/ephios/plugins/basesignup/signup/section_based.py b/ephios/plugins/basesignup/signup/section_based.py\n--- a/ephios/plugins/basesignup/signup/section_based.py\n+++ b/ephios/plugins/basesignup/signup/section_based.py\n@@ -134,6 +134,7 @@\n for key in (\"title\", \"qualifications\", \"min_count\", \"uuid\")\n }\n for form in self.sections_formset\n+ if not form.cleaned_data.get(\"DELETE\")\n ]\n return sections\n", "issue": "Cannot delete section\nAs a planner, I cannot delete an existing section from a shift with the section_based signup method\n", "before_files": [{"content": "import uuid\nfrom functools import cached_property\nfrom itertools import groupby\nfrom operator import itemgetter\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import redirect\nfrom django.template.loader import get_template\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\nfrom django_select2.forms import Select2MultipleWidget\nfrom dynamic_preferences.registries import global_preferences_registry\n\nfrom ephios.core.models import AbstractParticipation, Qualification\nfrom ephios.core.signup import (\n AbstractParticipant,\n BaseDispositionParticipationForm,\n BaseSignupMethod,\n BaseSignupView,\n ParticipationError,\n)\n\n\ndef sections_participant_qualifies_for(sections, participant: AbstractParticipant):\n available_qualification_ids = set(q.id for q in participant.collect_all_qualifications())\n return [\n section\n for section in sections\n if set(section[\"qualifications\"]) <= available_qualification_ids\n ]\n\n\nclass SectionBasedDispositionParticipationForm(BaseDispositionParticipationForm):\n disposition_participation_template = \"basesignup/section_based/fragment_participant.html\"\n\n section = forms.ChoiceField(\n label=_(\"Section\"),\n required=False, # only required if participation is confirmed\n widget=forms.Select(\n attrs={\"data-show-for-state\": str(AbstractParticipation.States.CONFIRMED)}\n ),\n )\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n sections = self.shift.signup_method.configuration.sections\n qualified_sections = list(\n sections_participant_qualifies_for(\n sections,\n self.instance.participant,\n )\n )\n unqualified_sections = [\n section for section in sections if section not in qualified_sections\n ]\n self.fields[\"section\"].choices = [(\"\", \"---\")]\n if qualified_sections:\n self.fields[\"section\"].choices += [\n (\n _(\"qualified\"),\n [(section[\"uuid\"], section[\"title\"]) for section in qualified_sections],\n )\n ]\n if unqualified_sections:\n self.fields[\"section\"].choices += [\n (\n _(\"unqualified\"),\n [(section[\"uuid\"], section[\"title\"]) for section in unqualified_sections],\n )\n ]\n if preferred_section_uuid := self.instance.data.get(\"preferred_section_uuid\"):\n self.fields[\"section\"].initial = preferred_section_uuid\n self.preferred_section = next(\n filter(lambda section: section[\"uuid\"] == preferred_section_uuid, sections), None\n )\n if initial := self.instance.data.get(\"dispatched_section_uuid\"):\n self.fields[\"section\"].initial = initial\n\n def clean(self):\n super().clean()\n if (\n self.cleaned_data[\"state\"] == AbstractParticipation.States.CONFIRMED\n and not self.cleaned_data[\"section\"]\n ):\n self.add_error(\n \"section\",\n ValidationError(_(\"You must select a section when confirming a 
participation.\")),\n )\n\n def save(self, commit=True):\n self.instance.data[\"dispatched_section_uuid\"] = self.cleaned_data[\"section\"]\n super().save(commit)\n\n\nclass SectionForm(forms.Form):\n title = forms.CharField(label=_(\"Title\"), required=True)\n qualifications = forms.ModelMultipleChoiceField(\n label=_(\"Required Qualifications\"),\n queryset=Qualification.objects.all(),\n widget=Select2MultipleWidget,\n required=False,\n )\n min_count = forms.IntegerField(label=_(\"min amount\"), min_value=0, required=True)\n uuid = forms.CharField(widget=forms.HiddenInput, required=False)\n\n def clean_uuid(self):\n return self.cleaned_data.get(\"uuid\") or uuid.uuid4()\n\n\nSectionsFormset = forms.formset_factory(\n SectionForm, can_delete=True, min_num=1, validate_min=1, extra=0\n)\n\n\nclass SectionBasedConfigurationForm(forms.Form):\n def __init__(self, data=None, **kwargs):\n super().__init__(data, **kwargs)\n self.sections_formset = SectionsFormset(\n data=data,\n initial=self.initial.get(\"sections\", list()),\n prefix=\"sections\",\n )\n\n def clean_sections(self):\n if not self.sections_formset.is_valid():\n raise ValidationError(_(\"The sections aren't configured correctly.\"))\n\n sections = [\n {\n key: form.cleaned_data[key]\n for key in (\"title\", \"qualifications\", \"min_count\", \"uuid\")\n }\n for form in self.sections_formset\n ]\n return sections\n\n\nclass SectionSignupForm(forms.Form):\n section = forms.ChoiceField(\n label=_(\"Preferred Section\"),\n widget=forms.RadioSelect,\n required=False,\n # choices are set as (uuid, title) of section\n )\n\n\nclass SectionBasedSignupView(FormView, BaseSignupView):\n template_name = \"basesignup/section_based/signup.html\"\n\n @cached_property\n def sections_participant_qualifies_for(self):\n return sections_participant_qualifies_for(\n self.method.configuration.sections, self.participant\n )\n\n def get_form(self, form_class=None):\n form = SectionSignupForm(self.request.POST)\n form.fields[\"section\"].choices = [\n (section[\"uuid\"], section[\"title\"])\n for section in self.sections_participant_qualifies_for\n ]\n return form\n\n def get_context_data(self, **kwargs):\n kwargs.setdefault(\"shift\", self.shift)\n kwargs.setdefault(\n \"unqualified_sections\",\n [\n section[\"title\"]\n for section in self.method.configuration.sections\n if section not in self.sections_participant_qualifies_for\n ],\n )\n return super().get_context_data(**kwargs)\n\n def form_valid(self, form):\n return super().signup_pressed(preferred_section_uuid=form.cleaned_data.get(\"section\"))\n\n def signup_pressed(self, **kwargs):\n if not self.method.configuration.choose_preferred_section:\n # do straight signup if choosing is not enabled\n return super().signup_pressed(**kwargs)\n\n if not self.method.can_sign_up(self.participant):\n # redirect a misled request\n messages.warning(self.request, _(\"You can not sign up for this shift.\"))\n return redirect(self.participant.reverse_event_detail(self.shift.event))\n\n # all good, redirect to the form\n return redirect(self.participant.reverse_signup_action(self.shift))\n\n\nclass SectionBasedSignupMethod(BaseSignupMethod):\n slug = \"section_based\"\n verbose_name = _(\"Apply for sections\")\n description = _(\n \"\"\"This method lets you define sections for which people can choose from.\n Sections contain qualifications that helpers need to fulfil.\"\"\"\n )\n registration_button_text = _(\"Request\")\n signup_success_message = _(\"You have successfully requested a participation for {shift}.\")\n 
signup_error_message = _(\"Requesting a participation failed: {error}\")\n\n configuration_form_class = SectionBasedConfigurationForm\n signup_view_class = SectionBasedSignupView\n\n disposition_participation_form_class = SectionBasedDispositionParticipationForm\n\n def get_configuration_fields(self):\n return {\n **super().get_configuration_fields(),\n \"choose_preferred_section\": {\n \"formfield\": forms.BooleanField(\n label=_(\"Ask participants for a preferred section\"),\n help_text=_(\"This only makes sense if you configure multiple sections.\"),\n widget=forms.CheckboxInput,\n required=False,\n ),\n \"default\": False,\n },\n \"sections\": {\n \"formfield\": forms.Field(\n label=_(\"Structure\"),\n widget=forms.HiddenInput,\n required=False,\n ),\n \"default\": [],\n },\n }\n\n def get_participant_count_bounds(self):\n return sum(section.get(\"min_count\") or 0 for section in self.configuration.sections), None\n\n @staticmethod\n def check_qualification(method, participant):\n if not sections_participant_qualifies_for(method.configuration.sections, participant):\n return ParticipationError(_(\"You are not qualified.\"))\n\n @property\n def _signup_checkers(self):\n return super()._signup_checkers + [self.check_qualification]\n\n # pylint: disable=arguments-differ\n def _configure_participation(\n self, participation: AbstractParticipation, preferred_section_uuid=None, **kwargs\n ) -> AbstractParticipation:\n participation.data[\"preferred_section_uuid\"] = preferred_section_uuid\n if preferred_section_uuid:\n # reset dispatch decision, as that would have overwritten the preferred choice\n participation.data[\"dispatched_section_uuid\"] = None\n participation.state = AbstractParticipation.States.REQUESTED\n return participation\n\n def render_configuration_form(self, *args, form=None, **kwargs):\n form = form or self.get_configuration_form(*args, **kwargs)\n template = get_template(\"basesignup/section_based/configuration_form.html\").render(\n {\"form\": form}\n )\n return template\n\n def _get_sections_with_users(self):\n relevant_qualification_categories = global_preferences_registry.manager()[\n \"general__relevant_qualification_categories\"\n ]\n section_by_uuid = {section[\"uuid\"]: section for section in self.configuration.sections}\n # get name and preferred section uuid for confirmed participants\n # if they have a section assigned and we have that section on record\n confirmed_participations = [\n {\n \"name\": str(participation.participant),\n \"relevant_qualifications\": \", \".join(\n participation.participant.qualifications.filter(\n category__in=relevant_qualification_categories\n ).values_list(\"abbreviation\", flat=True)\n ),\n \"uuid\": dispatched_section_uuid,\n }\n for participation in self.shift.participations.filter(\n state=AbstractParticipation.States.CONFIRMED\n )\n if (dispatched_section_uuid := participation.data.get(\"dispatched_section_uuid\"))\n and dispatched_section_uuid in section_by_uuid\n ]\n # group by section and do some stats\n sections_with_users = [\n (\n section_by_uuid.pop(uuid),\n [[user[\"name\"], user[\"relevant_qualifications\"]] for user in group],\n )\n for uuid, group in groupby(\n sorted(confirmed_participations, key=itemgetter(\"uuid\")), itemgetter(\"uuid\")\n )\n ]\n # add sections without participants\n sections_with_users += [(section, None) for section in section_by_uuid.values()]\n return sections_with_users\n\n def render_shift_state(self, request):\n return 
get_template(\"basesignup/section_based/fragment_state.html\").render(\n {\n \"shift\": self.shift,\n \"requested_participations\": (\n self.shift.participations.filter(state=AbstractParticipation.States.REQUESTED)\n ),\n \"sections_with_users\": self._get_sections_with_users(),\n \"disposition_url\": (\n reverse(\n \"core:shift_disposition\",\n kwargs=dict(pk=self.shift.pk),\n )\n if request.user.has_perm(\"core.change_event\", obj=self.shift.event)\n else None\n ),\n }\n )\n\n def get_participation_display(self):\n confirmed_sections_with_users = self._get_sections_with_users()\n participation_display = []\n for section, users in confirmed_sections_with_users:\n if users:\n participation_display += [[user[0], user[1], section[\"title\"]] for user in users]\n if not users or len(users) < section[\"min_count\"]:\n required_qualifications = \", \".join(\n Qualification.objects.filter(pk__in=section[\"qualifications\"]).values_list(\n \"abbreviation\", flat=True\n )\n )\n participation_display += [[\"\", required_qualifications, section[\"title\"]]] * (\n section[\"min_count\"] - (len(users) if users else 0)\n )\n return participation_display\n", "path": "ephios/plugins/basesignup/signup/section_based.py"}]} | 3,915 | 124 |
gh_patches_debug_29592 | rasdani/github-patches | git_diff | e-valuation__EvaP-1484 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Locked questionnaires failing in editor form
#1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form.
Steps to reproduce:
1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation.
2. Enable the evaluation for editor review.
3. As editor, open the evaluation form and try to save it. Saving will fail with an error for the field "General questionnaires" ("This field is required.").
The locked questionnaire should count as a selected questionnaire and the form should be saved.
A test should be added for this use case.
</issue>
<code>
[start of evap/contributor/forms.py]
1 from datetime import datetime, timedelta
2 import logging
3
4 from django import forms
5 from django.conf import settings
6 from django.db.models import Q
7 from django.forms.widgets import CheckboxSelectMultiple
8 from django.utils.translation import gettext_lazy as _
9 from evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField
10 from evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile
11 from evap.evaluation.tools import date_to_datetime
12 from evap.staff.forms import ContributionForm
13
14 logger = logging.getLogger(__name__)
15
16
17 class EvaluationForm(forms.ModelForm):
18 general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
19 course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
20 name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False)
21 name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False)
22
23 class Meta:
24 model = Evaluation
25 fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')
26
27 def __init__(self, *args, **kwargs):
28 super().__init__(*args, **kwargs)
29
30 self.fields['name_de_field'].initial = self.instance.full_name_de
31 self.fields['name_en_field'].initial = self.instance.full_name_en
32
33 self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(
34 Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()
35
36 self.fields['vote_start_datetime'].localize = True
37 self.fields['vote_end_date'].localize = True
38
39 if self.instance.general_contribution:
40 self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]
41
42 if not self.instance.allow_editors_to_edit:
43 for field in self._meta.fields:
44 self.fields[field].disabled = True
45
46 def clean(self):
47 super().clean()
48
49 vote_start_datetime = self.cleaned_data.get('vote_start_datetime')
50 vote_end_date = self.cleaned_data.get('vote_end_date')
51 if vote_start_datetime and vote_end_date:
52 if vote_start_datetime.date() > vote_end_date:
53 self.add_error("vote_start_datetime", "")
54 self.add_error("vote_end_date", _("The first day of evaluation must be before the last one."))
55
56 def clean_vote_end_date(self):
57 vote_end_date = self.cleaned_data.get('vote_end_date')
58
59 # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.
60 # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.
61 if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():
62 raise forms.ValidationError(_("The last day of evaluation must be in the future."))
63 return vote_end_date
64
65 def clean_general_questionnaires(self):
66 # Ensure all locked questionnaires still have the same status (included or not)
67 locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)
68
69 not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]
70 locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]
71
72 return not_locked + locked
73
74 def save(self, *args, **kw):
75 evaluation = super().save(*args, **kw)
76 evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))
77 return evaluation
78
79
80 class EditorContributionForm(ContributionForm):
81 def __init__(self, *args, **kwargs):
82 super().__init__(*args, **kwargs)
83
84 existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None
85
86 self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(
87 Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()
88 self.fields['contributor'].queryset = UserProfile.objects.filter(
89 (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)
90 )
91
92
93 class DelegatesForm(forms.ModelForm):
94 delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),
95 required=False)
96
97 class Meta:
98 model = UserProfile
99 fields = ('delegates',)
100 field_classes = {
101 'delegates': UserModelMultipleChoiceField,
102 }
103
104 def __init__(self, *args, **kwargs):
105 super().__init__(*args, **kwargs)
106
107 def save(self, *args, **kw):
108 super().save(*args, **kw)
109 logger.info('User "{}" edited the settings.'.format(self.instance.email))
110
111
112 class DelegateSelectionForm(forms.Form):
113 delegate_to = UserModelChoiceField(label=_("Delegate to"),
114 queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))
115
[end of evap/contributor/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py
--- a/evap/contributor/forms.py
+++ b/evap/contributor/forms.py
@@ -15,7 +15,7 @@
class EvaluationForm(forms.ModelForm):
- general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
+ general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False)
name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False)
@@ -64,10 +64,14 @@
def clean_general_questionnaires(self):
# Ensure all locked questionnaires still have the same status (included or not)
- locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)
+ not_locked = []
+ if self.cleaned_data.get('general_questionnaires'):
+ not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False))
- not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]
- locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]
+ locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))
+
+ if not not_locked + locked:
+ self.add_error("general_questionnaires", _("At least one questionnaire must be selected."))
return not_locked + locked
| {"golden_diff": "diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py\n--- a/evap/contributor/forms.py\n+++ b/evap/contributor/forms.py\n@@ -15,7 +15,7 @@\n \n \n class EvaluationForm(forms.ModelForm):\n- general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n+ general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, required=False, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n@@ -64,10 +64,14 @@\n \n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n- locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)\n+ not_locked = []\n+ if self.cleaned_data.get('general_questionnaires'):\n+ not_locked = list(self.cleaned_data.get('general_questionnaires').filter(is_locked=False))\n \n- not_locked = [q for q in self.cleaned_data.get('general_questionnaires') if q not in locked_qs]\n- locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]\n+ locked = list(self.instance.general_contribution.questionnaires.filter(is_locked=True))\n+\n+ if not not_locked + locked:\n+ self.add_error(\"general_questionnaires\", _(\"At least one questionnaire must be selected.\"))\n \n return not_locked + locked\n", "issue": "Locked questionnaires failing in editor form\n#1445 introduced locked questionnaires. However, they are not dealt with correctly in the evaluation editor form. When initially opening the form, the locked questionnaires are correctly selected but are not handled correctly when saving the form.\r\n\r\nSteps to reproduce:\r\n1. As manager, assign a locked questionnaire as the only general questionnaire for an evaluation.\r\n2. Enable the evaluation for editor review.\r\n3. As editor, open the evaluation form and try to save it. 
Saving will fail with an error for the field \"General questionnaires\" (\"This field is required.\").\r\n\r\nThe locked questionnaire should count as a selected questionnaire and the form should be saved.\r\nA test should be added for this use case.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nimport logging\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom django.utils.translation import gettext_lazy as _\nfrom evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField\nfrom evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile\nfrom evap.evaluation.tools import date_to_datetime\nfrom evap.staff.forms import ContributionForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass EvaluationForm(forms.ModelForm):\n general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n\n class Meta:\n model = Evaluation\n fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['name_de_field'].initial = self.instance.full_name_de\n self.fields['name_en_field'].initial = self.instance.full_name_en\n\n self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()\n\n self.fields['vote_start_datetime'].localize = True\n self.fields['vote_end_date'].localize = True\n\n if self.instance.general_contribution:\n self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n if not self.instance.allow_editors_to_edit:\n for field in self._meta.fields:\n self.fields[field].disabled = True\n\n def clean(self):\n super().clean()\n\n vote_start_datetime = self.cleaned_data.get('vote_start_datetime')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_datetime and vote_end_date:\n if vote_start_datetime.date() > vote_end_date:\n self.add_error(\"vote_start_datetime\", \"\")\n self.add_error(\"vote_end_date\", _(\"The first day of evaluation must be before the last one.\"))\n\n def clean_vote_end_date(self):\n vote_end_date = self.cleaned_data.get('vote_end_date')\n\n # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.\n # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.\n if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():\n raise forms.ValidationError(_(\"The last day of evaluation must be in the future.\"))\n return vote_end_date\n\n def clean_general_questionnaires(self):\n # Ensure all locked questionnaires still have the same status (included or not)\n locked_qs = self.fields['general_questionnaires'].queryset.filter(is_locked=True)\n\n not_locked = [q for q in 
self.cleaned_data.get('general_questionnaires') if q not in locked_qs]\n locked = [q.pk for q in self.instance.general_contribution.questionnaires.filter(is_locked=True)]\n\n return not_locked + locked\n\n def save(self, *args, **kw):\n evaluation = super().save(*args, **kw)\n evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))\n return evaluation\n\n\nclass EditorContributionForm(ContributionForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None\n\n self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(\n Q(visibility=Questionnaire.Visibility.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()\n self.fields['contributor'].queryset = UserProfile.objects.filter(\n (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)\n )\n\n\nclass DelegatesForm(forms.ModelForm):\n delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),\n required=False)\n\n class Meta:\n model = UserProfile\n fields = ('delegates',)\n field_classes = {\n 'delegates': UserModelMultipleChoiceField,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n logger.info('User \"{}\" edited the settings.'.format(self.instance.email))\n\n\nclass DelegateSelectionForm(forms.Form):\n delegate_to = UserModelChoiceField(label=_(\"Delegate to\"),\n queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))\n", "path": "evap/contributor/forms.py"}]} | 2,046 | 384 |
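The golden diff for the EvaP entry above boils down to one rule inside `clean_general_questionnaires` (together with making the form field non-required): locked questionnaires already attached to the evaluation count as selected, and validation only fails when neither they nor any editor-submitted questionnaire is present. The sketch below restates that rule as a standalone function over plain lists so it can be run without Django; the `Questionnaire` dataclass and the function name are illustrative stand-ins, not EvaP's actual API.

```python
# Standalone restatement of the selection rule from the patch above.
# "Questionnaire" is a plain stand-in, not the Django model.
from dataclasses import dataclass
from typing import List


@dataclass(frozen=True)
class Questionnaire:
    name: str
    is_locked: bool


def merge_general_questionnaires(
    submitted: List[Questionnaire], already_assigned: List[Questionnaire]
) -> List[Questionnaire]:
    """Mirror clean_general_questionnaires(): keep unlocked submissions plus locked assignments."""
    not_locked = [q for q in submitted if not q.is_locked]
    locked = [q for q in already_assigned if q.is_locked]
    if not (not_locked + locked):
        raise ValueError("At least one questionnaire must be selected.")
    return not_locked + locked


if __name__ == "__main__":
    locked_q = Questionnaire("locked general", is_locked=True)
    open_q = Questionnaire("open general", is_locked=False)

    # The regression from the issue: a locked questionnaire is the only one
    # assigned and the editor submits nothing new -> the form must still validate.
    assert merge_general_questionnaires([], [locked_q]) == [locked_q]

    # Normal case: an unlocked questionnaire chosen by the editor is kept.
    assert merge_general_questionnaires([open_q], []) == [open_q]

    # A truly empty selection is still rejected.
    try:
        merge_general_questionnaires([], [])
    except ValueError:
        pass
    else:
        raise AssertionError("empty selection should have been rejected")
    print("selection rule behaves as described in the issue")
```

A real regression test, as requested in the issue, would exercise `EvaluationForm` itself inside EvaP's Django test suite with a locked questionnaire as the only general questionnaire and assert that the form saves.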
gh_patches_debug_25270 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2635 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
[DOC]: the sphinx theme is too old
### 📚 The doc issue
As stated in #2579, we want to use Read the Docs to host our documentation. In this way, tutorials and API documentation will be reachable from a single entry point. This issue will mainly discuss the appearance of the RTD website. Ideally, we should use Tailwind for style consistency. However, it can take some time to implement a Tailwind-based theme; therefore, we should first use an existing theme that looks more modern.
</issue>
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 import datetime
10 # If extensions (or modules to document with autodoc) are in another directory,
11 # add these directories to sys.path here. If the directory is relative to the
12 # documentation root, use os.path.abspath to make it absolute, like shown here.
13 #
14 import os
15 import sys
16
17 sys.path.insert(0, os.path.abspath('..'))
18
19 # -- Project information -----------------------------------------------------
20
21 project = 'Colossal-AI'
22 copyright = f'{datetime.datetime.now().year}, HPC-AI Tech'
23 author = 'HPC-AI Technology Inc.'
24
25 # The full version, including alpha/beta/rc tags
26 release = '0.0.1'
27
28
29 # -- General configuration ---------------------------------------------------
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = [
35 'sphinx.ext.autodoc',
36 'sphinx.ext.mathjax',
37 'sphinx.ext.napoleon',
38 'sphinx.ext.linkcode',
39 'myst_parser',
40 ]
41
42 # Disable docstring inheritance
43 autodoc_inherit_docstrings = False
44
45 # Disable displaying type annotations, these can be very verbose
46 autodoc_typehints = 'none'
47
48 # Enable overriding of function signatures in the first line of the docstring.
49 autodoc_docstring_signature = True
50 autodoc_default_options = {
51 'member-order': 'bysource',
52 }
53
54 # Add any paths that contain templates here, relative to this directory.
55 templates_path = ['_templates']
56
57 # List of patterns, relative to source directory, that match files and
58 # directories to ignore when looking for source files.
59 # This pattern also affects html_static_path and html_extra_path.
60 exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']
61
62 # -- Options for HTML output -------------------------------------------------
63
64 # The theme to use for HTML and HTML Help pages. See the documentation for
65 # a list of builtin themes.
66 #
67 html_theme = 'sphinx_rtd_theme'
68 html_show_sourcelink = False
69 html_theme_options = {
70 'navigation_depth': 3,
71 }
72
73 html_context = {
74 'display_github': False,
75 'github_user': 'hpcaitech',
76 'github_repo': 'ColossalAI',
77 # 'github_version': 'master/docs/',
78 }
79
80 # Add any paths that contain custom static files (such as style sheets) here,
81 # relative to this directory. They are copied after the builtin static files,
82 # so a file named "default.css" will overwrite the builtin "default.css".
83 html_static_path = ['_static']
84
85 html_css_files = [
86 'css/rtd_theme.css',
87 ]
88
89 # -- Extension configuration -------------------------------------------------
90 source_suffix = ['.rst', '.md', '.MD']
91
92 import inspect
93 import colossalai
94 def linkcode_resolve(domain, info):
95 """
96 Determine the URL corresponding to Python object
97 """
98 if domain != 'py':
99 return None
100
101 modname = info['module']
102 fullname = info['fullname']
103
104 submod = sys.modules.get(modname)
105 if submod is None:
106 return None
107
108 obj = submod
109 for part in fullname.split('.'):
110 try:
111 obj = getattr(obj, part)
112 except Exception:
113 return None
114
115 try:
116 fn = inspect.getsourcefile(obj)
117 except Exception:
118 fn = None
119 if not fn:
120 return None
121
122 try:
123 source, lineno = inspect.findsource(obj)
124 except Exception:
125 lineno = None
126
127 if lineno:
128 linespec = "#L%d" % (lineno + 1)
129 else:
130 linespec = ""
131
132 fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__))
133
134 github = "https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}"
135 return github.format(fn, linespec)
136
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -23,8 +23,7 @@
author = 'HPC-AI Technology Inc.'
# The full version, including alpha/beta/rc tags
-release = '0.0.1'
-
+# release = '0.0.1'
# -- General configuration ---------------------------------------------------
@@ -64,14 +63,14 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'sphinx_rtd_theme'
+html_theme = 'sphinx_book_theme'
html_show_sourcelink = False
html_theme_options = {
'navigation_depth': 3,
}
html_context = {
- 'display_github': False,
+ 'display_github': True,
'github_user': 'hpcaitech',
'github_repo': 'ColossalAI',
# 'github_version': 'master/docs/',
@@ -90,7 +89,10 @@
source_suffix = ['.rst', '.md', '.MD']
import inspect
+
import colossalai
+
+
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -23,8 +23,7 @@\n author = 'HPC-AI Technology Inc.'\n \n # The full version, including alpha/beta/rc tags\n-release = '0.0.1'\n-\n+# release = '0.0.1'\n \n # -- General configuration ---------------------------------------------------\n \n@@ -64,14 +63,14 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'sphinx_rtd_theme'\n+html_theme = 'sphinx_book_theme'\n html_show_sourcelink = False\n html_theme_options = {\n 'navigation_depth': 3,\n }\n \n html_context = {\n- 'display_github': False,\n+ 'display_github': True,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n@@ -90,7 +89,10 @@\n source_suffix = ['.rst', '.md', '.MD']\n \n import inspect\n+\n import colossalai\n+\n+\n def linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[DOC]: the sphinx theme is too old\n### \ud83d\udcda The doc issue\n\nAs stated in #2579 , we want to use Read the Docs to host our documentation. In this way, tutorials and API documentations will be visited from a single entry. This issue will mainly discuss the appearance of the RTD website. Ideally, we should use Tailwind for style consistency. However, it can take some time to implement a tailwind-based theme, therefore, we should use an existing theme which looks more modern first.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Colossal-AI'\ncopyright = f'{datetime.datetime.now().year}, HPC-AI Tech'\nauthor = 'HPC-AI Technology Inc.'\n\n# The full version, including alpha/beta/rc tags\nrelease = '0.0.1'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.linkcode',\n 'myst_parser',\n]\n\n# Disable docstring inheritance\nautodoc_inherit_docstrings = False\n\n# Disable displaying type annotations, these can be very verbose\nautodoc_typehints = 'none'\n\n# Enable overriding of function signatures in the first line of the docstring.\nautodoc_docstring_signature = True\nautodoc_default_options = {\n 'member-order': 'bysource',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['.build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_show_sourcelink = False\nhtml_theme_options = {\n 'navigation_depth': 3,\n}\n\nhtml_context = {\n 'display_github': False,\n 'github_user': 'hpcaitech',\n 'github_repo': 'ColossalAI',\n # 'github_version': 'master/docs/',\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'css/rtd_theme.css',\n]\n\n# -- Extension configuration -------------------------------------------------\nsource_suffix = ['.rst', '.md', '.MD']\n\nimport inspect\nimport colossalai\ndef linkcode_resolve(domain, info):\n \"\"\"\n Determine the URL corresponding to Python object\n \"\"\"\n if domain != 'py':\n return None\n\n modname = info['module']\n fullname = info['fullname']\n\n submod = sys.modules.get(modname)\n if submod is None:\n return None\n\n obj = submod\n for part in fullname.split('.'):\n try:\n obj = getattr(obj, part)\n except Exception:\n return None\n\n try:\n fn = inspect.getsourcefile(obj)\n except Exception:\n fn = None\n if not fn:\n return None\n\n try:\n source, lineno = inspect.findsource(obj)\n except Exception:\n lineno = None\n\n if lineno:\n linespec = \"#L%d\" % (lineno + 1)\n else:\n linespec = \"\"\n\n fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__))\n\n github = \"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}\"\n return github.format(fn, linespec)\n", "path": "docs/conf.py"}]} | 1,863 | 279 |
gh_patches_debug_25916 | rasdani/github-patches | git_diff | nf-core__tools-381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
problem with nfcore_cache.sqlite within /tmp
Hi all,
I think it would be a nice idea to have the nfcore_cache.sqlite within a subfolder in tmp, because if two users use the program at the same time the file permissions will prevent the second user from using the tool.
For example, I cannot even use nf-core --help.
Luca
</issue>
<code>
[start of nf_core/utils.py]
1 #!/usr/bin/env python
2 """
3 Common utility functions for the nf-core python package.
4 """
5
6 import datetime
7 import json
8 import logging
9 import os
10 import subprocess
11 import tempfile
12
13 def fetch_wf_config(wf_path, wf=None):
14 """Uses Nextflow to retrieve the the configuration variables
15 from a Nextflow workflow.
16
17 Args:
18 wf_path (str): Nextflow workflow file system path.
19
20 Returns:
21 dict: Workflow configuration settings.
22 """
23
24 config = dict()
25 cache_fn = None
26 cache_basedir = None
27 cache_path = None
28
29 # Build a cache directory if we can
30 if os.path.isdir(os.path.join(os.getenv("HOME"), '.nextflow')):
31 cache_basedir = os.path.join(os.getenv("HOME"), '.nextflow', 'nf-core')
32 if not os.path.isdir(cache_basedir):
33 os.mkdir(cache_basedir)
34
35 # If we're given a workflow object with a commit, see if we have a cached copy
36 if cache_basedir and wf and wf.full_name and wf.commit_sha:
37 cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha)
38 cache_path = os.path.join(cache_basedir, cache_fn)
39 if os.path.isfile(cache_path):
40 logging.debug("Found a config cache, loading: {}".format(cache_path))
41 with open(cache_path, 'r') as fh:
42 config = json.load(fh)
43 return config
44
45
46 # Call `nextflow config` and pipe stderr to /dev/null
47 try:
48 with open(os.devnull, 'w') as devnull:
49 nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull)
50 except OSError as e:
51 if e.errno == os.errno.ENOENT:
52 raise AssertionError("It looks like Nextflow is not installed. It is required for most nf-core functions.")
53 except subprocess.CalledProcessError as e:
54 raise AssertionError("`nextflow config` returned non-zero error code: %s,\n %s", e.returncode, e.output)
55 else:
56 for l in nfconfig_raw.splitlines():
57 ul = l.decode('utf-8')
58 k, v = ul.split(' = ', 1)
59 config[k] = v
60
61 # If we can, save a cached copy
62 if cache_path:
63 logging.debug("Saving config cache: {}".format(cache_path))
64 with open(cache_path, 'w') as fh:
65 json.dump(config, fh, indent=4)
66
67 return config
68
69
70 def setup_requests_cachedir():
71 """Sets up local caching for faster remote HTTP requests.
72
73 Caching directory will be generated by tempfile.gettempdir() under
74 a nfcore_cache subdir.
75 """
76 # Only import it if we need it
77 import requests_cache
78
79 cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')
80 if not os.path.exists(cachedir):
81 os.mkdir(cachedir)
82 requests_cache.install_cache(
83 os.path.join(cachedir, 'nfcore_cache'),
84 expire_after=datetime.timedelta(hours=1),
85 backend='sqlite',
86 )
87 # Make world-writeable so that multi-user installations work
88 os.chmod(cachedir, 0o777)
89 os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)
90
[end of nf_core/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nf_core/utils.py b/nf_core/utils.py
--- a/nf_core/utils.py
+++ b/nf_core/utils.py
@@ -8,7 +8,6 @@
import logging
import os
import subprocess
-import tempfile
def fetch_wf_config(wf_path, wf=None):
"""Uses Nextflow to retrieve the the configuration variables
@@ -70,20 +69,18 @@
def setup_requests_cachedir():
"""Sets up local caching for faster remote HTTP requests.
- Caching directory will be generated by tempfile.gettempdir() under
- a nfcore_cache subdir.
+ Caching directory will be set up in the user's home directory under
+ a .nfcore_cache subdir.
"""
# Only import it if we need it
import requests_cache
+
- cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')
+ cachedir = os.path.join(os.getenv("HOME"), os.path.join('.nfcore', 'cache'))
if not os.path.exists(cachedir):
- os.mkdir(cachedir)
+ os.makedirs(cachedir)
requests_cache.install_cache(
- os.path.join(cachedir, 'nfcore_cache'),
+ os.path.join(cachedir, 'github_info'),
expire_after=datetime.timedelta(hours=1),
backend='sqlite',
)
- # Make world-writeable so that multi-user installations work
- os.chmod(cachedir, 0o777)
- os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)
| {"golden_diff": "diff --git a/nf_core/utils.py b/nf_core/utils.py\n--- a/nf_core/utils.py\n+++ b/nf_core/utils.py\n@@ -8,7 +8,6 @@\n import logging\n import os\n import subprocess\n-import tempfile\n \n def fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n@@ -70,20 +69,18 @@\n def setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n \n- Caching directory will be generated by tempfile.gettempdir() under\n- a nfcore_cache subdir.\n+ Caching directory will be set up in the user's home directory under\n+ a .nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n+ \n \n- cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')\n+ cachedir = os.path.join(os.getenv(\"HOME\"), os.path.join('.nfcore', 'cache'))\n if not os.path.exists(cachedir):\n- os.mkdir(cachedir)\n+ os.makedirs(cachedir)\n requests_cache.install_cache(\n- os.path.join(cachedir, 'nfcore_cache'),\n+ os.path.join(cachedir, 'github_info'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n- # Make world-writeable so that multi-user installations work\n- os.chmod(cachedir, 0o777)\n- os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)\n", "issue": "problem with nfcore_cache.sqlite within /tmp\nHi all,\r\nI think will be a nice idea to have the nfcore_cache.sqlite within a subfolder in tmp because if two users use the program at the same time the privileges will prevent to use the tool.\r\n\r\nFor example I cannot even use nf-core --help \r\n\r\nLuca\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommon utility functions for the nf-core python package.\n\"\"\"\n\nimport datetime\nimport json\nimport logging\nimport os\nimport subprocess\nimport tempfile\n\ndef fetch_wf_config(wf_path, wf=None):\n \"\"\"Uses Nextflow to retrieve the the configuration variables\n from a Nextflow workflow.\n\n Args:\n wf_path (str): Nextflow workflow file system path.\n\n Returns:\n dict: Workflow configuration settings.\n \"\"\"\n\n config = dict()\n cache_fn = None\n cache_basedir = None\n cache_path = None\n\n # Build a cache directory if we can\n if os.path.isdir(os.path.join(os.getenv(\"HOME\"), '.nextflow')):\n cache_basedir = os.path.join(os.getenv(\"HOME\"), '.nextflow', 'nf-core')\n if not os.path.isdir(cache_basedir):\n os.mkdir(cache_basedir)\n\n # If we're given a workflow object with a commit, see if we have a cached copy\n if cache_basedir and wf and wf.full_name and wf.commit_sha:\n cache_fn = '{}-{}.json'.format(wf.full_name.replace(os.path.sep, '-'), wf.commit_sha)\n cache_path = os.path.join(cache_basedir, cache_fn)\n if os.path.isfile(cache_path):\n logging.debug(\"Found a config cache, loading: {}\".format(cache_path))\n with open(cache_path, 'r') as fh:\n config = json.load(fh)\n return config\n\n\n # Call `nextflow config` and pipe stderr to /dev/null\n try:\n with open(os.devnull, 'w') as devnull:\n nfconfig_raw = subprocess.check_output(['nextflow', 'config', '-flat', wf_path], stderr=devnull)\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n raise AssertionError(\"It looks like Nextflow is not installed. 
It is required for most nf-core functions.\")\n except subprocess.CalledProcessError as e:\n raise AssertionError(\"`nextflow config` returned non-zero error code: %s,\\n %s\", e.returncode, e.output)\n else:\n for l in nfconfig_raw.splitlines():\n ul = l.decode('utf-8')\n k, v = ul.split(' = ', 1)\n config[k] = v\n\n # If we can, save a cached copy\n if cache_path:\n logging.debug(\"Saving config cache: {}\".format(cache_path))\n with open(cache_path, 'w') as fh:\n json.dump(config, fh, indent=4)\n\n return config\n\n\ndef setup_requests_cachedir():\n \"\"\"Sets up local caching for faster remote HTTP requests.\n\n Caching directory will be generated by tempfile.gettempdir() under\n a nfcore_cache subdir.\n \"\"\"\n # Only import it if we need it\n import requests_cache\n\n cachedir = os.path.join(tempfile.gettempdir(), 'nfcore_cache')\n if not os.path.exists(cachedir):\n os.mkdir(cachedir)\n requests_cache.install_cache(\n os.path.join(cachedir, 'nfcore_cache'),\n expire_after=datetime.timedelta(hours=1),\n backend='sqlite',\n )\n # Make world-writeable so that multi-user installations work\n os.chmod(cachedir, 0o777)\n os.chmod(os.path.join(cachedir, 'nfcore_cache.sqlite'), 0o777)\n", "path": "nf_core/utils.py"}]} | 1,510 | 352 |
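The accepted nf-core fix above moves the requests cache from a world-writable directory under /tmp to a per-user path below $HOME, which removes the permission clash between users without any `chmod 0o777`. The stdlib-only sketch below illustrates just the directory-setup part of that idea; it leaves out the `requests_cache.install_cache(...)` call so it stays runnable anywhere, and using `mkdir(parents=True, exist_ok=True)` instead of an exists-check followed by makedirs is an editorial suggestion to avoid a race on first run, not part of the golden diff.

```python
# Per-user cache directory, in the spirit of the accepted nf-core fix.
import os
from pathlib import Path


def nfcore_cache_dir() -> Path:
    """Return (and create) a per-user cache directory under the user's home."""
    cachedir = Path(os.path.expanduser("~")) / ".nfcore" / "cache"
    # exist_ok=True keeps concurrent first runs by the same user safe, and no
    # world-writable permissions are needed because every user gets their own dir.
    cachedir.mkdir(parents=True, exist_ok=True)
    return cachedir


if __name__ == "__main__":
    print("per-user nf-core cache directory:", nfcore_cache_dir())
```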
gh_patches_debug_22168 | rasdani/github-patches | git_diff | freedomofpress__securedrop-7120 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
development server no longer hot-reloads on file changes
## Description
After #6563, `make dev` no longer hot-reloads on file changes, because the immutable `SDConfig` doesn't carry the `env` attribute from `config.py`. (This regression was last encountered in #5594.)
## Steps to Reproduce
Same as #5594.
## Expected Behavior
Same as #5594.
## Actual Behavior
Same as #5594.
## Comments
The fix in <https://github.com/freedomofpress/securedrop/issues/6669#issuecomment-1526129678> is trivial, but I want to take a moment to see if there's an easy way to test for this regression (fool me once, fool me twice, etc.).
</issue>
<code>
[start of securedrop/sdconfig.py]
1 from dataclasses import dataclass
2 from importlib import import_module
3 from pathlib import Path
4 from typing import Dict, List, Optional
5
6 FALLBACK_LOCALE = "en_US"
7
8 DEFAULT_SECUREDROP_ROOT = Path(__file__).absolute().parent
9
10
11 @dataclass(frozen=True)
12 class _FlaskAppConfig:
13 """Config fields that are common to the Journalist and Source interfaces."""
14
15 SESSION_COOKIE_NAME: str
16 SECRET_KEY: str
17
18 DEBUG: bool
19 TESTING: bool
20 WTF_CSRF_ENABLED: bool
21
22 # Use MAX_CONTENT_LENGTH to mimic the behavior of Apache's LimitRequestBody
23 # in the development environment. See #1714.
24 MAX_CONTENT_LENGTH: int
25
26 # This is recommended for performance, and also resolves #369
27 USE_X_SENDFILE: bool
28
29
30 @dataclass(frozen=True)
31 class JournalistInterfaceConfig(_FlaskAppConfig):
32 # Additional config for JI Redis sessions
33 SESSION_SIGNER_SALT: str = "js_session"
34 SESSION_KEY_PREFIX: str = "js_session:"
35 SESSION_LIFETIME: int = 2 * 60 * 60
36 SESSION_RENEW_COUNT: int = 5
37
38
39 @dataclass(frozen=True)
40 class SourceInterfaceConfig(_FlaskAppConfig):
41 pass
42
43
44 @dataclass(frozen=True)
45 class SecureDropConfig:
46 JOURNALIST_APP_FLASK_CONFIG_CLS: JournalistInterfaceConfig
47 SOURCE_APP_FLASK_CONFIG_CLS: SourceInterfaceConfig
48
49 GPG_KEY_DIR: Path
50 JOURNALIST_KEY: str
51 SCRYPT_GPG_PEPPER: str
52 SCRYPT_ID_PEPPER: str
53 SCRYPT_PARAMS: Dict[str, int]
54
55 SECUREDROP_DATA_ROOT: Path
56
57 DATABASE_FILE: Path # Path to the sqlite DB file
58
59 SECUREDROP_ROOT: Path
60 STATIC_DIR: Path
61 TRANSLATION_DIRS: Path
62 SOURCE_TEMPLATES_DIR: Path
63 JOURNALIST_TEMPLATES_DIR: Path
64 NOUNS: Path
65 ADJECTIVES: Path
66
67 DEFAULT_LOCALE: str
68 SUPPORTED_LOCALES: List[str]
69
70 SESSION_EXPIRATION_MINUTES: float
71
72 RQ_WORKER_NAME: str
73
74 @property
75 def TEMP_DIR(self) -> Path:
76 # We use a directory under the SECUREDROP_DATA_ROOT instead of `/tmp` because
77 # we need to expose this directory via X-Send-File, and want to minimize the
78 # potential for exposing unintended files.
79 return self.SECUREDROP_DATA_ROOT / "tmp"
80
81 @property
82 def STORE_DIR(self) -> Path:
83 return self.SECUREDROP_DATA_ROOT / "store"
84
85 @property
86 def DATABASE_URI(self) -> str:
87 return f"sqlite:///{self.DATABASE_FILE}"
88
89 @classmethod
90 def get_current(cls) -> "SecureDropConfig":
91 global _current_config
92 if _current_config is None:
93 # Retrieve the config by parsing it from ./config.py
94 _current_config = _parse_config_from_file(config_module_name="config")
95 return _current_config
96
97
98 _current_config: Optional[SecureDropConfig] = None
99
100
101 def _parse_config_from_file(config_module_name: str) -> SecureDropConfig:
102 """Parse the config from a config.py file."""
103 config_from_local_file = import_module(config_module_name)
104
105 # Parse the local config; as there are SD instances with very old config files
106 # the parsing logic here has to assume some values might be missing, and hence
107 # set default values for such config entries
108 final_default_locale = getattr(config_from_local_file, "DEFAULT_LOCALE", FALLBACK_LOCALE)
109 final_supp_locales = getattr(config_from_local_file, "SUPPORTED_LOCALES", [FALLBACK_LOCALE])
110 final_sess_expiration_mins = getattr(config_from_local_file, "SESSION_EXPIRATION_MINUTES", 120)
111
112 final_worker_name = getattr(config_from_local_file, "RQ_WORKER_NAME", "default")
113
114 final_scrypt_params = getattr(
115 config_from_local_file, "SCRYPT_PARAMS", dict(N=2**14, r=8, p=1)
116 )
117
118 try:
119 final_securedrop_root = Path(config_from_local_file.SECUREDROP_ROOT)
120 except AttributeError:
121 final_securedrop_root = DEFAULT_SECUREDROP_ROOT
122
123 try:
124 final_securedrop_data_root = Path(config_from_local_file.SECUREDROP_DATA_ROOT)
125 except AttributeError:
126 final_securedrop_data_root = Path("/var/lib/securedrop")
127
128 try:
129 final_db_file = Path(config_from_local_file.DATABASE_FILE)
130 except AttributeError:
131 final_db_file = final_securedrop_data_root / "db.sqlite"
132
133 try:
134 final_gpg_key_dir = Path(config_from_local_file.GPG_KEY_DIR)
135 except AttributeError:
136 final_gpg_key_dir = final_securedrop_data_root / "keys"
137
138 try:
139 final_nouns = Path(config_from_local_file.NOUNS)
140 except AttributeError:
141 final_nouns = final_securedrop_root / "dictionaries" / "nouns.txt"
142
143 try:
144 final_adjectives = Path(config_from_local_file.ADJECTIVES)
145 except AttributeError:
146 final_adjectives = final_securedrop_root / "dictionaries" / "adjectives.txt"
147
148 try:
149 final_static_dir = Path(config_from_local_file.STATIC_DIR) # type: ignore
150 except AttributeError:
151 final_static_dir = final_securedrop_root / "static"
152
153 try:
154 final_transl_dir = Path(config_from_local_file.TRANSLATION_DIRS) # type: ignore
155 except AttributeError:
156 final_transl_dir = final_securedrop_root / "translations"
157
158 try:
159 final_source_tmpl_dir = Path(config_from_local_file.SOURCE_TEMPLATES_DIR)
160 except AttributeError:
161 final_source_tmpl_dir = final_securedrop_root / "source_templates"
162
163 try:
164 final_journ_tmpl_dir = Path(config_from_local_file.JOURNALIST_TEMPLATES_DIR)
165 except AttributeError:
166 final_journ_tmpl_dir = final_securedrop_root / "journalist_templates"
167
168 # Parse the Flask configurations
169 journ_flask_config = config_from_local_file.JournalistInterfaceFlaskConfig
170 parsed_journ_flask_config = JournalistInterfaceConfig(
171 SECRET_KEY=journ_flask_config.SECRET_KEY,
172 SESSION_COOKIE_NAME=getattr(journ_flask_config, "SESSION_COOKIE_NAME", "js"),
173 DEBUG=getattr(journ_flask_config, "DEBUG", False),
174 TESTING=getattr(journ_flask_config, "TESTING", False),
175 WTF_CSRF_ENABLED=getattr(journ_flask_config, "WTF_CSRF_ENABLED", True),
176 MAX_CONTENT_LENGTH=getattr(journ_flask_config, "MAX_CONTENT_LENGTH", 524288000),
177 USE_X_SENDFILE=getattr(journ_flask_config, "USE_X_SENDFILE", False),
178 )
179 source_flask_config = config_from_local_file.SourceInterfaceFlaskConfig
180 parsed_source_flask_config = SourceInterfaceConfig(
181 SECRET_KEY=source_flask_config.SECRET_KEY,
182 SESSION_COOKIE_NAME=getattr(journ_flask_config, "SESSION_COOKIE_NAME", "ss"),
183 DEBUG=getattr(journ_flask_config, "DEBUG", False),
184 TESTING=getattr(journ_flask_config, "TESTING", False),
185 WTF_CSRF_ENABLED=getattr(journ_flask_config, "WTF_CSRF_ENABLED", True),
186 MAX_CONTENT_LENGTH=getattr(journ_flask_config, "MAX_CONTENT_LENGTH", 524288000),
187 USE_X_SENDFILE=getattr(journ_flask_config, "USE_X_SENDFILE", False),
188 )
189
190 return SecureDropConfig(
191 JOURNALIST_APP_FLASK_CONFIG_CLS=parsed_journ_flask_config,
192 SOURCE_APP_FLASK_CONFIG_CLS=parsed_source_flask_config,
193 GPG_KEY_DIR=final_gpg_key_dir,
194 JOURNALIST_KEY=config_from_local_file.JOURNALIST_KEY,
195 SCRYPT_GPG_PEPPER=config_from_local_file.SCRYPT_GPG_PEPPER,
196 SCRYPT_ID_PEPPER=config_from_local_file.SCRYPT_ID_PEPPER,
197 SCRYPT_PARAMS=final_scrypt_params,
198 SECUREDROP_DATA_ROOT=final_securedrop_data_root,
199 SECUREDROP_ROOT=final_securedrop_root,
200 DATABASE_FILE=final_db_file,
201 STATIC_DIR=final_static_dir,
202 TRANSLATION_DIRS=final_transl_dir,
203 SOURCE_TEMPLATES_DIR=final_source_tmpl_dir,
204 JOURNALIST_TEMPLATES_DIR=final_journ_tmpl_dir,
205 NOUNS=final_nouns,
206 ADJECTIVES=final_adjectives,
207 DEFAULT_LOCALE=final_default_locale,
208 SUPPORTED_LOCALES=final_supp_locales,
209 SESSION_EXPIRATION_MINUTES=final_sess_expiration_mins,
210 RQ_WORKER_NAME=final_worker_name,
211 )
212
[end of securedrop/sdconfig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/sdconfig.py b/securedrop/sdconfig.py
--- a/securedrop/sdconfig.py
+++ b/securedrop/sdconfig.py
@@ -71,6 +71,8 @@
RQ_WORKER_NAME: str
+ env: str = "prod"
+
@property
def TEMP_DIR(self) -> Path:
# We use a directory under the SECUREDROP_DATA_ROOT instead of `/tmp` because
@@ -115,6 +117,8 @@
config_from_local_file, "SCRYPT_PARAMS", dict(N=2**14, r=8, p=1)
)
+ env = getattr(config_from_local_file, "env", "prod")
+
try:
final_securedrop_root = Path(config_from_local_file.SECUREDROP_ROOT)
except AttributeError:
@@ -188,6 +192,7 @@
)
return SecureDropConfig(
+ env=env,
JOURNALIST_APP_FLASK_CONFIG_CLS=parsed_journ_flask_config,
SOURCE_APP_FLASK_CONFIG_CLS=parsed_source_flask_config,
GPG_KEY_DIR=final_gpg_key_dir,
| {"golden_diff": "diff --git a/securedrop/sdconfig.py b/securedrop/sdconfig.py\n--- a/securedrop/sdconfig.py\n+++ b/securedrop/sdconfig.py\n@@ -71,6 +71,8 @@\n \n RQ_WORKER_NAME: str\n \n+ env: str = \"prod\"\n+\n @property\n def TEMP_DIR(self) -> Path:\n # We use a directory under the SECUREDROP_DATA_ROOT instead of `/tmp` because\n@@ -115,6 +117,8 @@\n config_from_local_file, \"SCRYPT_PARAMS\", dict(N=2**14, r=8, p=1)\n )\n \n+ env = getattr(config_from_local_file, \"env\", \"prod\")\n+\n try:\n final_securedrop_root = Path(config_from_local_file.SECUREDROP_ROOT)\n except AttributeError:\n@@ -188,6 +192,7 @@\n )\n \n return SecureDropConfig(\n+ env=env,\n JOURNALIST_APP_FLASK_CONFIG_CLS=parsed_journ_flask_config,\n SOURCE_APP_FLASK_CONFIG_CLS=parsed_source_flask_config,\n GPG_KEY_DIR=final_gpg_key_dir,\n", "issue": "development server no longer hot-reloads on file changes\n## Description\r\n\r\nAfter #6563, `make dev` no longer hot-reloads on file changes, because the immutable `SDConfig` doesn't carry the `env` attribute from `config.py`. (This regression was last encountered in #5594.)\r\n\r\n## Steps to Reproduce\r\n\r\nSame as #5594.\r\n\r\n## Expected Behavior\r\n\r\nSame as #5594.\r\n\r\n## Actual Behavior\r\n\r\nSame as #5594.\r\n\r\n## Comments\r\n\r\nThe fix in <https://github.com/freedomofpress/securedrop/issues/6669#issuecomment-1526129678> is trivial, but I want to take a moment to see if there's an easy way to test for this regression (fool me once, fool me twice, etc.).\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom importlib import import_module\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nFALLBACK_LOCALE = \"en_US\"\n\nDEFAULT_SECUREDROP_ROOT = Path(__file__).absolute().parent\n\n\n@dataclass(frozen=True)\nclass _FlaskAppConfig:\n \"\"\"Config fields that are common to the Journalist and Source interfaces.\"\"\"\n\n SESSION_COOKIE_NAME: str\n SECRET_KEY: str\n\n DEBUG: bool\n TESTING: bool\n WTF_CSRF_ENABLED: bool\n\n # Use MAX_CONTENT_LENGTH to mimic the behavior of Apache's LimitRequestBody\n # in the development environment. 
See #1714.\n MAX_CONTENT_LENGTH: int\n\n # This is recommended for performance, and also resolves #369\n USE_X_SENDFILE: bool\n\n\n@dataclass(frozen=True)\nclass JournalistInterfaceConfig(_FlaskAppConfig):\n # Additional config for JI Redis sessions\n SESSION_SIGNER_SALT: str = \"js_session\"\n SESSION_KEY_PREFIX: str = \"js_session:\"\n SESSION_LIFETIME: int = 2 * 60 * 60\n SESSION_RENEW_COUNT: int = 5\n\n\n@dataclass(frozen=True)\nclass SourceInterfaceConfig(_FlaskAppConfig):\n pass\n\n\n@dataclass(frozen=True)\nclass SecureDropConfig:\n JOURNALIST_APP_FLASK_CONFIG_CLS: JournalistInterfaceConfig\n SOURCE_APP_FLASK_CONFIG_CLS: SourceInterfaceConfig\n\n GPG_KEY_DIR: Path\n JOURNALIST_KEY: str\n SCRYPT_GPG_PEPPER: str\n SCRYPT_ID_PEPPER: str\n SCRYPT_PARAMS: Dict[str, int]\n\n SECUREDROP_DATA_ROOT: Path\n\n DATABASE_FILE: Path # Path to the sqlite DB file\n\n SECUREDROP_ROOT: Path\n STATIC_DIR: Path\n TRANSLATION_DIRS: Path\n SOURCE_TEMPLATES_DIR: Path\n JOURNALIST_TEMPLATES_DIR: Path\n NOUNS: Path\n ADJECTIVES: Path\n\n DEFAULT_LOCALE: str\n SUPPORTED_LOCALES: List[str]\n\n SESSION_EXPIRATION_MINUTES: float\n\n RQ_WORKER_NAME: str\n\n @property\n def TEMP_DIR(self) -> Path:\n # We use a directory under the SECUREDROP_DATA_ROOT instead of `/tmp` because\n # we need to expose this directory via X-Send-File, and want to minimize the\n # potential for exposing unintended files.\n return self.SECUREDROP_DATA_ROOT / \"tmp\"\n\n @property\n def STORE_DIR(self) -> Path:\n return self.SECUREDROP_DATA_ROOT / \"store\"\n\n @property\n def DATABASE_URI(self) -> str:\n return f\"sqlite:///{self.DATABASE_FILE}\"\n\n @classmethod\n def get_current(cls) -> \"SecureDropConfig\":\n global _current_config\n if _current_config is None:\n # Retrieve the config by parsing it from ./config.py\n _current_config = _parse_config_from_file(config_module_name=\"config\")\n return _current_config\n\n\n_current_config: Optional[SecureDropConfig] = None\n\n\ndef _parse_config_from_file(config_module_name: str) -> SecureDropConfig:\n \"\"\"Parse the config from a config.py file.\"\"\"\n config_from_local_file = import_module(config_module_name)\n\n # Parse the local config; as there are SD instances with very old config files\n # the parsing logic here has to assume some values might be missing, and hence\n # set default values for such config entries\n final_default_locale = getattr(config_from_local_file, \"DEFAULT_LOCALE\", FALLBACK_LOCALE)\n final_supp_locales = getattr(config_from_local_file, \"SUPPORTED_LOCALES\", [FALLBACK_LOCALE])\n final_sess_expiration_mins = getattr(config_from_local_file, \"SESSION_EXPIRATION_MINUTES\", 120)\n\n final_worker_name = getattr(config_from_local_file, \"RQ_WORKER_NAME\", \"default\")\n\n final_scrypt_params = getattr(\n config_from_local_file, \"SCRYPT_PARAMS\", dict(N=2**14, r=8, p=1)\n )\n\n try:\n final_securedrop_root = Path(config_from_local_file.SECUREDROP_ROOT)\n except AttributeError:\n final_securedrop_root = DEFAULT_SECUREDROP_ROOT\n\n try:\n final_securedrop_data_root = Path(config_from_local_file.SECUREDROP_DATA_ROOT)\n except AttributeError:\n final_securedrop_data_root = Path(\"/var/lib/securedrop\")\n\n try:\n final_db_file = Path(config_from_local_file.DATABASE_FILE)\n except AttributeError:\n final_db_file = final_securedrop_data_root / \"db.sqlite\"\n\n try:\n final_gpg_key_dir = Path(config_from_local_file.GPG_KEY_DIR)\n except AttributeError:\n final_gpg_key_dir = final_securedrop_data_root / \"keys\"\n\n try:\n final_nouns = 
Path(config_from_local_file.NOUNS)\n except AttributeError:\n final_nouns = final_securedrop_root / \"dictionaries\" / \"nouns.txt\"\n\n try:\n final_adjectives = Path(config_from_local_file.ADJECTIVES)\n except AttributeError:\n final_adjectives = final_securedrop_root / \"dictionaries\" / \"adjectives.txt\"\n\n try:\n final_static_dir = Path(config_from_local_file.STATIC_DIR) # type: ignore\n except AttributeError:\n final_static_dir = final_securedrop_root / \"static\"\n\n try:\n final_transl_dir = Path(config_from_local_file.TRANSLATION_DIRS) # type: ignore\n except AttributeError:\n final_transl_dir = final_securedrop_root / \"translations\"\n\n try:\n final_source_tmpl_dir = Path(config_from_local_file.SOURCE_TEMPLATES_DIR)\n except AttributeError:\n final_source_tmpl_dir = final_securedrop_root / \"source_templates\"\n\n try:\n final_journ_tmpl_dir = Path(config_from_local_file.JOURNALIST_TEMPLATES_DIR)\n except AttributeError:\n final_journ_tmpl_dir = final_securedrop_root / \"journalist_templates\"\n\n # Parse the Flask configurations\n journ_flask_config = config_from_local_file.JournalistInterfaceFlaskConfig\n parsed_journ_flask_config = JournalistInterfaceConfig(\n SECRET_KEY=journ_flask_config.SECRET_KEY,\n SESSION_COOKIE_NAME=getattr(journ_flask_config, \"SESSION_COOKIE_NAME\", \"js\"),\n DEBUG=getattr(journ_flask_config, \"DEBUG\", False),\n TESTING=getattr(journ_flask_config, \"TESTING\", False),\n WTF_CSRF_ENABLED=getattr(journ_flask_config, \"WTF_CSRF_ENABLED\", True),\n MAX_CONTENT_LENGTH=getattr(journ_flask_config, \"MAX_CONTENT_LENGTH\", 524288000),\n USE_X_SENDFILE=getattr(journ_flask_config, \"USE_X_SENDFILE\", False),\n )\n source_flask_config = config_from_local_file.SourceInterfaceFlaskConfig\n parsed_source_flask_config = SourceInterfaceConfig(\n SECRET_KEY=source_flask_config.SECRET_KEY,\n SESSION_COOKIE_NAME=getattr(journ_flask_config, \"SESSION_COOKIE_NAME\", \"ss\"),\n DEBUG=getattr(journ_flask_config, \"DEBUG\", False),\n TESTING=getattr(journ_flask_config, \"TESTING\", False),\n WTF_CSRF_ENABLED=getattr(journ_flask_config, \"WTF_CSRF_ENABLED\", True),\n MAX_CONTENT_LENGTH=getattr(journ_flask_config, \"MAX_CONTENT_LENGTH\", 524288000),\n USE_X_SENDFILE=getattr(journ_flask_config, \"USE_X_SENDFILE\", False),\n )\n\n return SecureDropConfig(\n JOURNALIST_APP_FLASK_CONFIG_CLS=parsed_journ_flask_config,\n SOURCE_APP_FLASK_CONFIG_CLS=parsed_source_flask_config,\n GPG_KEY_DIR=final_gpg_key_dir,\n JOURNALIST_KEY=config_from_local_file.JOURNALIST_KEY,\n SCRYPT_GPG_PEPPER=config_from_local_file.SCRYPT_GPG_PEPPER,\n SCRYPT_ID_PEPPER=config_from_local_file.SCRYPT_ID_PEPPER,\n SCRYPT_PARAMS=final_scrypt_params,\n SECUREDROP_DATA_ROOT=final_securedrop_data_root,\n SECUREDROP_ROOT=final_securedrop_root,\n DATABASE_FILE=final_db_file,\n STATIC_DIR=final_static_dir,\n TRANSLATION_DIRS=final_transl_dir,\n SOURCE_TEMPLATES_DIR=final_source_tmpl_dir,\n JOURNALIST_TEMPLATES_DIR=final_journ_tmpl_dir,\n NOUNS=final_nouns,\n ADJECTIVES=final_adjectives,\n DEFAULT_LOCALE=final_default_locale,\n SUPPORTED_LOCALES=final_supp_locales,\n SESSION_EXPIRATION_MINUTES=final_sess_expiration_mins,\n RQ_WORKER_NAME=final_worker_name,\n )\n", "path": "securedrop/sdconfig.py"}]} | 3,209 | 265 |
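The regression class in the securedrop entry above is a frozen config object silently dropping an attribute (`env`) that downstream code still reads, and the fix is simply to carry it through with a "prod" default. The self-contained sketch below reproduces that pattern with a miniature frozen dataclass and a fake config module, ending with the kind of assertion the issue's request for a regression test is pointing at; all names are stand-ins rather than SecureDrop's real module layout.

```python
# Miniature version of the regression: an immutable config must keep the
# optional `env` value from config.py, defaulting to "prod" when absent.
from dataclasses import dataclass
from types import SimpleNamespace


@dataclass(frozen=True)
class MiniConfig:
    JOURNALIST_KEY: str
    env: str = "prod"


def parse_config(config_module) -> MiniConfig:
    """Parse a config.py-like module, tolerating a missing `env` attribute."""
    return MiniConfig(
        JOURNALIST_KEY=config_module.JOURNALIST_KEY,
        env=getattr(config_module, "env", "prod"),
    )


if __name__ == "__main__":
    dev_module = SimpleNamespace(JOURNALIST_KEY="ABCD1234", env="dev")
    prod_module = SimpleNamespace(JOURNALIST_KEY="ABCD1234")  # old config without env

    # In miniature, the regression test the issue asks about: `env` must survive
    # parsing so the development server can decide to enable the reloader.
    assert parse_config(dev_module).env == "dev"
    assert parse_config(prod_module).env == "prod"
    print("env attribute is preserved through the frozen config")
```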
gh_patches_debug_30135 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove external 'zip' dependency from DistributionBuilder (zip_password=True)
Hi there, first off I want to acknowledge your efforts towards making an incredible tool, thank you!
The issue: I faced the exception *FileNotFoundError: [Errno 2] No such file or directory: 'zip'* when trying to specify `zip_password: true` for the DistributionBuilder args while running `provision -p project.yml`, and tracked the cause to `zip` not being installed on the local machine, which was a problem since installing additional software is not allowed on that machine.
## Replicate
```bash
# Launch a docker container with image python:3.8 running interactive bash
docker run -it --rm python:3.8 bash
# === Within docker container ===
pip install nvflare
provision # create example project.yml
# Set zip_password to true
sed -i 's/zip_password: false/zip_password: true/' project.yml
provision -p project.yml
```
We will find the following output:
```
Project yaml file: /project.yml.
Exception raised during provision. Incomplete prod_n folder removed.
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/nvflare/lighter/spec.py", line 166, in provision
b.build(study, ctx)
File "/usr/local/lib/python3.8/site-packages/nvflare/lighter/impl/workspace.py", line 109, in build
subprocess.run(run_args)
File "/usr/local/lib/python3.8/subprocess.py", line 493, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/local/lib/python3.8/subprocess.py", line 858, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/local/lib/python3.8/subprocess.py", line 1704, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'zip'
```
Whereas it'll work if we install zip
```bash
# === Within docker container ===
apt-get update && apt-get install -y zip
provision -p project.yml
```
## Fix
The build function of the DistributionBuilder uses `shutil` to make the non-password protected zip but relies on `subprocess.run(['zip', ...])` in order to create password-protected files, since shutil doesn't support passwords.
I'd propose adding a package requirement of something like `pyminizip` or `pyzipper` to handle password protection.
https://github.com/NVIDIA/NVFlare/blob/c8d51bfb534c02eff874a760c47086c87cbff59f/nvflare/lighter/impl/workspace.py#L100-L113
</issue>
<code>
[start of nvflare/lighter/impl/workspace.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import pathlib
17 import shutil
18 import subprocess
19
20 from nvflare.lighter.spec import Builder, Project
21 from nvflare.lighter.utils import generate_password
22
23
24 class WorkspaceBuilder(Builder):
25 def __init__(self, template_file):
26 """Manages the folder structure for provisioned projects.
27
28 Sets the template_file containing scripts and configs to put into startup folders, creates directories for the
29 participants, and moves the provisioned project to the final location at the end
30 ($WORKSPACE/$PROJECT_NAME/prod_XX). WorkspaceBuilder manages and sets the number in prod_XX by incrementing from
31 the last time provision was run for this project in this workspace, starting with 00 to a max of 99.
32
33 Each time the provisioning tool runs, it requires a workspace folder in the local file system. The workspace
34 will have the following folder structure:
35
36 .. code-block:: text
37
38 $WORKSPACE/ <--- this is assigned by -w option of provision command (default is workspace)
39 $PROJECT_NAME/ <--- this is the name value in the project.yml file
40 prod_00/ <--- a new prod_NN folder is created if provision does not have any errors.
41 prod_01/
42 ...
43 resources/ <--- this folder stores resources for other builders to load
44 state/ <--- this folder stores persistent information (such as certificates) so subsequent runs of the provision command can load the state back.
45 wip/ <--- this is only used during runtime, and will be removed when the provision command exits
46
47 Args:
48 template_file: name of template file containing scripts and configs to put into startup folders
49 """
50 self.template_file = template_file
51
52 def _make_dir(self, dirs):
53 for dir in dirs:
54 if not os.path.exists(dir):
55 os.makedirs(dir)
56
57 def initialize(self, ctx):
58 workspace_dir = ctx["workspace"]
59 prod_dirs = [_ for _ in os.listdir(workspace_dir) if _.startswith("prod_")]
60 last = -1
61 for dir in prod_dirs:
62 stage = int(dir.split("_")[-1])
63 if stage > last:
64 last = stage
65 ctx["last_prod_stage"] = last
66 template_file_full_path = os.path.join(self.get_resources_dir(ctx), self.template_file)
67 file_path = pathlib.Path(__file__).parent.absolute()
68 shutil.copyfile(os.path.join(file_path, self.template_file), template_file_full_path)
69 ctx["template_file"] = self.template_file
70
71 def build(self, project: Project, ctx: dict):
72 dirs = [self.get_kit_dir(p, ctx) for p in project.participants]
73 self._make_dir(dirs)
74
75 def finalize(self, ctx: dict):
76 if ctx["last_prod_stage"] >= 99:
77 print(f"Please clean up {ctx['workspace']} by removing prod_N folders")
78 print("After clean-up, rerun the provision command.")
79 else:
80 current_prod_stage = str(ctx["last_prod_stage"] + 1).zfill(2)
81 current_prod_dir = os.path.join(ctx["workspace"], f"prod_{current_prod_stage}")
82 shutil.move(self.get_wip_dir(ctx), current_prod_dir)
83 ctx.pop("wip_dir", None)
84 print(f"Generated results can be found under {current_prod_dir}. Builder's wip folder removed.")
85 ctx["current_prod_dir"] = current_prod_dir
86
87
88 class DistributionBuilder(Builder):
89 def __init__(self, zip_password=False):
90 """Build the zip files for each folder.
91
92 Creates the zip files containing the archives for each startup kit. It will add password protection if the
93 argument (zip_password) is true.
94
95 Args:
96 zip_password: if true, will create zipped packages with passwords
97 """
98 self.zip_password = zip_password
99
100 def build(self, project: Project, ctx: dict):
101 wip_dir = self.get_wip_dir(ctx)
102 dirs = [name for name in os.listdir(wip_dir) if os.path.isdir(os.path.join(wip_dir, name))]
103 for dir in dirs:
104 dest_zip_file = os.path.join(wip_dir, f"{dir}")
105 if self.zip_password:
106 pw = generate_password()
107 run_args = ["zip", "-rq", "-P", pw, dest_zip_file + ".zip", ".", "-i", "startup/*"]
108 os.chdir(dest_zip_file)
109 subprocess.run(run_args)
110 os.chdir(os.path.join(dest_zip_file, ".."))
111 print(f"Password {pw} on {dir}.zip")
112 else:
113 shutil.make_archive(dest_zip_file, "zip", root_dir=os.path.join(wip_dir, dir), base_dir="startup")
114
[end of nvflare/lighter/impl/workspace.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/lighter/impl/workspace.py b/nvflare/lighter/impl/workspace.py
--- a/nvflare/lighter/impl/workspace.py
+++ b/nvflare/lighter/impl/workspace.py
@@ -98,6 +98,15 @@
self.zip_password = zip_password
def build(self, project: Project, ctx: dict):
+ """Create a zip for each individual folder.
+ Note that if zip_password is True, the zip command will be used to encrypt zip files. Users have to to
+ install this zip utility before provisioning. In Ubuntu system, use this command to install zip utility:
+ sudo apt-get install zip
+
+ Args:
+ project (Project): project instance
+ ctx (dict): the provision context
+ """
wip_dir = self.get_wip_dir(ctx)
dirs = [name for name in os.listdir(wip_dir) if os.path.isdir(os.path.join(wip_dir, name))]
for dir in dirs:
@@ -106,8 +115,12 @@
pw = generate_password()
run_args = ["zip", "-rq", "-P", pw, dest_zip_file + ".zip", ".", "-i", "startup/*"]
os.chdir(dest_zip_file)
- subprocess.run(run_args)
- os.chdir(os.path.join(dest_zip_file, ".."))
- print(f"Password {pw} on {dir}.zip")
+ try:
+ subprocess.run(run_args)
+ print(f"Password {pw} on {dir}.zip")
+ except FileNotFoundError as e:
+ raise RuntimeError("Unable to zip folders with password. Maybe the zip utility is not installed.")
+ finally:
+ os.chdir(os.path.join(dest_zip_file, ".."))
else:
shutil.make_archive(dest_zip_file, "zip", root_dir=os.path.join(wip_dir, dir), base_dir="startup")
| {"golden_diff": "diff --git a/nvflare/lighter/impl/workspace.py b/nvflare/lighter/impl/workspace.py\n--- a/nvflare/lighter/impl/workspace.py\n+++ b/nvflare/lighter/impl/workspace.py\n@@ -98,6 +98,15 @@\n self.zip_password = zip_password\n \n def build(self, project: Project, ctx: dict):\n+ \"\"\"Create a zip for each individual folder.\n+ Note that if zip_password is True, the zip command will be used to encrypt zip files. Users have to to\n+ install this zip utility before provisioning. In Ubuntu system, use this command to install zip utility:\n+ sudo apt-get install zip\n+\n+ Args:\n+ project (Project): project instance\n+ ctx (dict): the provision context\n+ \"\"\"\n wip_dir = self.get_wip_dir(ctx)\n dirs = [name for name in os.listdir(wip_dir) if os.path.isdir(os.path.join(wip_dir, name))]\n for dir in dirs:\n@@ -106,8 +115,12 @@\n pw = generate_password()\n run_args = [\"zip\", \"-rq\", \"-P\", pw, dest_zip_file + \".zip\", \".\", \"-i\", \"startup/*\"]\n os.chdir(dest_zip_file)\n- subprocess.run(run_args)\n- os.chdir(os.path.join(dest_zip_file, \"..\"))\n- print(f\"Password {pw} on {dir}.zip\")\n+ try:\n+ subprocess.run(run_args)\n+ print(f\"Password {pw} on {dir}.zip\")\n+ except FileNotFoundError as e:\n+ raise RuntimeError(\"Unable to zip folders with password. Maybe the zip utility is not installed.\")\n+ finally:\n+ os.chdir(os.path.join(dest_zip_file, \"..\"))\n else:\n shutil.make_archive(dest_zip_file, \"zip\", root_dir=os.path.join(wip_dir, dir), base_dir=\"startup\")\n", "issue": "Remove external 'zip' dependency from DistributionBuilder (zip_password=True)\nHi there, first off I want to acknowledge your efforts towards making an incredible tool, thank you!\r\n\r\nThe issue: I faced the exception *FileNotFoundError: [Errno 2] No such file or directory: 'zip'* when trying to specify `zip_password: true` for the DistributionBuilder args while running `provision -p project.yml`, and tracked the cause to `zip` not being installed on the local machine, which was a problem since installing additional software is not allowed on that machine.\r\n\r\n## Replicate\r\n```bash\r\n# Launch a docker container with image python:3.8 running interactive bash\r\ndocker run -it --rm python:3.8 bash\r\n\r\n# === Within docker container ===\r\npip install nvflare\r\nprovision # create example project.yml\r\n\r\n# Set zip_password to true\r\nsed -i 's/zip_password: false/zip_password: true/' project.yml\r\n\r\nprovision -p project.yml\r\n```\r\nWe will find the following output:\r\n```\r\nProject yaml file: /project.yml.\r\nException raised during provision. 
Incomplete prod_n folder removed.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/nvflare/lighter/spec.py\", line 166, in provision\r\n b.build(study, ctx)\r\n File \"/usr/local/lib/python3.8/site-packages/nvflare/lighter/impl/workspace.py\", line 109, in build\r\n subprocess.run(run_args)\r\n File \"/usr/local/lib/python3.8/subprocess.py\", line 493, in run\r\n with Popen(*popenargs, **kwargs) as process:\r\n File \"/usr/local/lib/python3.8/subprocess.py\", line 858, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/usr/local/lib/python3.8/subprocess.py\", line 1704, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'zip'\r\n```\r\nWhereas it'll work if we install zip\r\n```bash\r\n# === Within docker container ===\r\napt-get update && apt-get install -y zip\r\nprovision -p project.yml\r\n```\r\n\r\n## Fix\r\nThe build function of the DistributionBuilder uses `shutil` to make the non-password protected zip but relies on `subprocess.run(['zip', ...])` in order to create password-protected files, since shutil doesn't support passwords.\r\n\r\nI'd propose adding a package requirement of something like `pyminizip` or `pyzipper` to handle password protection.\r\n\r\nhttps://github.com/NVIDIA/NVFlare/blob/c8d51bfb534c02eff874a760c47086c87cbff59f/nvflare/lighter/impl/workspace.py#L100-L113\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport pathlib\nimport shutil\nimport subprocess\n\nfrom nvflare.lighter.spec import Builder, Project\nfrom nvflare.lighter.utils import generate_password\n\n\nclass WorkspaceBuilder(Builder):\n def __init__(self, template_file):\n \"\"\"Manages the folder structure for provisioned projects.\n\n Sets the template_file containing scripts and configs to put into startup folders, creates directories for the\n participants, and moves the provisioned project to the final location at the end\n ($WORKSPACE/$PROJECT_NAME/prod_XX). WorkspaceBuilder manages and sets the number in prod_XX by incrementing from\n the last time provision was run for this project in this workspace, starting with 00 to a max of 99.\n\n Each time the provisioning tool runs, it requires a workspace folder in the local file system. The workspace\n will have the following folder structure:\n\n .. 
code-block:: text\n\n $WORKSPACE/ <--- this is assigned by -w option of provision command (default is workspace)\n $PROJECT_NAME/ <--- this is the name value in the project.yml file\n prod_00/ <--- a new prod_NN folder is created if provision does not have any errors.\n prod_01/\n ...\n resources/ <--- this folder stores resources for other builders to load\n state/ <--- this folder stores persistent information (such as certificates) so subsequent runs of the provision command can load the state back.\n wip/ <--- this is only used during runtime, and will be removed when the provision command exits\n\n Args:\n template_file: name of template file containing scripts and configs to put into startup folders\n \"\"\"\n self.template_file = template_file\n\n def _make_dir(self, dirs):\n for dir in dirs:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n def initialize(self, ctx):\n workspace_dir = ctx[\"workspace\"]\n prod_dirs = [_ for _ in os.listdir(workspace_dir) if _.startswith(\"prod_\")]\n last = -1\n for dir in prod_dirs:\n stage = int(dir.split(\"_\")[-1])\n if stage > last:\n last = stage\n ctx[\"last_prod_stage\"] = last\n template_file_full_path = os.path.join(self.get_resources_dir(ctx), self.template_file)\n file_path = pathlib.Path(__file__).parent.absolute()\n shutil.copyfile(os.path.join(file_path, self.template_file), template_file_full_path)\n ctx[\"template_file\"] = self.template_file\n\n def build(self, project: Project, ctx: dict):\n dirs = [self.get_kit_dir(p, ctx) for p in project.participants]\n self._make_dir(dirs)\n\n def finalize(self, ctx: dict):\n if ctx[\"last_prod_stage\"] >= 99:\n print(f\"Please clean up {ctx['workspace']} by removing prod_N folders\")\n print(\"After clean-up, rerun the provision command.\")\n else:\n current_prod_stage = str(ctx[\"last_prod_stage\"] + 1).zfill(2)\n current_prod_dir = os.path.join(ctx[\"workspace\"], f\"prod_{current_prod_stage}\")\n shutil.move(self.get_wip_dir(ctx), current_prod_dir)\n ctx.pop(\"wip_dir\", None)\n print(f\"Generated results can be found under {current_prod_dir}. Builder's wip folder removed.\")\n ctx[\"current_prod_dir\"] = current_prod_dir\n\n\nclass DistributionBuilder(Builder):\n def __init__(self, zip_password=False):\n \"\"\"Build the zip files for each folder.\n\n Creates the zip files containing the archives for each startup kit. It will add password protection if the\n argument (zip_password) is true.\n\n Args:\n zip_password: if true, will create zipped packages with passwords\n \"\"\"\n self.zip_password = zip_password\n\n def build(self, project: Project, ctx: dict):\n wip_dir = self.get_wip_dir(ctx)\n dirs = [name for name in os.listdir(wip_dir) if os.path.isdir(os.path.join(wip_dir, name))]\n for dir in dirs:\n dest_zip_file = os.path.join(wip_dir, f\"{dir}\")\n if self.zip_password:\n pw = generate_password()\n run_args = [\"zip\", \"-rq\", \"-P\", pw, dest_zip_file + \".zip\", \".\", \"-i\", \"startup/*\"]\n os.chdir(dest_zip_file)\n subprocess.run(run_args)\n os.chdir(os.path.join(dest_zip_file, \"..\"))\n print(f\"Password {pw} on {dir}.zip\")\n else:\n shutil.make_archive(dest_zip_file, \"zip\", root_dir=os.path.join(wip_dir, dir), base_dir=\"startup\")\n", "path": "nvflare/lighter/impl/workspace.py"}]} | 2,579 | 416 |
gh_patches_debug_26270 | rasdani/github-patches | git_diff | e-valuation__EvaP-2036 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translations in Javascript and Typescript
When writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and accessing them via Javascript then.
Instead of doing this, we want to have a unified approach where the use-site can just write `trans("The server is not responding.")` or so. There are two possible approaches:
1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = "English" | "German"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present, probably return the English string and emit a warning? This dictionary would be defined in a script tag in a HTML file, something like (possibly with an implementation that doesn't repeat the strings a little less):
```html
<script type="text/javascript">
window.translationDictionary = {
"de": {
{% language 'de' %}
"The server is not responding": "{% trans 'The server is not responding' %}",
{% endlanguage %}
}
};
</script>
```
2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix.
I think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. in the future and go with the Django versions directly.
</issue>
<code>
[start of evap/development/management/commands/translate.py]
1 from django.core.management import call_command
2 from django.core.management.base import BaseCommand
3
4
5 class Command(BaseCommand):
6 args = ""
7 help = 'Execute "makemessages --locale=de --ignore=node_modules/*"'
8
9 def handle(self, *args, **options):
10 self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"')
11 call_command("makemessages", "--locale=de", "--ignore=node_modules/*")
12
[end of evap/development/management/commands/translate.py]
[start of evap/urls.py]
1 import django.contrib.auth.views
2 from django.conf import settings
3 from django.urls import include, path
4
5 urlpatterns = [
6 path("", include('evap.evaluation.urls')),
7 path("staff/", include('evap.staff.urls')),
8 path("results/", include('evap.results.urls')),
9 path("student/", include('evap.student.urls')),
10 path("contributor/", include('evap.contributor.urls')),
11 path("rewards/", include('evap.rewards.urls')),
12 path("grades/", include('evap.grades.urls')),
13
14 path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"),
15 path("oidc/", include('mozilla_django_oidc.urls')),
16 ]
17
18 if settings.DEBUG:
19 urlpatterns += [path('development/', include('evap.development.urls'))]
20
21 if settings.ENABLE_DEBUG_TOOLBAR:
22 # pylint does not correctly evaluate this if, so it will raise an import-error on
23 # GitHub actions and a useless-suppression on a vagrant setup. Ignore both cases.
24 import debug_toolbar # pylint: disable=import-error, useless-suppression
25 urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]
26
[end of evap/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py
--- a/evap/development/management/commands/translate.py
+++ b/evap/development/management/commands/translate.py
@@ -9,3 +9,11 @@
def handle(self, *args, **options):
self.stdout.write('Executing "manage.py makemessages --locale=de --ignore=node_modules/*"')
call_command("makemessages", "--locale=de", "--ignore=node_modules/*")
+ call_command(
+ "makemessages",
+ "--domain=djangojs",
+ "--extension=js,ts",
+ "--locale=de",
+ "--ignore=node_modules/*",
+ "--ignore=evap/static/js/*.min.js",
+ )
diff --git a/evap/urls.py b/evap/urls.py
--- a/evap/urls.py
+++ b/evap/urls.py
@@ -1,6 +1,9 @@
import django.contrib.auth.views
from django.conf import settings
from django.urls import include, path
+from django.views.i18n import JavaScriptCatalog
+
+from evap.middleware import no_login_required
urlpatterns = [
path("", include('evap.evaluation.urls')),
@@ -13,6 +16,8 @@
path("logout", django.contrib.auth.views.LogoutView.as_view(next_page="/"), name="django-auth-logout"),
path("oidc/", include('mozilla_django_oidc.urls')),
+
+ path("catalog.js", no_login_required(JavaScriptCatalog.as_view()), name="javascript-catalog"),
]
if settings.DEBUG:
| {"golden_diff": "diff --git a/evap/development/management/commands/translate.py b/evap/development/management/commands/translate.py\n--- a/evap/development/management/commands/translate.py\n+++ b/evap/development/management/commands/translate.py\n@@ -9,3 +9,11 @@\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n+ call_command(\n+ \"makemessages\",\n+ \"--domain=djangojs\",\n+ \"--extension=js,ts\",\n+ \"--locale=de\",\n+ \"--ignore=node_modules/*\",\n+ \"--ignore=evap/static/js/*.min.js\",\n+ )\ndiff --git a/evap/urls.py b/evap/urls.py\n--- a/evap/urls.py\n+++ b/evap/urls.py\n@@ -1,6 +1,9 @@\n import django.contrib.auth.views\n from django.conf import settings\n from django.urls import include, path\n+from django.views.i18n import JavaScriptCatalog\n+\n+from evap.middleware import no_login_required\n \n urlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n@@ -13,6 +16,8 @@\n \n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n+\n+ path(\"catalog.js\", no_login_required(JavaScriptCatalog.as_view()), name=\"javascript-catalog\"),\n ]\n \n if settings.DEBUG:\n", "issue": "Translations in Javascript and Typescript\nWhen writing Javascript and Typescript in separate, non-HTML files, we can't use the Django template functions `trans`, `blocktrans`, etc. anymore. We have worked around this by putting translated strings into the DOM and accessing them via Javascript then.\r\n\r\nInstead of doing this, we want to have a unified approach where the use-site can just write `trans(\"The server is not responding.\")` or so. There are two possible approaches:\r\n\r\n1. DIY: We have a function `trans(english: string, to: Language = window.LANGUAGE): string` with `type Language = \"English\" | \"German\"`. This function looks up the string in a global dictionary (for example `window.translationDictionary` or so). I am not sure what it should do if the string is not present, probably return the English string and emit a warning? This dictionary would be defined in a script tag in a HTML file, something like (possibly with an implementation that doesn't repeat the strings a little less):\r\n```html\r\n<script type=\"text/javascript\">\r\n window.translationDictionary = {\r\n \"de\": {\r\n {% language 'de' %}\r\n \"The server is not responding\": \"{% trans 'The server is not responding' %}\",\r\n {% endlanguage %}\r\n }\r\n };\r\n</script>\r\n```\r\n2. Use Django's builtin functionality: There is a builtin way that configures an extra endpoint to make all translations available (https://docs.djangoproject.com/en/4.2/topics/i18n/translation/#internationalization-in-javascript-code). A plus is that it also supports `ngettext` and so on. It seems like it can also detect all strings used in translations, but the setup may be a bit tricky with Typescript thrown into the mix.\r\n\r\nI think I prefer the first approach, but maybe we encounter difficulties with it or decide that we will need `ngettext` etc. 
in the future and go with the Django versions directly.\n", "before_files": [{"content": "from django.core.management import call_command\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = 'Execute \"makemessages --locale=de --ignore=node_modules/*\"'\n\n def handle(self, *args, **options):\n self.stdout.write('Executing \"manage.py makemessages --locale=de --ignore=node_modules/*\"')\n call_command(\"makemessages\", \"--locale=de\", \"--ignore=node_modules/*\")\n", "path": "evap/development/management/commands/translate.py"}, {"content": "import django.contrib.auth.views\nfrom django.conf import settings\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"\", include('evap.evaluation.urls')),\n path(\"staff/\", include('evap.staff.urls')),\n path(\"results/\", include('evap.results.urls')),\n path(\"student/\", include('evap.student.urls')),\n path(\"contributor/\", include('evap.contributor.urls')),\n path(\"rewards/\", include('evap.rewards.urls')),\n path(\"grades/\", include('evap.grades.urls')),\n\n path(\"logout\", django.contrib.auth.views.LogoutView.as_view(next_page=\"/\"), name=\"django-auth-logout\"),\n path(\"oidc/\", include('mozilla_django_oidc.urls')),\n]\n\nif settings.DEBUG:\n urlpatterns += [path('development/', include('evap.development.urls'))]\n\n if settings.ENABLE_DEBUG_TOOLBAR:\n # pylint does not correctly evaluate this if, so it will raise an import-error on\n # GitHub actions and a useless-suppression on a vagrant setup. Ignore both cases.\n import debug_toolbar # pylint: disable=import-error, useless-suppression\n urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]\n", "path": "evap/urls.py"}]} | 1,386 | 369 |
gh_patches_debug_1560 | rasdani/github-patches | git_diff | NVIDIA__TransformerEngine-813 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`warnings.simplefilter('default')` in global scope causes excessive DeprecationWarnings
https://github.com/NVIDIA/TransformerEngine/blob/f85553ea369da15fd726ab279818e415be48a228/transformer_engine/common/utils.py#L9
Importing the `transformer_engine.common.utils` resets the warning filters to default settings using `warnings.simplefilter('default')` in the global scope. This results in the console being flooded with DeprecationWarnings, which are normally ignored by Python by default.
Would it be possible to move setting the warning filter config to a more controlled scope in this module?
</issue>
<code>
[start of transformer_engine/common/utils.py]
1 # Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 #
3 # See LICENSE for license information.
4 """The utilities for Transformer Engine"""
5 import inspect
6 import warnings
7 from enum import Enum
8
9 warnings.simplefilter('default')
10
11
12 class DeprecatedEnum: # pylint: disable=too-few-public-methods
13 """DeprecatedEnum"""
14
15 def __init__(self, enum_cls, msg):
16 self.enum_cls = enum_cls
17 self.msg = msg
18
19 def __iter__(self):
20 return iter(list(self.enum_cls.__members__.values()))
21
22 def __getattr__(self, name):
23 if name in self.enum_cls.__members__:
24 warnings.warn(self.msg, DeprecationWarning)
25 return self.enum_cls.__members__[name]
26 raise AttributeError(f"{self.enum_cls} does not contain {name}")
27
28
29 def deprecate_wrapper(obj, msg):
30 """Deprecate wrapper"""
31 if inspect.isclass(obj):
32 if issubclass(obj, Enum):
33 return DeprecatedEnum(obj, msg)
34
35 class DeprecatedCls(obj): # pylint: disable=too-few-public-methods
36 """DeprecatedCls"""
37
38 def __init__(self, *args, **kwargs):
39 warnings.warn(msg, DeprecationWarning)
40 super().__init__(*args, **kwargs)
41
42 return DeprecatedCls
43
44 if inspect.isfunction(obj):
45
46 def deprecated(*args, **kwargs):
47 warnings.warn(msg, DeprecationWarning)
48 return obj(*args, **kwargs)
49
50 return deprecated
51
52 raise NotImplementedError(
53 f"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.")
54
[end of transformer_engine/common/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/transformer_engine/common/utils.py b/transformer_engine/common/utils.py
--- a/transformer_engine/common/utils.py
+++ b/transformer_engine/common/utils.py
@@ -6,7 +6,8 @@
import warnings
from enum import Enum
-warnings.simplefilter('default')
+warnings.filterwarnings(
+ "module", category=DeprecationWarning, module="transformer_engine.common.utils")
class DeprecatedEnum: # pylint: disable=too-few-public-methods
| {"golden_diff": "diff --git a/transformer_engine/common/utils.py b/transformer_engine/common/utils.py\n--- a/transformer_engine/common/utils.py\n+++ b/transformer_engine/common/utils.py\n@@ -6,7 +6,8 @@\n import warnings\n from enum import Enum\n \n-warnings.simplefilter('default')\n+warnings.filterwarnings(\n+ \"module\", category=DeprecationWarning, module=\"transformer_engine.common.utils\")\n \n \n class DeprecatedEnum: # pylint: disable=too-few-public-methods\n", "issue": "`warnings.simplefilter('default')` in global scope causes excessive DeprecationWarnings\nhttps://github.com/NVIDIA/TransformerEngine/blob/f85553ea369da15fd726ab279818e415be48a228/transformer_engine/common/utils.py#L9\r\n\r\nImporting the `transformer_engine.common.utils` resets the warning filters to default settings using `warnings.simplefilter('default')` in the global scope. This results in the console being flooded with DeprecationWarnings, which are normally ignored by Python by default.\r\n\r\nWould it be possible to move setting the warning filter config to a more controlled scope in this module?\n", "before_files": [{"content": "# Copyright (c) 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# See LICENSE for license information.\n\"\"\"The utilities for Transformer Engine\"\"\"\nimport inspect\nimport warnings\nfrom enum import Enum\n\nwarnings.simplefilter('default')\n\n\nclass DeprecatedEnum: # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedEnum\"\"\"\n\n def __init__(self, enum_cls, msg):\n self.enum_cls = enum_cls\n self.msg = msg\n\n def __iter__(self):\n return iter(list(self.enum_cls.__members__.values()))\n\n def __getattr__(self, name):\n if name in self.enum_cls.__members__:\n warnings.warn(self.msg, DeprecationWarning)\n return self.enum_cls.__members__[name]\n raise AttributeError(f\"{self.enum_cls} does not contain {name}\")\n\n\ndef deprecate_wrapper(obj, msg):\n \"\"\"Deprecate wrapper\"\"\"\n if inspect.isclass(obj):\n if issubclass(obj, Enum):\n return DeprecatedEnum(obj, msg)\n\n class DeprecatedCls(obj): # pylint: disable=too-few-public-methods\n \"\"\"DeprecatedCls\"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n super().__init__(*args, **kwargs)\n\n return DeprecatedCls\n\n if inspect.isfunction(obj):\n\n def deprecated(*args, **kwargs):\n warnings.warn(msg, DeprecationWarning)\n return obj(*args, **kwargs)\n\n return deprecated\n\n raise NotImplementedError(\n f\"deprecate_cls_wrapper only support Class and Function, but got {type(obj)}.\")\n", "path": "transformer_engine/common/utils.py"}]} | 1,143 | 108 |
gh_patches_debug_76 | rasdani/github-patches | git_diff | streamlit__streamlit-2570 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
URL markup does not get generated as a link
# Summary
URLs used to generate an anchor tag automatically in markup. Now it does not
# Steps to reproduce
Code snippet:
```
st.write(f"""
As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome!
---
### Connect With Us
- We can be found at https://streamlit.io and https://twitter.com/streamlit
- Come by
[the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions,
post awesome apps, or just say hi!
""")
```
## Expected behavior:
[0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py)

## Actual behavior:
[0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py)

## Is this a regression?
Yes as of 0.74
</issue>
<code>
[start of lib/setup.py]
1 import os
2 import setuptools
3 import sys
4
5 from setuptools.command.install import install
6
7 try:
8 from pipenv.project import Project
9 from pipenv.utils import convert_deps_to_pip
10 except:
11 exit_msg = (
12 "pipenv is required to package Streamlit. Please install pipenv and try again"
13 )
14 sys.exit(exit_msg)
15
16 VERSION = "0.74.0" # PEP-440
17
18 NAME = "streamlit"
19
20 DESCRIPTION = "The fastest way to build data apps in Python"
21
22 LONG_DESCRIPTION = (
23 "Streamlit's open-source app framework is the easiest way "
24 "for data scientists and machine learning engineers to "
25 "create beautiful, performant apps in only a few hours! "
26 "All in pure Python. All for free."
27 )
28
29 pipfile = Project(chdir=False).parsed_pipfile
30
31 packages = pipfile["packages"].copy()
32 requirements = convert_deps_to_pip(packages, r=False)
33
34
35 class VerifyVersionCommand(install):
36 """Custom command to verify that the git tag matches our version"""
37
38 description = "verify that the git tag matches our version"
39
40 def run(self):
41 tag = os.getenv("CIRCLE_TAG")
42
43 if tag != VERSION:
44 info = "Git tag: {0} does not match the version of this app: {1}".format(
45 tag, VERSION
46 )
47 sys.exit(info)
48
49
50 setuptools.setup(
51 name=NAME,
52 version=VERSION,
53 description=DESCRIPTION,
54 long_description=LONG_DESCRIPTION,
55 url="https://streamlit.io",
56 author="Streamlit Inc",
57 author_email="[email protected]",
58 python_requires=">=3.6",
59 license="Apache 2",
60 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
61 # Requirements
62 install_requires=requirements,
63 zip_safe=False, # install source files not egg
64 include_package_data=True, # copy html and friends
65 entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]},
66 # For Windows so that streamlit * commands work ie.
67 # - streamlit version
68 # - streamlit hello
69 scripts=["bin/streamlit.cmd"],
70 cmdclass={
71 "verify": VerifyVersionCommand,
72 },
73 )
74
[end of lib/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -13,7 +13,7 @@
)
sys.exit(exit_msg)
-VERSION = "0.74.0" # PEP-440
+VERSION = "0.74.1" # PEP-440
NAME = "streamlit"
| {"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -13,7 +13,7 @@\n )\n sys.exit(exit_msg)\n \n-VERSION = \"0.74.0\" # PEP-440\n+VERSION = \"0.74.1\" # PEP-440\n \n NAME = \"streamlit\"\n", "issue": "URL markup does not get generated as a link\n# Summary\r\nURLs used to generate an anchor tag automatically in markup. Now it does not\r\n\r\n\r\n# Steps to reproduce\r\nCode snippet:\r\n\r\n```\r\nst.write(f\"\"\"\r\n As always, thank you to [all our contributors](https://github.com/streamlit/streamlit/graphs/contributors) who help make Streamlit awesome!\r\n\r\n ---\r\n\r\n ### Connect With Us\r\n\r\n - We can be found at https://streamlit.io and https://twitter.com/streamlit\r\n - Come by\r\n [the forums](https://discuss.streamlit.io/c/official-announcements/6) if you'd like to ask questions,\r\n post awesome apps, or just say hi!\r\n \"\"\")\r\n```\r\n\r\n## Expected behavior:\r\n[0.73](https://share.streamlit.io/streamlit/release-demos/0.73/0.73/streamlit_app.py)\r\n\r\n\r\n\r\n## Actual behavior:\r\n[0.74](https://share.streamlit.io/streamlit/release-demos/0.74/0.74/streamlit_app.py)\r\n\r\n\r\n\r\n## Is this a regression?\r\nYes as of 0.74\r\n\n", "before_files": [{"content": "import os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\nexcept:\n exit_msg = (\n \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n )\n sys.exit(exit_msg)\n\nVERSION = \"0.74.0\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]} | 1,534 | 91 |
gh_patches_debug_19874 | rasdani/github-patches | git_diff | saleor__saleor-10283 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
checkoutCreate mutation issue - { "code": "REQUIRED", "field": "country", "message": "This field cannot be blank." }
### What I'm trying to achieve
try to add checkout step with createCheckout mutation.
### Steps to reproduce the problem
<!-- Adding logs from the console, as well as query/response help us fix the bug faster -->
1. use docker-platform, deploy with docker compose, delete saleor folder and replace it by saleor folder with 3.1 branch clone
2. use playgraphl to test mutation request (checkoutCreate)
### What I expected to happen
i make test on saleor demo site : https://demo.saleor.io/graphql/
```bash
mutation CheckoutCreate {
checkoutCreate(
input: { channel: "default-channel", email: "[email protected]", lines: [] }
)
{ errors {
code
field
message
}
checkout {
id
token
created
}
}
}
```
result on : https://demo.saleor.io/graphql/
```bash
{
"data": {
"checkoutCreate": {
"errors": [],
"checkout": {
"id": "Q2hlY2tvdXQ6MDQ2MmQwMzQtZGJmYi00MTg1LWExZTMtMWUwYTU2YWMxYjJi",
"token": "0462d034-dbfb-4185-a1e3-1e0a56ac1b2b",
"created": "2021-09-17T13:17:33.994853+00:00"
}
}
}
}
```
# this is fine for me but ....
When i try the samething on my local machine (deploy with docker compose)
i get this:
```bash
{
"data": {
"checkoutCreate": {
"errors": [
{
"code": "REQUIRED",
"field": "country",
"message": "This field cannot be blank."
}
],
"checkout": null
}
}
}
```
i want to get checkoutID and token and the system ask me to add some country field.....
**System information**
<!-- Provide the version of Saleor or whether you're using it from the `master` branch. If using Saleor Dashboard or Storefront, provide their versions too. -->
Saleor version:
- [ ] dev (current master)
- [ X] 3.0
- [ ] 2.11
- [ ] 2.10
Operating system:
- [ ] Windows
- [ X] Linux
- [ ] MacOS
- [ ] Other
</issue>
<code>
[start of saleor/channel/migrations/0001_initial.py]
1 # Generated by Django 3.0.6 on 2020-06-16 07:54
2
3 from django.conf import settings
4 from django.db import migrations, models
5 from django.db.models.signals import post_migrate
6 from django.apps import apps as registry
7
8
9 def assing_permissions(apps, schema_editor):
10 def on_migrations_complete(sender=None, **kwargs):
11 Group = apps.get_model("auth", "Group")
12 Permission = apps.get_model("auth", "Permission")
13 ContentType = apps.get_model("contenttypes", "ContentType")
14
15 ct, _ = ContentType.objects.get_or_create(app_label="channel", model="channel")
16 manage_channels, _ = Permission.objects.get_or_create(
17 name="Manage channels.", content_type=ct, codename="manage_channels"
18 )
19
20 for group in Group.objects.iterator():
21 group.permissions.add(manage_channels)
22
23 sender = registry.get_app_config("channel")
24 post_migrate.connect(on_migrations_complete, weak=False, sender=sender)
25
26
27 def get_default_currency(Checkout, Order, Product, ShippingMethod, Voucher):
28 latest_product = Product.objects.order_by("-pk").first()
29 if latest_product:
30 return latest_product.currency
31 latest_voucher = Voucher.objects.order_by("-pk").first()
32 if latest_voucher:
33 return latest_voucher.currency
34 latest_shipping_method = ShippingMethod.objects.order_by("-pk").first()
35 if latest_shipping_method:
36 return latest_shipping_method.currency
37 latest_order = Order.objects.order_by("-pk").first()
38 if latest_order:
39 return latest_order.currency
40 latest_checkout = Checkout.objects.order_by("-pk").first()
41 if latest_checkout:
42 return latest_checkout.currency
43 return None
44
45
46 def create_default_channel(apps, schema_editor):
47 Channel = apps.get_model("channel", "Channel")
48 Checkout = apps.get_model("checkout", "Checkout")
49 Order = apps.get_model("order", "Order")
50 Product = apps.get_model("product", "Product")
51 ShippingMethod = apps.get_model("shipping", "ShippingMethod")
52 Voucher = apps.get_model("discount", "Voucher")
53
54 default_currency = get_default_currency(
55 Checkout, Order, Product, ShippingMethod, Voucher
56 )
57 if default_currency:
58 Channel.objects.create(
59 name="Default channel",
60 slug=settings.DEFAULT_CHANNEL_SLUG,
61 currency_code=default_currency,
62 is_active=True,
63 )
64
65
66 class Migration(migrations.Migration):
67
68 initial = True
69
70 dependencies = [
71 ("checkout", "0025_auto_20200221_0257"),
72 ("discount", "0019_auto_20200217_0350"),
73 ("order", "0084_auto_20200522_0522"),
74 ("product", "0118_populate_product_variant_price"),
75 ("shipping", "0018_default_zones_countries"),
76 ]
77
78 operations = [
79 migrations.CreateModel(
80 name="Channel",
81 fields=[
82 (
83 "id",
84 models.AutoField(
85 auto_created=True,
86 primary_key=True,
87 serialize=False,
88 verbose_name="ID",
89 ),
90 ),
91 ("name", models.CharField(max_length=250)),
92 ("slug", models.SlugField(max_length=255, unique=True)),
93 ("is_active", models.BooleanField(default=False)),
94 (
95 "currency_code",
96 models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH),
97 ),
98 ],
99 options={
100 "ordering": ("slug",),
101 "permissions": (("manage_channels", "Manage channels."),),
102 },
103 ),
104 migrations.RunPython(create_default_channel, migrations.RunPython.noop),
105 migrations.RunPython(assing_permissions, migrations.RunPython.noop),
106 ]
107
[end of saleor/channel/migrations/0001_initial.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/channel/migrations/0001_initial.py b/saleor/channel/migrations/0001_initial.py
--- a/saleor/channel/migrations/0001_initial.py
+++ b/saleor/channel/migrations/0001_initial.py
@@ -1,5 +1,6 @@
# Generated by Django 3.0.6 on 2020-06-16 07:54
+import os
from django.conf import settings
from django.db import migrations, models
from django.db.models.signals import post_migrate
@@ -54,12 +55,14 @@
default_currency = get_default_currency(
Checkout, Order, Product, ShippingMethod, Voucher
)
+ default_country = os.environ.get("DEFAULT_COUNTRY", "US")
if default_currency:
Channel.objects.create(
name="Default channel",
slug=settings.DEFAULT_CHANNEL_SLUG,
currency_code=default_currency,
is_active=True,
+ default_country=default_country,
)
| {"golden_diff": "diff --git a/saleor/channel/migrations/0001_initial.py b/saleor/channel/migrations/0001_initial.py\n--- a/saleor/channel/migrations/0001_initial.py\n+++ b/saleor/channel/migrations/0001_initial.py\n@@ -1,5 +1,6 @@\n # Generated by Django 3.0.6 on 2020-06-16 07:54\n \n+import os\n from django.conf import settings\n from django.db import migrations, models\n from django.db.models.signals import post_migrate\n@@ -54,12 +55,14 @@\n default_currency = get_default_currency(\n Checkout, Order, Product, ShippingMethod, Voucher\n )\n+ default_country = os.environ.get(\"DEFAULT_COUNTRY\", \"US\")\n if default_currency:\n Channel.objects.create(\n name=\"Default channel\",\n slug=settings.DEFAULT_CHANNEL_SLUG,\n currency_code=default_currency,\n is_active=True,\n+ default_country=default_country,\n )\n", "issue": "checkoutCreate mutation issue - { \"code\": \"REQUIRED\", \"field\": \"country\", \"message\": \"This field cannot be blank.\" }\n### What I'm trying to achieve\r\ntry to add checkout step with createCheckout mutation.\r\n\r\n### Steps to reproduce the problem\r\n<!-- Adding logs from the console, as well as query/response help us fix the bug faster -->\r\n1. use docker-platform, deploy with docker compose, delete saleor folder and replace it by saleor folder with 3.1 branch clone\r\n2. use playgraphl to test mutation request (checkoutCreate)\r\n\r\n### What I expected to happen\r\ni make test on saleor demo site : https://demo.saleor.io/graphql/\r\n\r\n```bash\r\nmutation CheckoutCreate {\r\n checkoutCreate(\r\n input: { channel: \"default-channel\", email: \"[email protected]\", lines: [] }\r\n ) \r\n { errors {\r\n code\r\n field\r\n message\r\n }\r\n checkout {\r\n id\r\n token\r\n created\r\n \r\n }\r\n }\r\n}\r\n\r\n```\r\nresult on : https://demo.saleor.io/graphql/\r\n```bash\r\n{\r\n \"data\": {\r\n \"checkoutCreate\": {\r\n \"errors\": [],\r\n \"checkout\": {\r\n \"id\": \"Q2hlY2tvdXQ6MDQ2MmQwMzQtZGJmYi00MTg1LWExZTMtMWUwYTU2YWMxYjJi\",\r\n \"token\": \"0462d034-dbfb-4185-a1e3-1e0a56ac1b2b\",\r\n \"created\": \"2021-09-17T13:17:33.994853+00:00\"\r\n }\r\n }\r\n }\r\n}\r\n\r\n\r\n```\r\n\r\n# this is fine for me but ....\r\nWhen i try the samething on my local machine (deploy with docker compose)\r\ni get this:\r\n\r\n```bash\r\n\r\n{\r\n \"data\": {\r\n \"checkoutCreate\": {\r\n \"errors\": [\r\n {\r\n \"code\": \"REQUIRED\",\r\n \"field\": \"country\",\r\n \"message\": \"This field cannot be blank.\"\r\n }\r\n ],\r\n \"checkout\": null\r\n }\r\n }\r\n}\r\n\r\n```\r\ni want to get checkoutID and token and the system ask me to add some country field.....\r\n\r\n**System information**\r\n<!-- Provide the version of Saleor or whether you're using it from the `master` branch. If using Saleor Dashboard or Storefront, provide their versions too. 
-->\r\nSaleor version:\r\n- [ ] dev (current master)\r\n- [ X] 3.0\r\n- [ ] 2.11\r\n- [ ] 2.10\r\n\r\nOperating system:\r\n- [ ] Windows\r\n- [ X] Linux\r\n- [ ] MacOS\r\n- [ ] Other\r\n\n", "before_files": [{"content": "# Generated by Django 3.0.6 on 2020-06-16 07:54\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nfrom django.db.models.signals import post_migrate\nfrom django.apps import apps as registry\n\n\ndef assing_permissions(apps, schema_editor):\n def on_migrations_complete(sender=None, **kwargs):\n Group = apps.get_model(\"auth\", \"Group\")\n Permission = apps.get_model(\"auth\", \"Permission\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n ct, _ = ContentType.objects.get_or_create(app_label=\"channel\", model=\"channel\")\n manage_channels, _ = Permission.objects.get_or_create(\n name=\"Manage channels.\", content_type=ct, codename=\"manage_channels\"\n )\n\n for group in Group.objects.iterator():\n group.permissions.add(manage_channels)\n\n sender = registry.get_app_config(\"channel\")\n post_migrate.connect(on_migrations_complete, weak=False, sender=sender)\n\n\ndef get_default_currency(Checkout, Order, Product, ShippingMethod, Voucher):\n latest_product = Product.objects.order_by(\"-pk\").first()\n if latest_product:\n return latest_product.currency\n latest_voucher = Voucher.objects.order_by(\"-pk\").first()\n if latest_voucher:\n return latest_voucher.currency\n latest_shipping_method = ShippingMethod.objects.order_by(\"-pk\").first()\n if latest_shipping_method:\n return latest_shipping_method.currency\n latest_order = Order.objects.order_by(\"-pk\").first()\n if latest_order:\n return latest_order.currency\n latest_checkout = Checkout.objects.order_by(\"-pk\").first()\n if latest_checkout:\n return latest_checkout.currency\n return None\n\n\ndef create_default_channel(apps, schema_editor):\n Channel = apps.get_model(\"channel\", \"Channel\")\n Checkout = apps.get_model(\"checkout\", \"Checkout\")\n Order = apps.get_model(\"order\", \"Order\")\n Product = apps.get_model(\"product\", \"Product\")\n ShippingMethod = apps.get_model(\"shipping\", \"ShippingMethod\")\n Voucher = apps.get_model(\"discount\", \"Voucher\")\n\n default_currency = get_default_currency(\n Checkout, Order, Product, ShippingMethod, Voucher\n )\n if default_currency:\n Channel.objects.create(\n name=\"Default channel\",\n slug=settings.DEFAULT_CHANNEL_SLUG,\n currency_code=default_currency,\n is_active=True,\n )\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n (\"checkout\", \"0025_auto_20200221_0257\"),\n (\"discount\", \"0019_auto_20200217_0350\"),\n (\"order\", \"0084_auto_20200522_0522\"),\n (\"product\", \"0118_populate_product_variant_price\"),\n (\"shipping\", \"0018_default_zones_countries\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Channel\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"name\", models.CharField(max_length=250)),\n (\"slug\", models.SlugField(max_length=255, unique=True)),\n (\"is_active\", models.BooleanField(default=False)),\n (\n \"currency_code\",\n models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH),\n ),\n ],\n options={\n \"ordering\": (\"slug\",),\n \"permissions\": ((\"manage_channels\", \"Manage channels.\"),),\n },\n ),\n migrations.RunPython(create_default_channel, migrations.RunPython.noop),\n migrations.RunPython(assing_permissions, 
migrations.RunPython.noop),\n ]\n", "path": "saleor/channel/migrations/0001_initial.py"}]} | 2,188 | 227 |
gh_patches_debug_37257 | rasdani/github-patches | git_diff | svthalia__concrexit-3722 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lock admin panel behind 2FA
### What?
<!-- A clear and concise high-level description of what you want to happen. -->
Lock the admin panel behind the 2FA functionality.
### Why?
<!-- A clear and concise motivation why we should consider implementing this. -->
The admin panel exposes sensitive data, so it should be protected; requiring 2FA therefore makes sense.
### How?
<!-- Optionally some guidance, ideas, context. -->
It would probably be nice to have a decorator so that other parts of the site can also be locked behind 2FA in the future; a rough sketch is included below.
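A minimal sketch of such a decorator, assuming django-otp and django-two-factor-auth are installed (the decorator name and message text here are illustrative, not the final implementation):

```python
from functools import wraps

from django.contrib import messages
from django.shortcuts import redirect
from django_otp import user_has_device


def two_factor_required(view_func):
    # Redirect authenticated users without a registered OTP device to the 2FA setup page.
    @wraps(view_func)
    def _wrapped(request, *args, **kwargs):
        if request.user.is_authenticated and not user_has_device(request.user):
            messages.error(request, "Two-factor authentication is required for this page.")
            return redirect("two_factor:setup")
        return view_func(request, *args, **kwargs)

    return _wrapped
```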
</issue>
<code>
[start of website/thaliawebsite/admin.py]
1 """Settings for the admin site."""
2 from django.contrib import admin
3 from django.utils.translation import gettext_lazy as _
4
5 admin.site.site_header = _("Thalia administration")
6 admin.site.site_title = _("Thalia")
7
[end of website/thaliawebsite/admin.py]
[start of website/thaliawebsite/views.py]
1 """General views for the website."""
2
3 from django.contrib.admin.views.decorators import staff_member_required
4 from django.contrib.auth.views import LogoutView as BaseLogoutView
5 from django.contrib.auth.views import PasswordResetView
6 from django.core.exceptions import PermissionDenied
7 from django.http import HttpResponse, HttpResponseForbidden
8 from django.shortcuts import redirect
9 from django.utils.decorators import method_decorator
10 from django.views.generic import ListView, TemplateView
11 from django.views.generic.base import View
12
13 from django_ratelimit.decorators import ratelimit
14 from two_factor.views import LoginView
15
16
17 class IndexView(TemplateView):
18 template_name = "index.html"
19
20
21 @method_decorator(staff_member_required, "dispatch")
22 class TestCrashView(View):
23 """Test view to intentionally crash to test the error handling."""
24
25 def dispatch(self, request, *args, **kwargs) -> HttpResponse:
26 if not request.user.is_superuser:
27 return HttpResponseForbidden("This is not for you")
28 raise Exception("Test exception")
29
30
31 class PagedView(ListView):
32 """A ListView with automatic pagination."""
33
34 def get_context_data(self, **kwargs) -> dict:
35 context = super().get_context_data(**kwargs)
36 page = context["page_obj"].number
37 paginator = context["paginator"]
38
39 # Show the two pages before and after the current page
40 page_range_start = max(1, page - 2)
41 page_range_stop = min(page + 3, paginator.num_pages + 1)
42
43 # Add extra pages if we show less than 5 pages
44 page_range_start = min(page_range_start, page_range_stop - 5)
45 page_range_start = max(1, page_range_start)
46
47 # Add extra pages if we still show less than 5 pages
48 page_range_stop = max(page_range_stop, page_range_start + 5)
49 page_range_stop = min(page_range_stop, paginator.num_pages + 1)
50
51 page_range = range(page_range_start, page_range_stop)
52
53 querydict = self.request.GET.copy()
54
55 if "page" in querydict:
56 del querydict["page"]
57
58 context.update(
59 {
60 "page_range": page_range,
61 "base_url": f"{self.request.path}?{querydict.urlencode()}&"
62 if querydict
63 else f"{self.request.path}?",
64 }
65 )
66
67 return context
68
69
70 class RateLimitedPasswordResetView(PasswordResetView):
71 @method_decorator(ratelimit(key="ip", rate="5/h"))
72 def post(self, request, *args, **kwargs):
73 return super().post(request, *args, **kwargs)
74
75
76 class RateLimitedLoginView(LoginView):
77 @method_decorator(ratelimit(key="ip", rate="30/h"))
78 @method_decorator(ratelimit(key="post:username", rate="30/h"))
79 def post(self, request, *args, **kwargs):
80 return super().post(request, *args, **kwargs)
81
82
83 class LogoutView(BaseLogoutView):
84 # Allow GET logout still (this was deprecated in Django 5.0).
85 http_method_names = ["get", "post", "options"]
86
87 def get(self, request, *args, **kwargs):
88 return self.post(request, *args, **kwargs)
89
90
91 def rate_limited_view(request, *args, **kwargs):
92 return HttpResponse("You are rate limited", status=429)
93
94
95 def admin_unauthorized_view(request):
96 if not request.member:
97 url = "/user/account/login"
98 args = request.META.get("QUERY_STRING", "")
99 if args:
100 url = f"{url}?{args}"
101 return redirect(url)
102 elif not request.member.is_staff and not request.member.is_superuser:
103 raise PermissionDenied("You are not allowed to access the administration page.")
104 else:
105 return redirect(request.GET.get("next", "/"))
106
[end of website/thaliawebsite/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/thaliawebsite/admin.py b/website/thaliawebsite/admin.py
--- a/website/thaliawebsite/admin.py
+++ b/website/thaliawebsite/admin.py
@@ -1,6 +1,17 @@
"""Settings for the admin site."""
+
+from django.conf import settings
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
-admin.site.site_header = _("Thalia administration")
-admin.site.site_title = _("Thalia")
+from django_otp import user_has_device
+
+
+class ThaliaAdminSite(admin.AdminSite):
+ site_header = _("Thalia administration")
+ site_title = _("Thalia")
+
+ def has_permission(self, request):
+ return super().has_permission(request) and (
+ settings.DEBUG or user_has_device(request.user)
+ )
diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py
--- a/website/thaliawebsite/views.py
+++ b/website/thaliawebsite/views.py
@@ -1,5 +1,6 @@
"""General views for the website."""
+from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.views import LogoutView as BaseLogoutView
from django.contrib.auth.views import PasswordResetView
@@ -10,6 +11,7 @@
from django.views.generic import ListView, TemplateView
from django.views.generic.base import View
+from django_otp import user_has_device
from django_ratelimit.decorators import ratelimit
from two_factor.views import LoginView
@@ -58,9 +60,11 @@
context.update(
{
"page_range": page_range,
- "base_url": f"{self.request.path}?{querydict.urlencode()}&"
- if querydict
- else f"{self.request.path}?",
+ "base_url": (
+ f"{self.request.path}?{querydict.urlencode()}&"
+ if querydict
+ else f"{self.request.path}?"
+ ),
}
)
@@ -101,5 +105,11 @@
return redirect(url)
elif not request.member.is_staff and not request.member.is_superuser:
raise PermissionDenied("You are not allowed to access the administration page.")
+ elif not user_has_device(request.member):
+ messages.error(
+ request,
+ "You need to set up two-factor authentication to access the administration page.",
+ )
+ return redirect("two_factor:setup")
else:
return redirect(request.GET.get("next", "/"))
| {"golden_diff": "diff --git a/website/thaliawebsite/admin.py b/website/thaliawebsite/admin.py\n--- a/website/thaliawebsite/admin.py\n+++ b/website/thaliawebsite/admin.py\n@@ -1,6 +1,17 @@\n \"\"\"Settings for the admin site.\"\"\"\n+\n+from django.conf import settings\n from django.contrib import admin\n from django.utils.translation import gettext_lazy as _\n \n-admin.site.site_header = _(\"Thalia administration\")\n-admin.site.site_title = _(\"Thalia\")\n+from django_otp import user_has_device\n+\n+\n+class ThaliaAdminSite(admin.AdminSite):\n+ site_header = _(\"Thalia administration\")\n+ site_title = _(\"Thalia\")\n+\n+ def has_permission(self, request):\n+ return super().has_permission(request) and (\n+ settings.DEBUG or user_has_device(request.user)\n+ )\ndiff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py\n--- a/website/thaliawebsite/views.py\n+++ b/website/thaliawebsite/views.py\n@@ -1,5 +1,6 @@\n \"\"\"General views for the website.\"\"\"\n \n+from django.contrib import messages\n from django.contrib.admin.views.decorators import staff_member_required\n from django.contrib.auth.views import LogoutView as BaseLogoutView\n from django.contrib.auth.views import PasswordResetView\n@@ -10,6 +11,7 @@\n from django.views.generic import ListView, TemplateView\n from django.views.generic.base import View\n \n+from django_otp import user_has_device\n from django_ratelimit.decorators import ratelimit\n from two_factor.views import LoginView\n \n@@ -58,9 +60,11 @@\n context.update(\n {\n \"page_range\": page_range,\n- \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n- if querydict\n- else f\"{self.request.path}?\",\n+ \"base_url\": (\n+ f\"{self.request.path}?{querydict.urlencode()}&\"\n+ if querydict\n+ else f\"{self.request.path}?\"\n+ ),\n }\n )\n \n@@ -101,5 +105,11 @@\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n+ elif not user_has_device(request.member):\n+ messages.error(\n+ request,\n+ \"You need to set up two-factor authentication to access the administration page.\",\n+ )\n+ return redirect(\"two_factor:setup\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "issue": "Lock admin panel behind 2FA\n### What?\r\n<!-- A clear and concise high-level description of what you want to happen. -->\r\nlock the admin panel behind the 2FA functionality\r\n\r\n### Why?\r\n<!-- A clear and concise motivation why we should consider implementing this. -->\r\nAdmin panel has sensitive data so it should be protected. So requiring 2FA makes sense.\r\n\r\n### How?\r\n<!-- Optionally some guidance, ideas, context. 
-->\r\nProbably nice to have a decorator to be able to lock other things of the site behind 2FA in the future.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Settings for the admin site.\"\"\"\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nadmin.site.site_header = _(\"Thalia administration\")\nadmin.site.site_title = _(\"Thalia\")\n", "path": "website/thaliawebsite/admin.py"}, {"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LogoutView as BaseLogoutView\nfrom django.contrib.auth.views import PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\nfrom two_factor.views import LoginView\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n querydict = self.request.GET.copy()\n\n if \"page\" in querydict:\n del querydict[\"page\"]\n\n context.update(\n {\n \"page_range\": page_range,\n \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n if querydict\n else f\"{self.request.path}?\",\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass LogoutView(BaseLogoutView):\n # Allow GET logout still (this was deprecated in Django 5.0).\n http_method_names = [\"get\", \"post\", \"options\"]\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef admin_unauthorized_view(request):\n if not request.member:\n url = 
\"/user/account/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}]} | 1,754 | 565 |
gh_patches_debug_39220 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-1485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[V3 Image] [p]imgur search errors out
### Type:
- [ ] Suggestion
- [x] Bug
### Brief description of the problem
`[p]imgur search` produces an error
### Expected behavior
It should give links to images
### Actual behavior
`Error in command 'imgur search'. Check your console or logs for details.`
### Steps to reproduce
1. Run `[p]imgur search cats`
2. Observe the error below
Traceback:
```py
Exception in command 'imgur search'
Traceback (most recent call last):
File "/home/palm/redv3/lib/python3.5/site-packages/discord/ext/commands/core.py", line 62, in wrapped
ret = yield from coro(*args, **kwargs)
File "/home/palm/redv3/lib/python3.5/site-packages/redbot/cogs/image/image.py", line 47, in imgur_search
data = await search_get.json()
File "/home/palm/redv3/lib/python3.5/site-packages/aiohttp/client_reqrep.py", line 730, in json
headers=self.headers)
aiohttp.client_exceptions.ClientResponseError: 0, message='Attempt to decode JSON with unexpected mimetype: text/html; charset=utf-8'
```
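The error means Imgur answered with an HTML page rather than JSON (typically an error page for a wrong endpoint or missing credentials). A defensive sketch for surfacing that more clearly, reusing the cog's existing `self.session`, `url`, `headers` and `params` variables (not the cog's actual fix):

```python
async with self.session.get(url, headers=headers, params=params) as resp:
    if resp.content_type != "application/json":
        # Imgur returned HTML (e.g. an error page), so report it instead of crashing on .json()
        body = await resp.text()
        raise RuntimeError(f"Unexpected Imgur response ({resp.status}): {body[:200]}")
    data = await resp.json()
```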
</issue>
<code>
[start of redbot/cogs/image/image.py]
1 from random import shuffle
2
3 import aiohttp
4 from discord.ext import commands
5
6 from redbot.core.i18n import CogI18n
7 from redbot.core import checks, Config
8
9 _ = CogI18n("Image", __file__)
10
11 GIPHY_API_KEY = "dc6zaTOxFJmzC"
12
13
14 class Image:
15 """Image related commands."""
16 default_global = {
17 "imgur_client_id": None
18 }
19
20 def __init__(self, bot):
21 self.bot = bot
22 self.settings = Config.get_conf(self, identifier=2652104208, force_registration=True)
23 self.settings.register_global(**self.default_global)
24 self.session = aiohttp.ClientSession()
25 self.imgur_base_url = "https://api.imgur.com/3/"
26
27 def __unload(self):
28 self.session.close()
29
30 @commands.group(name="imgur")
31 @commands.guild_only()
32 async def _imgur(self, ctx):
33 """Retrieves pictures from imgur
34
35 Make sure to set the client ID using
36 [p]imgurcreds"""
37 if ctx.invoked_subcommand is None:
38 await ctx.send_help()
39
40 @_imgur.command(name="search")
41 async def imgur_search(self, ctx, *, term: str):
42 """Searches Imgur for the specified term and returns up to 3 results"""
43 url = self.imgur_base_url + "time/all/0"
44 params = {"q": term}
45 headers = {"Authorization": "Client-ID {}".format(await self.settings.imgur_client_id())}
46 async with self.session.get(url, headers=headers, data=params) as search_get:
47 data = await search_get.json()
48
49 if data["success"]:
50 results = data["data"]
51 if not results:
52 await ctx.send(_("Your search returned no results"))
53 return
54 shuffle(results)
55 msg = _("Search results...\n")
56 for r in results[:3]:
57 msg += r["gifv"] if "gifv" in r else r["link"]
58 msg += "\n"
59 await ctx.send(msg)
60 else:
61 await ctx.send(_("Something went wrong. Error code is {}").format(data["status"]))
62
63 @_imgur.command(name="subreddit")
64 async def imgur_subreddit(self, ctx, subreddit: str, sort_type: str="top", window: str="day"):
65 """Gets images from the specified subreddit section
66
67 Sort types: new, top
68 Time windows: day, week, month, year, all"""
69 sort_type = sort_type.lower()
70 window = window.lower()
71
72 if sort_type not in ("new", "top"):
73 await ctx.send(_("Only 'new' and 'top' are a valid sort type."))
74 return
75 elif window not in ("day", "week", "month", "year", "all"):
76 await ctx.send_help()
77 return
78
79 if sort_type == "new":
80 sort = "time"
81 elif sort_type == "top":
82 sort = "top"
83
84 links = []
85 headers = {"Authorization": "Client-ID {}".format(await self.settings.imgur_client_id())}
86 url = self.imgur_base_url + "r/{}/{}/{}/0".format(subreddit, sort, window)
87
88 async with self.session.get(url, headers=headers) as sub_get:
89 data = await sub_get.json()
90
91 if data["success"]:
92 items = data["data"]
93 if items:
94 for item in items[:3]:
95 link = item["gifv"] if "gifv" in item else item["link"]
96 links.append("{}\n{}".format(item["title"], link))
97
98 if links:
99 await ctx.send("\n".join(links))
100 else:
101 await ctx.send(_("No results found."))
102 else:
103 await ctx.send(_("Something went wrong. Error code is {}").format(data["status"]))
104
105 @checks.is_owner()
106 @commands.command()
107 async def imgurcreds(self, ctx, imgur_client_id: str):
108 """Sets the imgur client id
109 You will need an account on Imgur to get this
110
111 You can get these by visiting https://api.imgur.com/oauth2/addclient
112 and filling out the form. Enter a name for the application, select
113 'Anonymous usage without user authorization' for the auth type,
114 leave the app website blank, enter a valid email address, and
115 enter a description. Check the box for the captcha, then click Next.
116 Your client ID will be on the page that loads"""
117 await self.settings.imgur_client_id.set(imgur_client_id)
118 await ctx.send(_("Set the imgur client id!"))
119
120 @commands.command(pass_context=True, no_pm=True)
121 async def gif(self, ctx, *keywords):
122 """Retrieves first search result from giphy"""
123 if keywords:
124 keywords = "+".join(keywords)
125 else:
126 await ctx.send_help()
127 return
128
129 url = ("http://api.giphy.com/v1/gifs/search?&api_key={}&q={}"
130 "".format(GIPHY_API_KEY, keywords))
131
132 async with self.session.get(url) as r:
133 result = await r.json()
134 if r.status == 200:
135 if result["data"]:
136 await ctx.send(result["data"][0]["url"])
137 else:
138 await ctx.send(_("No results found."))
139 else:
140 await ctx.send(_("Error contacting the API"))
141
142 @commands.command(pass_context=True, no_pm=True)
143 async def gifr(self, ctx, *keywords):
144 """Retrieves a random gif from a giphy search"""
145 if keywords:
146 keywords = "+".join(keywords)
147 else:
148 await ctx.send_help()
149 return
150
151 url = ("http://api.giphy.com/v1/gifs/random?&api_key={}&tag={}"
152 "".format(GIPHY_API_KEY, keywords))
153
154 async with self.session.get(url) as r:
155 result = await r.json()
156 if r.status == 200:
157 if result["data"]:
158 await ctx.send(result["data"]["url"])
159 else:
160 await ctx.send(_("No results found."))
161 else:
162 await ctx.send(_("Error contacting the API"))
163
[end of redbot/cogs/image/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redbot/cogs/image/image.py b/redbot/cogs/image/image.py
--- a/redbot/cogs/image/image.py
+++ b/redbot/cogs/image/image.py
@@ -28,7 +28,6 @@
self.session.close()
@commands.group(name="imgur")
- @commands.guild_only()
async def _imgur(self, ctx):
"""Retrieves pictures from imgur
@@ -40,10 +39,16 @@
@_imgur.command(name="search")
async def imgur_search(self, ctx, *, term: str):
"""Searches Imgur for the specified term and returns up to 3 results"""
- url = self.imgur_base_url + "time/all/0"
+ url = self.imgur_base_url + "gallery/search/time/all/0"
params = {"q": term}
- headers = {"Authorization": "Client-ID {}".format(await self.settings.imgur_client_id())}
- async with self.session.get(url, headers=headers, data=params) as search_get:
+ imgur_client_id = await self.settings.imgur_client_id()
+ if not imgur_client_id:
+ await ctx.send(
+ _("A client ID has not been set! Please set one with {}").format(
+ "`{}imgurcreds`".format(ctx.prefix)))
+ return
+ headers = {"Authorization": "Client-ID {}".format(imgur_client_id)}
+ async with self.session.get(url, headers=headers, params=params) as search_get:
data = await search_get.json()
if data["success"]:
@@ -81,9 +86,16 @@
elif sort_type == "top":
sort = "top"
+ imgur_client_id = await self.settings.imgur_client_id()
+ if not imgur_client_id:
+ await ctx.send(
+ _("A client ID has not been set! Please set one with {}").format(
+ "`{}imgurcreds`".format(ctx.prefix)))
+ return
+
links = []
- headers = {"Authorization": "Client-ID {}".format(await self.settings.imgur_client_id())}
- url = self.imgur_base_url + "r/{}/{}/{}/0".format(subreddit, sort, window)
+ headers = {"Authorization": "Client-ID {}".format(imgur_client_id)}
+ url = self.imgur_base_url + "gallery/r/{}/{}/{}/0".format(subreddit, sort, window)
async with self.session.get(url, headers=headers) as sub_get:
data = await sub_get.json()
@@ -111,6 +123,7 @@
You can get these by visiting https://api.imgur.com/oauth2/addclient
and filling out the form. Enter a name for the application, select
'Anonymous usage without user authorization' for the auth type,
+ set the authorization callback url to 'https://localhost'
leave the app website blank, enter a valid email address, and
enter a description. Check the box for the captcha, then click Next.
Your client ID will be on the page that loads"""
| {"golden_diff": "diff --git a/redbot/cogs/image/image.py b/redbot/cogs/image/image.py\n--- a/redbot/cogs/image/image.py\n+++ b/redbot/cogs/image/image.py\n@@ -28,7 +28,6 @@\n self.session.close()\n \n @commands.group(name=\"imgur\")\n- @commands.guild_only()\n async def _imgur(self, ctx):\n \"\"\"Retrieves pictures from imgur\n \n@@ -40,10 +39,16 @@\n @_imgur.command(name=\"search\")\n async def imgur_search(self, ctx, *, term: str):\n \"\"\"Searches Imgur for the specified term and returns up to 3 results\"\"\"\n- url = self.imgur_base_url + \"time/all/0\"\n+ url = self.imgur_base_url + \"gallery/search/time/all/0\"\n params = {\"q\": term}\n- headers = {\"Authorization\": \"Client-ID {}\".format(await self.settings.imgur_client_id())}\n- async with self.session.get(url, headers=headers, data=params) as search_get:\n+ imgur_client_id = await self.settings.imgur_client_id()\n+ if not imgur_client_id:\n+ await ctx.send(\n+ _(\"A client ID has not been set! Please set one with {}\").format(\n+ \"`{}imgurcreds`\".format(ctx.prefix)))\n+ return\n+ headers = {\"Authorization\": \"Client-ID {}\".format(imgur_client_id)}\n+ async with self.session.get(url, headers=headers, params=params) as search_get:\n data = await search_get.json()\n \n if data[\"success\"]:\n@@ -81,9 +86,16 @@\n elif sort_type == \"top\":\n sort = \"top\"\n \n+ imgur_client_id = await self.settings.imgur_client_id()\n+ if not imgur_client_id:\n+ await ctx.send(\n+ _(\"A client ID has not been set! Please set one with {}\").format(\n+ \"`{}imgurcreds`\".format(ctx.prefix)))\n+ return\n+\n links = []\n- headers = {\"Authorization\": \"Client-ID {}\".format(await self.settings.imgur_client_id())}\n- url = self.imgur_base_url + \"r/{}/{}/{}/0\".format(subreddit, sort, window)\n+ headers = {\"Authorization\": \"Client-ID {}\".format(imgur_client_id)}\n+ url = self.imgur_base_url + \"gallery/r/{}/{}/{}/0\".format(subreddit, sort, window)\n \n async with self.session.get(url, headers=headers) as sub_get:\n data = await sub_get.json()\n@@ -111,6 +123,7 @@\n You can get these by visiting https://api.imgur.com/oauth2/addclient\n and filling out the form. Enter a name for the application, select\n 'Anonymous usage without user authorization' for the auth type,\n+ set the authorization callback url to 'https://localhost'\n leave the app website blank, enter a valid email address, and\n enter a description. Check the box for the captcha, then click Next.\n Your client ID will be on the page that loads\"\"\"\n", "issue": "[V3 Image] [p]imgur search errors out\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\n`[p]imgur search` produces an error\r\n### Expected behavior\r\nIt should give links to images\r\n### Actual behavior\r\n`Error in command 'imgur search'. Check your console or logs for details.`\r\n### Steps to reproduce\r\n\r\n1. do `[p]imgur search cats`\r\n2. 
get error\r\n\r\nTraceback:\r\n\r\n```py\r\nException in command 'imgur search'\r\nTraceback (most recent call last):\r\n File \"/home/palm/redv3/lib/python3.5/site-packages/discord/ext/commands/core.py\", line 62, in wrapped\r\n ret = yield from coro(*args, **kwargs)\r\n File \"/home/palm/redv3/lib/python3.5/site-packages/redbot/cogs/image/image.py\", line 47, in imgur_search\r\n data = await search_get.json()\r\n File \"/home/palm/redv3/lib/python3.5/site-packages/aiohttp/client_reqrep.py\", line 730, in json\r\n headers=self.headers)\r\naiohttp.client_exceptions.ClientResponseError: 0, message='Attempt to decode JSON with unexpected mimetype: text/html; charset=utf-8'\r\n```\n", "before_files": [{"content": "from random import shuffle\n\nimport aiohttp\nfrom discord.ext import commands\n\nfrom redbot.core.i18n import CogI18n\nfrom redbot.core import checks, Config\n\n_ = CogI18n(\"Image\", __file__)\n\nGIPHY_API_KEY = \"dc6zaTOxFJmzC\"\n\n\nclass Image:\n \"\"\"Image related commands.\"\"\"\n default_global = {\n \"imgur_client_id\": None\n }\n\n def __init__(self, bot):\n self.bot = bot\n self.settings = Config.get_conf(self, identifier=2652104208, force_registration=True)\n self.settings.register_global(**self.default_global)\n self.session = aiohttp.ClientSession()\n self.imgur_base_url = \"https://api.imgur.com/3/\"\n\n def __unload(self):\n self.session.close()\n\n @commands.group(name=\"imgur\")\n @commands.guild_only()\n async def _imgur(self, ctx):\n \"\"\"Retrieves pictures from imgur\n\n Make sure to set the client ID using\n [p]imgurcreds\"\"\"\n if ctx.invoked_subcommand is None:\n await ctx.send_help()\n\n @_imgur.command(name=\"search\")\n async def imgur_search(self, ctx, *, term: str):\n \"\"\"Searches Imgur for the specified term and returns up to 3 results\"\"\"\n url = self.imgur_base_url + \"time/all/0\"\n params = {\"q\": term}\n headers = {\"Authorization\": \"Client-ID {}\".format(await self.settings.imgur_client_id())}\n async with self.session.get(url, headers=headers, data=params) as search_get:\n data = await search_get.json()\n\n if data[\"success\"]:\n results = data[\"data\"]\n if not results:\n await ctx.send(_(\"Your search returned no results\"))\n return\n shuffle(results)\n msg = _(\"Search results...\\n\")\n for r in results[:3]:\n msg += r[\"gifv\"] if \"gifv\" in r else r[\"link\"]\n msg += \"\\n\"\n await ctx.send(msg)\n else:\n await ctx.send(_(\"Something went wrong. 
Error code is {}\").format(data[\"status\"]))\n\n @_imgur.command(name=\"subreddit\")\n async def imgur_subreddit(self, ctx, subreddit: str, sort_type: str=\"top\", window: str=\"day\"):\n \"\"\"Gets images from the specified subreddit section\n\n Sort types: new, top\n Time windows: day, week, month, year, all\"\"\"\n sort_type = sort_type.lower()\n window = window.lower()\n\n if sort_type not in (\"new\", \"top\"):\n await ctx.send(_(\"Only 'new' and 'top' are a valid sort type.\"))\n return\n elif window not in (\"day\", \"week\", \"month\", \"year\", \"all\"):\n await ctx.send_help()\n return\n\n if sort_type == \"new\":\n sort = \"time\"\n elif sort_type == \"top\":\n sort = \"top\"\n\n links = []\n headers = {\"Authorization\": \"Client-ID {}\".format(await self.settings.imgur_client_id())}\n url = self.imgur_base_url + \"r/{}/{}/{}/0\".format(subreddit, sort, window)\n\n async with self.session.get(url, headers=headers) as sub_get:\n data = await sub_get.json()\n\n if data[\"success\"]:\n items = data[\"data\"]\n if items:\n for item in items[:3]:\n link = item[\"gifv\"] if \"gifv\" in item else item[\"link\"]\n links.append(\"{}\\n{}\".format(item[\"title\"], link))\n\n if links:\n await ctx.send(\"\\n\".join(links))\n else:\n await ctx.send(_(\"No results found.\"))\n else:\n await ctx.send(_(\"Something went wrong. Error code is {}\").format(data[\"status\"]))\n\n @checks.is_owner()\n @commands.command()\n async def imgurcreds(self, ctx, imgur_client_id: str):\n \"\"\"Sets the imgur client id\n You will need an account on Imgur to get this\n\n You can get these by visiting https://api.imgur.com/oauth2/addclient\n and filling out the form. Enter a name for the application, select\n 'Anonymous usage without user authorization' for the auth type,\n leave the app website blank, enter a valid email address, and\n enter a description. Check the box for the captcha, then click Next.\n Your client ID will be on the page that loads\"\"\"\n await self.settings.imgur_client_id.set(imgur_client_id)\n await ctx.send(_(\"Set the imgur client id!\"))\n\n @commands.command(pass_context=True, no_pm=True)\n async def gif(self, ctx, *keywords):\n \"\"\"Retrieves first search result from giphy\"\"\"\n if keywords:\n keywords = \"+\".join(keywords)\n else:\n await ctx.send_help()\n return\n\n url = (\"http://api.giphy.com/v1/gifs/search?&api_key={}&q={}\"\n \"\".format(GIPHY_API_KEY, keywords))\n\n async with self.session.get(url) as r:\n result = await r.json()\n if r.status == 200:\n if result[\"data\"]:\n await ctx.send(result[\"data\"][0][\"url\"])\n else:\n await ctx.send(_(\"No results found.\"))\n else:\n await ctx.send(_(\"Error contacting the API\"))\n\n @commands.command(pass_context=True, no_pm=True)\n async def gifr(self, ctx, *keywords):\n \"\"\"Retrieves a random gif from a giphy search\"\"\"\n if keywords:\n keywords = \"+\".join(keywords)\n else:\n await ctx.send_help()\n return\n\n url = (\"http://api.giphy.com/v1/gifs/random?&api_key={}&tag={}\"\n \"\".format(GIPHY_API_KEY, keywords))\n\n async with self.session.get(url) as r:\n result = await r.json()\n if r.status == 200:\n if result[\"data\"]:\n await ctx.send(result[\"data\"][\"url\"])\n else:\n await ctx.send(_(\"No results found.\"))\n else:\n await ctx.send(_(\"Error contacting the API\"))\n", "path": "redbot/cogs/image/image.py"}]} | 2,553 | 671 |
gh_patches_debug_5956 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[RFE] Remove the sizegrip from the statusbar of the GUI tool
# Feature request
## Description
There should not be any sizegrip available in the statusbar of the GUI tool as suggested in https://github.com/freedomofpress/securedrop/pull/3300#discussion_r184066922
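In Qt this is a one-line change on the status bar, e.g. (a sketch using the standard QStatusBar API):

```python
# e.g. in UpdaterApp.__init__, right after setupUi(self)
self.statusbar.setSizeGripEnabled(False)
```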
</issue>
<code>
[start of journalist_gui/journalist_gui/SecureDropUpdater.py]
1 #!/usr/bin/python
2 from PyQt5 import QtGui, QtWidgets
3 from PyQt5.QtCore import QThread, pyqtSignal
4 import subprocess
5 import os
6 import pexpect
7
8 from journalist_gui import updaterUI, strings, resources_rc # noqa
9
10
11 LOCK_LOCATION = "/home/amnesia/Persistent/securedrop/securedrop_update.lock" # noqa
12
13
14 class SetupThread(QThread):
15 signal = pyqtSignal('PyQt_PyObject')
16
17 def __init__(self):
18 QThread.__init__(self)
19 self.output = ""
20 self.update_success = False
21 self.failure_reason = ""
22
23 def run(self):
24 sdadmin_path = '/home/amnesia/Persistent/securedrop/securedrop-admin'
25 update_command = [sdadmin_path, 'setup']
26
27 # Create lock so we resume failed updates on reboot.
28 # Don't create the lock if it already exists.
29 if not os.path.exists(LOCK_LOCATION):
30 open(LOCK_LOCATION, 'a').close()
31
32 try:
33 self.output = subprocess.check_output(
34 update_command,
35 stderr=subprocess.STDOUT).decode('utf-8')
36 if 'Failed to install' in self.output:
37 self.update_success = False
38 self.failure_reason = strings.update_failed_generic_reason
39 else:
40 self.update_success = True
41 except subprocess.CalledProcessError as e:
42 self.output += e.output.decode('utf-8')
43 self.update_success = False
44 self.failure_reason = strings.update_failed_generic_reason
45 result = {'status': self.update_success,
46 'output': self.output,
47 'failure_reason': self.failure_reason}
48 self.signal.emit(result)
49
50
51 # This thread will handle the ./securedrop-admin update command
52 class UpdateThread(QThread):
53 signal = pyqtSignal('PyQt_PyObject')
54
55 def __init__(self):
56 QThread.__init__(self)
57 self.output = ""
58 self.update_success = False
59 self.failure_reason = ""
60
61 def run(self):
62 sdadmin_path = '/home/amnesia/Persistent/securedrop/securedrop-admin'
63 update_command = [sdadmin_path, 'update']
64 try:
65 self.output = subprocess.check_output(
66 update_command,
67 stderr=subprocess.STDOUT).decode('utf-8')
68 if "Signature verification successful" in self.output:
69 self.update_success = True
70 else:
71 self.failure_reason = strings.update_failed_generic_reason
72 except subprocess.CalledProcessError as e:
73 self.update_success = False
74 self.output += e.output.decode('utf-8')
75 if 'Signature verification failed' in self.output:
76 self.failure_reason = strings.update_failed_sig_failure
77 else:
78 self.failure_reason = strings.update_failed_generic_reason
79 result = {'status': self.update_success,
80 'output': self.output,
81 'failure_reason': self.failure_reason}
82 self.signal.emit(result)
83
84
85 # This thread will handle the ./securedrop-admin tailsconfig command
86 class TailsconfigThread(QThread):
87 signal = pyqtSignal('PyQt_PyObject')
88
89 def __init__(self):
90 QThread.__init__(self)
91 self.output = ""
92 self.update_success = False
93 self.failure_reason = ""
94 self.sudo_password = ""
95
96 def run(self):
97 tailsconfig_command = ("/home/amnesia/Persistent/"
98 "securedrop/securedrop-admin "
99 "tailsconfig")
100 try:
101 child = pexpect.spawn(tailsconfig_command)
102 child.expect('SUDO password:')
103 self.output += child.before.decode('utf-8')
104 child.sendline(self.sudo_password)
105 child.expect(pexpect.EOF)
106 self.output += child.before.decode('utf-8')
107
108 # For Tailsconfig to be considered a success, we expect no
109 # failures in the Ansible output.
110 if 'failed=0' not in self.output:
111 self.update_success = False
112 self.failure_reason = strings.tailsconfig_failed_generic_reason # noqa
113 else:
114 self.update_success = True
115 except pexpect.exceptions.TIMEOUT:
116 self.update_success = False
117 self.failure_reason = strings.tailsconfig_failed_sudo_password
118
119 except subprocess.CalledProcessError:
120 self.update_success = False
121 self.failure_reason = strings.tailsconfig_failed_generic_reason
122 result = {'status': self.update_success,
123 'output': self.output,
124 'failure_reason': self.failure_reason}
125 self.signal.emit(result)
126
127
128 class UpdaterApp(QtWidgets.QMainWindow, updaterUI.Ui_MainWindow):
129
130 def __init__(self, parent=None):
131 super(UpdaterApp, self).__init__(parent)
132 self.setupUi(self)
133 self.output = strings.initial_text_box
134 self.plainTextEdit.setPlainText(self.output)
135 self.update_success = False
136
137 pixmap = QtGui.QPixmap(":/images/static/banner.png")
138 self.label_2.setPixmap(pixmap)
139 self.label_2.setScaledContents(True)
140
141 self.progressBar.setProperty("value", 0)
142 self.setWindowTitle(strings.window_title)
143 self.setWindowIcon(QtGui.QIcon(':/images/static/securedrop_icon.png'))
144 self.label.setText(strings.update_in_progress)
145
146 self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),
147 strings.main_tab)
148 self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),
149 strings.output_tab)
150
151 # Connect buttons to their functions.
152 self.pushButton.setText(strings.install_later_button)
153 self.pushButton.setStyleSheet("""background-color: lightgrey;
154 min-height: 2em;
155 border-radius: 10px""")
156 self.pushButton.clicked.connect(self.close)
157 self.pushButton_2.setText(strings.install_update_button)
158 self.pushButton_2.setStyleSheet("""background-color: #E6FFEB;
159 min-height: 2em;
160 border-radius: 10px;""")
161 self.pushButton_2.clicked.connect(self.update_securedrop)
162 self.update_thread = UpdateThread()
163 self.update_thread.signal.connect(self.update_status)
164 self.tails_thread = TailsconfigThread()
165 self.tails_thread.signal.connect(self.tails_status)
166 self.setup_thread = SetupThread()
167 self.setup_thread.signal.connect(self.setup_status)
168
169 # At the end of this function, we will try to do tailsconfig.
170 # A new slot will handle tailsconfig output
171 def setup_status(self, result):
172 "This is the slot for setup thread"
173 self.output += result['output']
174 self.update_success = result['status']
175 self.failure_reason = result['failure_reason']
176 self.progressBar.setProperty("value", 60)
177 self.plainTextEdit.setPlainText(self.output)
178 self.plainTextEdit.setReadOnly = True
179 if not self.update_success: # Failed to do setup update
180 self.pushButton.setEnabled(True)
181 self.pushButton_2.setEnabled(True)
182 self.update_status_bar_and_output(self.failure_reason)
183 self.progressBar.setProperty("value", 0)
184 self.alert_failure(self.failure_reason)
185 return
186 self.progressBar.setProperty("value", 70)
187 self.call_tailsconfig()
188
189 # This will update the output text after the git commands.
190 def update_status(self, result):
191 "This is the slot for update thread"
192 self.output += result['output']
193 self.update_success = result['status']
194 self.failure_reason = result['failure_reason']
195 self.progressBar.setProperty("value", 40)
196 self.plainTextEdit.setPlainText(self.output)
197 self.plainTextEdit.setReadOnly = True
198 self.progressBar.setProperty("value", 50)
199 self.update_status_bar_and_output(strings.doing_setup)
200 self.setup_thread.start()
201
202 def update_status_bar_and_output(self, status_message):
203 """This method updates the status bar and the output window with the
204 status_message."""
205 self.statusbar.showMessage(status_message)
206 self.output += status_message + '\n'
207 self.plainTextEdit.setPlainText(self.output)
208
209 def call_tailsconfig(self):
210 # Now let us work on tailsconfig part
211 if self.update_success:
212 # Get sudo password and add an enter key as tailsconfig command
213 # expects
214 sudo_password = self.get_sudo_password()
215 if not sudo_password:
216 self.update_success = False
217 self.failure_reason = strings.missing_sudo_password
218 self.on_failure()
219 return
220 self.tails_thread.sudo_password = sudo_password + '\n'
221 self.update_status_bar_and_output(strings.updating_tails_env)
222 self.tails_thread.start()
223 else:
224 self.on_failure()
225
226 def tails_status(self, result):
227 "This is the slot for Tailsconfig thread"
228 self.output += result['output']
229 self.update_success = result['status']
230 self.failure_reason = result['failure_reason']
231 self.plainTextEdit.setPlainText(self.output)
232 self.progressBar.setProperty("value", 80)
233 if self.update_success:
234 # Remove lock
235 os.remove(LOCK_LOCATION)
236 self.update_status_bar_and_output(strings.finished)
237 self.progressBar.setProperty("value", 100)
238 self.alert_success()
239 else:
240 self.on_failure()
241
242 def on_failure(self):
243 self.update_status_bar_and_output(self.failure_reason)
244 self.alert_failure(self.failure_reason)
245 # Now everything is done, enable the button.
246 self.pushButton.setEnabled(True)
247 self.pushButton_2.setEnabled(True)
248 self.progressBar.setProperty("value", 0)
249
250 def update_securedrop(self):
251 self.pushButton_2.setEnabled(False)
252 self.pushButton.setEnabled(False)
253 self.progressBar.setProperty("value", 10)
254 self.update_status_bar_and_output(strings.fetching_update)
255 self.update_thread.start()
256
257 def alert_success(self):
258 self.success_dialog = QtWidgets.QMessageBox()
259 self.success_dialog.setIcon(QtWidgets.QMessageBox.Information)
260 self.success_dialog.setText(strings.finished_dialog_message)
261 self.success_dialog.setWindowTitle(strings.finished_dialog_title)
262 self.success_dialog.show()
263
264 def alert_failure(self, failure_reason):
265 self.error_dialog = QtWidgets.QMessageBox()
266 self.error_dialog.setIcon(QtWidgets.QMessageBox.Critical)
267 self.error_dialog.setText(self.failure_reason)
268 self.error_dialog.setWindowTitle(strings.update_failed_dialog_title)
269 self.error_dialog.show()
270
271 def get_sudo_password(self):
272 sudo_password, ok_is_pressed = QtWidgets.QInputDialog.getText(
273 self, "Tails Administrator password", strings.sudo_password_text,
274 QtWidgets.QLineEdit.Password, "")
275 if ok_is_pressed and sudo_password:
276 return sudo_password
277 else:
278 return None
279
[end of journalist_gui/journalist_gui/SecureDropUpdater.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/journalist_gui/journalist_gui/SecureDropUpdater.py b/journalist_gui/journalist_gui/SecureDropUpdater.py
--- a/journalist_gui/journalist_gui/SecureDropUpdater.py
+++ b/journalist_gui/journalist_gui/SecureDropUpdater.py
@@ -130,6 +130,7 @@
def __init__(self, parent=None):
super(UpdaterApp, self).__init__(parent)
self.setupUi(self)
+ self.statusbar.setSizeGripEnabled(False)
self.output = strings.initial_text_box
self.plainTextEdit.setPlainText(self.output)
self.update_success = False
| {"golden_diff": "diff --git a/journalist_gui/journalist_gui/SecureDropUpdater.py b/journalist_gui/journalist_gui/SecureDropUpdater.py\n--- a/journalist_gui/journalist_gui/SecureDropUpdater.py\n+++ b/journalist_gui/journalist_gui/SecureDropUpdater.py\n@@ -130,6 +130,7 @@\n def __init__(self, parent=None):\n super(UpdaterApp, self).__init__(parent)\n self.setupUi(self)\n+ self.statusbar.setSizeGripEnabled(False)\n self.output = strings.initial_text_box\n self.plainTextEdit.setPlainText(self.output)\n self.update_success = False\n", "issue": "[RFE] Remove the sizegrip from the statusbar of the GUI tool\n# Feature request\r\n\r\n## Description\r\n\r\nThere should not be any sizegrip available in the statusbar of the GUI tool as suggested in https://github.com/freedomofpress/securedrop/pull/3300#discussion_r184066922\n", "before_files": [{"content": "#!/usr/bin/python\nfrom PyQt5 import QtGui, QtWidgets\nfrom PyQt5.QtCore import QThread, pyqtSignal\nimport subprocess\nimport os\nimport pexpect\n\nfrom journalist_gui import updaterUI, strings, resources_rc # noqa\n\n\nLOCK_LOCATION = \"/home/amnesia/Persistent/securedrop/securedrop_update.lock\" # noqa\n\n\nclass SetupThread(QThread):\n signal = pyqtSignal('PyQt_PyObject')\n\n def __init__(self):\n QThread.__init__(self)\n self.output = \"\"\n self.update_success = False\n self.failure_reason = \"\"\n\n def run(self):\n sdadmin_path = '/home/amnesia/Persistent/securedrop/securedrop-admin'\n update_command = [sdadmin_path, 'setup']\n\n # Create lock so we resume failed updates on reboot.\n # Don't create the lock if it already exists.\n if not os.path.exists(LOCK_LOCATION):\n open(LOCK_LOCATION, 'a').close()\n\n try:\n self.output = subprocess.check_output(\n update_command,\n stderr=subprocess.STDOUT).decode('utf-8')\n if 'Failed to install' in self.output:\n self.update_success = False\n self.failure_reason = strings.update_failed_generic_reason\n else:\n self.update_success = True\n except subprocess.CalledProcessError as e:\n self.output += e.output.decode('utf-8')\n self.update_success = False\n self.failure_reason = strings.update_failed_generic_reason\n result = {'status': self.update_success,\n 'output': self.output,\n 'failure_reason': self.failure_reason}\n self.signal.emit(result)\n\n\n# This thread will handle the ./securedrop-admin update command\nclass UpdateThread(QThread):\n signal = pyqtSignal('PyQt_PyObject')\n\n def __init__(self):\n QThread.__init__(self)\n self.output = \"\"\n self.update_success = False\n self.failure_reason = \"\"\n\n def run(self):\n sdadmin_path = '/home/amnesia/Persistent/securedrop/securedrop-admin'\n update_command = [sdadmin_path, 'update']\n try:\n self.output = subprocess.check_output(\n update_command,\n stderr=subprocess.STDOUT).decode('utf-8')\n if \"Signature verification successful\" in self.output:\n self.update_success = True\n else:\n self.failure_reason = strings.update_failed_generic_reason\n except subprocess.CalledProcessError as e:\n self.update_success = False\n self.output += e.output.decode('utf-8')\n if 'Signature verification failed' in self.output:\n self.failure_reason = strings.update_failed_sig_failure\n else:\n self.failure_reason = strings.update_failed_generic_reason\n result = {'status': self.update_success,\n 'output': self.output,\n 'failure_reason': self.failure_reason}\n self.signal.emit(result)\n\n\n# This thread will handle the ./securedrop-admin tailsconfig command\nclass TailsconfigThread(QThread):\n signal = pyqtSignal('PyQt_PyObject')\n\n def __init__(self):\n 
QThread.__init__(self)\n self.output = \"\"\n self.update_success = False\n self.failure_reason = \"\"\n self.sudo_password = \"\"\n\n def run(self):\n tailsconfig_command = (\"/home/amnesia/Persistent/\"\n \"securedrop/securedrop-admin \"\n \"tailsconfig\")\n try:\n child = pexpect.spawn(tailsconfig_command)\n child.expect('SUDO password:')\n self.output += child.before.decode('utf-8')\n child.sendline(self.sudo_password)\n child.expect(pexpect.EOF)\n self.output += child.before.decode('utf-8')\n\n # For Tailsconfig to be considered a success, we expect no\n # failures in the Ansible output.\n if 'failed=0' not in self.output:\n self.update_success = False\n self.failure_reason = strings.tailsconfig_failed_generic_reason # noqa\n else:\n self.update_success = True\n except pexpect.exceptions.TIMEOUT:\n self.update_success = False\n self.failure_reason = strings.tailsconfig_failed_sudo_password\n\n except subprocess.CalledProcessError:\n self.update_success = False\n self.failure_reason = strings.tailsconfig_failed_generic_reason\n result = {'status': self.update_success,\n 'output': self.output,\n 'failure_reason': self.failure_reason}\n self.signal.emit(result)\n\n\nclass UpdaterApp(QtWidgets.QMainWindow, updaterUI.Ui_MainWindow):\n\n def __init__(self, parent=None):\n super(UpdaterApp, self).__init__(parent)\n self.setupUi(self)\n self.output = strings.initial_text_box\n self.plainTextEdit.setPlainText(self.output)\n self.update_success = False\n\n pixmap = QtGui.QPixmap(\":/images/static/banner.png\")\n self.label_2.setPixmap(pixmap)\n self.label_2.setScaledContents(True)\n\n self.progressBar.setProperty(\"value\", 0)\n self.setWindowTitle(strings.window_title)\n self.setWindowIcon(QtGui.QIcon(':/images/static/securedrop_icon.png'))\n self.label.setText(strings.update_in_progress)\n\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n strings.main_tab)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n strings.output_tab)\n\n # Connect buttons to their functions.\n self.pushButton.setText(strings.install_later_button)\n self.pushButton.setStyleSheet(\"\"\"background-color: lightgrey;\n min-height: 2em;\n border-radius: 10px\"\"\")\n self.pushButton.clicked.connect(self.close)\n self.pushButton_2.setText(strings.install_update_button)\n self.pushButton_2.setStyleSheet(\"\"\"background-color: #E6FFEB;\n min-height: 2em;\n border-radius: 10px;\"\"\")\n self.pushButton_2.clicked.connect(self.update_securedrop)\n self.update_thread = UpdateThread()\n self.update_thread.signal.connect(self.update_status)\n self.tails_thread = TailsconfigThread()\n self.tails_thread.signal.connect(self.tails_status)\n self.setup_thread = SetupThread()\n self.setup_thread.signal.connect(self.setup_status)\n\n # At the end of this function, we will try to do tailsconfig.\n # A new slot will handle tailsconfig output\n def setup_status(self, result):\n \"This is the slot for setup thread\"\n self.output += result['output']\n self.update_success = result['status']\n self.failure_reason = result['failure_reason']\n self.progressBar.setProperty(\"value\", 60)\n self.plainTextEdit.setPlainText(self.output)\n self.plainTextEdit.setReadOnly = True\n if not self.update_success: # Failed to do setup update\n self.pushButton.setEnabled(True)\n self.pushButton_2.setEnabled(True)\n self.update_status_bar_and_output(self.failure_reason)\n self.progressBar.setProperty(\"value\", 0)\n self.alert_failure(self.failure_reason)\n return\n self.progressBar.setProperty(\"value\", 70)\n 
self.call_tailsconfig()\n\n # This will update the output text after the git commands.\n def update_status(self, result):\n \"This is the slot for update thread\"\n self.output += result['output']\n self.update_success = result['status']\n self.failure_reason = result['failure_reason']\n self.progressBar.setProperty(\"value\", 40)\n self.plainTextEdit.setPlainText(self.output)\n self.plainTextEdit.setReadOnly = True\n self.progressBar.setProperty(\"value\", 50)\n self.update_status_bar_and_output(strings.doing_setup)\n self.setup_thread.start()\n\n def update_status_bar_and_output(self, status_message):\n \"\"\"This method updates the status bar and the output window with the\n status_message.\"\"\"\n self.statusbar.showMessage(status_message)\n self.output += status_message + '\\n'\n self.plainTextEdit.setPlainText(self.output)\n\n def call_tailsconfig(self):\n # Now let us work on tailsconfig part\n if self.update_success:\n # Get sudo password and add an enter key as tailsconfig command\n # expects\n sudo_password = self.get_sudo_password()\n if not sudo_password:\n self.update_success = False\n self.failure_reason = strings.missing_sudo_password\n self.on_failure()\n return\n self.tails_thread.sudo_password = sudo_password + '\\n'\n self.update_status_bar_and_output(strings.updating_tails_env)\n self.tails_thread.start()\n else:\n self.on_failure()\n\n def tails_status(self, result):\n \"This is the slot for Tailsconfig thread\"\n self.output += result['output']\n self.update_success = result['status']\n self.failure_reason = result['failure_reason']\n self.plainTextEdit.setPlainText(self.output)\n self.progressBar.setProperty(\"value\", 80)\n if self.update_success:\n # Remove lock\n os.remove(LOCK_LOCATION)\n self.update_status_bar_and_output(strings.finished)\n self.progressBar.setProperty(\"value\", 100)\n self.alert_success()\n else:\n self.on_failure()\n\n def on_failure(self):\n self.update_status_bar_and_output(self.failure_reason)\n self.alert_failure(self.failure_reason)\n # Now everything is done, enable the button.\n self.pushButton.setEnabled(True)\n self.pushButton_2.setEnabled(True)\n self.progressBar.setProperty(\"value\", 0)\n\n def update_securedrop(self):\n self.pushButton_2.setEnabled(False)\n self.pushButton.setEnabled(False)\n self.progressBar.setProperty(\"value\", 10)\n self.update_status_bar_and_output(strings.fetching_update)\n self.update_thread.start()\n\n def alert_success(self):\n self.success_dialog = QtWidgets.QMessageBox()\n self.success_dialog.setIcon(QtWidgets.QMessageBox.Information)\n self.success_dialog.setText(strings.finished_dialog_message)\n self.success_dialog.setWindowTitle(strings.finished_dialog_title)\n self.success_dialog.show()\n\n def alert_failure(self, failure_reason):\n self.error_dialog = QtWidgets.QMessageBox()\n self.error_dialog.setIcon(QtWidgets.QMessageBox.Critical)\n self.error_dialog.setText(self.failure_reason)\n self.error_dialog.setWindowTitle(strings.update_failed_dialog_title)\n self.error_dialog.show()\n\n def get_sudo_password(self):\n sudo_password, ok_is_pressed = QtWidgets.QInputDialog.getText(\n self, \"Tails Administrator password\", strings.sudo_password_text,\n QtWidgets.QLineEdit.Password, \"\")\n if ok_is_pressed and sudo_password:\n return sudo_password\n else:\n return None\n", "path": "journalist_gui/journalist_gui/SecureDropUpdater.py"}]} | 3,570 | 143 |
gh_patches_debug_20172 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1177 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cannot import name 'clock_gettime' from 'time' (unknown location)
### Expected behaviour
Importing `datacube.drivers.postgres._connections` (e.g. via `from ._connections import PostgresDb`) should succeed.
### Actual behaviour
File "C:\ProgramData\Anaconda3\lib\site-packages\datacube\drivers\postgres\_connections.py", line 20, in <module>
from time import clock_gettime, CLOCK_REALTIME
ImportError: cannot import name 'clock_gettime' from 'time' (unknown location)
cannot import name 'clock_gettime' from 'time' (unknown location)
### Steps to reproduce the behaviour
from ._connections import PostgresDb
### Environment information
Python 3.8.11
* Which ``datacube --version`` are you using?
Unknown, it's referenced in geocube on conda-forge
* What datacube deployment/environment are you running against?
n/a
Additional notes:
`time.clock` has been removed in Python 3.8 after being deprecated since Python 3.3, and `time.clock_gettime` is only available on Unix platforms (hence the failure on Windows). The recommendation is to replace such calls with `time.perf_counter`, `time.process_time`, or `time.time`, depending on which clock is needed.
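For the realtime clock specifically, a portable sketch (plain `time.time()` returns the same wall-clock value as `clock_gettime(CLOCK_REALTIME)` and works on Windows):

```python
from time import time

# instead of: clock_gettime(CLOCK_REALTIME)
timestamp = time()  # seconds since the epoch, as a float
```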
</issue>
<code>
[start of datacube/drivers/postgres/_connections.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5
6 # We often have one-arg-per column, so these checks aren't so useful.
7 # pylint: disable=too-many-arguments,too-many-public-methods
8
9 # SQLAlchemy queries require "column == None", not "column is None" due to operator overloading:
10 # pylint: disable=singleton-comparison
11
12 """
13 Postgres connection and setup
14 """
15 import json
16 import logging
17 import os
18 import re
19 from contextlib import contextmanager
20 from time import clock_gettime, CLOCK_REALTIME
21 from typing import Callable, Optional, Union
22
23 from sqlalchemy import event, create_engine, text
24 from sqlalchemy.engine import Engine
25 from sqlalchemy.engine.url import URL as EngineUrl
26
27 import datacube
28 from datacube.index.exceptions import IndexSetupError
29 from datacube.utils import jsonify_document
30
31 from . import _api
32 from . import _core
33
34 _LIB_ID = 'agdc-' + str(datacube.__version__)
35
36 _LOG = logging.getLogger(__name__)
37
38 try:
39 import pwd
40
41 DEFAULT_DB_USER = pwd.getpwuid(os.geteuid()).pw_name # type: Optional[str]
42 except (ImportError, KeyError):
43 # No default on Windows and some other systems
44 DEFAULT_DB_USER = None
45 DEFAULT_DB_PORT = 5432
46 DEFAULT_IAM_AUTH = False
47 DEFAULT_IAM_TIMEOUT = 600
48
49
50 class PostgresDb(object):
51 """
52 A thin database access api.
53
54 It exists so that higher level modules are not tied to SQLAlchemy, connections or specifics of database-access.
55
56 (and can be unit tested without any actual databases)
57
58 Thread safe: the only shared state is the (thread-safe) sqlalchemy connection pool.
59
60 But not multiprocess safe once the first connections are made! A connection must not be shared between multiple
61 processes. You can call close() before forking if you know no other threads currently hold connections,
62 or else use a separate instance of this class in each process.
63 """
64
65 def __init__(self, engine):
66 # We don't recommend using this constructor directly as it may change.
67 # Use static methods PostgresDb.create() or PostgresDb.from_config()
68 self._engine = engine
69
70 @classmethod
71 def from_config(cls, config, application_name=None, validate_connection=True):
72 app_name = cls._expand_app_name(application_name)
73
74 return PostgresDb.create(
75 config['db_hostname'],
76 config['db_database'],
77 config.get('db_username', DEFAULT_DB_USER),
78 config.get('db_password', None),
79 config.get('db_port', DEFAULT_DB_PORT),
80 application_name=app_name,
81 validate=validate_connection,
82 iam_rds_auth=bool(config.get("db_iam_authentication", DEFAULT_IAM_AUTH)),
83 iam_rds_timeout=int(config.get("db_iam_timeout", DEFAULT_IAM_TIMEOUT)),
84 pool_timeout=int(config.get('db_connection_timeout', 60)),
85 # pass config?
86 )
87
88 @classmethod
89 def create(cls, hostname, database, username=None, password=None, port=None,
90 application_name=None, validate=True,
91 iam_rds_auth=False, iam_rds_timeout=600,
92 # pass config?
93 pool_timeout=60):
94 mk_url = getattr(EngineUrl, 'create', EngineUrl)
95 engine = cls._create_engine(
96 mk_url(
97 'postgresql',
98 host=hostname, database=database, port=port,
99 username=username, password=password,
100 ),
101 application_name=application_name,
102 iam_rds_auth=iam_rds_auth,
103 iam_rds_timeout=iam_rds_timeout,
104 pool_timeout=pool_timeout)
105 if validate:
106 if not _core.database_exists(engine):
107 raise IndexSetupError('\n\nNo DB schema exists. Have you run init?\n\t{init_command}'.format(
108 init_command='datacube system init'
109 ))
110
111 if not _core.schema_is_latest(engine):
112 raise IndexSetupError(
113 '\n\nDB schema is out of date. '
114 'An administrator must run init:\n\t{init_command}'.format(
115 init_command='datacube -v system init'
116 ))
117 return PostgresDb(engine)
118
119 @staticmethod
120 def _create_engine(url, application_name=None, iam_rds_auth=False, iam_rds_timeout=600, pool_timeout=60):
121 engine = create_engine(
122 url,
123 echo=False,
124 echo_pool=False,
125
126 # 'AUTOCOMMIT' here means READ-COMMITTED isolation level with autocommit on.
127 # When a transaction is needed we will do an explicit begin/commit.
128 isolation_level='AUTOCOMMIT',
129 json_serializer=_to_json,
130 # If a connection is idle for this many seconds, SQLAlchemy will renew it rather
131 # than assuming it's still open. Allows servers to close idle connections without clients
132 # getting errors.
133 pool_recycle=pool_timeout,
134 connect_args={'application_name': application_name}
135 )
136
137 if iam_rds_auth:
138 from datacube.utils.aws import obtain_new_iam_auth_token
139 handle_dynamic_token_authentication(engine, obtain_new_iam_auth_token, timeout=iam_rds_timeout, url=url)
140
141 return engine
142
143 @property
144 def url(self) -> EngineUrl:
145 return self._engine.url
146
147 @staticmethod
148 def get_db_username(config):
149 try:
150 return config['db_username']
151 except KeyError:
152 return DEFAULT_DB_USER
153
154 def close(self):
155 """
156 Close any idle connections in the pool.
157
158 This is good practice if you are keeping this object in scope
159 but wont be using it for a while.
160
161 Connections should not be shared between processes, so this should be called
162 before forking if the same instance will be used.
163
164 (connections are normally closed automatically when this object is
165 garbage collected)
166 """
167 self._engine.dispose()
168
169 @classmethod
170 def _expand_app_name(cls, application_name):
171 """
172 >>> PostgresDb._expand_app_name(None) #doctest: +ELLIPSIS
173 'agdc-...'
174 >>> PostgresDb._expand_app_name('') #doctest: +ELLIPSIS
175 'agdc-...'
176 >>> PostgresDb._expand_app_name('cli') #doctest: +ELLIPSIS
177 'cli agdc-...'
178 >>> PostgresDb._expand_app_name('a b.c/d')
179 'a-b-c-d agdc-...'
180 >>> PostgresDb._expand_app_name(5)
181 Traceback (most recent call last):
182 ...
183 TypeError: Application name must be a string
184 """
185 full_name = _LIB_ID
186 if application_name:
187 if not isinstance(application_name, str):
188 raise TypeError('Application name must be a string')
189
190 full_name = re.sub('[^0-9a-zA-Z]+', '-', application_name) + ' ' + full_name
191
192 if len(full_name) > 64:
193 _LOG.warning('Application name is too long: Truncating to %s chars', (64 - len(_LIB_ID) - 1))
194 return full_name[-64:]
195
196 def init(self, with_permissions=True):
197 """
198 Init a new database (if not already set up).
199
200 :return: If it was newly created.
201 """
202 is_new = _core.ensure_db(self._engine, with_permissions=with_permissions)
203 if not is_new:
204 _core.update_schema(self._engine)
205
206 return is_new
207
208 @contextmanager
209 def connect(self):
210 """
211 Borrow a connection from the pool.
212
213 The name connect() is misleading: it will not create a new connection if one is already available in the pool.
214
215 Callers should minimise the amount of time they hold onto their connections. If they're doing anything between
216 calls to the DB (such as opening files, or waiting on user input), it's better to return the connection
217 to the pool beforehand.
218
219 The connection can raise errors if not following this advice ("server closed the connection unexpectedly"),
220 as some servers will aggressively close idle connections (eg. DEA's NCI servers). It also prevents the
221 connection from being reused while borrowed.
222 """
223 with self._engine.connect() as connection:
224 yield _api.PostgresDbAPI(connection)
225 connection.close()
226
227 @contextmanager
228 def begin(self):
229 """
230 Start a transaction.
231
232 Returns an instance that will maintain a single connection in a transaction.
233
234 Call commit() or rollback() to complete the transaction or use a context manager:
235
236 with db.begin() as trans:
237 trans.insert_dataset(...)
238
239 (Don't share an instance between threads)
240
241 :rtype: PostgresDBAPI
242 """
243 with self._engine.connect() as connection:
244 connection.execute(text('BEGIN'))
245 try:
246 yield _api.PostgresDbAPI(connection)
247 connection.execute(text('COMMIT'))
248 except Exception: # pylint: disable=broad-except
249 connection.execute(text('ROLLBACK'))
250 raise
251 finally:
252 connection.close()
253
254 def give_me_a_connection(self):
255 return self._engine.connect()
256
257 @classmethod
258 def get_dataset_fields(cls, metadata_type_definition):
259 return _api.get_dataset_fields(metadata_type_definition)
260
261 def __repr__(self):
262 return "PostgresDb<engine={!r}>".format(self._engine)
263
264
265 def handle_dynamic_token_authentication(engine: Engine,
266 new_token: Callable[..., str],
267 timeout: Union[float, int] = 600,
268 **kwargs) -> None:
269 last_token = [None]
270 last_token_time = [0.0]
271
272 @event.listens_for(engine, "do_connect")
273 def override_new_connection(dialect, conn_rec, cargs, cparams):
274 # Handle IAM authentication
275 now = clock_gettime(CLOCK_REALTIME)
276 if now - last_token_time[0] > timeout:
277 last_token[0] = new_token(**kwargs)
278 last_token_time[0] = now
279 cparams["password"] = last_token[0]
280
281
282 def _to_json(o):
283 # Postgres <=9.5 doesn't support NaN and Infinity
284 fixedup = jsonify_document(o)
285 return json.dumps(fixedup, default=_json_fallback)
286
287
288 def _json_fallback(obj):
289 """Fallback json serialiser."""
290 raise TypeError("Type not serializable: {}".format(type(obj)))
291
[end of datacube/drivers/postgres/_connections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datacube/drivers/postgres/_connections.py b/datacube/drivers/postgres/_connections.py
--- a/datacube/drivers/postgres/_connections.py
+++ b/datacube/drivers/postgres/_connections.py
@@ -17,7 +17,6 @@
import os
import re
from contextlib import contextmanager
-from time import clock_gettime, CLOCK_REALTIME
from typing import Callable, Optional, Union
from sqlalchemy import event, create_engine, text
@@ -272,6 +271,10 @@
@event.listens_for(engine, "do_connect")
def override_new_connection(dialect, conn_rec, cargs, cparams):
# Handle IAM authentication
+ # Importing here because the function `clock_gettime` is not available on Windows
+ # which shouldn't be a problem, because boto3 auth is mostly used on AWS.
+ from time import clock_gettime, CLOCK_REALTIME
+
now = clock_gettime(CLOCK_REALTIME)
if now - last_token_time[0] > timeout:
last_token[0] = new_token(**kwargs)
| {"golden_diff": "diff --git a/datacube/drivers/postgres/_connections.py b/datacube/drivers/postgres/_connections.py\n--- a/datacube/drivers/postgres/_connections.py\n+++ b/datacube/drivers/postgres/_connections.py\n@@ -17,7 +17,6 @@\n import os\n import re\n from contextlib import contextmanager\n-from time import clock_gettime, CLOCK_REALTIME\n from typing import Callable, Optional, Union\n \n from sqlalchemy import event, create_engine, text\n@@ -272,6 +271,10 @@\n @event.listens_for(engine, \"do_connect\")\n def override_new_connection(dialect, conn_rec, cargs, cparams):\n # Handle IAM authentication\n+ # Importing here because the function `clock_gettime` is not available on Windows\n+ # which shouldn't be a problem, because boto3 auth is mostly used on AWS.\n+ from time import clock_gettime, CLOCK_REALTIME\n+\n now = clock_gettime(CLOCK_REALTIME)\n if now - last_token_time[0] > timeout:\n last_token[0] = new_token(**kwargs)\n", "issue": " cannot import name 'clock_gettime' from 'time' (unknown location)\n### Expected behaviour\r\nImport time\r\n\r\n### Actual behaviour\r\n\r\n File \"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\datacube\\drivers\\postgres\\_connections.py\", line 20, in <module>\r\n from time import clock_gettime, CLOCK_REALTIME\r\n\r\nImportError: cannot import name 'clock_gettime' from 'time' (unknown location)\r\n\r\ncannot import name 'clock_gettime' from 'time' (unknown location)\r\n\r\n\r\n### Steps to reproduce the behaviour\r\n from ._connections import PostgresDb\r\n\r\n### Environment information\r\nPython 3.8.11\r\n* Which ``datacube --version`` are you using?\r\nUnknown, it's referenced in geocube on conda-forge\r\n* What datacube deployment/enviornment are you running against?\r\nn/a\r\n\r\nAdditional notes:\r\ntime.clock has been removed in Python 3.8 after being deprecated in Python 3.3. The recommendation is to change clock by either time.perf_counter or time.process_time.\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\n# We often have one-arg-per column, so these checks aren't so useful.\n# pylint: disable=too-many-arguments,too-many-public-methods\n\n# SQLAlchemy queries require \"column == None\", not \"column is None\" due to operator overloading:\n# pylint: disable=singleton-comparison\n\n\"\"\"\nPostgres connection and setup\n\"\"\"\nimport json\nimport logging\nimport os\nimport re\nfrom contextlib import contextmanager\nfrom time import clock_gettime, CLOCK_REALTIME\nfrom typing import Callable, Optional, Union\n\nfrom sqlalchemy import event, create_engine, text\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.engine.url import URL as EngineUrl\n\nimport datacube\nfrom datacube.index.exceptions import IndexSetupError\nfrom datacube.utils import jsonify_document\n\nfrom . import _api\nfrom . 
import _core\n\n_LIB_ID = 'agdc-' + str(datacube.__version__)\n\n_LOG = logging.getLogger(__name__)\n\ntry:\n import pwd\n\n DEFAULT_DB_USER = pwd.getpwuid(os.geteuid()).pw_name # type: Optional[str]\nexcept (ImportError, KeyError):\n # No default on Windows and some other systems\n DEFAULT_DB_USER = None\nDEFAULT_DB_PORT = 5432\nDEFAULT_IAM_AUTH = False\nDEFAULT_IAM_TIMEOUT = 600\n\n\nclass PostgresDb(object):\n \"\"\"\n A thin database access api.\n\n It exists so that higher level modules are not tied to SQLAlchemy, connections or specifics of database-access.\n\n (and can be unit tested without any actual databases)\n\n Thread safe: the only shared state is the (thread-safe) sqlalchemy connection pool.\n\n But not multiprocess safe once the first connections are made! A connection must not be shared between multiple\n processes. You can call close() before forking if you know no other threads currently hold connections,\n or else use a separate instance of this class in each process.\n \"\"\"\n\n def __init__(self, engine):\n # We don't recommend using this constructor directly as it may change.\n # Use static methods PostgresDb.create() or PostgresDb.from_config()\n self._engine = engine\n\n @classmethod\n def from_config(cls, config, application_name=None, validate_connection=True):\n app_name = cls._expand_app_name(application_name)\n\n return PostgresDb.create(\n config['db_hostname'],\n config['db_database'],\n config.get('db_username', DEFAULT_DB_USER),\n config.get('db_password', None),\n config.get('db_port', DEFAULT_DB_PORT),\n application_name=app_name,\n validate=validate_connection,\n iam_rds_auth=bool(config.get(\"db_iam_authentication\", DEFAULT_IAM_AUTH)),\n iam_rds_timeout=int(config.get(\"db_iam_timeout\", DEFAULT_IAM_TIMEOUT)),\n pool_timeout=int(config.get('db_connection_timeout', 60)),\n # pass config?\n )\n\n @classmethod\n def create(cls, hostname, database, username=None, password=None, port=None,\n application_name=None, validate=True,\n iam_rds_auth=False, iam_rds_timeout=600,\n # pass config?\n pool_timeout=60):\n mk_url = getattr(EngineUrl, 'create', EngineUrl)\n engine = cls._create_engine(\n mk_url(\n 'postgresql',\n host=hostname, database=database, port=port,\n username=username, password=password,\n ),\n application_name=application_name,\n iam_rds_auth=iam_rds_auth,\n iam_rds_timeout=iam_rds_timeout,\n pool_timeout=pool_timeout)\n if validate:\n if not _core.database_exists(engine):\n raise IndexSetupError('\\n\\nNo DB schema exists. Have you run init?\\n\\t{init_command}'.format(\n init_command='datacube system init'\n ))\n\n if not _core.schema_is_latest(engine):\n raise IndexSetupError(\n '\\n\\nDB schema is out of date. '\n 'An administrator must run init:\\n\\t{init_command}'.format(\n init_command='datacube -v system init'\n ))\n return PostgresDb(engine)\n\n @staticmethod\n def _create_engine(url, application_name=None, iam_rds_auth=False, iam_rds_timeout=600, pool_timeout=60):\n engine = create_engine(\n url,\n echo=False,\n echo_pool=False,\n\n # 'AUTOCOMMIT' here means READ-COMMITTED isolation level with autocommit on.\n # When a transaction is needed we will do an explicit begin/commit.\n isolation_level='AUTOCOMMIT',\n json_serializer=_to_json,\n # If a connection is idle for this many seconds, SQLAlchemy will renew it rather\n # than assuming it's still open. 
Allows servers to close idle connections without clients\n # getting errors.\n pool_recycle=pool_timeout,\n connect_args={'application_name': application_name}\n )\n\n if iam_rds_auth:\n from datacube.utils.aws import obtain_new_iam_auth_token\n handle_dynamic_token_authentication(engine, obtain_new_iam_auth_token, timeout=iam_rds_timeout, url=url)\n\n return engine\n\n @property\n def url(self) -> EngineUrl:\n return self._engine.url\n\n @staticmethod\n def get_db_username(config):\n try:\n return config['db_username']\n except KeyError:\n return DEFAULT_DB_USER\n\n def close(self):\n \"\"\"\n Close any idle connections in the pool.\n\n This is good practice if you are keeping this object in scope\n but wont be using it for a while.\n\n Connections should not be shared between processes, so this should be called\n before forking if the same instance will be used.\n\n (connections are normally closed automatically when this object is\n garbage collected)\n \"\"\"\n self._engine.dispose()\n\n @classmethod\n def _expand_app_name(cls, application_name):\n \"\"\"\n >>> PostgresDb._expand_app_name(None) #doctest: +ELLIPSIS\n 'agdc-...'\n >>> PostgresDb._expand_app_name('') #doctest: +ELLIPSIS\n 'agdc-...'\n >>> PostgresDb._expand_app_name('cli') #doctest: +ELLIPSIS\n 'cli agdc-...'\n >>> PostgresDb._expand_app_name('a b.c/d')\n 'a-b-c-d agdc-...'\n >>> PostgresDb._expand_app_name(5)\n Traceback (most recent call last):\n ...\n TypeError: Application name must be a string\n \"\"\"\n full_name = _LIB_ID\n if application_name:\n if not isinstance(application_name, str):\n raise TypeError('Application name must be a string')\n\n full_name = re.sub('[^0-9a-zA-Z]+', '-', application_name) + ' ' + full_name\n\n if len(full_name) > 64:\n _LOG.warning('Application name is too long: Truncating to %s chars', (64 - len(_LIB_ID) - 1))\n return full_name[-64:]\n\n def init(self, with_permissions=True):\n \"\"\"\n Init a new database (if not already set up).\n\n :return: If it was newly created.\n \"\"\"\n is_new = _core.ensure_db(self._engine, with_permissions=with_permissions)\n if not is_new:\n _core.update_schema(self._engine)\n\n return is_new\n\n @contextmanager\n def connect(self):\n \"\"\"\n Borrow a connection from the pool.\n\n The name connect() is misleading: it will not create a new connection if one is already available in the pool.\n\n Callers should minimise the amount of time they hold onto their connections. If they're doing anything between\n calls to the DB (such as opening files, or waiting on user input), it's better to return the connection\n to the pool beforehand.\n\n The connection can raise errors if not following this advice (\"server closed the connection unexpectedly\"),\n as some servers will aggressively close idle connections (eg. DEA's NCI servers). 
It also prevents the\n connection from being reused while borrowed.\n \"\"\"\n with self._engine.connect() as connection:\n yield _api.PostgresDbAPI(connection)\n connection.close()\n\n @contextmanager\n def begin(self):\n \"\"\"\n Start a transaction.\n\n Returns an instance that will maintain a single connection in a transaction.\n\n Call commit() or rollback() to complete the transaction or use a context manager:\n\n with db.begin() as trans:\n trans.insert_dataset(...)\n\n (Don't share an instance between threads)\n\n :rtype: PostgresDBAPI\n \"\"\"\n with self._engine.connect() as connection:\n connection.execute(text('BEGIN'))\n try:\n yield _api.PostgresDbAPI(connection)\n connection.execute(text('COMMIT'))\n except Exception: # pylint: disable=broad-except\n connection.execute(text('ROLLBACK'))\n raise\n finally:\n connection.close()\n\n def give_me_a_connection(self):\n return self._engine.connect()\n\n @classmethod\n def get_dataset_fields(cls, metadata_type_definition):\n return _api.get_dataset_fields(metadata_type_definition)\n\n def __repr__(self):\n return \"PostgresDb<engine={!r}>\".format(self._engine)\n\n\ndef handle_dynamic_token_authentication(engine: Engine,\n new_token: Callable[..., str],\n timeout: Union[float, int] = 600,\n **kwargs) -> None:\n last_token = [None]\n last_token_time = [0.0]\n\n @event.listens_for(engine, \"do_connect\")\n def override_new_connection(dialect, conn_rec, cargs, cparams):\n # Handle IAM authentication\n now = clock_gettime(CLOCK_REALTIME)\n if now - last_token_time[0] > timeout:\n last_token[0] = new_token(**kwargs)\n last_token_time[0] = now\n cparams[\"password\"] = last_token[0]\n\n\ndef _to_json(o):\n # Postgres <=9.5 doesn't support NaN and Infinity\n fixedup = jsonify_document(o)\n return json.dumps(fixedup, default=_json_fallback)\n\n\ndef _json_fallback(obj):\n \"\"\"Fallback json serialiser.\"\"\"\n raise TypeError(\"Type not serializable: {}\".format(type(obj)))\n", "path": "datacube/drivers/postgres/_connections.py"}]} | 3,808 | 235 |
gh_patches_debug_3792 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2875 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mitmproxy crashes, when we try to write a value from file into grideditor cell
##### Steps to reproduce the problem:
1. Create a file. Put inside:
```
abc
```
2. Run mitmproxy.
3. Press `n` -> `Enter` -> `Enter` -> `e` -> `cookies`. You will get into `cookies` grideditor.
4. Press `a` -> `Esc` -> `r` (the issue also occurs with `R`).
5. Input the path to the file with `abc`: `console.grideditor.load` `~/your_file_with_abc` -> `Enter`.
You will see:
```
Traceback (most recent call last):
File "/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/master.py", line 216, in run
self.loop.run()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 278, in run
self._run()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 376, in _run
self.event_loop.run()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 682, in run
self._loop()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 710, in _loop
self._entering_idle()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 671, in _entering_idle
callback()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 564, in entering_idle
self.draw_screen()
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py", line 578, in draw_screen
canvas = self._topmost_widget.render(self.screen_size, focus=True)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/listbox.py", line 455, in render
(maxcol, maxrow), focus=focus)
File "/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/listbox.py", line 340, in calculate_visible
focus_widget, focus_pos = self.body.get_focus()
File "/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/base.py", line 227, in get_focus
self.lst[self.focus]
File "/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/base.py", line 83, in __init__
w = self.editor.columns[i].Display(v)
File "/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/col_text.py", line 18, in Display
return TDisplay(data, self.encoding_args)
File "/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/col_text.py", line 31, in __init__
super().__init__(data.encode(*self.encoding_args))
AttributeError: 'bytes' object has no attribute 'encode'
```
##### Any other comments? What have you tried so far?
This bug is relevant for cookies, form, path, query and set-cookies grideditors.
I didn't check carefully, but it seems to be relevant for v2.0.2 as well.
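
For illustration only (an assumed sketch, not the upstream patch): the crash happens because the value loaded from the file is already `bytes`, so `EncodingMixin.__init__` calls `.encode()` on it and fails. A defensive normalisation step along these lines would avoid the AttributeError:

```python
# Hypothetical helper: accept either str or bytes before handing the
# value to the bytes-based column widgets.
def _ensure_bytes(data, encoding_args):
    if isinstance(data, bytes):
        return data
    return str(data).encode(*encoding_args)
```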
##### System information
Mitmproxy: 3.0.0.dev113 (commit 93425d4)
Python: 3.5.2
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial
</issue>
<code>
[start of mitmproxy/tools/console/grideditor/col_text.py]
1 """
2 Welcome to the encoding dance!
3
4 In a nutshell, text columns are actually a proxy class for byte columns,
5 which just encode/decodes contents.
6 """
7
8 from mitmproxy.tools.console import signals
9 from mitmproxy.tools.console.grideditor import col_bytes
10
11
12 class Column(col_bytes.Column):
13 def __init__(self, heading, encoding="utf8", errors="surrogateescape"):
14 super().__init__(heading)
15 self.encoding_args = encoding, errors
16
17 def Display(self, data):
18 return TDisplay(data, self.encoding_args)
19
20 def Edit(self, data):
21 return TEdit(data, self.encoding_args)
22
23 def blank(self):
24 return ""
25
26
27 # This is the same for both edit and display.
28 class EncodingMixin:
29 def __init__(self, data, encoding_args):
30 self.encoding_args = encoding_args
31 super().__init__(data.encode(*self.encoding_args))
32
33 def get_data(self):
34 data = super().get_data()
35 try:
36 return data.decode(*self.encoding_args)
37 except ValueError:
38 signals.status_message.send(
39 self,
40 message="Invalid encoding.",
41 expire=1000
42 )
43 raise
44
45
46 # urwid forces a different name for a subclass.
47 class TDisplay(EncodingMixin, col_bytes.Display):
48 pass
49
50
51 class TEdit(EncodingMixin, col_bytes.Edit):
52 pass
53
[end of mitmproxy/tools/console/grideditor/col_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/tools/console/grideditor/col_text.py b/mitmproxy/tools/console/grideditor/col_text.py
--- a/mitmproxy/tools/console/grideditor/col_text.py
+++ b/mitmproxy/tools/console/grideditor/col_text.py
@@ -28,7 +28,7 @@
class EncodingMixin:
def __init__(self, data, encoding_args):
self.encoding_args = encoding_args
- super().__init__(data.encode(*self.encoding_args))
+ super().__init__(data.__str__().encode(*self.encoding_args))
def get_data(self):
data = super().get_data()
| {"golden_diff": "diff --git a/mitmproxy/tools/console/grideditor/col_text.py b/mitmproxy/tools/console/grideditor/col_text.py\n--- a/mitmproxy/tools/console/grideditor/col_text.py\n+++ b/mitmproxy/tools/console/grideditor/col_text.py\n@@ -28,7 +28,7 @@\n class EncodingMixin:\n def __init__(self, data, encoding_args):\n self.encoding_args = encoding_args\n- super().__init__(data.encode(*self.encoding_args))\n+ super().__init__(data.__str__().encode(*self.encoding_args))\n \n def get_data(self):\n data = super().get_data()\n", "issue": "Mitmproxy crashes, when we try to write a value from file into grideditor cell\n##### Steps to reproduce the problem:\r\n\r\n1. Create a file. Put inside:\r\n```\r\nabc\r\n```\r\n2. Run mitmproxy.\r\n3. Press `n` -> `Enter` -> `Enter` -> `e` -> `cookies`. You will get into `cookies` grideditor.\r\n4. Press `a` -> `Esc` -> `r` (or `R`, also relevant for it).\r\n5. Input the path to the file with `abc`: `console.grideditor.load` `~/your_file_with_abc` -> `Enter`.\r\n\r\nYou will see:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/master.py\", line 216, in run\r\n self.loop.run()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 278, in run\r\n self._run()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 376, in _run\r\n self.event_loop.run()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 682, in run\r\n self._loop()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 710, in _loop\r\n self._entering_idle()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 671, in _entering_idle\r\n callback()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 564, in entering_idle\r\n self.draw_screen()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/main_loop.py\", line 578, in draw_screen\r\n canvas = self._topmost_widget.render(self.screen_size, focus=True)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/container.py\", line 1083, in render\r\n focus and self.focus_part == 'body')\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/decoration.py\", line 225, in render\r\n canv = self._original_widget.render(size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/container.py\", line 1083, in render\r\n focus and self.focus_part == 'body')\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 1750, in render\r\n canv = get_delegate(self).render(size, focus=focus)\r\n File 
\"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 1750, in render\r\n canv = get_delegate(self).render(size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/container.py\", line 1083, in render\r\n focus and self.focus_part == 'body')\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/widget.py\", line 141, in cached_render\r\n canv = fn(self, size, focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/listbox.py\", line 455, in render\r\n (maxcol, maxrow), focus=focus)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/venv/lib/python3.5/site-packages/urwid/listbox.py\", line 340, in calculate_visible\r\n focus_widget, focus_pos = self.body.get_focus()\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/base.py\", line 227, in get_focus\r\n self.lst[self.focus]\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/base.py\", line 83, in __init__\r\n w = self.editor.columns[i].Display(v)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/col_text.py\", line 18, in Display\r\n return TDisplay(data, self.encoding_args)\r\n File \"/home/kajoj/Mitmproxy/mitmproxy/mitmproxy/tools/console/grideditor/col_text.py\", line 31, in __init__\r\n super().__init__(data.encode(*self.encoding_args))\r\nAttributeError: 'bytes' object has no attribute 'encode'\r\n```\r\n\r\n##### Any other comments? 
What have you tried so far?\r\n\r\nThis bug is relevant for cookies, form, path, query and set-cookies grideditors.\r\nI didn't check carefully, but it seems to be relevant for v2.0.2 as well.\r\n\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev113 (commit 93425d4) \r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nWelcome to the encoding dance!\n\nIn a nutshell, text columns are actually a proxy class for byte columns,\nwhich just encode/decodes contents.\n\"\"\"\n\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.tools.console.grideditor import col_bytes\n\n\nclass Column(col_bytes.Column):\n def __init__(self, heading, encoding=\"utf8\", errors=\"surrogateescape\"):\n super().__init__(heading)\n self.encoding_args = encoding, errors\n\n def Display(self, data):\n return TDisplay(data, self.encoding_args)\n\n def Edit(self, data):\n return TEdit(data, self.encoding_args)\n\n def blank(self):\n return \"\"\n\n\n# This is the same for both edit and display.\nclass EncodingMixin:\n def __init__(self, data, encoding_args):\n self.encoding_args = encoding_args\n super().__init__(data.encode(*self.encoding_args))\n\n def get_data(self):\n data = super().get_data()\n try:\n return data.decode(*self.encoding_args)\n except ValueError:\n signals.status_message.send(\n self,\n message=\"Invalid encoding.\",\n expire=1000\n )\n raise\n\n\n# urwid forces a different name for a subclass.\nclass TDisplay(EncodingMixin, col_bytes.Display):\n pass\n\n\nclass TEdit(EncodingMixin, col_bytes.Edit):\n pass\n", "path": "mitmproxy/tools/console/grideditor/col_text.py"}]} | 2,556 | 137 |
gh_patches_debug_19618 | rasdani/github-patches | git_diff | getsentry__sentry-9691 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Member roles cannot delete own saved search
Users with their role as `Member` cannot delete/remove their own Saved Search:
- Members can save a search query and it will only be shown to the Member that created it.
- Members cannot delete their own saved search.
- Other users cannot see another Member's saved search, not even Admins, Managers, or Owners. Since no one else can see another Member's saved search, Admins and above cannot delete them either.
- If a Member is updated to a different role, they cannot see their own saved searches unless they're back as a Member.
cc @getsentry/workflow (fyi @getsentry/cops)
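
A rough sketch of the kind of change that could address this (hypothetical, not the exact patch that landed): allow the `DELETE` method for members in `RelaxedSearchPermission`, and have the endpoint check ownership before deleting, assuming `SavedSearch` records its owner.

```python
# Hypothetical sketch only.
class RelaxedSearchPermission(ProjectPermission):
    scope_map = {
        'GET': ['project:read', 'project:write', 'project:admin'],
        'POST': ['project:write', 'project:admin', 'project:read'],
        'PUT': ['project:write', 'project:admin', 'project:read'],
        # let members issue DELETE; ownership is enforced in the endpoint
        'DELETE': ['project:read', 'project:write', 'project:admin'],
    }

# Inside ProjectSearchDetailsEndpoint.delete(), after fetching `search`:
#     is_owner = request.user and request.user == search.owner
#     if is_owner or (request.access.has_scope('project:write') and not search.owner):
#         search.delete()
#         return Response(status=204)
#     return Response(status=403)
```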
</issue>
<code>
[start of src/sentry/api/endpoints/project_search_details.py]
1 from __future__ import absolute_import
2
3 from rest_framework import serializers
4 from rest_framework.response import Response
5
6 from sentry.api.bases.project import ProjectEndpoint, RelaxedSearchPermission
7 from sentry.api.exceptions import ResourceDoesNotExist
8 from sentry.api.serializers import serialize
9 from sentry.models import SavedSearch, SavedSearchUserDefault
10
11
12 class LimitedSavedSearchSerializer(serializers.Serializer):
13 isUserDefault = serializers.BooleanField(required=False)
14
15
16 class SavedSearchSerializer(serializers.Serializer):
17 name = serializers.CharField(max_length=128, required=True)
18 query = serializers.CharField(required=True)
19 isDefault = serializers.BooleanField(required=False)
20 isUserDefault = serializers.BooleanField(required=False)
21
22
23 class ProjectSearchDetailsEndpoint(ProjectEndpoint):
24 permission_classes = (RelaxedSearchPermission, )
25
26 def get(self, request, project, search_id):
27 """
28 Retrieve a saved search
29
30 Return details on an individual saved search.
31
32 {method} {path}
33
34 """
35 try:
36 search = SavedSearch.objects.get(
37 project=project,
38 id=search_id,
39 )
40 except SavedSearch.DoesNotExist:
41 raise ResourceDoesNotExist
42
43 return Response(serialize(search, request.user))
44
45 def put(self, request, project, search_id):
46 """
47 Update a saved search
48
49 Update a saved search.
50
51 {method} {path}
52 {{
53 "name: "Unresolved",
54 "query": "is:unresolved",
55 "dateSavedSearchd": "2015-05-11T02:23:10Z"
56 }}
57
58 """
59 try:
60 search = SavedSearch.objects.get(
61 project=project,
62 id=search_id,
63 )
64 except SavedSearch.DoesNotExist:
65 raise ResourceDoesNotExist
66
67 has_team_scope = any(
68 request.access.has_team_scope(team, 'project:write') for team in project.teams.all()
69 )
70 if has_team_scope:
71 serializer = SavedSearchSerializer(data=request.DATA, partial=True)
72 else:
73 serializer = LimitedSavedSearchSerializer(data=request.DATA, partial=True)
74
75 if not serializer.is_valid():
76 return Response(serializer.errors, status=400)
77
78 result = serializer.object
79
80 kwargs = {}
81 if result.get('name'):
82 kwargs['name'] = result['name']
83 if result.get('query'):
84 kwargs['query'] = result['query']
85 if result.get('isDefault'):
86 kwargs['is_default'] = result['isDefault']
87
88 if kwargs:
89 search.update(**kwargs)
90
91 if result.get('isDefault'):
92 SavedSearch.objects.filter(
93 project=project,
94 ).exclude(id=search_id).update(is_default=False)
95
96 if result.get('isUserDefault'):
97 SavedSearchUserDefault.objects.create_or_update(
98 user=request.user, project=project, values={
99 'savedsearch': search,
100 }
101 )
102
103 return Response(serialize(search, request.user))
104
105 def delete(self, request, project, search_id):
106 """
107 Delete a saved search
108
109 Permanently remove a saved search.
110
111 {method} {path}
112
113 """
114 try:
115 search = SavedSearch.objects.get(
116 project=project,
117 id=search_id,
118 )
119 except SavedSearch.DoesNotExist:
120 raise ResourceDoesNotExist
121
122 search.delete()
123
124 return Response(status=204)
125
[end of src/sentry/api/endpoints/project_search_details.py]
[start of src/sentry/api/bases/project.py]
1 from __future__ import absolute_import
2
3 from rest_framework.response import Response
4
5 from sentry import roles
6 from sentry.api.base import Endpoint
7 from sentry.api.exceptions import ResourceDoesNotExist, ProjectMoved
8 from sentry.app import raven
9 from sentry.auth.superuser import is_active_superuser
10 from sentry.models import OrganizationMember, Project, ProjectStatus, ProjectRedirect
11
12 from .organization import OrganizationPermission
13 from .team import has_team_permission
14
15
16 class ProjectPermission(OrganizationPermission):
17 scope_map = {
18 'GET': ['project:read', 'project:write', 'project:admin'],
19 'POST': ['project:write', 'project:admin'],
20 'PUT': ['project:write', 'project:admin'],
21 'DELETE': ['project:admin'],
22 }
23
24 def has_object_permission(self, request, view, project):
25 result = super(ProjectPermission,
26 self).has_object_permission(request, view, project.organization)
27
28 if not result:
29 return result
30
31 if project.teams.exists():
32 return any(
33 has_team_permission(request, team, self.scope_map) for team in project.teams.all()
34 )
35 elif request.user.is_authenticated():
36 # this is only for team-less projects
37 if is_active_superuser(request):
38 return True
39 try:
40 role = OrganizationMember.objects.filter(
41 organization=project.organization,
42 user=request.user,
43 ).values_list('role', flat=True).get()
44 except OrganizationMember.DoesNotExist:
45 # this should probably never happen?
46 return False
47
48 return roles.get(role).is_global
49
50 return False
51
52
53 class StrictProjectPermission(ProjectPermission):
54 scope_map = {
55 'GET': ['project:write', 'project:admin'],
56 'POST': ['project:write', 'project:admin'],
57 'PUT': ['project:write', 'project:admin'],
58 'DELETE': ['project:admin'],
59 }
60
61
62 class ProjectReleasePermission(ProjectPermission):
63 scope_map = {
64 'GET': ['project:read', 'project:write', 'project:admin', 'project:releases'],
65 'POST': ['project:write', 'project:admin', 'project:releases'],
66 'PUT': ['project:write', 'project:admin', 'project:releases'],
67 'DELETE': ['project:admin', 'project:releases'],
68 }
69
70
71 class ProjectEventPermission(ProjectPermission):
72 scope_map = {
73 'GET': ['event:read', 'event:write', 'event:admin'],
74 'POST': ['event:write', 'event:admin'],
75 'PUT': ['event:write', 'event:admin'],
76 'DELETE': ['event:admin'],
77 }
78
79
80 class ProjectSettingPermission(ProjectPermission):
81 scope_map = {
82 'GET': ['project:read', 'project:write', 'project:admin'],
83 'POST': ['project:write', 'project:admin'],
84 'PUT': ['project:write', 'project:admin'],
85 'DELETE': ['project:write', 'project:admin'],
86 }
87
88
89 class RelaxedSearchPermission(ProjectPermission):
90 scope_map = {
91 'GET': ['project:read', 'project:write', 'project:admin'],
92 # members can do writes
93 'POST': ['project:write', 'project:admin', 'project:read'],
94 'PUT': ['project:write', 'project:admin', 'project:read'],
95 'DELETE': ['project:admin'],
96 }
97
98
99 class ProjectEndpoint(Endpoint):
100 permission_classes = (ProjectPermission, )
101
102 def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
103 try:
104 project = Project.objects.filter(
105 organization__slug=organization_slug,
106 slug=project_slug,
107 ).select_related('organization').prefetch_related('teams').get()
108 except Project.DoesNotExist:
109 try:
110 # Project may have been renamed
111 redirect = ProjectRedirect.objects.select_related('project')
112 redirect = redirect.get(
113 organization__slug=organization_slug,
114 redirect_slug=project_slug
115 )
116
117 # get full path so that we keep query strings
118 requested_url = request.get_full_path()
119 new_url = requested_url.replace(
120 'projects/%s/%s/' %
121 (organization_slug, project_slug), 'projects/%s/%s/' %
122 (organization_slug, redirect.project.slug))
123
124 # Resource was moved/renamed if the requested url is different than the new url
125 if requested_url != new_url:
126 raise ProjectMoved(new_url, redirect.project.slug)
127
128 # otherwise project doesn't exist
129 raise ResourceDoesNotExist
130 except ProjectRedirect.DoesNotExist:
131 raise ResourceDoesNotExist
132
133 if project.status != ProjectStatus.VISIBLE:
134 raise ResourceDoesNotExist
135
136 self.check_object_permissions(request, project)
137
138 raven.tags_context({
139 'project': project.id,
140 'organization': project.organization_id,
141 })
142
143 request._request.organization = project.organization
144
145 kwargs['project'] = project
146 return (args, kwargs)
147
148 def handle_exception(self, request, exc):
149 if isinstance(exc, ProjectMoved):
150 response = Response({
151 'slug': exc.detail['extra']['slug'],
152 'detail': exc.detail
153 }, status=exc.status_code)
154 response['Location'] = exc.detail['extra']['url']
155 return response
156 return super(ProjectEndpoint, self).handle_exception(request, exc)
157
[end of src/sentry/api/bases/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/api/bases/project.py b/src/sentry/api/bases/project.py
--- a/src/sentry/api/bases/project.py
+++ b/src/sentry/api/bases/project.py
@@ -92,7 +92,8 @@
# members can do writes
'POST': ['project:write', 'project:admin', 'project:read'],
'PUT': ['project:write', 'project:admin', 'project:read'],
- 'DELETE': ['project:admin'],
+ # members can delete their own searches
+ 'DELETE': ['project:read', 'project:write', 'project:admin'],
}
diff --git a/src/sentry/api/endpoints/project_search_details.py b/src/sentry/api/endpoints/project_search_details.py
--- a/src/sentry/api/endpoints/project_search_details.py
+++ b/src/sentry/api/endpoints/project_search_details.py
@@ -119,6 +119,14 @@
except SavedSearch.DoesNotExist:
raise ResourceDoesNotExist
- search.delete()
+ is_search_owner = request.user and request.user == search.owner
- return Response(status=204)
+ if request.access.has_scope('project:write'):
+ if not search.owner or is_search_owner:
+ search.delete()
+ return Response(status=204)
+ elif is_search_owner:
+ search.delete()
+ return Response(status=204)
+
+ return Response(status=403)
| {"golden_diff": "diff --git a/src/sentry/api/bases/project.py b/src/sentry/api/bases/project.py\n--- a/src/sentry/api/bases/project.py\n+++ b/src/sentry/api/bases/project.py\n@@ -92,7 +92,8 @@\n # members can do writes\n 'POST': ['project:write', 'project:admin', 'project:read'],\n 'PUT': ['project:write', 'project:admin', 'project:read'],\n- 'DELETE': ['project:admin'],\n+ # members can delete their own searches\n+ 'DELETE': ['project:read', 'project:write', 'project:admin'],\n }\n \n \ndiff --git a/src/sentry/api/endpoints/project_search_details.py b/src/sentry/api/endpoints/project_search_details.py\n--- a/src/sentry/api/endpoints/project_search_details.py\n+++ b/src/sentry/api/endpoints/project_search_details.py\n@@ -119,6 +119,14 @@\n except SavedSearch.DoesNotExist:\n raise ResourceDoesNotExist\n \n- search.delete()\n+ is_search_owner = request.user and request.user == search.owner\n \n- return Response(status=204)\n+ if request.access.has_scope('project:write'):\n+ if not search.owner or is_search_owner:\n+ search.delete()\n+ return Response(status=204)\n+ elif is_search_owner:\n+ search.delete()\n+ return Response(status=204)\n+\n+ return Response(status=403)\n", "issue": "Member roles cannot delete own saved search\nUsers with their role as `Member` cannot delete/remove their own Saved Search: \r\n\r\n- Members can save a search query and it will only be shown to the Member that created it.\r\n- Members cannot delete their own saved search.\r\n- Other users cannot see another Member's saved search, not even Admins, Managers, or Owners. Since no one else can see another Member's saved search, Admins and above cannot delete them either.\r\n- If a Member is updated to a different role, they cannot see their own saved searches unless they're back as a Member.\r\n\r\ncc @getsentry/workflow (fyi @getsentry/cops) \n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom rest_framework import serializers\nfrom rest_framework.response import Response\n\nfrom sentry.api.bases.project import ProjectEndpoint, RelaxedSearchPermission\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.serializers import serialize\nfrom sentry.models import SavedSearch, SavedSearchUserDefault\n\n\nclass LimitedSavedSearchSerializer(serializers.Serializer):\n isUserDefault = serializers.BooleanField(required=False)\n\n\nclass SavedSearchSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=128, required=True)\n query = serializers.CharField(required=True)\n isDefault = serializers.BooleanField(required=False)\n isUserDefault = serializers.BooleanField(required=False)\n\n\nclass ProjectSearchDetailsEndpoint(ProjectEndpoint):\n permission_classes = (RelaxedSearchPermission, )\n\n def get(self, request, project, search_id):\n \"\"\"\n Retrieve a saved search\n\n Return details on an individual saved search.\n\n {method} {path}\n\n \"\"\"\n try:\n search = SavedSearch.objects.get(\n project=project,\n id=search_id,\n )\n except SavedSearch.DoesNotExist:\n raise ResourceDoesNotExist\n\n return Response(serialize(search, request.user))\n\n def put(self, request, project, search_id):\n \"\"\"\n Update a saved search\n\n Update a saved search.\n\n {method} {path}\n {{\n \"name: \"Unresolved\",\n \"query\": \"is:unresolved\",\n \"dateSavedSearchd\": \"2015-05-11T02:23:10Z\"\n }}\n\n \"\"\"\n try:\n search = SavedSearch.objects.get(\n project=project,\n id=search_id,\n )\n except SavedSearch.DoesNotExist:\n raise ResourceDoesNotExist\n\n has_team_scope 
= any(\n request.access.has_team_scope(team, 'project:write') for team in project.teams.all()\n )\n if has_team_scope:\n serializer = SavedSearchSerializer(data=request.DATA, partial=True)\n else:\n serializer = LimitedSavedSearchSerializer(data=request.DATA, partial=True)\n\n if not serializer.is_valid():\n return Response(serializer.errors, status=400)\n\n result = serializer.object\n\n kwargs = {}\n if result.get('name'):\n kwargs['name'] = result['name']\n if result.get('query'):\n kwargs['query'] = result['query']\n if result.get('isDefault'):\n kwargs['is_default'] = result['isDefault']\n\n if kwargs:\n search.update(**kwargs)\n\n if result.get('isDefault'):\n SavedSearch.objects.filter(\n project=project,\n ).exclude(id=search_id).update(is_default=False)\n\n if result.get('isUserDefault'):\n SavedSearchUserDefault.objects.create_or_update(\n user=request.user, project=project, values={\n 'savedsearch': search,\n }\n )\n\n return Response(serialize(search, request.user))\n\n def delete(self, request, project, search_id):\n \"\"\"\n Delete a saved search\n\n Permanently remove a saved search.\n\n {method} {path}\n\n \"\"\"\n try:\n search = SavedSearch.objects.get(\n project=project,\n id=search_id,\n )\n except SavedSearch.DoesNotExist:\n raise ResourceDoesNotExist\n\n search.delete()\n\n return Response(status=204)\n", "path": "src/sentry/api/endpoints/project_search_details.py"}, {"content": "from __future__ import absolute_import\n\nfrom rest_framework.response import Response\n\nfrom sentry import roles\nfrom sentry.api.base import Endpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist, ProjectMoved\nfrom sentry.app import raven\nfrom sentry.auth.superuser import is_active_superuser\nfrom sentry.models import OrganizationMember, Project, ProjectStatus, ProjectRedirect\n\nfrom .organization import OrganizationPermission\nfrom .team import has_team_permission\n\n\nclass ProjectPermission(OrganizationPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:admin'],\n 'POST': ['project:write', 'project:admin'],\n 'PUT': ['project:write', 'project:admin'],\n 'DELETE': ['project:admin'],\n }\n\n def has_object_permission(self, request, view, project):\n result = super(ProjectPermission,\n self).has_object_permission(request, view, project.organization)\n\n if not result:\n return result\n\n if project.teams.exists():\n return any(\n has_team_permission(request, team, self.scope_map) for team in project.teams.all()\n )\n elif request.user.is_authenticated():\n # this is only for team-less projects\n if is_active_superuser(request):\n return True\n try:\n role = OrganizationMember.objects.filter(\n organization=project.organization,\n user=request.user,\n ).values_list('role', flat=True).get()\n except OrganizationMember.DoesNotExist:\n # this should probably never happen?\n return False\n\n return roles.get(role).is_global\n\n return False\n\n\nclass StrictProjectPermission(ProjectPermission):\n scope_map = {\n 'GET': ['project:write', 'project:admin'],\n 'POST': ['project:write', 'project:admin'],\n 'PUT': ['project:write', 'project:admin'],\n 'DELETE': ['project:admin'],\n }\n\n\nclass ProjectReleasePermission(ProjectPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:admin', 'project:releases'],\n 'POST': ['project:write', 'project:admin', 'project:releases'],\n 'PUT': ['project:write', 'project:admin', 'project:releases'],\n 'DELETE': ['project:admin', 'project:releases'],\n }\n\n\nclass 
ProjectEventPermission(ProjectPermission):\n scope_map = {\n 'GET': ['event:read', 'event:write', 'event:admin'],\n 'POST': ['event:write', 'event:admin'],\n 'PUT': ['event:write', 'event:admin'],\n 'DELETE': ['event:admin'],\n }\n\n\nclass ProjectSettingPermission(ProjectPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:admin'],\n 'POST': ['project:write', 'project:admin'],\n 'PUT': ['project:write', 'project:admin'],\n 'DELETE': ['project:write', 'project:admin'],\n }\n\n\nclass RelaxedSearchPermission(ProjectPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:admin'],\n # members can do writes\n 'POST': ['project:write', 'project:admin', 'project:read'],\n 'PUT': ['project:write', 'project:admin', 'project:read'],\n 'DELETE': ['project:admin'],\n }\n\n\nclass ProjectEndpoint(Endpoint):\n permission_classes = (ProjectPermission, )\n\n def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):\n try:\n project = Project.objects.filter(\n organization__slug=organization_slug,\n slug=project_slug,\n ).select_related('organization').prefetch_related('teams').get()\n except Project.DoesNotExist:\n try:\n # Project may have been renamed\n redirect = ProjectRedirect.objects.select_related('project')\n redirect = redirect.get(\n organization__slug=organization_slug,\n redirect_slug=project_slug\n )\n\n # get full path so that we keep query strings\n requested_url = request.get_full_path()\n new_url = requested_url.replace(\n 'projects/%s/%s/' %\n (organization_slug, project_slug), 'projects/%s/%s/' %\n (organization_slug, redirect.project.slug))\n\n # Resource was moved/renamed if the requested url is different than the new url\n if requested_url != new_url:\n raise ProjectMoved(new_url, redirect.project.slug)\n\n # otherwise project doesn't exist\n raise ResourceDoesNotExist\n except ProjectRedirect.DoesNotExist:\n raise ResourceDoesNotExist\n\n if project.status != ProjectStatus.VISIBLE:\n raise ResourceDoesNotExist\n\n self.check_object_permissions(request, project)\n\n raven.tags_context({\n 'project': project.id,\n 'organization': project.organization_id,\n })\n\n request._request.organization = project.organization\n\n kwargs['project'] = project\n return (args, kwargs)\n\n def handle_exception(self, request, exc):\n if isinstance(exc, ProjectMoved):\n response = Response({\n 'slug': exc.detail['extra']['slug'],\n 'detail': exc.detail\n }, status=exc.status_code)\n response['Location'] = exc.detail['extra']['url']\n return response\n return super(ProjectEndpoint, self).handle_exception(request, exc)\n", "path": "src/sentry/api/bases/project.py"}]} | 3,209 | 324 |
gh_patches_debug_35183 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User's script error handler function
As discussed with @cortesi on Slack, whenever a user's script throws an error, for whatever reason, it is currently handled differently in different places.
We should therefore have a single, consistent error handler function that can be invoked whenever such an error occurs.
This would also take care of #2837, #2838 and #2839.
### Function
Signature:
`script_error(path, message, lineno, exception)`
What the function will do:
> "Error in script XXX:NNN MMM", where XXX is the path as specified by the user (the `.path` attribute of `Script`), NNN is a line number if we have one, and MMM is a short message.
The idea is to display the above message in the console app and to show the traceback for the error in the event log.
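For illustration only, a minimal sketch of what such a handler could look like. The exact argument handling and logging calls are assumptions based on the description above, not the final implementation; `ctx.log` is mitmproxy's addon logging facility:
```python
# Illustrative sketch only -- not the final implementation.
import traceback

from mitmproxy import ctx


def script_error(path, message, lineno=None, exception=None):
    """Log a short "Error in script XXX:NNN MMM" line for the console,
    and push the full traceback (if any) to the event log."""
    location = "%s:%s" % (path, lineno) if lineno is not None else path
    ctx.log.error("Error in script %s %s" % (location, message))
    if exception is not None:
        tb = "".join(traceback.format_exception(
            type(exception), exception, exception.__traceback__))
        ctx.log.error(tb)
```
The keyword defaults keep `lineno` and `exception` optional, since not every failure carries them.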
</issue>
<code>
[start of mitmproxy/addons/script.py]
1 import os
2 import importlib.util
3 import importlib.machinery
4 import time
5 import sys
6 import types
7 import typing
8
9 from mitmproxy import addonmanager
10 from mitmproxy import exceptions
11 from mitmproxy import flow
12 from mitmproxy import command
13 from mitmproxy import eventsequence
14 from mitmproxy import ctx
15 import mitmproxy.types as mtypes
16
17
18 def load_script(path: str) -> types.ModuleType:
19 fullname = "__mitmproxy_script__.{}".format(
20 os.path.splitext(os.path.basename(path))[0]
21 )
22 # the fullname is not unique among scripts, so if there already is an existing script with said
23 # fullname, remove it.
24 sys.modules.pop(fullname, None)
25 oldpath = sys.path
26 sys.path.insert(0, os.path.dirname(path))
27 try:
28 loader = importlib.machinery.SourceFileLoader(fullname, path)
29 spec = importlib.util.spec_from_loader(fullname, loader=loader)
30 m = importlib.util.module_from_spec(spec)
31 loader.exec_module(m)
32 if not getattr(m, "name", None):
33 m.name = path # type: ignore
34 return m
35 finally:
36 sys.path[:] = oldpath
37
38
39 class Script:
40 """
41 An addon that manages a single script.
42 """
43 ReloadInterval = 2
44
45 def __init__(self, path):
46 self.name = "scriptmanager:" + path
47 self.path = path
48 self.fullpath = os.path.expanduser(
49 path.strip("'\" ")
50 )
51 self.ns = None
52
53 self.last_load = 0
54 self.last_mtime = 0
55 if not os.path.isfile(self.fullpath):
56 raise exceptions.OptionsError('No such script: "%s"' % self.fullpath)
57
58 @property
59 def addons(self):
60 return [self.ns] if self.ns else []
61
62 def tick(self):
63 if time.time() - self.last_load > self.ReloadInterval:
64 try:
65 mtime = os.stat(self.fullpath).st_mtime
66 except FileNotFoundError:
67 scripts = list(ctx.options.scripts)
68 scripts.remove(self.path)
69 ctx.options.update(scripts=scripts)
70 return
71
72 if mtime > self.last_mtime:
73 ctx.log.info("Loading script: %s" % self.path)
74 if self.ns:
75 ctx.master.addons.remove(self.ns)
76 self.ns = None
77 with addonmanager.safecall():
78 ns = load_script(self.fullpath)
79 ctx.master.addons.register(ns)
80 self.ns = ns
81 if self.ns:
82 # We're already running, so we have to explicitly register and
83 # configure the addon
84 ctx.master.addons.invoke_addon(self.ns, "running")
85 ctx.master.addons.invoke_addon(
86 self.ns,
87 "configure",
88 ctx.options.keys()
89 )
90 self.last_load = time.time()
91 self.last_mtime = mtime
92
93
94 class ScriptLoader:
95 """
96 An addon that manages loading scripts from options.
97 """
98 def __init__(self):
99 self.is_running = False
100 self.addons = []
101
102 def load(self, loader):
103 loader.add_option(
104 "scripts", typing.Sequence[str], [],
105 """
106 Execute a script.
107 """
108 )
109
110 def running(self):
111 self.is_running = True
112
113 @command.command("script.run")
114 def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:
115 """
116 Run a script on the specified flows. The script is loaded with
117 default options, and all lifecycle events for each flow are
118 simulated.
119 """
120 try:
121 s = Script(path)
122 l = addonmanager.Loader(ctx.master)
123 ctx.master.addons.invoke_addon(s, "load", l)
124 ctx.master.addons.invoke_addon(s, "configure", ctx.options.keys())
125 # Script is loaded on the first tick
126 ctx.master.addons.invoke_addon(s, "tick")
127 for f in flows:
128 for evt, arg in eventsequence.iterate(f):
129 ctx.master.addons.invoke_addon(s, evt, arg)
130 except exceptions.OptionsError as e:
131 raise exceptions.CommandError("Error running script: %s" % e) from e
132
133 def configure(self, updated):
134 if "scripts" in updated:
135 for s in ctx.options.scripts:
136 if ctx.options.scripts.count(s) > 1:
137 raise exceptions.OptionsError("Duplicate script: %s" % s)
138
139 for a in self.addons[:]:
140 if a.path not in ctx.options.scripts:
141 ctx.log.info("Un-loading script: %s" % a.name)
142 ctx.master.addons.remove(a)
143 self.addons.remove(a)
144
145 # The machinations below are to ensure that:
146 # - Scripts remain in the same order
147 # - Scripts are not initialized un-necessarily. If only a
148 # script's order in the script list has changed, it is just
149 # moved.
150
151 current = {}
152 for a in self.addons:
153 current[a.path] = a
154
155 ordered = []
156 newscripts = []
157 for s in ctx.options.scripts:
158 if s in current:
159 ordered.append(current[s])
160 else:
161 sc = Script(s)
162 ordered.append(sc)
163 newscripts.append(sc)
164
165 self.addons = ordered
166
167 for s in newscripts:
168 ctx.master.addons.register(s)
169 if self.is_running:
170 # If we're already running, we configure and tell the addon
171 # we're up and running.
172 ctx.master.addons.invoke_addon(s, "running")
173
[end of mitmproxy/addons/script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py
--- a/mitmproxy/addons/script.py
+++ b/mitmproxy/addons/script.py
@@ -5,6 +5,7 @@
import sys
import types
import typing
+import traceback
from mitmproxy import addonmanager
from mitmproxy import exceptions
@@ -36,6 +37,25 @@
sys.path[:] = oldpath
+def script_error_handler(path, exc, msg="", tb=False):
+ """
+ Handles all the user's script errors with
+ an optional traceback
+ """
+ exception = type(exc).__name__
+ if msg:
+ exception = msg
+ lineno = ""
+ if hasattr(exc, "lineno"):
+ lineno = str(exc.lineno)
+ log_msg = "in Script {}:{} {}".format(path, lineno, exception)
+ if tb:
+ etype, value, tback = sys.exc_info()
+ tback = addonmanager.cut_traceback(tback, "invoke_addon")
+ log_msg = log_msg.join(["\n"] + traceback.format_exception(etype, value, tback))
+ ctx.log.error(log_msg)
+
+
class Script:
"""
An addon that manages a single script.
@@ -53,7 +73,7 @@
self.last_load = 0
self.last_mtime = 0
if not os.path.isfile(self.fullpath):
- raise exceptions.OptionsError('No such script: "%s"' % self.fullpath)
+ raise exceptions.OptionsError('No such script')
@property
def addons(self):
@@ -128,13 +148,13 @@
for evt, arg in eventsequence.iterate(f):
ctx.master.addons.invoke_addon(s, evt, arg)
except exceptions.OptionsError as e:
- raise exceptions.CommandError("Error running script: %s" % e) from e
+ script_error_handler(path, e, msg=str(e))
def configure(self, updated):
if "scripts" in updated:
for s in ctx.options.scripts:
if ctx.options.scripts.count(s) > 1:
- raise exceptions.OptionsError("Duplicate script: %s" % s)
+ raise exceptions.OptionsError("Duplicate script")
for a in self.addons[:]:
if a.path not in ctx.options.scripts:
| {"golden_diff": "diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py\n--- a/mitmproxy/addons/script.py\n+++ b/mitmproxy/addons/script.py\n@@ -5,6 +5,7 @@\n import sys\n import types\n import typing\n+import traceback\n \n from mitmproxy import addonmanager\n from mitmproxy import exceptions\n@@ -36,6 +37,25 @@\n sys.path[:] = oldpath\n \n \n+def script_error_handler(path, exc, msg=\"\", tb=False):\n+ \"\"\"\n+ Handles all the user's script errors with\n+ an optional traceback\n+ \"\"\"\n+ exception = type(exc).__name__\n+ if msg:\n+ exception = msg\n+ lineno = \"\"\n+ if hasattr(exc, \"lineno\"):\n+ lineno = str(exc.lineno)\n+ log_msg = \"in Script {}:{} {}\".format(path, lineno, exception)\n+ if tb:\n+ etype, value, tback = sys.exc_info()\n+ tback = addonmanager.cut_traceback(tback, \"invoke_addon\")\n+ log_msg = log_msg.join([\"\\n\"] + traceback.format_exception(etype, value, tback))\n+ ctx.log.error(log_msg)\n+\n+\n class Script:\n \"\"\"\n An addon that manages a single script.\n@@ -53,7 +73,7 @@\n self.last_load = 0\n self.last_mtime = 0\n if not os.path.isfile(self.fullpath):\n- raise exceptions.OptionsError('No such script: \"%s\"' % self.fullpath)\n+ raise exceptions.OptionsError('No such script')\n \n @property\n def addons(self):\n@@ -128,13 +148,13 @@\n for evt, arg in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(s, evt, arg)\n except exceptions.OptionsError as e:\n- raise exceptions.CommandError(\"Error running script: %s\" % e) from e\n+ script_error_handler(path, e, msg=str(e))\n \n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n- raise exceptions.OptionsError(\"Duplicate script: %s\" % s)\n+ raise exceptions.OptionsError(\"Duplicate script\")\n \n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n", "issue": "User's script error handler function\nAs discussed with @cortesi on slack, right now whenever a user's script throws an error due to various reasons, it is being handled at different places differently.\r\nTherefore we can have a consistent error handler function which can be invoked whenever there is an error\r\nThis will also handle #2837 #2838 #2839 \r\n### Function\r\n\r\nSignature\r\n`script_error(path, message, lineno, exception)`\r\n\r\nWhat function will do\r\n>\"Error in script XXX:NNN MMM\u201d where XXX is the path as specified by the user (the .path attribute of Script), NNN is a line number if we have one, and MMM is a short message\r\n\r\nThe idea here is to display the above mentioned message in the console app and display the traceback related to the error in the event log. 
\n", "before_files": [{"content": "import os\nimport importlib.util\nimport importlib.machinery\nimport time\nimport sys\nimport types\nimport typing\n\nfrom mitmproxy import addonmanager\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\nimport mitmproxy.types as mtypes\n\n\ndef load_script(path: str) -> types.ModuleType:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n return m\n finally:\n sys.path[:] = oldpath\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n ReloadInterval = 2\n\n def __init__(self, path):\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(\n path.strip(\"'\\\" \")\n )\n self.ns = None\n\n self.last_load = 0\n self.last_mtime = 0\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError('No such script: \"%s\"' % self.fullpath)\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def tick(self):\n if time.time() - self.last_load > self.ReloadInterval:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n\n if mtime > self.last_mtime:\n ctx.log.info(\"Loading script: %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n ctx.master.addons.invoke_addon(self.ns, \"running\")\n ctx.master.addons.invoke_addon(\n self.ns,\n \"configure\",\n ctx.options.keys()\n )\n self.last_load = time.time()\n self.last_mtime = mtime\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def load(self, loader):\n loader.add_option(\n \"scripts\", typing.Sequence[str], [],\n \"\"\"\n Execute a script.\n \"\"\"\n )\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:\n \"\"\"\n Run a script on the specified flows. 
The script is loaded with\n default options, and all lifecycle events for each flow are\n simulated.\n \"\"\"\n try:\n s = Script(path)\n l = addonmanager.Loader(ctx.master)\n ctx.master.addons.invoke_addon(s, \"load\", l)\n ctx.master.addons.invoke_addon(s, \"configure\", ctx.options.keys())\n # Script is loaded on the first tick\n ctx.master.addons.invoke_addon(s, \"tick\")\n for f in flows:\n for evt, arg in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(s, evt, arg)\n except exceptions.OptionsError as e:\n raise exceptions.CommandError(\"Error running script: %s\" % e) from e\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script: %s\" % s)\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.name)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, \"running\")\n", "path": "mitmproxy/addons/script.py"}]} | 2,345 | 529 |
gh_patches_debug_170 | rasdani/github-patches | git_diff | pydantic__pydantic-4418 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
V1.10 release
To do/decide:
* [x] #2557 - **merged**
* [x] #2745 - needs some tweaks, but we need to decide if it's a good idea before V2
* [x] #2190 - **deferred**
* [x] cherry pick stuff from v1.9 branch, maybe just history #4350
* [x] #3346
* [x] #3593 - **deferred**
* [x] #3946
* [x] #4028 - **API will change in v2**
* [x] #4354
* [x] #4216
* [x] #4191
* [x] #3941 - revert or fix
* [x] #4339
* [x] #4356
</issue>
<code>
[start of pydantic/version.py]
1 __all__ = 'compiled', 'VERSION', 'version_info'
2
3 VERSION = '1.9.2'
4
5 try:
6 import cython # type: ignore
7 except ImportError:
8 compiled: bool = False
9 else: # pragma: no cover
10 try:
11 compiled = cython.compiled
12 except AttributeError:
13 compiled = False
14
15
16 def version_info() -> str:
17 import platform
18 import sys
19 from importlib import import_module
20 from pathlib import Path
21
22 optional_deps = []
23 for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):
24 try:
25 import_module(p.replace('-', '_'))
26 except ImportError:
27 continue
28 optional_deps.append(p)
29
30 info = {
31 'pydantic version': VERSION,
32 'pydantic compiled': compiled,
33 'install path': Path(__file__).resolve().parent,
34 'python version': sys.version,
35 'platform': platform.platform(),
36 'optional deps. installed': optional_deps,
37 }
38 return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
39
[end of pydantic/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydantic/version.py b/pydantic/version.py
--- a/pydantic/version.py
+++ b/pydantic/version.py
@@ -1,6 +1,6 @@
__all__ = 'compiled', 'VERSION', 'version_info'
-VERSION = '1.9.2'
+VERSION = '1.10.0a1'
try:
import cython # type: ignore
| {"golden_diff": "diff --git a/pydantic/version.py b/pydantic/version.py\n--- a/pydantic/version.py\n+++ b/pydantic/version.py\n@@ -1,6 +1,6 @@\n __all__ = 'compiled', 'VERSION', 'version_info'\n \n-VERSION = '1.9.2'\n+VERSION = '1.10.0a1'\n \n try:\n import cython # type: ignore\n", "issue": "V1.10 release\nTo do/decide:\r\n* [x] #2557 - **merged**\r\n* [x] #2745 - needs some tweaks, but we need to decide if it's a good idea before V2\r\n* [x] #2190 - **deferred**\r\n* [x] cherry pick stuff from v1.9 branch, maybe just history #4350\r\n* [x] #3346\r\n* [x] #3593 - **deferred**\r\n* [x] #3946\r\n* [x] #4028 - **API will change in v2**\r\n* [x] #4354\r\n* [x] #4216\r\n* [x] #4191\r\n* [x] #3941 - revert or fix\r\n* [x] #4339\r\n* [x] #4356\n", "before_files": [{"content": "__all__ = 'compiled', 'VERSION', 'version_info'\n\nVERSION = '1.9.2'\n\ntry:\n import cython # type: ignore\nexcept ImportError:\n compiled: bool = False\nelse: # pragma: no cover\n try:\n compiled = cython.compiled\n except AttributeError:\n compiled = False\n\n\ndef version_info() -> str:\n import platform\n import sys\n from importlib import import_module\n from pathlib import Path\n\n optional_deps = []\n for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'):\n try:\n import_module(p.replace('-', '_'))\n except ImportError:\n continue\n optional_deps.append(p)\n\n info = {\n 'pydantic version': VERSION,\n 'pydantic compiled': compiled,\n 'install path': Path(__file__).resolve().parent,\n 'python version': sys.version,\n 'platform': platform.platform(),\n 'optional deps. installed': optional_deps,\n }\n return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for k, v in info.items())\n", "path": "pydantic/version.py"}]} | 1,068 | 93 |
gh_patches_debug_15235 | rasdani/github-patches | git_diff | conan-io__conan-center-index-10038 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] libusb/1.0.25
### Package Details
* Package Name/Version: **libusb/1.0.25**
* Changelog: **https://github.com/libusb/libusb/blob/master/ChangeLog**
2022-01-31: v1.0.25
* Linux: Fix regression with some particular devices
* Linux: Fix regression with libusb_handle_events_timeout_completed()
* Linux: Fix regression with cpu usage in libusb_bulk_transfer
* Darwin (macOS): Add support for detaching kernel drivers with authorization.
* Darwin (macOS): Do not drop partial data on timeout.
* Darwin (macOS): Silence pipe error in set_interface_alt_setting().
* Windows: Fix HID backend missing byte
* Windows: Fix segfault with libusbk driver
* Windows: Fix regression when using libusb0 driver
* Windows: Support LIBUSB_TRANSFER_ADD_ZERO_PACKET on winusb
* New NO_DEVICE_DISCOVERY option replaces WEAK_AUTHORITY option
* Various other bug fixes and improvements
PR follows
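For illustration only (not part of the version-bump request itself): once the recipe is updated, a consumer could pin the new version in a hypothetical Conan 1.x `conanfile.py` along these lines; the class name and generator below are placeholders:
```python
# Hypothetical consumer recipe pinning the requested libusb version.
from conans import ConanFile


class MyAppConan(ConanFile):
    settings = "os", "compiler", "build_type", "arch"
    requires = "libusb/1.0.25"
    generators = "cmake_find_package"
```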
</issue>
<code>
[start of recipes/libusb/all/conanfile.py]
1 from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import re
5
6 required_conan_version = ">=1.33.0"
7
8
9 class LibUSBConan(ConanFile):
10 name = "libusb"
11 description = "A cross-platform library to access USB devices"
12 license = "LGPL-2.1"
13 homepage = "https://github.com/libusb/libusb"
14 url = "https://github.com/conan-io/conan-center-index"
15 topics = ("conan", "libusb", "usb", "device")
16 settings = "os", "compiler", "build_type", "arch"
17 options = {
18 "shared": [True, False],
19 "fPIC": [True, False],
20 "enable_udev": [True, False],
21 }
22 default_options = {
23 "shared": False,
24 "fPIC": True,
25 "enable_udev": True,
26 }
27 _autotools = None
28
29 @property
30 def _source_subfolder(self):
31 return "source_subfolder"
32
33 @property
34 def _is_mingw(self):
35 return self.settings.os == "Windows" and self.settings.compiler == "gcc"
36
37 @property
38 def _is_msvc(self):
39 return self.settings.os == "Windows" and self.settings.compiler == "Visual Studio"
40
41 @property
42 def _settings_build(self):
43 return self.settings_build if hasattr(self, "settings_build") else self.settings
44
45 def config_options(self):
46 if self.settings.os == "Windows":
47 del self.options.fPIC
48 if self.settings.os not in ["Linux", "Android"]:
49 del self.options.enable_udev
50 # FIXME: enable_udev should be True for Android, but libudev recipe is missing
51 if self.settings.os == "Android":
52 self.options.enable_udev = False
53
54 def configure(self):
55 if self.options.shared:
56 del self.options.fPIC
57 del self.settings.compiler.libcxx
58 del self.settings.compiler.cppstd
59
60 def build_requirements(self):
61 if self._settings_build.os == "Windows" and not self._is_msvc and not tools.get_env("CONAN_BASH_PATH"):
62 self.build_requires("msys2/cci.latest")
63
64 def requirements(self):
65 if self.settings.os == "Linux":
66 if self.options.enable_udev:
67 self.requires("libudev/system")
68
69 def source(self):
70 tools.get(**self.conan_data["sources"][self.version],
71 destination=self._source_subfolder, strip_root=True)
72
73 def _build_visual_studio(self):
74 with tools.chdir(self._source_subfolder):
75 # Assume we're using the latest Visual Studio and default to libusb_2019.sln
76 # (or libusb_2017.sln for libusb < 1.0.24).
77 # If we're not using the latest Visual Studio, select an appropriate solution file.
78 solution_msvc_year = 2019 if tools.Version(self.version) >= "1.0.24" else 2017
79
80 solution_msvc_year = {
81 "11": 2012,
82 "12": 2013,
83 "14": 2015,
84 "15": 2017
85 }.get(str(self.settings.compiler.version), solution_msvc_year)
86
87 solution_file = os.path.join("msvc", "libusb_{}.sln".format(solution_msvc_year))
88 platforms = {"x86":"Win32"}
89 properties = {
90 # Enable LTO when CFLAGS contains -GL
91 "WholeProgramOptimization": "true" if any(re.finditer("(^| )[/-]GL($| )", tools.get_env("CFLAGS", ""))) else "false",
92 }
93 msbuild = MSBuild(self)
94 build_type = "Debug" if self.settings.build_type == "Debug" else "Release"
95 msbuild.build(solution_file, platforms=platforms, upgrade_project=False, properties=properties, build_type=build_type)
96
97 def _configure_autotools(self):
98 if not self._autotools:
99 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
100 configure_args = ["--enable-shared" if self.options.shared else "--disable-shared"]
101 configure_args.append("--enable-static" if not self.options.shared else "--disable-static")
102 if self.settings.os in ["Linux", "Android"]:
103 configure_args.append("--enable-udev" if self.options.enable_udev else "--disable-udev")
104 elif self._is_mingw:
105 if self.settings.arch == "x86_64":
106 configure_args.append("--host=x86_64-w64-mingw32")
107 elif self.settings.arch == "x86":
108 configure_args.append("--build=i686-w64-mingw32")
109 configure_args.append("--host=i686-w64-mingw32")
110 self._autotools.configure(args=configure_args, configure_dir=self._source_subfolder)
111 return self._autotools
112
113 def build(self):
114 if self._is_msvc:
115 if tools.Version(self.version) < "1.0.24":
116 for vcxproj in ["fxload_2017", "getopt_2017", "hotplugtest_2017", "libusb_dll_2017",
117 "libusb_static_2017", "listdevs_2017", "stress_2017", "testlibusb_2017", "xusb_2017"]:
118 vcxproj_path = os.path.join(self._source_subfolder, "msvc", "%s.vcxproj" % vcxproj)
119 tools.replace_in_file(vcxproj_path, "<WindowsTargetPlatformVersion>10.0.16299.0</WindowsTargetPlatformVersion>", "")
120 self._build_visual_studio()
121 else:
122 autotools = self._configure_autotools()
123 autotools.make()
124
125 def _package_visual_studio(self):
126 self.copy(pattern="libusb.h", dst=os.path.join("include", "libusb-1.0"), src=os.path.join(self._source_subfolder, "libusb"), keep_path=False)
127 arch = "x64" if self.settings.arch == "x86_64" else "Win32"
128 source_dir = os.path.join(self._source_subfolder, arch, str(self.settings.build_type), "dll" if self.options.shared else "lib")
129 if self.options.shared:
130 self.copy(pattern="libusb-1.0.dll", dst="bin", src=source_dir, keep_path=False)
131 self.copy(pattern="libusb-1.0.lib", dst="lib", src=source_dir, keep_path=False)
132 self.copy(pattern="libusb-usbdk-1.0.dll", dst="bin", src=source_dir, keep_path=False)
133 self.copy(pattern="libusb-usbdk-1.0.lib", dst="lib", src=source_dir, keep_path=False)
134 else:
135 self.copy(pattern="libusb-1.0.lib", dst="lib", src=source_dir, keep_path=False)
136 self.copy(pattern="libusb-usbdk-1.0.lib", dst="lib", src=source_dir, keep_path=False)
137
138 def package(self):
139 self.copy("COPYING", src=self._source_subfolder, dst="licenses", keep_path=False)
140 if self._is_msvc:
141 self._package_visual_studio()
142 else:
143 autotools = self._configure_autotools()
144 autotools.install()
145 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
146 tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la")
147
148 def package_info(self):
149 self.cpp_info.names["pkg_config"] = "libusb-1.0"
150 self.cpp_info.libs = tools.collect_libs(self)
151 self.cpp_info.includedirs.append(os.path.join("include", "libusb-1.0"))
152 if self.settings.os in ["Linux", "FreeBSD"]:
153 self.cpp_info.system_libs.append("pthread")
154 elif self.settings.os == "Macos":
155 self.cpp_info.system_libs = ["objc"]
156 self.cpp_info.frameworks = ["IOKit", "CoreFoundation"]
157 elif self.settings.os == "Windows":
158 self.cpp_info.system_libs = ["advapi32"]
159
[end of recipes/libusb/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/libusb/all/conanfile.py b/recipes/libusb/all/conanfile.py
--- a/recipes/libusb/all/conanfile.py
+++ b/recipes/libusb/all/conanfile.py
@@ -1,5 +1,4 @@
from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
-from conans.errors import ConanInvalidConfiguration
import os
import re
@@ -153,6 +152,6 @@
self.cpp_info.system_libs.append("pthread")
elif self.settings.os == "Macos":
self.cpp_info.system_libs = ["objc"]
- self.cpp_info.frameworks = ["IOKit", "CoreFoundation"]
+ self.cpp_info.frameworks = ["IOKit", "CoreFoundation", "Security"]
elif self.settings.os == "Windows":
self.cpp_info.system_libs = ["advapi32"]
| {"golden_diff": "diff --git a/recipes/libusb/all/conanfile.py b/recipes/libusb/all/conanfile.py\n--- a/recipes/libusb/all/conanfile.py\n+++ b/recipes/libusb/all/conanfile.py\n@@ -1,5 +1,4 @@\n from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n-from conans.errors import ConanInvalidConfiguration\n import os\n import re\n \n@@ -153,6 +152,6 @@\n self.cpp_info.system_libs.append(\"pthread\")\n elif self.settings.os == \"Macos\":\n self.cpp_info.system_libs = [\"objc\"]\n- self.cpp_info.frameworks = [\"IOKit\", \"CoreFoundation\"]\n+ self.cpp_info.frameworks = [\"IOKit\", \"CoreFoundation\", \"Security\"]\n elif self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"advapi32\"]\n", "issue": "[request] libusb/1.0.25\n### Package Details\r\n * Package Name/Version: **libusb/1.0.25**\r\n * Changelog: **https://github.com/libusb/libusb/blob/master/ChangeLog**\r\n\r\n\r\n2022-01-31: v1.0.25\r\n* Linux: Fix regression with some particular devices\r\n* Linux: Fix regression with libusb_handle_events_timeout_completed()\r\n* Linux: Fix regression with cpu usage in libusb_bulk_transfer\r\n* Darwin (macOS): Add support for detaching kernel drivers with authorization.\r\n* Darwin (macOS): Do not drop partial data on timeout.\r\n* Darwin (macOS): Silence pipe error in set_interface_alt_setting().\r\n* Windows: Fix HID backend missing byte\r\n* Windows: Fix segfault with libusbk driver\r\n* Windows: Fix regression when using libusb0 driver\r\n* Windows: Support LIBUSB_TRANSFER_ADD_ZERO_PACKET on winusb\r\n* New NO_DEVICE_DISCOVERY option replaces WEAK_AUTHORITY option\r\n* Various other bug fixes and improvements\r\n\r\nPR follows\n", "before_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport re\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibUSBConan(ConanFile):\n name = \"libusb\"\n description = \"A cross-platform library to access USB devices\"\n license = \"LGPL-2.1\"\n homepage = \"https://github.com/libusb/libusb\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"conan\", \"libusb\", \"usb\", \"device\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"enable_udev\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"enable_udev\": True,\n }\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _is_mingw(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == \"gcc\"\n\n @property\n def _is_msvc(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\"\n\n @property\n def _settings_build(self):\n return self.settings_build if hasattr(self, \"settings_build\") else self.settings\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n if self.settings.os not in [\"Linux\", \"Android\"]:\n del self.options.enable_udev\n # FIXME: enable_udev should be True for Android, but libudev recipe is missing\n if self.settings.os == \"Android\":\n self.options.enable_udev = False\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n if self._settings_build.os == \"Windows\" and not self._is_msvc and not 
tools.get_env(\"CONAN_BASH_PATH\"):\n self.build_requires(\"msys2/cci.latest\")\n\n def requirements(self):\n if self.settings.os == \"Linux\":\n if self.options.enable_udev:\n self.requires(\"libudev/system\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _build_visual_studio(self):\n with tools.chdir(self._source_subfolder):\n # Assume we're using the latest Visual Studio and default to libusb_2019.sln\n # (or libusb_2017.sln for libusb < 1.0.24).\n # If we're not using the latest Visual Studio, select an appropriate solution file.\n solution_msvc_year = 2019 if tools.Version(self.version) >= \"1.0.24\" else 2017\n\n solution_msvc_year = {\n \"11\": 2012,\n \"12\": 2013,\n \"14\": 2015,\n \"15\": 2017\n }.get(str(self.settings.compiler.version), solution_msvc_year)\n\n solution_file = os.path.join(\"msvc\", \"libusb_{}.sln\".format(solution_msvc_year))\n platforms = {\"x86\":\"Win32\"}\n properties = {\n # Enable LTO when CFLAGS contains -GL\n \"WholeProgramOptimization\": \"true\" if any(re.finditer(\"(^| )[/-]GL($| )\", tools.get_env(\"CFLAGS\", \"\"))) else \"false\",\n }\n msbuild = MSBuild(self)\n build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n msbuild.build(solution_file, platforms=platforms, upgrade_project=False, properties=properties, build_type=build_type)\n\n def _configure_autotools(self):\n if not self._autotools:\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n configure_args = [\"--enable-shared\" if self.options.shared else \"--disable-shared\"]\n configure_args.append(\"--enable-static\" if not self.options.shared else \"--disable-static\")\n if self.settings.os in [\"Linux\", \"Android\"]:\n configure_args.append(\"--enable-udev\" if self.options.enable_udev else \"--disable-udev\")\n elif self._is_mingw:\n if self.settings.arch == \"x86_64\":\n configure_args.append(\"--host=x86_64-w64-mingw32\")\n elif self.settings.arch == \"x86\":\n configure_args.append(\"--build=i686-w64-mingw32\")\n configure_args.append(\"--host=i686-w64-mingw32\")\n self._autotools.configure(args=configure_args, configure_dir=self._source_subfolder)\n return self._autotools\n\n def build(self):\n if self._is_msvc:\n if tools.Version(self.version) < \"1.0.24\":\n for vcxproj in [\"fxload_2017\", \"getopt_2017\", \"hotplugtest_2017\", \"libusb_dll_2017\",\n \"libusb_static_2017\", \"listdevs_2017\", \"stress_2017\", \"testlibusb_2017\", \"xusb_2017\"]:\n vcxproj_path = os.path.join(self._source_subfolder, \"msvc\", \"%s.vcxproj\" % vcxproj)\n tools.replace_in_file(vcxproj_path, \"<WindowsTargetPlatformVersion>10.0.16299.0</WindowsTargetPlatformVersion>\", \"\")\n self._build_visual_studio()\n else:\n autotools = self._configure_autotools()\n autotools.make()\n\n def _package_visual_studio(self):\n self.copy(pattern=\"libusb.h\", dst=os.path.join(\"include\", \"libusb-1.0\"), src=os.path.join(self._source_subfolder, \"libusb\"), keep_path=False)\n arch = \"x64\" if self.settings.arch == \"x86_64\" else \"Win32\"\n source_dir = os.path.join(self._source_subfolder, arch, str(self.settings.build_type), \"dll\" if self.options.shared else \"lib\")\n if self.options.shared:\n self.copy(pattern=\"libusb-1.0.dll\", dst=\"bin\", src=source_dir, keep_path=False)\n self.copy(pattern=\"libusb-1.0.lib\", dst=\"lib\", src=source_dir, keep_path=False)\n self.copy(pattern=\"libusb-usbdk-1.0.dll\", dst=\"bin\", src=source_dir, 
keep_path=False)\n self.copy(pattern=\"libusb-usbdk-1.0.lib\", dst=\"lib\", src=source_dir, keep_path=False)\n else:\n self.copy(pattern=\"libusb-1.0.lib\", dst=\"lib\", src=source_dir, keep_path=False)\n self.copy(pattern=\"libusb-usbdk-1.0.lib\", dst=\"lib\", src=source_dir, keep_path=False)\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\", keep_path=False)\n if self._is_msvc:\n self._package_visual_studio()\n else:\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.names[\"pkg_config\"] = \"libusb-1.0\"\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"libusb-1.0\"))\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.append(\"pthread\")\n elif self.settings.os == \"Macos\":\n self.cpp_info.system_libs = [\"objc\"]\n self.cpp_info.frameworks = [\"IOKit\", \"CoreFoundation\"]\n elif self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"advapi32\"]\n", "path": "recipes/libusb/all/conanfile.py"}]} | 3,015 | 194 |
gh_patches_debug_31538 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-291 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Mention Support
**Describe the solution you'd like**
Add Remove Mention support, as in JS and C#. See ActivityExtensions for a reference of the Mention-related methods. To remove mentions from Activity.Text, see ActivityExtensions.RemoveMentionText and ActivityExtensions.RemoveRecipientMention. Note that in JS the equivalent is TurnContext.removeMentionText.
**Describe alternatives you've considered**
None
**Additional context**
I have implemented SkypeMentionNormalizeMiddleware on all platforms to correct Skype mentions. Users could still make use of this middleware, but they would have to manually remove the mention from Activity.Text to get the same functionality as on the other platforms.
[enhancement]
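For illustration, a hedged sketch of how the requested helpers might be used from a bot once they exist. It assumes the methods land on `TurnContext` as static methods mirroring the C#/JS extensions, and that `ActivityHandler` is available in botbuilder-core; the names here are illustrative, not the final API:
```python
# Sketch of intended usage; assumes TurnContext.remove_recipient_mention()
# exists and strips "<at>BotName</at>" from the incoming message text.
from botbuilder.core import ActivityHandler, TurnContext


class EchoBot(ActivityHandler):
    async def on_message_activity(self, turn_context: TurnContext):
        text = TurnContext.remove_recipient_mention(turn_context.activity)
        await turn_context.send_activity("You said: %s" % (text or "").strip())
```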
</issue>
<code>
[start of libraries/botbuilder-core/botbuilder/core/turn_context.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from copy import copy
5 from typing import List, Callable, Union, Dict
6 from botbuilder.schema import Activity, ConversationReference, ResourceResponse
7
8
9 class TurnContext:
10 def __init__(self, adapter_or_context, request: Activity = None):
11 """
12 Creates a new TurnContext instance.
13 :param adapter_or_context:
14 :param request:
15 """
16 if isinstance(adapter_or_context, TurnContext):
17 adapter_or_context.copy_to(self)
18 else:
19 self.adapter = adapter_or_context
20 self._activity = request
21 self.responses: List[Activity] = []
22 self._services: dict = {}
23 self._on_send_activities: Callable[
24 ["TurnContext", List[Activity], Callable], List[ResourceResponse]
25 ] = []
26 self._on_update_activity: Callable[
27 ["TurnContext", Activity, Callable], ResourceResponse
28 ] = []
29 self._on_delete_activity: Callable[
30 ["TurnContext", ConversationReference, Callable], None
31 ] = []
32 self._responded: bool = False
33
34 if self.adapter is None:
35 raise TypeError("TurnContext must be instantiated with an adapter.")
36 if self.activity is None:
37 raise TypeError(
38 "TurnContext must be instantiated with a request parameter of type Activity."
39 )
40
41 self._turn_state = {}
42
43 @property
44 def turn_state(self) -> Dict[str, object]:
45 return self._turn_state
46
47 def copy_to(self, context: "TurnContext") -> None:
48 """
49 Called when this TurnContext instance is passed into the constructor of a new TurnContext
50 instance. Can be overridden in derived classes.
51 :param context:
52 :return:
53 """
54 for attribute in [
55 "adapter",
56 "activity",
57 "_responded",
58 "_services",
59 "_on_send_activities",
60 "_on_update_activity",
61 "_on_delete_activity",
62 ]:
63 setattr(context, attribute, getattr(self, attribute))
64
65 @property
66 def activity(self):
67 """
68 The received activity.
69 :return:
70 """
71 return self._activity
72
73 @activity.setter
74 def activity(self, value):
75 """
76 Used to set TurnContext._activity when a context object is created. Only takes instances of Activities.
77 :param value:
78 :return:
79 """
80 if not isinstance(value, Activity):
81 raise TypeError(
82 "TurnContext: cannot set `activity` to a type other than Activity."
83 )
84 self._activity = value
85
86 @property
87 def responded(self) -> bool:
88 """
89 If `true` at least one response has been sent for the current turn of conversation.
90 :return:
91 """
92 return self._responded
93
94 @responded.setter
95 def responded(self, value: bool):
96 if not value:
97 raise ValueError("TurnContext: cannot set TurnContext.responded to False.")
98 self._responded = True
99
100 @property
101 def services(self):
102 """
103 Map of services and other values cached for the lifetime of the turn.
104 :return:
105 """
106 return self._services
107
108 def get(self, key: str) -> object:
109 if not key or not isinstance(key, str):
110 raise TypeError('"key" must be a valid string.')
111 try:
112 return self._services[key]
113 except KeyError:
114 raise KeyError("%s not found in TurnContext._services." % key)
115
116 def has(self, key: str) -> bool:
117 """
118 Returns True is set() has been called for a key. The cached value may be of type 'None'.
119 :param key:
120 :return:
121 """
122 if key in self._services:
123 return True
124 return False
125
126 def set(self, key: str, value: object) -> None:
127 """
128 Caches a value for the lifetime of the current turn.
129 :param key:
130 :param value:
131 :return:
132 """
133 if not key or not isinstance(key, str):
134 raise KeyError('"key" must be a valid string.')
135
136 self._services[key] = value
137
138 async def send_activity(
139 self, *activity_or_text: Union[Activity, str]
140 ) -> ResourceResponse:
141 """
142 Sends a single activity or message to the user.
143 :param activity_or_text:
144 :return:
145 """
146 reference = TurnContext.get_conversation_reference(self.activity)
147
148 output = [
149 TurnContext.apply_conversation_reference(
150 Activity(text=a, type="message") if isinstance(a, str) else a, reference
151 )
152 for a in activity_or_text
153 ]
154 for activity in output:
155 if not activity.input_hint:
156 activity.input_hint = "acceptingInput"
157
158 async def callback(context: "TurnContext", output):
159 responses = await context.adapter.send_activities(context, output)
160 context._responded = True # pylint: disable=protected-access
161 return responses
162
163 result = await self._emit(
164 self._on_send_activities, output, callback(self, output)
165 )
166
167 return result[0] if result else ResourceResponse()
168
169 async def update_activity(self, activity: Activity):
170 """
171 Replaces an existing activity.
172 :param activity:
173 :return:
174 """
175 return await self._emit(
176 self._on_update_activity,
177 activity,
178 self.adapter.update_activity(self, activity),
179 )
180
181 async def delete_activity(self, id_or_reference: Union[str, ConversationReference]):
182 """
183 Deletes an existing activity.
184 :param id_or_reference:
185 :return:
186 """
187 if isinstance(id_or_reference, str):
188 reference = TurnContext.get_conversation_reference(self.activity)
189 reference.activity_id = id_or_reference
190 else:
191 reference = id_or_reference
192 return await self._emit(
193 self._on_delete_activity,
194 reference,
195 self.adapter.delete_activity(self, reference),
196 )
197
198 def on_send_activities(self, handler) -> "TurnContext":
199 """
200 Registers a handler to be notified of and potentially intercept the sending of activities.
201 :param handler:
202 :return:
203 """
204 self._on_send_activities.append(handler)
205 return self
206
207 def on_update_activity(self, handler) -> "TurnContext":
208 """
209 Registers a handler to be notified of and potentially intercept an activity being updated.
210 :param handler:
211 :return:
212 """
213 self._on_update_activity.append(handler)
214 return self
215
216 def on_delete_activity(self, handler) -> "TurnContext":
217 """
218 Registers a handler to be notified of and potentially intercept an activity being deleted.
219 :param handler:
220 :return:
221 """
222 self._on_delete_activity.append(handler)
223 return self
224
225 async def _emit(self, plugins, arg, logic):
226 handlers = copy(plugins)
227
228 async def emit_next(i: int):
229 context = self
230 try:
231 if i < len(handlers):
232
233 async def next_handler():
234 await emit_next(i + 1)
235
236 await handlers[i](context, arg, next_handler)
237
238 except Exception as error:
239 raise error
240
241 await emit_next(0)
242 # This should be changed to `return await logic()`
243 return await logic
244
245 @staticmethod
246 def get_conversation_reference(activity: Activity) -> ConversationReference:
247 """
248 Returns the conversation reference for an activity. This can be saved as a plain old JSON
249 object and then later used to message the user proactively.
250
251 Usage Example:
252 reference = TurnContext.get_conversation_reference(context.request)
253 :param activity:
254 :return:
255 """
256 return ConversationReference(
257 activity_id=activity.id,
258 user=copy(activity.from_property),
259 bot=copy(activity.recipient),
260 conversation=copy(activity.conversation),
261 channel_id=activity.channel_id,
262 service_url=activity.service_url,
263 )
264
265 @staticmethod
266 def apply_conversation_reference(
267 activity: Activity, reference: ConversationReference, is_incoming: bool = False
268 ) -> Activity:
269 """
270 Updates an activity with the delivery information from a conversation reference. Calling
271 this after get_conversation_reference on an incoming activity
272 will properly address the reply to a received activity.
273 :param activity:
274 :param reference:
275 :param is_incoming:
276 :return:
277 """
278 activity.channel_id = reference.channel_id
279 activity.service_url = reference.service_url
280 activity.conversation = reference.conversation
281 if is_incoming:
282 activity.from_property = reference.user
283 activity.recipient = reference.bot
284 if reference.activity_id:
285 activity.id = reference.activity_id
286 else:
287 activity.from_property = reference.bot
288 activity.recipient = reference.user
289 if reference.activity_id:
290 activity.reply_to_id = reference.activity_id
291
292 return activity
293
[end of libraries/botbuilder-core/botbuilder/core/turn_context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-core/botbuilder/core/turn_context.py b/libraries/botbuilder-core/botbuilder/core/turn_context.py
--- a/libraries/botbuilder-core/botbuilder/core/turn_context.py
+++ b/libraries/botbuilder-core/botbuilder/core/turn_context.py
@@ -1,9 +1,10 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
+import re
from copy import copy
from typing import List, Callable, Union, Dict
-from botbuilder.schema import Activity, ConversationReference, ResourceResponse
+from botbuilder.schema import Activity, ConversationReference, Mention, ResourceResponse
class TurnContext:
@@ -290,3 +291,44 @@
activity.reply_to_id = reference.activity_id
return activity
+
+ @staticmethod
+ def get_reply_conversation_reference(
+ activity: Activity, reply: ResourceResponse
+ ) -> ConversationReference:
+ reference: ConversationReference = TurnContext.get_conversation_reference(
+ activity
+ )
+
+ # Update the reference with the new outgoing Activity's id.
+ reference.activity_id = reply.id
+
+ return reference
+
+ @staticmethod
+ def remove_recipient_mention(activity: Activity) -> str:
+ return TurnContext.remove_mention_text(activity, activity.recipient.id)
+
+ @staticmethod
+ def remove_mention_text(activity: Activity, identifier: str) -> str:
+ mentions = TurnContext.get_mentions(activity)
+ for mention in mentions:
+ if mention.mentioned.id == identifier:
+ mention_name_match = re.match(
+ r"<at(.*)>(.*?)<\/at>", mention.text, re.IGNORECASE
+ )
+ if mention_name_match:
+ activity.text = re.sub(
+ mention_name_match.groups()[1], "", activity.text
+ )
+ activity.text = re.sub(r"<at><\/at>", "", activity.text)
+ return activity.text
+
+ @staticmethod
+ def get_mentions(activity: Activity) -> List[Mention]:
+ result: List[Mention] = []
+ if activity.entities is not None:
+ for entity in activity.entities:
+ if entity.type.lower() == "mention":
+ result.append(entity)
+ return result
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/turn_context.py b/libraries/botbuilder-core/botbuilder/core/turn_context.py\n--- a/libraries/botbuilder-core/botbuilder/core/turn_context.py\n+++ b/libraries/botbuilder-core/botbuilder/core/turn_context.py\n@@ -1,9 +1,10 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n \n+import re\n from copy import copy\n from typing import List, Callable, Union, Dict\n-from botbuilder.schema import Activity, ConversationReference, ResourceResponse\n+from botbuilder.schema import Activity, ConversationReference, Mention, ResourceResponse\n \n \n class TurnContext:\n@@ -290,3 +291,44 @@\n activity.reply_to_id = reference.activity_id\n \n return activity\n+\n+ @staticmethod\n+ def get_reply_conversation_reference(\n+ activity: Activity, reply: ResourceResponse\n+ ) -> ConversationReference:\n+ reference: ConversationReference = TurnContext.get_conversation_reference(\n+ activity\n+ )\n+\n+ # Update the reference with the new outgoing Activity's id.\n+ reference.activity_id = reply.id\n+\n+ return reference\n+\n+ @staticmethod\n+ def remove_recipient_mention(activity: Activity) -> str:\n+ return TurnContext.remove_mention_text(activity, activity.recipient.id)\n+\n+ @staticmethod\n+ def remove_mention_text(activity: Activity, identifier: str) -> str:\n+ mentions = TurnContext.get_mentions(activity)\n+ for mention in mentions:\n+ if mention.mentioned.id == identifier:\n+ mention_name_match = re.match(\n+ r\"<at(.*)>(.*?)<\\/at>\", mention.text, re.IGNORECASE\n+ )\n+ if mention_name_match:\n+ activity.text = re.sub(\n+ mention_name_match.groups()[1], \"\", activity.text\n+ )\n+ activity.text = re.sub(r\"<at><\\/at>\", \"\", activity.text)\n+ return activity.text\n+\n+ @staticmethod\n+ def get_mentions(activity: Activity) -> List[Mention]:\n+ result: List[Mention] = []\n+ if activity.entities is not None:\n+ for entity in activity.entities:\n+ if entity.type.lower() == \"mention\":\n+ result.append(entity)\n+ return result\n", "issue": "Remove Mention Support\n**Describe the solution you'd like**\r\nAdd Remove Mention support as JS and C#. See ActivityExtensions for a reference of Mention related methods. To remove mentions from Activity.Text, see ActivityExtensions.RemoveMentionText and ActivityExtensions.RemoveRecipientMention. Note that in JS it is TurnContext.removeMentionText.\r\n\r\n**Describe alternatives you've considered**\r\nNone\r\n\r\n**Additional context**\r\nI have implemented SkypeMentionNormalizeMiddleware on all platforms to correct Skype mentions. The user could still make use of this middleware, but would have to manually remove the mention from Activity.Text to have the same functionality as the other platforms.\r\n\r\n[enhancement]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom copy import copy\nfrom typing import List, Callable, Union, Dict\nfrom botbuilder.schema import Activity, ConversationReference, ResourceResponse\n\n\nclass TurnContext:\n def __init__(self, adapter_or_context, request: Activity = None):\n \"\"\"\n Creates a new TurnContext instance.\n :param adapter_or_context:\n :param request:\n \"\"\"\n if isinstance(adapter_or_context, TurnContext):\n adapter_or_context.copy_to(self)\n else:\n self.adapter = adapter_or_context\n self._activity = request\n self.responses: List[Activity] = []\n self._services: dict = {}\n self._on_send_activities: Callable[\n [\"TurnContext\", List[Activity], Callable], List[ResourceResponse]\n ] = []\n self._on_update_activity: Callable[\n [\"TurnContext\", Activity, Callable], ResourceResponse\n ] = []\n self._on_delete_activity: Callable[\n [\"TurnContext\", ConversationReference, Callable], None\n ] = []\n self._responded: bool = False\n\n if self.adapter is None:\n raise TypeError(\"TurnContext must be instantiated with an adapter.\")\n if self.activity is None:\n raise TypeError(\n \"TurnContext must be instantiated with a request parameter of type Activity.\"\n )\n\n self._turn_state = {}\n\n @property\n def turn_state(self) -> Dict[str, object]:\n return self._turn_state\n\n def copy_to(self, context: \"TurnContext\") -> None:\n \"\"\"\n Called when this TurnContext instance is passed into the constructor of a new TurnContext\n instance. Can be overridden in derived classes.\n :param context:\n :return:\n \"\"\"\n for attribute in [\n \"adapter\",\n \"activity\",\n \"_responded\",\n \"_services\",\n \"_on_send_activities\",\n \"_on_update_activity\",\n \"_on_delete_activity\",\n ]:\n setattr(context, attribute, getattr(self, attribute))\n\n @property\n def activity(self):\n \"\"\"\n The received activity.\n :return:\n \"\"\"\n return self._activity\n\n @activity.setter\n def activity(self, value):\n \"\"\"\n Used to set TurnContext._activity when a context object is created. Only takes instances of Activities.\n :param value:\n :return:\n \"\"\"\n if not isinstance(value, Activity):\n raise TypeError(\n \"TurnContext: cannot set `activity` to a type other than Activity.\"\n )\n self._activity = value\n\n @property\n def responded(self) -> bool:\n \"\"\"\n If `true` at least one response has been sent for the current turn of conversation.\n :return:\n \"\"\"\n return self._responded\n\n @responded.setter\n def responded(self, value: bool):\n if not value:\n raise ValueError(\"TurnContext: cannot set TurnContext.responded to False.\")\n self._responded = True\n\n @property\n def services(self):\n \"\"\"\n Map of services and other values cached for the lifetime of the turn.\n :return:\n \"\"\"\n return self._services\n\n def get(self, key: str) -> object:\n if not key or not isinstance(key, str):\n raise TypeError('\"key\" must be a valid string.')\n try:\n return self._services[key]\n except KeyError:\n raise KeyError(\"%s not found in TurnContext._services.\" % key)\n\n def has(self, key: str) -> bool:\n \"\"\"\n Returns True is set() has been called for a key. 
The cached value may be of type 'None'.\n :param key:\n :return:\n \"\"\"\n if key in self._services:\n return True\n return False\n\n def set(self, key: str, value: object) -> None:\n \"\"\"\n Caches a value for the lifetime of the current turn.\n :param key:\n :param value:\n :return:\n \"\"\"\n if not key or not isinstance(key, str):\n raise KeyError('\"key\" must be a valid string.')\n\n self._services[key] = value\n\n async def send_activity(\n self, *activity_or_text: Union[Activity, str]\n ) -> ResourceResponse:\n \"\"\"\n Sends a single activity or message to the user.\n :param activity_or_text:\n :return:\n \"\"\"\n reference = TurnContext.get_conversation_reference(self.activity)\n\n output = [\n TurnContext.apply_conversation_reference(\n Activity(text=a, type=\"message\") if isinstance(a, str) else a, reference\n )\n for a in activity_or_text\n ]\n for activity in output:\n if not activity.input_hint:\n activity.input_hint = \"acceptingInput\"\n\n async def callback(context: \"TurnContext\", output):\n responses = await context.adapter.send_activities(context, output)\n context._responded = True # pylint: disable=protected-access\n return responses\n\n result = await self._emit(\n self._on_send_activities, output, callback(self, output)\n )\n\n return result[0] if result else ResourceResponse()\n\n async def update_activity(self, activity: Activity):\n \"\"\"\n Replaces an existing activity.\n :param activity:\n :return:\n \"\"\"\n return await self._emit(\n self._on_update_activity,\n activity,\n self.adapter.update_activity(self, activity),\n )\n\n async def delete_activity(self, id_or_reference: Union[str, ConversationReference]):\n \"\"\"\n Deletes an existing activity.\n :param id_or_reference:\n :return:\n \"\"\"\n if isinstance(id_or_reference, str):\n reference = TurnContext.get_conversation_reference(self.activity)\n reference.activity_id = id_or_reference\n else:\n reference = id_or_reference\n return await self._emit(\n self._on_delete_activity,\n reference,\n self.adapter.delete_activity(self, reference),\n )\n\n def on_send_activities(self, handler) -> \"TurnContext\":\n \"\"\"\n Registers a handler to be notified of and potentially intercept the sending of activities.\n :param handler:\n :return:\n \"\"\"\n self._on_send_activities.append(handler)\n return self\n\n def on_update_activity(self, handler) -> \"TurnContext\":\n \"\"\"\n Registers a handler to be notified of and potentially intercept an activity being updated.\n :param handler:\n :return:\n \"\"\"\n self._on_update_activity.append(handler)\n return self\n\n def on_delete_activity(self, handler) -> \"TurnContext\":\n \"\"\"\n Registers a handler to be notified of and potentially intercept an activity being deleted.\n :param handler:\n :return:\n \"\"\"\n self._on_delete_activity.append(handler)\n return self\n\n async def _emit(self, plugins, arg, logic):\n handlers = copy(plugins)\n\n async def emit_next(i: int):\n context = self\n try:\n if i < len(handlers):\n\n async def next_handler():\n await emit_next(i + 1)\n\n await handlers[i](context, arg, next_handler)\n\n except Exception as error:\n raise error\n\n await emit_next(0)\n # This should be changed to `return await logic()`\n return await logic\n\n @staticmethod\n def get_conversation_reference(activity: Activity) -> ConversationReference:\n \"\"\"\n Returns the conversation reference for an activity. 
This can be saved as a plain old JSON\n object and then later used to message the user proactively.\n\n Usage Example:\n reference = TurnContext.get_conversation_reference(context.request)\n :param activity:\n :return:\n \"\"\"\n return ConversationReference(\n activity_id=activity.id,\n user=copy(activity.from_property),\n bot=copy(activity.recipient),\n conversation=copy(activity.conversation),\n channel_id=activity.channel_id,\n service_url=activity.service_url,\n )\n\n @staticmethod\n def apply_conversation_reference(\n activity: Activity, reference: ConversationReference, is_incoming: bool = False\n ) -> Activity:\n \"\"\"\n Updates an activity with the delivery information from a conversation reference. Calling\n this after get_conversation_reference on an incoming activity\n will properly address the reply to a received activity.\n :param activity:\n :param reference:\n :param is_incoming:\n :return:\n \"\"\"\n activity.channel_id = reference.channel_id\n activity.service_url = reference.service_url\n activity.conversation = reference.conversation\n if is_incoming:\n activity.from_property = reference.user\n activity.recipient = reference.bot\n if reference.activity_id:\n activity.id = reference.activity_id\n else:\n activity.from_property = reference.bot\n activity.recipient = reference.user\n if reference.activity_id:\n activity.reply_to_id = reference.activity_id\n\n return activity\n", "path": "libraries/botbuilder-core/botbuilder/core/turn_context.py"}]} | 3,350 | 509 |
gh_patches_debug_554 | rasdani/github-patches | git_diff | scikit-image__scikit-image-353 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please add info how to run the skimage unit tests at the end of the installation instructions
I couldn't find instructions how to run the skimage unit tests.
First I tried
```
python -c 'import skimage; skimage.test()'
```
which ran 287 tests and gave 16 errors, all the same:
```
ImportError: cannot import name BytesIO
```
Then I tried
```
nosetests --exe skimage
```
which ran 490 tests, no error.
Full output is here: https://gist.github.com/3832077
Apparently it is important to not use `skimage.test()`, but `nosetests` instead?
Could you please add this info somewhere? The first place I would have looked is at the end of http://skimage.org/docs/dev/install.html (or make "nosetests" or "run tests" in the Sphinx search point to the appropriate command to run).
Thanks!
</issue>
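A minimal sketch of the two invocations from the report, mirroring the `nose.run` call that `skimage.test` wraps (the `-w` working-directory explanation is a best guess, and the snippet assumes scikit-image and nose are installed):

```python
import nose
import skimage

# What `skimage.test()` effectively runs today (see `_setup_test` below):
# `-w` makes nose change its working directory into the package, where
# skimage's own `io` subpackage can shadow the stdlib `io` module -- a
# plausible source of "cannot import name BytesIO".
ok_old = nose.run('skimage', argv=['', '--exe', '-w', skimage.pkg_dir])

# The invocation that worked for the reporter (shell: `nosetests --exe skimage`).
ok_new = nose.run(argv=['nosetests', '--exe', 'skimage'])
```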
<code>
[start of skimage/__init__.py]
1 """Image Processing SciKit (Toolbox for SciPy)
2
3 ``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image
4 processing and computer vision.
5
6 The main package of ``skimage`` only provides a few utilities for converting
7 between image data types; for most features, you need to import one of the
8 following subpackages:
9
10 Subpackages
11 -----------
12 color
13 Color space conversion.
14 data
15 Test images and example data.
16 draw
17 Image drawing primitives (lines, text, etc.).
18 exposure
19 Image intensity adjustment (e.g., histogram equalization).
20 feature
21 Feature detection (e.g. texture analysis, corners, etc.).
22 filter
23 Sharpening, edge finding, denoising, etc.
24 graph
25 Graph-theoretic operations, e.g. dynamic programming (shortest paths).
26 io
27 Reading, saving, and displaying images and video.
28 measure
29 Measurement of image properties, e.g., similarity and contours.
30 morphology
31 Morphological operations, e.g. opening or skeletonization.
32 segmentation
33 Splitting an image into self-similar regions.
34 transform
35 Geometric and other transforms, e.g. rotation or the Radon transform.
36 util
37 Generic utilities.
38
39 Utility Functions
40 -----------------
41 get_log
42 Returns the ``skimage`` log. Use this to print debug output.
43 img_as_float
44 Convert an image to floating point format, with values in [0, 1].
45 img_as_uint
46 Convert an image to unsigned integer format, with values in [0, 65535].
47 img_as_int
48 Convert an image to signed integer format, with values in [-32768, 32767].
49 img_as_ubyte
50 Convert an image to unsigned byte format, with values in [0, 255].
51
52 """
53
54 import os.path as _osp
55
56 pkg_dir = _osp.abspath(_osp.dirname(__file__))
57 data_dir = _osp.join(pkg_dir, 'data')
58
59 try:
60 from .version import version as __version__
61 except ImportError:
62 __version__ = "unbuilt-dev"
63
64
65 def _setup_test(verbose=False):
66 import functools
67
68 args = ['', '--exe', '-w', pkg_dir]
69 if verbose:
70 args.extend(['-v', '-s'])
71
72 try:
73 import nose as _nose
74 except ImportError:
75 def broken_test_func():
76 """This would invoke the skimage test suite, but nose couldn't be
77 imported so the test suite can not run.
78 """
79 raise ImportError("Could not load nose. Unit tests not available.")
80 return broken_test_func
81 else:
82 f = functools.partial(_nose.run, 'skimage', argv=args)
83 f.__doc__ = 'Invoke the skimage test suite.'
84 return f
85
86
87 test = _setup_test()
88 test_verbose = _setup_test(verbose=True)
89
90
91 def get_log(name=None):
92 """Return a console logger.
93
94 Output may be sent to the logger using the `debug`, `info`, `warning`,
95 `error` and `critical` methods.
96
97 Parameters
98 ----------
99 name : str
100 Name of the log.
101
102 References
103 ----------
104 .. [1] Logging facility for Python,
105 http://docs.python.org/library/logging.html
106
107 """
108 import logging
109
110 if name is None:
111 name = 'skimage'
112 else:
113 name = 'skimage.' + name
114
115 log = logging.getLogger(name)
116 return log
117
118
119 def _setup_log():
120 """Configure root logger.
121
122 """
123 import logging
124 import sys
125
126 formatter = logging.Formatter(
127 '%(name)s: %(levelname)s: %(message)s'
128 )
129
130 try:
131 handler = logging.StreamHandler(stream=sys.stdout)
132 except TypeError:
133 handler = logging.StreamHandler(strm=sys.stdout)
134 handler.setFormatter(formatter)
135
136 log = get_log()
137 log.addHandler(handler)
138 log.setLevel(logging.WARNING)
139 log.propagate = False
140
141 _setup_log()
142
143 from .util.dtype import *
144
[end of skimage/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/__init__.py b/skimage/__init__.py
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -65,7 +65,7 @@
def _setup_test(verbose=False):
import functools
- args = ['', '--exe', '-w', pkg_dir]
+ args = ['', pkg_dir, '--exe']
if verbose:
args.extend(['-v', '-s'])
| {"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -65,7 +65,7 @@\n def _setup_test(verbose=False):\n import functools\n \n- args = ['', '--exe', '-w', pkg_dir]\n+ args = ['', pkg_dir, '--exe']\n if verbose:\n args.extend(['-v', '-s'])\n", "issue": "Please add info how to run the skimage unit tests at the end of the installation instructions\nI couldn't find instructions how to run the skimage unit tests.\n\nFirst I tried\n\n```\npython -c 'import skimage; skimage.test()\n```\n\nwhich ran 287 tests and gave 16 errors, all the same:\n\n```\nImportError: cannot import name BytesIO\n```\n\nThen I tried\n\n```\nnosetests --exe skimage\n```\n\nwhich ran 490 tests, no error.\n\nFull output is here: https://gist.github.com/3832077\n\nApparently it is important to not use `skimage.test()`, but `nosetests` instead?\nCould you please add this info somewhere, the first place I would have looked is at the end of http://skimage.org/docs/dev/install.html ( or make \"nosetests\" or \"run tests\" in the sphinx search find the appropriate command to run).\n\nThanks!\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikits-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Image drawing primitives (lines, text, etc.).\nexposure\n Image intensity adjustment (e.g., histogram equalization).\nfeature\n Feature detection (e.g. texture analysis, corners, etc.).\nfilter\n Sharpening, edge finding, denoising, etc.\ngraph\n Graph-theoretic operations, e.g. dynamic programming (shortest paths).\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g. opening or skeletonization.\nsegmentation\n Splitting an image into self-similar regions.\ntransform\n Geometric and other transforms, e.g. rotation or the Radon transform.\nutil\n Generic utilities.\n\nUtility Functions\n-----------------\nget_log\n Returns the ``skimage`` log. Use this to print debug output.\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\n\n\ndef _setup_test(verbose=False):\n import functools\n\n args = ['', '--exe', '-w', pkg_dir]\n if verbose:\n args.extend(['-v', '-s'])\n\n try:\n import nose as _nose\n except ImportError:\n def broken_test_func():\n \"\"\"This would invoke the skimage test suite, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Unit tests not available.\")\n return broken_test_func\n else:\n f = functools.partial(_nose.run, 'skimage', argv=args)\n f.__doc__ = 'Invoke the skimage test suite.'\n return f\n\n\ntest = _setup_test()\ntest_verbose = _setup_test(verbose=True)\n\n\ndef get_log(name=None):\n \"\"\"Return a console logger.\n\n Output may be sent to the logger using the `debug`, `info`, `warning`,\n `error` and `critical` methods.\n\n Parameters\n ----------\n name : str\n Name of the log.\n\n References\n ----------\n .. [1] Logging facility for Python,\n http://docs.python.org/library/logging.html\n\n \"\"\"\n import logging\n\n if name is None:\n name = 'skimage'\n else:\n name = 'skimage.' + name\n\n log = logging.getLogger(name)\n return log\n\n\ndef _setup_log():\n \"\"\"Configure root logger.\n\n \"\"\"\n import logging\n import sys\n\n formatter = logging.Formatter(\n '%(name)s: %(levelname)s: %(message)s'\n )\n\n try:\n handler = logging.StreamHandler(stream=sys.stdout)\n except TypeError:\n handler = logging.StreamHandler(strm=sys.stdout)\n handler.setFormatter(formatter)\n\n log = get_log()\n log.addHandler(handler)\n log.setLevel(logging.WARNING)\n log.propagate = False\n\n_setup_log()\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]} | 1,933 | 102 |
gh_patches_debug_28801 | rasdani/github-patches | git_diff | nautobot__nautobot-3925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Removing credentials from a previously synced GitRepository that requires them may result in hangs
### Environment
* Nautobot version (Docker tag too if applicable): 1.5.21
### Steps to Reproduce
1. For a Git repository that requires authentication (such as a private GitHub repository), configure the `GitRepository` in Nautobot, with appropriate credentials, mark it as providing `Jobs`, and sync it successfully (which happens in the Celery worker environment).
2. Edit the `GitRepository` to remove the credentials and resync it. The resync fails as expected (again, in the Celery worker environment).
3. Restart the Nautobot server (during startup, the `post_upgrade` signal will trigger a check and potential sync of all Git repositories in the Nautobot server environment in order to ensure that Jobs are properly discovered).
### Expected Behavior
Nautobot server to start up, perhaps with logs indicating that the repository could not be synced.
### Observed Behavior
Nautobot server startup hangs, apparently because GitPython received and didn't handle the Git `Username:` prompt and is waiting indefinitely for user input that will never come.
</issue>
<code>
[start of nautobot/utilities/git.py]
1 """General-purpose Git utilities."""
2
3 from collections import namedtuple
4 import logging
5 import os
6
7 from git import Repo
8
9
10 logger = logging.getLogger("nautobot.utilities.git")
11
12 # namedtuple takes a git log diff status and its accompanying text.
13 GitDiffLog = namedtuple("GitDiffLog", ["status", "text"])
14
15 # 'A' and 'D' status are swapped because of the way the repo.git.diff was implemented
16 # e.g. 'A' actually stands for Addition but in this case is Deletion
17 GIT_STATUS_MAP = {
18 "A": "Deletion",
19 "M": "Modification",
20 "C": "Copy",
21 "D": "Addition",
22 "R": "Renaming",
23 "T": "File Type Changed",
24 "U": "File Unmerged",
25 "X": "Unknown",
26 }
27
28
29 def swap_status_initials(data):
30 """Swap Git status initials with its equivalent."""
31 initial, text = data.split("\t")
32 return GitDiffLog(status=GIT_STATUS_MAP.get(initial), text=text)
33
34
35 def convert_git_diff_log_to_list(logs):
36 """
37 Convert Git diff log into a list splitted by \\n
38
39 Example:
40 >>> git_log = "M\tindex.html\nR\tsample.txt"
41 >>> print(convert_git_diff_log_to_list(git_log))
42 ["Modification - index.html", "Renaming - sample.txt"]
43 """
44 logs = logs.split("\n")
45 return [swap_status_initials(line) for line in logs]
46
47
48 class BranchDoesNotExist(Exception):
49 pass
50
51
52 class GitRepo:
53 def __init__(self, path, url, clone_initially=True):
54 """
55 Ensure that we have a clone of the given remote Git repository URL at the given local directory path.
56
57 Args:
58 path (str): path to git repo
59 url (str): git repo url
60 clone_initially (bool): True if the repo needs to be cloned
61 """
62 if os.path.isdir(path):
63 self.repo = Repo(path=path)
64 elif clone_initially:
65 self.repo = Repo.clone_from(url, to_path=path)
66 else:
67 self.repo = Repo.init(path)
68 self.repo.create_remote("origin", url=url)
69
70 if url not in self.repo.remotes.origin.urls:
71 self.repo.remotes.origin.set_url(url)
72
73 def fetch(self):
74 self.repo.remotes.origin.fetch()
75
76 def checkout(self, branch, commit_hexsha=None):
77 """
78 Check out the given branch, and optionally the specified commit within that branch.
79 """
80 # Short-circuit logic - do we already have this commit checked out?
81 if commit_hexsha and commit_hexsha == self.repo.head.commit.hexsha:
82 logger.debug(f"Commit {commit_hexsha} is already checked out.")
83 return commit_hexsha
84
85 self.fetch()
86 if commit_hexsha:
87 # Sanity check - GitPython doesn't provide a handy API for this so we just call a raw Git command:
88 # $ git branch origin/<branch> --remotes --contains <commit>
89 # prints the branch name if it DOES contain the commit, and nothing if it DOES NOT contain the commit.
90 # Since we did a `fetch` and not a `pull` above, we need to check for the commit in the remote origin
91 # branch, not the local (not-yet-updated) branch.
92 if branch not in self.repo.git.branch(f"origin/{branch}", "--remotes", "--contains", commit_hexsha):
93 raise RuntimeError(f"Requested to check out commit `{commit_hexsha}`, but it's not in branch {branch}!")
94 logger.info(f"Checking out commit `{commit_hexsha}` on branch `{branch}`...")
95 self.repo.git.checkout(commit_hexsha)
96 return commit_hexsha
97
98 if branch in self.repo.heads:
99 branch_head = self.repo.heads[branch]
100 else:
101 try:
102 branch_head = self.repo.create_head(branch, self.repo.remotes.origin.refs[branch])
103 branch_head.set_tracking_branch(self.repo.remotes.origin.refs[branch])
104 except IndexError as git_error:
105 logger.error(
106 "Branch %s does not exist at %s. %s", branch, list(self.repo.remotes.origin.urls)[0], git_error
107 )
108 raise BranchDoesNotExist(
109 f"Please create branch '{branch}' in upstream and try again."
110 f" If this is a new repo, please add a commit before syncing. {git_error}"
111 )
112
113 logger.info(f"Checking out latest commit on branch `{branch}`...")
114 branch_head.checkout()
115 # No specific commit hash was given, so make sure we get the latest from origin
116 # We would use repo.remotes.origin.pull() here, but that will fail in the case where someone has
117 # force-pushed to the upstream repo since the last time we did a pull. To be safe, we reset instead.
118 self.repo.head.reset(f"origin/{branch}", index=True, working_tree=True)
119 commit_hexsha = self.repo.head.reference.commit.hexsha
120 logger.info(f"Latest commit on branch `{branch}` is `{commit_hexsha}`")
121 return commit_hexsha
122
123 def diff_remote(self, branch):
124 logger.debug("Fetching from remote.")
125 self.fetch()
126
127 try:
128 self.repo.remotes.origin.refs[branch]
129 except IndexError as git_error:
130 logger.error(
131 "Branch %s does not exist at %s. %s", branch, list(self.repo.remotes.origin.urls)[0], git_error
132 )
133 raise BranchDoesNotExist(
134 f"Please create branch '{branch}' in upstream and try again."
135 f" If this is a new repo, please add a commit before syncing. {git_error}"
136 )
137
138 logger.debug("Getting diff between local branch and remote branch")
139 diff = self.repo.git.diff("--name-status", f"origin/{branch}")
140 if diff: # if diff is not empty
141 return convert_git_diff_log_to_list(diff)
142 logger.debug("No Difference")
143 return []
144
[end of nautobot/utilities/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/utilities/git.py b/nautobot/utilities/git.py
--- a/nautobot/utilities/git.py
+++ b/nautobot/utilities/git.py
@@ -25,6 +25,11 @@
"X": "Unknown",
}
+# Environment variables to set on appropriate `git` CLI calls
+GIT_ENVIRONMENT = {
+ "GIT_TERMINAL_PROMPT": "0", # never prompt for user input such as credentials - important to avoid hangs!
+}
+
def swap_status_initials(data):
"""Swap Git status initials with its equivalent."""
@@ -59,10 +64,12 @@
url (str): git repo url
clone_initially (bool): True if the repo needs to be cloned
"""
- if os.path.isdir(path):
+ if os.path.isdir(path) and os.path.isdir(os.path.join(path, ".git")):
self.repo = Repo(path=path)
elif clone_initially:
- self.repo = Repo.clone_from(url, to_path=path)
+ # Don't log `url` as it may include authentication details.
+ logger.debug("Cloning git repository to %s...", path)
+ self.repo = Repo.clone_from(url, to_path=path, env=GIT_ENVIRONMENT)
else:
self.repo = Repo.init(path)
self.repo.create_remote("origin", url=url)
@@ -71,7 +78,8 @@
self.repo.remotes.origin.set_url(url)
def fetch(self):
- self.repo.remotes.origin.fetch()
+ with self.repo.git.custom_environment(**GIT_ENVIRONMENT):
+ self.repo.remotes.origin.fetch()
def checkout(self, branch, commit_hexsha=None):
"""
| {"golden_diff": "diff --git a/nautobot/utilities/git.py b/nautobot/utilities/git.py\n--- a/nautobot/utilities/git.py\n+++ b/nautobot/utilities/git.py\n@@ -25,6 +25,11 @@\n \"X\": \"Unknown\",\n }\n \n+# Environment variables to set on appropriate `git` CLI calls\n+GIT_ENVIRONMENT = {\n+ \"GIT_TERMINAL_PROMPT\": \"0\", # never prompt for user input such as credentials - important to avoid hangs!\n+}\n+\n \n def swap_status_initials(data):\n \"\"\"Swap Git status initials with its equivalent.\"\"\"\n@@ -59,10 +64,12 @@\n url (str): git repo url\n clone_initially (bool): True if the repo needs to be cloned\n \"\"\"\n- if os.path.isdir(path):\n+ if os.path.isdir(path) and os.path.isdir(os.path.join(path, \".git\")):\n self.repo = Repo(path=path)\n elif clone_initially:\n- self.repo = Repo.clone_from(url, to_path=path)\n+ # Don't log `url` as it may include authentication details.\n+ logger.debug(\"Cloning git repository to %s...\", path)\n+ self.repo = Repo.clone_from(url, to_path=path, env=GIT_ENVIRONMENT)\n else:\n self.repo = Repo.init(path)\n self.repo.create_remote(\"origin\", url=url)\n@@ -71,7 +78,8 @@\n self.repo.remotes.origin.set_url(url)\n \n def fetch(self):\n- self.repo.remotes.origin.fetch()\n+ with self.repo.git.custom_environment(**GIT_ENVIRONMENT):\n+ self.repo.remotes.origin.fetch()\n \n def checkout(self, branch, commit_hexsha=None):\n \"\"\"\n", "issue": "Removing credentials from a previously synced GitRepository that requires them may result in hangs\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.5.21\r\n\r\n### Steps to Reproduce\r\n1. For a Git repository that requires authentication (such as a private GitHub repository), configure the `GitRepository` in Nautobot, with appropriate credentials, mark it as providing `Jobs`, and sync it successfully (which happens in the Celery worker environment).\r\n2. Edit the `GitRepository` to remove the credentials and resync it. The resync fails as expected (again, in the Celery worker environment.)\r\n3. Restart the Nautobot server (during startup, the `post_upgrade` signal will trigger a check and potential sync of all Git repositories in the Nautobot server environment in order to ensure that Jobs are properly discovered).\r\n\r\n### Expected Behavior\r\n\r\nNautobot server to start up, perhaps with logs indicating that the repository could not be synced.\r\n\r\n### Observed Behavior\r\n\r\nNautobot server startup hangs, apparently because GitPython received and didn't handle the Git `Username:` prompt and is waiting indefinitely for user input that will never come.\n", "before_files": [{"content": "\"\"\"General-purpose Git utilities.\"\"\"\n\nfrom collections import namedtuple\nimport logging\nimport os\n\nfrom git import Repo\n\n\nlogger = logging.getLogger(\"nautobot.utilities.git\")\n\n# namedtuple takes a git log diff status and its accompanying text.\nGitDiffLog = namedtuple(\"GitDiffLog\", [\"status\", \"text\"])\n\n# 'A' and 'D' status are swapped because of the way the repo.git.diff was implemented\n# e.g. 
'A' actually stands for Addition but in this case is Deletion\nGIT_STATUS_MAP = {\n \"A\": \"Deletion\",\n \"M\": \"Modification\",\n \"C\": \"Copy\",\n \"D\": \"Addition\",\n \"R\": \"Renaming\",\n \"T\": \"File Type Changed\",\n \"U\": \"File Unmerged\",\n \"X\": \"Unknown\",\n}\n\n\ndef swap_status_initials(data):\n \"\"\"Swap Git status initials with its equivalent.\"\"\"\n initial, text = data.split(\"\\t\")\n return GitDiffLog(status=GIT_STATUS_MAP.get(initial), text=text)\n\n\ndef convert_git_diff_log_to_list(logs):\n \"\"\"\n Convert Git diff log into a list splitted by \\\\n\n\n Example:\n >>> git_log = \"M\\tindex.html\\nR\\tsample.txt\"\n >>> print(convert_git_diff_log_to_list(git_log))\n [\"Modification - index.html\", \"Renaming - sample.txt\"]\n \"\"\"\n logs = logs.split(\"\\n\")\n return [swap_status_initials(line) for line in logs]\n\n\nclass BranchDoesNotExist(Exception):\n pass\n\n\nclass GitRepo:\n def __init__(self, path, url, clone_initially=True):\n \"\"\"\n Ensure that we have a clone of the given remote Git repository URL at the given local directory path.\n\n Args:\n path (str): path to git repo\n url (str): git repo url\n clone_initially (bool): True if the repo needs to be cloned\n \"\"\"\n if os.path.isdir(path):\n self.repo = Repo(path=path)\n elif clone_initially:\n self.repo = Repo.clone_from(url, to_path=path)\n else:\n self.repo = Repo.init(path)\n self.repo.create_remote(\"origin\", url=url)\n\n if url not in self.repo.remotes.origin.urls:\n self.repo.remotes.origin.set_url(url)\n\n def fetch(self):\n self.repo.remotes.origin.fetch()\n\n def checkout(self, branch, commit_hexsha=None):\n \"\"\"\n Check out the given branch, and optionally the specified commit within that branch.\n \"\"\"\n # Short-circuit logic - do we already have this commit checked out?\n if commit_hexsha and commit_hexsha == self.repo.head.commit.hexsha:\n logger.debug(f\"Commit {commit_hexsha} is already checked out.\")\n return commit_hexsha\n\n self.fetch()\n if commit_hexsha:\n # Sanity check - GitPython doesn't provide a handy API for this so we just call a raw Git command:\n # $ git branch origin/<branch> --remotes --contains <commit>\n # prints the branch name if it DOES contain the commit, and nothing if it DOES NOT contain the commit.\n # Since we did a `fetch` and not a `pull` above, we need to check for the commit in the remote origin\n # branch, not the local (not-yet-updated) branch.\n if branch not in self.repo.git.branch(f\"origin/{branch}\", \"--remotes\", \"--contains\", commit_hexsha):\n raise RuntimeError(f\"Requested to check out commit `{commit_hexsha}`, but it's not in branch {branch}!\")\n logger.info(f\"Checking out commit `{commit_hexsha}` on branch `{branch}`...\")\n self.repo.git.checkout(commit_hexsha)\n return commit_hexsha\n\n if branch in self.repo.heads:\n branch_head = self.repo.heads[branch]\n else:\n try:\n branch_head = self.repo.create_head(branch, self.repo.remotes.origin.refs[branch])\n branch_head.set_tracking_branch(self.repo.remotes.origin.refs[branch])\n except IndexError as git_error:\n logger.error(\n \"Branch %s does not exist at %s. %s\", branch, list(self.repo.remotes.origin.urls)[0], git_error\n )\n raise BranchDoesNotExist(\n f\"Please create branch '{branch}' in upstream and try again.\"\n f\" If this is a new repo, please add a commit before syncing. 
{git_error}\"\n )\n\n logger.info(f\"Checking out latest commit on branch `{branch}`...\")\n branch_head.checkout()\n # No specific commit hash was given, so make sure we get the latest from origin\n # We would use repo.remotes.origin.pull() here, but that will fail in the case where someone has\n # force-pushed to the upstream repo since the last time we did a pull. To be safe, we reset instead.\n self.repo.head.reset(f\"origin/{branch}\", index=True, working_tree=True)\n commit_hexsha = self.repo.head.reference.commit.hexsha\n logger.info(f\"Latest commit on branch `{branch}` is `{commit_hexsha}`\")\n return commit_hexsha\n\n def diff_remote(self, branch):\n logger.debug(\"Fetching from remote.\")\n self.fetch()\n\n try:\n self.repo.remotes.origin.refs[branch]\n except IndexError as git_error:\n logger.error(\n \"Branch %s does not exist at %s. %s\", branch, list(self.repo.remotes.origin.urls)[0], git_error\n )\n raise BranchDoesNotExist(\n f\"Please create branch '{branch}' in upstream and try again.\"\n f\" If this is a new repo, please add a commit before syncing. {git_error}\"\n )\n\n logger.debug(\"Getting diff between local branch and remote branch\")\n diff = self.repo.git.diff(\"--name-status\", f\"origin/{branch}\")\n if diff: # if diff is not empty\n return convert_git_diff_log_to_list(diff)\n logger.debug(\"No Difference\")\n return []\n", "path": "nautobot/utilities/git.py"}]} | 2,392 | 375 |
gh_patches_debug_36057 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-1581 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change order of technical description and list of functions in documentation
Three modules, [`kernels`](https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html), [`grouping`](https://pennylane.readthedocs.io/en/latest/code/qml_grouping.html), and [`qaoa`](https://pennylane.readthedocs.io/en/latest/code/qml_qaoa.html) have their module documentation ordered such that there is first a lengthy description of the theory, and the actual list of functions comes after. We should update the docs of these modules so that the functions appear *first*, and the technical details come afterwards (as was recently discussed in #1160). This will improve readability of the documentation and make it easier to find the details of a desired function.
</issue>
<code>
[start of pennylane/qaoa/__init__.py]
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 This module contains functionality to construct QAOA workflows in PennyLane.
16 """
17
18 from .mixers import *
19 from .cost import *
20 from .layers import *
21 import pennylane.qaoa.cycle
22
[end of pennylane/qaoa/__init__.py]
[start of pennylane/kernels/__init__.py]
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 r"""
15 This subpackage defines functions that relate to quantum kernel methods.
16 On one hand this includes functions to call a quantum kernel systematically
17 on training and test datasets to obtain the *kernel matrix*.
18 On the other hand it provides postprocessing methods for those kernel
19 matrices which can be used to mitigate device noise and sampling errors.
20
21 Given a kernel
22
23 .. math ::
24
25 k: \mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}, \quad
26 (x_1, x_2)\mapsto k(x_1, x_2)
27
28 the kernel matrix of :math:`k` on a training dataset
29 :math:`\{(x_1, y_1),\cdots (x_n, y_n)\}` with :math:`x_i\in\mathbb{R}^d`
30 and :math:`y_i\in\{-1, 1\}` is defined as
31
32 .. math ::
33
34 K_{ij} = k(x_i, x_j).
35
36 For valid kernels, this is a real symmetric positive semi-definite matrix.
37 We also define the *ideal kernel matrix* for the training dataset which
38 perfectly predicts whether two points have identical labels or not:
39
40 .. math ::
41
42 K^\ast_{ij} = y_i y_j
43
44 We can measure the similarity between :math:`K` and :math:`K^\ast`,
45 through the *kernel polarity* which can be expressed as the Frobenius inner
46 product between the two matrices:
47
48 .. math ::
49
50 \operatorname{P}(k) = \langle K^\ast, K \rangle_F = \sum_{i,j=1}^n y_i y_j k(x_i, x_j)
51
52 Additionally, there is the *kernel-target alignment*, which is the normalized
53 counterpart to the kernel polarity:
54
55 .. math ::
56
57 \operatorname{TA}(k) &= \frac{P(k)}{\lVert K^\ast \rVert_F\;\lVert K \rVert_F}\\
58 \lVert K\rVert_F &= \sqrt{\sum_{i,j=1}^n k(x_i, x_j)^2}\\
59 \lVert K^\ast\rVert_F &= \sqrt{\sum_{i,j=1}^n (y_iy_j)^2}
60
61 For datasets with different numbers of training points per class the labels are rescaled
62 by the number of datapoints in the respective class to avoid that kernel polarity and
63 kernel-target alignment are dominated by the properties of the kernel for just a single class.
64
65 Given a callable kernel function, all these quantities can readily be computed
66 using the methods in this module.
67 """
68 from .cost_functions import (
69 polarity,
70 target_alignment,
71 )
72 from .postprocessing import (
73 threshold_matrix,
74 displace_matrix,
75 flip_matrix,
76 closest_psd_matrix,
77 mitigate_depolarizing_noise,
78 )
79 from .utils import (
80 kernel_matrix,
81 square_kernel_matrix,
82 )
83
[end of pennylane/kernels/__init__.py]
</code>
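The quantities defined in the kernels docstring above map directly onto the helpers the module exports; a minimal classical sketch (the toy data, the RBF stand-in for a quantum kernel, and the exact argument order of the helpers are assumptions):

```python
import numpy as np
from pennylane.kernels import kernel_matrix, polarity, target_alignment

# Toy dataset: two points per class, labels in {-1, +1}.
X = np.array([[0.1, 0.2], [0.3, 0.1], [0.9, 0.8], [0.7, 0.9]])
y = np.array([-1, -1, 1, 1])

def rbf(x1, x2, gamma=1.0):
    # Classical stand-in for a quantum kernel k(x1, x2).
    return float(np.exp(-gamma * np.sum((np.asarray(x1) - np.asarray(x2)) ** 2)))

K = kernel_matrix(X, X, rbf)        # K_ij = k(x_i, x_j)
P = polarity(X, y, rbf)             # Frobenius inner product <K*, K>_F
TA = target_alignment(X, y, rbf)    # normalised polarity
print(K.shape, P, TA)
```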
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pennylane/kernels/__init__.py b/pennylane/kernels/__init__.py
--- a/pennylane/kernels/__init__.py
+++ b/pennylane/kernels/__init__.py
@@ -13,58 +13,8 @@
# limitations under the License.
r"""
This subpackage defines functions that relate to quantum kernel methods.
-On one hand this includes functions to call a quantum kernel systematically
-on training and test datasets to obtain the *kernel matrix*.
-On the other hand it provides postprocessing methods for those kernel
-matrices which can be used to mitigate device noise and sampling errors.
-
-Given a kernel
-
-.. math ::
-
- k: \mathbb{R}^d \times \mathbb{R}^d \to \mathbb{R}, \quad
- (x_1, x_2)\mapsto k(x_1, x_2)
-
-the kernel matrix of :math:`k` on a training dataset
-:math:`\{(x_1, y_1),\cdots (x_n, y_n)\}` with :math:`x_i\in\mathbb{R}^d`
-and :math:`y_i\in\{-1, 1\}` is defined as
-
-.. math ::
-
- K_{ij} = k(x_i, x_j).
-
-For valid kernels, this is a real symmetric positive semi-definite matrix.
-We also define the *ideal kernel matrix* for the training dataset which
-perfectly predicts whether two points have identical labels or not:
-
-.. math ::
-
- K^\ast_{ij} = y_i y_j
-
-We can measure the similarity between :math:`K` and :math:`K^\ast`,
-through the *kernel polarity* which can be expressed as the Frobenius inner
-product between the two matrices:
-
-.. math ::
-
- \operatorname{P}(k) = \langle K^\ast, K \rangle_F = \sum_{i,j=1}^n y_i y_j k(x_i, x_j)
-
-Additionally, there is the *kernel-target alignment*, which is the normalized
-counterpart to the kernel polarity:
-
-.. math ::
-
- \operatorname{TA}(k) &= \frac{P(k)}{\lVert K^\ast \rVert_F\;\lVert K \rVert_F}\\
- \lVert K\rVert_F &= \sqrt{\sum_{i,j=1}^n k(x_i, x_j)^2}\\
- \lVert K^\ast\rVert_F &= \sqrt{\sum_{i,j=1}^n (y_iy_j)^2}
-
-For datasets with different numbers of training points per class the labels are rescaled
-by the number of datapoints in the respective class to avoid that kernel polarity and
-kernel-target alignment are dominated by the properties of the kernel for just a single class.
-
-Given a callable kernel function, all these quantities can readily be computed
-using the methods in this module.
"""
+
from .cost_functions import (
polarity,
target_alignment,
diff --git a/pennylane/qaoa/__init__.py b/pennylane/qaoa/__init__.py
--- a/pennylane/qaoa/__init__.py
+++ b/pennylane/qaoa/__init__.py
@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
-This module contains functionality to construct QAOA workflows in PennyLane.
+This module provides a collection of methods that help in the construction of
+QAOA workflows.
"""
+import pennylane.qaoa.cycle
from .mixers import *
from .cost import *
from .layers import *
-import pennylane.qaoa.cycle
| {"golden_diff": "diff --git a/pennylane/kernels/__init__.py b/pennylane/kernels/__init__.py\n--- a/pennylane/kernels/__init__.py\n+++ b/pennylane/kernels/__init__.py\n@@ -13,58 +13,8 @@\n # limitations under the License.\n r\"\"\"\n This subpackage defines functions that relate to quantum kernel methods.\n-On one hand this includes functions to call a quantum kernel systematically\n-on training and test datasets to obtain the *kernel matrix*.\n-On the other hand it provides postprocessing methods for those kernel\n-matrices which can be used to mitigate device noise and sampling errors.\n-\n-Given a kernel\n-\n-.. math ::\n-\n- k: \\mathbb{R}^d \\times \\mathbb{R}^d \\to \\mathbb{R}, \\quad\n- (x_1, x_2)\\mapsto k(x_1, x_2)\n-\n-the kernel matrix of :math:`k` on a training dataset\n-:math:`\\{(x_1, y_1),\\cdots (x_n, y_n)\\}` with :math:`x_i\\in\\mathbb{R}^d`\n-and :math:`y_i\\in\\{-1, 1\\}` is defined as\n-\n-.. math ::\n-\n- K_{ij} = k(x_i, x_j).\n-\n-For valid kernels, this is a real symmetric positive semi-definite matrix.\n-We also define the *ideal kernel matrix* for the training dataset which\n-perfectly predicts whether two points have identical labels or not:\n-\n-.. math ::\n-\n- K^\\ast_{ij} = y_i y_j\n-\n-We can measure the similarity between :math:`K` and :math:`K^\\ast`,\n-through the *kernel polarity* which can be expressed as the Frobenius inner\n-product between the two matrices:\n-\n-.. math ::\n-\n- \\operatorname{P}(k) = \\langle K^\\ast, K \\rangle_F = \\sum_{i,j=1}^n y_i y_j k(x_i, x_j)\n-\n-Additionally, there is the *kernel-target alignment*, which is the normalized\n-counterpart to the kernel polarity:\n-\n-.. math ::\n-\n- \\operatorname{TA}(k) &= \\frac{P(k)}{\\lVert K^\\ast \\rVert_F\\;\\lVert K \\rVert_F}\\\\\n- \\lVert K\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n k(x_i, x_j)^2}\\\\\n- \\lVert K^\\ast\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n (y_iy_j)^2}\n-\n-For datasets with different numbers of training points per class the labels are rescaled\n-by the number of datapoints in the respective class to avoid that kernel polarity and\n-kernel-target alignment are dominated by the properties of the kernel for just a single class.\n-\n-Given a callable kernel function, all these quantities can readily be computed\n-using the methods in this module.\n \"\"\"\n+\n from .cost_functions import (\n polarity,\n target_alignment,\ndiff --git a/pennylane/qaoa/__init__.py b/pennylane/qaoa/__init__.py\n--- a/pennylane/qaoa/__init__.py\n+++ b/pennylane/qaoa/__init__.py\n@@ -12,10 +12,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n r\"\"\"\n-This module contains functionality to construct QAOA workflows in PennyLane.\n+This module provides a collection of methods that help in the construction of\n+QAOA workflows.\n \"\"\"\n \n+import pennylane.qaoa.cycle\n from .mixers import *\n from .cost import *\n from .layers import *\n-import pennylane.qaoa.cycle\n", "issue": "Change order of technical description and list of functions in documentation\nThree modules, [`kernels`](https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html), [`grouping`](https://pennylane.readthedocs.io/en/latest/code/qml_grouping.html), and [`qaoa`](https://pennylane.readthedocs.io/en/latest/code/qml_qaoa.html) have their module documentation ordered such that there is first a lengthy description of the theory, and the actual list of functions comes after. 
We should update the docs of these modules so that the functions appear *first*, and the technical details come afterwards (as was recently discussed in #1160). This will improve readability of the documentation and make it easier to find the details of a desired function.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis module contains functionality to construct QAOA workflows in PennyLane.\n\"\"\"\n\nfrom .mixers import *\nfrom .cost import *\nfrom .layers import *\nimport pennylane.qaoa.cycle\n", "path": "pennylane/qaoa/__init__.py"}, {"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis subpackage defines functions that relate to quantum kernel methods.\nOn one hand this includes functions to call a quantum kernel systematically\non training and test datasets to obtain the *kernel matrix*.\nOn the other hand it provides postprocessing methods for those kernel\nmatrices which can be used to mitigate device noise and sampling errors.\n\nGiven a kernel\n\n.. math ::\n\n k: \\mathbb{R}^d \\times \\mathbb{R}^d \\to \\mathbb{R}, \\quad\n (x_1, x_2)\\mapsto k(x_1, x_2)\n\nthe kernel matrix of :math:`k` on a training dataset\n:math:`\\{(x_1, y_1),\\cdots (x_n, y_n)\\}` with :math:`x_i\\in\\mathbb{R}^d`\nand :math:`y_i\\in\\{-1, 1\\}` is defined as\n\n.. math ::\n\n K_{ij} = k(x_i, x_j).\n\nFor valid kernels, this is a real symmetric positive semi-definite matrix.\nWe also define the *ideal kernel matrix* for the training dataset which\nperfectly predicts whether two points have identical labels or not:\n\n.. math ::\n\n K^\\ast_{ij} = y_i y_j\n\nWe can measure the similarity between :math:`K` and :math:`K^\\ast`,\nthrough the *kernel polarity* which can be expressed as the Frobenius inner\nproduct between the two matrices:\n\n.. math ::\n\n \\operatorname{P}(k) = \\langle K^\\ast, K \\rangle_F = \\sum_{i,j=1}^n y_i y_j k(x_i, x_j)\n\nAdditionally, there is the *kernel-target alignment*, which is the normalized\ncounterpart to the kernel polarity:\n\n.. 
math ::\n\n \\operatorname{TA}(k) &= \\frac{P(k)}{\\lVert K^\\ast \\rVert_F\\;\\lVert K \\rVert_F}\\\\\n \\lVert K\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n k(x_i, x_j)^2}\\\\\n \\lVert K^\\ast\\rVert_F &= \\sqrt{\\sum_{i,j=1}^n (y_iy_j)^2}\n\nFor datasets with different numbers of training points per class the labels are rescaled\nby the number of datapoints in the respective class to avoid that kernel polarity and\nkernel-target alignment are dominated by the properties of the kernel for just a single class.\n\nGiven a callable kernel function, all these quantities can readily be computed\nusing the methods in this module.\n\"\"\"\nfrom .cost_functions import (\n polarity,\n target_alignment,\n)\nfrom .postprocessing import (\n threshold_matrix,\n displace_matrix,\n flip_matrix,\n closest_psd_matrix,\n mitigate_depolarizing_noise,\n)\nfrom .utils import (\n kernel_matrix,\n square_kernel_matrix,\n)\n", "path": "pennylane/kernels/__init__.py"}]} | 1,887 | 848 |
gh_patches_debug_41388 | rasdani/github-patches | git_diff | deepset-ai__haystack-7988 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `max_retries` and `timeout` params to all `AzureOpenAI` classes
**Is your feature request related to a problem? Please describe.**
Currently all `OpenAI` related classes (e.g. `OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.
The corresponding `AzureOpenAI` classes don't always have the same params.
**Describe the solution you'd like**
It would be nice to have these params in the `AzureOpenAI` classes
**Describe alternatives you've considered**
Subclass `AzureOpenAI` and create custom components.
**Additional context**
cc @anakin87 :)
</issue>
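A sketch of the call pattern the request is asking for (the endpoint is a placeholder; `timeout` already exists on the class listed below, while `max_retries` is the parameter it does not yet accept):

```python
from haystack.components.generators.chat.azure import AzureOpenAIChatGenerator
from haystack.utils import Secret

generator = AzureOpenAIChatGenerator(
    azure_endpoint="https://your-company.azure.openai.com/",  # placeholder
    azure_deployment="gpt-35-turbo",
    api_key=Secret.from_env_var("AZURE_OPENAI_API_KEY"),
    timeout=30.0,    # already accepted by the class below
    max_retries=3,   # requested here; not yet a parameter of __init__ below
)
```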
<code>
[start of haystack/components/generators/chat/azure.py]
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 import os
6 from typing import Any, Callable, Dict, Optional
7
8 # pylint: disable=import-error
9 from openai.lib.azure import AzureOpenAI
10
11 from haystack import component, default_from_dict, default_to_dict, logging
12 from haystack.components.generators.chat import OpenAIChatGenerator
13 from haystack.dataclasses import StreamingChunk
14 from haystack.utils import Secret, deserialize_callable, deserialize_secrets_inplace, serialize_callable
15
16 logger = logging.getLogger(__name__)
17
18
19 @component
20 class AzureOpenAIChatGenerator(OpenAIChatGenerator):
21 """
22 A Chat Generator component that uses the Azure OpenAI API to generate text.
23
24 Enables text generation using OpenAI's large language models (LLMs) on Azure. It supports `gpt-4` and
25 `gpt-3.5-turbo` family of models accessed through the chat completions API endpoint.
26
27 Users can pass any text generation parameters valid for the `openai.ChatCompletion.create` method
28 directly to this component via the `generation_kwargs` parameter in `__init__` or the `generation_kwargs`
29 parameter in `run` method.
30
31 For more details on OpenAI models deployed on Azure, refer to the Microsoft
32 [documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/).
33
34 Key Features and Compatibility:
35 - Primary Compatibility: Designed to work seamlessly with the OpenAI API Chat Completion endpoint.
36 - Streaming Support: Supports streaming responses from the OpenAI API Chat Completion endpoint.
37 - Customizability: Supports all parameters supported by the OpenAI API Chat Completion endpoint.
38
39 Input and Output Format:
40 - ChatMessage Format: This component uses the ChatMessage format for structuring both input and output, ensuring
41 coherent and contextually relevant responses in chat-based text generation scenarios.
42 - Details on the ChatMessage format can be found [here](https://docs.haystack.deepset.ai/v2.0/docs/data-classes#chatmessage).
43
44
45 Usage example:
46
47 ```python
48 from haystack.components.generators.chat import AzureOpenAIGenerator
49 from haystack.dataclasses import ChatMessage
50 from haystack.utils import Secret
51
52 messages = [ChatMessage.from_user("What's Natural Language Processing?")]
53
54 client = AzureOpenAIGenerator(
55 azure_endpoint="<Your Azure endpoint e.g. `https://your-company.azure.openai.com/>",
56 api_key=Secret.from_token("<your-api-key>"),
57 azure_deployment="<this a model name, e.g. gpt-35-turbo>")
58 response = client.run(messages)
59 print(response)
60 ```
61
62 ```
63 {'replies':
64 [ChatMessage(content='Natural Language Processing (NLP) is a branch of artificial intelligence that focuses on
65 enabling computers to understand, interpret, and generate human language in a way that is useful.',
66 role=<ChatRole.ASSISTANT: 'assistant'>, name=None,
67 meta={'model': 'gpt-3.5-turbo-0613', 'index': 0, 'finish_reason': 'stop',
68 'usage': {'prompt_tokens': 15, 'completion_tokens': 36, 'total_tokens': 51}})]
69 }
70 ```
71 """
72
73 # pylint: disable=super-init-not-called
74 def __init__(
75 self,
76 azure_endpoint: Optional[str] = None,
77 api_version: Optional[str] = "2023-05-15",
78 azure_deployment: Optional[str] = "gpt-35-turbo",
79 api_key: Optional[Secret] = Secret.from_env_var("AZURE_OPENAI_API_KEY", strict=False),
80 azure_ad_token: Optional[Secret] = Secret.from_env_var("AZURE_OPENAI_AD_TOKEN", strict=False),
81 organization: Optional[str] = None,
82 streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
83 timeout: Optional[float] = None,
84 generation_kwargs: Optional[Dict[str, Any]] = None,
85 ):
86 """
87 Initialize the Azure OpenAI Chat Generator component.
88
89 :param azure_endpoint: The endpoint of the deployed model, e.g. `"https://example-resource.azure.openai.com/"`
90 :param api_version: The version of the API to use. Defaults to 2023-05-15
91 :param azure_deployment: The deployment of the model, usually the model name.
92 :param api_key: The API key to use for authentication.
93 :param azure_ad_token: [Azure Active Directory token](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)
94 :param organization: The Organization ID, defaults to `None`. See
95 [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization).
96 :param streaming_callback: A callback function that is called when a new token is received from the stream.
97 The callback function accepts StreamingChunk as an argument.
98 :param generation_kwargs: Other parameters to use for the model. These parameters are all sent directly to
99 the OpenAI endpoint. See OpenAI [documentation](https://platform.openai.com/docs/api-reference/chat) for
100 more details.
101 Some of the supported parameters:
102 - `max_tokens`: The maximum number of tokens the output text can have.
103 - `temperature`: What sampling temperature to use. Higher values mean the model will take more risks.
104 Try 0.9 for more creative applications and 0 (argmax sampling) for ones with a well-defined answer.
105 - `top_p`: An alternative to sampling with temperature, called nucleus sampling, where the model
106 considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
107 comprising the top 10% probability mass are considered.
108 - `n`: How many completions to generate for each prompt. For example, if the LLM gets 3 prompts and n is 2,
109 it will generate two completions for each of the three prompts, ending up with 6 completions in total.
110 - `stop`: One or more sequences after which the LLM should stop generating tokens.
111 - `presence_penalty`: What penalty to apply if a token is already present at all. Bigger values mean
112 the model will be less likely to repeat the same token in the text.
113 - `frequency_penalty`: What penalty to apply if a token has already been generated in the text.
114 Bigger values mean the model will be less likely to repeat the same token in the text.
115 - `logit_bias`: Add a logit bias to specific tokens. The keys of the dictionary are tokens, and the
116 values are the bias to add to that token.
117 """
118 # We intentionally do not call super().__init__ here because we only need to instantiate the client to interact
119 # with the API.
120
121 # Why is this here?
122 # AzureOpenAI init is forcing us to use an init method that takes either base_url or azure_endpoint as not
123 # None init parameters. This way we accommodate the use case where env var AZURE_OPENAI_ENDPOINT is set instead
124 # of passing it as a parameter.
125 azure_endpoint = azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT")
126 if not azure_endpoint:
127 raise ValueError("Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.")
128
129 if api_key is None and azure_ad_token is None:
130 raise ValueError("Please provide an API key or an Azure Active Directory token.")
131
132 # The check above makes mypy incorrectly infer that api_key is never None,
133 # which propagates the incorrect type.
134 self.api_key = api_key # type: ignore
135 self.azure_ad_token = azure_ad_token
136 self.generation_kwargs = generation_kwargs or {}
137 self.streaming_callback = streaming_callback
138 self.api_version = api_version
139 self.azure_endpoint = azure_endpoint
140 self.azure_deployment = azure_deployment
141 self.organization = organization
142 self.model = azure_deployment or "gpt-35-turbo"
143 self.timeout = timeout
144
145 self.client = AzureOpenAI(
146 api_version=api_version,
147 azure_endpoint=azure_endpoint,
148 azure_deployment=azure_deployment,
149 api_key=api_key.resolve_value() if api_key is not None else None,
150 azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,
151 organization=organization,
152 )
153
154 def to_dict(self) -> Dict[str, Any]:
155 """
156 Serialize this component to a dictionary.
157
158 :returns:
159 The serialized component as a dictionary.
160 """
161 callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None
162 return default_to_dict(
163 self,
164 azure_endpoint=self.azure_endpoint,
165 azure_deployment=self.azure_deployment,
166 organization=self.organization,
167 api_version=self.api_version,
168 streaming_callback=callback_name,
169 generation_kwargs=self.generation_kwargs,
170 timeout=self.timeout,
171 api_key=self.api_key.to_dict() if self.api_key is not None else None,
172 azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,
173 )
174
175 @classmethod
176 def from_dict(cls, data: Dict[str, Any]) -> "AzureOpenAIChatGenerator":
177 """
178 Deserialize this component from a dictionary.
179
180 :param data: The dictionary representation of this component.
181 :returns:
182 The deserialized component instance.
183 """
184 deserialize_secrets_inplace(data["init_parameters"], keys=["api_key", "azure_ad_token"])
185 init_params = data.get("init_parameters", {})
186 serialized_callback_handler = init_params.get("streaming_callback")
187 if serialized_callback_handler:
188 data["init_parameters"]["streaming_callback"] = deserialize_callable(serialized_callback_handler)
189 return default_from_dict(cls, data)
190
[end of haystack/components/generators/chat/azure.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/components/generators/chat/azure.py b/haystack/components/generators/chat/azure.py
--- a/haystack/components/generators/chat/azure.py
+++ b/haystack/components/generators/chat/azure.py
@@ -81,6 +81,7 @@
organization: Optional[str] = None,
streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
timeout: Optional[float] = None,
+ max_retries: Optional[int] = None,
generation_kwargs: Optional[Dict[str, Any]] = None,
):
"""
@@ -95,6 +96,10 @@
[production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization).
:param streaming_callback: A callback function that is called when a new token is received from the stream.
The callback function accepts StreamingChunk as an argument.
+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is
+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.
+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,
+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.
:param generation_kwargs: Other parameters to use for the model. These parameters are all sent directly to
the OpenAI endpoint. See OpenAI [documentation](https://platform.openai.com/docs/api-reference/chat) for
more details.
@@ -140,7 +145,8 @@
self.azure_deployment = azure_deployment
self.organization = organization
self.model = azure_deployment or "gpt-35-turbo"
- self.timeout = timeout
+ self.timeout = timeout or float(os.environ.get("OPENAI_TIMEOUT", 30.0))
+ self.max_retries = max_retries or int(os.environ.get("OPENAI_MAX_RETRIES", 5))
self.client = AzureOpenAI(
api_version=api_version,
@@ -149,6 +155,8 @@
api_key=api_key.resolve_value() if api_key is not None else None,
azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,
organization=organization,
+ timeout=self.timeout,
+ max_retries=self.max_retries,
)
def to_dict(self) -> Dict[str, Any]:
@@ -168,6 +176,7 @@
streaming_callback=callback_name,
generation_kwargs=self.generation_kwargs,
timeout=self.timeout,
+ max_retries=self.max_retries,
api_key=self.api_key.to_dict() if self.api_key is not None else None,
azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,
)
| {"golden_diff": "diff --git a/haystack/components/generators/chat/azure.py b/haystack/components/generators/chat/azure.py\n--- a/haystack/components/generators/chat/azure.py\n+++ b/haystack/components/generators/chat/azure.py\n@@ -81,6 +81,7 @@\n organization: Optional[str] = None,\n streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,\n timeout: Optional[float] = None,\n+ max_retries: Optional[int] = None,\n generation_kwargs: Optional[Dict[str, Any]] = None,\n ):\n \"\"\"\n@@ -95,6 +96,10 @@\n [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization).\n :param streaming_callback: A callback function that is called when a new token is received from the stream.\n The callback function accepts StreamingChunk as an argument.\n+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is\n+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.\n+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,\n+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.\n :param generation_kwargs: Other parameters to use for the model. These parameters are all sent directly to\n the OpenAI endpoint. See OpenAI [documentation](https://platform.openai.com/docs/api-reference/chat) for\n more details.\n@@ -140,7 +145,8 @@\n self.azure_deployment = azure_deployment\n self.organization = organization\n self.model = azure_deployment or \"gpt-35-turbo\"\n- self.timeout = timeout\n+ self.timeout = timeout or float(os.environ.get(\"OPENAI_TIMEOUT\", 30.0))\n+ self.max_retries = max_retries or int(os.environ.get(\"OPENAI_MAX_RETRIES\", 5))\n \n self.client = AzureOpenAI(\n api_version=api_version,\n@@ -149,6 +155,8 @@\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n+ timeout=self.timeout,\n+ max_retries=self.max_retries,\n )\n \n def to_dict(self) -> Dict[str, Any]:\n@@ -168,6 +176,7 @@\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n timeout=self.timeout,\n+ max_retries=self.max_retries,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n )\n", "issue": "Add `max_retries` and `timeout` params to all `AzureOpenAI` classes\n**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrently all `OpenAI` related classes (e.g. 
`OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.\r\n\r\nThe corresponding `AzureOpenAI` don't always have the same params.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIt would be nice to have these params in the `AzureOpenAI` classes\r\n\r\n**Describe alternatives you've considered**\r\n\r\nSubclass `AzureOpenAI` and create custom components.\r\n\r\n**Additional context**\r\n\r\ncc @anakin87 :)\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom typing import Any, Callable, Dict, Optional\n\n# pylint: disable=import-error\nfrom openai.lib.azure import AzureOpenAI\n\nfrom haystack import component, default_from_dict, default_to_dict, logging\nfrom haystack.components.generators.chat import OpenAIChatGenerator\nfrom haystack.dataclasses import StreamingChunk\nfrom haystack.utils import Secret, deserialize_callable, deserialize_secrets_inplace, serialize_callable\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass AzureOpenAIChatGenerator(OpenAIChatGenerator):\n \"\"\"\n A Chat Generator component that uses the Azure OpenAI API to generate text.\n\n Enables text generation using OpenAI's large language models (LLMs) on Azure. It supports `gpt-4` and\n `gpt-3.5-turbo` family of models accessed through the chat completions API endpoint.\n\n Users can pass any text generation parameters valid for the `openai.ChatCompletion.create` method\n directly to this component via the `generation_kwargs` parameter in `__init__` or the `generation_kwargs`\n parameter in `run` method.\n\n For more details on OpenAI models deployed on Azure, refer to the Microsoft\n [documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/).\n\n Key Features and Compatibility:\n - Primary Compatibility: Designed to work seamlessly with the OpenAI API Chat Completion endpoint.\n - Streaming Support: Supports streaming responses from the OpenAI API Chat Completion endpoint.\n - Customizability: Supports all parameters supported by the OpenAI API Chat Completion endpoint.\n\n Input and Output Format:\n - ChatMessage Format: This component uses the ChatMessage format for structuring both input and output, ensuring\n coherent and contextually relevant responses in chat-based text generation scenarios.\n - Details on the ChatMessage format can be found [here](https://docs.haystack.deepset.ai/v2.0/docs/data-classes#chatmessage).\n\n\n Usage example:\n\n ```python\n from haystack.components.generators.chat import AzureOpenAIGenerator\n from haystack.dataclasses import ChatMessage\n from haystack.utils import Secret\n\n messages = [ChatMessage.from_user(\"What's Natural Language Processing?\")]\n\n client = AzureOpenAIGenerator(\n azure_endpoint=\"<Your Azure endpoint e.g. `https://your-company.azure.openai.com/>\",\n api_key=Secret.from_token(\"<your-api-key>\"),\n azure_deployment=\"<this a model name, e.g. 
gpt-35-turbo>\")\n response = client.run(messages)\n print(response)\n ```\n\n ```\n {'replies':\n [ChatMessage(content='Natural Language Processing (NLP) is a branch of artificial intelligence that focuses on\n enabling computers to understand, interpret, and generate human language in a way that is useful.',\n role=<ChatRole.ASSISTANT: 'assistant'>, name=None,\n meta={'model': 'gpt-3.5-turbo-0613', 'index': 0, 'finish_reason': 'stop',\n 'usage': {'prompt_tokens': 15, 'completion_tokens': 36, 'total_tokens': 51}})]\n }\n ```\n \"\"\"\n\n # pylint: disable=super-init-not-called\n def __init__(\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n azure_deployment: Optional[str] = \"gpt-35-turbo\",\n api_key: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_API_KEY\", strict=False),\n azure_ad_token: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_AD_TOKEN\", strict=False),\n organization: Optional[str] = None,\n streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,\n timeout: Optional[float] = None,\n generation_kwargs: Optional[Dict[str, Any]] = None,\n ):\n \"\"\"\n Initialize the Azure OpenAI Chat Generator component.\n\n :param azure_endpoint: The endpoint of the deployed model, e.g. `\"https://example-resource.azure.openai.com/\"`\n :param api_version: The version of the API to use. Defaults to 2023-05-15\n :param azure_deployment: The deployment of the model, usually the model name.\n :param api_key: The API key to use for authentication.\n :param azure_ad_token: [Azure Active Directory token](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)\n :param organization: The Organization ID, defaults to `None`. See\n [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization).\n :param streaming_callback: A callback function that is called when a new token is received from the stream.\n The callback function accepts StreamingChunk as an argument.\n :param generation_kwargs: Other parameters to use for the model. These parameters are all sent directly to\n the OpenAI endpoint. See OpenAI [documentation](https://platform.openai.com/docs/api-reference/chat) for\n more details.\n Some of the supported parameters:\n - `max_tokens`: The maximum number of tokens the output text can have.\n - `temperature`: What sampling temperature to use. Higher values mean the model will take more risks.\n Try 0.9 for more creative applications and 0 (argmax sampling) for ones with a well-defined answer.\n - `top_p`: An alternative to sampling with temperature, called nucleus sampling, where the model\n considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens\n comprising the top 10% probability mass are considered.\n - `n`: How many completions to generate for each prompt. For example, if the LLM gets 3 prompts and n is 2,\n it will generate two completions for each of the three prompts, ending up with 6 completions in total.\n - `stop`: One or more sequences after which the LLM should stop generating tokens.\n - `presence_penalty`: What penalty to apply if a token is already present at all. 
Bigger values mean\n the model will be less likely to repeat the same token in the text.\n - `frequency_penalty`: What penalty to apply if a token has already been generated in the text.\n Bigger values mean the model will be less likely to repeat the same token in the text.\n - `logit_bias`: Add a logit bias to specific tokens. The keys of the dictionary are tokens, and the\n values are the bias to add to that token.\n \"\"\"\n # We intentionally do not call super().__init__ here because we only need to instantiate the client to interact\n # with the API.\n\n # Why is this here?\n # AzureOpenAI init is forcing us to use an init method that takes either base_url or azure_endpoint as not\n # None init parameters. This way we accommodate the use case where env var AZURE_OPENAI_ENDPOINT is set instead\n # of passing it as a parameter.\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n if not azure_endpoint:\n raise ValueError(\"Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.\")\n\n if api_key is None and azure_ad_token is None:\n raise ValueError(\"Please provide an API key or an Azure Active Directory token.\")\n\n # The check above makes mypy incorrectly infer that api_key is never None,\n # which propagates the incorrect type.\n self.api_key = api_key # type: ignore\n self.azure_ad_token = azure_ad_token\n self.generation_kwargs = generation_kwargs or {}\n self.streaming_callback = streaming_callback\n self.api_version = api_version\n self.azure_endpoint = azure_endpoint\n self.azure_deployment = azure_deployment\n self.organization = organization\n self.model = azure_deployment or \"gpt-35-turbo\"\n self.timeout = timeout\n\n self.client = AzureOpenAI(\n api_version=api_version,\n azure_endpoint=azure_endpoint,\n azure_deployment=azure_deployment,\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n )\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n timeout=self.timeout,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"AzureOpenAIChatGenerator\":\n \"\"\"\n Deserialize this component from a dictionary.\n\n :param data: The dictionary representation of this component.\n :returns:\n The deserialized component instance.\n \"\"\"\n deserialize_secrets_inplace(data[\"init_parameters\"], keys=[\"api_key\", \"azure_ad_token\"])\n init_params = data.get(\"init_parameters\", {})\n serialized_callback_handler = init_params.get(\"streaming_callback\")\n if serialized_callback_handler:\n data[\"init_parameters\"][\"streaming_callback\"] = deserialize_callable(serialized_callback_handler)\n return default_from_dict(cls, data)\n", "path": "haystack/components/generators/chat/azure.py"}]} | 3,296 | 649 |
gh_patches_debug_34754 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add organisation filter for maps API resources
</issue>
<code>
[start of akvo/rest/views/project_update_location.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import ProjectUpdateLocation
9 from ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer
10 from ..viewsets import BaseRSRViewSet
11
12
13 class ProjectUpdateLocationViewSet(BaseRSRViewSet):
14 """
15 API endpoint that allows organisation locations to be viewed or edited.
16 """
17 queryset = ProjectUpdateLocation.objects.all()
18 serializer_class = ProjectUpdateLocationSerializer
19
20
21 class MapProjectUpdateLocationViewSet(BaseRSRViewSet):
22
23 """Returns a resource tailored for generating a map of update locations.
24
25 Allowed parameters are:
26 limit (default 100 / max 500), and
27 location_target__project (filter on project ID)
28 """
29
30 filter_fields = ('location_target__project', )
31 max_paginate_by = 500
32 paginate_by = 100
33 queryset = ProjectUpdateLocation.objects.select_related(
34 'location_target',
35 'location_target__project').only(
36 'id', 'latitude', 'longitude',
37 'location_target__id', 'location_target__project', 'location_target__title',
38 'location_target__photo', 'location_target__video')
39 serializer_class = MapProjectUpdateLocationSerializer
40
[end of akvo/rest/views/project_update_location.py]
[start of akvo/rest/views/organisation_location.py]
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3 See more details in the license.txt file located at the root folder of the Akvo RSR module.
4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
5 """
6
7 from akvo.rsr.models import OrganisationLocation
8 from ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer
9 from ..viewsets import BaseRSRViewSet
10
11
12 class OrganisationLocationViewSet(BaseRSRViewSet):
13 """
14 API endpoint that allows organisation locations to be viewed or edited.
15 """
16 queryset = OrganisationLocation.objects.all()
17 serializer_class = OrganisationLocationSerializer
18
19
20 class MapOrganisationLocationViewSet(BaseRSRViewSet):
21
22 """Returns a resource tailored for generating a map of organisation locations.
23
24 Allowed parameters are:
25 limit (default 100 / max 500),
26 location_target (filter on organisation ID), and
27 country (filter on country ID)
28 """
29
30 filter_fields = ('location_target', 'country')
31 max_paginate_by = 500
32 paginate_by = 100
33 queryset = OrganisationLocation.objects.select_related(
34 'location_target', 'country').only(
35 'id', 'latitude', 'longitude',
36 'location_target__id', 'location_target__name',
37 'location_target__logo',
38 'country')
39 serializer_class = MapOrganisationLocationSerializer
40
[end of akvo/rest/views/organisation_location.py]
[start of akvo/rest/views/project_location.py]
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3 See more details in the license.txt file located at the root folder of the Akvo RSR module.
4 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
5 """
6
7 from akvo.rsr.models import ProjectLocation
8 from ..serializers import ProjectLocationSerializer, MapProjectLocationSerializer
9 from ..viewsets import BaseRSRViewSet
10
11
12 class ProjectLocationViewSet(BaseRSRViewSet):
13 """
14 """
15 queryset = ProjectLocation.objects.all()
16 serializer_class = ProjectLocationSerializer
17 filter_fields = ('location_target', 'country', )
18
19
20 class MapProjectLocationViewSet(BaseRSRViewSet):
21
22 """Returns a resource tailored for generating a map of project locations.
23
24 Allowed parameters are:
25 limit (default 100 / max 500),
26 location_target (filter on project ID), and
27 country (filter on country ID)
28 """
29
30 filter_fields = ('location_target', 'country')
31 max_paginate_by = 500
32 paginate_by = 100
33 queryset = ProjectLocation.objects.select_related(
34 'location_target', 'country').only(
35 'id', 'latitude', 'longitude',
36 'location_target__id', 'location_target__title',
37 'location_target__current_image',
38 'country')
39 serializer_class = MapProjectLocationSerializer
40
[end of akvo/rest/views/project_location.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/organisation_location.py b/akvo/rest/views/organisation_location.py
--- a/akvo/rest/views/organisation_location.py
+++ b/akvo/rest/views/organisation_location.py
@@ -22,9 +22,9 @@
"""Returns a resource tailored for generating a map of organisation locations.
Allowed parameters are:
- limit (default 100 / max 500),
- location_target (filter on organisation ID), and
- country (filter on country ID)
+ __limit__ (default 100 / max 500),
+ __location_target__ (filter on organisation ID), and
+ __country__ (filter on country ID)
"""
filter_fields = ('location_target', 'country')
diff --git a/akvo/rest/views/project_location.py b/akvo/rest/views/project_location.py
--- a/akvo/rest/views/project_location.py
+++ b/akvo/rest/views/project_location.py
@@ -22,12 +22,17 @@
"""Returns a resource tailored for generating a map of project locations.
Allowed parameters are:
- limit (default 100 / max 500),
- location_target (filter on project ID), and
- country (filter on country ID)
+ __limit__ (default 100 / max 500),
+ __location_target__ (filter on project ID),
+ __location_target\__partners__ (filter on organisation ID), and
+ __country__ (filter on country ID)
"""
- filter_fields = ('location_target', 'country')
+ filter_fields = (
+ 'location_target',
+ 'location_target__partners',
+ 'country'
+ )
max_paginate_by = 500
paginate_by = 100
queryset = ProjectLocation.objects.select_related(
diff --git a/akvo/rest/views/project_update_location.py b/akvo/rest/views/project_update_location.py
--- a/akvo/rest/views/project_update_location.py
+++ b/akvo/rest/views/project_update_location.py
@@ -23,11 +23,18 @@
"""Returns a resource tailored for generating a map of update locations.
Allowed parameters are:
- limit (default 100 / max 500), and
- location_target__project (filter on project ID)
+ __limit__ (default 100 / max 500),
+ __location_target\__project__ (filter on project ID),
+ __location_target\__project\__partners__
+ (filter on organisation ID of the projects' organisations),
+ __location_target\__user\__employers__ (filter on organisation ID of the users' organisations)
"""
- filter_fields = ('location_target__project', )
+ filter_fields = (
+ 'location_target__project',
+ 'location_target__project__partners',
+ 'location_target__user__employers'
+ )
max_paginate_by = 500
paginate_by = 100
queryset = ProjectUpdateLocation.objects.select_related(
| {"golden_diff": "diff --git a/akvo/rest/views/organisation_location.py b/akvo/rest/views/organisation_location.py\n--- a/akvo/rest/views/organisation_location.py\n+++ b/akvo/rest/views/organisation_location.py\n@@ -22,9 +22,9 @@\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500),\n- location_target (filter on organisation ID), and\n- country (filter on country ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target__ (filter on organisation ID), and\n+ __country__ (filter on country ID)\n \"\"\"\n \n filter_fields = ('location_target', 'country')\ndiff --git a/akvo/rest/views/project_location.py b/akvo/rest/views/project_location.py\n--- a/akvo/rest/views/project_location.py\n+++ b/akvo/rest/views/project_location.py\n@@ -22,12 +22,17 @@\n \"\"\"Returns a resource tailored for generating a map of project locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500),\n- location_target (filter on project ID), and\n- country (filter on country ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target__ (filter on project ID),\n+ __location_target\\__partners__ (filter on organisation ID), and\n+ __country__ (filter on country ID)\n \"\"\"\n \n- filter_fields = ('location_target', 'country')\n+ filter_fields = (\n+ 'location_target',\n+ 'location_target__partners',\n+ 'country'\n+ )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\ndiff --git a/akvo/rest/views/project_update_location.py b/akvo/rest/views/project_update_location.py\n--- a/akvo/rest/views/project_update_location.py\n+++ b/akvo/rest/views/project_update_location.py\n@@ -23,11 +23,18 @@\n \"\"\"Returns a resource tailored for generating a map of update locations.\n \n Allowed parameters are:\n- limit (default 100 / max 500), and\n- location_target__project (filter on project ID)\n+ __limit__ (default 100 / max 500),\n+ __location_target\\__project__ (filter on project ID),\n+ __location_target\\__project\\__partners__\n+ (filter on organisation ID of the projects' organisations),\n+ __location_target\\__user\\__employers__ (filter on organisation ID of the users' organisations)\n \"\"\"\n \n- filter_fields = ('location_target__project', )\n+ filter_fields = (\n+ 'location_target__project',\n+ 'location_target__project__partners',\n+ 'location_target__user__employers'\n+ )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n", "issue": "Add organisation filter for maps API resources\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectUpdateLocation\nfrom ..serializers import ProjectUpdateLocationSerializer, MapProjectUpdateLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectUpdateLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = ProjectUpdateLocation.objects.all()\n serializer_class = ProjectUpdateLocationSerializer\n\n\nclass MapProjectUpdateLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of update locations.\n\n Allowed parameters are:\n limit (default 100 
/ max 500), and\n location_target__project (filter on project ID)\n \"\"\"\n\n filter_fields = ('location_target__project', )\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectUpdateLocation.objects.select_related(\n 'location_target',\n 'location_target__project').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__project', 'location_target__title',\n 'location_target__photo', 'location_target__video')\n serializer_class = MapProjectUpdateLocationSerializer\n", "path": "akvo/rest/views/project_update_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import OrganisationLocation\nfrom ..serializers import OrganisationLocationSerializer, MapOrganisationLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass OrganisationLocationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisation locations to be viewed or edited.\n \"\"\"\n queryset = OrganisationLocation.objects.all()\n serializer_class = OrganisationLocationSerializer\n\n\nclass MapOrganisationLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of organisation locations.\n\n Allowed parameters are:\n limit (default 100 / max 500),\n location_target (filter on organisation ID), and\n country (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = OrganisationLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__name',\n 'location_target__logo',\n 'country')\n serializer_class = MapOrganisationLocationSerializer\n", "path": "akvo/rest/views/organisation_location.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import ProjectLocation\nfrom ..serializers import ProjectLocationSerializer, MapProjectLocationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ProjectLocationViewSet(BaseRSRViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectLocation.objects.all()\n serializer_class = ProjectLocationSerializer\n filter_fields = ('location_target', 'country', )\n\n\nclass MapProjectLocationViewSet(BaseRSRViewSet):\n\n \"\"\"Returns a resource tailored for generating a map of project locations.\n\n Allowed parameters are:\n limit (default 100 / max 500),\n location_target (filter on project ID), and\n country (filter on country ID)\n \"\"\"\n\n filter_fields = ('location_target', 'country')\n max_paginate_by = 500\n paginate_by = 100\n queryset = ProjectLocation.objects.select_related(\n 'location_target', 'country').only(\n 'id', 'latitude', 'longitude',\n 'location_target__id', 'location_target__title',\n 'location_target__current_image',\n 'country')\n serializer_class = MapProjectLocationSerializer\n", "path": "akvo/rest/views/project_location.py"}]} | 1,751 | 696 |
gh_patches_debug_23553 | rasdani/github-patches | git_diff | conda__conda-6849 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda env update fails with pip dependencies if Python version changes
**I'm submitting a...**
- [x] bug report
- [ ] feature request
**update:** only the root env appears to be affected
## Current Behavior
`conda env update -f environment.yml` fails if both of these conditions are met:
1. The Python version changes
2. There is at least one dependency to be installed with pip
The error looks like:
```
Unable to install package for pip.
Please double check and ensure you dependencies file has
the correct spelling. You might also try installing the
conda-env-pip package to see if provides the required
installer.
```
This is the same error but not the same bug as #4985. pip is installed and specified as a dependency, but switching Python version in the same transaction results in conda failing to find pip.
### Steps to Reproduce
Create an environment.yml that contains Python and at least one pip dependency (doesn't matter if it's already installed or not):
```yaml
# environment.yml
dependencies:
- python=3.5
- pip
- pip:
- wheel
```
Setup a root env with a *different* Python:
```
conda install python=3.6 pip
```
and then try to update it with the environment file
```
conda env update -n root -f environment.yml
```
## Expected Behavior
`conda env update` should succeed.
<!-- What do you think should happen? -->
## Environment Information
Run in docker with [these files](https://gist.github.com/7d234cbcf69df2c7cd68523c9327db71).
<details open><summary><code>`conda info`</code></summary><p>
<!-- between the ticks below, paste the output of 'conda info' -->
```
active environment : None
user config file : /root/.condarc
populated config files :
conda version : 4.4.8
conda-build version : not installed
python version : 3.6.2.final.0
base environment : /opt/conda (writable)
channel URLs : https://repo.continuum.io/pkgs/main/linux-64
https://repo.continuum.io/pkgs/main/noarch
https://repo.continuum.io/pkgs/free/linux-64
https://repo.continuum.io/pkgs/free/noarch
https://repo.continuum.io/pkgs/r/linux-64
https://repo.continuum.io/pkgs/r/noarch
https://repo.continuum.io/pkgs/pro/linux-64
https://repo.continuum.io/pkgs/pro/noarch
package cache : /opt/conda/pkgs
/root/.conda/pkgs
envs directories : /opt/conda/envs
/root/.conda/envs
platform : linux-64
user-agent : conda/4.4.8 requests/2.18.4 CPython/3.6.2 Linux/4.9.75-linuxkit-aufs debian/8 glibc/2.19
UID:GID : 0:0
netrc file : None
offline mode : False
```
</p></details>
conda env update fails with pip dependencies if Python version changes
**I'm submitting a...**
- [x] bug report
- [ ] feature request
**update:** only the root env appears to be affected
## Current Behavior
`conda env update -f environment.yml` fails if both of these conditions are met:
1. The Python version changes
2. There is at least one dependency to be installed with pip
The error looks like:
```
Unable to install package for pip.
Please double check and ensure you dependencies file has
the correct spelling. You might also try installing the
conda-env-pip package to see if provides the required
installer.
```
This is the same error but not the same bug as #4985. pip is installed and specified as a dependency, but switching Python version in the same transaction results in conda failing to find pip.
### Steps to Reproduce
Create an environment.yml that contains Python and at least one pip dependency (doesn't matter if it's already installed or not):
```yaml
# environment.yml
dependencies:
- python=3.5
- pip
- pip:
- wheel
```
Setup a root env with a *different* Python:
```
conda install python=3.6 pip
```
and then try to update it with the environment file
```
conda env update -n root -f environment.yml
```
## Expected Behavior
`conda env update` should succeed.
<!-- What do you think should happen? -->
## Environment Information
Run in docker with [these files](https://gist.github.com/7d234cbcf69df2c7cd68523c9327db71).
<details open><summary><code>`conda info`</code></summary><p>
<!-- between the ticks below, paste the output of 'conda info' -->
```
active environment : None
user config file : /root/.condarc
populated config files :
conda version : 4.4.8
conda-build version : not installed
python version : 3.6.2.final.0
base environment : /opt/conda (writable)
channel URLs : https://repo.continuum.io/pkgs/main/linux-64
https://repo.continuum.io/pkgs/main/noarch
https://repo.continuum.io/pkgs/free/linux-64
https://repo.continuum.io/pkgs/free/noarch
https://repo.continuum.io/pkgs/r/linux-64
https://repo.continuum.io/pkgs/r/noarch
https://repo.continuum.io/pkgs/pro/linux-64
https://repo.continuum.io/pkgs/pro/noarch
package cache : /opt/conda/pkgs
/root/.conda/pkgs
envs directories : /opt/conda/envs
/root/.conda/envs
platform : linux-64
user-agent : conda/4.4.8 requests/2.18.4 CPython/3.6.2 Linux/4.9.75-linuxkit-aufs debian/8 glibc/2.19
UID:GID : 0:0
netrc file : None
offline mode : False
```
</p></details>
</issue>
<code>
[start of conda_env/cli/main_update.py]
1 from argparse import RawDescriptionHelpFormatter
2 import os
3 import sys
4 import textwrap
5
6 from conda._vendor.auxlib.path import expand
7 from conda.cli import install as cli_install
8 from conda.cli.conda_argparse import add_parser_json, add_parser_prefix
9 from conda.misc import touch_nonadmin
10 from .common import get_prefix
11 from .. import exceptions, specs as install_specs
12 from ..exceptions import CondaEnvException
13 from ..installers.base import InvalidInstaller, get_installer
14
15 description = """
16 Update the current environment based on environment file
17 """
18
19 example = """
20 examples:
21 conda env update
22 conda env update -n=foo
23 conda env update -f=/path/to/environment.yml
24 conda env update --name=foo --file=environment.yml
25 conda env update vader/deathstar
26 """
27
28
29 def configure_parser(sub_parsers):
30 p = sub_parsers.add_parser(
31 'update',
32 formatter_class=RawDescriptionHelpFormatter,
33 description=description,
34 help=description,
35 epilog=example,
36 )
37 add_parser_prefix(p)
38 p.add_argument(
39 '-f', '--file',
40 action='store',
41 help='environment definition (default: environment.yml)',
42 default='environment.yml',
43 )
44 p.add_argument(
45 '--prune',
46 action='store_true',
47 default=False,
48 help='remove installed packages not defined in environment.yml',
49 )
50 p.add_argument(
51 '-q', '--quiet',
52 action='store_true',
53 default=False,
54 )
55 p.add_argument(
56 'remote_definition',
57 help='remote environment definition / IPython notebook',
58 action='store',
59 default=None,
60 nargs='?'
61 )
62 add_parser_json(p)
63 p.set_defaults(func='.main_update.execute')
64
65
66 def execute(args, parser):
67 name = args.remote_definition or args.name
68
69 try:
70 spec = install_specs.detect(name=name, filename=expand(args.file),
71 directory=os.getcwd())
72 env = spec.environment
73 except exceptions.SpecNotFound:
74 raise
75
76 if not (args.name or args.prefix):
77 if not env.name:
78 # Note, this is a hack fofr get_prefix that assumes argparse results
79 # TODO Refactor common.get_prefix
80 name = os.environ.get('CONDA_DEFAULT_ENV', False)
81 if not name:
82 msg = "Unable to determine environment\n\n"
83 msg += textwrap.dedent("""
84 Please re-run this command with one of the following options:
85
86 * Provide an environment name via --name or -n
87 * Re-run this command inside an activated conda environment.""").lstrip()
88 # TODO Add json support
89 raise CondaEnvException(msg)
90
91 # Note: stubbing out the args object as all of the
92 # conda.cli.common code thinks that name will always
93 # be specified.
94 args.name = env.name
95
96 prefix = get_prefix(args, search=False)
97 # CAN'T Check with this function since it assumes we will create prefix.
98 # cli_install.check_prefix(prefix, json=args.json)
99
100 # TODO, add capability
101 # common.ensure_override_channels_requires_channel(args)
102 # channel_urls = args.channel or ()
103
104 for installer_type, specs in env.dependencies.items():
105 try:
106 installer = get_installer(installer_type)
107 installer.install(prefix, specs, args, env)
108 except InvalidInstaller:
109 sys.stderr.write(textwrap.dedent("""
110 Unable to install package for {0}.
111
112 Please double check and ensure you dependencies file has
113 the correct spelling. You might also try installing the
114 conda-env-{0} package to see if provides the required
115 installer.
116 """).lstrip().format(installer_type)
117 )
118 return -1
119
120 touch_nonadmin(prefix)
121 cli_install.print_activate(args.name if args.name else prefix)
122
[end of conda_env/cli/main_update.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_env/cli/main_update.py b/conda_env/cli/main_update.py
--- a/conda_env/cli/main_update.py
+++ b/conda_env/cli/main_update.py
@@ -101,11 +101,15 @@
# common.ensure_override_channels_requires_channel(args)
# channel_urls = args.channel or ()
- for installer_type, specs in env.dependencies.items():
+ # create installers before running any of them
+ # to avoid failure to import after the file being deleted
+ # e.g. due to conda_env being upgraded or Python version switched.
+ installers = {}
+
+ for installer_type in env.dependencies:
try:
- installer = get_installer(installer_type)
- installer.install(prefix, specs, args, env)
- except InvalidInstaller:
+ installers[installer_type] = get_installer(installer_type)
+ except InvalidInstaller as e:
sys.stderr.write(textwrap.dedent("""
Unable to install package for {0}.
@@ -117,5 +121,9 @@
)
return -1
+ for installer_type, specs in env.dependencies.items():
+ installer = installers[installer_type]
+ installer.install(prefix, specs, args, env)
+
touch_nonadmin(prefix)
cli_install.print_activate(args.name if args.name else prefix)
| {"golden_diff": "diff --git a/conda_env/cli/main_update.py b/conda_env/cli/main_update.py\n--- a/conda_env/cli/main_update.py\n+++ b/conda_env/cli/main_update.py\n@@ -101,11 +101,15 @@\n # common.ensure_override_channels_requires_channel(args)\n # channel_urls = args.channel or ()\n \n- for installer_type, specs in env.dependencies.items():\n+ # create installers before running any of them\n+ # to avoid failure to import after the file being deleted\n+ # e.g. due to conda_env being upgraded or Python version switched.\n+ installers = {}\n+\n+ for installer_type in env.dependencies:\n try:\n- installer = get_installer(installer_type)\n- installer.install(prefix, specs, args, env)\n- except InvalidInstaller:\n+ installers[installer_type] = get_installer(installer_type)\n+ except InvalidInstaller as e:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n \n@@ -117,5 +121,9 @@\n )\n return -1\n \n+ for installer_type, specs in env.dependencies.items():\n+ installer = installers[installer_type]\n+ installer.install(prefix, specs, args, env)\n+\n touch_nonadmin(prefix)\n cli_install.print_activate(args.name if args.name else prefix)\n", "issue": "conda env update fails with pip dependencies if Python version changes\n**I'm submitting a...**\r\n - [x] bug report\r\n - [ ] feature request\r\n\r\n**update:** only the root env appears to be affected\r\n\r\n## Current Behavior\r\n\r\n`conda env update -f environment.yml` fails if both of these conditions are met:\r\n\r\n1. The Python version changes\r\n2. There is at least one dependency to be installed with pip\r\n\r\nThe error looks like:\r\n\r\n```\r\nUnable to install package for pip.\r\n\r\nPlease double check and ensure you dependencies file has\r\nthe correct spelling. You might also try installing the\r\nconda-env-pip package to see if provides the required\r\ninstaller.\r\n```\r\n\r\nThis is the same error but not the same bug as #4985. pip is installed and specified as a dependency, but switching Python version in the same transaction results in conda failing to find pip.\r\n\r\n### Steps to Reproduce\r\n\r\nCreate an environment.yml that contains Python and at least one pip dependency (doesn't matter if it's already installed or not):\r\n\r\n```yaml\r\n# environment.yml\r\ndependencies:\r\n - python=3.5\r\n - pip\r\n - pip:\r\n - wheel\r\n```\r\n\r\nSetup a root env with a *different* Python:\r\n\r\n```\r\nconda install python=3.6 pip\r\n```\r\n\r\nand then try to update it with the environment file\r\n\r\n```\r\nconda env update -n root -f environment.yml\r\n```\r\n\r\n\r\n## Expected Behavior\r\n\r\n`conda env update` should succeed.\r\n<!-- What do you think should happen? 
-->\r\n\r\n\r\n## Environment Information\r\n\r\nRun in docker with [these files](https://gist.github.com/7d234cbcf69df2c7cd68523c9327db71).\r\n\r\n<details open><summary><code>`conda info`</code></summary><p>\r\n<!-- between the ticks below, paste the output of 'conda info' -->\r\n\r\n```\r\n active environment : None\r\n user config file : /root/.condarc\r\n populated config files :\r\n conda version : 4.4.8\r\n conda-build version : not installed\r\n python version : 3.6.2.final.0\r\n base environment : /opt/conda (writable)\r\n channel URLs : https://repo.continuum.io/pkgs/main/linux-64\r\n https://repo.continuum.io/pkgs/main/noarch\r\n https://repo.continuum.io/pkgs/free/linux-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/linux-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/linux-64\r\n https://repo.continuum.io/pkgs/pro/noarch\r\n package cache : /opt/conda/pkgs\r\n /root/.conda/pkgs\r\n envs directories : /opt/conda/envs\r\n /root/.conda/envs\r\n platform : linux-64\r\n user-agent : conda/4.4.8 requests/2.18.4 CPython/3.6.2 Linux/4.9.75-linuxkit-aufs debian/8 glibc/2.19\r\n UID:GID : 0:0\r\n netrc file : None\r\n offline mode : False\r\n```\r\n</p></details>\r\n\r\n\nconda env update fails with pip dependencies if Python version changes\n**I'm submitting a...**\r\n - [x] bug report\r\n - [ ] feature request\r\n\r\n**update:** only the root env appears to be affected\r\n\r\n## Current Behavior\r\n\r\n`conda env update -f environment.yml` fails if both of these conditions are met:\r\n\r\n1. The Python version changes\r\n2. There is at least one dependency to be installed with pip\r\n\r\nThe error looks like:\r\n\r\n```\r\nUnable to install package for pip.\r\n\r\nPlease double check and ensure you dependencies file has\r\nthe correct spelling. You might also try installing the\r\nconda-env-pip package to see if provides the required\r\ninstaller.\r\n```\r\n\r\nThis is the same error but not the same bug as #4985. pip is installed and specified as a dependency, but switching Python version in the same transaction results in conda failing to find pip.\r\n\r\n### Steps to Reproduce\r\n\r\nCreate an environment.yml that contains Python and at least one pip dependency (doesn't matter if it's already installed or not):\r\n\r\n```yaml\r\n# environment.yml\r\ndependencies:\r\n - python=3.5\r\n - pip\r\n - pip:\r\n - wheel\r\n```\r\n\r\nSetup a root env with a *different* Python:\r\n\r\n```\r\nconda install python=3.6 pip\r\n```\r\n\r\nand then try to update it with the environment file\r\n\r\n```\r\nconda env update -n root -f environment.yml\r\n```\r\n\r\n\r\n## Expected Behavior\r\n\r\n`conda env update` should succeed.\r\n<!-- What do you think should happen? 
-->\r\n\r\n\r\n## Environment Information\r\n\r\nRun in docker with [these files](https://gist.github.com/7d234cbcf69df2c7cd68523c9327db71).\r\n\r\n<details open><summary><code>`conda info`</code></summary><p>\r\n<!-- between the ticks below, paste the output of 'conda info' -->\r\n\r\n```\r\n active environment : None\r\n user config file : /root/.condarc\r\n populated config files :\r\n conda version : 4.4.8\r\n conda-build version : not installed\r\n python version : 3.6.2.final.0\r\n base environment : /opt/conda (writable)\r\n channel URLs : https://repo.continuum.io/pkgs/main/linux-64\r\n https://repo.continuum.io/pkgs/main/noarch\r\n https://repo.continuum.io/pkgs/free/linux-64\r\n https://repo.continuum.io/pkgs/free/noarch\r\n https://repo.continuum.io/pkgs/r/linux-64\r\n https://repo.continuum.io/pkgs/r/noarch\r\n https://repo.continuum.io/pkgs/pro/linux-64\r\n https://repo.continuum.io/pkgs/pro/noarch\r\n package cache : /opt/conda/pkgs\r\n /root/.conda/pkgs\r\n envs directories : /opt/conda/envs\r\n /root/.conda/envs\r\n platform : linux-64\r\n user-agent : conda/4.4.8 requests/2.18.4 CPython/3.6.2 Linux/4.9.75-linuxkit-aufs debian/8 glibc/2.19\r\n UID:GID : 0:0\r\n netrc file : None\r\n offline mode : False\r\n```\r\n</p></details>\r\n\r\n\n", "before_files": [{"content": "from argparse import RawDescriptionHelpFormatter\nimport os\nimport sys\nimport textwrap\n\nfrom conda._vendor.auxlib.path import expand\nfrom conda.cli import install as cli_install\nfrom conda.cli.conda_argparse import add_parser_json, add_parser_prefix\nfrom conda.misc import touch_nonadmin\nfrom .common import get_prefix\nfrom .. import exceptions, specs as install_specs\nfrom ..exceptions import CondaEnvException\nfrom ..installers.base import InvalidInstaller, get_installer\n\ndescription = \"\"\"\nUpdate the current environment based on environment file\n\"\"\"\n\nexample = \"\"\"\nexamples:\n conda env update\n conda env update -n=foo\n conda env update -f=/path/to/environment.yml\n conda env update --name=foo --file=environment.yml\n conda env update vader/deathstar\n\"\"\"\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'update',\n formatter_class=RawDescriptionHelpFormatter,\n description=description,\n help=description,\n epilog=example,\n )\n add_parser_prefix(p)\n p.add_argument(\n '-f', '--file',\n action='store',\n help='environment definition (default: environment.yml)',\n default='environment.yml',\n )\n p.add_argument(\n '--prune',\n action='store_true',\n default=False,\n help='remove installed packages not defined in environment.yml',\n )\n p.add_argument(\n '-q', '--quiet',\n action='store_true',\n default=False,\n )\n p.add_argument(\n 'remote_definition',\n help='remote environment definition / IPython notebook',\n action='store',\n default=None,\n nargs='?'\n )\n add_parser_json(p)\n p.set_defaults(func='.main_update.execute')\n\n\ndef execute(args, parser):\n name = args.remote_definition or args.name\n\n try:\n spec = install_specs.detect(name=name, filename=expand(args.file),\n directory=os.getcwd())\n env = spec.environment\n except exceptions.SpecNotFound:\n raise\n\n if not (args.name or args.prefix):\n if not env.name:\n # Note, this is a hack fofr get_prefix that assumes argparse results\n # TODO Refactor common.get_prefix\n name = os.environ.get('CONDA_DEFAULT_ENV', False)\n if not name:\n msg = \"Unable to determine environment\\n\\n\"\n msg += textwrap.dedent(\"\"\"\n Please re-run this command with one of the following options:\n\n * Provide an 
environment name via --name or -n\n * Re-run this command inside an activated conda environment.\"\"\").lstrip()\n # TODO Add json support\n raise CondaEnvException(msg)\n\n # Note: stubbing out the args object as all of the\n # conda.cli.common code thinks that name will always\n # be specified.\n args.name = env.name\n\n prefix = get_prefix(args, search=False)\n # CAN'T Check with this function since it assumes we will create prefix.\n # cli_install.check_prefix(prefix, json=args.json)\n\n # TODO, add capability\n # common.ensure_override_channels_requires_channel(args)\n # channel_urls = args.channel or ()\n\n for installer_type, specs in env.dependencies.items():\n try:\n installer = get_installer(installer_type)\n installer.install(prefix, specs, args, env)\n except InvalidInstaller:\n sys.stderr.write(textwrap.dedent(\"\"\"\n Unable to install package for {0}.\n\n Please double check and ensure you dependencies file has\n the correct spelling. You might also try installing the\n conda-env-{0} package to see if provides the required\n installer.\n \"\"\").lstrip().format(installer_type)\n )\n return -1\n\n touch_nonadmin(prefix)\n cli_install.print_activate(args.name if args.name else prefix)\n", "path": "conda_env/cli/main_update.py"}]} | 3,093 | 299 |