problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
---|---|---|---|---|---|---|---|---|
stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-18.9k | stringlengths 145-5.13k | stringlengths 465-23.6k | int64 556-4.1k | int64 47-1.02k |
gh_patches_debug_8970 | rasdani/github-patches | git_diff | pyca__cryptography-5013 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test and build wheels on 3.8
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 from __future__ import absolute_import, division, print_function
8
9 import os
10 import platform
11 import sys
12 from distutils.command.build import build
13
14 import pkg_resources
15
16 import setuptools
17 from setuptools import find_packages, setup
18 from setuptools.command.install import install
19
20
21 if (
22 pkg_resources.parse_version(setuptools.__version__) <
23 pkg_resources.parse_version("18.5")
24 ):
25 raise RuntimeError(
26 "cryptography requires setuptools 18.5 or newer, please upgrade to a "
27 "newer version of setuptools"
28 )
29
30 base_dir = os.path.dirname(__file__)
31 src_dir = os.path.join(base_dir, "src")
32
33 # When executing the setup.py, we need to be able to import ourselves, this
34 # means that we need to add the src/ directory to the sys.path.
35 sys.path.insert(0, src_dir)
36
37 about = {}
38 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
39 exec(f.read(), about)
40
41
42 # `setup_requirements` must be kept in sync with `pyproject.toml`
43 setup_requirements = ["cffi>=1.8,!=1.11.3"]
44
45 if platform.python_implementation() == "PyPy":
46 if sys.pypy_version_info < (5, 4):
47 raise RuntimeError(
48 "cryptography is not compatible with PyPy < 5.4. Please upgrade "
49 "PyPy to use this library."
50 )
51
52
53 def keywords_with_side_effects(argv):
54 """
55 Get a dictionary with setup keywords that (can) have side effects.
56
57 :param argv: A list of strings with command line arguments.
58 :returns: A dictionary with keyword arguments for the ``setup()`` function.
59
60 This setup.py script uses the setuptools 'setup_requires' feature because
61 this is required by the cffi package to compile extension modules. The
62 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
63 build process as a result of setup.py invocations that don't need the cffi
64 module to be built (setup.py serves the dual purpose of exposing package
65 metadata).
66
67 All of the options listed by ``python setup.py --help`` that print
68 information should be recognized here. The commands ``clean``,
69 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
70 Any combination of these options and commands is also supported.
71
72 This function was originally based on the `setup.py script`_ of SciPy (see
73 also the discussion in `pip issue #25`_).
74
75 .. _pip issue #25: https://github.com/pypa/pip/issues/25
76 .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
77 """
78 no_setup_requires_arguments = (
79 '-h', '--help',
80 '-n', '--dry-run',
81 '-q', '--quiet',
82 '-v', '--verbose',
83 '-V', '--version',
84 '--author',
85 '--author-email',
86 '--classifiers',
87 '--contact',
88 '--contact-email',
89 '--description',
90 '--egg-base',
91 '--fullname',
92 '--help-commands',
93 '--keywords',
94 '--licence',
95 '--license',
96 '--long-description',
97 '--maintainer',
98 '--maintainer-email',
99 '--name',
100 '--no-user-cfg',
101 '--obsoletes',
102 '--platforms',
103 '--provides',
104 '--requires',
105 '--url',
106 'clean',
107 'egg_info',
108 'register',
109 'sdist',
110 'upload',
111 )
112
113 def is_short_option(argument):
114 """Check whether a command line argument is a short option."""
115 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
116
117 def expand_short_options(argument):
118 """Expand combined short options into canonical short options."""
119 return ('-' + char for char in argument[1:])
120
121 def argument_without_setup_requirements(argv, i):
122 """Check whether a command line argument needs setup requirements."""
123 if argv[i] in no_setup_requires_arguments:
124 # Simple case: An argument which is either an option or a command
125 # which doesn't need setup requirements.
126 return True
127 elif (is_short_option(argv[i]) and
128 all(option in no_setup_requires_arguments
129 for option in expand_short_options(argv[i]))):
130 # Not so simple case: Combined short options none of which need
131 # setup requirements.
132 return True
133 elif argv[i - 1:i] == ['--egg-base']:
134 # Tricky case: --egg-info takes an argument which should not make
135 # us use setup_requires (defeating the purpose of this code).
136 return True
137 else:
138 return False
139
140 if all(argument_without_setup_requirements(argv, i)
141 for i in range(1, len(argv))):
142 return {
143 "cmdclass": {
144 "build": DummyBuild,
145 "install": DummyInstall,
146 }
147 }
148 else:
149 cffi_modules = [
150 "src/_cffi_src/build_openssl.py:ffi",
151 "src/_cffi_src/build_constant_time.py:ffi",
152 "src/_cffi_src/build_padding.py:ffi",
153 ]
154
155 return {
156 "setup_requires": setup_requirements,
157 "cffi_modules": cffi_modules
158 }
159
160
161 setup_requires_error = ("Requested setup command that needs 'setup_requires' "
162 "while command line arguments implied a side effect "
163 "free command or option.")
164
165
166 class DummyBuild(build):
167 """
168 This class makes it very obvious when ``keywords_with_side_effects()`` has
169 incorrectly interpreted the command line arguments to ``setup.py build`` as
170 one of the 'side effect free' commands or options.
171 """
172
173 def run(self):
174 raise RuntimeError(setup_requires_error)
175
176
177 class DummyInstall(install):
178 """
179 This class makes it very obvious when ``keywords_with_side_effects()`` has
180 incorrectly interpreted the command line arguments to ``setup.py install``
181 as one of the 'side effect free' commands or options.
182 """
183
184 def run(self):
185 raise RuntimeError(setup_requires_error)
186
187
188 with open(os.path.join(base_dir, "README.rst")) as f:
189 long_description = f.read()
190
191
192 setup(
193 name=about["__title__"],
194 version=about["__version__"],
195
196 description=about["__summary__"],
197 long_description=long_description,
198 long_description_content_type="text/x-rst",
199 license=about["__license__"],
200 url=about["__uri__"],
201
202 author=about["__author__"],
203 author_email=about["__email__"],
204
205 classifiers=[
206 "Development Status :: 5 - Production/Stable",
207 "Intended Audience :: Developers",
208 "License :: OSI Approved :: Apache Software License",
209 "License :: OSI Approved :: BSD License",
210 "Natural Language :: English",
211 "Operating System :: MacOS :: MacOS X",
212 "Operating System :: POSIX",
213 "Operating System :: POSIX :: BSD",
214 "Operating System :: POSIX :: Linux",
215 "Operating System :: Microsoft :: Windows",
216 "Programming Language :: Python",
217 "Programming Language :: Python :: 2",
218 "Programming Language :: Python :: 2.7",
219 "Programming Language :: Python :: 3",
220 "Programming Language :: Python :: 3.4",
221 "Programming Language :: Python :: 3.5",
222 "Programming Language :: Python :: 3.6",
223 "Programming Language :: Python :: 3.7",
224 "Programming Language :: Python :: Implementation :: CPython",
225 "Programming Language :: Python :: Implementation :: PyPy",
226 "Topic :: Security :: Cryptography",
227 ],
228
229 package_dir={"": "src"},
230 packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
231 include_package_data=True,
232
233 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
234
235 install_requires=[
236 "six >= 1.4.1",
237 ] + setup_requirements,
238 extras_require={
239 ":python_version < '3'": ["enum34", "ipaddress"],
240
241 "test": [
242 "pytest>=3.6.0,!=3.9.0,!=3.9.1,!=3.9.2",
243 "pretend",
244 "iso8601",
245 "pytz",
246 "hypothesis>=1.11.4,!=3.79.2",
247 ],
248 "docs": [
249 "sphinx >= 1.6.5,!=1.8.0",
250 "sphinx_rtd_theme",
251 ],
252 "docstest": [
253 "doc8",
254 "pyenchant >= 1.6.11",
255 "twine >= 1.12.0",
256 "sphinxcontrib-spelling >= 4.0.1",
257 ],
258 "pep8test": [
259 "flake8",
260 "flake8-import-order",
261 "pep8-naming",
262 ],
263 # This extra is for the U-label support that was deprecated in
264 # cryptography 2.1. If you need this deprecated path install with
265 # pip install cryptography[idna]
266 "idna": [
267 "idna >= 2.1",
268 ]
269 },
270
271 # for cffi
272 zip_safe=False,
273 ext_package="cryptography.hazmat.bindings",
274 **keywords_with_side_effects(sys.argv)
275 )
276
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -221,6 +221,7 @@
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security :: Cryptography",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -221,6 +221,7 @@\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n", "issue": "Test and build wheels on 3.8\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nimport setuptools\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\n\n\nif (\n pkg_resources.parse_version(setuptools.__version__) <\n pkg_resources.parse_version(\"18.5\")\n):\n raise RuntimeError(\n \"cryptography requires setuptools 18.5 or newer, please upgrade to a \"\n \"newer version of setuptools\"\n )\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\n# `setup_requirements` must be kept in sync with `pyproject.toml`\nsetup_requirements = [\"cffi>=1.8,!=1.11.3\"]\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (5, 4):\n raise RuntimeError(\n \"cryptography is not compatible with PyPy < 5.4. Please upgrade \"\n \"PyPy to use this library.\"\n )\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n\n return {\n \"setup_requires\": setup_requirements,\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI 
Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n\n install_requires=[\n \"six >= 1.4.1\",\n ] + setup_requirements,\n extras_require={\n \":python_version < '3'\": [\"enum34\", \"ipaddress\"],\n\n \"test\": [\n \"pytest>=3.6.0,!=3.9.0,!=3.9.1,!=3.9.2\",\n \"pretend\",\n \"iso8601\",\n \"pytz\",\n \"hypothesis>=1.11.4,!=3.79.2\",\n ],\n \"docs\": [\n \"sphinx >= 1.6.5,!=1.8.0\",\n \"sphinx_rtd_theme\",\n ],\n \"docstest\": [\n \"doc8\",\n \"pyenchant >= 1.6.11\",\n \"twine >= 1.12.0\",\n \"sphinxcontrib-spelling >= 4.0.1\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n # This extra is for the U-label support that was deprecated in\n # cryptography 2.1. If you need this deprecated path install with\n # pip install cryptography[idna]\n \"idna\": [\n \"idna >= 2.1\",\n ]\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]} | 3,371 | 116 |
gh_patches_debug_27720 | rasdani/github-patches | git_diff | scikit-hep__pyhf-383 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use Binder Build API for Builds in PRs
# Description
After a discussion with @minrk and @betatim on the jupyterhub/binder Gitter, it was made clear that the use of Selenium in [`binder/trigger_binder.py`](https://github.com/diana-hep/pyhf/blob/c81f6007309f4c13241f9efac187594337d0bd08/binder/trigger_binder.py) (and the script itself) is unnecessary. Instead a simple API call can be made just using Python's `webbrowser` with an [endpoint of the form `https://mybinder.org/build/gh/owner/repo/ref`](https://gitter.im/jupyterhub/binder?at=5c2f87038dafa715c73ff54f) as can be [seen in the Binder Hub demo](https://github.com/jupyterhub/binderhub/blob/9ca8fa68bb8b69c6a2736f2275279583073f314f/examples/binder-api.py#L28) (thanks Tim for the link).
So, for example
```
python -m webbrowser "https://mybinder.org/build/gh/diana-hep/pyhf/master"
```
So asking [WWKHTD](https://github.com/kelseyhightower/nocode), this means that `binder/trigger_binder.py` is unnecessary and should be removed and `.travis.yml` should be updated to use the API calls.
</issue>
<code>
[start of binder/trigger_binder.py]
1 #!/usr/bin/env python
2
3 import argparse
4 from contextlib import contextmanager
5 from selenium import webdriver
6 from selenium.webdriver.chrome.options import Options
7 from selenium.webdriver.support.ui import WebDriverWait
8 from selenium.webdriver.support.expected_conditions import staleness_of
9
10
11 class SeleniumSession:
12 def __init__(self, args):
13 self.options = Options()
14 self.options.set_headless()
15 self.options.add_argument('--no-sandbox')
16 if args.chromedriver_path is not None:
17 self.browser = webdriver.Chrome(
18 args.chromedriver_path, chrome_options=self.options
19 )
20 else:
21 self.browser = webdriver.Chrome(chrome_options=self.options)
22
23 @contextmanager
24 def wait_for_page_load(self, timeout=20):
25 old_page = self.browser.find_element_by_tag_name('html')
26 yield
27 WebDriverWait(self.browser, timeout).until(staleness_of(old_page))
28
29 def trigger_binder(self, url):
30 with self.wait_for_page_load():
31 self.browser.get(url)
32
33
34 def main(args):
35 driver = SeleniumSession(args)
36 if args.is_verbose:
37 print('Chrome Headless Browser Invoked')
38 driver.trigger_binder(args.url)
39
40
41 if __name__ == '__main__':
42 parser = argparse.ArgumentParser()
43 parser.add_argument(
44 '-v',
45 '--verbose',
46 dest='is_verbose',
47 action='store_true',
48 help='Print out more information',
49 )
50 parser.add_argument(
51 '--chromedriver-path',
52 dest='chromedriver_path',
53 type=str,
54 default=None,
55 help='System path to ChromeDriver',
56 )
57 parser.add_argument(
58 '--url', dest='url', type=str, default=None, help='URL for Selinium to open'
59 )
60 args = parser.parse_args()
61
62 main(args)
63
[end of binder/trigger_binder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/binder/trigger_binder.py b/binder/trigger_binder.py
deleted file mode 100644
--- a/binder/trigger_binder.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-from contextlib import contextmanager
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support.expected_conditions import staleness_of
-
-
-class SeleniumSession:
- def __init__(self, args):
- self.options = Options()
- self.options.set_headless()
- self.options.add_argument('--no-sandbox')
- if args.chromedriver_path is not None:
- self.browser = webdriver.Chrome(
- args.chromedriver_path, chrome_options=self.options
- )
- else:
- self.browser = webdriver.Chrome(chrome_options=self.options)
-
- @contextmanager
- def wait_for_page_load(self, timeout=20):
- old_page = self.browser.find_element_by_tag_name('html')
- yield
- WebDriverWait(self.browser, timeout).until(staleness_of(old_page))
-
- def trigger_binder(self, url):
- with self.wait_for_page_load():
- self.browser.get(url)
-
-
-def main(args):
- driver = SeleniumSession(args)
- if args.is_verbose:
- print('Chrome Headless Browser Invoked')
- driver.trigger_binder(args.url)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-v',
- '--verbose',
- dest='is_verbose',
- action='store_true',
- help='Print out more information',
- )
- parser.add_argument(
- '--chromedriver-path',
- dest='chromedriver_path',
- type=str,
- default=None,
- help='System path to ChromeDriver',
- )
- parser.add_argument(
- '--url', dest='url', type=str, default=None, help='URL for Selinium to open'
- )
- args = parser.parse_args()
-
- main(args)
| {"golden_diff": "diff --git a/binder/trigger_binder.py b/binder/trigger_binder.py\ndeleted file mode 100644\n--- a/binder/trigger_binder.py\n+++ /dev/null\n@@ -1,62 +0,0 @@\n-#!/usr/bin/env python\n-\n-import argparse\n-from contextlib import contextmanager\n-from selenium import webdriver\n-from selenium.webdriver.chrome.options import Options\n-from selenium.webdriver.support.ui import WebDriverWait\n-from selenium.webdriver.support.expected_conditions import staleness_of\n-\n-\n-class SeleniumSession:\n- def __init__(self, args):\n- self.options = Options()\n- self.options.set_headless()\n- self.options.add_argument('--no-sandbox')\n- if args.chromedriver_path is not None:\n- self.browser = webdriver.Chrome(\n- args.chromedriver_path, chrome_options=self.options\n- )\n- else:\n- self.browser = webdriver.Chrome(chrome_options=self.options)\n-\n- @contextmanager\n- def wait_for_page_load(self, timeout=20):\n- old_page = self.browser.find_element_by_tag_name('html')\n- yield\n- WebDriverWait(self.browser, timeout).until(staleness_of(old_page))\n-\n- def trigger_binder(self, url):\n- with self.wait_for_page_load():\n- self.browser.get(url)\n-\n-\n-def main(args):\n- driver = SeleniumSession(args)\n- if args.is_verbose:\n- print('Chrome Headless Browser Invoked')\n- driver.trigger_binder(args.url)\n-\n-\n-if __name__ == '__main__':\n- parser = argparse.ArgumentParser()\n- parser.add_argument(\n- '-v',\n- '--verbose',\n- dest='is_verbose',\n- action='store_true',\n- help='Print out more information',\n- )\n- parser.add_argument(\n- '--chromedriver-path',\n- dest='chromedriver_path',\n- type=str,\n- default=None,\n- help='System path to ChromeDriver',\n- )\n- parser.add_argument(\n- '--url', dest='url', type=str, default=None, help='URL for Selinium to open'\n- )\n- args = parser.parse_args()\n-\n- main(args)\n", "issue": "Use Binder Build API for Builds in PRs\n# Description\r\n\r\nAfter a discussion with @minrk and @betatim on the jupyterhub/binder Gitter, it was made clear that the use of Selenium in [`binder/trigger_binder.py`](https://github.com/diana-hep/pyhf/blob/c81f6007309f4c13241f9efac187594337d0bd08/binder/trigger_binder.py) (and the script itself) is unnecessary. 
Instead a simple API call can be made just using Python's `webbrowser` with an [endpoint of the form `https://mybinder.org/build/gh/owner/repo/ref`](https://gitter.im/jupyterhub/binder?at=5c2f87038dafa715c73ff54f) as can be [seen in the Binder Hub demo](https://github.com/jupyterhub/binderhub/blob/9ca8fa68bb8b69c6a2736f2275279583073f314f/examples/binder-api.py#L28) (thanks Tim for the link).\r\n\r\nSo, for example\r\n\r\n```\r\npython -m webbrowser \"https://mybinder.org/build/gh/diana-hep/pyhf/master\"\r\n```\r\n\r\nSo asking [WWKHTD](https://github.com/kelseyhightower/nocode), this means that `binder/trigger_binder.py` is unnecessary and should be removed and `.travis.yml` should be updated to use the API calls.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nfrom contextlib import contextmanager\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.expected_conditions import staleness_of\n\n\nclass SeleniumSession:\n def __init__(self, args):\n self.options = Options()\n self.options.set_headless()\n self.options.add_argument('--no-sandbox')\n if args.chromedriver_path is not None:\n self.browser = webdriver.Chrome(\n args.chromedriver_path, chrome_options=self.options\n )\n else:\n self.browser = webdriver.Chrome(chrome_options=self.options)\n\n @contextmanager\n def wait_for_page_load(self, timeout=20):\n old_page = self.browser.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.browser, timeout).until(staleness_of(old_page))\n\n def trigger_binder(self, url):\n with self.wait_for_page_load():\n self.browser.get(url)\n\n\ndef main(args):\n driver = SeleniumSession(args)\n if args.is_verbose:\n print('Chrome Headless Browser Invoked')\n driver.trigger_binder(args.url)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-v',\n '--verbose',\n dest='is_verbose',\n action='store_true',\n help='Print out more information',\n )\n parser.add_argument(\n '--chromedriver-path',\n dest='chromedriver_path',\n type=str,\n default=None,\n help='System path to ChromeDriver',\n )\n parser.add_argument(\n '--url', dest='url', type=str, default=None, help='URL for Selinium to open'\n )\n args = parser.parse_args()\n\n main(args)\n", "path": "binder/trigger_binder.py"}]} | 1,387 | 476 |
gh_patches_debug_42929 | rasdani/github-patches | git_diff | goauthentik__authentik-7341 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Listing all permissions
**Describe the bug**
When attempting to list all the permissions in order to select which ones to assign to a permission, hangs when gets to 141+
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Directory'
2. Click on 'Roles'
3. Select (or create a new) role.
4. Click on 'Assign Permissions'
5. Click on '+' to Select Permissions to Grant
6. Scroll through permissions by clicking on '>' until arrive at 121 - 140
**Expected behavior**
Continue displaying all permissions until reaching end of list (Displays 377 Permissions)
**Screenshots**

**Logs**
[logs](https://pastebin.com/v4Fd6PDN)
**Version and Deployment (please complete the following information):**
- authentik version: 2023.10.1
- Deployment: docker-compose
**Additional context**
Upgraded from 2023.8 -> 2023.10.0 -> 2023.10.1
Can provide more logs or anything else, if needed.
</issue>
<code>
[start of authentik/rbac/api/rbac.py]
1 """common RBAC serializers"""
2 from django.apps import apps
3 from django.contrib.auth.models import Permission
4 from django.db.models import QuerySet
5 from django_filters.filters import ModelChoiceFilter
6 from django_filters.filterset import FilterSet
7 from rest_framework.exceptions import ValidationError
8 from rest_framework.fields import (
9 CharField,
10 ChoiceField,
11 ListField,
12 ReadOnlyField,
13 SerializerMethodField,
14 )
15 from rest_framework.serializers import ModelSerializer
16 from rest_framework.viewsets import ReadOnlyModelViewSet
17
18 from authentik.core.api.utils import PassiveSerializer
19 from authentik.core.models import User
20 from authentik.lib.validators import RequiredTogetherValidator
21 from authentik.policies.event_matcher.models import model_choices
22 from authentik.rbac.models import Role
23
24
25 class PermissionSerializer(ModelSerializer):
26 """Global permission"""
27
28 app_label = ReadOnlyField(source="content_type.app_label")
29 app_label_verbose = SerializerMethodField()
30 model = ReadOnlyField(source="content_type.model")
31 model_verbose = SerializerMethodField()
32
33 def get_app_label_verbose(self, instance: Permission) -> str:
34 """Human-readable app label"""
35 return apps.get_app_config(instance.content_type.app_label).verbose_name
36
37 def get_model_verbose(self, instance: Permission) -> str:
38 """Human-readable model name"""
39 return apps.get_model(
40 instance.content_type.app_label, instance.content_type.model
41 )._meta.verbose_name
42
43 class Meta:
44 model = Permission
45 fields = [
46 "id",
47 "name",
48 "codename",
49 "model",
50 "app_label",
51 "app_label_verbose",
52 "model_verbose",
53 ]
54
55
56 class PermissionFilter(FilterSet):
57 """Filter permissions"""
58
59 role = ModelChoiceFilter(queryset=Role.objects.all(), method="filter_role")
60 user = ModelChoiceFilter(queryset=User.objects.all())
61
62 def filter_role(self, queryset: QuerySet, name, value: Role) -> QuerySet:
63 """Filter permissions based on role"""
64 return queryset.filter(group__role=value)
65
66 class Meta:
67 model = Permission
68 fields = [
69 "codename",
70 "content_type__model",
71 "content_type__app_label",
72 "role",
73 "user",
74 ]
75
76
77 class RBACPermissionViewSet(ReadOnlyModelViewSet):
78 """Read-only list of all permissions, filterable by model and app"""
79
80 queryset = Permission.objects.none()
81 serializer_class = PermissionSerializer
82 ordering = ["name"]
83 filterset_class = PermissionFilter
84 search_fields = [
85 "codename",
86 "content_type__model",
87 "content_type__app_label",
88 ]
89
90 def get_queryset(self) -> QuerySet:
91 return (
92 Permission.objects.all()
93 .select_related("content_type")
94 .filter(
95 content_type__app_label__startswith="authentik",
96 )
97 )
98
99
100 class PermissionAssignSerializer(PassiveSerializer):
101 """Request to assign a new permission"""
102
103 permissions = ListField(child=CharField())
104 model = ChoiceField(choices=model_choices(), required=False)
105 object_pk = CharField(required=False)
106
107 validators = [RequiredTogetherValidator(fields=["model", "object_pk"])]
108
109 def validate(self, attrs: dict) -> dict:
110 model_instance = None
111 # Check if we're setting an object-level perm or global
112 model = attrs.get("model")
113 object_pk = attrs.get("object_pk")
114 if model and object_pk:
115 model = apps.get_model(attrs["model"])
116 model_instance = model.objects.filter(pk=attrs["object_pk"]).first()
117 attrs["model_instance"] = model_instance
118 if attrs.get("model"):
119 return attrs
120 permissions = attrs.get("permissions", [])
121 if not all("." in perm for perm in permissions):
122 raise ValidationError(
123 {
124 "permissions": (
125 "When assigning global permissions, codename must be given as "
126 "app_label.codename"
127 )
128 }
129 )
130 return attrs
131
[end of authentik/rbac/api/rbac.py]
[start of authentik/rbac/api/rbac_roles.py]
1 """common RBAC serializers"""
2 from typing import Optional
3
4 from django.apps import apps
5 from django_filters.filters import UUIDFilter
6 from django_filters.filterset import FilterSet
7 from guardian.models import GroupObjectPermission
8 from guardian.shortcuts import get_objects_for_group
9 from rest_framework.fields import SerializerMethodField
10 from rest_framework.mixins import ListModelMixin
11 from rest_framework.viewsets import GenericViewSet
12
13 from authentik.api.pagination import SmallerPagination
14 from authentik.rbac.api.rbac_assigned_by_roles import RoleObjectPermissionSerializer
15
16
17 class ExtraRoleObjectPermissionSerializer(RoleObjectPermissionSerializer):
18 """User permission with additional object-related data"""
19
20 app_label_verbose = SerializerMethodField()
21 model_verbose = SerializerMethodField()
22
23 object_description = SerializerMethodField()
24
25 def get_app_label_verbose(self, instance: GroupObjectPermission) -> str:
26 """Get app label from permission's model"""
27 return apps.get_app_config(instance.content_type.app_label).verbose_name
28
29 def get_model_verbose(self, instance: GroupObjectPermission) -> str:
30 """Get model label from permission's model"""
31 return apps.get_model(
32 instance.content_type.app_label, instance.content_type.model
33 )._meta.verbose_name
34
35 def get_object_description(self, instance: GroupObjectPermission) -> Optional[str]:
36 """Get model description from attached model. This operation takes at least
37 one additional query, and the description is only shown if the user/role has the
38 view_ permission on the object"""
39 app_label = instance.content_type.app_label
40 model = instance.content_type.model
41 model_class = apps.get_model(app_label, model)
42 objects = get_objects_for_group(instance.group, f"{app_label}.view_{model}", model_class)
43 obj = objects.first()
44 if not obj:
45 return None
46 return str(obj)
47
48 class Meta(RoleObjectPermissionSerializer.Meta):
49 fields = RoleObjectPermissionSerializer.Meta.fields + [
50 "app_label_verbose",
51 "model_verbose",
52 "object_description",
53 ]
54
55
56 class RolePermissionFilter(FilterSet):
57 """Role permission filter"""
58
59 uuid = UUIDFilter("group__role__uuid", required=True)
60
61
62 class RolePermissionViewSet(ListModelMixin, GenericViewSet):
63 """Get a role's assigned object permissions"""
64
65 serializer_class = ExtraRoleObjectPermissionSerializer
66 ordering = ["group__role__name"]
67 pagination_class = SmallerPagination
68 # The filtering is done in the filterset,
69 # which has a required filter that does the heavy lifting
70 queryset = GroupObjectPermission.objects.select_related("content_type", "group__role").all()
71 filterset_class = RolePermissionFilter
72
[end of authentik/rbac/api/rbac_roles.py]
[start of authentik/rbac/api/rbac_users.py]
1 """common RBAC serializers"""
2 from typing import Optional
3
4 from django.apps import apps
5 from django_filters.filters import NumberFilter
6 from django_filters.filterset import FilterSet
7 from guardian.models import UserObjectPermission
8 from guardian.shortcuts import get_objects_for_user
9 from rest_framework.fields import SerializerMethodField
10 from rest_framework.mixins import ListModelMixin
11 from rest_framework.viewsets import GenericViewSet
12
13 from authentik.api.pagination import SmallerPagination
14 from authentik.rbac.api.rbac_assigned_by_users import UserObjectPermissionSerializer
15
16
17 class ExtraUserObjectPermissionSerializer(UserObjectPermissionSerializer):
18 """User permission with additional object-related data"""
19
20 app_label_verbose = SerializerMethodField()
21 model_verbose = SerializerMethodField()
22
23 object_description = SerializerMethodField()
24
25 def get_app_label_verbose(self, instance: UserObjectPermission) -> str:
26 """Get app label from permission's model"""
27 return apps.get_app_config(instance.content_type.app_label).verbose_name
28
29 def get_model_verbose(self, instance: UserObjectPermission) -> str:
30 """Get model label from permission's model"""
31 return apps.get_model(
32 instance.content_type.app_label, instance.content_type.model
33 )._meta.verbose_name
34
35 def get_object_description(self, instance: UserObjectPermission) -> Optional[str]:
36 """Get model description from attached model. This operation takes at least
37 one additional query, and the description is only shown if the user/role has the
38 view_ permission on the object"""
39 app_label = instance.content_type.app_label
40 model = instance.content_type.model
41 model_class = apps.get_model(app_label, model)
42 objects = get_objects_for_user(instance.user, f"{app_label}.view_{model}", model_class)
43 obj = objects.first()
44 if not obj:
45 return None
46 return str(obj)
47
48 class Meta(UserObjectPermissionSerializer.Meta):
49 fields = UserObjectPermissionSerializer.Meta.fields + [
50 "app_label_verbose",
51 "model_verbose",
52 "object_description",
53 ]
54
55
56 class UserPermissionFilter(FilterSet):
57 """User-assigned permission filter"""
58
59 user_id = NumberFilter("user__id", required=True)
60
61
62 class UserPermissionViewSet(ListModelMixin, GenericViewSet):
63 """Get a users's assigned object permissions"""
64
65 serializer_class = ExtraUserObjectPermissionSerializer
66 ordering = ["user__username"]
67 pagination_class = SmallerPagination
68 # The filtering is done in the filterset,
69 # which has a required filter that does the heavy lifting
70 queryset = UserObjectPermission.objects.select_related("content_type", "user").all()
71 filterset_class = UserPermissionFilter
72
[end of authentik/rbac/api/rbac_users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/rbac/api/rbac.py b/authentik/rbac/api/rbac.py
--- a/authentik/rbac/api/rbac.py
+++ b/authentik/rbac/api/rbac.py
@@ -32,13 +32,19 @@
def get_app_label_verbose(self, instance: Permission) -> str:
"""Human-readable app label"""
- return apps.get_app_config(instance.content_type.app_label).verbose_name
+ try:
+ return apps.get_app_config(instance.content_type.app_label).verbose_name
+ except LookupError:
+ return f"{instance.content_type.app_label}.{instance.content_type.model}"
def get_model_verbose(self, instance: Permission) -> str:
"""Human-readable model name"""
- return apps.get_model(
- instance.content_type.app_label, instance.content_type.model
- )._meta.verbose_name
+ try:
+ return apps.get_model(
+ instance.content_type.app_label, instance.content_type.model
+ )._meta.verbose_name
+ except LookupError:
+ return f"{instance.content_type.app_label}.{instance.content_type.model}"
class Meta:
model = Permission
diff --git a/authentik/rbac/api/rbac_roles.py b/authentik/rbac/api/rbac_roles.py
--- a/authentik/rbac/api/rbac_roles.py
+++ b/authentik/rbac/api/rbac_roles.py
@@ -28,9 +28,12 @@
def get_model_verbose(self, instance: GroupObjectPermission) -> str:
"""Get model label from permission's model"""
- return apps.get_model(
- instance.content_type.app_label, instance.content_type.model
- )._meta.verbose_name
+ try:
+ return apps.get_model(
+ instance.content_type.app_label, instance.content_type.model
+ )._meta.verbose_name
+ except LookupError:
+ return f"{instance.content_type.app_label}.{instance.content_type.model}"
def get_object_description(self, instance: GroupObjectPermission) -> Optional[str]:
"""Get model description from attached model. This operation takes at least
@@ -38,7 +41,10 @@
view_ permission on the object"""
app_label = instance.content_type.app_label
model = instance.content_type.model
- model_class = apps.get_model(app_label, model)
+ try:
+ model_class = apps.get_model(app_label, model)
+ except LookupError:
+ return None
objects = get_objects_for_group(instance.group, f"{app_label}.view_{model}", model_class)
obj = objects.first()
if not obj:
diff --git a/authentik/rbac/api/rbac_users.py b/authentik/rbac/api/rbac_users.py
--- a/authentik/rbac/api/rbac_users.py
+++ b/authentik/rbac/api/rbac_users.py
@@ -28,9 +28,12 @@
def get_model_verbose(self, instance: UserObjectPermission) -> str:
"""Get model label from permission's model"""
- return apps.get_model(
- instance.content_type.app_label, instance.content_type.model
- )._meta.verbose_name
+ try:
+ return apps.get_model(
+ instance.content_type.app_label, instance.content_type.model
+ )._meta.verbose_name
+ except LookupError:
+ return f"{instance.content_type.app_label}.{instance.content_type.model}"
def get_object_description(self, instance: UserObjectPermission) -> Optional[str]:
"""Get model description from attached model. This operation takes at least
@@ -38,7 +41,10 @@
view_ permission on the object"""
app_label = instance.content_type.app_label
model = instance.content_type.model
- model_class = apps.get_model(app_label, model)
+ try:
+ model_class = apps.get_model(app_label, model)
+ except LookupError:
+ return None
objects = get_objects_for_user(instance.user, f"{app_label}.view_{model}", model_class)
obj = objects.first()
if not obj:
| {"golden_diff": "diff --git a/authentik/rbac/api/rbac.py b/authentik/rbac/api/rbac.py\n--- a/authentik/rbac/api/rbac.py\n+++ b/authentik/rbac/api/rbac.py\n@@ -32,13 +32,19 @@\n \n def get_app_label_verbose(self, instance: Permission) -> str:\n \"\"\"Human-readable app label\"\"\"\n- return apps.get_app_config(instance.content_type.app_label).verbose_name\n+ try:\n+ return apps.get_app_config(instance.content_type.app_label).verbose_name\n+ except LookupError:\n+ return f\"{instance.content_type.app_label}.{instance.content_type.model}\"\n \n def get_model_verbose(self, instance: Permission) -> str:\n \"\"\"Human-readable model name\"\"\"\n- return apps.get_model(\n- instance.content_type.app_label, instance.content_type.model\n- )._meta.verbose_name\n+ try:\n+ return apps.get_model(\n+ instance.content_type.app_label, instance.content_type.model\n+ )._meta.verbose_name\n+ except LookupError:\n+ return f\"{instance.content_type.app_label}.{instance.content_type.model}\"\n \n class Meta:\n model = Permission\ndiff --git a/authentik/rbac/api/rbac_roles.py b/authentik/rbac/api/rbac_roles.py\n--- a/authentik/rbac/api/rbac_roles.py\n+++ b/authentik/rbac/api/rbac_roles.py\n@@ -28,9 +28,12 @@\n \n def get_model_verbose(self, instance: GroupObjectPermission) -> str:\n \"\"\"Get model label from permission's model\"\"\"\n- return apps.get_model(\n- instance.content_type.app_label, instance.content_type.model\n- )._meta.verbose_name\n+ try:\n+ return apps.get_model(\n+ instance.content_type.app_label, instance.content_type.model\n+ )._meta.verbose_name\n+ except LookupError:\n+ return f\"{instance.content_type.app_label}.{instance.content_type.model}\"\n \n def get_object_description(self, instance: GroupObjectPermission) -> Optional[str]:\n \"\"\"Get model description from attached model. This operation takes at least\n@@ -38,7 +41,10 @@\n view_ permission on the object\"\"\"\n app_label = instance.content_type.app_label\n model = instance.content_type.model\n- model_class = apps.get_model(app_label, model)\n+ try:\n+ model_class = apps.get_model(app_label, model)\n+ except LookupError:\n+ return None\n objects = get_objects_for_group(instance.group, f\"{app_label}.view_{model}\", model_class)\n obj = objects.first()\n if not obj:\ndiff --git a/authentik/rbac/api/rbac_users.py b/authentik/rbac/api/rbac_users.py\n--- a/authentik/rbac/api/rbac_users.py\n+++ b/authentik/rbac/api/rbac_users.py\n@@ -28,9 +28,12 @@\n \n def get_model_verbose(self, instance: UserObjectPermission) -> str:\n \"\"\"Get model label from permission's model\"\"\"\n- return apps.get_model(\n- instance.content_type.app_label, instance.content_type.model\n- )._meta.verbose_name\n+ try:\n+ return apps.get_model(\n+ instance.content_type.app_label, instance.content_type.model\n+ )._meta.verbose_name\n+ except LookupError:\n+ return f\"{instance.content_type.app_label}.{instance.content_type.model}\"\n \n def get_object_description(self, instance: UserObjectPermission) -> Optional[str]:\n \"\"\"Get model description from attached model. 
This operation takes at least\n@@ -38,7 +41,10 @@\n view_ permission on the object\"\"\"\n app_label = instance.content_type.app_label\n model = instance.content_type.model\n- model_class = apps.get_model(app_label, model)\n+ try:\n+ model_class = apps.get_model(app_label, model)\n+ except LookupError:\n+ return None\n objects = get_objects_for_user(instance.user, f\"{app_label}.view_{model}\", model_class)\n obj = objects.first()\n if not obj:\n", "issue": "Listing all permissions\n**Describe the bug**\r\nWhen attempting to list all the permissions in order to select which ones to assign to a permission, hangs when gets to 141+\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to 'Directory'\r\n2. Click on 'Roles'\r\n3. Select (or create a new) role.\r\n4. Click on 'Assign Permissions'\r\n5. Click on '+' to Select Permissions to Grant\r\n6. Scroll through permissions by clicking on '>' until arrive at 121 - 140\r\n\r\n**Expected behavior**\r\nContinue displaying all permissions until reaching end of list (Displays 377 Permissions)\r\n\r\n**Screenshots**\r\n\r\n\r\n**Logs**\r\n[logs](https://pastebin.com/v4Fd6PDN)\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.10.1\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nUpgraded from 2023.8 -> 2023.10.0 -> 2023.10.1\r\n\r\nCan provide more logs or anything else, if needed.\n", "before_files": [{"content": "\"\"\"common RBAC serializers\"\"\"\nfrom django.apps import apps\nfrom django.contrib.auth.models import Permission\nfrom django.db.models import QuerySet\nfrom django_filters.filters import ModelChoiceFilter\nfrom django_filters.filterset import FilterSet\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import (\n CharField,\n ChoiceField,\n ListField,\n ReadOnlyField,\n SerializerMethodField,\n)\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\n\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.core.models import User\nfrom authentik.lib.validators import RequiredTogetherValidator\nfrom authentik.policies.event_matcher.models import model_choices\nfrom authentik.rbac.models import Role\n\n\nclass PermissionSerializer(ModelSerializer):\n \"\"\"Global permission\"\"\"\n\n app_label = ReadOnlyField(source=\"content_type.app_label\")\n app_label_verbose = SerializerMethodField()\n model = ReadOnlyField(source=\"content_type.model\")\n model_verbose = SerializerMethodField()\n\n def get_app_label_verbose(self, instance: Permission) -> str:\n \"\"\"Human-readable app label\"\"\"\n return apps.get_app_config(instance.content_type.app_label).verbose_name\n\n def get_model_verbose(self, instance: Permission) -> str:\n \"\"\"Human-readable model name\"\"\"\n return apps.get_model(\n instance.content_type.app_label, instance.content_type.model\n )._meta.verbose_name\n\n class Meta:\n model = Permission\n fields = [\n \"id\",\n \"name\",\n \"codename\",\n \"model\",\n \"app_label\",\n \"app_label_verbose\",\n \"model_verbose\",\n ]\n\n\nclass PermissionFilter(FilterSet):\n \"\"\"Filter permissions\"\"\"\n\n role = ModelChoiceFilter(queryset=Role.objects.all(), method=\"filter_role\")\n user = ModelChoiceFilter(queryset=User.objects.all())\n\n def filter_role(self, queryset: QuerySet, name, value: Role) -> QuerySet:\n \"\"\"Filter permissions based on role\"\"\"\n return queryset.filter(group__role=value)\n\n class Meta:\n model = 
Permission\n fields = [\n \"codename\",\n \"content_type__model\",\n \"content_type__app_label\",\n \"role\",\n \"user\",\n ]\n\n\nclass RBACPermissionViewSet(ReadOnlyModelViewSet):\n \"\"\"Read-only list of all permissions, filterable by model and app\"\"\"\n\n queryset = Permission.objects.none()\n serializer_class = PermissionSerializer\n ordering = [\"name\"]\n filterset_class = PermissionFilter\n search_fields = [\n \"codename\",\n \"content_type__model\",\n \"content_type__app_label\",\n ]\n\n def get_queryset(self) -> QuerySet:\n return (\n Permission.objects.all()\n .select_related(\"content_type\")\n .filter(\n content_type__app_label__startswith=\"authentik\",\n )\n )\n\n\nclass PermissionAssignSerializer(PassiveSerializer):\n \"\"\"Request to assign a new permission\"\"\"\n\n permissions = ListField(child=CharField())\n model = ChoiceField(choices=model_choices(), required=False)\n object_pk = CharField(required=False)\n\n validators = [RequiredTogetherValidator(fields=[\"model\", \"object_pk\"])]\n\n def validate(self, attrs: dict) -> dict:\n model_instance = None\n # Check if we're setting an object-level perm or global\n model = attrs.get(\"model\")\n object_pk = attrs.get(\"object_pk\")\n if model and object_pk:\n model = apps.get_model(attrs[\"model\"])\n model_instance = model.objects.filter(pk=attrs[\"object_pk\"]).first()\n attrs[\"model_instance\"] = model_instance\n if attrs.get(\"model\"):\n return attrs\n permissions = attrs.get(\"permissions\", [])\n if not all(\".\" in perm for perm in permissions):\n raise ValidationError(\n {\n \"permissions\": (\n \"When assigning global permissions, codename must be given as \"\n \"app_label.codename\"\n )\n }\n )\n return attrs\n", "path": "authentik/rbac/api/rbac.py"}, {"content": "\"\"\"common RBAC serializers\"\"\"\nfrom typing import Optional\n\nfrom django.apps import apps\nfrom django_filters.filters import UUIDFilter\nfrom django_filters.filterset import FilterSet\nfrom guardian.models import GroupObjectPermission\nfrom guardian.shortcuts import get_objects_for_group\nfrom rest_framework.fields import SerializerMethodField\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom authentik.api.pagination import SmallerPagination\nfrom authentik.rbac.api.rbac_assigned_by_roles import RoleObjectPermissionSerializer\n\n\nclass ExtraRoleObjectPermissionSerializer(RoleObjectPermissionSerializer):\n \"\"\"User permission with additional object-related data\"\"\"\n\n app_label_verbose = SerializerMethodField()\n model_verbose = SerializerMethodField()\n\n object_description = SerializerMethodField()\n\n def get_app_label_verbose(self, instance: GroupObjectPermission) -> str:\n \"\"\"Get app label from permission's model\"\"\"\n return apps.get_app_config(instance.content_type.app_label).verbose_name\n\n def get_model_verbose(self, instance: GroupObjectPermission) -> str:\n \"\"\"Get model label from permission's model\"\"\"\n return apps.get_model(\n instance.content_type.app_label, instance.content_type.model\n )._meta.verbose_name\n\n def get_object_description(self, instance: GroupObjectPermission) -> Optional[str]:\n \"\"\"Get model description from attached model. 
This operation takes at least\n one additional query, and the description is only shown if the user/role has the\n view_ permission on the object\"\"\"\n app_label = instance.content_type.app_label\n model = instance.content_type.model\n model_class = apps.get_model(app_label, model)\n objects = get_objects_for_group(instance.group, f\"{app_label}.view_{model}\", model_class)\n obj = objects.first()\n if not obj:\n return None\n return str(obj)\n\n class Meta(RoleObjectPermissionSerializer.Meta):\n fields = RoleObjectPermissionSerializer.Meta.fields + [\n \"app_label_verbose\",\n \"model_verbose\",\n \"object_description\",\n ]\n\n\nclass RolePermissionFilter(FilterSet):\n \"\"\"Role permission filter\"\"\"\n\n uuid = UUIDFilter(\"group__role__uuid\", required=True)\n\n\nclass RolePermissionViewSet(ListModelMixin, GenericViewSet):\n \"\"\"Get a role's assigned object permissions\"\"\"\n\n serializer_class = ExtraRoleObjectPermissionSerializer\n ordering = [\"group__role__name\"]\n pagination_class = SmallerPagination\n # The filtering is done in the filterset,\n # which has a required filter that does the heavy lifting\n queryset = GroupObjectPermission.objects.select_related(\"content_type\", \"group__role\").all()\n filterset_class = RolePermissionFilter\n", "path": "authentik/rbac/api/rbac_roles.py"}, {"content": "\"\"\"common RBAC serializers\"\"\"\nfrom typing import Optional\n\nfrom django.apps import apps\nfrom django_filters.filters import NumberFilter\nfrom django_filters.filterset import FilterSet\nfrom guardian.models import UserObjectPermission\nfrom guardian.shortcuts import get_objects_for_user\nfrom rest_framework.fields import SerializerMethodField\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom authentik.api.pagination import SmallerPagination\nfrom authentik.rbac.api.rbac_assigned_by_users import UserObjectPermissionSerializer\n\n\nclass ExtraUserObjectPermissionSerializer(UserObjectPermissionSerializer):\n \"\"\"User permission with additional object-related data\"\"\"\n\n app_label_verbose = SerializerMethodField()\n model_verbose = SerializerMethodField()\n\n object_description = SerializerMethodField()\n\n def get_app_label_verbose(self, instance: UserObjectPermission) -> str:\n \"\"\"Get app label from permission's model\"\"\"\n return apps.get_app_config(instance.content_type.app_label).verbose_name\n\n def get_model_verbose(self, instance: UserObjectPermission) -> str:\n \"\"\"Get model label from permission's model\"\"\"\n return apps.get_model(\n instance.content_type.app_label, instance.content_type.model\n )._meta.verbose_name\n\n def get_object_description(self, instance: UserObjectPermission) -> Optional[str]:\n \"\"\"Get model description from attached model. 
This operation takes at least\n one additional query, and the description is only shown if the user/role has the\n view_ permission on the object\"\"\"\n app_label = instance.content_type.app_label\n model = instance.content_type.model\n model_class = apps.get_model(app_label, model)\n objects = get_objects_for_user(instance.user, f\"{app_label}.view_{model}\", model_class)\n obj = objects.first()\n if not obj:\n return None\n return str(obj)\n\n class Meta(UserObjectPermissionSerializer.Meta):\n fields = UserObjectPermissionSerializer.Meta.fields + [\n \"app_label_verbose\",\n \"model_verbose\",\n \"object_description\",\n ]\n\n\nclass UserPermissionFilter(FilterSet):\n \"\"\"User-assigned permission filter\"\"\"\n\n user_id = NumberFilter(\"user__id\", required=True)\n\n\nclass UserPermissionViewSet(ListModelMixin, GenericViewSet):\n \"\"\"Get a users's assigned object permissions\"\"\"\n\n serializer_class = ExtraUserObjectPermissionSerializer\n ordering = [\"user__username\"]\n pagination_class = SmallerPagination\n # The filtering is done in the filterset,\n # which has a required filter that does the heavy lifting\n queryset = UserObjectPermission.objects.select_related(\"content_type\", \"user\").all()\n filterset_class = UserPermissionFilter\n", "path": "authentik/rbac/api/rbac_users.py"}]} | 3,415 | 897 |
gh_patches_debug_14777 | rasdani/github-patches | git_diff | Mailu__Mailu-1941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Letsencrypt Force Renewal
Is there a limit on the Subject Alt Name entries?
I have updated my /mailu/mailu.env "HOSTNAMES" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.
Previously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isn't within the renewal window. But there is no guidance for the new letsencrypt certbot.
I am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'
</issue>
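For context on the diff further down in this record: certbot's `--keep-until-expiring` skips reissuance while the existing certificate is still valid, so hostnames added to `HOSTNAMES` after the first issuance never reach the certificate's Subject Alt Names until the renewal window opens. A minimal sketch of the relevant change (mirroring the golden diff below, with `import os` added so the snippet stands alone) is to pass certbot's `--renew-with-new-domains` flag so a changed domain list forces reissuance:

```python
import os

# Sketch of the RSA command list from core/nginx/letsencrypt.py with one extra
# flag: --renew-with-new-domains makes certbot reissue the "mailu" certificate
# whenever the requested -d list no longer matches the existing certificate.
command = [
    "certbot",
    "-n", "--agree-tos",  # non-interactive
    "-d", os.environ["HOSTNAMES"],
    "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
    "certonly", "--standalone",
    "--cert-name", "mailu",
    "--preferred-challenges", "http", "--http-01-port", "8008",
    "--keep-until-expiring",
    "--renew-with-new-domains",  # reissue when HOSTNAMES changes
    "--config-dir", "/certs/letsencrypt",
    "--post-hook", "/config.py"
]
```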
<code>
[start of core/nginx/letsencrypt.py]
1 #!/usr/bin/python3
2
3 import os
4 import time
5 import subprocess
6
7 command = [
8 "certbot",
9 "-n", "--agree-tos", # non-interactive
10 "-d", os.environ["HOSTNAMES"],
11 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
12 "certonly", "--standalone",
13 "--cert-name", "mailu",
14 "--preferred-challenges", "http", "--http-01-port", "8008",
15 "--keep-until-expiring",
16 "--config-dir", "/certs/letsencrypt",
17 "--post-hook", "/config.py"
18 ]
19 command2 = [
20 "certbot",
21 "-n", "--agree-tos", # non-interactive
22 "-d", os.environ["HOSTNAMES"],
23 "-m", "{}@{}".format(os.environ["POSTMASTER"], os.environ["DOMAIN"]),
24 "certonly", "--standalone",
25 "--cert-name", "mailu-ecdsa",
26 "--preferred-challenges", "http", "--http-01-port", "8008",
27 "--keep-until-expiring",
28 "--key-type", "ecdsa",
29 "--config-dir", "/certs/letsencrypt",
30 "--post-hook", "/config.py"
31 ]
32
33 def format_for_nginx(fullchain, output):
34 """ We may want to strip ISRG Root X1 out
35 """
36 certs = []
37 with open(fullchain, 'r') as pem:
38 cert = ''
39 for line in pem:
40 cert += line
41 if '-----END CERTIFICATE-----' in line:
42 certs += [cert]
43 cert = ''
44 with open(output, 'w') as pem:
45 for cert in certs[:-1] if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN', default="False") else certs:
46 pem.write(cert)
47
48 # Wait for nginx to start
49 time.sleep(5)
50
51 # Run certbot every day
52 while True:
53 subprocess.call(command)
54 format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')
55 subprocess.call(command2)
56 format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')
57 time.sleep(86400)
58
[end of core/nginx/letsencrypt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -13,6 +13,7 @@
"--cert-name", "mailu",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
+ "--renew-with-new-domains",
"--config-dir", "/certs/letsencrypt",
"--post-hook", "/config.py"
]
@@ -26,6 +27,7 @@
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
"--key-type", "ecdsa",
+ "--renew-with-new-domains",
"--config-dir", "/certs/letsencrypt",
"--post-hook", "/config.py"
]
| {"golden_diff": "diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py\n--- a/core/nginx/letsencrypt.py\n+++ b/core/nginx/letsencrypt.py\n@@ -13,6 +13,7 @@\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n+ \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n ]\n@@ -26,6 +27,7 @@\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--key-type\", \"ecdsa\",\n+ \"--renew-with-new-domains\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n ]\n", "issue": "Letsencrypt Force Renewal\nIs there a limit on the Subject Alt Name entries?\r\n\r\nI have updated my /mailu/mailu.env \"HOSTNAMES\" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.\r\n\r\nPreviously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot.\r\n\r\nI am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'\nLetsencrypt Force Renewal\nIs there a limit on the Subject Alt Name entries?\r\n\r\nI have updated my /mailu/mailu.env \"HOSTNAMES\" variable, but when I restart Mailu it doesn't update the Subject Alt Names on the mailu cert.\r\n\r\nPreviously it has worked, so I am guessing that I need to force Letsencrypt to refresh as it isnt within the renewal window. But there is no guidance for the new letsencrypt certbot.\r\n\r\nI am using the latest Mailu version (1.7) and this is the command I am using to restart mailu '/mailu/docker-compose -p mailu up -d'\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport time\nimport subprocess\n\ncommand = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\ncommand2 = [\n \"certbot\",\n \"-n\", \"--agree-tos\", # non-interactive\n \"-d\", os.environ[\"HOSTNAMES\"],\n \"-m\", \"{}@{}\".format(os.environ[\"POSTMASTER\"], os.environ[\"DOMAIN\"]),\n \"certonly\", \"--standalone\",\n \"--cert-name\", \"mailu-ecdsa\",\n \"--preferred-challenges\", \"http\", \"--http-01-port\", \"8008\",\n \"--keep-until-expiring\",\n \"--key-type\", \"ecdsa\",\n \"--config-dir\", \"/certs/letsencrypt\",\n \"--post-hook\", \"/config.py\"\n]\n\ndef format_for_nginx(fullchain, output):\n \"\"\" We may want to strip ISRG Root X1 out\n \"\"\"\n certs = []\n with open(fullchain, 'r') as pem:\n cert = ''\n for line in pem:\n cert += line\n if '-----END CERTIFICATE-----' in line:\n certs += [cert]\n cert = ''\n with open(output, 'w') as pem:\n for cert in certs[:-1] if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN', default=\"False\") else certs:\n pem.write(cert)\n\n# Wait for nginx to start\ntime.sleep(5)\n\n# Run certbot every day\nwhile True:\n subprocess.call(command)\n format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')\n subprocess.call(command2)\n 
format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')\n time.sleep(86400)\n", "path": "core/nginx/letsencrypt.py"}]} | 1,423 | 202 |
gh_patches_debug_16656 | rasdani/github-patches | git_diff | AnalogJ__lexicon-133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix logging TypeError (digitalocean.py)
[This same issue](https://github.com/AnalogJ/lexicon/pull/128/commits/903af58378ab9942d817c57e0330b5f7ac26b4e9) exists in `lexicon/providers/digitalocean.py` line 111. The same edit is needed to fix it.
The error generated is:
```
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 861, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 734, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 465, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 329, in getMessage
msg = msg % self.args
TypeError: not all arguments converted during string formatting
Logged from file digitalocean.py, line 111
```
That section is:
```
Line 110: # is always True at this point, if a non 200 response is returned an error is raised.
Line 111: logger.debug('delete_record: {0}', True)
Line 112: return True
```
</issue>
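To make the failure mode concrete: the standard-library `logging` module renders messages with %-style formatting (`msg % args`), so a `str.format`-style placeholder such as `{0}` is never substituted and the stray argument produces the "not all arguments converted during string formatting" error shown in the traceback above. A standalone reproduction (not lexicon code):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("demo")

# logging evaluates 'delete_record: {0}' % (True,) when rendering the record,
# which raises TypeError; logging catches it and prints the traceback to
# stderr, exactly as reported in the issue.
logger.debug('delete_record: {0}', True)

# %-style placeholders are what the logging module expects:
logger.debug('delete_record: %s', True)  # -> "delete_record: True"
```

The fix is the same as in the linked commit: use `%s` instead of `{0}` at line 111 of `digitalocean.py` (and at the matching `update_record` debug call in `dnsmadeeasy.py`).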
<code>
[start of lexicon/providers/digitalocean.py]
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import json
5 import logging
6
7 import requests
8
9 from .base import Provider as BaseProvider
10
11 logger = logging.getLogger(__name__)
12
13
14 def ProviderParser(subparser):
15 subparser.add_argument("--auth-token", help="specify token used authenticate to DNS provider")
16
17 class Provider(BaseProvider):
18
19 def __init__(self, options, engine_overrides=None):
20 super(Provider, self).__init__(options, engine_overrides)
21 self.domain_id = None
22 self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')
23
24 def authenticate(self):
25
26 payload = self._get('/domains/{0}'.format(self.options['domain']))
27 self.domain_id = self.options['domain']
28
29 def create_record(self, type, name, content):
30 record = {
31 'type': type,
32 'name': self._relative_name(name),
33 'data': content,
34
35 }
36 if type == 'CNAME':
37 record['data'] = record['data'].rstrip('.') + '.' # make sure a the data is always a FQDN for CNAMe.
38
39 payload = self._post('/domains/{0}/records'.format(self.domain_id), record)
40
41 logger.debug('create_record: %s', True)
42 return True
43
44 # List all records. Return an empty list if no records found
45 # type, name and content are used to filter records.
46 # If possible filter during the query, otherwise filter after response is received.
47 def list_records(self, type=None, name=None, content=None):
48 url = '/domains/{0}/records'.format(self.domain_id)
49 records = []
50 payload = {}
51
52 next = url
53 while next is not None:
54 payload = self._get(next)
55 if 'links' in payload \
56 and 'pages' in payload['links'] \
57 and 'next' in payload['links']['pages']:
58 next = payload['links']['pages']['next']
59 else:
60 next = None
61
62 for record in payload['domain_records']:
63 processed_record = {
64 'type': record['type'],
65 'name': "{0}.{1}".format(record['name'], self.domain_id),
66 'ttl': '',
67 'content': record['data'],
68 'id': record['id']
69 }
70 records.append(processed_record)
71
72 if type:
73 records = [record for record in records if record['type'] == type]
74 if name:
75 records = [record for record in records if record['name'] == self._full_name(name)]
76 if content:
77 records = [record for record in records if record['content'].lower() == content.lower()]
78
79 logger.debug('list_records: %s', records)
80 return records
81
82 # Create or update a record.
83 def update_record(self, identifier, type=None, name=None, content=None):
84
85 data = {}
86 if type:
87 data['type'] = type
88 if name:
89 data['name'] = self._relative_name(name)
90 if content:
91 data['data'] = content
92
93 payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)
94
95 logger.debug('update_record: %s', True)
96 return True
97
98 # Delete an existing record.
99 # If record does not exist, do nothing.
100 def delete_record(self, identifier=None, type=None, name=None, content=None):
101 if not identifier:
102 records = self.list_records(type, name, content)
103 logger.debug('records: %s', records)
104 if len(records) == 1:
105 identifier = records[0]['id']
106 else:
107 raise Exception('Record identifier could not be found.')
108 payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))
109
110 # is always True at this point, if a non 200 response is returned an error is raised.
111 logger.debug('delete_record: {0}', True)
112 return True
113
114
115 # Helpers
116 def _request(self, action='GET', url='/', data=None, query_params=None):
117 if data is None:
118 data = {}
119 if query_params is None:
120 query_params = {}
121 default_headers = {
122 'Accept': 'application/json',
123 'Content-Type': 'application/json',
124 'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))
125 }
126 if not url.startswith(self.api_endpoint):
127 url = self.api_endpoint + url
128
129 r = requests.request(action, url, params=query_params,
130 data=json.dumps(data),
131 headers=default_headers)
132 r.raise_for_status() # if the request fails for any reason, throw an error.
133 if action == 'DELETE':
134 return ''
135 else:
136 return r.json()
137
[end of lexicon/providers/digitalocean.py]
[start of lexicon/providers/dnsmadeeasy.py]
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import contextlib
5 import datetime
6 import hmac
7 import json
8 import locale
9 import logging
10 from hashlib import sha1
11
12 import requests
13 from builtins import bytes
14
15 from .base import Provider as BaseProvider
16
17 logger = logging.getLogger(__name__)
18
19
20 def ProviderParser(subparser):
21 subparser.add_argument("--auth-username", help="specify username used to authenticate")
22 subparser.add_argument("--auth-token", help="specify token used authenticate=")
23
24 class Provider(BaseProvider):
25
26 def __init__(self, options, engine_overrides=None):
27 super(Provider, self).__init__(options, engine_overrides)
28 self.domain_id = None
29 self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.dnsmadeeasy.com/V2.0')
30
31 def authenticate(self):
32
33 try:
34 payload = self._get('/dns/managed/name', {'domainname': self.options['domain']})
35 except requests.exceptions.HTTPError as e:
36 if e.response.status_code == 404:
37 payload = {}
38 else:
39 raise e
40
41 if not payload or not payload['id']:
42 raise Exception('No domain found')
43
44 self.domain_id = payload['id']
45
46
47 # Create record. If record already exists with the same content, do nothing'
48 def create_record(self, type, name, content):
49 record = {
50 'type': type,
51 'name': self._relative_name(name),
52 'value': content,
53 'ttl': self.options['ttl']
54 }
55 payload = {}
56 try:
57 payload = self._post('/dns/managed/{0}/records/'.format(self.domain_id), record)
58 except requests.exceptions.HTTPError as e:
59 if e.response.status_code == 400:
60 payload = {}
61
62 # http 400 is ok here, because the record probably already exists
63 logger.debug('create_record: %s', 'name' in payload)
64 return 'name' in payload
65
66 # List all records. Return an empty list if no records found
67 # type, name and content are used to filter records.
68 # If possible filter during the query, otherwise filter after response is received.
69 def list_records(self, type=None, name=None, content=None):
70 filter = {}
71 if type:
72 filter['type'] = type
73 if name:
74 filter['recordName'] = self._relative_name(name)
75 payload = self._get('/dns/managed/{0}/records'.format(self.domain_id), filter)
76
77 records = []
78 for record in payload['data']:
79 processed_record = {
80 'type': record['type'],
81 'name': '{0}.{1}'.format(record['name'], self.options['domain']),
82 'ttl': record['ttl'],
83 'content': record['value'],
84 'id': record['id']
85 }
86
87 processed_record = self._clean_TXT_record(processed_record)
88 records.append(processed_record)
89
90 logger.debug('list_records: %s', records)
91 return records
92
93 # Create or update a record.
94 def update_record(self, identifier, type=None, name=None, content=None):
95
96 data = {
97 'id': identifier,
98 'ttl': self.options['ttl']
99 }
100
101 if name:
102 data['name'] = self._relative_name(name)
103 if content:
104 data['value'] = content
105 if type:
106 data['type'] = type
107
108 payload = self._put('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)
109
110 logger.debug('update_record: {0}', True)
111 return True
112
113 # Delete an existing record.
114 # If record does not exist, do nothing.
115 def delete_record(self, identifier=None, type=None, name=None, content=None):
116 if not identifier:
117 records = self.list_records(type, name, content)
118 logger.debug('records: %s', records)
119 if len(records) == 1:
120 identifier = records[0]['id']
121 else:
122 raise Exception('Record identifier could not be found.')
123 payload = self._delete('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier))
124
125 # is always True at this point, if a non 200 response is returned an error is raised.
126 logger.debug('delete_record: %s', True)
127 return True
128
129
130 # Helpers
131
132 # this method allows you to set the locale when doing datetime string formatting.
133 # https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale
134 @contextlib.contextmanager
135 def setlocale(self, *args, **kw):
136 saved = locale.setlocale(locale.LC_ALL)
137 #yield locale.setlocale(*args, **kw)
138 yield locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
139 locale.setlocale(locale.LC_ALL, saved)
140
141 def _request(self, action='GET', url='/', data=None, query_params=None):
142 if data is None:
143 data = {}
144 if query_params is None:
145 query_params = {}
146 default_headers = {
147 'Accept': 'application/json',
148 'Content-Type': 'application/json',
149 'x-dnsme-apiKey': self.options['auth_username']
150 }
151 default_auth = None
152
153 # all requests require a HMAC header and timestamp header.
154 now = datetime.datetime.utcnow()
155 # required format: Sat, 12 Feb 2011 20:59:04 GMT
156 with self.setlocale(locale.LC_TIME, 'en_US.utf8'):
157 request_date = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
158 hashed = hmac.new(bytes(self.options['auth_token'], 'ascii'),
159 bytes(request_date, 'ascii'), sha1)
160
161 default_headers['x-dnsme-requestDate'] = request_date
162 default_headers['x-dnsme-hmac'] = hashed.hexdigest()
163
164 r = requests.request(action, self.api_endpoint + url, params=query_params,
165 data=json.dumps(data),
166 headers=default_headers,
167 auth=default_auth)
168 r.raise_for_status() # if the request fails for any reason, throw an error.
169
170 # PUT and DELETE actions dont return valid json.
171 if action == 'DELETE' or action == 'PUT':
172 return r.text
173 return r.json()
174
[end of lexicon/providers/dnsmadeeasy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lexicon/providers/digitalocean.py b/lexicon/providers/digitalocean.py
--- a/lexicon/providers/digitalocean.py
+++ b/lexicon/providers/digitalocean.py
@@ -108,7 +108,7 @@
payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))
# is always True at this point, if a non 200 response is returned an error is raised.
- logger.debug('delete_record: {0}', True)
+ logger.debug('delete_record: %s', True)
return True
diff --git a/lexicon/providers/dnsmadeeasy.py b/lexicon/providers/dnsmadeeasy.py
--- a/lexicon/providers/dnsmadeeasy.py
+++ b/lexicon/providers/dnsmadeeasy.py
@@ -107,7 +107,7 @@
payload = self._put('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)
- logger.debug('update_record: {0}', True)
+ logger.debug('update_record: %s', True)
return True
# Delete an existing record.
| {"golden_diff": "diff --git a/lexicon/providers/digitalocean.py b/lexicon/providers/digitalocean.py\n--- a/lexicon/providers/digitalocean.py\n+++ b/lexicon/providers/digitalocean.py\n@@ -108,7 +108,7 @@\n payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))\n \n # is always True at this point, if a non 200 response is returned an error is raised.\n- logger.debug('delete_record: {0}', True)\n+ logger.debug('delete_record: %s', True)\n return True\n \n \ndiff --git a/lexicon/providers/dnsmadeeasy.py b/lexicon/providers/dnsmadeeasy.py\n--- a/lexicon/providers/dnsmadeeasy.py\n+++ b/lexicon/providers/dnsmadeeasy.py\n@@ -107,7 +107,7 @@\n \n payload = self._put('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)\n \n- logger.debug('update_record: {0}', True)\n+ logger.debug('update_record: %s', True)\n return True\n \n # Delete an existing record.\n", "issue": "Fix logging TypeError (digitalocean.py)\n[This same issue](https://github.com/AnalogJ/lexicon/pull/128/commits/903af58378ab9942d817c57e0330b5f7ac26b4e9) exists in `lexicon/providers/digitalocean.py` line 111. The same edit is needed to fix it. \r\n\r\nThe error generated is:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 861, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 734, in format\r\n return fmt.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 465, in format\r\n record.message = record.getMessage()\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 329, in getMessage\r\n msg = msg % self.args\r\nTypeError: not all arguments converted during string formatting\r\nLogged from file digitalocean.py, line 111\r\n```\r\nThat section is:\r\n```\r\n Line 110: # is always True at this point, if a non 200 response is returned an error is raised.\r\n Line 111: logger.debug('delete_record: {0}', True)\r\n Line 112: return True\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\nimport logging\n\nimport requests\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-token\", help=\"specify token used authenticate to DNS provider\")\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.domain_id = None\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.digitalocean.com/v2')\n\n def authenticate(self):\n\n payload = self._get('/domains/{0}'.format(self.options['domain']))\n self.domain_id = self.options['domain']\n\n def create_record(self, type, name, content):\n record = {\n 'type': type,\n 'name': self._relative_name(name),\n 'data': content,\n\n }\n if type == 'CNAME':\n record['data'] = record['data'].rstrip('.') + '.' # make sure a the data is always a FQDN for CNAMe.\n\n payload = self._post('/domains/{0}/records'.format(self.domain_id), record)\n\n logger.debug('create_record: %s', True)\n return True\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def list_records(self, type=None, name=None, content=None):\n url = '/domains/{0}/records'.format(self.domain_id)\n records = []\n payload = {}\n\n next = url\n while next is not None:\n payload = self._get(next)\n if 'links' in payload \\\n and 'pages' in payload['links'] \\\n and 'next' in payload['links']['pages']:\n next = payload['links']['pages']['next']\n else:\n next = None\n\n for record in payload['domain_records']:\n processed_record = {\n 'type': record['type'],\n 'name': \"{0}.{1}\".format(record['name'], self.domain_id),\n 'ttl': '',\n 'content': record['data'],\n 'id': record['id']\n }\n records.append(processed_record)\n\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n records = [record for record in records if record['name'] == self._full_name(name)]\n if content:\n records = [record for record in records if record['content'].lower() == content.lower()]\n\n logger.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n\n data = {}\n if type:\n data['type'] = type\n if name:\n data['name'] = self._relative_name(name)\n if content:\n data['data'] = content\n\n payload = self._put('/domains/{0}/records/{1}'.format(self.domain_id, identifier), data)\n\n logger.debug('update_record: %s', True)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n if not identifier:\n records = self.list_records(type, name, content)\n logger.debug('records: %s', records)\n if len(records) == 1:\n identifier = records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n payload = self._delete('/domains/{0}/records/{1}'.format(self.domain_id, identifier))\n\n # is always True at this point, if a non 200 response is returned an error is raised.\n logger.debug('delete_record: {0}', True)\n return True\n\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n default_headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer {0}'.format(self.options.get('auth_token'))\n }\n if not url.startswith(self.api_endpoint):\n url = self.api_endpoint + url\n\n r = requests.request(action, url, params=query_params,\n data=json.dumps(data),\n headers=default_headers)\n r.raise_for_status() # if the request fails for any reason, throw an error.\n if action == 'DELETE':\n return ''\n else:\n return r.json()\n", "path": "lexicon/providers/digitalocean.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport contextlib\nimport datetime\nimport hmac\nimport json\nimport locale\nimport logging\nfrom hashlib import sha1\n\nimport requests\nfrom builtins import bytes\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-username\", help=\"specify username used to authenticate\")\n subparser.add_argument(\"--auth-token\", help=\"specify token used authenticate=\")\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, 
engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.domain_id = None\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.dnsmadeeasy.com/V2.0')\n\n def authenticate(self):\n\n try:\n payload = self._get('/dns/managed/name', {'domainname': self.options['domain']})\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 404:\n payload = {}\n else:\n raise e\n\n if not payload or not payload['id']:\n raise Exception('No domain found')\n\n self.domain_id = payload['id']\n\n\n # Create record. If record already exists with the same content, do nothing'\n def create_record(self, type, name, content):\n record = {\n 'type': type,\n 'name': self._relative_name(name),\n 'value': content,\n 'ttl': self.options['ttl']\n }\n payload = {}\n try:\n payload = self._post('/dns/managed/{0}/records/'.format(self.domain_id), record)\n except requests.exceptions.HTTPError as e:\n if e.response.status_code == 400:\n payload = {}\n\n # http 400 is ok here, because the record probably already exists\n logger.debug('create_record: %s', 'name' in payload)\n return 'name' in payload\n\n # List all records. Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def list_records(self, type=None, name=None, content=None):\n filter = {}\n if type:\n filter['type'] = type\n if name:\n filter['recordName'] = self._relative_name(name)\n payload = self._get('/dns/managed/{0}/records'.format(self.domain_id), filter)\n\n records = []\n for record in payload['data']:\n processed_record = {\n 'type': record['type'],\n 'name': '{0}.{1}'.format(record['name'], self.options['domain']),\n 'ttl': record['ttl'],\n 'content': record['value'],\n 'id': record['id']\n }\n\n processed_record = self._clean_TXT_record(processed_record)\n records.append(processed_record)\n\n logger.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n\n data = {\n 'id': identifier,\n 'ttl': self.options['ttl']\n }\n\n if name:\n data['name'] = self._relative_name(name)\n if content:\n data['value'] = content\n if type:\n data['type'] = type\n\n payload = self._put('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier), data)\n\n logger.debug('update_record: {0}', True)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n if not identifier:\n records = self.list_records(type, name, content)\n logger.debug('records: %s', records)\n if len(records) == 1:\n identifier = records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n payload = self._delete('/dns/managed/{0}/records/{1}'.format(self.domain_id, identifier))\n\n # is always True at this point, if a non 200 response is returned an error is raised.\n logger.debug('delete_record: %s', True)\n return True\n\n\n # Helpers\n\n # this method allows you to set the locale when doing datetime string formatting.\n # https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale\n @contextlib.contextmanager\n def setlocale(self, *args, **kw):\n saved = locale.setlocale(locale.LC_ALL)\n #yield locale.setlocale(*args, **kw)\n yield locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')\n locale.setlocale(locale.LC_ALL, 
saved)\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n default_headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'x-dnsme-apiKey': self.options['auth_username']\n }\n default_auth = None\n\n # all requests require a HMAC header and timestamp header.\n now = datetime.datetime.utcnow()\n # required format: Sat, 12 Feb 2011 20:59:04 GMT\n with self.setlocale(locale.LC_TIME, 'en_US.utf8'):\n request_date = now.strftime('%a, %d %b %Y %H:%M:%S GMT')\n hashed = hmac.new(bytes(self.options['auth_token'], 'ascii'), \n bytes(request_date, 'ascii'), sha1)\n\n default_headers['x-dnsme-requestDate'] = request_date\n default_headers['x-dnsme-hmac'] = hashed.hexdigest()\n\n r = requests.request(action, self.api_endpoint + url, params=query_params,\n data=json.dumps(data),\n headers=default_headers,\n auth=default_auth)\n r.raise_for_status() # if the request fails for any reason, throw an error.\n\n # PUT and DELETE actions dont return valid json.\n if action == 'DELETE' or action == 'PUT':\n return r.text\n return r.json()\n", "path": "lexicon/providers/dnsmadeeasy.py"}]} | 4,074 | 271 |
gh_patches_debug_29691 | rasdani/github-patches | git_diff | litestar-org__litestar-1838 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
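To illustrate why validating `directories` as `DirectoryPath` is too strict: package data reached through `importlib.resources` (or the `importlib_resources` backport) may live inside a zip or wheel, so there is no on-disk directory for pydantic to check even though the files are perfectly readable. A minimal, hypothetical example follows; `mypackage` and the resource path are placeholders, and it only shows the access pattern, not a complete `FileSystemProtocol` implementation:

```python
from importlib.resources import files  # or: from importlib_resources import files

# Works whether the package is an on-disk directory or bundled in a zip/wheel;
# in the latter case there is no real directory path to validate.
resource = files("mypackage") / "static" / "index.html"
content = resource.read_bytes()
```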
<code>
[start of litestar/_openapi/responses.py]
1 from __future__ import annotations
2
3 import re
4 from copy import copy
5 from dataclasses import asdict
6 from http import HTTPStatus
7 from inspect import Signature
8 from operator import attrgetter
9 from typing import TYPE_CHECKING, Any, Iterator
10
11 from litestar._openapi.schema_generation import create_schema
12 from litestar._signature.field import SignatureField
13 from litestar.enums import MediaType
14 from litestar.exceptions import HTTPException, ValidationException
15 from litestar.openapi.spec import OpenAPIResponse
16 from litestar.openapi.spec.enums import OpenAPIFormat, OpenAPIType
17 from litestar.openapi.spec.header import OpenAPIHeader
18 from litestar.openapi.spec.media_type import OpenAPIMediaType
19 from litestar.openapi.spec.schema import Schema
20 from litestar.response import (
21 File,
22 Redirect,
23 Stream,
24 Template,
25 )
26 from litestar.response import (
27 Response as LitestarResponse,
28 )
29 from litestar.response.base import ASGIResponse
30 from litestar.types.builtin_types import NoneType
31 from litestar.utils import get_enum_string_value, get_name
32
33 if TYPE_CHECKING:
34 from litestar.datastructures.cookie import Cookie
35 from litestar.handlers.http_handlers import HTTPRouteHandler
36 from litestar.openapi.spec.responses import Responses
37 from litestar.plugins import OpenAPISchemaPluginProtocol
38
39
40 __all__ = (
41 "create_additional_responses",
42 "create_cookie_schema",
43 "create_error_responses",
44 "create_responses",
45 "create_success_response",
46 )
47
48 CAPITAL_LETTERS_PATTERN = re.compile(r"(?=[A-Z])")
49
50
51 def pascal_case_to_text(string: str) -> str:
52 """Given a 'PascalCased' string, return its split form- 'Pascal Cased'."""
53 return " ".join(re.split(CAPITAL_LETTERS_PATTERN, string)).strip()
54
55
56 def create_cookie_schema(cookie: Cookie) -> Schema:
57 """Given a Cookie instance, return its corresponding OpenAPI schema.
58
59 Args:
60 cookie: Cookie
61
62 Returns:
63 Schema
64 """
65 cookie_copy = copy(cookie)
66 cookie_copy.value = "<string>"
67 value = cookie_copy.to_header(header="")
68 return Schema(description=cookie.description or "", example=value)
69
70
71 def create_success_response( # noqa: C901
72 route_handler: HTTPRouteHandler,
73 generate_examples: bool,
74 plugins: list[OpenAPISchemaPluginProtocol],
75 schemas: dict[str, Schema],
76 ) -> OpenAPIResponse:
77 """Create the schema for a success response."""
78 return_type = route_handler.parsed_fn_signature.return_type
79 return_annotation = return_type.annotation
80 default_descriptions: dict[Any, str] = {
81 Stream: "Stream Response",
82 Redirect: "Redirect Response",
83 File: "File Download",
84 }
85 description = (
86 route_handler.response_description
87 or default_descriptions.get(return_annotation)
88 or HTTPStatus(route_handler.status_code).description
89 )
90
91 if return_annotation is not Signature.empty and not return_type.is_subclass_of(
92 (NoneType, File, Redirect, Stream, ASGIResponse)
93 ):
94 if return_annotation is Template:
95 return_annotation = str
96 route_handler.media_type = get_enum_string_value(MediaType.HTML)
97 elif return_type.is_subclass_of(LitestarResponse):
98 return_annotation = return_type.inner_types[0].annotation if return_type.inner_types else Any
99 if not route_handler.media_type:
100 route_handler.media_type = get_enum_string_value(MediaType.JSON)
101
102 if dto := route_handler.resolve_return_dto():
103 result = dto.create_openapi_schema("return", str(route_handler), generate_examples, schemas, False)
104 else:
105 result = create_schema(
106 field=SignatureField.create(field_type=return_annotation),
107 generate_examples=generate_examples,
108 plugins=plugins,
109 schemas=schemas,
110 prefer_alias=False,
111 )
112
113 schema = result if isinstance(result, Schema) else schemas[result.value]
114
115 schema.content_encoding = route_handler.content_encoding
116 schema.content_media_type = route_handler.content_media_type
117
118 response = OpenAPIResponse(
119 content={route_handler.media_type: OpenAPIMediaType(schema=result)},
120 description=description,
121 )
122
123 elif return_type.is_subclass_of(Redirect):
124 response = OpenAPIResponse(
125 content=None,
126 description=description,
127 headers={
128 "location": OpenAPIHeader(
129 schema=Schema(type=OpenAPIType.STRING), description="target path for the redirect"
130 )
131 },
132 )
133
134 elif return_type.is_subclass_of((File, Stream)):
135 response = OpenAPIResponse(
136 content={
137 route_handler.media_type: OpenAPIMediaType(
138 schema=Schema(
139 type=OpenAPIType.STRING,
140 content_encoding=route_handler.content_encoding or "application/octet-stream",
141 content_media_type=route_handler.content_media_type,
142 ),
143 )
144 },
145 description=description,
146 headers={
147 "content-length": OpenAPIHeader(
148 schema=Schema(type=OpenAPIType.STRING), description="File size in bytes"
149 ),
150 "last-modified": OpenAPIHeader(
151 schema=Schema(type=OpenAPIType.STRING, format=OpenAPIFormat.DATE_TIME),
152 description="Last modified data-time in RFC 2822 format",
153 ),
154 "etag": OpenAPIHeader(schema=Schema(type=OpenAPIType.STRING), description="Entity tag"),
155 },
156 )
157
158 else:
159 response = OpenAPIResponse(
160 content=None,
161 description=description,
162 )
163
164 if response.headers is None:
165 response.headers = {}
166
167 for response_header in route_handler.resolve_response_headers():
168 header = OpenAPIHeader()
169 for attribute_name, attribute_value in ((k, v) for k, v in asdict(response_header).items() if v is not None):
170 if attribute_name == "value":
171 header.schema = create_schema(
172 field=SignatureField.create(field_type=type(attribute_value)),
173 generate_examples=False,
174 plugins=plugins,
175 schemas=schemas,
176 prefer_alias=False,
177 )
178
179 elif attribute_name != "documentation_only":
180 setattr(header, attribute_name, attribute_value)
181
182 response.headers[response_header.name] = header
183
184 if cookies := route_handler.resolve_response_cookies():
185 response.headers["Set-Cookie"] = OpenAPIHeader(
186 schema=Schema(
187 all_of=[create_cookie_schema(cookie=cookie) for cookie in sorted(cookies, key=attrgetter("key"))]
188 )
189 )
190
191 return response
192
193
194 def create_error_responses(exceptions: list[type[HTTPException]]) -> Iterator[tuple[str, OpenAPIResponse]]:
195 """Create the schema for error responses, if any."""
196 grouped_exceptions: dict[int, list[type[HTTPException]]] = {}
197 for exc in exceptions:
198 if not grouped_exceptions.get(exc.status_code):
199 grouped_exceptions[exc.status_code] = []
200 grouped_exceptions[exc.status_code].append(exc)
201 for status_code, exception_group in grouped_exceptions.items():
202 exceptions_schemas = [
203 Schema(
204 type=OpenAPIType.OBJECT,
205 required=["detail", "status_code"],
206 properties={
207 "status_code": Schema(type=OpenAPIType.INTEGER),
208 "detail": Schema(type=OpenAPIType.STRING),
209 "extra": Schema(
210 type=[OpenAPIType.NULL, OpenAPIType.OBJECT, OpenAPIType.ARRAY], additional_properties=Schema()
211 ),
212 },
213 description=pascal_case_to_text(get_name(exc)),
214 examples=[{"status_code": status_code, "detail": HTTPStatus(status_code).phrase, "extra": {}}],
215 )
216 for exc in exception_group
217 ]
218 if len(exceptions_schemas) > 1: # noqa: SIM108
219 schema = Schema(one_of=exceptions_schemas)
220 else:
221 schema = exceptions_schemas[0]
222 yield str(status_code), OpenAPIResponse(
223 description=HTTPStatus(status_code).description,
224 content={MediaType.JSON: OpenAPIMediaType(schema=schema)},
225 )
226
227
228 def create_additional_responses(
229 route_handler: HTTPRouteHandler,
230 plugins: list[OpenAPISchemaPluginProtocol],
231 schemas: dict[str, Schema],
232 ) -> Iterator[tuple[str, OpenAPIResponse]]:
233 """Create the schema for additional responses, if any."""
234 if not route_handler.responses:
235 return
236
237 for status_code, additional_response in route_handler.responses.items():
238 schema = create_schema(
239 field=SignatureField.create(field_type=additional_response.data_container),
240 generate_examples=additional_response.generate_examples,
241 plugins=plugins,
242 schemas=schemas,
243 prefer_alias=False,
244 )
245 yield str(status_code), OpenAPIResponse(
246 description=additional_response.description,
247 content={additional_response.media_type: OpenAPIMediaType(schema=schema)},
248 )
249
250
251 def create_responses(
252 route_handler: HTTPRouteHandler,
253 raises_validation_error: bool,
254 generate_examples: bool,
255 plugins: list[OpenAPISchemaPluginProtocol],
256 schemas: dict[str, Schema],
257 ) -> Responses | None:
258 """Create a Response model embedded in a `Responses` dictionary for the given RouteHandler or return None."""
259
260 responses: Responses = {
261 str(route_handler.status_code): create_success_response(
262 generate_examples=generate_examples, plugins=plugins, route_handler=route_handler, schemas=schemas
263 ),
264 }
265
266 exceptions = list(route_handler.raises or [])
267 if raises_validation_error and ValidationException not in exceptions:
268 exceptions.append(ValidationException)
269 for status_code, response in create_error_responses(exceptions=exceptions):
270 responses[status_code] = response
271
272 for status_code, response in create_additional_responses(
273 route_handler=route_handler, plugins=plugins, schemas=schemas
274 ):
275 responses[status_code] = response
276
277 return responses or None
278
[end of litestar/_openapi/responses.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/_openapi/responses.py b/litestar/_openapi/responses.py
--- a/litestar/_openapi/responses.py
+++ b/litestar/_openapi/responses.py
@@ -91,13 +91,13 @@
if return_annotation is not Signature.empty and not return_type.is_subclass_of(
(NoneType, File, Redirect, Stream, ASGIResponse)
):
+ media_type = route_handler.media_type
if return_annotation is Template:
return_annotation = str
- route_handler.media_type = get_enum_string_value(MediaType.HTML)
+ media_type = media_type or MediaType.HTML
elif return_type.is_subclass_of(LitestarResponse):
return_annotation = return_type.inner_types[0].annotation if return_type.inner_types else Any
- if not route_handler.media_type:
- route_handler.media_type = get_enum_string_value(MediaType.JSON)
+ media_type = media_type or MediaType.JSON
if dto := route_handler.resolve_return_dto():
result = dto.create_openapi_schema("return", str(route_handler), generate_examples, schemas, False)
@@ -116,8 +116,7 @@
schema.content_media_type = route_handler.content_media_type
response = OpenAPIResponse(
- content={route_handler.media_type: OpenAPIMediaType(schema=result)},
- description=description,
+ content={get_enum_string_value(media_type): OpenAPIMediaType(schema=result)}, description=description
)
elif return_type.is_subclass_of(Redirect):
| {"golden_diff": "diff --git a/litestar/_openapi/responses.py b/litestar/_openapi/responses.py\n--- a/litestar/_openapi/responses.py\n+++ b/litestar/_openapi/responses.py\n@@ -91,13 +91,13 @@\n if return_annotation is not Signature.empty and not return_type.is_subclass_of(\n (NoneType, File, Redirect, Stream, ASGIResponse)\n ):\n+ media_type = route_handler.media_type\n if return_annotation is Template:\n return_annotation = str\n- route_handler.media_type = get_enum_string_value(MediaType.HTML)\n+ media_type = media_type or MediaType.HTML\n elif return_type.is_subclass_of(LitestarResponse):\n return_annotation = return_type.inner_types[0].annotation if return_type.inner_types else Any\n- if not route_handler.media_type:\n- route_handler.media_type = get_enum_string_value(MediaType.JSON)\n+ media_type = media_type or MediaType.JSON\n \n if dto := route_handler.resolve_return_dto():\n result = dto.create_openapi_schema(\"return\", str(route_handler), generate_examples, schemas, False)\n@@ -116,8 +116,7 @@\n schema.content_media_type = route_handler.content_media_type\n \n response = OpenAPIResponse(\n- content={route_handler.media_type: OpenAPIMediaType(schema=result)},\n- description=description,\n+ content={get_enum_string_value(media_type): OpenAPIMediaType(schema=result)}, description=description\n )\n \n elif return_type.is_subclass_of(Redirect):\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom copy import copy\nfrom dataclasses import asdict\nfrom http import HTTPStatus\nfrom inspect import Signature\nfrom operator import attrgetter\nfrom typing import TYPE_CHECKING, Any, Iterator\n\nfrom litestar._openapi.schema_generation import create_schema\nfrom litestar._signature.field import SignatureField\nfrom litestar.enums import MediaType\nfrom litestar.exceptions import HTTPException, ValidationException\nfrom litestar.openapi.spec import OpenAPIResponse\nfrom litestar.openapi.spec.enums import OpenAPIFormat, OpenAPIType\nfrom litestar.openapi.spec.header import OpenAPIHeader\nfrom litestar.openapi.spec.media_type import OpenAPIMediaType\nfrom litestar.openapi.spec.schema import Schema\nfrom litestar.response import (\n File,\n Redirect,\n Stream,\n Template,\n)\nfrom litestar.response import (\n Response as LitestarResponse,\n)\nfrom litestar.response.base import ASGIResponse\nfrom litestar.types.builtin_types import NoneType\nfrom litestar.utils import get_enum_string_value, get_name\n\nif TYPE_CHECKING:\n from litestar.datastructures.cookie import Cookie\n from litestar.handlers.http_handlers import HTTPRouteHandler\n from litestar.openapi.spec.responses import Responses\n from litestar.plugins import OpenAPISchemaPluginProtocol\n\n\n__all__ = (\n \"create_additional_responses\",\n \"create_cookie_schema\",\n \"create_error_responses\",\n \"create_responses\",\n \"create_success_response\",\n)\n\nCAPITAL_LETTERS_PATTERN = re.compile(r\"(?=[A-Z])\")\n\n\ndef pascal_case_to_text(string: str) -> str:\n \"\"\"Given a 'PascalCased' string, return its split form- 'Pascal Cased'.\"\"\"\n return \" \".join(re.split(CAPITAL_LETTERS_PATTERN, string)).strip()\n\n\ndef create_cookie_schema(cookie: Cookie) -> Schema:\n \"\"\"Given a Cookie instance, return its corresponding OpenAPI schema.\n\n Args:\n cookie: Cookie\n\n Returns:\n Schema\n \"\"\"\n cookie_copy = copy(cookie)\n cookie_copy.value = \"<string>\"\n value = cookie_copy.to_header(header=\"\")\n return Schema(description=cookie.description or \"\", example=value)\n\n\ndef create_success_response( # noqa: C901\n route_handler: HTTPRouteHandler,\n generate_examples: bool,\n plugins: list[OpenAPISchemaPluginProtocol],\n schemas: dict[str, Schema],\n) -> OpenAPIResponse:\n \"\"\"Create the schema for a success response.\"\"\"\n return_type = route_handler.parsed_fn_signature.return_type\n return_annotation = return_type.annotation\n default_descriptions: dict[Any, str] = {\n Stream: \"Stream Response\",\n Redirect: \"Redirect Response\",\n File: \"File Download\",\n }\n description = (\n route_handler.response_description\n or default_descriptions.get(return_annotation)\n or HTTPStatus(route_handler.status_code).description\n )\n\n if return_annotation is not Signature.empty and not return_type.is_subclass_of(\n (NoneType, File, Redirect, Stream, ASGIResponse)\n ):\n if return_annotation is Template:\n return_annotation = str\n route_handler.media_type = get_enum_string_value(MediaType.HTML)\n elif return_type.is_subclass_of(LitestarResponse):\n return_annotation = return_type.inner_types[0].annotation if return_type.inner_types else Any\n if not route_handler.media_type:\n route_handler.media_type = get_enum_string_value(MediaType.JSON)\n\n if dto 
:= route_handler.resolve_return_dto():\n result = dto.create_openapi_schema(\"return\", str(route_handler), generate_examples, schemas, False)\n else:\n result = create_schema(\n field=SignatureField.create(field_type=return_annotation),\n generate_examples=generate_examples,\n plugins=plugins,\n schemas=schemas,\n prefer_alias=False,\n )\n\n schema = result if isinstance(result, Schema) else schemas[result.value]\n\n schema.content_encoding = route_handler.content_encoding\n schema.content_media_type = route_handler.content_media_type\n\n response = OpenAPIResponse(\n content={route_handler.media_type: OpenAPIMediaType(schema=result)},\n description=description,\n )\n\n elif return_type.is_subclass_of(Redirect):\n response = OpenAPIResponse(\n content=None,\n description=description,\n headers={\n \"location\": OpenAPIHeader(\n schema=Schema(type=OpenAPIType.STRING), description=\"target path for the redirect\"\n )\n },\n )\n\n elif return_type.is_subclass_of((File, Stream)):\n response = OpenAPIResponse(\n content={\n route_handler.media_type: OpenAPIMediaType(\n schema=Schema(\n type=OpenAPIType.STRING,\n content_encoding=route_handler.content_encoding or \"application/octet-stream\",\n content_media_type=route_handler.content_media_type,\n ),\n )\n },\n description=description,\n headers={\n \"content-length\": OpenAPIHeader(\n schema=Schema(type=OpenAPIType.STRING), description=\"File size in bytes\"\n ),\n \"last-modified\": OpenAPIHeader(\n schema=Schema(type=OpenAPIType.STRING, format=OpenAPIFormat.DATE_TIME),\n description=\"Last modified data-time in RFC 2822 format\",\n ),\n \"etag\": OpenAPIHeader(schema=Schema(type=OpenAPIType.STRING), description=\"Entity tag\"),\n },\n )\n\n else:\n response = OpenAPIResponse(\n content=None,\n description=description,\n )\n\n if response.headers is None:\n response.headers = {}\n\n for response_header in route_handler.resolve_response_headers():\n header = OpenAPIHeader()\n for attribute_name, attribute_value in ((k, v) for k, v in asdict(response_header).items() if v is not None):\n if attribute_name == \"value\":\n header.schema = create_schema(\n field=SignatureField.create(field_type=type(attribute_value)),\n generate_examples=False,\n plugins=plugins,\n schemas=schemas,\n prefer_alias=False,\n )\n\n elif attribute_name != \"documentation_only\":\n setattr(header, attribute_name, attribute_value)\n\n response.headers[response_header.name] = header\n\n if cookies := route_handler.resolve_response_cookies():\n response.headers[\"Set-Cookie\"] = OpenAPIHeader(\n schema=Schema(\n all_of=[create_cookie_schema(cookie=cookie) for cookie in sorted(cookies, key=attrgetter(\"key\"))]\n )\n )\n\n return response\n\n\ndef create_error_responses(exceptions: list[type[HTTPException]]) -> Iterator[tuple[str, OpenAPIResponse]]:\n \"\"\"Create the schema for error responses, if any.\"\"\"\n grouped_exceptions: dict[int, list[type[HTTPException]]] = {}\n for exc in exceptions:\n if not grouped_exceptions.get(exc.status_code):\n grouped_exceptions[exc.status_code] = []\n grouped_exceptions[exc.status_code].append(exc)\n for status_code, exception_group in grouped_exceptions.items():\n exceptions_schemas = [\n Schema(\n type=OpenAPIType.OBJECT,\n required=[\"detail\", \"status_code\"],\n properties={\n \"status_code\": Schema(type=OpenAPIType.INTEGER),\n \"detail\": Schema(type=OpenAPIType.STRING),\n \"extra\": Schema(\n type=[OpenAPIType.NULL, OpenAPIType.OBJECT, OpenAPIType.ARRAY], additional_properties=Schema()\n ),\n },\n 
description=pascal_case_to_text(get_name(exc)),\n examples=[{\"status_code\": status_code, \"detail\": HTTPStatus(status_code).phrase, \"extra\": {}}],\n )\n for exc in exception_group\n ]\n if len(exceptions_schemas) > 1: # noqa: SIM108\n schema = Schema(one_of=exceptions_schemas)\n else:\n schema = exceptions_schemas[0]\n yield str(status_code), OpenAPIResponse(\n description=HTTPStatus(status_code).description,\n content={MediaType.JSON: OpenAPIMediaType(schema=schema)},\n )\n\n\ndef create_additional_responses(\n route_handler: HTTPRouteHandler,\n plugins: list[OpenAPISchemaPluginProtocol],\n schemas: dict[str, Schema],\n) -> Iterator[tuple[str, OpenAPIResponse]]:\n \"\"\"Create the schema for additional responses, if any.\"\"\"\n if not route_handler.responses:\n return\n\n for status_code, additional_response in route_handler.responses.items():\n schema = create_schema(\n field=SignatureField.create(field_type=additional_response.data_container),\n generate_examples=additional_response.generate_examples,\n plugins=plugins,\n schemas=schemas,\n prefer_alias=False,\n )\n yield str(status_code), OpenAPIResponse(\n description=additional_response.description,\n content={additional_response.media_type: OpenAPIMediaType(schema=schema)},\n )\n\n\ndef create_responses(\n route_handler: HTTPRouteHandler,\n raises_validation_error: bool,\n generate_examples: bool,\n plugins: list[OpenAPISchemaPluginProtocol],\n schemas: dict[str, Schema],\n) -> Responses | None:\n \"\"\"Create a Response model embedded in a `Responses` dictionary for the given RouteHandler or return None.\"\"\"\n\n responses: Responses = {\n str(route_handler.status_code): create_success_response(\n generate_examples=generate_examples, plugins=plugins, route_handler=route_handler, schemas=schemas\n ),\n }\n\n exceptions = list(route_handler.raises or [])\n if raises_validation_error and ValidationException not in exceptions:\n exceptions.append(ValidationException)\n for status_code, response in create_error_responses(exceptions=exceptions):\n responses[status_code] = response\n\n for status_code, response in create_additional_responses(\n route_handler=route_handler, plugins=plugins, schemas=schemas\n ):\n responses[status_code] = response\n\n return responses or None\n", "path": "litestar/_openapi/responses.py"}]} | 3,500 | 338 |
gh_patches_debug_23201 | rasdani/github-patches | git_diff | matrix-org__synapse-11530 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Synapse is overly accepting of content in the `unsigned` object in events received over federation
Synapse makes use of various properties within the `unsigned` object of events - either internally, or by passing them on to clients. One example is `replaces_state`, which is used to store the event id of the previous event with the same `type` and `state_key`, and is later used to populate the `prev_content` property for events served to clients.
The problem is that homeservers are free to populate `unsigned`, without it affecting the event hashes or signatures; a malicious or buggy homeserver could therefore populate the content with incorrect data.
Taking the example of `replaces_state`, Synapse overwrites this property when receiving an event, but only if there was previously an event with the same `type` and `state_key` in the room state; it is otherwise passed through unchanged. So, a malicious homeserver could confuse remote servers' clients by sending incorrect values of `replaces_state` over federation.
---
The specification is not clear on how unspecified properties within `unsigned` should be handled, but I think they should be stripped off by the receiving homeserver. This will ensure that if, in future, the C-S API spec is extended to specify new properties be added to `unsigned`, there will be no confusion about whether they were added by the local or remote homeserver.
As far as I am aware, the only properties that *should* be allowed in `unsigned` over federation are:
* `invite_room_state`
* `knock_room_state`
* `age` - though see also https://github.com/matrix-org/synapse/issues/8429.
[Aside: in an ideal world, we might have different properties for "things added by the remote homeserver - treat with caution!" vs "things added by the local homeserver - can be trusted". However, that ship has probably sailed for now.]
</issue>
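A minimal sketch of the whitelist-stripping behaviour the issue asks for, assuming the event arrives as a plain JSON dict; the function name and the exact whitelist handling here are illustrative rather than Synapse's actual helpers:

```python
# Illustrative sketch: keep only the federation-safe keys in "unsigned".
ALLOWED_UNSIGNED_KEYS = {"invite_room_state", "knock_room_state", "age"}


def strip_unsigned(pdu_json: dict) -> None:
    """Mutate pdu_json so its "unsigned" object only keeps whitelisted keys."""
    unsigned = pdu_json.get("unsigned")
    if not isinstance(unsigned, dict):
        # Replace a missing or malformed "unsigned" with an empty object.
        pdu_json["unsigned"] = {}
        return
    pdu_json["unsigned"] = {
        k: v for k, v in unsigned.items() if k in ALLOWED_UNSIGNED_KEYS
    }
```

Hooking a step like this into the PDU-parsing path means remote-supplied `unsigned` values never reach local processing or clients unchecked.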
<code>
[start of synapse/federation/federation_base.py]
1 # Copyright 2015, 2016 OpenMarket Ltd
2 # Copyright 2020 The Matrix.org Foundation C.I.C.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 import logging
16 from typing import TYPE_CHECKING
17
18 from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
19 from synapse.api.errors import Codes, SynapseError
20 from synapse.api.room_versions import EventFormatVersions, RoomVersion
21 from synapse.crypto.event_signing import check_event_content_hash
22 from synapse.crypto.keyring import Keyring
23 from synapse.events import EventBase, make_event_from_dict
24 from synapse.events.utils import prune_event, validate_canonicaljson
25 from synapse.http.servlet import assert_params_in_dict
26 from synapse.types import JsonDict, get_domain_from_id
27
28 if TYPE_CHECKING:
29 from synapse.server import HomeServer
30
31
32 logger = logging.getLogger(__name__)
33
34
35 class FederationBase:
36 def __init__(self, hs: "HomeServer"):
37 self.hs = hs
38
39 self.server_name = hs.hostname
40 self.keyring = hs.get_keyring()
41 self.spam_checker = hs.get_spam_checker()
42 self.store = hs.get_datastore()
43 self._clock = hs.get_clock()
44
45 async def _check_sigs_and_hash(
46 self, room_version: RoomVersion, pdu: EventBase
47 ) -> EventBase:
48 """Checks that event is correctly signed by the sending server.
49
50 Args:
51 room_version: The room version of the PDU
52 pdu: the event to be checked
53
54 Returns:
55 * the original event if the checks pass
56 * a redacted version of the event (if the signature
57 matched but the hash did not)
58 * throws a SynapseError if the signature check failed."""
59 try:
60 await _check_sigs_on_pdu(self.keyring, room_version, pdu)
61 except SynapseError as e:
62 logger.warning(
63 "Signature check failed for %s: %s",
64 pdu.event_id,
65 e,
66 )
67 raise
68
69 if not check_event_content_hash(pdu):
70 # let's try to distinguish between failures because the event was
71 # redacted (which are somewhat expected) vs actual ball-tampering
72 # incidents.
73 #
74 # This is just a heuristic, so we just assume that if the keys are
75 # about the same between the redacted and received events, then the
76 # received event was probably a redacted copy (but we then use our
77 # *actual* redacted copy to be on the safe side.)
78 redacted_event = prune_event(pdu)
79 if set(redacted_event.keys()) == set(pdu.keys()) and set(
80 redacted_event.content.keys()
81 ) == set(pdu.content.keys()):
82 logger.info(
83 "Event %s seems to have been redacted; using our redacted copy",
84 pdu.event_id,
85 )
86 else:
87 logger.warning(
88 "Event %s content has been tampered, redacting",
89 pdu.event_id,
90 )
91 return redacted_event
92
93 result = await self.spam_checker.check_event_for_spam(pdu)
94
95 if result:
96 logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
97 # we redact (to save disk space) as well as soft-failing (to stop
98 # using the event in prev_events).
99 redacted_event = prune_event(pdu)
100 redacted_event.internal_metadata.soft_failed = True
101 return redacted_event
102
103 return pdu
104
105
106 async def _check_sigs_on_pdu(
107 keyring: Keyring, room_version: RoomVersion, pdu: EventBase
108 ) -> None:
109 """Check that the given events are correctly signed
110
111 Raise a SynapseError if the event wasn't correctly signed.
112
113 Args:
114 keyring: keyring object to do the checks
115 room_version: the room version of the PDUs
116 pdus: the events to be checked
117 """
118
119 # we want to check that the event is signed by:
120 #
121 # (a) the sender's server
122 #
123 # - except in the case of invites created from a 3pid invite, which are exempt
124 # from this check, because the sender has to match that of the original 3pid
125 # invite, but the event may come from a different HS, for reasons that I don't
126 # entirely grok (why do the senders have to match? and if they do, why doesn't the
127 # joining server ask the inviting server to do the switcheroo with
128 # exchange_third_party_invite?).
129 #
130 # That's pretty awful, since redacting such an invite will render it invalid
131 # (because it will then look like a regular invite without a valid signature),
132 # and signatures are *supposed* to be valid whether or not an event has been
133 # redacted. But this isn't the worst of the ways that 3pid invites are broken.
134 #
135 # (b) for V1 and V2 rooms, the server which created the event_id
136 #
137 # let's start by getting the domain for each pdu, and flattening the event back
138 # to JSON.
139
140 # First we check that the sender event is signed by the sender's domain
141 # (except if its a 3pid invite, in which case it may be sent by any server)
142 if not _is_invite_via_3pid(pdu):
143 try:
144 await keyring.verify_event_for_server(
145 get_domain_from_id(pdu.sender),
146 pdu,
147 pdu.origin_server_ts if room_version.enforce_key_validity else 0,
148 )
149 except Exception as e:
150 errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
151 pdu.event_id,
152 get_domain_from_id(pdu.sender),
153 e,
154 )
155 raise SynapseError(403, errmsg, Codes.FORBIDDEN)
156
157 # now let's look for events where the sender's domain is different to the
158 # event id's domain (normally only the case for joins/leaves), and add additional
159 # checks. Only do this if the room version has a concept of event ID domain
160 # (ie, the room version uses old-style non-hash event IDs).
161 if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
162 pdu.event_id
163 ) != get_domain_from_id(pdu.sender):
164 try:
165 await keyring.verify_event_for_server(
166 get_domain_from_id(pdu.event_id),
167 pdu,
168 pdu.origin_server_ts if room_version.enforce_key_validity else 0,
169 )
170 except Exception as e:
171 errmsg = (
172 "event id %s: unable to verify signature for event id domain %s: %s"
173 % (
174 pdu.event_id,
175 get_domain_from_id(pdu.event_id),
176 e,
177 )
178 )
179 raise SynapseError(403, errmsg, Codes.FORBIDDEN)
180
181 # If this is a join event for a restricted room it may have been authorised
182 # via a different server from the sending server. Check those signatures.
183 if (
184 room_version.msc3083_join_rules
185 and pdu.type == EventTypes.Member
186 and pdu.membership == Membership.JOIN
187 and EventContentFields.AUTHORISING_USER in pdu.content
188 ):
189 authorising_server = get_domain_from_id(
190 pdu.content[EventContentFields.AUTHORISING_USER]
191 )
192 try:
193 await keyring.verify_event_for_server(
194 authorising_server,
195 pdu,
196 pdu.origin_server_ts if room_version.enforce_key_validity else 0,
197 )
198 except Exception as e:
199 errmsg = (
200 "event id %s: unable to verify signature for authorising server %s: %s"
201 % (
202 pdu.event_id,
203 authorising_server,
204 e,
205 )
206 )
207 raise SynapseError(403, errmsg, Codes.FORBIDDEN)
208
209
210 def _is_invite_via_3pid(event: EventBase) -> bool:
211 return (
212 event.type == EventTypes.Member
213 and event.membership == Membership.INVITE
214 and "third_party_invite" in event.content
215 )
216
217
218 def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:
219 """Construct an EventBase from an event json received over federation
220
221 Args:
222 pdu_json: pdu as received over federation
223 room_version: The version of the room this event belongs to
224
225 Raises:
226 SynapseError: if the pdu is missing required fields or is otherwise
227 not a valid matrix event
228 """
229 # we could probably enforce a bunch of other fields here (room_id, sender,
230 # origin, etc etc)
231 assert_params_in_dict(pdu_json, ("type", "depth"))
232
233 depth = pdu_json["depth"]
234 if not isinstance(depth, int):
235 raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON)
236
237 if depth < 0:
238 raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
239 elif depth > MAX_DEPTH:
240 raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
241
242 # Validate that the JSON conforms to the specification.
243 if room_version.strict_canonicaljson:
244 validate_canonicaljson(pdu_json)
245
246 event = make_event_from_dict(pdu_json, room_version)
247 return event
248
[end of synapse/federation/federation_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -230,6 +230,10 @@
# origin, etc etc)
assert_params_in_dict(pdu_json, ("type", "depth"))
+ # Strip any unauthorized values from "unsigned" if they exist
+ if "unsigned" in pdu_json:
+ _strip_unsigned_values(pdu_json)
+
depth = pdu_json["depth"]
if not isinstance(depth, int):
raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON)
@@ -245,3 +249,24 @@
event = make_event_from_dict(pdu_json, room_version)
return event
+
+
+def _strip_unsigned_values(pdu_dict: JsonDict) -> None:
+ """
+ Strip any unsigned values unless specifically allowed, as defined by the whitelist.
+
+ pdu: the json dict to strip values from. Note that the dict is mutated by this
+ function
+ """
+ unsigned = pdu_dict["unsigned"]
+
+ if not isinstance(unsigned, dict):
+ pdu_dict["unsigned"] = {}
+
+ if pdu_dict["type"] == "m.room.member":
+ whitelist = ["knock_room_state", "invite_room_state", "age"]
+ else:
+ whitelist = ["age"]
+
+ filtered_unsigned = {k: v for k, v in unsigned.items() if k in whitelist}
+ pdu_dict["unsigned"] = filtered_unsigned
| {"golden_diff": "diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py\n--- a/synapse/federation/federation_base.py\n+++ b/synapse/federation/federation_base.py\n@@ -230,6 +230,10 @@\n # origin, etc etc)\n assert_params_in_dict(pdu_json, (\"type\", \"depth\"))\n \n+ # Strip any unauthorized values from \"unsigned\" if they exist\n+ if \"unsigned\" in pdu_json:\n+ _strip_unsigned_values(pdu_json)\n+\n depth = pdu_json[\"depth\"]\n if not isinstance(depth, int):\n raise SynapseError(400, \"Depth %r not an intger\" % (depth,), Codes.BAD_JSON)\n@@ -245,3 +249,24 @@\n \n event = make_event_from_dict(pdu_json, room_version)\n return event\n+\n+\n+def _strip_unsigned_values(pdu_dict: JsonDict) -> None:\n+ \"\"\"\n+ Strip any unsigned values unless specifically allowed, as defined by the whitelist.\n+\n+ pdu: the json dict to strip values from. Note that the dict is mutated by this\n+ function\n+ \"\"\"\n+ unsigned = pdu_dict[\"unsigned\"]\n+\n+ if not isinstance(unsigned, dict):\n+ pdu_dict[\"unsigned\"] = {}\n+\n+ if pdu_dict[\"type\"] == \"m.room.member\":\n+ whitelist = [\"knock_room_state\", \"invite_room_state\", \"age\"]\n+ else:\n+ whitelist = [\"age\"]\n+\n+ filtered_unsigned = {k: v for k, v in unsigned.items() if k in whitelist}\n+ pdu_dict[\"unsigned\"] = filtered_unsigned\n", "issue": "Synapse is overly accepting of content in the `unsigned` object in events received over federation\nSynapse makes use of various properties within the `unsigned` object of events - either internally, or by passing them on to clients. One example is `replaces_state`, which is used to store the event id of the previous event with the same `type` and `state_key`, and is later used to populate the `prev_content` property for events served to clients.\r\n\r\nThe problem is that homeservers are free to populate `unsigned`, without it affecting the event hashes or signatures; a malicious or buggy homeserver could therefore populate the content with incorrect data.\r\n\r\nTaking the example of `replaces_state`, Synapse overwrites this property when receiving an event, but only if there was previously an event with the same `type` and `state_key` in the room state; it is otherwise passed through unchanged. So, a malicious homeserver could confuse remote servers' clients by sending incorrect values of `replaces_state` over federation.\r\n\r\n---\r\n\r\nThe specification is not clear on how unspecified properties within `unsigned` should be handled, but I think they should be stripped off by the receiving homeserver. This will ensure that if, in future, the C-S API spec is extended to specify new properties be added to `unsigned`, there will be no confusion about whether they were added by the local or remote homeserver.\r\n\r\nAs far as I am aware, the only properties that *should* be allowed in `unsigned` over federation are:\r\n * `invite_room_state`\r\n * `knock_room_state`\r\n * `age` - though see also https://github.com/matrix-org/synapse/issues/8429.\r\n\r\n[Aside: in an ideal world, we might have different properties for \"things added by the remote homeserver - treat with caution!\" vs \"things added by the local homeserver - can be trusted\". However, that ship has probably sailed for now.] 
\n", "before_files": [{"content": "# Copyright 2015, 2016 OpenMarket Ltd\n# Copyright 2020 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom typing import TYPE_CHECKING\n\nfrom synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership\nfrom synapse.api.errors import Codes, SynapseError\nfrom synapse.api.room_versions import EventFormatVersions, RoomVersion\nfrom synapse.crypto.event_signing import check_event_content_hash\nfrom synapse.crypto.keyring import Keyring\nfrom synapse.events import EventBase, make_event_from_dict\nfrom synapse.events.utils import prune_event, validate_canonicaljson\nfrom synapse.http.servlet import assert_params_in_dict\nfrom synapse.types import JsonDict, get_domain_from_id\n\nif TYPE_CHECKING:\n from synapse.server import HomeServer\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FederationBase:\n def __init__(self, hs: \"HomeServer\"):\n self.hs = hs\n\n self.server_name = hs.hostname\n self.keyring = hs.get_keyring()\n self.spam_checker = hs.get_spam_checker()\n self.store = hs.get_datastore()\n self._clock = hs.get_clock()\n\n async def _check_sigs_and_hash(\n self, room_version: RoomVersion, pdu: EventBase\n ) -> EventBase:\n \"\"\"Checks that event is correctly signed by the sending server.\n\n Args:\n room_version: The room version of the PDU\n pdu: the event to be checked\n\n Returns:\n * the original event if the checks pass\n * a redacted version of the event (if the signature\n matched but the hash did not)\n * throws a SynapseError if the signature check failed.\"\"\"\n try:\n await _check_sigs_on_pdu(self.keyring, room_version, pdu)\n except SynapseError as e:\n logger.warning(\n \"Signature check failed for %s: %s\",\n pdu.event_id,\n e,\n )\n raise\n\n if not check_event_content_hash(pdu):\n # let's try to distinguish between failures because the event was\n # redacted (which are somewhat expected) vs actual ball-tampering\n # incidents.\n #\n # This is just a heuristic, so we just assume that if the keys are\n # about the same between the redacted and received events, then the\n # received event was probably a redacted copy (but we then use our\n # *actual* redacted copy to be on the safe side.)\n redacted_event = prune_event(pdu)\n if set(redacted_event.keys()) == set(pdu.keys()) and set(\n redacted_event.content.keys()\n ) == set(pdu.content.keys()):\n logger.info(\n \"Event %s seems to have been redacted; using our redacted copy\",\n pdu.event_id,\n )\n else:\n logger.warning(\n \"Event %s content has been tampered, redacting\",\n pdu.event_id,\n )\n return redacted_event\n\n result = await self.spam_checker.check_event_for_spam(pdu)\n\n if result:\n logger.warning(\"Event contains spam, soft-failing %s\", pdu.event_id)\n # we redact (to save disk space) as well as soft-failing (to stop\n # using the event in prev_events).\n redacted_event = prune_event(pdu)\n redacted_event.internal_metadata.soft_failed = True\n return redacted_event\n\n return pdu\n\n\nasync 
def _check_sigs_on_pdu(\n keyring: Keyring, room_version: RoomVersion, pdu: EventBase\n) -> None:\n \"\"\"Check that the given events are correctly signed\n\n Raise a SynapseError if the event wasn't correctly signed.\n\n Args:\n keyring: keyring object to do the checks\n room_version: the room version of the PDUs\n pdus: the events to be checked\n \"\"\"\n\n # we want to check that the event is signed by:\n #\n # (a) the sender's server\n #\n # - except in the case of invites created from a 3pid invite, which are exempt\n # from this check, because the sender has to match that of the original 3pid\n # invite, but the event may come from a different HS, for reasons that I don't\n # entirely grok (why do the senders have to match? and if they do, why doesn't the\n # joining server ask the inviting server to do the switcheroo with\n # exchange_third_party_invite?).\n #\n # That's pretty awful, since redacting such an invite will render it invalid\n # (because it will then look like a regular invite without a valid signature),\n # and signatures are *supposed* to be valid whether or not an event has been\n # redacted. But this isn't the worst of the ways that 3pid invites are broken.\n #\n # (b) for V1 and V2 rooms, the server which created the event_id\n #\n # let's start by getting the domain for each pdu, and flattening the event back\n # to JSON.\n\n # First we check that the sender event is signed by the sender's domain\n # (except if its a 3pid invite, in which case it may be sent by any server)\n if not _is_invite_via_3pid(pdu):\n try:\n await keyring.verify_event_for_server(\n get_domain_from_id(pdu.sender),\n pdu,\n pdu.origin_server_ts if room_version.enforce_key_validity else 0,\n )\n except Exception as e:\n errmsg = \"event id %s: unable to verify signature for sender %s: %s\" % (\n pdu.event_id,\n get_domain_from_id(pdu.sender),\n e,\n )\n raise SynapseError(403, errmsg, Codes.FORBIDDEN)\n\n # now let's look for events where the sender's domain is different to the\n # event id's domain (normally only the case for joins/leaves), and add additional\n # checks. Only do this if the room version has a concept of event ID domain\n # (ie, the room version uses old-style non-hash event IDs).\n if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(\n pdu.event_id\n ) != get_domain_from_id(pdu.sender):\n try:\n await keyring.verify_event_for_server(\n get_domain_from_id(pdu.event_id),\n pdu,\n pdu.origin_server_ts if room_version.enforce_key_validity else 0,\n )\n except Exception as e:\n errmsg = (\n \"event id %s: unable to verify signature for event id domain %s: %s\"\n % (\n pdu.event_id,\n get_domain_from_id(pdu.event_id),\n e,\n )\n )\n raise SynapseError(403, errmsg, Codes.FORBIDDEN)\n\n # If this is a join event for a restricted room it may have been authorised\n # via a different server from the sending server. 
Check those signatures.\n if (\n room_version.msc3083_join_rules\n and pdu.type == EventTypes.Member\n and pdu.membership == Membership.JOIN\n and EventContentFields.AUTHORISING_USER in pdu.content\n ):\n authorising_server = get_domain_from_id(\n pdu.content[EventContentFields.AUTHORISING_USER]\n )\n try:\n await keyring.verify_event_for_server(\n authorising_server,\n pdu,\n pdu.origin_server_ts if room_version.enforce_key_validity else 0,\n )\n except Exception as e:\n errmsg = (\n \"event id %s: unable to verify signature for authorising server %s: %s\"\n % (\n pdu.event_id,\n authorising_server,\n e,\n )\n )\n raise SynapseError(403, errmsg, Codes.FORBIDDEN)\n\n\ndef _is_invite_via_3pid(event: EventBase) -> bool:\n return (\n event.type == EventTypes.Member\n and event.membership == Membership.INVITE\n and \"third_party_invite\" in event.content\n )\n\n\ndef event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase:\n \"\"\"Construct an EventBase from an event json received over federation\n\n Args:\n pdu_json: pdu as received over federation\n room_version: The version of the room this event belongs to\n\n Raises:\n SynapseError: if the pdu is missing required fields or is otherwise\n not a valid matrix event\n \"\"\"\n # we could probably enforce a bunch of other fields here (room_id, sender,\n # origin, etc etc)\n assert_params_in_dict(pdu_json, (\"type\", \"depth\"))\n\n depth = pdu_json[\"depth\"]\n if not isinstance(depth, int):\n raise SynapseError(400, \"Depth %r not an intger\" % (depth,), Codes.BAD_JSON)\n\n if depth < 0:\n raise SynapseError(400, \"Depth too small\", Codes.BAD_JSON)\n elif depth > MAX_DEPTH:\n raise SynapseError(400, \"Depth too large\", Codes.BAD_JSON)\n\n # Validate that the JSON conforms to the specification.\n if room_version.strict_canonicaljson:\n validate_canonicaljson(pdu_json)\n\n event = make_event_from_dict(pdu_json, room_version)\n return event\n", "path": "synapse/federation/federation_base.py"}]} | 3,766 | 371 |
gh_patches_debug_13333 | rasdani/github-patches | git_diff | DDMAL__CantusDB-156 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
need to have fields of chant and sequence models synced
or else, it'll lead to errors, such as not being able to retrieve chants from /chant-search
please fix
</issue>
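One way to catch this kind of drift early is a test that compares the concrete fields of the two models. This is only a sketch, and it assumes a `Chant` model is importable from `main_app.models` alongside `Sequence`:

```python
# Illustrative sketch: fail loudly when Chant and Sequence stop sharing fields.
from main_app.models import Chant, Sequence


def test_chant_and_sequence_fields_are_synced():
    chant_fields = {f.name for f in Chant._meta.fields}
    sequence_fields = {f.name for f in Sequence._meta.fields}
    missing = chant_fields - sequence_fields
    assert not missing, f"Sequence is missing fields that Chant defines: {missing}"
```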
<code>
[start of django/cantusdb_project/main_app/models/sequence.py]
1 from django.contrib.postgres.search import SearchVectorField
2 from django.db import models
3 from main_app.models import BaseModel
4 from users.models import User
5
6
7 class Sequence(BaseModel):
8 visible_status = models.CharField(max_length=1, blank=True, null=True)
9 title = models.CharField(blank=True, null=True, max_length=255)
10 incipit = models.CharField(blank=True, null=True, max_length=255)
11 siglum = models.CharField(blank=True, null=True, max_length=255)
12 folio = models.CharField(blank=True, null=True, max_length=255)
13 sequence = models.CharField(blank=True, null=True, max_length=255)
14 genre = models.ForeignKey("Genre", blank=True, null=True, on_delete=models.PROTECT)
15 rubrics = models.CharField(blank=True, null=True, max_length=255)
16 analecta_hymnica = models.CharField(blank=True, null=True, max_length=255)
17 indexing_notes = models.TextField(blank=True, null=True)
18 date = models.CharField(blank=True, null=True, max_length=255)
19 col1 = models.CharField(blank=True, null=True, max_length=255)
20 col2 = models.CharField(blank=True, null=True, max_length=255)
21 col3 = models.CharField(blank=True, null=True, max_length=255)
22 ah_volume = models.CharField(blank=True, null=True, max_length=255)
23 source = models.ForeignKey(
24 "Source", on_delete=models.PROTECT, blank=True, null=True
25 )
26 cantus_id = models.CharField(blank=True, null=True, max_length=255)
27 image_link = models.URLField(blank=True, null=True)
28 json_info = models.JSONField(null=True, blank=True)
29
30 # The following fields (dummy fields) are just for harmonizing the chant and sequence models to have the same fields
31 # They should never be populated or displayed
32 # The order of the fields must be exactly the same between the seq and chant models
33 marginalia = models.CharField(max_length=63, null=True, blank=True)
34 sequence_number = models.PositiveIntegerField(
35 help_text='Each folio starts with "1"', null=True, blank=True
36 )
37 office = models.ForeignKey(
38 "Office", on_delete=models.PROTECT, null=True, blank=True
39 )
40 position = models.CharField(max_length=63, null=True, blank=True)
41 feast = models.ForeignKey("Feast", on_delete=models.PROTECT, null=True, blank=True)
42 mode = models.CharField(max_length=63, null=True, blank=True)
43 differentia = models.CharField(blank=True, null=True, max_length=63)
44 finalis = models.CharField(blank=True, null=True, max_length=63)
45 extra = models.CharField(blank=True, null=True, max_length=63)
46 chant_range = models.CharField(
47 blank=True,
48 null=True,
49 help_text='Example: "1-c-k-4". Optional field',
50 max_length=255,
51 )
52 addendum = models.CharField(blank=True, null=True, max_length=255)
53 manuscript_full_text_std_spelling = models.TextField(
54 help_text="Manuscript full text with standardized spelling. Enter the words "
55 "according to the manuscript but normalize their spellings following "
56 "Classical Latin forms. Use upper-case letters for proper nouns, "
57 'the first word of each chant, and the first word after "Alleluia" for '
58 "Mass Alleluias. Punctuation is omitted.",
59 null=True,
60 blank=True,
61 )
62 manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True)
63 manuscript_full_text = models.TextField(
64 help_text="Enter the wording, word order and spellings as found in the manuscript"
65 ", with abbreviations resolved to standard words. Use upper-case letters as found"
66 " in the source. Retain “Xpistum” (Christum), “Ihc” (Jesus) and other instances of "
67 "Greek characters with their closest approximations of Latin letters. Some punctuation"
68 " signs and vertical dividing lines | are employed in this field. Repetenda and psalm "
69 "cues can also be recorded here. For more information, contact Cantus Database staff.",
70 null=True,
71 blank=True,
72 )
73 manuscript_full_text_proofread = models.BooleanField(blank=True, null=True)
74 manuscript_syllabized_full_text = models.TextField(null=True, blank=True)
75 volpiano = models.TextField(null=True, blank=True)
76 volpiano_proofread = models.BooleanField(blank=True, null=True)
77 volpiano_notes = models.TextField(null=True, blank=True)
78 volpiano_intervals = models.TextField(null=True, blank=True)
79 # volpiano_intervals = ArrayField(base_field=models.IntegerField(), null=True, blank=True)
80 cao_concordances = models.CharField(blank=True, null=True, max_length=63)
81 proofread_by = models.ForeignKey(
82 User, on_delete=models.PROTECT, null=True, blank=True
83 )
84 melody_id = models.CharField(blank=True, null=True, max_length=63)
85 search_vector = SearchVectorField(null=True, editable=False)
86 content_structure = models.CharField(
87 blank=True,
88 null=True,
89 max_length=64,
90 help_text="Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.",
91 )
92
[end of django/cantusdb_project/main_app/models/sequence.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/models/sequence.py b/django/cantusdb_project/main_app/models/sequence.py
--- a/django/cantusdb_project/main_app/models/sequence.py
+++ b/django/cantusdb_project/main_app/models/sequence.py
@@ -41,6 +41,7 @@
feast = models.ForeignKey("Feast", on_delete=models.PROTECT, null=True, blank=True)
mode = models.CharField(max_length=63, null=True, blank=True)
differentia = models.CharField(blank=True, null=True, max_length=63)
+ differentia_id = models.CharField(blank=True, null=True, max_length=12)
finalis = models.CharField(blank=True, null=True, max_length=63)
extra = models.CharField(blank=True, null=True, max_length=63)
chant_range = models.CharField(
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/sequence.py b/django/cantusdb_project/main_app/models/sequence.py\n--- a/django/cantusdb_project/main_app/models/sequence.py\n+++ b/django/cantusdb_project/main_app/models/sequence.py\n@@ -41,6 +41,7 @@\n feast = models.ForeignKey(\"Feast\", on_delete=models.PROTECT, null=True, blank=True)\n mode = models.CharField(max_length=63, null=True, blank=True)\n differentia = models.CharField(blank=True, null=True, max_length=63)\n+ differentia_id = models.CharField(blank=True, null=True, max_length=12)\n finalis = models.CharField(blank=True, null=True, max_length=63)\n extra = models.CharField(blank=True, null=True, max_length=63)\n chant_range = models.CharField(\n", "issue": "need to have fields of chant and sequence models synced \nor else, it'll lead to errors, such as not being able to retrieve chants from /chant-search\r\nplease fix\n", "before_files": [{"content": "from django.contrib.postgres.search import SearchVectorField\nfrom django.db import models\nfrom main_app.models import BaseModel\nfrom users.models import User\n\n\nclass Sequence(BaseModel):\n visible_status = models.CharField(max_length=1, blank=True, null=True)\n title = models.CharField(blank=True, null=True, max_length=255)\n incipit = models.CharField(blank=True, null=True, max_length=255)\n siglum = models.CharField(blank=True, null=True, max_length=255)\n folio = models.CharField(blank=True, null=True, max_length=255)\n sequence = models.CharField(blank=True, null=True, max_length=255)\n genre = models.ForeignKey(\"Genre\", blank=True, null=True, on_delete=models.PROTECT)\n rubrics = models.CharField(blank=True, null=True, max_length=255)\n analecta_hymnica = models.CharField(blank=True, null=True, max_length=255)\n indexing_notes = models.TextField(blank=True, null=True)\n date = models.CharField(blank=True, null=True, max_length=255)\n col1 = models.CharField(blank=True, null=True, max_length=255)\n col2 = models.CharField(blank=True, null=True, max_length=255)\n col3 = models.CharField(blank=True, null=True, max_length=255)\n ah_volume = models.CharField(blank=True, null=True, max_length=255)\n source = models.ForeignKey(\n \"Source\", on_delete=models.PROTECT, blank=True, null=True\n )\n cantus_id = models.CharField(blank=True, null=True, max_length=255)\n image_link = models.URLField(blank=True, null=True)\n json_info = models.JSONField(null=True, blank=True)\n\n # The following fields (dummy fields) are just for harmonizing the chant and sequence models to have the same fields\n # They should never be populated or displayed\n # The order of the fields must be exactly the same between the seq and chant models\n marginalia = models.CharField(max_length=63, null=True, blank=True)\n sequence_number = models.PositiveIntegerField(\n help_text='Each folio starts with \"1\"', null=True, blank=True\n )\n office = models.ForeignKey(\n \"Office\", on_delete=models.PROTECT, null=True, blank=True\n )\n position = models.CharField(max_length=63, null=True, blank=True)\n feast = models.ForeignKey(\"Feast\", on_delete=models.PROTECT, null=True, blank=True)\n mode = models.CharField(max_length=63, null=True, blank=True)\n differentia = models.CharField(blank=True, null=True, max_length=63)\n finalis = models.CharField(blank=True, null=True, max_length=63)\n extra = models.CharField(blank=True, null=True, max_length=63)\n chant_range = models.CharField(\n blank=True,\n null=True,\n help_text='Example: \"1-c-k-4\". 
Optional field',\n max_length=255,\n )\n addendum = models.CharField(blank=True, null=True, max_length=255)\n manuscript_full_text_std_spelling = models.TextField(\n help_text=\"Manuscript full text with standardized spelling. Enter the words \"\n \"according to the manuscript but normalize their spellings following \"\n \"Classical Latin forms. Use upper-case letters for proper nouns, \"\n 'the first word of each chant, and the first word after \"Alleluia\" for '\n \"Mass Alleluias. Punctuation is omitted.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_std_proofread = models.BooleanField(blank=True, null=True)\n manuscript_full_text = models.TextField(\n help_text=\"Enter the wording, word order and spellings as found in the manuscript\"\n \", with abbreviations resolved to standard words. Use upper-case letters as found\"\n \" in the source. Retain \u201cXpistum\u201d (Christum), \u201cIhc\u201d (Jesus) and other instances of \"\n \"Greek characters with their closest approximations of Latin letters. Some punctuation\"\n \" signs and vertical dividing lines | are employed in this field. Repetenda and psalm \"\n \"cues can also be recorded here. For more information, contact Cantus Database staff.\",\n null=True,\n blank=True,\n )\n manuscript_full_text_proofread = models.BooleanField(blank=True, null=True)\n manuscript_syllabized_full_text = models.TextField(null=True, blank=True)\n volpiano = models.TextField(null=True, blank=True)\n volpiano_proofread = models.BooleanField(blank=True, null=True)\n volpiano_notes = models.TextField(null=True, blank=True)\n volpiano_intervals = models.TextField(null=True, blank=True)\n # volpiano_intervals = ArrayField(base_field=models.IntegerField(), null=True, blank=True)\n cao_concordances = models.CharField(blank=True, null=True, max_length=63)\n proofread_by = models.ForeignKey(\n User, on_delete=models.PROTECT, null=True, blank=True\n )\n melody_id = models.CharField(blank=True, null=True, max_length=63)\n search_vector = SearchVectorField(null=True, editable=False)\n content_structure = models.CharField(\n blank=True,\n null=True,\n max_length=64,\n help_text=\"Additional folio number field, if folio numbers appear on the leaves but are not in the 'binding order'.\",\n )\n", "path": "django/cantusdb_project/main_app/models/sequence.py"}]} | 1,936 | 194 |
gh_patches_debug_5033 | rasdani/github-patches | git_diff | meltano__meltano-7488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: Cloud CLI logs auth token at info level
### Meltano Version
cloud branch
### Python Version
NA
### Bug scope
CLI (options, error messages, logging, etc.)
### Operating System
N/A
### Description
Running `meltano cloud login` results in the input shown below. Running `meltano-cloud login` does not.
A good resolution to this would probably be to figure out what is logging these HTTP requests, and silence those log messages. My guess is that it has to do with the recent change to use `aiohttp` instead of Flask for the local auth server.
### Code
```python
Logging in to Meltano Cloud.
You will be directed to a web browser to complete login.
If a web browser does not open, open the following link:
https://auth.meltano.cloud/oauth2/authorize?client_id=45rpn5ep3g4qjut8jd3s4iq872&response_type=token&scope=email+openid+profile&redirect_uri=http%3A%2F%2Flocalhost%3A9999
2023-04-04T16:09:25.658362Z [info ] 127.0.0.1 [04/Apr/2023:16:09:25 +0000] "GET / HTTP/1.1" 200 236 "-" "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/111.0"
2023-04-04T16:09:25.780667Z [info ] 127.0.0.1 [04/Apr/2023:16:09:25 +0000] "GET /tokens?access_token=<redacted>&token_type=Bearer&expires_in=28800 HTTP/1.1" 204 99 "http://localhost:9999/" "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/111.0"
```
</issue>
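The logged request lines follow aiohttp's access-log format, and they echo the full query string, token included. Two ways to keep that log quiet are sketched below; this is a generic aiohttp snippet, not Meltano's actual wiring:

```python
# Illustrative sketch: silence aiohttp's per-request access log.
import logging

from aiohttp import web

app = web.Application()

# Option 1: disable access logging for this runner entirely.
runner = web.AppRunner(app, access_log=None)

# Option 2: raise the level of the "aiohttp.access" logger globally.
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
```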
<code>
[start of src/cloud-cli/meltano/cloud/api/auth/auth.py]
1 """Authentication for Meltano Cloud."""
2
3 from __future__ import annotations
4
5 import asyncio
6 import sys
7 import tempfile
8 import typing as t
9 import webbrowser
10 from contextlib import asynccontextmanager
11 from http import HTTPStatus
12 from pathlib import Path
13 from urllib.parse import urlencode, urljoin
14
15 import aiohttp
16 import click
17 import jinja2
18 from aiohttp import web
19
20 from meltano.cloud.api.config import MeltanoCloudConfig
21
22 if sys.version_info <= (3, 8):
23 from cached_property import cached_property
24 else:
25 from functools import cached_property
26
27 if sys.version_info < (3, 9):
28 import importlib_resources
29 else:
30 from importlib import resources as importlib_resources
31
32 LOGIN_STATUS_CHECK_DELAY_SECONDS = 0.2
33
34
35 class MeltanoCloudAuthError(Exception):
36 """Raised when an API call returns a 403."""
37
38
39 class MeltanoCloudAuth: # noqa: WPS214
40 """Authentication methods for Meltano Cloud."""
41
42 def __init__(self, config: MeltanoCloudConfig | None = None):
43 """Initialize a MeltanoCloudAuth instance.
44
45 Args:
46 config: the MeltanoCloudConfig to use
47 """
48 self.config = config or MeltanoCloudConfig.find()
49 self.base_url = self.config.base_auth_url
50 self.client_id = self.config.app_client_id
51
52 @cached_property
53 def login_url(self) -> str:
54 """Get the oauth2 authorization URL.
55
56 Returns:
57 the oauth2 authorization URL.
58 """
59 query_params = urlencode(
60 {
61 "client_id": self.client_id,
62 "response_type": "token",
63 "scope": "email openid profile",
64 "redirect_uri": f"http://localhost:{self.config.auth_callback_port}",
65 },
66 )
67 return f"{self.base_url}/oauth2/authorize?{query_params}"
68
69 @cached_property
70 def logout_url(self) -> str:
71 """Get the Meltano Cloud logout URL.
72
73 Returns:
74 the Meltano Cloud logout URL.
75 """
76 params = urlencode(
77 {
78 "client_id": self.client_id,
79 "logout_uri": f"http://localhost:{self.config.auth_callback_port}/logout", # noqa: E501)
80 },
81 )
82 return urljoin(self.base_url, f"logout?{params}")
83
84 @asynccontextmanager
85 async def _callback_server(
86 self,
87 rendered_template_dir: Path,
88 ) -> t.AsyncIterator[web.Application]:
89 app = web.Application()
90 resource_root = importlib_resources.files(__package__)
91
92 async def callback_page(_):
93 with importlib_resources.as_file(
94 resource_root / "callback.jinja2",
95 ) as template_file, (rendered_template_dir / "callback.html").open(
96 "w",
97 ) as rendered_template_file:
98 rendered_template_file.write(
99 jinja2.Template(template_file.read_text()).render(
100 port=self.config.auth_callback_port,
101 ),
102 )
103 return web.FileResponse(rendered_template_file.name)
104
105 async def handle_tokens(request: web.Request):
106 self.config.id_token = request.query["id_token"]
107 self.config.access_token = request.query["access_token"]
108 self.config.write_to_file()
109 return web.Response(status=HTTPStatus.NO_CONTENT)
110
111 async def handle_logout(_):
112 self.config.id_token = None
113 self.config.access_token = None
114 self.config.write_to_file()
115 with importlib_resources.as_file(
116 resource_root / "logout.html",
117 ) as html_file:
118 return web.FileResponse(html_file)
119
120 app.add_routes(
121 (
122 web.get("/", callback_page),
123 web.get("/tokens", handle_tokens),
124 web.get("/logout", handle_logout),
125 ),
126 )
127 runner = web.AppRunner(app)
128 await runner.setup()
129 site = web.TCPSite(runner, "localhost", self.config.auth_callback_port)
130 await site.start()
131 try:
132 yield app
133 finally:
134 await runner.cleanup()
135
136 @asynccontextmanager
137 async def callback_server(self) -> t.AsyncIterator[web.Application]:
138 """Context manager to run callback server locally.
139
140 Yields:
141 The aiohttp web application.
142 """
143 with tempfile.TemporaryDirectory(prefix="meltano-cloud-") as tmpdir:
144 async with self._callback_server(Path(tmpdir)) as app:
145 yield app
146
147 async def login(self) -> None:
148 """Take user through login flow and get auth and id tokens."""
149 if await self.logged_in():
150 return
151 async with self.callback_server():
152 click.echo("Logging in to Meltano Cloud.")
153 click.echo("You will be directed to a web browser to complete login.")
154 click.echo("If a web browser does not open, open the following link:")
155 click.secho(self.login_url, fg="green")
156 webbrowser.open_new_tab(self.login_url)
157 while not await self.logged_in():
158 self.config.refresh()
159 await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)
160
161 async def logout(self) -> None: # noqa: WPS213
162 """Log out."""
163 if not await self.logged_in():
164 click.secho("Not logged in.", fg="green")
165 return
166 async with self.callback_server():
167 click.echo("Logging out of Meltano Cloud.")
168 click.echo("You will be directed to a web browser to complete logout.")
169 click.echo("If a web browser does not open, open the following link:")
170 click.secho(self.logout_url, fg="green")
171 webbrowser.open_new_tab(self.logout_url)
172 while await self.logged_in():
173 self.config.refresh()
174 await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)
175 click.secho("Successfully logged out.", fg="green")
176
177 def get_auth_header(self) -> dict[str, str]:
178 """Get the authorization header.
179
180 Used for authenticating to cloud API endpoints.
181
182 Returns:
183 Authorization header using ID token as bearer token.
184
185 """
186 return {"Authorization": f"Bearer {self.config.id_token}"}
187
188 def get_access_token_header(self) -> dict[str, str]:
189 """Get the access token header.
190
191 Used for authenticating to auth endpoints.
192
193 Returns:
194 Authorization header using access token as bearer token.
195 """
196 return {"Authorization": f"Bearer {self.config.access_token}"}
197
198 @asynccontextmanager
199 async def _get_user_info_response(self) -> t.AsyncIterator[aiohttp.ClientResponse]:
200 async with aiohttp.ClientSession() as session:
201 async with session.get(
202 urljoin(self.base_url, "oauth2/userInfo"),
203 headers=self.get_access_token_header(),
204 ) as response:
205 yield response
206
207 async def get_user_info_response(self) -> aiohttp.ClientResponse:
208 """Get user info.
209
210 Returns:
211 User info response
212 """
213 async with self._get_user_info_response() as response:
214 return response
215
216 async def get_user_info_json(self) -> dict:
217 """Get user info as dict.
218
219 Returns:
220 User info json
221 """
222 async with self._get_user_info_response() as response:
223 return await response.json()
224
225 async def logged_in(self) -> bool:
226 """Check if this instance is currently logged in.
227
228 Returns:
229 True if logged in, else False
230 """
231 return bool(
232 self.config.access_token
233 and self.config.id_token
234 # Perform this check at the end to avoid
235 # spamming our servers if logout fails
236 and (await self.get_user_info_response()).ok,
237 )
238
[end of src/cloud-cli/meltano/cloud/api/auth/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cloud-cli/meltano/cloud/api/auth/auth.py b/src/cloud-cli/meltano/cloud/api/auth/auth.py
--- a/src/cloud-cli/meltano/cloud/api/auth/auth.py
+++ b/src/cloud-cli/meltano/cloud/api/auth/auth.py
@@ -124,7 +124,7 @@
web.get("/logout", handle_logout),
),
)
- runner = web.AppRunner(app)
+ runner = web.AppRunner(app, access_log=None)
await runner.setup()
site = web.TCPSite(runner, "localhost", self.config.auth_callback_port)
await site.start()
| {"golden_diff": "diff --git a/src/cloud-cli/meltano/cloud/api/auth/auth.py b/src/cloud-cli/meltano/cloud/api/auth/auth.py\n--- a/src/cloud-cli/meltano/cloud/api/auth/auth.py\n+++ b/src/cloud-cli/meltano/cloud/api/auth/auth.py\n@@ -124,7 +124,7 @@\n web.get(\"/logout\", handle_logout),\n ),\n )\n- runner = web.AppRunner(app)\n+ runner = web.AppRunner(app, access_log=None)\n await runner.setup()\n site = web.TCPSite(runner, \"localhost\", self.config.auth_callback_port)\n await site.start()\n", "issue": "bug: Cloud CLI logs auth token at info level\n### Meltano Version\n\ncloud branch\n\n### Python Version\n\nNA\n\n### Bug scope\n\nCLI (options, error messages, logging, etc.)\n\n### Operating System\n\nN/A\n\n### Description\n\nRunning `meltano cloud login` results in the input shown below. Running `meltano-cloud login` does not.\r\n\r\nA good resolution to this would probably be to figure out what is logging these HTTP requests, and silence those log messages. My guess is that it has to do with the recent change to use `aiohttp` instead of Flask for the local auth server.\n\n### Code\n\n```python\nLogging in to Meltano Cloud.\r\nYou will be directed to a web browser to complete login.\r\nIf a web browser does not open, open the following link:\r\nhttps://auth.meltano.cloud/oauth2/authorize?client_id=45rpn5ep3g4qjut8jd3s4iq872&response_type=token&scope=email+openid+profile&redirect_uri=http%3A%2F%2Flocalhost%3A9999\r\n2023-04-04T16:09:25.658362Z [info ] 127.0.0.1 [04/Apr/2023:16:09:25 +0000] \"GET / HTTP/1.1\" 200 236 \"-\" \"Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/111.0\"\r\n2023-04-04T16:09:25.780667Z [info ] 127.0.0.1 [04/Apr/2023:16:09:25 +0000] \"GET /tokens?access_token=<redacted>&token_type=Bearer&expires_in=28800 HTTP/1.1\" 204 99 \"http://localhost:9999/\" \"Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/111.0\"\n```\n\n", "before_files": [{"content": "\"\"\"Authentication for Meltano Cloud.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport sys\nimport tempfile\nimport typing as t\nimport webbrowser\nfrom contextlib import asynccontextmanager\nfrom http import HTTPStatus\nfrom pathlib import Path\nfrom urllib.parse import urlencode, urljoin\n\nimport aiohttp\nimport click\nimport jinja2\nfrom aiohttp import web\n\nfrom meltano.cloud.api.config import MeltanoCloudConfig\n\nif sys.version_info <= (3, 8):\n from cached_property import cached_property\nelse:\n from functools import cached_property\n\nif sys.version_info < (3, 9):\n import importlib_resources\nelse:\n from importlib import resources as importlib_resources\n\nLOGIN_STATUS_CHECK_DELAY_SECONDS = 0.2\n\n\nclass MeltanoCloudAuthError(Exception):\n \"\"\"Raised when an API call returns a 403.\"\"\"\n\n\nclass MeltanoCloudAuth: # noqa: WPS214\n \"\"\"Authentication methods for Meltano Cloud.\"\"\"\n\n def __init__(self, config: MeltanoCloudConfig | None = None):\n \"\"\"Initialize a MeltanoCloudAuth instance.\n\n Args:\n config: the MeltanoCloudConfig to use\n \"\"\"\n self.config = config or MeltanoCloudConfig.find()\n self.base_url = self.config.base_auth_url\n self.client_id = self.config.app_client_id\n\n @cached_property\n def login_url(self) -> str:\n \"\"\"Get the oauth2 authorization URL.\n\n Returns:\n the oauth2 authorization URL.\n \"\"\"\n query_params = urlencode(\n {\n \"client_id\": self.client_id,\n \"response_type\": \"token\",\n \"scope\": \"email openid profile\",\n \"redirect_uri\": f\"http://localhost:{self.config.auth_callback_port}\",\n },\n 
)\n return f\"{self.base_url}/oauth2/authorize?{query_params}\"\n\n @cached_property\n def logout_url(self) -> str:\n \"\"\"Get the Meltano Cloud logout URL.\n\n Returns:\n the Meltano Cloud logout URL.\n \"\"\"\n params = urlencode(\n {\n \"client_id\": self.client_id,\n \"logout_uri\": f\"http://localhost:{self.config.auth_callback_port}/logout\", # noqa: E501)\n },\n )\n return urljoin(self.base_url, f\"logout?{params}\")\n\n @asynccontextmanager\n async def _callback_server(\n self,\n rendered_template_dir: Path,\n ) -> t.AsyncIterator[web.Application]:\n app = web.Application()\n resource_root = importlib_resources.files(__package__)\n\n async def callback_page(_):\n with importlib_resources.as_file(\n resource_root / \"callback.jinja2\",\n ) as template_file, (rendered_template_dir / \"callback.html\").open(\n \"w\",\n ) as rendered_template_file:\n rendered_template_file.write(\n jinja2.Template(template_file.read_text()).render(\n port=self.config.auth_callback_port,\n ),\n )\n return web.FileResponse(rendered_template_file.name)\n\n async def handle_tokens(request: web.Request):\n self.config.id_token = request.query[\"id_token\"]\n self.config.access_token = request.query[\"access_token\"]\n self.config.write_to_file()\n return web.Response(status=HTTPStatus.NO_CONTENT)\n\n async def handle_logout(_):\n self.config.id_token = None\n self.config.access_token = None\n self.config.write_to_file()\n with importlib_resources.as_file(\n resource_root / \"logout.html\",\n ) as html_file:\n return web.FileResponse(html_file)\n\n app.add_routes(\n (\n web.get(\"/\", callback_page),\n web.get(\"/tokens\", handle_tokens),\n web.get(\"/logout\", handle_logout),\n ),\n )\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, \"localhost\", self.config.auth_callback_port)\n await site.start()\n try:\n yield app\n finally:\n await runner.cleanup()\n\n @asynccontextmanager\n async def callback_server(self) -> t.AsyncIterator[web.Application]:\n \"\"\"Context manager to run callback server locally.\n\n Yields:\n The aiohttp web application.\n \"\"\"\n with tempfile.TemporaryDirectory(prefix=\"meltano-cloud-\") as tmpdir:\n async with self._callback_server(Path(tmpdir)) as app:\n yield app\n\n async def login(self) -> None:\n \"\"\"Take user through login flow and get auth and id tokens.\"\"\"\n if await self.logged_in():\n return\n async with self.callback_server():\n click.echo(\"Logging in to Meltano Cloud.\")\n click.echo(\"You will be directed to a web browser to complete login.\")\n click.echo(\"If a web browser does not open, open the following link:\")\n click.secho(self.login_url, fg=\"green\")\n webbrowser.open_new_tab(self.login_url)\n while not await self.logged_in():\n self.config.refresh()\n await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)\n\n async def logout(self) -> None: # noqa: WPS213\n \"\"\"Log out.\"\"\"\n if not await self.logged_in():\n click.secho(\"Not logged in.\", fg=\"green\")\n return\n async with self.callback_server():\n click.echo(\"Logging out of Meltano Cloud.\")\n click.echo(\"You will be directed to a web browser to complete logout.\")\n click.echo(\"If a web browser does not open, open the following link:\")\n click.secho(self.logout_url, fg=\"green\")\n webbrowser.open_new_tab(self.logout_url)\n while await self.logged_in():\n self.config.refresh()\n await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)\n click.secho(\"Successfully logged out.\", fg=\"green\")\n\n def get_auth_header(self) -> dict[str, str]:\n \"\"\"Get the 
authorization header.\n\n Used for authenticating to cloud API endpoints.\n\n Returns:\n Authorization header using ID token as bearer token.\n\n \"\"\"\n return {\"Authorization\": f\"Bearer {self.config.id_token}\"}\n\n def get_access_token_header(self) -> dict[str, str]:\n \"\"\"Get the access token header.\n\n Used for authenticating to auth endpoints.\n\n Returns:\n Authorization header using access token as bearer token.\n \"\"\"\n return {\"Authorization\": f\"Bearer {self.config.access_token}\"}\n\n @asynccontextmanager\n async def _get_user_info_response(self) -> t.AsyncIterator[aiohttp.ClientResponse]:\n async with aiohttp.ClientSession() as session:\n async with session.get(\n urljoin(self.base_url, \"oauth2/userInfo\"),\n headers=self.get_access_token_header(),\n ) as response:\n yield response\n\n async def get_user_info_response(self) -> aiohttp.ClientResponse:\n \"\"\"Get user info.\n\n Returns:\n User info response\n \"\"\"\n async with self._get_user_info_response() as response:\n return response\n\n async def get_user_info_json(self) -> dict:\n \"\"\"Get user info as dict.\n\n Returns:\n User info json\n \"\"\"\n async with self._get_user_info_response() as response:\n return await response.json()\n\n async def logged_in(self) -> bool:\n \"\"\"Check if this instance is currently logged in.\n\n Returns:\n True if logged in, else False\n \"\"\"\n return bool(\n self.config.access_token\n and self.config.id_token\n # Perform this check at the end to avoid\n # spamming our servers if logout fails\n and (await self.get_user_info_response()).ok,\n )\n", "path": "src/cloud-cli/meltano/cloud/api/auth/auth.py"}]} | 3,313 | 135 |
gh_patches_debug_28234 | rasdani/github-patches | git_diff | quantumlib__Cirq-3054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support multi-qubit measurements in `cirq.plot_state_histogram`
Quote from the docstring: "Currently this function assumes each measurement gate applies to only a single qubit."
Currently, I get an incorrect histogram if I don't read the docstring and use a multi-qubit measurement (I always make circuits like this):
```python
qubits = cirq.LineQubit.range(3)
c = cirq.Circuit(
(cirq.X**0.4).on_each(*qubits),
cirq.measure(*qubits), # One multi-qubit measurement
)
cirq.plot_state_histogram(cirq.sample(c, repetitions=10000))
# Incorrect output, no warning or error
```
[figure: resulting state histogram (incorrect counts)]
If I use single-qubit measurement gates, I get the expected histogram:
```python
qubits = cirq.LineQubit.range(3)
c = cirq.Circuit(
(cirq.X**0.4).on_each(*qubits),
cirq.measure_each(*qubits), # One measurement per qubit
)
cirq.plot_state_histogram(cirq.sample(c, repetitions=10000))
```
[figure: resulting state histogram (expected counts)]
This looks like it could be fixed by adding some logic to `plot_state_histogram` (https://github.com/quantumlib/Cirq/blob/master/cirq/study/visualize.py#L22) that checks for multi-qubit measurements and either correctly interprets them or raises an error.
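
For illustration, here is a minimal sketch (the helper name is hypothetical, not Cirq API) of how a key-agnostic flattening could compute a histogram index per repetition, assuming every value in `result.measurements` is a `(repetitions, num_qubits)` boolean array:
```python
import numpy as np

def state_indices(measurements):
    # Concatenate every measurement key's qubit columns per repetition,
    # so one 3-qubit key and three 1-qubit keys are handled identically.
    num_qubits = sum(v.shape[1] for v in measurements.values())
    bits = np.hstack(list(measurements.values()))   # (repetitions, num_qubits) bools
    weights = 1 << np.arange(num_qubits)[::-1]      # big-endian bit weights
    return num_qubits, bits.astype(int) @ weights   # one integer state per repetition
```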
</issue>
<code>
[start of cirq/study/visualize.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Tool to visualize the results of a study."""
16
17 import numpy as np
18
19 from cirq.study import trial_result
20
21
22 def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:
23 """Plot the state histogram from a single result with repetitions.
24
25 States is a bitstring representation of all the qubit states in a single
26 result.
27 Currently this function assumes each measurement gate applies to only
28 a single qubit.
29
30 Args:
31 result: The trial results to plot.
32
33 Returns:
34 The histogram. A list of values plotted on the y-axis.
35 """
36
37 # pyplot import is deferred because it requires a system dependency
38 # (python3-tk) that `python -m pip install cirq` can't handle for the user.
39 # This allows cirq to be usable without python3-tk.
40 import matplotlib.pyplot as plt
41
42 num_qubits = len(result.measurements.keys())
43 states = 2**num_qubits
44 values = np.zeros(states)
45
46 # measurements is a dict of {measurement gate key:
47 # array(repetitions, boolean result)}
48 # Convert this to an array of repetitions, each with an array of booleans.
49 # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}
50 # --> array([[True, False], [True, False]])
51 measurement_by_result = np.array([
52 v.transpose()[0] for k, v in result.measurements.items()]).transpose()
53
54 for meas in measurement_by_result:
55 # Convert each array of booleans to a string representation.
56 # e.g. [True, False] -> [1, 0] -> '10' -> 2
57 state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)
58 values[state_ind] += 1
59
60 plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]
61 plt.bar(np.arange(states), values, tick_label=plot_labels)
62 plt.xlabel('qubit state')
63 plt.ylabel('result count')
64 plt.show()
65
66 return values
67
[end of cirq/study/visualize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/study/visualize.py b/cirq/study/visualize.py
--- a/cirq/study/visualize.py
+++ b/cirq/study/visualize.py
@@ -24,8 +24,6 @@
States is a bitstring representation of all the qubit states in a single
result.
- Currently this function assumes each measurement gate applies to only
- a single qubit.
Args:
result: The trial results to plot.
@@ -39,17 +37,15 @@
# This allows cirq to be usable without python3-tk.
import matplotlib.pyplot as plt
- num_qubits = len(result.measurements.keys())
+ num_qubits = sum([value.shape[1] for value in result.measurements.values()])
states = 2**num_qubits
values = np.zeros(states)
-
# measurements is a dict of {measurement gate key:
# array(repetitions, boolean result)}
# Convert this to an array of repetitions, each with an array of booleans.
# e.g. {q1: array([[True, True]]), q2: array([[False, False]])}
# --> array([[True, False], [True, False]])
- measurement_by_result = np.array([
- v.transpose()[0] for k, v in result.measurements.items()]).transpose()
+ measurement_by_result = np.hstack(list(result.measurements.values()))
for meas in measurement_by_result:
# Convert each array of booleans to a string representation.
| {"golden_diff": "diff --git a/cirq/study/visualize.py b/cirq/study/visualize.py\n--- a/cirq/study/visualize.py\n+++ b/cirq/study/visualize.py\n@@ -24,8 +24,6 @@\n \n States is a bitstring representation of all the qubit states in a single\n result.\n- Currently this function assumes each measurement gate applies to only\n- a single qubit.\n \n Args:\n result: The trial results to plot.\n@@ -39,17 +37,15 @@\n # This allows cirq to be usable without python3-tk.\n import matplotlib.pyplot as plt\n \n- num_qubits = len(result.measurements.keys())\n+ num_qubits = sum([value.shape[1] for value in result.measurements.values()])\n states = 2**num_qubits\n values = np.zeros(states)\n-\n # measurements is a dict of {measurement gate key:\n # array(repetitions, boolean result)}\n # Convert this to an array of repetitions, each with an array of booleans.\n # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}\n # --> array([[True, False], [True, False]])\n- measurement_by_result = np.array([\n- v.transpose()[0] for k, v in result.measurements.items()]).transpose()\n+ measurement_by_result = np.hstack(list(result.measurements.values()))\n \n for meas in measurement_by_result:\n # Convert each array of booleans to a string representation.\n", "issue": "Support multi-qubit measurements in `cirq.plot_state_histogram`\nQuote from the docstring: \"Currently this function assumes each measurement gate applies to only a single qubit.\"\r\n\r\nCurrently, I get an incorrect histogram if I didn't read the docstring and used a multi-qubit measurement (I always make circuits like this):\r\n```python\r\nqubits = cirq.LineQubit.range(3)\r\nc = cirq.Circuit(\r\n (cirq.X**0.4).on_each(*qubits),\r\n cirq.measure(*qubits), # One multi-qubit measurement\r\n)\r\ncirq.plot_state_histogram(cirq.sample(c, repetitions=10000))\r\n# Incorrect output, no warning or error\r\n```\r\n\r\n\r\nIf I use single-qubit measurement gates, I get the expected histogram:\r\n```python\r\nqubits = cirq.LineQubit.range(3)\r\nc = cirq.Circuit(\r\n (cirq.X**0.4).on_each(*qubits),\r\n cirq.measure_each(*qubits), # One measurement per qubit\r\n)\r\ncirq.plot_state_histogram(cirq.sample(c, repetitions=10000))\r\n```\r\n\r\n\r\nThis looks like it could be fixed by adding some logic to `plot_state_histogram` (https://github.com/quantumlib/Cirq/blob/master/cirq/study/visualize.py#L22) that checks for multi-qubit measurements and either correctly interpret them or raise an error.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool to visualize the results of a study.\"\"\"\n\nimport numpy as np\n\nfrom cirq.study import trial_result\n\n\ndef plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:\n \"\"\"Plot the state histogram from a single result with repetitions.\n\n States is a bitstring representation of all the qubit states in a single\n result.\n Currently this function assumes each measurement gate applies to only\n a single qubit.\n\n 
Args:\n result: The trial results to plot.\n\n Returns:\n The histogram. A list of values plotted on the y-axis.\n \"\"\"\n\n # pyplot import is deferred because it requires a system dependency\n # (python3-tk) that `python -m pip install cirq` can't handle for the user.\n # This allows cirq to be usable without python3-tk.\n import matplotlib.pyplot as plt\n\n num_qubits = len(result.measurements.keys())\n states = 2**num_qubits\n values = np.zeros(states)\n\n # measurements is a dict of {measurement gate key:\n # array(repetitions, boolean result)}\n # Convert this to an array of repetitions, each with an array of booleans.\n # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}\n # --> array([[True, False], [True, False]])\n measurement_by_result = np.array([\n v.transpose()[0] for k, v in result.measurements.items()]).transpose()\n\n for meas in measurement_by_result:\n # Convert each array of booleans to a string representation.\n # e.g. [True, False] -> [1, 0] -> '10' -> 2\n state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)\n values[state_ind] += 1\n\n plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]\n plt.bar(np.arange(states), values, tick_label=plot_labels)\n plt.xlabel('qubit state')\n plt.ylabel('result count')\n plt.show()\n\n return values\n", "path": "cirq/study/visualize.py"}]} | 1,695 | 344 |
gh_patches_debug_26493 | rasdani/github-patches | git_diff | Pylons__pyramid-2567 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exception views squash the original exception if an exception view predicate is tested and fails
If you register an exception view with a predicate such as `config.add_view(view, context=Exception, request_method='GET')`, this will cause any errors from a POST request (without a matching exception view) to be squashed into a `PredicateMismatch` exception, which will be propagated up the WSGI stack instead of the original exception.
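
For illustration, a minimal reproduction sketch along the lines described above (the app wiring below is mine; only the predicate registration comes from the report):
```python
from pyramid.config import Configurator
from pyramid.response import Response

def boom(request):
    raise ValueError("original error")          # the exception we expect to see

def exc_view(exc, request):
    return Response("handled: %s" % exc, status=500)

config = Configurator()
config.add_route("boom", "/boom")
config.add_view(boom, route_name="boom")
# Exception view whose predicate only matches GET requests:
config.add_view(exc_view, context=Exception, request_method="GET")
app = config.make_wsgi_app()
# A POST to /boom has no exception view that passes its predicates, so the
# excview tween's PredicateMismatch (not the ValueError) escapes to WSGI.
```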
</issue>
<code>
[start of pyramid/tweens.py]
1 import sys
2
3 from pyramid.interfaces import (
4 IExceptionViewClassifier,
5 IRequest,
6 )
7
8 from zope.interface import providedBy
9 from pyramid.view import _call_view
10
11 def excview_tween_factory(handler, registry):
12 """ A :term:`tween` factory which produces a tween that catches an
13 exception raised by downstream tweens (or the main Pyramid request
14 handler) and, if possible, converts it into a Response using an
15 :term:`exception view`."""
16
17 def excview_tween(request):
18 attrs = request.__dict__
19 try:
20 response = handler(request)
21 except Exception as exc:
22 # WARNING: do not assign the result of sys.exc_info() to a local
23 # var here, doing so will cause a leak. We used to actually
24 # explicitly delete both "exception" and "exc_info" from ``attrs``
25 # in a ``finally:`` clause below, but now we do not because these
26 # attributes are useful to upstream tweens. This actually still
27 # apparently causes a reference cycle, but it is broken
28 # successfully by the garbage collector (see
29 # https://github.com/Pylons/pyramid/issues/1223).
30 attrs['exc_info'] = sys.exc_info()
31 attrs['exception'] = exc
32 # clear old generated request.response, if any; it may
33 # have been mutated by the view, and its state is not
34 # sane (e.g. caching headers)
35 if 'response' in attrs:
36 del attrs['response']
37 # we use .get instead of .__getitem__ below due to
38 # https://github.com/Pylons/pyramid/issues/700
39 request_iface = attrs.get('request_iface', IRequest)
40 provides = providedBy(exc)
41 response = _call_view(
42 registry,
43 request,
44 exc,
45 provides,
46 '',
47 view_classifier=IExceptionViewClassifier,
48 request_iface=request_iface.combined
49 )
50 if response is None:
51 raise
52
53 return response
54
55 return excview_tween
56
57 MAIN = 'MAIN'
58 INGRESS = 'INGRESS'
59 EXCVIEW = 'pyramid.tweens.excview_tween_factory'
60
[end of pyramid/tweens.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyramid/tweens.py b/pyramid/tweens.py
--- a/pyramid/tweens.py
+++ b/pyramid/tweens.py
@@ -1,5 +1,7 @@
import sys
+from pyramid.compat import reraise
+from pyramid.exceptions import PredicateMismatch
from pyramid.interfaces import (
IExceptionViewClassifier,
IRequest,
@@ -38,17 +40,26 @@
# https://github.com/Pylons/pyramid/issues/700
request_iface = attrs.get('request_iface', IRequest)
provides = providedBy(exc)
- response = _call_view(
- registry,
- request,
- exc,
- provides,
- '',
- view_classifier=IExceptionViewClassifier,
- request_iface=request_iface.combined
- )
+ try:
+ response = _call_view(
+ registry,
+ request,
+ exc,
+ provides,
+ '',
+ view_classifier=IExceptionViewClassifier,
+ request_iface=request_iface.combined
+ )
+
+ # if views matched but did not pass predicates, squash the error
+ # and re-raise the original exception
+ except PredicateMismatch:
+ response = None
+
+ # re-raise the original exception as no exception views were
+ # able to handle the error
if response is None:
- raise
+ reraise(*attrs['exc_info'])
return response
| {"golden_diff": "diff --git a/pyramid/tweens.py b/pyramid/tweens.py\n--- a/pyramid/tweens.py\n+++ b/pyramid/tweens.py\n@@ -1,5 +1,7 @@\n import sys\n \n+from pyramid.compat import reraise\n+from pyramid.exceptions import PredicateMismatch\n from pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n@@ -38,17 +40,26 @@\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n- response = _call_view(\n- registry,\n- request,\n- exc,\n- provides,\n- '',\n- view_classifier=IExceptionViewClassifier,\n- request_iface=request_iface.combined\n- )\n+ try:\n+ response = _call_view(\n+ registry,\n+ request,\n+ exc,\n+ provides,\n+ '',\n+ view_classifier=IExceptionViewClassifier,\n+ request_iface=request_iface.combined\n+ )\n+\n+ # if views matched but did not pass predicates, squash the error\n+ # and re-raise the original exception\n+ except PredicateMismatch:\n+ response = None\n+\n+ # re-raise the original exception as no exception views were\n+ # able to handle the error\n if response is None:\n- raise\n+ reraise(*attrs['exc_info'])\n \n return response\n", "issue": "exception views squash the original exception if an exception view predicate is tested and fails\nIf you register an exception view with a predicate such as `config.add_view(view, context=Exception, request_method='GET')`, this will cause any errors from a POST request (without a matching exception view) to be squashed into a `PredicateMismatch` exception which will be propagated up the wsgi stack instead of the original exception.\n\n", "before_files": [{"content": "import sys\n\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n # WARNING: do not assign the result of sys.exc_info() to a local\n # var here, doing so will cause a leak. We used to actually\n # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n # in a ``finally:`` clause below, but now we do not because these\n # attributes are useful to upstream tweens. This actually still\n # apparently causes a reference cycle, but it is broken\n # successfully by the garbage collector (see\n # https://github.com/Pylons/pyramid/issues/1223).\n attrs['exc_info'] = sys.exc_info()\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n response = _call_view(\n registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n if response is None:\n raise\n\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}]} | 1,210 | 325 |
gh_patches_debug_8056 | rasdani/github-patches | git_diff | googleapis__python-bigquery-80 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chore: replace Artman with bazel for synthesizing code
The synthtool should start using bazel instead of Artman.
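
For context, a rough sketch of what the bazel-based generator call looks like in synthtool (sketch only; the argument names mirror the patch shown below):
```python
from synthtool import gcp

gapic = gcp.GAPICBazel()          # replaces gcp.GAPICGenerator()
version = "v2"
library = gapic.py_library(
    service="bigquery",
    version=version,
    bazel_target=f"//google/cloud/bigquery/{version}:bigquery-{version}-py",
    include_protos=True,
)
```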
</issue>
<code>
[start of synth.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is used to synthesize generated parts of this library."""
16
17 import synthtool as s
18 from synthtool import gcp
19
20 gapic = gcp.GAPICGenerator()
21 common = gcp.CommonTemplates()
22 version = 'v2'
23
24 library = gapic.py_library(
25 'bigquery',
26 version,
27 config_path='/google/cloud/bigquery/'
28 'artman_bigquery_v2.yaml',
29 artman_output_name='bigquery-v2',
30 include_protos=True,
31 )
32
33 s.move(
34 [
35 library / "google/cloud/bigquery_v2/gapic/enums.py",
36 library / "google/cloud/bigquery_v2/types.py",
37 library / "google/cloud/bigquery_v2/proto/location*",
38 library / "google/cloud/bigquery_v2/proto/encryption_config*",
39 library / "google/cloud/bigquery_v2/proto/model*",
40 library / "google/cloud/bigquery_v2/proto/standard_sql*",
41 ],
42 )
43
44 # Fix up proto docs that are missing summary line.
45 s.replace(
46 "google/cloud/bigquery_v2/proto/model_pb2.py",
47 '"""Attributes:',
48 '"""Protocol buffer.\n\n Attributes:',
49 )
50 s.replace(
51 "google/cloud/bigquery_v2/proto/encryption_config_pb2.py",
52 '"""Attributes:',
53 '"""Encryption configuration.\n\n Attributes:',
54 )
55
56 # Remove non-ascii characters from docstrings for Python 2.7.
57 # Format quoted strings as plain text.
58 s.replace("google/cloud/bigquery_v2/proto/*.py", "[“”]", '``')
59
60 # ----------------------------------------------------------------------------
61 # Add templated files
62 # ----------------------------------------------------------------------------
63 templated_files = common.py_library(cov_level=100)
64 s.move(templated_files, excludes=["noxfile.py"])
65
66 s.shell.run(["nox", "-s", "blacken"], hide_output=False)
67
[end of synth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synth.py b/synth.py
--- a/synth.py
+++ b/synth.py
@@ -17,16 +17,14 @@
import synthtool as s
from synthtool import gcp
-gapic = gcp.GAPICGenerator()
+gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
version = 'v2'
library = gapic.py_library(
- 'bigquery',
- version,
- config_path='/google/cloud/bigquery/'
- 'artman_bigquery_v2.yaml',
- artman_output_name='bigquery-v2',
+ service='bigquery',
+ version=version,
+ bazel_target=f"//google/cloud/bigquery/{version}:bigquery-{version}-py",
include_protos=True,
)
| {"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -17,16 +17,14 @@\n import synthtool as s\n from synthtool import gcp\n \n-gapic = gcp.GAPICGenerator()\n+gapic = gcp.GAPICBazel()\n common = gcp.CommonTemplates()\n version = 'v2'\n \n library = gapic.py_library(\n- 'bigquery',\n- version,\n- config_path='/google/cloud/bigquery/'\n- 'artman_bigquery_v2.yaml',\n- artman_output_name='bigquery-v2',\n+ service='bigquery',\n+ version=version,\n+ bazel_target=f\"//google/cloud/bigquery/{version}:bigquery-{version}-py\",\n include_protos=True,\n )\n", "issue": "chore: replace Artman with bazel for synthesizing code\nThe synthtool should start using bazel instead of Artman.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\ncommon = gcp.CommonTemplates()\nversion = 'v2'\n\nlibrary = gapic.py_library(\n 'bigquery',\n version,\n config_path='/google/cloud/bigquery/'\n 'artman_bigquery_v2.yaml',\n artman_output_name='bigquery-v2',\n include_protos=True,\n)\n\ns.move(\n [\n library / \"google/cloud/bigquery_v2/gapic/enums.py\",\n library / \"google/cloud/bigquery_v2/types.py\",\n library / \"google/cloud/bigquery_v2/proto/location*\",\n library / \"google/cloud/bigquery_v2/proto/encryption_config*\",\n library / \"google/cloud/bigquery_v2/proto/model*\",\n library / \"google/cloud/bigquery_v2/proto/standard_sql*\",\n ],\n)\n\n# Fix up proto docs that are missing summary line.\ns.replace(\n \"google/cloud/bigquery_v2/proto/model_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Protocol buffer.\\n\\n Attributes:',\n)\ns.replace(\n \"google/cloud/bigquery_v2/proto/encryption_config_pb2.py\",\n '\"\"\"Attributes:',\n '\"\"\"Encryption configuration.\\n\\n Attributes:',\n)\n\n# Remove non-ascii characters from docstrings for Python 2.7.\n# Format quoted strings as plain text.\ns.replace(\"google/cloud/bigquery_v2/proto/*.py\", \"[\u201c\u201d]\", '``')\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(cov_level=100)\ns.move(templated_files, excludes=[\"noxfile.py\"])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False)\n", "path": "synth.py"}]} | 1,200 | 179 |
gh_patches_debug_3011 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-10572 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Most recent available `mambaforge=4.10` is simply too old
Hello guys, just wanted to ask you if it's possible to have a more modern version available for `mambaforge`. The best and latest version that can currently be sourced on RTD via the configuration file is 4.10, which is simply too old (at most conda 4.10 and mamba 0.19), and updating to a modern mamba doesn't work, as you can see from me changing the conf file in https://github.com/ESMValGroup/ESMValTool/pull/3310/files with output in https://readthedocs.org/projects/esmvaltool/builds/21390633/ - mamba is stuck at 0.19.0, which, in turn, slows down the environment creation process to around 10 minutes. (For more recent condas, updating mamba to something like >=1.4.8 works very well, and updates conda to 23.3 or 23.4 too, but in this case the base versions are too old.) If you need any help whatsoever, I offer to help, and once more, many thanks for your great work on RTD :beer:
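
For illustration, on the Read the Docs side this boils down to extending the `build.tools` Python mapping with a newer toolchain entry; a sketch (the variable name and the exact asdf version string below are assumptions and must match what `asdf list all python` exposes):
```python
# Sketch of the relevant fragment of readthedocs/builds/constants_docker.py
PYTHON_TOOL_VERSIONS = {
    "miniconda3-4.7": "miniconda3-4.7.12",
    "mambaforge-4.10": "mambaforge-4.10.3-10",
    # Newer conda/mamba stack requested here; version string is illustrative.
    "mambaforge-22.9": "mambaforge-22.9.0-3",
}
```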
</issue>
<code>
[start of readthedocs/builds/constants_docker.py]
1 """
2 Define constants here to allow import them without any external dependency.
3
4 There are situations where we want to have access to these values without Django installed
5 (e.g. common/dockerfiles/tasks.py)
6
7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
8 """
9
10 DOCKER_DEFAULT_IMAGE = "readthedocs/build"
11
12 # Adding a new tool/version to this setting requires:
13 #
14 # - a mapping between the expected version in the config file, to the full
15 # version installed via asdf (found via ``asdf list all <tool>``)
16 #
17 # - running the script ``./scripts/compile_version_upload.sh`` in
18 # development and production environments to compile and cache the new
19 # tool/version
20 #
21 # Note that when updating this options, you should also update the file:
22 # readthedocs/rtd_tests/fixtures/spec/v2/schema.json
23 RTD_DOCKER_BUILD_SETTINGS = {
24 # Mapping of build.os options to docker image.
25 "os": {
26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
28 },
29 # Mapping of build.tools options to specific versions.
30 "tools": {
31 "python": {
32 "2.7": "2.7.18",
33 "3.6": "3.6.15",
34 "3.7": "3.7.17",
35 "3.8": "3.8.17",
36 "3.9": "3.9.17",
37 "3.10": "3.10.12",
38 "3.11": "3.11.4",
39 # Always point to the latest stable release.
40 "3": "3.11.4",
41 "miniconda3-4.7": "miniconda3-4.7.12",
42 "mambaforge-4.10": "mambaforge-4.10.3-10",
43 },
44 "nodejs": {
45 "14": "14.20.1",
46 "16": "16.18.1",
47 "18": "18.16.1", # LTS
48 "19": "19.0.1",
49 "20": "20.3.1",
50 },
51 "rust": {
52 "1.55": "1.55.0",
53 "1.61": "1.61.0",
54 "1.64": "1.64.0",
55 "1.70": "1.70.0",
56 },
57 "golang": {
58 "1.17": "1.17.13",
59 "1.18": "1.18.10",
60 "1.19": "1.19.10",
61 "1.20": "1.20.5",
62 },
63 },
64 }
65
[end of readthedocs/builds/constants_docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -40,6 +40,7 @@
"3": "3.11.4",
"miniconda3-4.7": "miniconda3-4.7.12",
"mambaforge-4.10": "mambaforge-4.10.3-10",
+ "mambaforge-22.9": "mambaforge-22.9.0-3",
},
"nodejs": {
"14": "14.20.1",
| {"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -40,6 +40,7 @@\n \"3\": \"3.11.4\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n+ \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n", "issue": "Most recent available `mambaforge=4.10` is simply too old\nHello guys, just wanted to ask you if it's possible to have a more modern version available for `mambaforge` - the best and latest available to be sourced on RTD via the configuration file is 4.10 which is simply too old (maximum conda 4.10 and mamba 0.19) - updating to a modern mamba doesn't work, as you can see from me changing the conf file in https://github.com/ESMValGroup/ESMValTool/pull/3310/files with output in https://readthedocs.org/projects/esmvaltool/builds/21390633/ - mamba is stuck at 0.19.0, which, in turn, slows down the environment creation process to around 10 minutes (for more recent conda's, updating mamba to something like >=1.4.8 works very well, and updates conda to 23.3 or 23.4 too, but in this case the base versions are too old). If you need any help whatsoever, I offer to help, and once more, many thanks for your great work on RTD :beer: \n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.17\",\n \"3.9\": \"3.9.17\",\n \"3.10\": \"3.10.12\",\n \"3.11\": \"3.11.4\",\n # Always point to the latest stable release.\n \"3\": \"3.11.4\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}]} | 1,611 | 170 |
gh_patches_debug_29058 | rasdani/github-patches | git_diff | modin-project__modin-2149 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add note about braceexpand for cloud examples.
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Modin version** (`modin.__version__`):
- **Python version**:
- **Code we can use to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
</issue>
<code>
[start of examples/cluster/h2o-runner.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14
15 # pip install git+https://github.com/intel-go/ibis.git@develop
16
17 # NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH
18
19 # the following import turns on experimental mode in Modin,
20 # including enabling running things in remote cloud
21 import modin.experimental.pandas as pd # noqa: F401
22 from modin.experimental.cloud import create_cluster
23
24 from h2o import run_benchmark
25
26 test_cluster = create_cluster(
27 "aws",
28 "aws_credentials",
29 cluster_name="rayscale-test",
30 region="eu-north-1",
31 zone="eu-north-1b",
32 image="ami-00e1e82d7d4ca80d3",
33 )
34 with test_cluster:
35 parameters = {
36 "no_pandas": False,
37 "pandas_mode": "Modin_on_ray",
38 "ray_tmpdir": "/tmp",
39 "ray_memory": 1024 * 1024 * 1024,
40 "extended_functionality": False,
41 }
42
43 # G1... - for groupby queries; J1... - for join queries;
44 # Additional required files inside h2o-data folder:
45 # - J1_1e6_1e0_0_0.csv
46 # - J1_1e6_1e3_0_0.csv
47 # - J1_1e6_1e6_0_0.csv
48 for data_file in ["G1_5e5_1e2_0_0.csv", "J1_1e6_NA_0_0.csv"]:
49 parameters["data_file"] = f"https://modin-datasets.s3.amazonaws.com/h2o/{data_file}"
50 run_benchmark(parameters)
51
[end of examples/cluster/h2o-runner.py]
[start of examples/cluster/mortgage-runner.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14
15 # pip install git+https://github.com/intel-go/ibis.git@develop
16
17 # NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH
18
19 # the following import turns on experimental mode in Modin,
20 # including enabling running things in remote cloud
21 import modin.experimental.pandas as pd # noqa: F401
22 from modin.experimental.cloud import create_cluster
23
24 from mortgage import run_benchmark
25
26 test_cluster = create_cluster(
27 "aws",
28 "aws_credentials",
29 cluster_name="rayscale-test",
30 region="eu-north-1",
31 zone="eu-north-1b",
32 image="ami-00e1e82d7d4ca80d3",
33 )
34 with test_cluster:
35
36 parameters = {
37 "data_file": "https://modin-datasets.s3.amazonaws.com/mortgage",
38 # "data_file": "s3://modin-datasets/mortgage",
39 "dfiles_num": 1,
40 "no_ml": True,
41 "validation": False,
42 "no_ibis": True,
43 "no_pandas": False,
44 "pandas_mode": "Modin_on_ray",
45 "ray_tmpdir": "/tmp",
46 "ray_memory": 1024 * 1024 * 1024,
47 }
48
49 run_benchmark(parameters)
50
[end of examples/cluster/mortgage-runner.py]
[start of examples/cluster/taxi-runner.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14
15 # pip install git+https://github.com/intel-go/ibis.git@develop
16
17 # NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH
18
19 import sys
20
21 USE_OMNISCI = "--omnisci" in sys.argv
22
23 # the following import turns on experimental mode in Modin,
24 # including enabling running things in remote cloud
25 import modin.experimental.pandas as pd # noqa: F401
26 from modin.experimental.cloud import create_cluster
27
28 from taxi import run_benchmark as run_benchmark
29
30 cluster_params = {}
31 if USE_OMNISCI:
32 cluster_params["cluster_type"] = "omnisci"
33 test_cluster = create_cluster(
34 "aws",
35 "aws_credentials",
36 cluster_name="rayscale-test",
37 region="eu-north-1",
38 zone="eu-north-1b",
39 image="ami-00e1e82d7d4ca80d3",
40 **cluster_params,
41 )
42 with test_cluster:
43 data_file = "https://modin-datasets.s3.amazonaws.com/trips_data.csv"
44 if USE_OMNISCI:
45 # Workaround for GH#2099
46 from modin.experimental.cloud import get_connection
47
48 data_file, remote_data_file = "/tmp/trips_data.csv", data_file
49 get_connection().modules["subprocess"].check_call(
50 ["wget", remote_data_file, "-O", data_file]
51 )
52
53 # Omniscripts check for files being present when given local file paths,
54 # so replace "glob" there with a remote one
55 import utils.utils
56
57 utils.utils.glob = get_connection().modules["glob"]
58
59 parameters = {
60 "data_file": data_file,
61 # "data_file": "s3://modin-datasets/trips_data.csv",
62 "dfiles_num": 1,
63 "validation": False,
64 "no_ibis": True,
65 "no_pandas": False,
66 "pandas_mode": "Modin_on_omnisci" if USE_OMNISCI else "Modin_on_ray",
67 "ray_tmpdir": "/tmp",
68 "ray_memory": 1024 * 1024 * 1024,
69 }
70
71 run_benchmark(parameters)
72
[end of examples/cluster/taxi-runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/cluster/h2o-runner.py b/examples/cluster/h2o-runner.py
--- a/examples/cluster/h2o-runner.py
+++ b/examples/cluster/h2o-runner.py
@@ -12,9 +12,10 @@
# governing permissions and limitations under the License.
-# pip install git+https://github.com/intel-go/ibis.git@develop
+# pip install git+https://github.com/intel-ai/ibis.git@develop
+# pip install braceexpand
-# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH
+# NOTE: expects https://github.com/intel-ai/omniscripts checked out and in PYTHONPATH
# the following import turns on experimental mode in Modin,
# including enabling running things in remote cloud
diff --git a/examples/cluster/mortgage-runner.py b/examples/cluster/mortgage-runner.py
--- a/examples/cluster/mortgage-runner.py
+++ b/examples/cluster/mortgage-runner.py
@@ -12,9 +12,10 @@
# governing permissions and limitations under the License.
-# pip install git+https://github.com/intel-go/ibis.git@develop
+# pip install git+https://github.com/intel-ai/ibis.git@develop
+# pip install braceexpand
-# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH
+# NOTE: expects https://github.com/intel-ai/omniscripts checked out and in PYTHONPATH
# the following import turns on experimental mode in Modin,
# including enabling running things in remote cloud
diff --git a/examples/cluster/taxi-runner.py b/examples/cluster/taxi-runner.py
--- a/examples/cluster/taxi-runner.py
+++ b/examples/cluster/taxi-runner.py
@@ -12,9 +12,10 @@
# governing permissions and limitations under the License.
-# pip install git+https://github.com/intel-go/ibis.git@develop
+# pip install git+https://github.com/intel-ai/ibis.git@develop
+# pip install braceexpand
-# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH
+# NOTE: expects https://github.com/intel-ai/omniscripts checked out and in PYTHONPATH
import sys
| {"golden_diff": "diff --git a/examples/cluster/h2o-runner.py b/examples/cluster/h2o-runner.py\n--- a/examples/cluster/h2o-runner.py\n+++ b/examples/cluster/h2o-runner.py\n@@ -12,9 +12,10 @@\n # governing permissions and limitations under the License.\n \n \n-# pip install git+https://github.com/intel-go/ibis.git@develop\n+# pip install git+https://github.com/intel-ai/ibis.git@develop\n+# pip install braceexpand\n \n-# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH\n+# NOTE: expects https://github.com/intel-ai/omniscripts checked out and in PYTHONPATH\n \n # the following import turns on experimental mode in Modin,\n # including enabling running things in remote cloud\ndiff --git a/examples/cluster/mortgage-runner.py b/examples/cluster/mortgage-runner.py\n--- a/examples/cluster/mortgage-runner.py\n+++ b/examples/cluster/mortgage-runner.py\n@@ -12,9 +12,10 @@\n # governing permissions and limitations under the License.\n \n \n-# pip install git+https://github.com/intel-go/ibis.git@develop\n+# pip install git+https://github.com/intel-ai/ibis.git@develop\n+# pip install braceexpand\n \n-# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH\n+# NOTE: expects https://github.com/intel-ai/omniscripts checked out and in PYTHONPATH\n \n # the following import turns on experimental mode in Modin,\n # including enabling running things in remote cloud\ndiff --git a/examples/cluster/taxi-runner.py b/examples/cluster/taxi-runner.py\n--- a/examples/cluster/taxi-runner.py\n+++ b/examples/cluster/taxi-runner.py\n@@ -12,9 +12,10 @@\n # governing permissions and limitations under the License.\n \n \n-# pip install git+https://github.com/intel-go/ibis.git@develop\n+# pip install git+https://github.com/intel-ai/ibis.git@develop\n+# pip install braceexpand\n \n-# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH\n+# NOTE: expects https://github.com/intel-ai/omniscripts checked out and in PYTHONPATH\n \n import sys\n", "issue": "Add note about braceexpand for cloud examples.\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Modin version** (`modin.__version__`):\r\n- **Python version**:\r\n- **Code we can use to reproduce**:\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\n# pip install git+https://github.com/intel-go/ibis.git@develop\n\n# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH\n\n# the following import turns on experimental mode in Modin,\n# including enabling running things in remote cloud\nimport modin.experimental.pandas as pd # noqa: F401\nfrom modin.experimental.cloud import create_cluster\n\nfrom h2o import run_benchmark\n\ntest_cluster = create_cluster(\n \"aws\",\n \"aws_credentials\",\n cluster_name=\"rayscale-test\",\n region=\"eu-north-1\",\n zone=\"eu-north-1b\",\n image=\"ami-00e1e82d7d4ca80d3\",\n)\nwith test_cluster:\n parameters = {\n \"no_pandas\": False,\n \"pandas_mode\": \"Modin_on_ray\",\n \"ray_tmpdir\": \"/tmp\",\n \"ray_memory\": 1024 * 1024 * 1024,\n \"extended_functionality\": False,\n }\n\n # G1... - for groupby queries; J1... - for join queries;\n # Additional required files inside h2o-data folder:\n # - J1_1e6_1e0_0_0.csv\n # - J1_1e6_1e3_0_0.csv\n # - J1_1e6_1e6_0_0.csv\n for data_file in [\"G1_5e5_1e2_0_0.csv\", \"J1_1e6_NA_0_0.csv\"]:\n parameters[\"data_file\"] = f\"https://modin-datasets.s3.amazonaws.com/h2o/{data_file}\"\n run_benchmark(parameters)\n", "path": "examples/cluster/h2o-runner.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\n# pip install git+https://github.com/intel-go/ibis.git@develop\n\n# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH\n\n# the following import turns on experimental mode in Modin,\n# including enabling running things in remote cloud\nimport modin.experimental.pandas as pd # noqa: F401\nfrom modin.experimental.cloud import create_cluster\n\nfrom mortgage import run_benchmark\n\ntest_cluster = create_cluster(\n \"aws\",\n \"aws_credentials\",\n cluster_name=\"rayscale-test\",\n region=\"eu-north-1\",\n zone=\"eu-north-1b\",\n image=\"ami-00e1e82d7d4ca80d3\",\n)\nwith test_cluster:\n\n parameters = {\n \"data_file\": \"https://modin-datasets.s3.amazonaws.com/mortgage\",\n # \"data_file\": \"s3://modin-datasets/mortgage\",\n \"dfiles_num\": 1,\n \"no_ml\": True,\n \"validation\": False,\n \"no_ibis\": True,\n \"no_pandas\": False,\n \"pandas_mode\": \"Modin_on_ray\",\n \"ray_tmpdir\": \"/tmp\",\n \"ray_memory\": 1024 * 1024 * 1024,\n }\n\n run_benchmark(parameters)\n", "path": "examples/cluster/mortgage-runner.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. 
The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\n# pip install git+https://github.com/intel-go/ibis.git@develop\n\n# NOTE: expects https://github.com/intel-go/omniscripts/ checked out and in PYTHONPATH\n\nimport sys\n\nUSE_OMNISCI = \"--omnisci\" in sys.argv\n\n# the following import turns on experimental mode in Modin,\n# including enabling running things in remote cloud\nimport modin.experimental.pandas as pd # noqa: F401\nfrom modin.experimental.cloud import create_cluster\n\nfrom taxi import run_benchmark as run_benchmark\n\ncluster_params = {}\nif USE_OMNISCI:\n cluster_params[\"cluster_type\"] = \"omnisci\"\ntest_cluster = create_cluster(\n \"aws\",\n \"aws_credentials\",\n cluster_name=\"rayscale-test\",\n region=\"eu-north-1\",\n zone=\"eu-north-1b\",\n image=\"ami-00e1e82d7d4ca80d3\",\n **cluster_params,\n)\nwith test_cluster:\n data_file = \"https://modin-datasets.s3.amazonaws.com/trips_data.csv\"\n if USE_OMNISCI:\n # Workaround for GH#2099\n from modin.experimental.cloud import get_connection\n\n data_file, remote_data_file = \"/tmp/trips_data.csv\", data_file\n get_connection().modules[\"subprocess\"].check_call(\n [\"wget\", remote_data_file, \"-O\", data_file]\n )\n\n # Omniscripts check for files being present when given local file paths,\n # so replace \"glob\" there with a remote one\n import utils.utils\n\n utils.utils.glob = get_connection().modules[\"glob\"]\n\n parameters = {\n \"data_file\": data_file,\n # \"data_file\": \"s3://modin-datasets/trips_data.csv\",\n \"dfiles_num\": 1,\n \"validation\": False,\n \"no_ibis\": True,\n \"no_pandas\": False,\n \"pandas_mode\": \"Modin_on_omnisci\" if USE_OMNISCI else \"Modin_on_ray\",\n \"ray_tmpdir\": \"/tmp\",\n \"ray_memory\": 1024 * 1024 * 1024,\n }\n\n run_benchmark(parameters)\n", "path": "examples/cluster/taxi-runner.py"}]} | 2,767 | 525 |
gh_patches_debug_18135 | rasdani/github-patches | git_diff | streamlink__streamlink-3484 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Turkuvaz Plugin missing Streams
Hi,
first of all, to be sure, I installed Streamlink 2.0.0 via Python 3 again.
After that I tested all streams one by one with turkuvaz.py.
Most of them are working; only 2 of the 9 channels are missing, failing with "error: No plugin can handle URL".
A2 and A Haber TV:
https://www.atv.com.tr/a2tv/canli-yayin
https://www.ahaber.com.tr/video/canli-yayin
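
For illustration, a small standalone check (the pattern is copied verbatim from the plugin below) showing that neither URL is matched by the current regex:
```python
import re

_url_re = re.compile(r"""(?x)https?://(?:www\.)?
    (?:
        (?:
            (atvavrupa)\.tv
            |
            (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\.com\.tr
        )/webtv/(?:live-broadcast|canli-yayin)
        |
        sabah\.com\.tr/(apara)/canli-yayin
    )""")

for url in ("https://www.atv.com.tr/a2tv/canli-yayin",
            "https://www.ahaber.com.tr/video/canli-yayin"):
    print(url, bool(_url_re.match(url)))   # both print False: the paths are not /webtv/...
```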
</issue>
<code>
[start of src/streamlink/plugins/turkuvaz.py]
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import useragents, validate
6 from streamlink.stream import HLSStream
7
8 log = logging.getLogger(__name__)
9
10
11 class Turkuvaz(Plugin):
12 """
13 Plugin to support ATV/A2TV Live streams from www.atv.com.tr and www.a2tv.com.tr
14 """
15
16 _url_re = re.compile(r"""(?x)https?://(?:www\.)?
17 (?:
18 (?:
19 (atvavrupa)\.tv
20 |
21 (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\.com\.tr
22 )/webtv/(?:live-broadcast|canli-yayin)
23 |
24 sabah\.com\.tr/(apara)/canli-yayin
25 )""")
26 _hls_url = "https://trkvz-live.ercdn.net/{channel}/{channel}.m3u8"
27 _token_url = "https://securevideotoken.tmgrup.com.tr/webtv/secure"
28 _token_schema = validate.Schema(validate.all(
29 {
30 "Success": True,
31 "Url": validate.url(),
32 },
33 validate.get("Url"))
34 )
35
36 @classmethod
37 def can_handle_url(cls, url):
38 return cls._url_re.match(url) is not None
39
40 def _get_streams(self):
41 url_m = self._url_re.match(self.url)
42 domain = url_m.group(1) or url_m.group(2) or url_m.group(3)
43 # remap the domain to channel
44 channel = {"atv": "atvhd",
45 "ahaber": "ahaberhd",
46 "apara": "aparahd",
47 "aspor": "asporhd",
48 "anews": "anewshd",
49 "minikacocuk": "minikagococuk"}.get(domain, domain)
50 hls_url = self._hls_url.format(channel=channel)
51 # get the secure HLS URL
52 res = self.session.http.get(self._token_url,
53 params="url={0}".format(hls_url),
54 headers={"Referer": self.url,
55 "User-Agent": useragents.CHROME})
56
57 secure_hls_url = self.session.http.json(res, schema=self._token_schema)
58
59 log.debug("Found HLS URL: {0}".format(secure_hls_url))
60 return HLSStream.parse_variant_playlist(self.session, secure_hls_url)
61
62
63 __plugin__ = Turkuvaz
64
[end of src/streamlink/plugins/turkuvaz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/turkuvaz.py b/src/streamlink/plugins/turkuvaz.py
--- a/src/streamlink/plugins/turkuvaz.py
+++ b/src/streamlink/plugins/turkuvaz.py
@@ -20,6 +20,10 @@
|
(atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\.com\.tr
)/webtv/(?:live-broadcast|canli-yayin)
+ |
+ (ahaber)\.com\.tr/video/canli-yayin
+ |
+ atv\.com\.tr/(a2tv)/canli-yayin
|
sabah\.com\.tr/(apara)/canli-yayin
)""")
@@ -39,7 +43,7 @@
def _get_streams(self):
url_m = self._url_re.match(self.url)
- domain = url_m.group(1) or url_m.group(2) or url_m.group(3)
+ domain = url_m.group(1) or url_m.group(2) or url_m.group(3) or url_m.group(4) or url_m.group(5)
# remap the domain to channel
channel = {"atv": "atvhd",
"ahaber": "ahaberhd",
| {"golden_diff": "diff --git a/src/streamlink/plugins/turkuvaz.py b/src/streamlink/plugins/turkuvaz.py\n--- a/src/streamlink/plugins/turkuvaz.py\n+++ b/src/streamlink/plugins/turkuvaz.py\n@@ -20,6 +20,10 @@\n |\n (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\\.com\\.tr\n )/webtv/(?:live-broadcast|canli-yayin)\n+ |\n+ (ahaber)\\.com\\.tr/video/canli-yayin\n+ |\n+ atv\\.com\\.tr/(a2tv)/canli-yayin\n |\n sabah\\.com\\.tr/(apara)/canli-yayin\n )\"\"\")\n@@ -39,7 +43,7 @@\n \n def _get_streams(self):\n url_m = self._url_re.match(self.url)\n- domain = url_m.group(1) or url_m.group(2) or url_m.group(3)\n+ domain = url_m.group(1) or url_m.group(2) or url_m.group(3) or url_m.group(4) or url_m.group(5)\n # remap the domain to channel\n channel = {\"atv\": \"atvhd\",\n \"ahaber\": \"ahaberhd\",\n", "issue": "Turkuvaz Plugin missing Streams\nHi,\r\n\r\nfirst of all to be sure installed Streamlink 2.0.0 via Python3 again.\r\n\r\nAfter that tested all streams one by one with the turkuvaz.py\r\n\r\nMost of them are working, only 2 of 9 channels missing, \"error: No plugin can handle URL\"\r\n\r\nA2 and A Haber TV:\r\n\r\nhttps://www.atv.com.tr/a2tv/canli-yayin\r\nhttps://www.ahaber.com.tr/video/canli-yayin\r\n\r\n\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\nclass Turkuvaz(Plugin):\n \"\"\"\n Plugin to support ATV/A2TV Live streams from www.atv.com.tr and www.a2tv.com.tr\n \"\"\"\n\n _url_re = re.compile(r\"\"\"(?x)https?://(?:www\\.)?\n (?:\n (?:\n (atvavrupa)\\.tv\n |\n (atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\\.com\\.tr\n )/webtv/(?:live-broadcast|canli-yayin)\n |\n sabah\\.com\\.tr/(apara)/canli-yayin\n )\"\"\")\n _hls_url = \"https://trkvz-live.ercdn.net/{channel}/{channel}.m3u8\"\n _token_url = \"https://securevideotoken.tmgrup.com.tr/webtv/secure\"\n _token_schema = validate.Schema(validate.all(\n {\n \"Success\": True,\n \"Url\": validate.url(),\n },\n validate.get(\"Url\"))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n url_m = self._url_re.match(self.url)\n domain = url_m.group(1) or url_m.group(2) or url_m.group(3)\n # remap the domain to channel\n channel = {\"atv\": \"atvhd\",\n \"ahaber\": \"ahaberhd\",\n \"apara\": \"aparahd\",\n \"aspor\": \"asporhd\",\n \"anews\": \"anewshd\",\n \"minikacocuk\": \"minikagococuk\"}.get(domain, domain)\n hls_url = self._hls_url.format(channel=channel)\n # get the secure HLS URL\n res = self.session.http.get(self._token_url,\n params=\"url={0}\".format(hls_url),\n headers={\"Referer\": self.url,\n \"User-Agent\": useragents.CHROME})\n\n secure_hls_url = self.session.http.json(res, schema=self._token_schema)\n\n log.debug(\"Found HLS URL: {0}\".format(secure_hls_url))\n return HLSStream.parse_variant_playlist(self.session, secure_hls_url)\n\n\n__plugin__ = Turkuvaz\n", "path": "src/streamlink/plugins/turkuvaz.py"}]} | 1,332 | 305 |
gh_patches_debug_30776 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-223 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Marshall's
</issue>
<code>
[start of locations/spiders/marshalls.py]
1 import json
2 import re
3 import scrapy
4 from locations.items import GeojsonPointItem
5
6 STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
7 "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
8 "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
9 "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
10 "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
11
12 DAYS = {'Mon': 'Mo', 'Tue': 'Tu',
13 'Wed': 'We', 'Thu': 'Th',
14 'Fri': 'Fr', 'Sat': 'Sa',
15 'Sun': 'Su'}
16
17 URL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'
18
19
20 def normalize_time(hours):
21
22 if not hours:
23 return ''
24
25 day_times = hours.split(',')
26 normalize_day_times = []
27
28 for day_time in day_times:
29 day, hours = [x.strip() for x in day_time.split(': ')]
30 normalize_hours = []
31
32 if re.search('-', day):
33 days = [x.strip() for x in day.split('-')]
34 norm_days = '-'.join([DAYS.get(x, '') for x in days])
35 else:
36 norm_days = DAYS.get(day, '')
37
38 if re.search('CLOSED', hours):
39 norm_hours = ' off'
40 normalize_hours.append(norm_hours)
41 else:
42 if re.search('-', hours):
43 hours = [x.strip() for x in hours.split('-')]
44
45 for hour in hours:
46
47 if hour[-1] == 'p':
48 if re.search(':', hour[:-1]):
49 hora, minute = [x.strip() for x in hour[:-1].split(':')]
50 if int(hora) < 12:
51 norm_hours = str(int(hora) + 12) + ':' + minute
52 else:
53 if int(hour[:-1]) < 12:
54 norm_hours = str(int(hour[:-1]) + 12) + ":00"
55
56 elif hour[-1] == 'a':
57 if re.search(':', hour[:-1]):
58 hora, minute = [x.strip() for x in hour[:-1].split(':')]
59 norm_hours = hora + ':' + minute
60 else:
61 norm_hours = hour[:-1] + ":00"
62
63 normalize_hours.append(norm_hours)
64
65 normalize_day_times.append(' '.join([norm_days, '-'.join(normalize_hours)]))
66 return '; '.join(normalize_day_times)
67
68
69 class MarshallsSpider(scrapy.Spider):
70
71 name = "marshalls"
72 allowed_domains = ["mktsvc.tjx.com", 'www.marshallsonline.com']
73
74 def start_requests(self):
75 url = URL
76
77 headers = {
78 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',
79 'Origin': 'https://www.marshallsonline.com',
80 'Accept-Encoding': 'gzip, deflate, br',
81 'Accept': 'application/json, text/plain, */*',
82 'Referer': 'https://www.marshallsonline.com/store-finder/by-state',
83 'Connection': 'keep-alive',
84 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
85 }
86
87 for state in STATES:
88 form_data = {'chain': '10', 'lang': 'en', 'state': state}
89
90 yield scrapy.http.FormRequest(url=url, method='POST', formdata=form_data,
91 headers=headers, callback=self.parse)
92
93 def parse(self, response):
94
95 data = json.loads(response.body_as_unicode())
96 stores = data.get('Stores', None)
97
98 for store in stores:
99 lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)]
100 store['ref'] = URL + str(store.get('StoreID', None))
101
102 opening_hours = normalize_time(store.get('Hours', ''))
103
104 if opening_hours:
105 store['opening_hours'] = opening_hours
106 store.pop('Hours', None)
107
108 yield GeojsonPointItem(
109 properties=store,
110 lon_lat=lon_lat
111 )
112
[end of locations/spiders/marshalls.py]
</code>
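As a quick sanity check on the hours parser in the listing above, this is what `normalize_time` produces for a typical schedule string. The input format is inferred from the parsing code itself, and the import assumes the project package is importable:

```python
from locations.spiders.marshalls import normalize_time

print(normalize_time("Mon-Sat: 9:30a-9:30p, Sun: 11a-8p"))
# -> "Mo-Sa 9:30-21:30; Su 11:00-20:00"
```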
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/marshalls.py b/locations/spiders/marshalls.py
--- a/locations/spiders/marshalls.py
+++ b/locations/spiders/marshalls.py
@@ -17,10 +17,20 @@
URL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'
+NORMALIZE_KEYS = (
+ ('addr:full', ['Address', 'Address2']),
+ ('addr:city', ['City']),
+ ('addr:state', ['State']),
+ ('addr:postcode', ['Zip']),
+ ('addr:country', ['Country']),
+ ('phone', ['Phone']),
+ )
+
+
def normalize_time(hours):
if not hours:
- return ''
+ return ''
day_times = hours.split(',')
normalize_day_times = []
@@ -94,18 +104,23 @@
data = json.loads(response.body_as_unicode())
stores = data.get('Stores', None)
+ props = {}
for store in stores:
- lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)]
- store['ref'] = URL + str(store.get('StoreID', None))
+ lon_lat = [store.pop('Longitude', ''), store.pop('Latitude', None)]
+ props['ref'] = store.pop('StoreID', None)
+ props['website'] = URL
+
+ for new_key, old_keys in NORMALIZE_KEYS:
+ props[new_key] = ", ".join([store.pop(key, '').strip() for key in old_keys if store[key]])
- opening_hours = normalize_time(store.get('Hours', ''))
+ opening_hours = normalize_time(store.pop('Hours', ''))
if opening_hours:
- store['opening_hours'] = opening_hours
- store.pop('Hours', None)
+ props['opening_hours'] = opening_hours
+ props.pop('Hours', None)
yield GeojsonPointItem(
- properties=store,
+ properties=props,
lon_lat=lon_lat
)
| {"golden_diff": "diff --git a/locations/spiders/marshalls.py b/locations/spiders/marshalls.py\n--- a/locations/spiders/marshalls.py\n+++ b/locations/spiders/marshalls.py\n@@ -17,10 +17,20 @@\n URL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'\n \n \n+NORMALIZE_KEYS = (\n+ ('addr:full', ['Address', 'Address2']),\n+ ('addr:city', ['City']),\n+ ('addr:state', ['State']),\n+ ('addr:postcode', ['Zip']),\n+ ('addr:country', ['Country']),\n+ ('phone', ['Phone']),\n+ )\n+\n+\n def normalize_time(hours):\n \n if not hours:\n- return ''\n+ return ''\n \n day_times = hours.split(',')\n normalize_day_times = []\n@@ -94,18 +104,23 @@\n \n data = json.loads(response.body_as_unicode())\n stores = data.get('Stores', None)\n+ props = {}\n \n for store in stores:\n- lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)]\n- store['ref'] = URL + str(store.get('StoreID', None))\n+ lon_lat = [store.pop('Longitude', ''), store.pop('Latitude', None)]\n+ props['ref'] = store.pop('StoreID', None)\n+ props['website'] = URL\n+\n+ for new_key, old_keys in NORMALIZE_KEYS:\n+ props[new_key] = \", \".join([store.pop(key, '').strip() for key in old_keys if store[key]])\n \n- opening_hours = normalize_time(store.get('Hours', ''))\n+ opening_hours = normalize_time(store.pop('Hours', ''))\n \n if opening_hours:\n- store['opening_hours'] = opening_hours\n- store.pop('Hours', None)\n+ props['opening_hours'] = opening_hours\n+ props.pop('Hours', None)\n \n yield GeojsonPointItem(\n- properties=store,\n+ properties=props,\n lon_lat=lon_lat\n )\n", "issue": "Marshall's\n\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\nSTATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n\nDAYS = {'Mon': 'Mo', 'Tue': 'Tu',\n 'Wed': 'We', 'Thu': 'Th',\n 'Fri': 'Fr', 'Sat': 'Sa',\n 'Sun': 'Su'}\n\nURL = 'https://mktsvc.tjx.com/storelocator/GetSearchResultsByState'\n\n\ndef normalize_time(hours):\n\n if not hours:\n return ''\n\n day_times = hours.split(',')\n normalize_day_times = []\n\n for day_time in day_times:\n day, hours = [x.strip() for x in day_time.split(': ')]\n normalize_hours = []\n\n if re.search('-', day):\n days = [x.strip() for x in day.split('-')]\n norm_days = '-'.join([DAYS.get(x, '') for x in days])\n else:\n norm_days = DAYS.get(day, '')\n\n if re.search('CLOSED', hours):\n norm_hours = ' off'\n normalize_hours.append(norm_hours)\n else:\n if re.search('-', hours):\n hours = [x.strip() for x in hours.split('-')]\n\n for hour in hours:\n\n if hour[-1] == 'p':\n if re.search(':', hour[:-1]):\n hora, minute = [x.strip() for x in hour[:-1].split(':')]\n if int(hora) < 12:\n norm_hours = str(int(hora) + 12) + ':' + minute\n else:\n if int(hour[:-1]) < 12:\n norm_hours = str(int(hour[:-1]) + 12) + \":00\"\n\n elif hour[-1] == 'a':\n if re.search(':', hour[:-1]):\n hora, minute = [x.strip() for x in hour[:-1].split(':')]\n norm_hours = hora + ':' + minute\n else:\n norm_hours = hour[:-1] + \":00\"\n\n normalize_hours.append(norm_hours)\n\n normalize_day_times.append(' '.join([norm_days, '-'.join(normalize_hours)]))\n return '; 
'.join(normalize_day_times)\n\n\nclass MarshallsSpider(scrapy.Spider):\n\n name = \"marshalls\"\n allowed_domains = [\"mktsvc.tjx.com\", 'www.marshallsonline.com']\n\n def start_requests(self):\n url = URL\n\n headers = {\n 'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',\n 'Origin': 'https://www.marshallsonline.com',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept': 'application/json, text/plain, */*',\n 'Referer': 'https://www.marshallsonline.com/store-finder/by-state',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n }\n\n for state in STATES:\n form_data = {'chain': '10', 'lang': 'en', 'state': state}\n\n yield scrapy.http.FormRequest(url=url, method='POST', formdata=form_data,\n headers=headers, callback=self.parse)\n\n def parse(self, response):\n\n data = json.loads(response.body_as_unicode())\n stores = data.get('Stores', None)\n\n for store in stores:\n lon_lat = [store.pop('Longitude', None), store.pop('Latitude', None)]\n store['ref'] = URL + str(store.get('StoreID', None))\n\n opening_hours = normalize_time(store.get('Hours', ''))\n\n if opening_hours:\n store['opening_hours'] = opening_hours\n store.pop('Hours', None)\n\n yield GeojsonPointItem(\n properties=store,\n lon_lat=lon_lat\n )\n", "path": "locations/spiders/marshalls.py"}]} | 1,757 | 464 |
gh_patches_debug_11984 | rasdani/github-patches | git_diff | dotkom__onlineweb4-420 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mark rules error for anonymous users on events
`'AnonymousUser' object has no attribute 'mark_rules'`
</issue>
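The traceback comes from reading a custom profile field off `AnonymousUser`, which has no `mark_rules` attribute. The usual guard is to check authentication before touching such fields — a minimal sketch of the pattern (the real change is in the patch further down; note that `is_authenticated` is still a method in the Django version this form targets, whereas newer Django exposes it as a property):

```python
# inside CaptchaForm.__init__ -- bail out before touching mark_rules
if user and user.is_authenticated() and user.mark_rules:
    del self.fields['mark_rules']
```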
<code>
[start of apps/events/forms.py]
1 from django import forms
2 from captcha.fields import CaptchaField
3
4 class CaptchaForm(forms.Form):
5 def __init__(self, *args, **kwargs):
6 user = kwargs.pop('user', None)
7 super(CaptchaForm, self).__init__(*args, **kwargs)
8 # Removing mark rules field if user has already accepted the rules
9 if user and user.mark_rules:
10 del self.fields['mark_rules']
11 mark_rules = forms.BooleanField(label=u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>')
12 captcha = CaptchaField()
13
[end of apps/events/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/forms.py b/apps/events/forms.py
--- a/apps/events/forms.py
+++ b/apps/events/forms.py
@@ -6,7 +6,7 @@
user = kwargs.pop('user', None)
super(CaptchaForm, self).__init__(*args, **kwargs)
# Removing mark rules field if user has already accepted the rules
- if user and user.mark_rules:
+ if user and user.is_authenticated() and user.mark_rules:
del self.fields['mark_rules']
mark_rules = forms.BooleanField(label=u'Jeg godtar <a href="/profile/#marks" target="_blank">prikkreglene</a>')
captcha = CaptchaField()
| {"golden_diff": "diff --git a/apps/events/forms.py b/apps/events/forms.py\n--- a/apps/events/forms.py\n+++ b/apps/events/forms.py\n@@ -6,7 +6,7 @@\n user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n # Removing mark rules field if user has already accepted the rules\n- if user and user.mark_rules:\n+ if user and user.is_authenticated() and user.mark_rules:\n del self.fields['mark_rules']\n mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n captcha = CaptchaField()\n", "issue": "Mark rules error for anonymous users on events\n'AnonymousUser' object has no attribute 'mark_rules'\n\n", "before_files": [{"content": "from django import forms\nfrom captcha.fields import CaptchaField\n\nclass CaptchaForm(forms.Form):\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n super(CaptchaForm, self).__init__(*args, **kwargs)\n # Removing mark rules field if user has already accepted the rules\n if user and user.mark_rules:\n del self.fields['mark_rules']\n mark_rules = forms.BooleanField(label=u'Jeg godtar <a href=\"/profile/#marks\" target=\"_blank\">prikkreglene</a>')\n captcha = CaptchaField()\n", "path": "apps/events/forms.py"}]} | 700 | 148 |
gh_patches_debug_219 | rasdani/github-patches | git_diff | pypa__setuptools-2427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sphinx setup should be stricter
I noticed that some of the docs pages are unreachable when navigating from the main RTD page. In particular, _I know_ that there's `history.rst` that is only accessible if one knows the URL upfront.
I tracked this to https://github.com/pypa/setuptools/pull/2097 which removes entries from the TOC but doesn't reintroduce them in other places.
Sphinx has a few toggles that make it nitpicky about warnings. I think these should be enabled in CI to prevent such problems in the future. That would catch implicit orphan pages as well as dead references or typos.
</issue>
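For orientation, the strictness knob referred to above is a one-line setting in `docs/conf.py`; the project's eventual patch below enables it (together with a `default_role`). A sketch:

```python
# docs/conf.py -- treat unresolved cross-references as warnings
nitpicky = True
```

Running the build with `sphinx-build -W` then promotes those warnings to hard errors, which is what makes the check useful in CI.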
<code>
[start of docs/conf.py]
1 import subprocess
2 import sys
3 import os
4
5
6 # hack to run the bootstrap script so that jaraco.packaging.sphinx
7 # can invoke setup.py
8 'READTHEDOCS' in os.environ and subprocess.check_call(
9 [sys.executable, '-m', 'bootstrap'],
10 cwd=os.path.join(os.path.dirname(__file__), os.path.pardir),
11 )
12
13 # -- Project information -----------------------------------------------------
14
15 github_url = 'https://github.com'
16 github_sponsors_url = f'{github_url}/sponsors'
17
18 # -- General configuration --
19
20 extensions = [
21 'sphinx.ext.extlinks', # allows to create custom roles easily
22 'jaraco.packaging.sphinx',
23 'rst.linker',
24 ]
25
26 # Add any paths that contain templates here, relative to this directory.
27 templates_path = ['_templates']
28
29 # The master toctree document.
30 master_doc = 'index'
31
32 # List of directories, relative to source directory, that shouldn't be searched
33 # for source files.
34 exclude_trees = []
35
36 # The name of the Pygments (syntax highlighting) style to use.
37 pygments_style = 'sphinx'
38
39 # -- Options for extlinks extension ---------------------------------------
40 extlinks = {
41 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
42 }
43
44 # -- Options for HTML output --
45
46 # The theme to use for HTML and HTML Help pages. Major themes that come with
47 # Sphinx are currently 'default' and 'sphinxdoc'.
48 html_theme = 'nature'
49
50 # Add any paths that contain custom themes here, relative to this directory.
51 html_theme_path = ['_theme']
52
53 # If true, SmartyPants will be used to convert quotes and dashes to
54 # typographically correct entities.
55 html_use_smartypants = True
56
57 # Custom sidebar templates, maps document names to template names.
58 html_sidebars = {
59 'index': [
60 'relations.html', 'sourcelink.html', 'indexsidebar.html',
61 'searchbox.html']}
62
63 # If false, no module index is generated.
64 html_use_modindex = False
65
66 # If false, no index is generated.
67 html_use_index = False
68
69 # -- Options for LaTeX output --
70
71 # Grouping the document tree into LaTeX files. List of tuples
72 # (source start file, target name, title, author,
73 # documentclass [howto/manual]).
74 latex_documents = [(
75 'index', 'Setuptools.tex', 'Setuptools Documentation',
76 'The fellowship of the packaging', 'manual',
77 )]
78
79 link_files = {
80 '../CHANGES.rst': dict(
81 using=dict(
82 BB='https://bitbucket.org',
83 GH='https://github.com',
84 ),
85 replace=[
86 dict(
87 pattern=r'(Issue )?#(?P<issue>\d+)',
88 url='{package_url}/issues/{issue}',
89 ),
90 dict(
91 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
92 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
93 ),
94 dict(
95 pattern=r'Distribute #(?P<distribute>\d+)',
96 url='{BB}/tarek/distribute/issue/{distribute}',
97 ),
98 dict(
99 pattern=r'Buildout #(?P<buildout>\d+)',
100 url='{GH}/buildout/buildout/issues/{buildout}',
101 ),
102 dict(
103 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
104 url='http://bugs.python.org/setuptools/issue{old_setuptools}',
105 ),
106 dict(
107 pattern=r'Jython #(?P<jython>\d+)',
108 url='http://bugs.jython.org/issue{jython}',
109 ),
110 dict(
111 pattern=r'(Python #|bpo-)(?P<python>\d+)',
112 url='http://bugs.python.org/issue{python}',
113 ),
114 dict(
115 pattern=r'Interop #(?P<interop>\d+)',
116 url='{GH}/pypa/interoperability-peps/issues/{interop}',
117 ),
118 dict(
119 pattern=r'Pip #(?P<pip>\d+)',
120 url='{GH}/pypa/pip/issues/{pip}',
121 ),
122 dict(
123 pattern=r'Packaging #(?P<packaging>\d+)',
124 url='{GH}/pypa/packaging/issues/{packaging}',
125 ),
126 dict(
127 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
128 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
129 ),
130 dict(
131 pattern=r'PEP[- ](?P<pep_number>\d+)',
132 url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
133 ),
134 dict(
135 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
136 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
137 ),
138 dict(
139 pattern=r'pypa/distutils#(?P<distutils>\d+)',
140 url='{GH}/pypa/distutils/issues/{distutils}',
141 ),
142 dict(
143 pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
144 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
145 ),
146 ],
147 ),
148 }
149
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -146,3 +146,12 @@
],
),
}
+
+
+# Be strict about any broken references:
+nitpicky = True
+
+
+# Ref: https://github.com/python-attrs/attrs/pull/571/files\
+# #diff-85987f48f1258d9ee486e3191495582dR82
+default_role = 'any'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -146,3 +146,12 @@\n ],\n ),\n }\n+\n+\n+# Be strict about any broken references:\n+nitpicky = True\n+\n+\n+# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n+# #diff-85987f48f1258d9ee486e3191495582dR82\n+default_role = 'any'\n", "issue": "Sphinx setup should be stricter\nI noticed that some of the docs pages are unreachable when navigating from the main RTD page. In particular, _I know_ that there's `history.rst` that is only accessible if one knows the URL upfront.\r\n\r\nI tracked this to https://github.com/pypa/setuptools/pull/2097 which removes entries from the TOC but doesn't reintroduce them in other places.\r\n\r\nSphinx has a few toggles that make it nitpicky about warnings. I think this should be enabled in the CI to prevent such problems in the future. This should catch implicit orphan pages as well as dead references or typos.\n", "before_files": [{"content": "import subprocess\nimport sys\nimport os\n\n\n# hack to run the bootstrap script so that jaraco.packaging.sphinx\n# can invoke setup.py\n'READTHEDOCS' in os.environ and subprocess.check_call(\n [sys.executable, '-m', 'bootstrap'],\n cwd=os.path.join(os.path.dirname(__file__), os.path.pardir),\n)\n\n# -- Project information -----------------------------------------------------\n\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\n\n# -- General configuration --\n\nextensions = [\n 'sphinx.ext.extlinks', # allows to create custom roles easily\n 'jaraco.packaging.sphinx',\n 'rst.linker',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for extlinks extension ---------------------------------------\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\n\n# -- Options for HTML output --\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'nature'\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['_theme']\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {\n 'index': [\n 'relations.html', 'sourcelink.html', 'indexsidebar.html',\n 'searchbox.html']}\n\n# If false, no module index is generated.\nhtml_use_modindex = False\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# -- Options for LaTeX output --\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author,\n# documentclass [howto/manual]).\nlatex_documents = [(\n 'index', 'Setuptools.tex', 'Setuptools Documentation',\n 'The fellowship of the packaging', 'manual',\n)]\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n", "path": "docs/conf.py"}]} | 2,192 | 130 |
gh_patches_debug_24596 | rasdani/github-patches | git_diff | ddionrails__ddionrails-201 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change "templates" path in staging and production settings
In `settings/hewing.py` and `settings/production.py` the path in `TEMPLATES` `DIRS` is hard-coded. This leads to `TemplateDoesNotExist` if the path on a server changes.
</issue>
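The usual remedy is to derive the directory from the project root rather than hard-coding an absolute vhost path. A sketch, assuming `BASE_DIR` is defined in `settings/base.py` as in a standard Django layout (the actual patch below does the same via string concatenation):

```python
import os

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        # resolves relative to the checkout, wherever it lives on the server
        "DIRS": [os.path.join(BASE_DIR, "templates")],
        "APP_DIRS": True,
        "OPTIONS": {"context_processors": []},
    }
]
```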
<code>
[start of settings/hewing.py]
1 from .base import * # noqa
2
3 WSGI_APPLICATION = "ddionrails.wsgi_hewing.application"
4
5 DEBUG = True
6
7 ALLOWED_HOSTS = ["hewing.soep.de", "ddionrails.soep.de"]
8
9 # django-debug-toolbar
10 # ------------------------------------------------------------------------------
11 # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
12 INSTALLED_APPS += ["debug_toolbar"]
13 # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
14 MIDDLEWARE = ["debug_toolbar.middleware.DebugToolbarMiddleware"] + MIDDLEWARE
15 # https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
16
17 SYSTEM_NAME = "system"
18 SYSTEM_REPO_URL = "https://github.com/paneldata/system.git"
19 BACKUP_NAME = "backup"
20 BACKUP_REPO_URL = "https://github.com/ddionrails/test-backup.git"
21 IMPORT_BRANCH = "development"
22
23 LOGGING = {
24 "version": 1,
25 "disable_existing_loggers": False,
26 "handlers": {
27 "file": {
28 "level": "DEBUG",
29 "class": "logging.FileHandler",
30 "filename": "/tmp/dor-debug.log",
31 }
32 },
33 "loggers": {
34 "django.request": {"handlers": ["file"], "level": "DEBUG", "propagate": True},
35 "imports": {"handlers": ["file"], "level": "DEBUG", "propagate": True},
36 },
37 }
38
39 RQ_QUEUES = {
40 "default": {"HOST": "localhost", "PORT": 6379, "DB": 0, "DEFAULT_TIMEOUT": 360},
41 "high": {
42 "URL": os.getenv(
43 "REDISTOGO_URL", "redis://localhost:6379/0"
44 ), # If you're on Heroku
45 "DEFAULT_TIMEOUT": 500,
46 },
47 "low": {"HOST": "localhost", "PORT": 6379, "DB": 0},
48 }
49
50
51 TEMPLATES = [
52 {
53 "BACKEND": "django.template.backends.django.DjangoTemplates",
54 "DIRS": ["/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates"],
55 "APP_DIRS": True,
56 "OPTIONS": {
57 "context_processors": [
58 "django.template.context_processors.debug",
59 "django.template.context_processors.request",
60 "django.contrib.auth.context_processors.auth",
61 "django.contrib.messages.context_processors.messages",
62 "studies.models.context",
63 ]
64 },
65 }
66 ]
67
68 # SECURITY
69 # ------------------------------------------------------------------------------
70 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
71 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
72 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
73 SECURE_SSL_REDIRECT = True
74 # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
75 SESSION_COOKIE_SECURE = True
76 # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
77 SESSION_COOKIE_HTTPONLY = True
78 # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
79 CSRF_COOKIE_SECURE = True
80 # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
81 CSRF_COOKIE_HTTPONLY = True
82 # https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
83 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
84 SECURE_HSTS_SECONDS = 60
85 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
86 SECURE_HSTS_INCLUDE_SUBDOMAINS = True
87 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
88 SECURE_HSTS_PRELOAD = True
89 # https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
90 SECURE_CONTENT_TYPE_NOSNIFF = True
91 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
92 SECURE_BROWSER_XSS_FILTER = True
93 # https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
94 X_FRAME_OPTIONS = "DENY"
95
[end of settings/hewing.py]
[start of settings/production.py]
1 from .base import * # noqa
2
3 WSGI_APPLICATION = "ddionrails.wsgi_production.application"
4 DEBUG = False
5 ALLOWED_HOSTS = [".paneldata.org", "paneldata.soep.de", "data.soep.de"]
6
7 SYSTEM_NAME = "system"
8 SYSTEM_REPO_URL = "https://github.com/paneldata/system.git"
9 BACKUP_NAME = "backup"
10 BACKUP_REPO_URL = "https://github.com/ddionrails/test-backup.git"
11 IMPORT_BRANCH = "master"
12
13 LOGGING = {
14 "version": 1,
15 "disable_existing_loggers": False,
16 "handlers": {
17 "file": {
18 "level": "DEBUG",
19 "class": "logging.FileHandler",
20 "filename": "/tmp/dor-debug.log",
21 }
22 },
23 "loggers": {
24 "django.request": {"handlers": ["file"], "level": "DEBUG", "propagate": True},
25 "imports": {"handlers": ["file"], "level": "DEBUG", "propagate": True},
26 },
27 }
28
29 RQ_QUEUES = {
30 "default": {"HOST": "localhost", "PORT": 6379, "DB": 0, "DEFAULT_TIMEOUT": 360},
31 "high": {
32 "URL": os.getenv(
33 "REDISTOGO_URL", "redis://localhost:6379/0"
34 ), # If you're on Heroku
35 "DEFAULT_TIMEOUT": 500,
36 },
37 "low": {"HOST": "localhost", "PORT": 6379, "DB": 0},
38 }
39
40
41 TEMPLATES = [
42 {
43 "BACKEND": "django.template.backends.django.DjangoTemplates",
44 "DIRS": ["/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates"],
45 "APP_DIRS": True,
46 "OPTIONS": {
47 "context_processors": [
48 "django.template.context_processors.debug",
49 "django.template.context_processors.request",
50 "django.contrib.auth.context_processors.auth",
51 "django.contrib.messages.context_processors.messages",
52 "studies.models.context",
53 ]
54 },
55 }
56 ]
57
58 # SECURITY
59 # ------------------------------------------------------------------------------
60 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
61 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
62 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
63 SECURE_SSL_REDIRECT = True
64 # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
65 SESSION_COOKIE_SECURE = True
66 # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
67 SESSION_COOKIE_HTTPONLY = True
68 # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
69 CSRF_COOKIE_SECURE = True
70 # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
71 CSRF_COOKIE_HTTPONLY = True
72 # https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
73 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
74 SECURE_HSTS_SECONDS = 60
75 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
76 SECURE_HSTS_INCLUDE_SUBDOMAINS = True
77 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
78 SECURE_HSTS_PRELOAD = True
79 # https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
80 SECURE_CONTENT_TYPE_NOSNIFF = True
81 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
82 SECURE_BROWSER_XSS_FILTER = True
83 # https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
84 X_FRAME_OPTIONS = "DENY"
85
[end of settings/production.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/settings/hewing.py b/settings/hewing.py
--- a/settings/hewing.py
+++ b/settings/hewing.py
@@ -1,4 +1,4 @@
-from .base import * # noqa
+from .base import * # noqa
WSGI_APPLICATION = "ddionrails.wsgi_hewing.application"
@@ -51,7 +51,7 @@
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
- "DIRS": ["/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates"],
+ "DIRS": [BASE_DIR + "/templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
diff --git a/settings/production.py b/settings/production.py
--- a/settings/production.py
+++ b/settings/production.py
@@ -1,4 +1,4 @@
-from .base import * # noqa
+from .base import * # noqa
WSGI_APPLICATION = "ddionrails.wsgi_production.application"
DEBUG = False
@@ -41,7 +41,7 @@
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
- "DIRS": ["/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates"],
+ "DIRS": [BASE_DIR + "/templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
| {"golden_diff": "diff --git a/settings/hewing.py b/settings/hewing.py\n--- a/settings/hewing.py\n+++ b/settings/hewing.py\n@@ -1,4 +1,4 @@\n-from .base import * # noqa\n+from .base import * # noqa\n \n WSGI_APPLICATION = \"ddionrails.wsgi_hewing.application\"\n \n@@ -51,7 +51,7 @@\n TEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n- \"DIRS\": [\"/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates\"],\n+ \"DIRS\": [BASE_DIR + \"/templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\ndiff --git a/settings/production.py b/settings/production.py\n--- a/settings/production.py\n+++ b/settings/production.py\n@@ -1,4 +1,4 @@\n-from .base import * # noqa\n+from .base import * # noqa\n \n WSGI_APPLICATION = \"ddionrails.wsgi_production.application\"\n DEBUG = False\n@@ -41,7 +41,7 @@\n TEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n- \"DIRS\": [\"/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates\"],\n+ \"DIRS\": [BASE_DIR + \"/templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n", "issue": "Change \"templates\" path in staging and production settings\nIn `settings/hewing.py`and `settings/production.py` the path in `TEMPLATES` `DIRS` is hard coded. This leads to `TemplateDoesNotExist` if the path on a server changes.\n", "before_files": [{"content": "from .base import * # noqa\n\nWSGI_APPLICATION = \"ddionrails.wsgi_hewing.application\"\n\nDEBUG = True\n\nALLOWED_HOSTS = [\"hewing.soep.de\", \"ddionrails.soep.de\"]\n\n# django-debug-toolbar\n# ------------------------------------------------------------------------------\n# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites\nINSTALLED_APPS += [\"debug_toolbar\"]\n# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware\nMIDDLEWARE = [\"debug_toolbar.middleware.DebugToolbarMiddleware\"] + MIDDLEWARE\n# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config\n\nSYSTEM_NAME = \"system\"\nSYSTEM_REPO_URL = \"https://github.com/paneldata/system.git\"\nBACKUP_NAME = \"backup\"\nBACKUP_REPO_URL = \"https://github.com/ddionrails/test-backup.git\"\nIMPORT_BRANCH = \"development\"\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"file\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.FileHandler\",\n \"filename\": \"/tmp/dor-debug.log\",\n }\n },\n \"loggers\": {\n \"django.request\": {\"handlers\": [\"file\"], \"level\": \"DEBUG\", \"propagate\": True},\n \"imports\": {\"handlers\": [\"file\"], \"level\": \"DEBUG\", \"propagate\": True},\n },\n}\n\nRQ_QUEUES = {\n \"default\": {\"HOST\": \"localhost\", \"PORT\": 6379, \"DB\": 0, \"DEFAULT_TIMEOUT\": 360},\n \"high\": {\n \"URL\": os.getenv(\n \"REDISTOGO_URL\", \"redis://localhost:6379/0\"\n ), # If you're on Heroku\n \"DEFAULT_TIMEOUT\": 500,\n },\n \"low\": {\"HOST\": \"localhost\", \"PORT\": 6379, \"DB\": 0},\n}\n\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"studies.models.context\",\n ]\n 
},\n }\n]\n\n# SECURITY\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect\nSECURE_SSL_REDIRECT = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure\nSESSION_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly\nSESSION_COOKIE_HTTPONLY = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure\nCSRF_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly\nCSRF_COOKIE_HTTPONLY = True\n# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds\nSECURE_HSTS_SECONDS = 60\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload\nSECURE_HSTS_PRELOAD = True\n# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff\nSECURE_CONTENT_TYPE_NOSNIFF = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter\nSECURE_BROWSER_XSS_FILTER = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "settings/hewing.py"}, {"content": "from .base import * # noqa\n\nWSGI_APPLICATION = \"ddionrails.wsgi_production.application\"\nDEBUG = False\nALLOWED_HOSTS = [\".paneldata.org\", \"paneldata.soep.de\", \"data.soep.de\"]\n\nSYSTEM_NAME = \"system\"\nSYSTEM_REPO_URL = \"https://github.com/paneldata/system.git\"\nBACKUP_NAME = \"backup\"\nBACKUP_REPO_URL = \"https://github.com/ddionrails/test-backup.git\"\nIMPORT_BRANCH = \"master\"\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"file\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.FileHandler\",\n \"filename\": \"/tmp/dor-debug.log\",\n }\n },\n \"loggers\": {\n \"django.request\": {\"handlers\": [\"file\"], \"level\": \"DEBUG\", \"propagate\": True},\n \"imports\": {\"handlers\": [\"file\"], \"level\": \"DEBUG\", \"propagate\": True},\n },\n}\n\nRQ_QUEUES = {\n \"default\": {\"HOST\": \"localhost\", \"PORT\": 6379, \"DB\": 0, \"DEFAULT_TIMEOUT\": 360},\n \"high\": {\n \"URL\": os.getenv(\n \"REDISTOGO_URL\", \"redis://localhost:6379/0\"\n ), # If you're on Heroku\n \"DEFAULT_TIMEOUT\": 500,\n },\n \"low\": {\"HOST\": \"localhost\", \"PORT\": 6379, \"DB\": 0},\n}\n\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"/data/WWW/vhosts/paneldata.soep.de/ddionrails2/ddionrails/templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"studies.models.context\",\n ]\n },\n }\n]\n\n# SECURITY\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect\nSECURE_SSL_REDIRECT = True\n# 
https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure\nSESSION_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly\nSESSION_COOKIE_HTTPONLY = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure\nCSRF_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly\nCSRF_COOKIE_HTTPONLY = True\n# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds\nSECURE_HSTS_SECONDS = 60\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains\nSECURE_HSTS_INCLUDE_SUBDOMAINS = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload\nSECURE_HSTS_PRELOAD = True\n# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff\nSECURE_CONTENT_TYPE_NOSNIFF = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter\nSECURE_BROWSER_XSS_FILTER = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options\nX_FRAME_OPTIONS = \"DENY\"\n", "path": "settings/production.py"}]} | 2,629 | 332 |
gh_patches_debug_34686 | rasdani/github-patches | git_diff | wagtail__wagtail-1225 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Memory leak in RoutablePage
When resolving/reversing URLs, `RoutablePage` calls Django's `django.core.urlresolvers.get_resolver` function to build a resolver object.
This function is wrapped in an unbounded LRU cache. Since each call is usually made with a different page instance, this cache grows forever.
I've not seen any issues caused by this in the wild, but worth fixing.
</issue>
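The growth described above is easy to reproduce in isolation: an unbounded `lru_cache` keyed on ever-new arguments never evicts anything. Toy code, with made-up names:

```python
from functools import lru_cache

@lru_cache(maxsize=None)                 # same shape as Django's get_resolver wrapper
def build_resolver(urlconf):
    return object()                      # stand-in for a RegexURLResolver

for i in range(1000):
    build_resolver(("url-entry-%d" % i,))    # each distinct urlconf adds an entry

print(build_resolver.cache_info().currsize)  # 1000 -- and it only ever grows
```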
<code>
[start of wagtail/contrib/wagtailroutablepage/models.py]
1 from __future__ import unicode_literals
2
3 from six import string_types
4
5 from django.http import Http404
6 from django.core.urlresolvers import get_resolver
7 from django.core.exceptions import ImproperlyConfigured
8
9 from wagtail.wagtailcore.models import Page
10 from wagtail.wagtailcore.url_routing import RouteResult
11
12
13 class RoutablePageMixin(object):
14 """
15 This class can be mixed in to a Page subclass to allow urlconfs to be
16 embedded inside pages.
17 """
18 #: Set this to a tuple of ``django.conf.urls.url`` objects.
19 subpage_urls = None
20
21 def reverse_subpage(self, name, args=None, kwargs=None):
22 """
23 This method does the same job as Djangos' built in "urlresolvers.reverse()" function for subpage urlconfs.
24 """
25 args = args or []
26 kwargs = kwargs or {}
27
28 if self.subpage_urls is None:
29 raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__)
30
31 resolver = get_resolver(self.subpage_urls)
32 return resolver.reverse(name, *args, **kwargs)
33
34 def resolve_subpage(self, path):
35 """
36 This finds a view method/function from a URL path.
37 """
38 if self.subpage_urls is None:
39 raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__)
40
41 resolver = get_resolver(self.subpage_urls)
42 view, args, kwargs = resolver.resolve(path)
43
44 # If view is a string, find it as an attribute of self
45 if isinstance(view, string_types):
46 view = getattr(self, view)
47
48 return view, args, kwargs
49
50 def route(self, request, path_components):
51 """
52 This hooks the subpage urls into Wagtails routing.
53 """
54 if self.live:
55 try:
56 path = '/'
57 if path_components:
58 path += '/'.join(path_components) + '/'
59
60 view, args, kwargs = self.resolve_subpage(path)
61 return RouteResult(self, args=(view, args, kwargs))
62 except Http404:
63 pass
64
65 return super(RoutablePageMixin, self).route(request, path_components)
66
67 def serve(self, request, view, args, kwargs):
68 return view(request, *args, **kwargs)
69
70 def serve_preview(self, request, mode_name):
71 view, args, kwargs = self.resolve_subpage('/')
72 return view(request, *args, **kwargs)
73
74
75 class RoutablePage(RoutablePageMixin, Page):
76 """
77 This class extends Page by adding methods to allow urlconfs
78 to be embedded inside pages
79 """
80
81 is_abstract = True
82
83 class Meta:
84 abstract = True
85
[end of wagtail/contrib/wagtailroutablepage/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py
--- a/wagtail/contrib/wagtailroutablepage/models.py
+++ b/wagtail/contrib/wagtailroutablepage/models.py
@@ -3,8 +3,7 @@
from six import string_types
from django.http import Http404
-from django.core.urlresolvers import get_resolver
-from django.core.exceptions import ImproperlyConfigured
+from django.core.urlresolvers import RegexURLResolver
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.url_routing import RouteResult
@@ -18,28 +17,36 @@
#: Set this to a tuple of ``django.conf.urls.url`` objects.
subpage_urls = None
+ @classmethod
+ def get_subpage_urls(cls):
+ if cls.subpage_urls:
+ return cls.subpage_urls
+
+ return ()
+
+ @classmethod
+ def get_resolver(cls):
+ if '_routablepage_urlresolver' not in cls.__dict__:
+ subpage_urls = cls.get_subpage_urls()
+ cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)
+
+ return cls._routablepage_urlresolver
+
def reverse_subpage(self, name, args=None, kwargs=None):
"""
- This method does the same job as Djangos' built in "urlresolvers.reverse()" function for subpage urlconfs.
+ This method does the same job as Djangos' built in
+ "urlresolvers.reverse()" function for subpage urlconfs.
"""
args = args or []
kwargs = kwargs or {}
- if self.subpage_urls is None:
- raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__)
-
- resolver = get_resolver(self.subpage_urls)
- return resolver.reverse(name, *args, **kwargs)
+ return self.get_resolver().reverse(name, *args, **kwargs)
def resolve_subpage(self, path):
"""
This finds a view method/function from a URL path.
"""
- if self.subpage_urls is None:
- raise ImproperlyConfigured("You must set 'subpage_urls' on " + type(self).__name__)
-
- resolver = get_resolver(self.subpage_urls)
- view, args, kwargs = resolver.resolve(path)
+ view, args, kwargs = self.get_resolver().resolve(path)
# If view is a string, find it as an attribute of self
if isinstance(view, string_types):
| {"golden_diff": "diff --git a/wagtail/contrib/wagtailroutablepage/models.py b/wagtail/contrib/wagtailroutablepage/models.py\n--- a/wagtail/contrib/wagtailroutablepage/models.py\n+++ b/wagtail/contrib/wagtailroutablepage/models.py\n@@ -3,8 +3,7 @@\n from six import string_types\n \n from django.http import Http404\n-from django.core.urlresolvers import get_resolver\n-from django.core.exceptions import ImproperlyConfigured\n+from django.core.urlresolvers import RegexURLResolver\n \n from wagtail.wagtailcore.models import Page\n from wagtail.wagtailcore.url_routing import RouteResult\n@@ -18,28 +17,36 @@\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n \n+ @classmethod\n+ def get_subpage_urls(cls):\n+ if cls.subpage_urls:\n+ return cls.subpage_urls\n+\n+ return ()\n+\n+ @classmethod\n+ def get_resolver(cls):\n+ if '_routablepage_urlresolver' not in cls.__dict__:\n+ subpage_urls = cls.get_subpage_urls()\n+ cls._routablepage_urlresolver = RegexURLResolver(r'^/', subpage_urls)\n+\n+ return cls._routablepage_urlresolver\n+\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n- This method does the same job as Djangos' built in \"urlresolvers.reverse()\" function for subpage urlconfs.\n+ This method does the same job as Djangos' built in\n+ \"urlresolvers.reverse()\" function for subpage urlconfs.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n \n- if self.subpage_urls is None:\n- raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n-\n- resolver = get_resolver(self.subpage_urls)\n- return resolver.reverse(name, *args, **kwargs)\n+ return self.get_resolver().reverse(name, *args, **kwargs)\n \n def resolve_subpage(self, path):\n \"\"\"\n This finds a view method/function from a URL path.\n \"\"\"\n- if self.subpage_urls is None:\n- raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n-\n- resolver = get_resolver(self.subpage_urls)\n- view, args, kwargs = resolver.resolve(path)\n+ view, args, kwargs = self.get_resolver().resolve(path)\n \n # If view is a string, find it as an attribute of self\n if isinstance(view, string_types):\n", "issue": "Memory leak in RoutablePage\nWhen resolving/reversing URLs, `RoutablePage` calls Djangos `django.core.urlresolvers.get_resolver` function to build a resolver object.\n\nThis function is wrapped in an unlimited lru cache. 
As each time we call it is usually with a different page instance, this lru cache would grow forever.\n\nI've not seen any issues caused by this in the wild, but worth fixing.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom six import string_types\n\nfrom django.http import Http404\nfrom django.core.urlresolvers import get_resolver\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.url_routing import RouteResult\n\n\nclass RoutablePageMixin(object):\n \"\"\"\n This class can be mixed in to a Page subclass to allow urlconfs to be\n embedded inside pages.\n \"\"\"\n #: Set this to a tuple of ``django.conf.urls.url`` objects.\n subpage_urls = None\n\n def reverse_subpage(self, name, args=None, kwargs=None):\n \"\"\"\n This method does the same job as Djangos' built in \"urlresolvers.reverse()\" function for subpage urlconfs.\n \"\"\"\n args = args or []\n kwargs = kwargs or {}\n\n if self.subpage_urls is None:\n raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n\n resolver = get_resolver(self.subpage_urls)\n return resolver.reverse(name, *args, **kwargs)\n\n def resolve_subpage(self, path):\n \"\"\"\n This finds a view method/function from a URL path.\n \"\"\"\n if self.subpage_urls is None:\n raise ImproperlyConfigured(\"You must set 'subpage_urls' on \" + type(self).__name__)\n\n resolver = get_resolver(self.subpage_urls)\n view, args, kwargs = resolver.resolve(path)\n\n # If view is a string, find it as an attribute of self\n if isinstance(view, string_types):\n view = getattr(self, view)\n\n return view, args, kwargs\n\n def route(self, request, path_components):\n \"\"\"\n This hooks the subpage urls into Wagtails routing.\n \"\"\"\n if self.live:\n try:\n path = '/'\n if path_components:\n path += '/'.join(path_components) + '/'\n\n view, args, kwargs = self.resolve_subpage(path)\n return RouteResult(self, args=(view, args, kwargs))\n except Http404:\n pass\n\n return super(RoutablePageMixin, self).route(request, path_components)\n\n def serve(self, request, view, args, kwargs):\n return view(request, *args, **kwargs)\n\n def serve_preview(self, request, mode_name):\n view, args, kwargs = self.resolve_subpage('/')\n return view(request, *args, **kwargs)\n\n\nclass RoutablePage(RoutablePageMixin, Page):\n \"\"\"\n This class extends Page by adding methods to allow urlconfs\n to be embedded inside pages\n \"\"\"\n\n is_abstract = True\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/wagtailroutablepage/models.py"}]} | 1,400 | 594 |
gh_patches_debug_24360 | rasdani/github-patches | git_diff | rasterio__rasterio-2827 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Turn down logging level for non-existing files in the Python file VSI plugin
To prevent pointless sidecar searching as discussed in https://github.com/rasterio/rasterio/discussions/2825.
</issue>
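A minimal sketch of the idea in plain Python logging terms. The names below (the registry dict and the callback) are illustrative only and are not rasterio's actual VSI plugin API; the point is simply that an expected miss is reported at DEBUG rather than a noisier level:

```python
import logging

log = logging.getLogger(__name__)

# Illustrative registry mapping virtual filenames to open file-like objects.
_registry = {}

def open_callback(filename):
    handle = _registry.get(filename)
    if handle is None:
        # GDAL routinely probes for sidecar files (.aux.xml, .msk, ...) that do
        # not exist, so keep the miss quiet instead of logging it as an error.
        log.debug("File not found: %s", filename)
        return None
    return handle
```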
<code>
[start of rasterio/io.py]
1 """Classes capable of reading and writing datasets
2
3 Instances of these classes are called dataset objects.
4 """
5
6 import logging
7
8 from rasterio._base import get_dataset_driver, driver_can_create, driver_can_create_copy
9 from rasterio._io import (
10 DatasetReaderBase,
11 DatasetWriterBase,
12 BufferedDatasetWriterBase,
13 MemoryFileBase,
14 )
15 from rasterio.windows import WindowMethodsMixin
16 from rasterio.env import ensure_env
17 from rasterio.transform import TransformMethodsMixin
18 from rasterio._path import _UnparsedPath
19
20 try:
21 from rasterio._filepath import FilePathBase
22 except ImportError:
23 FilePathBase = object
24
25
26 log = logging.getLogger(__name__)
27
28
29 class DatasetReader(DatasetReaderBase, WindowMethodsMixin, TransformMethodsMixin):
30 """An unbuffered data and metadata reader"""
31
32 def __repr__(self):
33 return "<{} DatasetReader name='{}' mode='{}'>".format(
34 self.closed and 'closed' or 'open', self.name, self.mode)
35
36
37 class DatasetWriter(DatasetWriterBase, WindowMethodsMixin, TransformMethodsMixin):
38 """An unbuffered data and metadata writer. Its methods write data
39 directly to disk.
40 """
41
42 def __repr__(self):
43 return "<{} DatasetWriter name='{}' mode='{}'>".format(
44 self.closed and 'closed' or 'open', self.name, self.mode)
45
46
47 class BufferedDatasetWriter(
48 BufferedDatasetWriterBase, WindowMethodsMixin, TransformMethodsMixin
49 ):
50 """Maintains data and metadata in a buffer, writing to disk or
51 network only when `close()` is called.
52
53 This allows incremental updates to datasets using formats that don't
54 otherwise support updates, such as JPEG.
55 """
56
57 def __repr__(self):
58 return "<{} BufferedDatasetWriter name='{}' mode='{}'>".format(
59 self.closed and 'closed' or 'open', self.name, self.mode)
60
61
62 class MemoryFile(MemoryFileBase):
63 """A BytesIO-like object, backed by an in-memory file.
64
65 This allows formatted files to be read and written without I/O.
66
67 A MemoryFile created with initial bytes becomes immutable. A
68 MemoryFile created without initial bytes may be written to using
69 either file-like or dataset interfaces.
70
71 Examples
72 --------
73
74 A GeoTIFF can be loaded in memory and accessed using the GeoTIFF
75 format driver
76
77 >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:
78 ... with memfile.open() as src:
79 ... pprint.pprint(src.profile)
80 ...
81 {'count': 3,
82 'crs': CRS({'init': 'epsg:32618'}),
83 'driver': 'GTiff',
84 'dtype': 'uint8',
85 'height': 718,
86 'interleave': 'pixel',
87 'nodata': 0.0,
88 'tiled': False,
89 'transform': Affine(300.0379266750948, 0.0, 101985.0,
90 0.0, -300.041782729805, 2826915.0),
91 'width': 791}
92
93 """
94
95 def __init__(self, file_or_bytes=None, dirname=None, filename=None, ext=".tif"):
96 """Create a new file in memory
97
98 Parameters
99 ----------
100 file_or_bytes : file-like object or bytes, optional
101 File or bytes holding initial data.
102 filename : str, optional
103 An optional filename. A unique one will otherwise be generated.
104 ext : str, optional
105 An optional extension.
106
107 Returns
108 -------
109 MemoryFile
110 """
111 super().__init__(
112 file_or_bytes=file_or_bytes, dirname=dirname, filename=filename, ext=ext
113 )
114
115 @ensure_env
116 def open(self, driver=None, width=None, height=None, count=None, crs=None,
117 transform=None, dtype=None, nodata=None, sharing=False, **kwargs):
118 """Open the file and return a Rasterio dataset object.
119
120 If data has already been written, the file is opened in 'r'
121 mode. Otherwise, the file is opened in 'w' mode.
122
123 Parameters
124 ----------
125 Note well that there is no `path` parameter: a `MemoryFile`
126 contains a single dataset and there is no need to specify a
127 path.
128
129 Other parameters are optional and have the same semantics as the
130 parameters of `rasterio.open()`.
131 """
132 mempath = _UnparsedPath(self.name)
133
134 if self.closed:
135 raise OSError("I/O operation on closed file.")
136 if len(self) > 0:
137 log.debug("VSI path: {}".format(mempath.path))
138 return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)
139 else:
140 writer = get_writer_for_driver(driver)
141 return writer(
142 mempath,
143 "w+",
144 driver=driver,
145 width=width,
146 height=height,
147 count=count,
148 crs=crs,
149 transform=transform,
150 dtype=dtype,
151 nodata=nodata,
152 sharing=sharing,
153 **kwargs
154 )
155
156 def __enter__(self):
157 return self
158
159 def __exit__(self, *args):
160 self.close()
161
162
163 class _FilePath(FilePathBase):
164 """A BytesIO-like object, backed by a Python file object.
165
166 Examples
167 --------
168
169 A GeoTIFF can be loaded in memory and accessed using the GeoTIFF
170 format driver
171
172 >>> with open('tests/data/RGB.byte.tif', 'rb') as f, FilePath(f) as vsi_file:
173 ... with vsi_file.open() as src:
174 ... pprint.pprint(src.profile)
175 ...
176 {'count': 3,
177 'crs': CRS({'init': 'epsg:32618'}),
178 'driver': 'GTiff',
179 'dtype': 'uint8',
180 'height': 718,
181 'interleave': 'pixel',
182 'nodata': 0.0,
183 'tiled': False,
184 'transform': Affine(300.0379266750948, 0.0, 101985.0,
185 0.0, -300.041782729805, 2826915.0),
186 'width': 791}
187
188 """
189
190 def __init__(self, filelike_obj, dirname=None, filename=None):
191 """Create a new wrapper around the provided file-like object.
192
193 Parameters
194 ----------
195 filelike_obj : file-like object
196 Open file-like object. Currently only reading is supported.
197 filename : str, optional
198 An optional filename. A unique one will otherwise be generated.
199
200 Returns
201 -------
202 PythonVSIFile
203 """
204 super().__init__(
205 filelike_obj, dirname=dirname, filename=filename
206 )
207
208 @ensure_env
209 def open(self, driver=None, sharing=False, **kwargs):
210 """Open the file and return a Rasterio dataset object.
211
212 The provided file-like object is assumed to be readable.
213 Writing is currently not supported.
214
215 Parameters are optional and have the same semantics as the
216 parameters of `rasterio.open()`.
217 """
218 mempath = _UnparsedPath(self.name)
219
220 if self.closed:
221 raise IOError("I/O operation on closed file.")
222 # Assume we were given a non-empty file-like object
223 log.debug("VSI path: {}".format(mempath.path))
224 return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)
225
226 def __enter__(self):
227 return self
228
229 def __exit__(self, *args):
230 self.close()
231
232
233 if FilePathBase is not object:
234 # only make this object available if the cython extension was compiled
235 FilePath = _FilePath
236
237
238 class ZipMemoryFile(MemoryFile):
239 """A read-only BytesIO-like object backed by an in-memory zip file.
240
241 This allows a zip file containing formatted files to be read
242 without I/O.
243 """
244
245 def __init__(self, file_or_bytes=None):
246 super().__init__(file_or_bytes, ext="zip")
247
248 @ensure_env
249 def open(self, path, driver=None, sharing=False, **kwargs):
250 """Open a dataset within the zipped stream.
251
252 Parameters
253 ----------
254 path : str
255 Path to a dataset in the zip file, relative to the root of the
256 archive.
257
258 Other parameters are optional and have the same semantics as the
259 parameters of `rasterio.open()`.
260
261 Returns
262 -------
263 A Rasterio dataset object
264 """
265 zippath = _UnparsedPath('/vsizip{0}/{1}'.format(self.name, path.lstrip('/')))
266
267 if self.closed:
268 raise OSError("I/O operation on closed file.")
269 return DatasetReader(zippath, driver=driver, sharing=sharing, **kwargs)
270
271
272 def get_writer_for_driver(driver):
273 """Return the writer class appropriate for the specified driver."""
274 if not driver:
275 raise ValueError("'driver' is required to read/write dataset.")
276 cls = None
277 if driver_can_create(driver):
278 cls = DatasetWriter
279 elif driver_can_create_copy(driver): # pragma: no branch
280 cls = BufferedDatasetWriter
281 return cls
282
283
284 def get_writer_for_path(path, driver=None):
285 """Return the writer class appropriate for the existing dataset."""
286 if not driver:
287 driver = get_dataset_driver(path)
288 return get_writer_for_driver(driver)
289
[end of rasterio/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/io.py b/rasterio/io.py
--- a/rasterio/io.py
+++ b/rasterio/io.py
@@ -13,7 +13,7 @@
MemoryFileBase,
)
from rasterio.windows import WindowMethodsMixin
-from rasterio.env import ensure_env
+from rasterio.env import Env, ensure_env
from rasterio.transform import TransformMethodsMixin
from rasterio._path import _UnparsedPath
@@ -214,13 +214,25 @@
Parameters are optional and have the same semantics as the
parameters of `rasterio.open()`.
+
+ Returns
+ -------
+ DatasetReader
+
+ Raises
+ ------
+ IOError
+ If the memory file is closed.
+
"""
mempath = _UnparsedPath(self.name)
if self.closed:
raise IOError("I/O operation on closed file.")
+
# Assume we were given a non-empty file-like object
log.debug("VSI path: {}".format(mempath.path))
+
return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)
def __enter__(self):
| {"golden_diff": "diff --git a/rasterio/io.py b/rasterio/io.py\n--- a/rasterio/io.py\n+++ b/rasterio/io.py\n@@ -13,7 +13,7 @@\n MemoryFileBase,\n )\n from rasterio.windows import WindowMethodsMixin\n-from rasterio.env import ensure_env\n+from rasterio.env import Env, ensure_env\n from rasterio.transform import TransformMethodsMixin\n from rasterio._path import _UnparsedPath\n \n@@ -214,13 +214,25 @@\n \n Parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n+\n+ Returns\n+ -------\n+ DatasetReader\n+\n+ Raises\n+ ------\n+ IOError\n+ If the memory file is closed.\n+\n \"\"\"\n mempath = _UnparsedPath(self.name)\n \n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n+\n # Assume we were given a non-empty file-like object\n log.debug(\"VSI path: {}\".format(mempath.path))\n+\n return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)\n \n def __enter__(self):\n", "issue": "Turn down logging level for non-existing files in the Python file VSI plugin\nTo prevent pointless sidecar searching as discussed in https://github.com/rasterio/rasterio/discussions/2825.\n", "before_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\n\nfrom rasterio._base import get_dataset_driver, driver_can_create, driver_can_create_copy\nfrom rasterio._io import (\n DatasetReaderBase,\n DatasetWriterBase,\n BufferedDatasetWriterBase,\n MemoryFileBase,\n)\nfrom rasterio.windows import WindowMethodsMixin\nfrom rasterio.env import ensure_env\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio._path import _UnparsedPath\n\ntry:\n from rasterio._filepath import FilePathBase\nexcept ImportError:\n FilePathBase = object\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin, TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin, TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(\n BufferedDatasetWriterBase, WindowMethodsMixin, TransformMethodsMixin\n):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, MemoryFile(f) as memfile:\n ... with memfile.open() as src:\n ... 
pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n\n def __init__(self, file_or_bytes=None, dirname=None, filename=None, ext=\".tif\"):\n \"\"\"Create a new file in memory\n\n Parameters\n ----------\n file_or_bytes : file-like object or bytes, optional\n File or bytes holding initial data.\n filename : str, optional\n An optional filename. A unique one will otherwise be generated.\n ext : str, optional\n An optional extension.\n\n Returns\n -------\n MemoryFile\n \"\"\"\n super().__init__(\n file_or_bytes=file_or_bytes, dirname=dirname, filename=filename, ext=ext\n )\n\n @ensure_env\n def open(self, driver=None, width=None, height=None, count=None, crs=None,\n transform=None, dtype=None, nodata=None, sharing=False, **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r'\n mode. Otherwise, the file is opened in 'w' mode.\n\n Parameters\n ----------\n Note well that there is no `path` parameter: a `MemoryFile`\n contains a single dataset and there is no need to specify a\n path.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n \"\"\"\n mempath = _UnparsedPath(self.name)\n\n if self.closed:\n raise OSError(\"I/O operation on closed file.\")\n if len(self) > 0:\n log.debug(\"VSI path: {}\".format(mempath.path))\n return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)\n else:\n writer = get_writer_for_driver(driver)\n return writer(\n mempath,\n \"w+\",\n driver=driver,\n width=width,\n height=height,\n count=count,\n crs=crs,\n transform=transform,\n dtype=dtype,\n nodata=nodata,\n sharing=sharing,\n **kwargs\n )\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n\nclass _FilePath(FilePathBase):\n \"\"\"A BytesIO-like object, backed by a Python file object.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, FilePath(f) as vsi_file:\n ... with vsi_file.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n\n def __init__(self, filelike_obj, dirname=None, filename=None):\n \"\"\"Create a new wrapper around the provided file-like object.\n\n Parameters\n ----------\n filelike_obj : file-like object\n Open file-like object. Currently only reading is supported.\n filename : str, optional\n An optional filename. 
A unique one will otherwise be generated.\n\n Returns\n -------\n PythonVSIFile\n \"\"\"\n super().__init__(\n filelike_obj, dirname=dirname, filename=filename\n )\n\n @ensure_env\n def open(self, driver=None, sharing=False, **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n The provided file-like object is assumed to be readable.\n Writing is currently not supported.\n\n Parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n \"\"\"\n mempath = _UnparsedPath(self.name)\n\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n # Assume we were given a non-empty file-like object\n log.debug(\"VSI path: {}\".format(mempath.path))\n return DatasetReader(mempath, driver=driver, sharing=sharing, **kwargs)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n\nif FilePathBase is not object:\n # only make this object available if the cython extension was compiled\n FilePath = _FilePath\n\n\nclass ZipMemoryFile(MemoryFile):\n \"\"\"A read-only BytesIO-like object backed by an in-memory zip file.\n\n This allows a zip file containing formatted files to be read\n without I/O.\n \"\"\"\n\n def __init__(self, file_or_bytes=None):\n super().__init__(file_or_bytes, ext=\"zip\")\n\n @ensure_env\n def open(self, path, driver=None, sharing=False, **kwargs):\n \"\"\"Open a dataset within the zipped stream.\n\n Parameters\n ----------\n path : str\n Path to a dataset in the zip file, relative to the root of the\n archive.\n\n Other parameters are optional and have the same semantics as the\n parameters of `rasterio.open()`.\n\n Returns\n -------\n A Rasterio dataset object\n \"\"\"\n zippath = _UnparsedPath('/vsizip{0}/{1}'.format(self.name, path.lstrip('/')))\n\n if self.closed:\n raise OSError(\"I/O operation on closed file.\")\n return DatasetReader(zippath, driver=driver, sharing=sharing, **kwargs)\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n if not driver:\n raise ValueError(\"'driver' is required to read/write dataset.\")\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path, driver=None):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n if not driver:\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}]} | 3,476 | 258 |
gh_patches_debug_27871 | rasdani/github-patches | git_diff | kserve__kserve-1640 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Anonymous credentials for single model serving with s3
/kind feature
**Describe the solution you'd like**
[A clear and concise description of what you want to happen.]
Update [download from s3](https://github.com/kubeflow/kfserving/blob/4fd401e348174f954fee7596dc3d54f197f4aa8c/python/kfserving/kfserving/storage.py#L86) so that boto3 is configured with anonymous credentials when the anonymous-credentials environment variable is set to true.
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
It would be nice if an anonymous-credentials e2e test were also added.
</issue>
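As a rough illustration of the requested behavior, the sketch below toggles unsigned (anonymous) S3 access with an environment variable. The variable name `awsAnonymousCredential` is taken from the golden diff shown after the listing; the helper function itself is an assumption for illustration, not KFServing's actual API:

```python
import os

import boto3
from botocore import UNSIGNED
from botocore.client import Config


def build_s3_resource():
    # Use unsigned requests when the anonymous-credentials flag is set;
    # otherwise fall back to boto3's normal credential lookup chain.
    anon = os.getenv("awsAnonymousCredential", "false").lower() == "true"
    config = Config(signature_version=UNSIGNED) if anon else None
    return boto3.resource(
        "s3",
        endpoint_url=os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"),
        config=config,
    )
```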
<code>
[start of python/kfserving/kfserving/storage.py]
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import glob
16 import gzip
17 import logging
18 import mimetypes
19 import os
20 import re
21 import json
22 import shutil
23 import tarfile
24 import tempfile
25 import zipfile
26 from urllib.parse import urlparse
27
28 import boto3
29 import requests
30 from azure.storage.blob import BlockBlobService
31 from google.auth import exceptions
32 from google.cloud import storage
33
34 from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS
35
36 _GCS_PREFIX = "gs://"
37 _S3_PREFIX = "s3://"
38 _BLOB_RE = "https://(.+?).blob.core.windows.net/(.+)"
39 _LOCAL_PREFIX = "file://"
40 _URI_RE = "https?://(.+)/(.+)"
41 _HTTP_PREFIX = "http(s)://"
42 _HEADERS_SUFFIX = "-headers"
43
44
45 class Storage(object): # pylint: disable=too-few-public-methods
46 @staticmethod
47 def download(uri: str, out_dir: str = None) -> str:
48 logging.info("Copying contents of %s to local", uri)
49
50 is_local = False
51 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
52 is_local = True
53
54 if out_dir is None:
55 if is_local:
56 # noop if out_dir is not set and the path is local
57 return Storage._download_local(uri)
58 out_dir = tempfile.mkdtemp()
59 elif not os.path.exists(out_dir):
60 os.mkdir(out_dir)
61
62 if uri.startswith(_GCS_PREFIX):
63 Storage._download_gcs(uri, out_dir)
64 elif uri.startswith(_S3_PREFIX):
65 Storage._download_s3(uri, out_dir)
66 elif re.search(_BLOB_RE, uri):
67 Storage._download_blob(uri, out_dir)
68 elif is_local:
69 return Storage._download_local(uri, out_dir)
70 elif re.search(_URI_RE, uri):
71 return Storage._download_from_uri(uri, out_dir)
72 elif uri.startswith(MODEL_MOUNT_DIRS):
73 # Don't need to download models if this InferenceService is running in the multi-model
74 # serving mode. The model agent will download models.
75 return out_dir
76 else:
77 raise Exception("Cannot recognize storage type for " + uri +
78 "\n'%s', '%s', '%s', and '%s' are the current available storage type." %
79 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX))
80
81 logging.info("Successfully copied %s to %s", uri, out_dir)
82 return out_dir
83
84 @staticmethod
85 def _download_s3(uri, temp_dir: str):
86 s3 = boto3.resource('s3', endpoint_url=os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"))
87 parsed = urlparse(uri, scheme='s3')
88 bucket_name = parsed.netloc
89 bucket_path = parsed.path.lstrip('/')
90
91 bucket = s3.Bucket(bucket_name)
92 for obj in bucket.objects.filter(Prefix=bucket_path):
93 # Skip where boto3 lists the directory as an object
94 if obj.key.endswith("/"):
95 continue
96 # In the case where bucket_path points to a single object, set the target key to bucket_path
97 # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir
98 target_key = (
99 obj.key
100 if bucket_path == obj.key
101 else obj.key.replace(bucket_path, "", 1).lstrip("/")
102 )
103 target = f"{temp_dir}/{target_key}"
104 if not os.path.exists(os.path.dirname(target)):
105 os.makedirs(os.path.dirname(target), exist_ok=True)
106 bucket.download_file(obj.key, target)
107
108 @staticmethod
109 def _download_gcs(uri, temp_dir: str):
110 try:
111 storage_client = storage.Client()
112 except exceptions.DefaultCredentialsError:
113 storage_client = storage.Client.create_anonymous_client()
114 bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
115 bucket_name = bucket_args[0]
116 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
117 bucket = storage_client.bucket(bucket_name)
118 prefix = bucket_path
119 if not prefix.endswith("/"):
120 prefix = prefix + "/"
121 blobs = bucket.list_blobs(prefix=prefix)
122 count = 0
123 for blob in blobs:
124 # Replace any prefix from the object key with temp_dir
125 subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
126
127 # Create necessary subdirectory to store the object locally
128 if "/" in subdir_object_key:
129 local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
130 if not os.path.isdir(local_object_dir):
131 os.makedirs(local_object_dir, exist_ok=True)
132 if subdir_object_key.strip() != "":
133 dest_path = os.path.join(temp_dir, subdir_object_key)
134 logging.info("Downloading: %s", dest_path)
135 blob.download_to_filename(dest_path)
136 count = count + 1
137 if count == 0:
138 raise RuntimeError("Failed to fetch model. \
139 The path or model %s does not exist." % uri)
140
141 @staticmethod
142 def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals
143 match = re.search(_BLOB_RE, uri)
144 account_name = match.group(1)
145 storage_url = match.group(2)
146 container_name, prefix = storage_url.split("/", 1)
147
148 logging.info("Connecting to BLOB account: [%s], container: [%s], prefix: [%s]",
149 account_name,
150 container_name,
151 prefix)
152 try:
153 block_blob_service = BlockBlobService(account_name=account_name)
154 blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
155 except Exception: # pylint: disable=broad-except
156 token = Storage._get_azure_storage_token()
157 if token is None:
158 logging.warning("Azure credentials not found, retrying anonymous access")
159 block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)
160 blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
161 count = 0
162 for blob in blobs:
163 dest_path = os.path.join(out_dir, blob.name)
164 if "/" in blob.name:
165 head, tail = os.path.split(blob.name)
166 if prefix is not None:
167 head = head[len(prefix):]
168 if head.startswith('/'):
169 head = head[1:]
170 dir_path = os.path.join(out_dir, head)
171 dest_path = os.path.join(dir_path, tail)
172 if not os.path.isdir(dir_path):
173 os.makedirs(dir_path)
174
175 logging.info("Downloading: %s to %s", blob.name, dest_path)
176 block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)
177 count = count + 1
178 if count == 0:
179 raise RuntimeError("Failed to fetch model. \
180 The path or model %s does not exist." % (uri))
181
182 @staticmethod
183 def _get_azure_storage_token():
184 tenant_id = os.getenv("AZ_TENANT_ID", "")
185 client_id = os.getenv("AZ_CLIENT_ID", "")
186 client_secret = os.getenv("AZ_CLIENT_SECRET", "")
187 subscription_id = os.getenv("AZ_SUBSCRIPTION_ID", "")
188
189 if tenant_id == "" or client_id == "" or client_secret == "" or subscription_id == "":
190 return None
191
192 # note the SP must have "Storage Blob Data Owner" perms for this to work
193 import adal
194 from azure.storage.common import TokenCredential
195
196 authority_url = "https://login.microsoftonline.com/" + tenant_id
197
198 context = adal.AuthenticationContext(authority_url)
199
200 token = context.acquire_token_with_client_credentials(
201 "https://storage.azure.com/",
202 client_id,
203 client_secret)
204
205 token_credential = TokenCredential(token["accessToken"])
206
207 logging.info("Retrieved SP token credential for client_id: %s", client_id)
208
209 return token_credential
210
211 @staticmethod
212 def _download_local(uri, out_dir=None):
213 local_path = uri.replace(_LOCAL_PREFIX, "", 1)
214 if not os.path.exists(local_path):
215 raise RuntimeError("Local path %s does not exist." % (uri))
216
217 if out_dir is None:
218 return local_path
219 elif not os.path.isdir(out_dir):
220 os.makedirs(out_dir)
221
222 if os.path.isdir(local_path):
223 local_path = os.path.join(local_path, "*")
224
225 for src in glob.glob(local_path):
226 _, tail = os.path.split(src)
227 dest_path = os.path.join(out_dir, tail)
228 logging.info("Linking: %s to %s", src, dest_path)
229 os.symlink(src, dest_path)
230 return out_dir
231
232 @staticmethod
233 def _download_from_uri(uri, out_dir=None):
234 url = urlparse(uri)
235 filename = os.path.basename(url.path)
236 mimetype, encoding = mimetypes.guess_type(url.path)
237 local_path = os.path.join(out_dir, filename)
238
239 if filename == '':
240 raise ValueError('No filename contained in URI: %s' % (uri))
241
242 # Get header information from host url
243 headers = {}
244 host_uri = url.hostname
245
246 headers_json = os.getenv(host_uri + _HEADERS_SUFFIX, "{}")
247 headers = json.loads(headers_json)
248
249 with requests.get(uri, stream=True, headers=headers) as response:
250 if response.status_code != 200:
251 raise RuntimeError("URI: %s returned a %s response code." % (uri, response.status_code))
252 if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\
253 .startswith('application/zip'):
254 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/zip\'" % uri)
255 if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\
256 .startswith('application/x-tar'):
257 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/x-tar\'" % uri)
258 if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \
259 not response.headers.get('Content-Type', '').startswith('application/octet-stream'):
260 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/octet-stream\'"
261 % uri)
262
263 if encoding == 'gzip':
264 stream = gzip.GzipFile(fileobj=response.raw)
265 local_path = os.path.join(out_dir, f'{filename}.tar')
266 else:
267 stream = response.raw
268 with open(local_path, 'wb') as out:
269 shutil.copyfileobj(stream, out)
270
271 if mimetype in ["application/x-tar", "application/zip"]:
272 if mimetype == "application/x-tar":
273 archive = tarfile.open(local_path, 'r', encoding='utf-8')
274 else:
275 archive = zipfile.ZipFile(local_path, 'r')
276 archive.extractall(out_dir)
277 archive.close()
278 os.remove(local_path)
279
280 return out_dir
281
[end of python/kfserving/kfserving/storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py
--- a/python/kfserving/kfserving/storage.py
+++ b/python/kfserving/kfserving/storage.py
@@ -25,6 +25,8 @@
import zipfile
from urllib.parse import urlparse
+from botocore.client import Config
+from botocore import UNSIGNED
import boto3
import requests
from azure.storage.blob import BlockBlobService
@@ -81,9 +83,26 @@
logging.info("Successfully copied %s to %s", uri, out_dir)
return out_dir
+ @staticmethod
+ def get_S3_config():
+ # anon environment variable defined in s3_secret.go
+ anon = ("True" == os.getenv("awsAnonymousCredential", "false").capitalize())
+ if anon:
+ return Config(signature_version=UNSIGNED)
+ else:
+ return None
+
@staticmethod
def _download_s3(uri, temp_dir: str):
- s3 = boto3.resource('s3', endpoint_url=os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"))
+ # Boto3 looks at various configuration locations until it finds configuration values.
+ # lookup order:
+ # 1. Config object passed in as the config parameter when creating S3 resource
+ # if awsAnonymousCredential env var true, passed in via config
+ # 2. Environment variables
+ # 3. ~/.aws/config file
+ s3 = boto3.resource('s3',
+ endpoint_url=os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"),
+ config=Storage.get_S3_config())
parsed = urlparse(uri, scheme='s3')
bucket_name = parsed.netloc
bucket_path = parsed.path.lstrip('/')
| {"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -25,6 +25,8 @@\n import zipfile\n from urllib.parse import urlparse\n \n+from botocore.client import Config\n+from botocore import UNSIGNED\n import boto3\n import requests\n from azure.storage.blob import BlockBlobService\n@@ -81,9 +83,26 @@\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n \n+ @staticmethod\n+ def get_S3_config():\n+ # anon environment variable defined in s3_secret.go\n+ anon = (\"True\" == os.getenv(\"awsAnonymousCredential\", \"false\").capitalize())\n+ if anon:\n+ return Config(signature_version=UNSIGNED)\n+ else:\n+ return None\n+\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n- s3 = boto3.resource('s3', endpoint_url=os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n+ # Boto3 looks at various configuration locations until it finds configuration values.\n+ # lookup order:\n+ # 1. Config object passed in as the config parameter when creating S3 resource\n+ # if awsAnonymousCredential env var true, passed in via config\n+ # 2. Environment variables\n+ # 3. ~/.aws/config file\n+ s3 = boto3.resource('s3',\n+ endpoint_url=os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"),\n+ config=Storage.get_S3_config())\n parsed = urlparse(uri, scheme='s3')\n bucket_name = parsed.netloc\n bucket_path = parsed.path.lstrip('/')\n", "issue": "Anonymous credentials for single model serving with s3\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\n[A clear and concise description of what you want to happen.]\r\nUpdate [download from s3](https://github.com/kubeflow/kfserving/blob/4fd401e348174f954fee7596dc3d54f197f4aa8c/python/kfserving/kfserving/storage.py#L86) to configure boto3 to use anon credentials if anonymous credentials env is set to true.\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\nWould be nice if anon e2e test were also added.\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport gzip\nimport logging\nimport mimetypes\nimport os\nimport re\nimport json\nimport shutil\nimport tarfile\nimport tempfile\nimport zipfile\nfrom urllib.parse import urlparse\n\nimport boto3\nimport requests\nfrom azure.storage.blob import BlockBlobService\nfrom google.auth import exceptions\nfrom google.cloud import storage\n\nfrom kfserving.kfmodel_repository import MODEL_MOUNT_DIRS\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_BLOB_RE = \"https://(.+?).blob.core.windows.net/(.+)\"\n_LOCAL_PREFIX = \"file://\"\n_URI_RE = \"https?://(.+)/(.+)\"\n_HTTP_PREFIX = \"http(s)://\"\n_HEADERS_SUFFIX = \"-headers\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying 
contents of %s to local\", uri)\n\n is_local = False\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n is_local = True\n\n if out_dir is None:\n if is_local:\n # noop if out_dir is not set and the path is local\n return Storage._download_local(uri)\n out_dir = tempfile.mkdtemp()\n elif not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, out_dir)\n elif re.search(_BLOB_RE, uri):\n Storage._download_blob(uri, out_dir)\n elif is_local:\n return Storage._download_local(uri, out_dir)\n elif re.search(_URI_RE, uri):\n return Storage._download_from_uri(uri, out_dir)\n elif uri.startswith(MODEL_MOUNT_DIRS):\n # Don't need to download models if this InferenceService is running in the multi-model\n # serving mode. The model agent will download models.\n return out_dir\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n s3 = boto3.resource('s3', endpoint_url=os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n parsed = urlparse(uri, scheme='s3')\n bucket_name = parsed.netloc\n bucket_path = parsed.path.lstrip('/')\n\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=bucket_path):\n # Skip where boto3 lists the directory as an object\n if obj.key.endswith(\"/\"):\n continue\n # In the case where bucket_path points to a single object, set the target key to bucket_path\n # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir\n target_key = (\n obj.key\n if bucket_path == obj.key\n else obj.key.replace(bucket_path, \"\", 1).lstrip(\"/\")\n )\n target = f\"{temp_dir}/{target_key}\"\n if not os.path.exists(os.path.dirname(target)):\n os.makedirs(os.path.dirname(target), exist_ok=True)\n bucket.download_file(obj.key, target)\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n prefix = bucket_path\n if not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n blobs = bucket.list_blobs(prefix=prefix)\n count = 0\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n if subdir_object_key.strip() != \"\":\n dest_path = os.path.join(temp_dir, subdir_object_key)\n logging.info(\"Downloading: %s\", dest_path)\n blob.download_to_filename(dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. 
\\\nThe path or model %s does not exist.\" % uri)\n\n @staticmethod\n def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals\n match = re.search(_BLOB_RE, uri)\n account_name = match.group(1)\n storage_url = match.group(2)\n container_name, prefix = storage_url.split(\"/\", 1)\n\n logging.info(\"Connecting to BLOB account: [%s], container: [%s], prefix: [%s]\",\n account_name,\n container_name,\n prefix)\n try:\n block_blob_service = BlockBlobService(account_name=account_name)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n except Exception: # pylint: disable=broad-except\n token = Storage._get_azure_storage_token()\n if token is None:\n logging.warning(\"Azure credentials not found, retrying anonymous access\")\n block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n count = 0\n for blob in blobs:\n dest_path = os.path.join(out_dir, blob.name)\n if \"/\" in blob.name:\n head, tail = os.path.split(blob.name)\n if prefix is not None:\n head = head[len(prefix):]\n if head.startswith('/'):\n head = head[1:]\n dir_path = os.path.join(out_dir, head)\n dest_path = os.path.join(dir_path, tail)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n logging.info(\"Downloading: %s to %s\", blob.name, dest_path)\n block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. \\\nThe path or model %s does not exist.\" % (uri))\n\n @staticmethod\n def _get_azure_storage_token():\n tenant_id = os.getenv(\"AZ_TENANT_ID\", \"\")\n client_id = os.getenv(\"AZ_CLIENT_ID\", \"\")\n client_secret = os.getenv(\"AZ_CLIENT_SECRET\", \"\")\n subscription_id = os.getenv(\"AZ_SUBSCRIPTION_ID\", \"\")\n\n if tenant_id == \"\" or client_id == \"\" or client_secret == \"\" or subscription_id == \"\":\n return None\n\n # note the SP must have \"Storage Blob Data Owner\" perms for this to work\n import adal\n from azure.storage.common import TokenCredential\n\n authority_url = \"https://login.microsoftonline.com/\" + tenant_id\n\n context = adal.AuthenticationContext(authority_url)\n\n token = context.acquire_token_with_client_credentials(\n \"https://storage.azure.com/\",\n client_id,\n client_secret)\n\n token_credential = TokenCredential(token[\"accessToken\"])\n\n logging.info(\"Retrieved SP token credential for client_id: %s\", client_id)\n\n return token_credential\n\n @staticmethod\n def _download_local(uri, out_dir=None):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise RuntimeError(\"Local path %s does not exist.\" % (uri))\n\n if out_dir is None:\n return local_path\n elif not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n if os.path.isdir(local_path):\n local_path = os.path.join(local_path, \"*\")\n\n for src in glob.glob(local_path):\n _, tail = os.path.split(src)\n dest_path = os.path.join(out_dir, tail)\n logging.info(\"Linking: %s to %s\", src, dest_path)\n os.symlink(src, dest_path)\n return out_dir\n\n @staticmethod\n def _download_from_uri(uri, out_dir=None):\n url = urlparse(uri)\n filename = os.path.basename(url.path)\n mimetype, encoding = mimetypes.guess_type(url.path)\n local_path = os.path.join(out_dir, filename)\n\n if filename == '':\n raise ValueError('No filename contained in URI: %s' % (uri))\n\n # Get header information from host url\n headers = {}\n host_uri = url.hostname\n\n headers_json = 
os.getenv(host_uri + _HEADERS_SUFFIX, \"{}\")\n headers = json.loads(headers_json)\n\n with requests.get(uri, stream=True, headers=headers) as response:\n if response.status_code != 200:\n raise RuntimeError(\"URI: %s returned a %s response code.\" % (uri, response.status_code))\n if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\\\n .startswith('application/zip'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/zip\\'\" % uri)\n if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\\\n .startswith('application/x-tar'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/x-tar\\'\" % uri)\n if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \\\n not response.headers.get('Content-Type', '').startswith('application/octet-stream'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/octet-stream\\'\"\n % uri)\n\n if encoding == 'gzip':\n stream = gzip.GzipFile(fileobj=response.raw)\n local_path = os.path.join(out_dir, f'{filename}.tar')\n else:\n stream = response.raw\n with open(local_path, 'wb') as out:\n shutil.copyfileobj(stream, out)\n\n if mimetype in [\"application/x-tar\", \"application/zip\"]:\n if mimetype == \"application/x-tar\":\n archive = tarfile.open(local_path, 'r', encoding='utf-8')\n else:\n archive = zipfile.ZipFile(local_path, 'r')\n archive.extractall(out_dir)\n archive.close()\n os.remove(local_path)\n\n return out_dir\n", "path": "python/kfserving/kfserving/storage.py"}]} | 3,953 | 403 |
gh_patches_debug_39264 | rasdani/github-patches | git_diff | rasterio__rasterio-660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Skip local rasterfill.cpp for GDAL >= 2.0
Our local rasterfill.cpp (see #253) has been upstreamed into GDAL 2.0+: https://github.com/OSGeo/gdal/pull/47.
</issue>
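The requested change boils down to asking gdal-config for the GDAL version in setup.py and dropping the vendored rasterfill.cpp source when GDAL is 2.0 or newer. A hedged, self-contained sketch of that check (it assumes gdal-config is on PATH and mirrors the approach in the golden diff after the listing):

```python
import subprocess

# GDAL >= 2.0 ships its own rasterfill implementation, so the vendored
# rasterfill.cpp is only needed when building against GDAL 1.x.
gdal_version = subprocess.check_output(["gdal-config", "--version"]).decode().strip()
gdal2plus = not gdal_version.startswith("1.")

fill_sources = ["rasterio/_fill.pyx"]
if not gdal2plus:
    fill_sources.append("rasterio/rasterfill.cpp")
```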
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Two environmental variables influence this script.
4 #
5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,
6 # libraries, and data files.
7 #
8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the
9 # source or binary distribution. This is essential when creating self-contained
10 # binary wheels.
11
12 import logging
13 import os
14 import pprint
15 import shutil
16 import subprocess
17 import sys
18
19 from setuptools import setup
20 from setuptools.extension import Extension
21
22
23 logging.basicConfig()
24 log = logging.getLogger()
25
26
27 def check_output(cmd):
28 # since subprocess.check_output doesn't exist in 2.6
29 # we wrap it here.
30 try:
31 out = subprocess.check_output(cmd)
32 return out.decode('utf')
33 except AttributeError:
34         # For some reason check_output doesn't exist
35 # So fall back on Popen
36 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
37 out, err = p.communicate()
38 return out
39
40
41 def copy_data_tree(datadir, destdir):
42 try:
43 shutil.rmtree(destdir)
44 except OSError:
45 pass
46 shutil.copytree(datadir, destdir)
47
48
49 # python -W all setup.py ...
50 if 'all' in sys.warnoptions:
51 log.level = logging.DEBUG
52
53 # Parse the version from the rasterio module.
54 with open('rasterio/__init__.py') as f:
55 for line in f:
56 if line.find("__version__") >= 0:
57 version = line.split("=")[1].strip()
58 version = version.strip('"')
59 version = version.strip("'")
60 continue
61
62 with open('VERSION.txt', 'w') as f:
63 f.write(version)
64
65 # Use Cython if available.
66 try:
67 from Cython.Build import cythonize
68 except ImportError:
69 cythonize = None
70
71 # By default we'll try to get options via gdal-config. On systems without,
72 # options will need to be set in setup.cfg or on the setup command line.
73 include_dirs = []
74 library_dirs = []
75 libraries = []
76 extra_link_args = []
77 gdal_output = [None]*3
78
79 try:
80 import numpy
81 include_dirs.append(numpy.get_include())
82 except ImportError:
83 log.critical("Numpy and its headers are required to run setup(). Exiting.")
84 sys.exit(1)
85
86 try:
87 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
88 for i, flag in enumerate(("--cflags", "--libs", "--datadir")):
89 gdal_output[i] = check_output([gdal_config, flag]).strip()
90
91 for item in gdal_output[0].split():
92 if item.startswith("-I"):
93 include_dirs.extend(item[2:].split(":"))
94 for item in gdal_output[1].split():
95 if item.startswith("-L"):
96 library_dirs.extend(item[2:].split(":"))
97 elif item.startswith("-l"):
98 libraries.append(item[2:])
99 else:
100 # e.g. -framework GDAL
101 extra_link_args.append(item)
102
103 except Exception as e:
104 if os.name == "nt":
105 log.info(("Building on Windows requires extra options to setup.py to locate needed GDAL files.\n"
106 "More information is available in the README."))
107 else:
108 log.warning("Failed to get options via gdal-config: %s", str(e))
109
110
111 # Conditionally copy the GDAL data. To be used in conjunction with
112 # the bdist_wheel command to make self-contained binary wheels.
113 if os.environ.get('PACKAGE_DATA'):
114 destdir = 'rasterio/gdal_data'
115 if gdal_output[2]:
116 log.info("Copying gdal data from %s" % gdal_output[2])
117 copy_data_tree(gdal_output[2], destdir)
118 else:
119 # check to see if GDAL_DATA is defined
120 gdal_data = os.environ.get('GDAL_DATA', None)
121 if gdal_data:
122 log.info("Copying gdal_data from %s" % gdal_data)
123 copy_data_tree(gdal_data, destdir)
124
125 # Conditionally copy PROJ.4 data.
126 projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')
127 if os.path.exists(projdatadir):
128 log.info("Copying proj_data from %s" % projdatadir)
129 copy_data_tree(projdatadir, 'rasterio/proj_data')
130
131 ext_options = dict(
132 include_dirs=include_dirs,
133 library_dirs=library_dirs,
134 libraries=libraries,
135 extra_link_args=extra_link_args)
136
137 if not os.name == "nt":
138 # These options fail on Windows if using Visual Studio
139 ext_options['extra_compile_args'] = ['-Wno-unused-parameter',
140 '-Wno-unused-function']
141
142 cythonize_options = {}
143 if os.environ.get('CYTHON_COVERAGE'):
144 cythonize_options['compiler_directives'] = {'linetrace': True}
145 cythonize_options['annotate'] = True
146 ext_options['define_macros'] = [('CYTHON_TRACE', '1'),
147 ('CYTHON_TRACE_NOGIL', '1')]
148
149 log.debug('ext_options:\n%s', pprint.pformat(ext_options))
150
151 # When building from a repo, Cython is required.
152 if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
153 log.info("MANIFEST.in found, presume a repo, cythonizing...")
154 if not cythonize:
155 log.critical(
156 "Cython.Build.cythonize not found. "
157 "Cython is required to build from a repo.")
158 sys.exit(1)
159 ext_modules = cythonize([
160 Extension(
161 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),
162 Extension(
163 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),
164 Extension(
165 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),
166 Extension(
167 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),
168 Extension(
169 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),
170 Extension(
171 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),
172 Extension(
173 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),
174 Extension(
175 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
176 Extension(
177 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
178 ], quiet=True, **cythonize_options)
179
180 # If there's no manifest template, as in an sdist, we just specify .c files.
181 else:
182 ext_modules = [
183 Extension(
184 'rasterio._base', ['rasterio/_base.c'], **ext_options),
185 Extension(
186 'rasterio._io', ['rasterio/_io.c'], **ext_options),
187 Extension(
188 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),
189 Extension(
190 'rasterio._features', ['rasterio/_features.c'], **ext_options),
191 Extension(
192 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),
193 Extension(
194 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),
195 Extension(
196 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),
197 Extension(
198 'rasterio._err', ['rasterio/_err.c'], **ext_options),
199 Extension(
200 'rasterio._example', ['rasterio/_example.c'], **ext_options),
201 ]
202
203 with open('README.rst') as f:
204 readme = f.read()
205
206 # Runtime requirements.
207 inst_reqs = ['affine', 'cligj', 'numpy', 'snuggs', 'click-plugins']
208
209 if sys.version_info < (3, 4):
210 inst_reqs.append('enum34')
211
212 setup_args = dict(
213 name='rasterio',
214 version=version,
215 description="Fast and direct raster I/O for use with Numpy and SciPy",
216 long_description=readme,
217 classifiers=[
218 'Development Status :: 4 - Beta',
219 'Intended Audience :: Developers',
220 'Intended Audience :: Information Technology',
221 'Intended Audience :: Science/Research',
222 'License :: OSI Approved :: BSD License',
223 'Programming Language :: C',
224 'Programming Language :: Python :: 2.6',
225 'Programming Language :: Python :: 2.7',
226 'Programming Language :: Python :: 3.3',
227 'Programming Language :: Python :: 3.4',
228 'Topic :: Multimedia :: Graphics :: Graphics Conversion',
229 'Topic :: Scientific/Engineering :: GIS'],
230 keywords='raster gdal',
231 author='Sean Gillies',
232 author_email='[email protected]',
233 url='https://github.com/mapbox/rasterio',
234 license='BSD',
235 package_dir={'': '.'},
236 packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],
237 entry_points='''
238 [console_scripts]
239 rio=rasterio.rio.main:main_group
240
241 [rasterio.rio_commands]
242 bounds=rasterio.rio.bounds:bounds
243 calc=rasterio.rio.calc:calc
244 clip=rasterio.rio.clip:clip
245 convert=rasterio.rio.convert:convert
246 edit-info=rasterio.rio.edit_info:edit
247 env=rasterio.rio.env:env
248 info=rasterio.rio.info:info
249 insp=rasterio.rio.insp:insp
250 mask=rasterio.rio.mask:mask
251 merge=rasterio.rio.merge:merge
252 overview=rasterio.rio.overview:overview
253 rasterize=rasterio.rio.rasterize:rasterize
254 sample=rasterio.rio.sample:sample
255 shapes=rasterio.rio.shapes:shapes
256 stack=rasterio.rio.stack:stack
257 warp=rasterio.rio.warp:warp
258 transform=rasterio.rio.transform:transform
259 ''',
260 include_package_data=True,
261 ext_modules=ext_modules,
262 zip_safe=False,
263 install_requires=inst_reqs,
264 extras_require={
265 'ipython': ['ipython>=2.0'],
266 's3': ['boto3'],
267 'test': ['boto3', 'packaging']})
268
269 if os.environ.get('PACKAGE_DATA'):
270 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}
271
272 setup(**setup_args)
273
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,8 @@
library_dirs = []
libraries = []
extra_link_args = []
-gdal_output = [None]*3
+gdal2plus = False
+gdal_output = [None]*4
try:
import numpy
@@ -85,7 +86,7 @@
try:
gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
- for i, flag in enumerate(("--cflags", "--libs", "--datadir")):
+ for i, flag in enumerate(("--cflags", "--libs", "--datadir", "--version")):
gdal_output[i] = check_output([gdal_config, flag]).strip()
for item in gdal_output[0].split():
@@ -99,6 +100,9 @@
else:
# e.g. -framework GDAL
extra_link_args.append(item)
+ # datadir, gdal_output[2] handled below
+ for item in gdal_output[3].split():
+ gdal2plus = not item.startswith("1.")
except Exception as e:
if os.name == "nt":
@@ -148,6 +152,14 @@
log.debug('ext_options:\n%s', pprint.pformat(ext_options))
+if gdal2plus:
+ # GDAL>=2.0 does not require vendorized rasterfill.cpp
+ cython_fill = ['rasterio/_fill.pyx']
+ sdist_fill = ['rasterio/_fill.cpp']
+else:
+ cython_fill = ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp']
+ sdist_fill = ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp']
+
# When building from a repo, Cython is required.
if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
log.info("MANIFEST.in found, presume a repo, cythonizing...")
@@ -170,7 +182,7 @@
Extension(
'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),
Extension(
- 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),
+ 'rasterio._fill', cython_fill, **ext_options),
Extension(
'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
Extension(
@@ -193,12 +205,11 @@
Extension(
'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),
Extension(
- 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),
+ 'rasterio._fill', sdist_fill, **ext_options),
Extension(
'rasterio._err', ['rasterio/_err.c'], **ext_options),
Extension(
- 'rasterio._example', ['rasterio/_example.c'], **ext_options),
- ]
+ 'rasterio._example', ['rasterio/_example.c'], **ext_options)]
with open('README.rst') as f:
readme = f.read()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,7 +74,8 @@\n library_dirs = []\n libraries = []\n extra_link_args = []\n-gdal_output = [None]*3\n+gdal2plus = False\n+gdal_output = [None]*4\n \n try:\n import numpy\n@@ -85,7 +86,7 @@\n \n try:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n- for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n+ for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\", \"--version\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n \n for item in gdal_output[0].split():\n@@ -99,6 +100,9 @@\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n+ # datadir, gdal_output[2] handled below\n+ for item in gdal_output[3].split():\n+ gdal2plus = not item.startswith(\"1.\")\n \n except Exception as e:\n if os.name == \"nt\":\n@@ -148,6 +152,14 @@\n \n log.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n \n+if gdal2plus:\n+ # GDAL>=2.0 does not require vendorized rasterfill.cpp\n+ cython_fill = ['rasterio/_fill.pyx']\n+ sdist_fill = ['rasterio/_fill.cpp']\n+else:\n+ cython_fill = ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp']\n+ sdist_fill = ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp']\n+\n # When building from a repo, Cython is required.\n if os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n@@ -170,7 +182,7 @@\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n- 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n+ 'rasterio._fill', cython_fill, **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n@@ -193,12 +205,11 @@\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n- 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n+ 'rasterio._fill', sdist_fill, **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n- 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n- ]\n+ 'rasterio._example', ['rasterio/_example.c'], **ext_options)]\n \n with open('README.rst') as f:\n readme = f.read()\n", "issue": "Skip local rasterfill.cpp for GDAL >= 2.0\nOur local rasterfill.cpp (see #253) is in GDAL 2.+: https://github.com/OSGeo/gdal/pull/47.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. 
This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. 
To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\ncythonize_options = {}\nif os.environ.get('CYTHON_COVERAGE'):\n cythonize_options['compiler_directives'] = {'linetrace': True}\n cythonize_options['annotate'] = True\n ext_options['define_macros'] = [('CYTHON_TRACE', '1'),\n ('CYTHON_TRACE_NOGIL', '1')]\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. \"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True, **cythonize_options)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = ['affine', 'cligj', 'numpy', 'snuggs', 'click-plugins']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and 
direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio', 'rasterio.tools'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.bounds:bounds\n calc=rasterio.rio.calc:calc\n clip=rasterio.rio.clip:clip\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.edit_info:edit\n env=rasterio.rio.env:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.insp:insp\n mask=rasterio.rio.mask:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.rasterize:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.shapes:shapes\n stack=rasterio.rio.stack:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.transform:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 'ipython': ['ipython>=2.0'],\n 's3': ['boto3'],\n 'test': ['boto3', 'packaging']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}]} | 3,652 | 755 |
gh_patches_debug_39320 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3323 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider woods_coffee is broken
During the global build at 2021-10-20-14-42-48, spider **woods_coffee** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/woods_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson))
</issue>
<code>
[start of locations/spiders/woods_coffee.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import re
5
6 daysKey = {
7 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',
8 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'
9 }
10
11 class WoodsCoffeeSpider(scrapy.Spider):
12 name = "woods_coffee"
13 item_attributes = { 'brand': "Woods Coffee" }
14 allowed_domains = ["www.woodscoffee.com"]
15 start_urls = (
16 'https://woodscoffee.com/locations/',
17 )
18
19 def store_hours(self, hours):
20 hours = hours.replace('–','-')
21 hours = hours.replace(u'\xa0', u' ')
22 days = hours.split(': ')[0].strip()
23
24 if('-' in days):
25 startDay = daysKey[days.split('-')[0]]
26 endDay = daysKey[days.split('-')[1]]
27 dayOutput = startDay + "-" + endDay
28 else:
29 if('DAILY' in days):
30 startDay='Mo'
31 endDay='Su'
32 dayOutput = startDay + "-" + endDay
33 else:
34 dayOutput = daysKey[days]
35
36 bothHours = hours.split(': ')[1].replace(' ','')
37 openHours = bothHours.split("-")[0]
38 closeHours = bothHours.split("-")[1]
39
40 if("AM" in openHours):
41 openHours = openHours.replace("AM","")
42 if(":" in openHours):
43 openH = openHours.split(":")[0]
44 openM = openHours.split(":")[1]
45 else:
46 openH = openHours
47 openM = "00"
48 openHours = openH + ":" + openM
49
50 if("PM" in openHours):
51 openHours = openHours.replace("PM","")
52 if(":" in openHours):
53 openH = openHours.split(":")[0]
54 openM = openHours.split(":")[1]
55 else:
56 openH = openHours
57 openM = "00"
58 openH = str(int(openH) + 12)
59 openHours = openH + ":" + openM
60
61 if("AM" in closeHours):
62 closeHours = closeHours.replace("AM","")
63 if(":" in closeHours):
64 closeH = closeHours.split(":")[0]
65 closeM = closeHours.split(":")[1]
66 else:
67 closeH = closeHours
68 closeM = "00"
69 closeHours = closeH + ":" + closeM
70
71 if("PM" in closeHours):
72 closeHours = closeHours.replace("PM","")
73 if(":" in closeHours):
74 closeH = closeHours.split(":")[0]
75 closeM = closeHours.split(":")[1]
76 else:
77 closeH = closeHours
78 closeM = "00"
79 closeH = str(int(closeH) + 12)
80 closeHours = closeH + ":" + closeM
81
82 return dayOutput +' '+ openHours.replace(' ','') + "-" + closeHours + ';'
83
84 def parse(self, response):
85 for match in response.xpath("//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div"):
86 cityState = match.xpath(".//div[contains(@class,'heading-text el-text')]/div/p/text()").extract_first();
87 cityString = cityState.split(",")[0].strip()
88 stateString = cityState.split(",")[1].strip()
89
90 addressString = match.xpath(".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not(.//strong)]/text()").extract_first().strip()
91 postcodeString = addressString.split(stateString)[1].strip()
92 addressString = addressString.split(stateString)[0].replace(',','').strip().strip(cityString).strip()
93
94
95 if(match.xpath(".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()").extract_first() is None):
96 phoneString = ""
97 else:
98 phoneString = match.xpath(".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()").extract_first()
99 phoneString = phoneString.replace(' ','').strip()
100
101 hoursString = ""
102 for hoursMatch in match.xpath(".//p[contains(@style,'text-align: center;')]/strong//following-sibling::text()"):
103 hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\n',''))
104 hoursString = hoursString.strip(';').strip()
105
106 yield GeojsonPointItem(
107 ref=match.xpath(".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()").extract_first(),
108 addr_full=addressString,
109 city=cityString,
110 state=stateString,
111 postcode=postcodeString,
112 phone=phoneString,
113 opening_hours=hoursString,
114 website=match.xpath(".//a/@href").extract_first(),
115 )
116
[end of locations/spiders/woods_coffee.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/woods_coffee.py b/locations/spiders/woods_coffee.py
--- a/locations/spiders/woods_coffee.py
+++ b/locations/spiders/woods_coffee.py
@@ -1,16 +1,17 @@
# -*- coding: utf-8 -*-
import scrapy
+
from locations.items import GeojsonPointItem
-import re
daysKey = {
'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',
'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'
}
+
class WoodsCoffeeSpider(scrapy.Spider):
name = "woods_coffee"
- item_attributes = { 'brand': "Woods Coffee" }
+ item_attributes = {'brand': "Woods Coffee", "brand_wikidata": "Q8033255"}
allowed_domains = ["www.woodscoffee.com"]
start_urls = (
'https://woodscoffee.com/locations/',
@@ -82,8 +83,8 @@
return dayOutput +' '+ openHours.replace(' ','') + "-" + closeHours + ';'
def parse(self, response):
- for match in response.xpath("//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div"):
- cityState = match.xpath(".//div[contains(@class,'heading-text el-text')]/div/p/text()").extract_first();
+ for match in response.xpath("//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div/parent::div"):
+ cityState = match.xpath(".//div[contains(@class,'heading-text el-text')]/div/p/text()").extract_first()
cityString = cityState.split(",")[0].strip()
stateString = cityState.split(",")[1].strip()
@@ -103,13 +104,17 @@
hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\n',''))
hoursString = hoursString.strip(';').strip()
+ name = match.xpath(".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()").extract_first()
+
yield GeojsonPointItem(
- ref=match.xpath(".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()").extract_first(),
+ ref=name,
+ name=name,
addr_full=addressString,
city=cityString,
state=stateString,
postcode=postcodeString,
+ country="USA",
phone=phoneString,
opening_hours=hoursString,
- website=match.xpath(".//a/@href").extract_first(),
+ website=response.urljoin(match.xpath(".//a/@href").extract_first()),
)
| {"golden_diff": "diff --git a/locations/spiders/woods_coffee.py b/locations/spiders/woods_coffee.py\n--- a/locations/spiders/woods_coffee.py\n+++ b/locations/spiders/woods_coffee.py\n@@ -1,16 +1,17 @@\n # -*- coding: utf-8 -*-\n import scrapy\n+\n from locations.items import GeojsonPointItem\n-import re\n \n daysKey = {\n 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',\n 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'\n }\n \n+\n class WoodsCoffeeSpider(scrapy.Spider):\n name = \"woods_coffee\"\n- item_attributes = { 'brand': \"Woods Coffee\" }\n+ item_attributes = {'brand': \"Woods Coffee\", \"brand_wikidata\": \"Q8033255\"}\n allowed_domains = [\"www.woodscoffee.com\"]\n start_urls = (\n 'https://woodscoffee.com/locations/',\n@@ -82,8 +83,8 @@\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n \n def parse(self, response):\n- for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div\"):\n- cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first();\n+ for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div/parent::div\"):\n+ cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first()\n cityString = cityState.split(\",\")[0].strip()\n stateString = cityState.split(\",\")[1].strip()\n \n@@ -103,13 +104,17 @@\n hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\\n',''))\n hoursString = hoursString.strip(';').strip()\n \n+ name = match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first()\n+\n yield GeojsonPointItem(\n- ref=match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first(),\n+ ref=name,\n+ name=name,\n addr_full=addressString,\n city=cityString,\n state=stateString,\n postcode=postcodeString,\n+ country=\"USA\",\n phone=phoneString,\n opening_hours=hoursString,\n- website=match.xpath(\".//a/@href\").extract_first(),\n+ website=response.urljoin(match.xpath(\".//a/@href\").extract_first()),\n )\n", "issue": "Spider woods_coffee is broken\nDuring the global build at 2021-10-20-14-42-48, spider **woods_coffee** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/woods_coffee.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/woods_coffee.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport re\n\ndaysKey = {\n 'MONDAY': 'Mo', 'TUESDAY': 'Tu', 'WEDNESDAY': 'We', 'THURSDAY': 'Th',\n 'FRIDAY': 'Fr', 'SATURDAY': 'Sa', 'SUNDAY': 'Su'\n}\n\nclass WoodsCoffeeSpider(scrapy.Spider):\n name = \"woods_coffee\"\n item_attributes = { 'brand': \"Woods Coffee\" }\n allowed_domains = [\"www.woodscoffee.com\"]\n start_urls = (\n 'https://woodscoffee.com/locations/',\n )\n\n def store_hours(self, hours):\n hours = hours.replace('\u2013','-')\n hours = hours.replace(u'\\xa0', u' ')\n days = hours.split(': ')[0].strip()\n\n if('-' in days):\n startDay = daysKey[days.split('-')[0]]\n endDay = daysKey[days.split('-')[1]]\n dayOutput = startDay + \"-\" + endDay\n else:\n if('DAILY' in days):\n startDay='Mo'\n endDay='Su'\n 
dayOutput = startDay + \"-\" + endDay\n else:\n dayOutput = daysKey[days]\n\n bothHours = hours.split(': ')[1].replace(' ','')\n openHours = bothHours.split(\"-\")[0]\n closeHours = bothHours.split(\"-\")[1]\n\n if(\"AM\" in openHours):\n openHours = openHours.replace(\"AM\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openHours = openH + \":\" + openM\n\n if(\"PM\" in openHours):\n openHours = openHours.replace(\"PM\",\"\")\n if(\":\" in openHours):\n openH = openHours.split(\":\")[0]\n openM = openHours.split(\":\")[1]\n else:\n openH = openHours\n openM = \"00\"\n openH = str(int(openH) + 12)\n openHours = openH + \":\" + openM\n\n if(\"AM\" in closeHours):\n closeHours = closeHours.replace(\"AM\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeHours = closeH + \":\" + closeM\n\n if(\"PM\" in closeHours):\n closeHours = closeHours.replace(\"PM\",\"\")\n if(\":\" in closeHours):\n closeH = closeHours.split(\":\")[0]\n closeM = closeHours.split(\":\")[1]\n else:\n closeH = closeHours\n closeM = \"00\"\n closeH = str(int(closeH) + 12)\n closeHours = closeH + \":\" + closeM\n\n return dayOutput +' '+ openHours.replace(' ','') + \"-\" + closeHours + ';'\n\n def parse(self, response):\n for match in response.xpath(\"//h2[contains(@class,'font-weight-700 text-uppercase')]/parent::div/parent::div\"):\n cityState = match.xpath(\".//div[contains(@class,'heading-text el-text')]/div/p/text()\").extract_first();\n cityString = cityState.split(\",\")[0].strip()\n stateString = cityState.split(\",\")[1].strip()\n\n addressString = match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not(.//strong)]/text()\").extract_first().strip()\n postcodeString = addressString.split(stateString)[1].strip()\n addressString = addressString.split(stateString)[0].replace(',','').strip().strip(cityString).strip()\n\n\n if(match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()\").extract_first() is None):\n phoneString = \"\"\n else:\n phoneString = match.xpath(\".//div[contains(@class,'uncode_text_column')]/p[contains(@style,'text-align: center;')][not (.//strong)]/br/following-sibling::text()\").extract_first()\n phoneString = phoneString.replace(' ','').strip()\n\n hoursString = \"\"\n for hoursMatch in match.xpath(\".//p[contains(@style,'text-align: center;')]/strong//following-sibling::text()\"):\n hoursString = hoursString +' '+self.store_hours(hoursMatch.extract().replace('\\n',''))\n hoursString = hoursString.strip(';').strip()\n\n yield GeojsonPointItem(\n ref=match.xpath(\".//h2[contains(@class,'font-weight-700 text-uppercase')]/span/text()\").extract_first(),\n addr_full=addressString,\n city=cityString,\n state=stateString,\n postcode=postcodeString,\n phone=phoneString,\n opening_hours=hoursString,\n website=match.xpath(\".//a/@href\").extract_first(),\n )\n", "path": "locations/spiders/woods_coffee.py"}]} | 2,121 | 634 |
gh_patches_debug_18119 | rasdani/github-patches | git_diff | scipy__scipy-8222 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation should discourage odeint
odeint is not the recommended way to solve an IVP, but that is not mentioned in the documentation. I am not suggesting that odeint be removed, but we should discourage users from relying on it.
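As an illustrative aside (not part of the original report), this is roughly the interface the docs could steer users toward instead — a minimal sketch using `scipy.integrate.solve_ivp`, which the eventual patch also references; the right-hand side and numbers are made up for illustration:

```python
import numpy as np
from scipy.integrate import solve_ivp

def rhs(t, y):
    # note: solve_ivp expects (t, y), the reverse of odeint's (y, t) order
    return -0.5 * y

# integrate y' = -0.5*y from t=0 to t=10 with y(0) = 2.0
sol = solve_ivp(rhs, (0, 10), [2.0], t_eval=np.linspace(0, 10, 101))
print(sol.t.shape, sol.y.shape)  # (101,), (1, 101)
```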
</issue>
<code>
[start of scipy/integrate/odepack.py]
1 # Author: Travis Oliphant
2 from __future__ import division, print_function, absolute_import
3
4 __all__ = ['odeint']
5
6 from . import _odepack
7 from copy import copy
8 import warnings
9
10 class ODEintWarning(Warning):
11 pass
12
13 _msgs = {2: "Integration successful.",
14 1: "Nothing was done; the integration time was 0.",
15 -1: "Excess work done on this call (perhaps wrong Dfun type).",
16 -2: "Excess accuracy requested (tolerances too small).",
17 -3: "Illegal input detected (internal error).",
18 -4: "Repeated error test failures (internal error).",
19 -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
20 -6: "Error weight became zero during problem.",
21 -7: "Internal workspace insufficient to finish (internal error)."
22 }
23
24
25 def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
26 ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
27 hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
28 mxords=5, printmessg=0):
29 """
30 Integrate a system of ordinary differential equations.
31
32 Solve a system of ordinary differential equations using lsoda from the
33 FORTRAN library odepack.
34
35 Solves the initial value problem for stiff or non-stiff systems
36 of first order ode-s::
37
38 dy/dt = func(y, t, ...)
39
40 where y can be a vector.
41
42 *Note*: The first two arguments of ``func(y, t, ...)`` are in the
43 opposite order of the arguments in the system definition function used
44 by the `scipy.integrate.ode` class.
45
46 Parameters
47 ----------
48 func : callable(y, t, ...)
49 Computes the derivative of y at t.
50 y0 : array
51 Initial condition on y (can be a vector).
52 t : array
53 A sequence of time points for which to solve for y. The initial
54 value point should be the first element of this sequence.
55 args : tuple, optional
56 Extra arguments to pass to function.
57 Dfun : callable(y, t, ...)
58 Gradient (Jacobian) of `func`.
59 col_deriv : bool, optional
60 True if `Dfun` defines derivatives down columns (faster),
61 otherwise `Dfun` should define derivatives across rows.
62 full_output : bool, optional
63 True if to return a dictionary of optional outputs as the second output
64 printmessg : bool, optional
65 Whether to print the convergence message
66
67 Returns
68 -------
69 y : array, shape (len(t), len(y0))
70 Array containing the value of y for each desired time in t,
71 with the initial value `y0` in the first row.
72 infodict : dict, only returned if full_output == True
73 Dictionary containing additional output information
74
75 ======= ============================================================
76 key meaning
77 ======= ============================================================
78 'hu' vector of step sizes successfully used for each time step.
79 'tcur' vector with the value of t reached for each time step.
80 (will always be at least as large as the input times).
81 'tolsf' vector of tolerance scale factors, greater than 1.0,
82 computed when a request for too much accuracy was detected.
83 'tsw' value of t at the time of the last method switch
84 (given for each time step)
85 'nst' cumulative number of time steps
86 'nfe' cumulative number of function evaluations for each time step
87 'nje' cumulative number of jacobian evaluations for each time step
88 'nqu' a vector of method orders for each successful step.
89 'imxer' index of the component of largest magnitude in the
90 weighted local error vector (e / ewt) on an error return, -1
91 otherwise.
92 'lenrw' the length of the double work array required.
93 'leniw' the length of integer work array required.
94 'mused' a vector of method indicators for each successful time step:
95 1: adams (nonstiff), 2: bdf (stiff)
96 ======= ============================================================
97
98 Other Parameters
99 ----------------
100 ml, mu : int, optional
101 If either of these are not None or non-negative, then the
102 Jacobian is assumed to be banded. These give the number of
103 lower and upper non-zero diagonals in this banded matrix.
104 For the banded case, `Dfun` should return a matrix whose
105 rows contain the non-zero bands (starting with the lowest diagonal).
106 Thus, the return matrix `jac` from `Dfun` should have shape
107 ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
108 The data in `jac` must be stored such that ``jac[i - j + mu, j]``
109 holds the derivative of the `i`th equation with respect to the `j`th
110 state variable. If `col_deriv` is True, the transpose of this
111 `jac` must be returned.
112 rtol, atol : float, optional
113 The input parameters `rtol` and `atol` determine the error
114 control performed by the solver. The solver will control the
115 vector, e, of estimated local errors in y, according to an
116 inequality of the form ``max-norm of (e / ewt) <= 1``,
117 where ewt is a vector of positive error weights computed as
118 ``ewt = rtol * abs(y) + atol``.
119 rtol and atol can be either vectors the same length as y or scalars.
120 Defaults to 1.49012e-8.
121 tcrit : ndarray, optional
122 Vector of critical points (e.g. singularities) where integration
123 care should be taken.
124 h0 : float, (0: solver-determined), optional
125 The step size to be attempted on the first step.
126 hmax : float, (0: solver-determined), optional
127 The maximum absolute step size allowed.
128 hmin : float, (0: solver-determined), optional
129 The minimum absolute step size allowed.
130 ixpr : bool, optional
131 Whether to generate extra printing at method switches.
132 mxstep : int, (0: solver-determined), optional
133 Maximum number of (internally defined) steps allowed for each
134 integration point in t.
135 mxhnil : int, (0: solver-determined), optional
136 Maximum number of messages printed.
137 mxordn : int, (0: solver-determined), optional
138 Maximum order to be allowed for the non-stiff (Adams) method.
139 mxords : int, (0: solver-determined), optional
140 Maximum order to be allowed for the stiff (BDF) method.
141
142 See Also
143 --------
144 ode : a more object-oriented integrator based on VODE.
145 quad : for finding the area under a curve.
146
147 Examples
148 --------
149 The second order differential equation for the angle `theta` of a
150 pendulum acted on by gravity with friction can be written::
151
152 theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
153
154 where `b` and `c` are positive constants, and a prime (') denotes a
155 derivative. To solve this equation with `odeint`, we must first convert
156 it to a system of first order equations. By defining the angular
157 velocity ``omega(t) = theta'(t)``, we obtain the system::
158
159 theta'(t) = omega(t)
160 omega'(t) = -b*omega(t) - c*sin(theta(t))
161
162 Let `y` be the vector [`theta`, `omega`]. We implement this system
163 in python as:
164
165 >>> def pend(y, t, b, c):
166 ... theta, omega = y
167 ... dydt = [omega, -b*omega - c*np.sin(theta)]
168 ... return dydt
169 ...
170
171 We assume the constants are `b` = 0.25 and `c` = 5.0:
172
173 >>> b = 0.25
174 >>> c = 5.0
175
176 For initial conditions, we assume the pendulum is nearly vertical
177 with `theta(0)` = `pi` - 0.1, and it initially at rest, so
178 `omega(0)` = 0. Then the vector of initial conditions is
179
180 >>> y0 = [np.pi - 0.1, 0.0]
181
182 We generate a solution 101 evenly spaced samples in the interval
183 0 <= `t` <= 10. So our array of times is:
184
185 >>> t = np.linspace(0, 10, 101)
186
187 Call `odeint` to generate the solution. To pass the parameters
188 `b` and `c` to `pend`, we give them to `odeint` using the `args`
189 argument.
190
191 >>> from scipy.integrate import odeint
192 >>> sol = odeint(pend, y0, t, args=(b, c))
193
194 The solution is an array with shape (101, 2). The first column
195 is `theta(t)`, and the second is `omega(t)`. The following code
196 plots both components.
197
198 >>> import matplotlib.pyplot as plt
199 >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
200 >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
201 >>> plt.legend(loc='best')
202 >>> plt.xlabel('t')
203 >>> plt.grid()
204 >>> plt.show()
205 """
206
207 if ml is None:
208 ml = -1 # changed to zero inside function call
209 if mu is None:
210 mu = -1 # changed to zero inside function call
211 t = copy(t)
212 y0 = copy(y0)
213 output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
214 full_output, rtol, atol, tcrit, h0, hmax, hmin,
215 ixpr, mxstep, mxhnil, mxordn, mxords)
216 if output[-1] < 0:
217 warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
218 warnings.warn(warning_msg, ODEintWarning)
219 elif printmessg:
220 warning_msg = _msgs[output[-1]]
221 warnings.warn(warning_msg, ODEintWarning)
222
223 if full_output:
224 output[1]['message'] = _msgs[output[-1]]
225
226 output = output[:-1]
227 if len(output) == 1:
228 return output[0]
229 else:
230 return output
231
[end of scipy/integrate/odepack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scipy/integrate/odepack.py b/scipy/integrate/odepack.py
--- a/scipy/integrate/odepack.py
+++ b/scipy/integrate/odepack.py
@@ -28,6 +28,9 @@
mxords=5, printmessg=0):
"""
Integrate a system of ordinary differential equations.
+
+ .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
+ differential equation.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
@@ -39,9 +42,9 @@
where y can be a vector.
- *Note*: The first two arguments of ``func(y, t, ...)`` are in the
- opposite order of the arguments in the system definition function used
- by the `scipy.integrate.ode` class.
+ .. note:: The first two arguments of ``func(y, t, ...)`` are in the
+ opposite order of the arguments in the system definition
+ function used by the `scipy.integrate.ode` class.
Parameters
----------
| {"golden_diff": "diff --git a/scipy/integrate/odepack.py b/scipy/integrate/odepack.py\n--- a/scipy/integrate/odepack.py\n+++ b/scipy/integrate/odepack.py\n@@ -28,6 +28,9 @@\n mxords=5, printmessg=0):\n \"\"\"\n Integrate a system of ordinary differential equations.\n+ \n+ .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a\n+ differential equation.\n \n Solve a system of ordinary differential equations using lsoda from the\n FORTRAN library odepack.\n@@ -39,9 +42,9 @@\n \n where y can be a vector.\n \n- *Note*: The first two arguments of ``func(y, t, ...)`` are in the\n- opposite order of the arguments in the system definition function used\n- by the `scipy.integrate.ode` class.\n+ .. note:: The first two arguments of ``func(y, t, ...)`` are in the\n+ opposite order of the arguments in the system definition\n+ function used by the `scipy.integrate.ode` class.\n \n Parameters\n ----------\n", "issue": "Documentation should discourage odeint\nodeint is not the recommended way to solve an IVP, but that's not mentioned in the documentation. I am not suggesting that odeint be removed, but we should discourage users from using it.\n", "before_files": [{"content": "# Author: Travis Oliphant\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['odeint']\n\nfrom . import _odepack\nfrom copy import copy\nimport warnings\n\nclass ODEintWarning(Warning):\n pass\n\n_msgs = {2: \"Integration successful.\",\n 1: \"Nothing was done; the integration time was 0.\",\n -1: \"Excess work done on this call (perhaps wrong Dfun type).\",\n -2: \"Excess accuracy requested (tolerances too small).\",\n -3: \"Illegal input detected (internal error).\",\n -4: \"Repeated error test failures (internal error).\",\n -5: \"Repeated convergence failures (perhaps bad Jacobian or tolerances).\",\n -6: \"Error weight became zero during problem.\",\n -7: \"Internal workspace insufficient to finish (internal error).\"\n }\n\n\ndef odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,\n ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,\n hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,\n mxords=5, printmessg=0):\n \"\"\"\n Integrate a system of ordinary differential equations.\n\n Solve a system of ordinary differential equations using lsoda from the\n FORTRAN library odepack.\n\n Solves the initial value problem for stiff or non-stiff systems\n of first order ode-s::\n\n dy/dt = func(y, t, ...)\n\n where y can be a vector.\n\n *Note*: The first two arguments of ``func(y, t, ...)`` are in the\n opposite order of the arguments in the system definition function used\n by the `scipy.integrate.ode` class.\n\n Parameters\n ----------\n func : callable(y, t, ...)\n Computes the derivative of y at t.\n y0 : array\n Initial condition on y (can be a vector).\n t : array\n A sequence of time points for which to solve for y. 
The initial\n value point should be the first element of this sequence.\n args : tuple, optional\n Extra arguments to pass to function.\n Dfun : callable(y, t, ...)\n Gradient (Jacobian) of `func`.\n col_deriv : bool, optional\n True if `Dfun` defines derivatives down columns (faster),\n otherwise `Dfun` should define derivatives across rows.\n full_output : bool, optional\n True if to return a dictionary of optional outputs as the second output\n printmessg : bool, optional\n Whether to print the convergence message\n\n Returns\n -------\n y : array, shape (len(t), len(y0))\n Array containing the value of y for each desired time in t,\n with the initial value `y0` in the first row.\n infodict : dict, only returned if full_output == True\n Dictionary containing additional output information\n\n ======= ============================================================\n key meaning\n ======= ============================================================\n 'hu' vector of step sizes successfully used for each time step.\n 'tcur' vector with the value of t reached for each time step.\n (will always be at least as large as the input times).\n 'tolsf' vector of tolerance scale factors, greater than 1.0,\n computed when a request for too much accuracy was detected.\n 'tsw' value of t at the time of the last method switch\n (given for each time step)\n 'nst' cumulative number of time steps\n 'nfe' cumulative number of function evaluations for each time step\n 'nje' cumulative number of jacobian evaluations for each time step\n 'nqu' a vector of method orders for each successful step.\n 'imxer' index of the component of largest magnitude in the\n weighted local error vector (e / ewt) on an error return, -1\n otherwise.\n 'lenrw' the length of the double work array required.\n 'leniw' the length of integer work array required.\n 'mused' a vector of method indicators for each successful time step:\n 1: adams (nonstiff), 2: bdf (stiff)\n ======= ============================================================\n\n Other Parameters\n ----------------\n ml, mu : int, optional\n If either of these are not None or non-negative, then the\n Jacobian is assumed to be banded. These give the number of\n lower and upper non-zero diagonals in this banded matrix.\n For the banded case, `Dfun` should return a matrix whose\n rows contain the non-zero bands (starting with the lowest diagonal).\n Thus, the return matrix `jac` from `Dfun` should have shape\n ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.\n The data in `jac` must be stored such that ``jac[i - j + mu, j]``\n holds the derivative of the `i`th equation with respect to the `j`th\n state variable. If `col_deriv` is True, the transpose of this\n `jac` must be returned.\n rtol, atol : float, optional\n The input parameters `rtol` and `atol` determine the error\n control performed by the solver. The solver will control the\n vector, e, of estimated local errors in y, according to an\n inequality of the form ``max-norm of (e / ewt) <= 1``,\n where ewt is a vector of positive error weights computed as\n ``ewt = rtol * abs(y) + atol``.\n rtol and atol can be either vectors the same length as y or scalars.\n Defaults to 1.49012e-8.\n tcrit : ndarray, optional\n Vector of critical points (e.g. 
singularities) where integration\n care should be taken.\n h0 : float, (0: solver-determined), optional\n The step size to be attempted on the first step.\n hmax : float, (0: solver-determined), optional\n The maximum absolute step size allowed.\n hmin : float, (0: solver-determined), optional\n The minimum absolute step size allowed.\n ixpr : bool, optional\n Whether to generate extra printing at method switches.\n mxstep : int, (0: solver-determined), optional\n Maximum number of (internally defined) steps allowed for each\n integration point in t.\n mxhnil : int, (0: solver-determined), optional\n Maximum number of messages printed.\n mxordn : int, (0: solver-determined), optional\n Maximum order to be allowed for the non-stiff (Adams) method.\n mxords : int, (0: solver-determined), optional\n Maximum order to be allowed for the stiff (BDF) method.\n\n See Also\n --------\n ode : a more object-oriented integrator based on VODE.\n quad : for finding the area under a curve.\n\n Examples\n --------\n The second order differential equation for the angle `theta` of a\n pendulum acted on by gravity with friction can be written::\n\n theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0\n\n where `b` and `c` are positive constants, and a prime (') denotes a\n derivative. To solve this equation with `odeint`, we must first convert\n it to a system of first order equations. By defining the angular\n velocity ``omega(t) = theta'(t)``, we obtain the system::\n\n theta'(t) = omega(t)\n omega'(t) = -b*omega(t) - c*sin(theta(t))\n\n Let `y` be the vector [`theta`, `omega`]. We implement this system\n in python as:\n\n >>> def pend(y, t, b, c):\n ... theta, omega = y\n ... dydt = [omega, -b*omega - c*np.sin(theta)]\n ... return dydt\n ...\n \n We assume the constants are `b` = 0.25 and `c` = 5.0:\n\n >>> b = 0.25\n >>> c = 5.0\n\n For initial conditions, we assume the pendulum is nearly vertical\n with `theta(0)` = `pi` - 0.1, and it initially at rest, so\n `omega(0)` = 0. Then the vector of initial conditions is\n\n >>> y0 = [np.pi - 0.1, 0.0]\n\n We generate a solution 101 evenly spaced samples in the interval\n 0 <= `t` <= 10. So our array of times is:\n\n >>> t = np.linspace(0, 10, 101)\n\n Call `odeint` to generate the solution. To pass the parameters\n `b` and `c` to `pend`, we give them to `odeint` using the `args`\n argument.\n\n >>> from scipy.integrate import odeint\n >>> sol = odeint(pend, y0, t, args=(b, c))\n\n The solution is an array with shape (101, 2). The first column\n is `theta(t)`, and the second is `omega(t)`. 
The following code\n plots both components.\n\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')\n >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')\n >>> plt.legend(loc='best')\n >>> plt.xlabel('t')\n >>> plt.grid()\n >>> plt.show()\n \"\"\"\n\n if ml is None:\n ml = -1 # changed to zero inside function call\n if mu is None:\n mu = -1 # changed to zero inside function call\n t = copy(t)\n y0 = copy(y0)\n output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,\n full_output, rtol, atol, tcrit, h0, hmax, hmin,\n ixpr, mxstep, mxhnil, mxordn, mxords)\n if output[-1] < 0:\n warning_msg = _msgs[output[-1]] + \" Run with full_output = 1 to get quantitative information.\"\n warnings.warn(warning_msg, ODEintWarning)\n elif printmessg:\n warning_msg = _msgs[output[-1]]\n warnings.warn(warning_msg, ODEintWarning)\n\n if full_output:\n output[1]['message'] = _msgs[output[-1]]\n\n output = output[:-1]\n if len(output) == 1:\n return output[0]\n else:\n return output\n", "path": "scipy/integrate/odepack.py"}]} | 3,629 | 260 |
gh_patches_debug_20028 | rasdani/github-patches | git_diff | pypa__setuptools-3207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Removal of convert_path break some packages
### setuptools version
setuptools=61.0.0
### Python version
Python 3.9
### OS
all
### Additional environment information
_No response_
### Description
Some packages such as Cartopy have been broken by the upgrade to setuptools 61.0.0
https://github.com/SciTools/cartopy/issues/2021
This is because of commit https://github.com/pypa/setuptools/commit/1ee962510ba66578f6069e6a675b3715ad12ac0b which removes the import
```python
from distutils.util import convert_path
```
This should at least be documented in the changelog as an API breakage.
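As an editorial aside (not part of the original report), a downstream package could guard against the removed re-export with a small compatibility shim — a sketch, assuming only `convert_path` itself is needed:

```python
try:
    # works on setuptools < 61.0.0 (and again once the re-export returns)
    from setuptools import convert_path
except ImportError:
    # canonical location of the helper
    from distutils.util import convert_path
```

The accompanying patch later in this record restores the name behind a `SetuptoolsDeprecationWarning`, so such a shim is only needed for the affected 61.0.0 window.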
### Expected behavior
Removing the import from `__init__.py` should be documented in the [CHANGES.rst file](https://github.com/pypa/setuptools/blob/main/CHANGES.rst) for version 61.0.0
### How to Reproduce
Try installing Cartopy with lastest setuptools version
### Output
```console
ImportError: cannot import name 'convert_path' from 'setuptools'
```
</issue>
<code>
[start of setuptools/__init__.py]
1 """Extensions to the 'distutils' for large or complex distributions"""
2
3 import functools
4 import os
5 import re
6
7 import _distutils_hack.override # noqa: F401
8
9 import distutils.core
10 from distutils.errors import DistutilsOptionError
11
12 from ._deprecation_warning import SetuptoolsDeprecationWarning
13
14 import setuptools.version
15 from setuptools.extension import Extension
16 from setuptools.dist import Distribution
17 from setuptools.depends import Require
18 from setuptools.discovery import PackageFinder, PEP420PackageFinder
19 from . import monkey
20 from . import logging
21
22
23 __all__ = [
24 'setup',
25 'Distribution',
26 'Command',
27 'Extension',
28 'Require',
29 'SetuptoolsDeprecationWarning',
30 'find_packages',
31 'find_namespace_packages',
32 ]
33
34 __version__ = setuptools.version.__version__
35
36 bootstrap_install_from = None
37
38
39 find_packages = PackageFinder.find
40 find_namespace_packages = PEP420PackageFinder.find
41
42
43 def _install_setup_requires(attrs):
44 # Note: do not use `setuptools.Distribution` directly, as
45 # our PEP 517 backend patch `distutils.core.Distribution`.
46 class MinimalDistribution(distutils.core.Distribution):
47 """
48 A minimal version of a distribution for supporting the
49 fetch_build_eggs interface.
50 """
51
52 def __init__(self, attrs):
53 _incl = 'dependency_links', 'setup_requires'
54 filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
55 super().__init__(filtered)
56 # Prevent accidentally triggering discovery with incomplete set of attrs
57 self.set_defaults._disable()
58
59 def finalize_options(self):
60 """
61 Disable finalize_options to avoid building the working set.
62 Ref #2158.
63 """
64
65 dist = MinimalDistribution(attrs)
66
67 # Honor setup.cfg's options.
68 dist.parse_config_files(ignore_option_errors=True)
69 if dist.setup_requires:
70 dist.fetch_build_eggs(dist.setup_requires)
71
72
73 def setup(**attrs):
74 # Make sure we have any requirements needed to interpret 'attrs'.
75 logging.configure()
76 _install_setup_requires(attrs)
77 return distutils.core.setup(**attrs)
78
79
80 setup.__doc__ = distutils.core.setup.__doc__
81
82
83 _Command = monkey.get_unpatched(distutils.core.Command)
84
85
86 class Command(_Command):
87 __doc__ = _Command.__doc__
88
89 command_consumes_arguments = False
90
91 def __init__(self, dist, **kw):
92 """
93 Construct the command for dist, updating
94 vars(self) with any keyword parameters.
95 """
96 super().__init__(dist)
97 vars(self).update(kw)
98
99 def _ensure_stringlike(self, option, what, default=None):
100 val = getattr(self, option)
101 if val is None:
102 setattr(self, option, default)
103 return default
104 elif not isinstance(val, str):
105 raise DistutilsOptionError(
106 "'%s' must be a %s (got `%s`)" % (option, what, val)
107 )
108 return val
109
110 def ensure_string_list(self, option):
111 r"""Ensure that 'option' is a list of strings. If 'option' is
112 currently a string, we split it either on /,\s*/ or /\s+/, so
113 "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
114 ["foo", "bar", "baz"].
115 """
116 val = getattr(self, option)
117 if val is None:
118 return
119 elif isinstance(val, str):
120 setattr(self, option, re.split(r',\s*|\s+', val))
121 else:
122 if isinstance(val, list):
123 ok = all(isinstance(v, str) for v in val)
124 else:
125 ok = False
126 if not ok:
127 raise DistutilsOptionError(
128 "'%s' must be a list of strings (got %r)" % (option, val)
129 )
130
131 def reinitialize_command(self, command, reinit_subcommands=0, **kw):
132 cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
133 vars(cmd).update(kw)
134 return cmd
135
136
137 def _find_all_simple(path):
138 """
139 Find all files under 'path'
140 """
141 results = (
142 os.path.join(base, file)
143 for base, dirs, files in os.walk(path, followlinks=True)
144 for file in files
145 )
146 return filter(os.path.isfile, results)
147
148
149 def findall(dir=os.curdir):
150 """
151 Find all files under 'dir' and return the list of full filenames.
152 Unless dir is '.', return full filenames with dir prepended.
153 """
154 files = _find_all_simple(dir)
155 if dir == os.curdir:
156 make_rel = functools.partial(os.path.relpath, start=dir)
157 files = map(make_rel, files)
158 return list(files)
159
160
161 class sic(str):
162 """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
163
164
165 # Apply monkey patches
166 monkey.patch_all()
167
[end of setuptools/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/__init__.py b/setuptools/__init__.py
--- a/setuptools/__init__.py
+++ b/setuptools/__init__.py
@@ -3,11 +3,13 @@
import functools
import os
import re
+import warnings
import _distutils_hack.override # noqa: F401
import distutils.core
from distutils.errors import DistutilsOptionError
+from distutils.util import convert_path as _convert_path
from ._deprecation_warning import SetuptoolsDeprecationWarning
@@ -158,6 +160,19 @@
return list(files)
[email protected](_convert_path)
+def convert_path(pathname):
+ from inspect import cleandoc
+
+ msg = """
+ The function `convert_path` is considered internal and not part of the public API.
+ Its direct usage by 3rd-party packages is considered deprecated and the function
+ may be removed in the future.
+ """
+ warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning)
+ return _convert_path(pathname)
+
+
class sic(str):
"""Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
| {"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -3,11 +3,13 @@\n import functools\n import os\n import re\n+import warnings\n \n import _distutils_hack.override # noqa: F401\n \n import distutils.core\n from distutils.errors import DistutilsOptionError\n+from distutils.util import convert_path as _convert_path\n \n from ._deprecation_warning import SetuptoolsDeprecationWarning\n \n@@ -158,6 +160,19 @@\n return list(files)\n \n \[email protected](_convert_path)\n+def convert_path(pathname):\n+ from inspect import cleandoc\n+\n+ msg = \"\"\"\n+ The function `convert_path` is considered internal and not part of the public API.\n+ Its direct usage by 3rd-party packages is considered deprecated and the function\n+ may be removed in the future.\n+ \"\"\"\n+ warnings.warn(cleandoc(msg), SetuptoolsDeprecationWarning)\n+ return _convert_path(pathname)\n+\n+\n class sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n", "issue": "[BUG] Removal of convert_path break some packages\n### setuptools version\n\nsetuptools=61.0.0\n\n### Python version\n\nPython 3.9\n\n### OS\n\nall\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nSome packages such as Cartopy have been broken by upgrade to setuptools=61.0.0\r\n\r\nhttps://github.com/SciTools/cartopy/issues/2021\r\n\r\nThis is because of commit https://github.com/pypa/setuptools/commit/1ee962510ba66578f6069e6a675b3715ad12ac0b which removes the import\r\n```python\r\nfrom distutils.util import convert_path\r\n```\r\n\r\nThis should at least be in the changelog as breakage of API\n\n### Expected behavior\n\nRemoving import from __init__.py should be documented in the [CHANGES.rst file](https://github.com/pypa/setuptools/blob/main/CHANGES.rst) for version 61.0.0\n\n### How to Reproduce\n\nTry installing Cartopy with lastest setuptools version\n\n### Output\n\n```console\r\nImportError: cannot import name 'convert_path' from 'setuptools'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport functools\nimport os\nimport re\n\nimport _distutils_hack.override # noqa: F401\n\nimport distutils.core\nfrom distutils.errors import DistutilsOptionError\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution\nfrom setuptools.depends import Require\nfrom setuptools.discovery import PackageFinder, PEP420PackageFinder\nfrom . import monkey\nfrom . 
import logging\n\n\n__all__ = [\n 'setup',\n 'Distribution',\n 'Command',\n 'Extension',\n 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages',\n 'find_namespace_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n\nfind_packages = PackageFinder.find\nfind_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}\n super().__init__(filtered)\n # Prevent accidentally triggering discovery with incomplete set of attrs\n self.set_defaults._disable()\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n logging.configure()\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n super().__init__(dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, str):\n raise DistutilsOptionError(\n \"'%s' must be a %s (got `%s`)\" % (option, what, val)\n )\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\" % (option, val)\n )\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nclass sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]} | 2,289 | 272 |
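Editorial note on the record above: the golden diff does not delete `convert_path` outright, it re-exports `distutils.util.convert_path` behind a deprecation shim. A minimal sketch of what a downstream caller (e.g. Cartopy) would observe once that patch is applied — the warning class comes from the diff, the example path is arbitrary:

```python
import warnings
import setuptools

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Still returns the converted path, but now emits SetuptoolsDeprecationWarning.
    result = setuptools.convert_path("pkg/sub/module.py")

print(result)                       # e.g. "pkg/sub/module.py" on POSIX
print(caught[0].category.__name__)  # "SetuptoolsDeprecationWarning"
```

So the import error reported in the issue goes away, while users are still nudged toward `distutils.util.convert_path` or a vendored copy.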
gh_patches_debug_1446 | rasdani/github-patches | git_diff | deis__deis-427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
permalinks in the documentation
It would be nice to permalink a specific header in the Deis documentation, much like how Stackato's documentation is built: http://docs.stackato.com/client/index.html#getting-help
This is probably a flag set somewhere in Sphinx to get this set up, but would be awesome for referential purposes on IRC or by email.
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # deis documentation build configuration file, created by
4 # sphinx-quickstart on Fri Jul 26 12:12:00 2013.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import os
15 import sys
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 #sys.path.insert(0, os.path.abspath('.'))
21 sys.path.insert(0, os.path.abspath('..'))
22 # create local_settings.py for SECRET_KEY if necessary
23 local_settings_path = os.path.abspath(
24 os.path.join('..', 'deis', 'local_settings.py'))
25 if not os.path.exists(local_settings_path):
26 with open(local_settings_path, 'w') as local_settings:
27 local_settings.write("SECRET_KEY = 'DummySecretKey'\n")
28 # set up Django
29 os.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'
30 from django.conf import settings # noqa
31
32 # -- General configuration -----------------------------------------------------
33
34 # If your documentation needs a minimal Sphinx version, state it here.
35 #needs_sphinx = '1.0'
36
37 # Add any Sphinx extension module names here, as strings. They can be extensions
38 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
39 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
40 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain']
41
42 # Add any paths that contain templates here, relative to this directory.
43 templates_path = ['_templates']
44
45 # The suffix of source filenames.
46 source_suffix = '.rst'
47
48 # The encoding of source files.
49 #source_encoding = 'utf-8-sig'
50
51 # The master toctree document.
52 master_doc = 'toctree'
53
54 # General information about the project.
55 project = u'deis'
56 copyright = u'2013, OpDemand LLC'
57
58 # The version info for the project you're documenting, acts as replacement for
59 # |version| and |release|, also used in various other places throughout the
60 # built documents.
61 #
62 from deis import __version__
63
64 # The short X.Y version.
65 version = __version__.rsplit('.', 1)[0]
66 # The full version, including alpha/beta/rc tags.
67 release = __version__
68
69 # The language for content autogenerated by Sphinx. Refer to documentation
70 # for a list of supported languages.
71 #language = None
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 exclude_patterns = ['_build']
82
83 # The reST default role (used for this markup: `text`) to use for all documents.
84 #default_role = None
85
86 # If true, '()' will be appended to :func: etc. cross-reference text.
87 #add_function_parentheses = True
88
89 # If true, the current module name will be prepended to all description
90 # unit titles (such as .. function::).
91 #add_module_names = True
92
93 # If true, sectionauthor and moduleauthor directives will be shown in the
94 # output. They are ignored by default.
95 #show_authors = False
96
97 # The name of the Pygments (syntax highlighting) style to use.
98 pygments_style = 'sphinx'
99
100 # A list of ignored prefixes for module index sorting.
101 #modindex_common_prefix = []
102
103 # If true, keep warnings as "system message" paragraphs in the built documents.
104 #keep_warnings = False
105
106
107 # -- Options for HTML output ---------------------------------------------------
108
109 # The theme to use for HTML and HTML Help pages. See the documentation for
110 # a list of builtin themes.
111 html_theme = 'deis'
112
113 # Theme options are theme-specific and customize the look and feel of a theme
114 # further. For a list of options available for each theme, see the
115 # documentation.
116 #html_theme_options = {}
117
118 # Add any paths that contain custom themes here, relative to this directory.
119 html_theme_path = ['theme']
120
121 # The name for this set of Sphinx documents. If None, it defaults to
122 # "<project> v<release> documentation".
123 #html_title = None
124
125 # A shorter title for the navigation bar. Default is the same as html_title.
126 #html_short_title = None
127
128 # The name of an image file (relative to this directory) to place at the top
129 # of the sidebar.
130 #html_logo = None
131
132 # The name of an image file (within the static path) to use as favicon of the
133 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
134 # pixels large.
135 #html_favicon = None
136
137 # Add any paths that contain custom static files (such as style sheets) here,
138 # relative to this directory. They are copied after the builtin static files,
139 # so a file named "default.css" will overwrite the builtin "default.css".
140 html_static_path = ['../web/static']
141
142 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
143 # using the given strftime format.
144 #html_last_updated_fmt = '%b %d, %Y'
145
146 # If true, SmartyPants will be used to convert quotes and dashes to
147 # typographically correct entities.
148 html_use_smartypants = True
149
150 html_add_permalinks = None
151
152 # Custom sidebar templates, maps document names to template names.
153 #html_sidebars = {}
154
155 # Additional templates that should be rendered to pages, maps page names to
156 # template names.
157 #html_additional_pages = {}
158
159 # If false, no module index is generated.
160 #html_domain_indices = True
161
162 # If false, no index is generated.
163 #html_use_index = True
164
165 # If true, the index is split into individual pages for each letter.
166 #html_split_index = False
167
168 # If true, links to the reST sources are added to the pages.
169 #html_show_sourcelink = True
170
171 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
172 #html_show_sphinx = True
173
174 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
175 #html_show_copyright = True
176
177 # If true, an OpenSearch description file will be output, and all pages will
178 # contain a <link> tag referring to it. The value of this option must be the
179 # base URL from which the finished HTML is served.
180 #html_use_opensearch = ''
181
182 # This is the file name suffix for HTML files (e.g. ".xhtml").
183 #html_file_suffix = None
184
185 # Output file base name for HTML help builder.
186 htmlhelp_basename = 'deisdoc'
187
188
189 # -- Options for LaTeX output --------------------------------------------------
190
191 latex_elements = {
192 # The paper size ('letterpaper' or 'a4paper').
193 #'papersize': 'letterpaper',
194
195 # The font size ('10pt', '11pt' or '12pt').
196 #'pointsize': '10pt',
197
198 # Additional stuff for the LaTeX preamble.
199 #'preamble': '',
200 }
201
202 # Grouping the document tree into LaTeX files. List of tuples
203 # (source start file, target name, title, author, documentclass [howto/manual]).
204 latex_documents = [
205 ('index', 'deis.tex', u'deis Documentation',
206 u'Author', 'manual'),
207 ]
208
209 # The name of an image file (relative to this directory) to place at the top of
210 # the title page.
211 #latex_logo = None
212
213 # For "manual" documents, if this is true, then toplevel headings are parts,
214 # not chapters.
215 #latex_use_parts = False
216
217 # If true, show page references after internal links.
218 #latex_show_pagerefs = False
219
220 # If true, show URL addresses after external links.
221 #latex_show_urls = False
222
223 # Documents to append as an appendix to all manuals.
224 #latex_appendices = []
225
226 # If false, no module index is generated.
227 #latex_domain_indices = True
228
229
230 # -- Options for manual page output --------------------------------------------
231
232 # One entry per manual page. List of tuples
233 # (source start file, name, description, authors, manual section).
234 man_pages = [
235 ('index', 'deis', u'deis Documentation',
236 [u'Author'], 1)
237 ]
238
239 # If true, show URL addresses after external links.
240 #man_show_urls = False
241
242
243 # -- Options for Texinfo output ------------------------------------------------
244
245 # Grouping the document tree into Texinfo files. List of tuples
246 # (source start file, target name, title, author,
247 # dir menu entry, description, category)
248 texinfo_documents = [
249 ('index', 'deis', u'deis Documentation',
250 u'Author', 'deis', 'One line description of project.',
251 'Miscellaneous'),
252 ]
253
254 # Documents to append as an appendix to all manuals.
255 #texinfo_appendices = []
256
257 # If false, no module index is generated.
258 #texinfo_domain_indices = True
259
260 # How to display URL addresses: 'footnote', 'no', or 'inline'.
261 #texinfo_show_urls = 'footnote'
262
263 # If true, do not generate a @detailmenu in the "Top" node's menu.
264 #texinfo_no_detailmenu = False
265
266
267 # -- Options for Epub output ---------------------------------------------------
268
269 # Bibliographic Dublin Core info.
270 epub_title = u'deis'
271 epub_author = u'OpDemand LLC'
272 epub_publisher = u'OpDemand LLC'
273 epub_copyright = u'2013, OpDemand LLC'
274
275 # The language of the text. It defaults to the language option
276 # or en if the language is not set.
277 #epub_language = ''
278
279 # The scheme of the identifier. Typical schemes are ISBN or URL.
280 #epub_scheme = ''
281
282 # The unique identifier of the text. This can be a ISBN number
283 # or the project homepage.
284 #epub_identifier = ''
285
286 # A unique identification for the text.
287 #epub_uid = ''
288
289 # A tuple containing the cover image and cover page html template filenames.
290 #epub_cover = ()
291
292 # A sequence of (type, uri, title) tuples for the guide element of content.opf.
293 #epub_guide = ()
294
295 # HTML files that should be inserted before the pages created by sphinx.
296 # The format is a list of tuples containing the path and title.
297 #epub_pre_files = []
298
299 # HTML files shat should be inserted after the pages created by sphinx.
300 # The format is a list of tuples containing the path and title.
301 #epub_post_files = []
302
303 # A list of files that should not be packed into the epub file.
304 #epub_exclude_files = []
305
306 # The depth of the table of contents in toc.ncx.
307 #epub_tocdepth = 3
308
309 # Allow duplicate toc entries.
310 #epub_tocdup = True
311
312 # Fix unsupported image types using the PIL.
313 #epub_fix_images = False
314
315 # Scale large images.
316 #epub_max_image_width = 0
317
318 # If 'no', URL addresses will not be shown.
319 #epub_show_urls = 'inline'
320
321 # If false, no index is generated.
322 #epub_use_index = True
323
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -147,7 +147,7 @@
# typographically correct entities.
html_use_smartypants = True
-html_add_permalinks = None
+html_add_permalinks = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -147,7 +147,7 @@\n # typographically correct entities.\n html_use_smartypants = True\n \n-html_add_permalinks = None\n+html_add_permalinks = True\n \n # Custom sidebar templates, maps document names to template names.\n #html_sidebars = {}\n", "issue": "permalinks in the documentation\nIt would be nice to permalink a specific header in the Deis documentation, much like how Stackato's documentation is built: http://docs.stackato.com/client/index.html#getting-help\n\nThis is probably a flag set somewhere in Sphinx to get this set up, but would be awesome for referential purposes on IRC or by email.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# deis documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 26 12:12:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('..'))\n# create local_settings.py for SECRET_KEY if necessary\nlocal_settings_path = os.path.abspath(\n os.path.join('..', 'deis', 'local_settings.py'))\nif not os.path.exists(local_settings_path):\n with open(local_settings_path, 'w') as local_settings:\n local_settings.write(\"SECRET_KEY = 'DummySecretKey'\\n\")\n# set up Django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'\nfrom django.conf import settings # noqa\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'toctree'\n\n# General information about the project.\nproject = u'deis'\ncopyright = u'2013, OpDemand LLC'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom deis import __version__\n\n# The short X.Y version.\nversion = __version__.rsplit('.', 1)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'deis'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['theme']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../web/static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\nhtml_add_permalinks = None\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. 
Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'deisdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'deis.tex', u'deis Documentation',\n u'Author', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'deis', u'deis Documentation',\n [u'Author'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'deis', u'deis Documentation',\n u'Author', 'deis', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'deis'\nepub_author = u'OpDemand LLC'\nepub_publisher = u'OpDemand LLC'\nepub_copyright = u'2013, OpDemand LLC'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n#epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# If 'no', URL addresses will not be shown.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n", "path": "docs/conf.py"}]} | 4,031 | 87 |
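Editorial note on the record above: the fix is a single Sphinx setting in `docs/conf.py`. A rough sketch of the relevant fragment, with the later option names added for context (the rename to `html_permalinks` in Sphinx 3.5+ is an assumption from later Sphinx documentation, not part of this record):

```python
# docs/conf.py — Sphinx 1.x era, as in the golden diff: enable ¶ anchors on headings.
html_add_permalinks = True

# On Sphinx >= 3.5 the equivalent settings would be:
# html_permalinks = True
# html_permalinks_icon = "¶"
```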
gh_patches_debug_9280 | rasdani/github-patches | git_diff | nerfstudio-project__nerfstudio-2002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
a simple mistake in ExponentialDecayScheduler
When training nerfacto with --optimizers.fields.scheduler.warmup-steps 5000, it will crash soon.
**Expected behavior**
The lr should ramps up to 1e-2 in 5000 steps. However, the lr increase like this.

**Additional context**
This is caused by a typo in [ExponentialDecayScheduler](https://github.com/nerfstudio-project/nerfstudio/blob/e94d9031ab711bd755655adafba1b986e980f27b/nerfstudio/engine/schedulers.py#LL125C27-L125C27),
where `lr = self.config.lr_pre_warmup + (1 - self.config.lr_pre_warmup) * np.sin(0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1))`
should be `lr = self.config.lr_pre_warmup + (lr_init - self.config.lr_pre_warmup) * np.sin(0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1))`
</issue>
<code>
[start of nerfstudio/engine/schedulers.py]
1 # Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """Scheduler Classes"""
17
18 from abc import abstractmethod
19 from dataclasses import dataclass, field
20 from typing import Literal, Optional, Tuple, Type
21
22 import numpy as np
23 from torch.optim import Optimizer, lr_scheduler
24
25 try:
26 from torch.optim.lr_scheduler import LRScheduler
27 except ImportError:
28 # Backwards compatibility for PyTorch 1.x
29 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
30
31 from nerfstudio.configs.base_config import InstantiateConfig
32
33
34 @dataclass
35 class SchedulerConfig(InstantiateConfig):
36 """Basic scheduler config"""
37
38 _target: Type = field(default_factory=lambda: Scheduler)
39 """target class to instantiate"""
40
41
42 class Scheduler:
43 """Base scheduler"""
44
45 config: SchedulerConfig
46
47 def __init__(self, config: SchedulerConfig) -> None:
48 super().__init__()
49 self.config = config
50
51 @abstractmethod
52 def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:
53 """Abstract method that returns a scheduler object.
54
55 Args:
56 optimizer: The optimizer to use.
57 lr_init: The initial learning rate.
58 Returns:
59 The scheduler object.
60 """
61
62
63 @dataclass
64 class MultiStepSchedulerConfig(SchedulerConfig):
65 """Config for multi step scheduler where lr decays by gamma every milestone"""
66
67 _target: Type = field(default_factory=lambda: MultiStepScheduler)
68 """target class to instantiate"""
69 max_steps: int = 1000000
70 """The maximum number of steps."""
71 gamma: float = 0.33
72 """The learning rate decay factor."""
73 milestones: Tuple[int, ...] = (500000, 750000, 900000)
74 """The milestone steps at which to decay the learning rate."""
75
76
77 class MultiStepScheduler(Scheduler):
78 """Multi step scheduler where lr decays by gamma every milestone"""
79
80 config: MultiStepSchedulerConfig
81
82 def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:
83 scheduler = lr_scheduler.MultiStepLR(
84 optimizer=optimizer,
85 milestones=self.config.milestones,
86 gamma=self.config.gamma,
87 )
88 return scheduler
89
90
91 @dataclass
92 class ExponentialDecaySchedulerConfig(SchedulerConfig):
93 """Config for exponential decay scheduler with warmup"""
94
95 _target: Type = field(default_factory=lambda: ExponentialDecayScheduler)
96 """target class to instantiate"""
97 lr_pre_warmup: float = 1e-8
98 """Learning rate before warmup."""
99 lr_final: Optional[float] = None
100 """Final learning rate. If not provided, it will be set to the optimizers learning rate."""
101 warmup_steps: int = 0
102 """Number of warmup steps."""
103 max_steps: int = 100000
104 """The maximum number of steps."""
105 ramp: Literal["linear", "cosine"] = "cosine"
106 """The ramp function to use during the warmup."""
107
108
109 class ExponentialDecayScheduler(Scheduler):
110 """Exponential decay scheduler with linear warmup. Scheduler first ramps up to `lr_init` in `warmup_steps`
111 steps, then exponentially decays to `lr_final` in `max_steps` steps.
112 """
113
114 config: ExponentialDecaySchedulerConfig
115
116 def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:
117 if self.config.lr_final is None:
118 lr_final = lr_init
119 else:
120 lr_final = self.config.lr_final
121
122 def func(step):
123 if step < self.config.warmup_steps:
124 if self.config.ramp == "cosine":
125 lr = self.config.lr_pre_warmup + (1 - self.config.lr_pre_warmup) * np.sin(
126 0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1)
127 )
128 else:
129 lr = (
130 self.config.lr_pre_warmup
131 + (lr_init - self.config.lr_pre_warmup) * step / self.config.warmup_steps
132 )
133 else:
134 t = np.clip(
135 (step - self.config.warmup_steps) / (self.config.max_steps - self.config.warmup_steps), 0, 1
136 )
137 lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
138 return lr / lr_init # divided by lr_init because the multiplier is with the initial learning rate
139
140 scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=func)
141 return scheduler
142
143
144 @dataclass
145 class CosineDecaySchedulerConfig(SchedulerConfig):
146 """Config for cosine decay schedule"""
147
148 _target: Type = field(default_factory=lambda: CosineDecayScheduler)
149 """target class to instantiate"""
150 warm_up_end: int = 5000
151 """Iteration number where warmp ends"""
152 learning_rate_alpha: float = 0.05
153 """Learning rate alpha value"""
154 max_steps: int = 300000
155 """The maximum number of steps."""
156
157
158 class CosineDecayScheduler(Scheduler):
159 """Cosine decay scheduler with linear warmup"""
160
161 config: CosineDecaySchedulerConfig
162
163 def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:
164 def func(step):
165 if step < self.config.warm_up_end:
166 learning_factor = step / self.config.warm_up_end
167 else:
168 alpha = self.config.learning_rate_alpha
169 progress = (step - self.config.warm_up_end) / (self.config.max_steps - self.config.warm_up_end)
170 learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha
171 return learning_factor
172
173 scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=func)
174 return scheduler
175
[end of nerfstudio/engine/schedulers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nerfstudio/engine/schedulers.py b/nerfstudio/engine/schedulers.py
--- a/nerfstudio/engine/schedulers.py
+++ b/nerfstudio/engine/schedulers.py
@@ -122,7 +122,7 @@
def func(step):
if step < self.config.warmup_steps:
if self.config.ramp == "cosine":
- lr = self.config.lr_pre_warmup + (1 - self.config.lr_pre_warmup) * np.sin(
+ lr = self.config.lr_pre_warmup + (lr_init - self.config.lr_pre_warmup) * np.sin(
0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1)
)
else:
| {"golden_diff": "diff --git a/nerfstudio/engine/schedulers.py b/nerfstudio/engine/schedulers.py\n--- a/nerfstudio/engine/schedulers.py\n+++ b/nerfstudio/engine/schedulers.py\n@@ -122,7 +122,7 @@\n def func(step):\n if step < self.config.warmup_steps:\n if self.config.ramp == \"cosine\":\n- lr = self.config.lr_pre_warmup + (1 - self.config.lr_pre_warmup) * np.sin(\n+ lr = self.config.lr_pre_warmup + (lr_init - self.config.lr_pre_warmup) * np.sin(\n 0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1)\n )\n else:\n", "issue": "a simple mistake in ExponentialDecayScheduler\nWhen training nerfacto with --optimizers.fields.scheduler.warmup-steps 5000, it will crash soon.\r\n\r\n**Expected behavior**\r\nThe lr should ramps up to 1e-2 in 5000 steps. However, the lr increase like this.\r\n\r\n\r\n**Additional context**\r\nThis is caused by a typo in [ExponentialDecayScheduler](https://github.com/nerfstudio-project/nerfstudio/blob/e94d9031ab711bd755655adafba1b986e980f27b/nerfstudio/engine/schedulers.py#LL125C27-L125C27), \r\nwhere `lr = self.config.lr_pre_warmup + (1 - self.config.lr_pre_warmup) * np.sin(0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1))` \r\nshould be `lr = self.config.lr_pre_warmup + (lr_init - self.config.lr_pre_warmup) * np.sin(0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1))`\r\n\n", "before_files": [{"content": "# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Scheduler Classes\"\"\"\n\nfrom abc import abstractmethod\nfrom dataclasses import dataclass, field\nfrom typing import Literal, Optional, Tuple, Type\n\nimport numpy as np\nfrom torch.optim import Optimizer, lr_scheduler\n\ntry:\n from torch.optim.lr_scheduler import LRScheduler\nexcept ImportError:\n # Backwards compatibility for PyTorch 1.x\n from torch.optim.lr_scheduler import _LRScheduler as LRScheduler\n\nfrom nerfstudio.configs.base_config import InstantiateConfig\n\n\n@dataclass\nclass SchedulerConfig(InstantiateConfig):\n \"\"\"Basic scheduler config\"\"\"\n\n _target: Type = field(default_factory=lambda: Scheduler)\n \"\"\"target class to instantiate\"\"\"\n\n\nclass Scheduler:\n \"\"\"Base scheduler\"\"\"\n\n config: SchedulerConfig\n\n def __init__(self, config: SchedulerConfig) -> None:\n super().__init__()\n self.config = config\n\n @abstractmethod\n def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:\n \"\"\"Abstract method that returns a scheduler object.\n\n Args:\n optimizer: The optimizer to use.\n lr_init: The initial learning rate.\n Returns:\n The scheduler object.\n \"\"\"\n\n\n@dataclass\nclass MultiStepSchedulerConfig(SchedulerConfig):\n \"\"\"Config for multi step scheduler where lr decays by gamma every milestone\"\"\"\n\n _target: Type = field(default_factory=lambda: MultiStepScheduler)\n \"\"\"target class to instantiate\"\"\"\n max_steps: int = 1000000\n \"\"\"The maximum number of steps.\"\"\"\n gamma: 
float = 0.33\n \"\"\"The learning rate decay factor.\"\"\"\n milestones: Tuple[int, ...] = (500000, 750000, 900000)\n \"\"\"The milestone steps at which to decay the learning rate.\"\"\"\n\n\nclass MultiStepScheduler(Scheduler):\n \"\"\"Multi step scheduler where lr decays by gamma every milestone\"\"\"\n\n config: MultiStepSchedulerConfig\n\n def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:\n scheduler = lr_scheduler.MultiStepLR(\n optimizer=optimizer,\n milestones=self.config.milestones,\n gamma=self.config.gamma,\n )\n return scheduler\n\n\n@dataclass\nclass ExponentialDecaySchedulerConfig(SchedulerConfig):\n \"\"\"Config for exponential decay scheduler with warmup\"\"\"\n\n _target: Type = field(default_factory=lambda: ExponentialDecayScheduler)\n \"\"\"target class to instantiate\"\"\"\n lr_pre_warmup: float = 1e-8\n \"\"\"Learning rate before warmup.\"\"\"\n lr_final: Optional[float] = None\n \"\"\"Final learning rate. If not provided, it will be set to the optimizers learning rate.\"\"\"\n warmup_steps: int = 0\n \"\"\"Number of warmup steps.\"\"\"\n max_steps: int = 100000\n \"\"\"The maximum number of steps.\"\"\"\n ramp: Literal[\"linear\", \"cosine\"] = \"cosine\"\n \"\"\"The ramp function to use during the warmup.\"\"\"\n\n\nclass ExponentialDecayScheduler(Scheduler):\n \"\"\"Exponential decay scheduler with linear warmup. Scheduler first ramps up to `lr_init` in `warmup_steps`\n steps, then exponentially decays to `lr_final` in `max_steps` steps.\n \"\"\"\n\n config: ExponentialDecaySchedulerConfig\n\n def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:\n if self.config.lr_final is None:\n lr_final = lr_init\n else:\n lr_final = self.config.lr_final\n\n def func(step):\n if step < self.config.warmup_steps:\n if self.config.ramp == \"cosine\":\n lr = self.config.lr_pre_warmup + (1 - self.config.lr_pre_warmup) * np.sin(\n 0.5 * np.pi * np.clip(step / self.config.warmup_steps, 0, 1)\n )\n else:\n lr = (\n self.config.lr_pre_warmup\n + (lr_init - self.config.lr_pre_warmup) * step / self.config.warmup_steps\n )\n else:\n t = np.clip(\n (step - self.config.warmup_steps) / (self.config.max_steps - self.config.warmup_steps), 0, 1\n )\n lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return lr / lr_init # divided by lr_init because the multiplier is with the initial learning rate\n\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=func)\n return scheduler\n\n\n@dataclass\nclass CosineDecaySchedulerConfig(SchedulerConfig):\n \"\"\"Config for cosine decay schedule\"\"\"\n\n _target: Type = field(default_factory=lambda: CosineDecayScheduler)\n \"\"\"target class to instantiate\"\"\"\n warm_up_end: int = 5000\n \"\"\"Iteration number where warmp ends\"\"\"\n learning_rate_alpha: float = 0.05\n \"\"\"Learning rate alpha value\"\"\"\n max_steps: int = 300000\n \"\"\"The maximum number of steps.\"\"\"\n\n\nclass CosineDecayScheduler(Scheduler):\n \"\"\"Cosine decay scheduler with linear warmup\"\"\"\n\n config: CosineDecaySchedulerConfig\n\n def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> LRScheduler:\n def func(step):\n if step < self.config.warm_up_end:\n learning_factor = step / self.config.warm_up_end\n else:\n alpha = self.config.learning_rate_alpha\n progress = (step - self.config.warm_up_end) / (self.config.max_steps - self.config.warm_up_end)\n learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha\n return learning_factor\n\n scheduler = 
lr_scheduler.LambdaLR(optimizer, lr_lambda=func)\n return scheduler\n", "path": "nerfstudio/engine/schedulers.py"}]} | 2,746 | 171 |
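Editorial note on the record above: the fix replaces the literal `1` with `lr_init` in the cosine warmup branch, and it is easy to sanity-check numerically. A standalone sketch using the values from the issue (lr_init = 1e-2, 5000 warmup steps); the names are local to the sketch, not part of nerfstudio's API:

```python
import numpy as np

lr_init, lr_pre_warmup, warmup_steps = 1e-2, 1e-8, 5000

def cosine_warmup(step):
    # Mirrors the corrected branch of ExponentialDecayScheduler.get_scheduler().
    return lr_pre_warmup + (lr_init - lr_pre_warmup) * np.sin(
        0.5 * np.pi * np.clip(step / warmup_steps, 0, 1)
    )

print(cosine_warmup(0))             # ≈ 1e-08  (starts at lr_pre_warmup)
print(cosine_warmup(warmup_steps))  # ≈ 1e-02  (ramps up to lr_init)
```

With the original buggy line the second value would be ≈ 1.0 instead of 1e-2, which matches the runaway learning-rate curve plotted in the issue.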
gh_patches_debug_12137 | rasdani/github-patches | git_diff | Textualize__rich-2029 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Incorrect type for print_json indent in __init__.py
**Describe the bug**
The type for `indent` (`indent: int = 2`) in `print_json` from the `__init__.py` file seems to be incorrect. In `console.py` it is typed as `indent: Union[None, int, str] = 2` and `print_json` in the init calls from console.py so it seems like they should have the same type.
</issue>
<code>
[start of rich/__init__.py]
1 """Rich text and beautiful formatting in the terminal."""
2
3 import os
4 from typing import Callable, IO, TYPE_CHECKING, Any, Optional
5
6 from ._extension import load_ipython_extension
7
8 __all__ = ["get_console", "reconfigure", "print", "inspect"]
9
10 if TYPE_CHECKING:
11 from .console import Console
12
13 # Global console used by alternative print
14 _console: Optional["Console"] = None
15
16 _IMPORT_CWD = os.path.abspath(os.getcwd())
17
18
19 def get_console() -> "Console":
20 """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
21 and hasn't been explicitly given one.
22
23 Returns:
24 Console: A console instance.
25 """
26 global _console
27 if _console is None:
28 from .console import Console
29
30 _console = Console()
31
32 return _console
33
34
35 def reconfigure(*args: Any, **kwargs: Any) -> None:
36 """Reconfigures the global console by replacing it with another.
37
38 Args:
39 console (Console): Replacement console instance.
40 """
41 from rich.console import Console
42
43 new_console = Console(*args, **kwargs)
44 _console = get_console()
45 _console.__dict__ = new_console.__dict__
46
47
48 def print(
49 *objects: Any,
50 sep: str = " ",
51 end: str = "\n",
52 file: Optional[IO[str]] = None,
53 flush: bool = False,
54 ) -> None:
55 r"""Print object(s) supplied via positional arguments.
56 This function has an identical signature to the built-in print.
57 For more advanced features, see the :class:`~rich.console.Console` class.
58
59 Args:
60 sep (str, optional): Separator between printed objects. Defaults to " ".
61 end (str, optional): Character to write at end of output. Defaults to "\\n".
62 file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
63 flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
64
65 """
66 from .console import Console
67
68 write_console = get_console() if file is None else Console(file=file)
69 return write_console.print(*objects, sep=sep, end=end)
70
71
72 def print_json(
73 json: Optional[str] = None,
74 *,
75 data: Any = None,
76 indent: int = 2,
77 highlight: bool = True,
78 skip_keys: bool = False,
79 ensure_ascii: bool = True,
80 check_circular: bool = True,
81 allow_nan: bool = True,
82 default: Optional[Callable[[Any], Any]] = None,
83 sort_keys: bool = False,
84 ) -> None:
85 """Pretty prints JSON. Output will be valid JSON.
86
87 Args:
88 json (str): A string containing JSON.
89 data (Any): If json is not supplied, then encode this data.
90 indent (int, optional): Number of spaces to indent. Defaults to 2.
91 highlight (bool, optional): Enable highlighting of output: Defaults to True.
92 skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
93 ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
94 check_circular (bool, optional): Check for circular references. Defaults to True.
95 allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
96 default (Callable, optional): A callable that converts values that can not be encoded
97 in to something that can be JSON encoded. Defaults to None.
98 sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
99 """
100
101 get_console().print_json(
102 json,
103 data=data,
104 indent=indent,
105 highlight=highlight,
106 skip_keys=skip_keys,
107 ensure_ascii=ensure_ascii,
108 check_circular=check_circular,
109 allow_nan=allow_nan,
110 default=default,
111 sort_keys=sort_keys,
112 )
113
114
115 def inspect(
116 obj: Any,
117 *,
118 console: Optional["Console"] = None,
119 title: Optional[str] = None,
120 help: bool = False,
121 methods: bool = False,
122 docs: bool = True,
123 private: bool = False,
124 dunder: bool = False,
125 sort: bool = True,
126 all: bool = False,
127 value: bool = True,
128 ) -> None:
129 """Inspect any Python object.
130
131 * inspect(<OBJECT>) to see summarized info.
132 * inspect(<OBJECT>, methods=True) to see methods.
133 * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
134 * inspect(<OBJECT>, private=True) to see private attributes (single underscore).
135 * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
136 * inspect(<OBJECT>, all=True) to see all attributes.
137
138 Args:
139 obj (Any): An object to inspect.
140 title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
141 help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
142 methods (bool, optional): Enable inspection of callables. Defaults to False.
143 docs (bool, optional): Also render doc strings. Defaults to True.
144 private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
145 dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
146 sort (bool, optional): Sort attributes alphabetically. Defaults to True.
147 all (bool, optional): Show all attributes. Defaults to False.
148 value (bool, optional): Pretty print value. Defaults to True.
149 """
150 _console = console or get_console()
151 from rich._inspect import Inspect
152
153 # Special case for inspect(inspect)
154 is_inspect = obj is inspect
155
156 _inspect = Inspect(
157 obj,
158 title=title,
159 help=is_inspect or help,
160 methods=is_inspect or methods,
161 docs=is_inspect or docs,
162 private=private,
163 dunder=dunder,
164 sort=sort,
165 all=all,
166 value=value,
167 )
168 _console.print(_inspect)
169
170
171 if __name__ == "__main__": # pragma: no cover
172 print("Hello, **World**")
173
[end of rich/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rich/__init__.py b/rich/__init__.py
--- a/rich/__init__.py
+++ b/rich/__init__.py
@@ -1,7 +1,7 @@
"""Rich text and beautiful formatting in the terminal."""
import os
-from typing import Callable, IO, TYPE_CHECKING, Any, Optional
+from typing import Callable, IO, TYPE_CHECKING, Any, Optional, Union
from ._extension import load_ipython_extension
@@ -73,7 +73,7 @@
json: Optional[str] = None,
*,
data: Any = None,
- indent: int = 2,
+ indent: Union[None, int, str] = 2,
highlight: bool = True,
skip_keys: bool = False,
ensure_ascii: bool = True,
| {"golden_diff": "diff --git a/rich/__init__.py b/rich/__init__.py\n--- a/rich/__init__.py\n+++ b/rich/__init__.py\n@@ -1,7 +1,7 @@\n \"\"\"Rich text and beautiful formatting in the terminal.\"\"\"\n \n import os\n-from typing import Callable, IO, TYPE_CHECKING, Any, Optional\n+from typing import Callable, IO, TYPE_CHECKING, Any, Optional, Union\n \n from ._extension import load_ipython_extension\n \n@@ -73,7 +73,7 @@\n json: Optional[str] = None,\n *,\n data: Any = None,\n- indent: int = 2,\n+ indent: Union[None, int, str] = 2,\n highlight: bool = True,\n skip_keys: bool = False,\n ensure_ascii: bool = True,\n", "issue": "[BUG] Incorrect type for print_json indent in __init__.py\n**Describe the bug**\r\n\r\nThe type for `indent` (`indent: int = 2`) in `print_json` from the `__init__.py` file seems to be incorrect. In `console.py` it is typed as `indent: Union[None, int, str] = 2` and `print_json` in the init calls from console.py so it seems like they should have the same type.\n", "before_files": [{"content": "\"\"\"Rich text and beautiful formatting in the terminal.\"\"\"\n\nimport os\nfrom typing import Callable, IO, TYPE_CHECKING, Any, Optional\n\nfrom ._extension import load_ipython_extension\n\n__all__ = [\"get_console\", \"reconfigure\", \"print\", \"inspect\"]\n\nif TYPE_CHECKING:\n from .console import Console\n\n# Global console used by alternative print\n_console: Optional[\"Console\"] = None\n\n_IMPORT_CWD = os.path.abspath(os.getcwd())\n\n\ndef get_console() -> \"Console\":\n \"\"\"Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,\n and hasn't been explicitly given one.\n\n Returns:\n Console: A console instance.\n \"\"\"\n global _console\n if _console is None:\n from .console import Console\n\n _console = Console()\n\n return _console\n\n\ndef reconfigure(*args: Any, **kwargs: Any) -> None:\n \"\"\"Reconfigures the global console by replacing it with another.\n\n Args:\n console (Console): Replacement console instance.\n \"\"\"\n from rich.console import Console\n\n new_console = Console(*args, **kwargs)\n _console = get_console()\n _console.__dict__ = new_console.__dict__\n\n\ndef print(\n *objects: Any,\n sep: str = \" \",\n end: str = \"\\n\",\n file: Optional[IO[str]] = None,\n flush: bool = False,\n) -> None:\n r\"\"\"Print object(s) supplied via positional arguments.\n This function has an identical signature to the built-in print.\n For more advanced features, see the :class:`~rich.console.Console` class.\n\n Args:\n sep (str, optional): Separator between printed objects. Defaults to \" \".\n end (str, optional): Character to write at end of output. Defaults to \"\\\\n\".\n file (IO[str], optional): File to write to, or None for stdout. Defaults to None.\n flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.\n\n \"\"\"\n from .console import Console\n\n write_console = get_console() if file is None else Console(file=file)\n return write_console.print(*objects, sep=sep, end=end)\n\n\ndef print_json(\n json: Optional[str] = None,\n *,\n data: Any = None,\n indent: int = 2,\n highlight: bool = True,\n skip_keys: bool = False,\n ensure_ascii: bool = True,\n check_circular: bool = True,\n allow_nan: bool = True,\n default: Optional[Callable[[Any], Any]] = None,\n sort_keys: bool = False,\n) -> None:\n \"\"\"Pretty prints JSON. 
Output will be valid JSON.\n\n Args:\n json (str): A string containing JSON.\n data (Any): If json is not supplied, then encode this data.\n indent (int, optional): Number of spaces to indent. Defaults to 2.\n highlight (bool, optional): Enable highlighting of output: Defaults to True.\n skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.\n ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.\n check_circular (bool, optional): Check for circular references. Defaults to True.\n allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.\n default (Callable, optional): A callable that converts values that can not be encoded\n in to something that can be JSON encoded. Defaults to None.\n sort_keys (bool, optional): Sort dictionary keys. Defaults to False.\n \"\"\"\n\n get_console().print_json(\n json,\n data=data,\n indent=indent,\n highlight=highlight,\n skip_keys=skip_keys,\n ensure_ascii=ensure_ascii,\n check_circular=check_circular,\n allow_nan=allow_nan,\n default=default,\n sort_keys=sort_keys,\n )\n\n\ndef inspect(\n obj: Any,\n *,\n console: Optional[\"Console\"] = None,\n title: Optional[str] = None,\n help: bool = False,\n methods: bool = False,\n docs: bool = True,\n private: bool = False,\n dunder: bool = False,\n sort: bool = True,\n all: bool = False,\n value: bool = True,\n) -> None:\n \"\"\"Inspect any Python object.\n\n * inspect(<OBJECT>) to see summarized info.\n * inspect(<OBJECT>, methods=True) to see methods.\n * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.\n * inspect(<OBJECT>, private=True) to see private attributes (single underscore).\n * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.\n * inspect(<OBJECT>, all=True) to see all attributes.\n\n Args:\n obj (Any): An object to inspect.\n title (str, optional): Title to display over inspect result, or None use type. Defaults to None.\n help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.\n methods (bool, optional): Enable inspection of callables. Defaults to False.\n docs (bool, optional): Also render doc strings. Defaults to True.\n private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.\n dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.\n sort (bool, optional): Sort attributes alphabetically. Defaults to True.\n all (bool, optional): Show all attributes. Defaults to False.\n value (bool, optional): Pretty print value. Defaults to True.\n \"\"\"\n _console = console or get_console()\n from rich._inspect import Inspect\n\n # Special case for inspect(inspect)\n is_inspect = obj is inspect\n\n _inspect = Inspect(\n obj,\n title=title,\n help=is_inspect or help,\n methods=is_inspect or methods,\n docs=is_inspect or docs,\n private=private,\n dunder=dunder,\n sort=sort,\n all=all,\n value=value,\n )\n _console.print(_inspect)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n print(\"Hello, **World**\")\n", "path": "rich/__init__.py"}]} | 2,412 | 180 |
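Side note on the `indent` fix above: the widened `Union[None, int, str]` annotation matches what the standard library's `json.dumps` already accepts for `indent`. A minimal stdlib-only sketch (illustrative, not part of Rich):

```python
import json

payload = {"name": "example", "values": [1, 2, 3]}

print(json.dumps(payload, indent=None))   # None: compact, single-line output
print(json.dumps(payload, indent=2))      # int: number of spaces per level
print(json.dumps(payload, indent="\t"))   # str: used verbatim as the indent unit
```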
gh_patches_debug_33079 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-16054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to download videos from http://channel.nationalgeographic.com.
Example:
$ youtube-dl -v "http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/"
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['-v', 'http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2018.03.26.1
[debug] Python version 3.4.2 (CPython) - Linux-3.16.0-5-amd64-x86_64-with-debian-8.10
[debug] exe versions: ffmpeg 3.2.10-1, ffprobe 3.2.10-1, rtmpdump 2.4
[debug] Proxy map: {}
[generic] kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg: Requesting header
WARNING: Falling back on generic information extractor.
[generic] kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg: Downloading webpage
[generic] kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg: Extracting information
ERROR: Unsupported URL: http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/
Traceback (most recent call last):
File "/home/ant/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 785, in extract_info
ie_result = ie.extract(url)
File "/home/ant/bin/youtube-dl/youtube_dl/extractor/common.py", line 440, in extract
ie_result = self._real_extract(url)
File "/home/ant/bin/youtube-dl/youtube_dl/extractor/generic.py", line 3143, in _real_extract
raise UnsupportedError(url)
youtube_dl.utils.UnsupportedError: Unsupported URL: http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/
Thank you in advance. :)
</issue>
<code>
[start of youtube_dl/extractor/nationalgeographic.py]
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from .adobepass import AdobePassIE
7 from .theplatform import ThePlatformIE
8 from ..utils import (
9 smuggle_url,
10 url_basename,
11 update_url_query,
12 get_element_by_class,
13 )
14
15
16 class NationalGeographicVideoIE(InfoExtractor):
17 IE_NAME = 'natgeo:video'
18 _VALID_URL = r'https?://video\.nationalgeographic\.com/.*?'
19
20 _TESTS = [
21 {
22 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
23 'md5': '730855d559abbad6b42c2be1fa584917',
24 'info_dict': {
25 'id': '0000014b-70a1-dd8c-af7f-f7b559330001',
26 'ext': 'mp4',
27 'title': 'Mating Crabs Busted by Sharks',
28 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
29 'timestamp': 1423523799,
30 'upload_date': '20150209',
31 'uploader': 'NAGS',
32 },
33 'add_ie': ['ThePlatform'],
34 },
35 {
36 'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',
37 'md5': '6a3105eb448c070503b3105fb9b320b5',
38 'info_dict': {
39 'id': 'ngc-I0IauNSWznb_UV008GxSbwY35BZvgi2e',
40 'ext': 'mp4',
41 'title': 'The Real Jaws',
42 'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',
43 'timestamp': 1433772632,
44 'upload_date': '20150608',
45 'uploader': 'NAGS',
46 },
47 'add_ie': ['ThePlatform'],
48 },
49 ]
50
51 def _real_extract(self, url):
52 name = url_basename(url)
53
54 webpage = self._download_webpage(url, name)
55 guid = self._search_regex(
56 r'id="(?:videoPlayer|player-container)"[^>]+data-guid="([^"]+)"',
57 webpage, 'guid')
58
59 return {
60 '_type': 'url_transparent',
61 'ie_key': 'ThePlatform',
62 'url': smuggle_url(
63 'http://link.theplatform.com/s/ngs/media/guid/2423130747/%s?mbr=true' % guid,
64 {'force_smil_url': True}),
65 'id': guid,
66 }
67
68
69 class NationalGeographicIE(ThePlatformIE, AdobePassIE):
70 IE_NAME = 'natgeo'
71 _VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:(?:wild/)?[^/]+/)?(?:videos|episodes)/(?P<id>[^/?]+)'
72
73 _TESTS = [
74 {
75 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',
76 'md5': '518c9aa655686cf81493af5cc21e2a04',
77 'info_dict': {
78 'id': 'vKInpacll2pC',
79 'ext': 'mp4',
80 'title': 'Uncovering a Universal Knowledge',
81 'description': 'md5:1a89148475bf931b3661fcd6ddb2ae3a',
82 'timestamp': 1458680907,
83 'upload_date': '20160322',
84 'uploader': 'NEWA-FNG-NGTV',
85 },
86 'add_ie': ['ThePlatform'],
87 },
88 {
89 'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',
90 'md5': 'c4912f656b4cbe58f3e000c489360989',
91 'info_dict': {
92 'id': 'Pok5lWCkiEFA',
93 'ext': 'mp4',
94 'title': 'The Stunning Red Bird of Paradise',
95 'description': 'md5:7bc8cd1da29686be4d17ad1230f0140c',
96 'timestamp': 1459362152,
97 'upload_date': '20160330',
98 'uploader': 'NEWA-FNG-NGTV',
99 },
100 'add_ie': ['ThePlatform'],
101 },
102 {
103 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/episodes/the-power-of-miracles/',
104 'only_matching': True,
105 },
106 {
107 'url': 'http://channel.nationalgeographic.com/videos/treasures-rediscovered/',
108 'only_matching': True,
109 }
110 ]
111
112 def _real_extract(self, url):
113 display_id = self._match_id(url)
114 webpage = self._download_webpage(url, display_id)
115 release_url = self._search_regex(
116 r'video_auth_playlist_url\s*=\s*"([^"]+)"',
117 webpage, 'release url')
118 theplatform_path = self._search_regex(r'https?://link\.theplatform\.com/s/([^?]+)', release_url, 'theplatform path')
119 video_id = theplatform_path.split('/')[-1]
120 query = {
121 'mbr': 'true',
122 }
123 is_auth = self._search_regex(r'video_is_auth\s*=\s*"([^"]+)"', webpage, 'is auth', fatal=False)
124 if is_auth == 'auth':
125 auth_resource_id = self._search_regex(
126 r"video_auth_resourceId\s*=\s*'([^']+)'",
127 webpage, 'auth resource id')
128 query['auth'] = self._extract_mvpd_auth(url, video_id, 'natgeo', auth_resource_id)
129
130 formats = []
131 subtitles = {}
132 for key, value in (('switch', 'http'), ('manifest', 'm3u')):
133 tp_query = query.copy()
134 tp_query.update({
135 key: value,
136 })
137 tp_formats, tp_subtitles = self._extract_theplatform_smil(
138 update_url_query(release_url, tp_query), video_id, 'Downloading %s SMIL data' % value)
139 formats.extend(tp_formats)
140 subtitles = self._merge_subtitles(subtitles, tp_subtitles)
141 self._sort_formats(formats)
142
143 info = self._extract_theplatform_metadata(theplatform_path, display_id)
144 info.update({
145 'id': video_id,
146 'formats': formats,
147 'subtitles': subtitles,
148 'display_id': display_id,
149 })
150 return info
151
152
153 class NationalGeographicEpisodeGuideIE(InfoExtractor):
154 IE_NAME = 'natgeo:episodeguide'
155 _VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:wild/)?(?P<id>[^/]+)/episode-guide'
156 _TESTS = [
157 {
158 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/episode-guide/',
159 'info_dict': {
160 'id': 'the-story-of-god-with-morgan-freeman-season-1',
161 'title': 'The Story of God with Morgan Freeman - Season 1',
162 },
163 'playlist_mincount': 6,
164 },
165 {
166 'url': 'http://channel.nationalgeographic.com/underworld-inc/episode-guide/?s=2',
167 'info_dict': {
168 'id': 'underworld-inc-season-2',
169 'title': 'Underworld, Inc. - Season 2',
170 },
171 'playlist_mincount': 7,
172 },
173 ]
174
175 def _real_extract(self, url):
176 display_id = self._match_id(url)
177 webpage = self._download_webpage(url, display_id)
178 show = get_element_by_class('show', webpage)
179 selected_season = self._search_regex(
180 r'<div[^>]+class="select-seasons[^"]*".*?<a[^>]*>(.*?)</a>',
181 webpage, 'selected season')
182 entries = [
183 self.url_result(self._proto_relative_url(entry_url), 'NationalGeographic')
184 for entry_url in re.findall('(?s)<div[^>]+class="col-inner"[^>]*?>.*?<a[^>]+href="([^"]+)"', webpage)]
185 return self.playlist_result(
186 entries, '%s-%s' % (display_id, selected_season.lower().replace(' ', '-')),
187 '%s - %s' % (show, selected_season))
188
[end of youtube_dl/extractor/nationalgeographic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/nationalgeographic.py b/youtube_dl/extractor/nationalgeographic.py
--- a/youtube_dl/extractor/nationalgeographic.py
+++ b/youtube_dl/extractor/nationalgeographic.py
@@ -68,11 +68,11 @@
class NationalGeographicIE(ThePlatformIE, AdobePassIE):
IE_NAME = 'natgeo'
- _VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:(?:wild/)?[^/]+/)?(?:videos|episodes)/(?P<id>[^/?]+)'
+ _VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:(?:(?:wild/)?[^/]+/)?(?:videos|episodes)|u)/(?P<id>[^/?]+)'
_TESTS = [
{
- 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',
+ 'url': 'http://channel.nationalgeographic.com/u/kdi9Ld0PN2molUUIMSBGxoeDhD729KRjQcnxtetilWPMevo8ZwUBIDuPR0Q3D2LVaTsk0MPRkRWDB8ZhqWVeyoxfsZZm36yRp1j-zPfsHEyI_EgAeFY/',
'md5': '518c9aa655686cf81493af5cc21e2a04',
'info_dict': {
'id': 'vKInpacll2pC',
@@ -86,7 +86,7 @@
'add_ie': ['ThePlatform'],
},
{
- 'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',
+ 'url': 'http://channel.nationalgeographic.com/u/kdvOstqYaBY-vSBPyYgAZRUL4sWUJ5XUUPEhc7ISyBHqoIO4_dzfY3K6EjHIC0hmFXoQ7Cpzm6RkET7S3oMlm6CFnrQwSUwo/',
'md5': 'c4912f656b4cbe58f3e000c489360989',
'info_dict': {
'id': 'Pok5lWCkiEFA',
@@ -106,6 +106,14 @@
{
'url': 'http://channel.nationalgeographic.com/videos/treasures-rediscovered/',
'only_matching': True,
+ },
+ {
+ 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',
+ 'only_matching': True,
}
]
| {"golden_diff": "diff --git a/youtube_dl/extractor/nationalgeographic.py b/youtube_dl/extractor/nationalgeographic.py\n--- a/youtube_dl/extractor/nationalgeographic.py\n+++ b/youtube_dl/extractor/nationalgeographic.py\n@@ -68,11 +68,11 @@\n \n class NationalGeographicIE(ThePlatformIE, AdobePassIE):\n IE_NAME = 'natgeo'\n- _VALID_URL = r'https?://channel\\.nationalgeographic\\.com/(?:(?:wild/)?[^/]+/)?(?:videos|episodes)/(?P<id>[^/?]+)'\n+ _VALID_URL = r'https?://channel\\.nationalgeographic\\.com/(?:(?:(?:wild/)?[^/]+/)?(?:videos|episodes)|u)/(?P<id>[^/?]+)'\n \n _TESTS = [\n {\n- 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',\n+ 'url': 'http://channel.nationalgeographic.com/u/kdi9Ld0PN2molUUIMSBGxoeDhD729KRjQcnxtetilWPMevo8ZwUBIDuPR0Q3D2LVaTsk0MPRkRWDB8ZhqWVeyoxfsZZm36yRp1j-zPfsHEyI_EgAeFY/',\n 'md5': '518c9aa655686cf81493af5cc21e2a04',\n 'info_dict': {\n 'id': 'vKInpacll2pC',\n@@ -86,7 +86,7 @@\n 'add_ie': ['ThePlatform'],\n },\n {\n- 'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',\n+ 'url': 'http://channel.nationalgeographic.com/u/kdvOstqYaBY-vSBPyYgAZRUL4sWUJ5XUUPEhc7ISyBHqoIO4_dzfY3K6EjHIC0hmFXoQ7Cpzm6RkET7S3oMlm6CFnrQwSUwo/',\n 'md5': 'c4912f656b4cbe58f3e000c489360989',\n 'info_dict': {\n 'id': 'Pok5lWCkiEFA',\n@@ -106,6 +106,14 @@\n {\n 'url': 'http://channel.nationalgeographic.com/videos/treasures-rediscovered/',\n 'only_matching': True,\n+ },\n+ {\n+ 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',\n+ 'only_matching': True,\n+ },\n+ {\n+ 'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',\n+ 'only_matching': True,\n }\n ]\n", "issue": "Unable to download videos from http://channel.nationalgeographic.com.\nExample:\r\n\r\n$ youtube-dl -v \"http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/\"\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['-v', 'http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2018.03.26.1\r\n[debug] Python version 3.4.2 (CPython) - Linux-3.16.0-5-amd64-x86_64-with-debian-8.10\r\n[debug] exe versions: ffmpeg 3.2.10-1, ffprobe 3.2.10-1, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\n[generic] kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg: Requesting header\r\nWARNING: Falling back on generic information extractor.\r\n[generic] kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg: Downloading webpage\r\n[generic] kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg: Extracting information\r\nERROR: Unsupported URL: http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/\r\nTraceback (most recent call last):\r\n File \"/home/ant/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 785, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"/home/ant/bin/youtube-dl/youtube_dl/extractor/common.py\", line 440, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/home/ant/bin/youtube-dl/youtube_dl/extractor/generic.py\", line 3143, in 
_real_extract\r\n raise UnsupportedError(url)\r\nyoutube_dl.utils.UnsupportedError: Unsupported URL: http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/\r\n\r\nThank you in advance. :)\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom .adobepass import AdobePassIE\nfrom .theplatform import ThePlatformIE\nfrom ..utils import (\n smuggle_url,\n url_basename,\n update_url_query,\n get_element_by_class,\n)\n\n\nclass NationalGeographicVideoIE(InfoExtractor):\n IE_NAME = 'natgeo:video'\n _VALID_URL = r'https?://video\\.nationalgeographic\\.com/.*?'\n\n _TESTS = [\n {\n 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',\n 'md5': '730855d559abbad6b42c2be1fa584917',\n 'info_dict': {\n 'id': '0000014b-70a1-dd8c-af7f-f7b559330001',\n 'ext': 'mp4',\n 'title': 'Mating Crabs Busted by Sharks',\n 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',\n 'timestamp': 1423523799,\n 'upload_date': '20150209',\n 'uploader': 'NAGS',\n },\n 'add_ie': ['ThePlatform'],\n },\n {\n 'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',\n 'md5': '6a3105eb448c070503b3105fb9b320b5',\n 'info_dict': {\n 'id': 'ngc-I0IauNSWznb_UV008GxSbwY35BZvgi2e',\n 'ext': 'mp4',\n 'title': 'The Real Jaws',\n 'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',\n 'timestamp': 1433772632,\n 'upload_date': '20150608',\n 'uploader': 'NAGS',\n },\n 'add_ie': ['ThePlatform'],\n },\n ]\n\n def _real_extract(self, url):\n name = url_basename(url)\n\n webpage = self._download_webpage(url, name)\n guid = self._search_regex(\n r'id=\"(?:videoPlayer|player-container)\"[^>]+data-guid=\"([^\"]+)\"',\n webpage, 'guid')\n\n return {\n '_type': 'url_transparent',\n 'ie_key': 'ThePlatform',\n 'url': smuggle_url(\n 'http://link.theplatform.com/s/ngs/media/guid/2423130747/%s?mbr=true' % guid,\n {'force_smil_url': True}),\n 'id': guid,\n }\n\n\nclass NationalGeographicIE(ThePlatformIE, AdobePassIE):\n IE_NAME = 'natgeo'\n _VALID_URL = r'https?://channel\\.nationalgeographic\\.com/(?:(?:wild/)?[^/]+/)?(?:videos|episodes)/(?P<id>[^/?]+)'\n\n _TESTS = [\n {\n 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',\n 'md5': '518c9aa655686cf81493af5cc21e2a04',\n 'info_dict': {\n 'id': 'vKInpacll2pC',\n 'ext': 'mp4',\n 'title': 'Uncovering a Universal Knowledge',\n 'description': 'md5:1a89148475bf931b3661fcd6ddb2ae3a',\n 'timestamp': 1458680907,\n 'upload_date': '20160322',\n 'uploader': 'NEWA-FNG-NGTV',\n },\n 'add_ie': ['ThePlatform'],\n },\n {\n 'url': 'http://channel.nationalgeographic.com/wild/destination-wild/videos/the-stunning-red-bird-of-paradise/',\n 'md5': 'c4912f656b4cbe58f3e000c489360989',\n 'info_dict': {\n 'id': 'Pok5lWCkiEFA',\n 'ext': 'mp4',\n 'title': 'The Stunning Red Bird of Paradise',\n 'description': 'md5:7bc8cd1da29686be4d17ad1230f0140c',\n 'timestamp': 1459362152,\n 'upload_date': '20160330',\n 'uploader': 'NEWA-FNG-NGTV',\n },\n 'add_ie': ['ThePlatform'],\n },\n {\n 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/episodes/the-power-of-miracles/',\n 'only_matching': True,\n },\n {\n 'url': 'http://channel.nationalgeographic.com/videos/treasures-rediscovered/',\n 'only_matching': True,\n }\n ]\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, 
display_id)\n release_url = self._search_regex(\n r'video_auth_playlist_url\\s*=\\s*\"([^\"]+)\"',\n webpage, 'release url')\n theplatform_path = self._search_regex(r'https?://link\\.theplatform\\.com/s/([^?]+)', release_url, 'theplatform path')\n video_id = theplatform_path.split('/')[-1]\n query = {\n 'mbr': 'true',\n }\n is_auth = self._search_regex(r'video_is_auth\\s*=\\s*\"([^\"]+)\"', webpage, 'is auth', fatal=False)\n if is_auth == 'auth':\n auth_resource_id = self._search_regex(\n r\"video_auth_resourceId\\s*=\\s*'([^']+)'\",\n webpage, 'auth resource id')\n query['auth'] = self._extract_mvpd_auth(url, video_id, 'natgeo', auth_resource_id)\n\n formats = []\n subtitles = {}\n for key, value in (('switch', 'http'), ('manifest', 'm3u')):\n tp_query = query.copy()\n tp_query.update({\n key: value,\n })\n tp_formats, tp_subtitles = self._extract_theplatform_smil(\n update_url_query(release_url, tp_query), video_id, 'Downloading %s SMIL data' % value)\n formats.extend(tp_formats)\n subtitles = self._merge_subtitles(subtitles, tp_subtitles)\n self._sort_formats(formats)\n\n info = self._extract_theplatform_metadata(theplatform_path, display_id)\n info.update({\n 'id': video_id,\n 'formats': formats,\n 'subtitles': subtitles,\n 'display_id': display_id,\n })\n return info\n\n\nclass NationalGeographicEpisodeGuideIE(InfoExtractor):\n IE_NAME = 'natgeo:episodeguide'\n _VALID_URL = r'https?://channel\\.nationalgeographic\\.com/(?:wild/)?(?P<id>[^/]+)/episode-guide'\n _TESTS = [\n {\n 'url': 'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/episode-guide/',\n 'info_dict': {\n 'id': 'the-story-of-god-with-morgan-freeman-season-1',\n 'title': 'The Story of God with Morgan Freeman - Season 1',\n },\n 'playlist_mincount': 6,\n },\n {\n 'url': 'http://channel.nationalgeographic.com/underworld-inc/episode-guide/?s=2',\n 'info_dict': {\n 'id': 'underworld-inc-season-2',\n 'title': 'Underworld, Inc. - Season 2',\n },\n 'playlist_mincount': 7,\n },\n ]\n\n def _real_extract(self, url):\n display_id = self._match_id(url)\n webpage = self._download_webpage(url, display_id)\n show = get_element_by_class('show', webpage)\n selected_season = self._search_regex(\n r'<div[^>]+class=\"select-seasons[^\"]*\".*?<a[^>]*>(.*?)</a>',\n webpage, 'selected season')\n entries = [\n self.url_result(self._proto_relative_url(entry_url), 'NationalGeographic')\n for entry_url in re.findall('(?s)<div[^>]+class=\"col-inner\"[^>]*?>.*?<a[^>]+href=\"([^\"]+)\"', webpage)]\n return self.playlist_result(\n entries, '%s-%s' % (display_id, selected_season.lower().replace(' ', '-')),\n '%s - %s' % (show, selected_season))\n", "path": "youtube_dl/extractor/nationalgeographic.py"}]} | 3,932 | 719 |
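To sanity-check the broadened `_VALID_URL` from the patch above without installing youtube-dl, here is a standalone sketch; the regex is copied from the diff, the sample URLs come from the issue and the tests, and the rest is illustrative:

```python
import re

# _VALID_URL as it appears in the patched NationalGeographicIE above
VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:(?:(?:wild/)?[^/]+/)?(?:videos|episodes)|u)/(?P<id>[^/?]+)'

urls = [
    # new /u/<token>/ form reported in the issue
    'http://channel.nationalgeographic.com/u/kcOIVhcWjca1n65QtmFg_5vIMZ9j1S1CXT46o65HkAANx6SUvJvQAQfYjGC0CkQwGNSgnX54f2aoFg/',
    # older /<show>/videos/<slug>/ form
    'http://channel.nationalgeographic.com/the-story-of-god-with-morgan-freeman/videos/uncovering-a-universal-knowledge/',
]

for url in urls:
    match = re.match(VALID_URL, url)
    print(url, '->', match.group('id') if match else 'no match')
```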
gh_patches_debug_1103 | rasdani/github-patches | git_diff | Pylons__pyramid-2226 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update to Sphinx 1.3.4 when released
There is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.
When 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.
</issue>
<code>
[start of setup.py]
1 ##############################################################################
2 #
3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors.
4 # All Rights Reserved.
5 #
6 # This software is subject to the provisions of the BSD-like license at
7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
11 # FITNESS FOR A PARTICULAR PURPOSE
12 #
13 ##############################################################################
14
15 import os
16 import sys
17
18 from setuptools import setup, find_packages
19
20 py_version = sys.version_info[:2]
21
22 PY3 = py_version[0] == 3
23
24 if PY3:
25 if py_version < (3, 2):
26 raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')
27 else:
28 if py_version < (2, 6):
29 raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')
30
31 here = os.path.abspath(os.path.dirname(__file__))
32 try:
33 with open(os.path.join(here, 'README.rst')) as f:
34 README = f.read()
35 with open(os.path.join(here, 'CHANGES.txt')) as f:
36 CHANGES = f.read()
37 except IOError:
38 README = CHANGES = ''
39
40 install_requires=[
41 'setuptools',
42 'WebOb >= 1.3.1', # request.domain and CookieProfile
43 'repoze.lru >= 0.4', # py3 compat
44 'zope.interface >= 3.8.0', # has zope.interface.registry
45 'zope.deprecation >= 3.5.0', # py3 compat
46 'venusian >= 1.0a3', # ``ignore``
47 'translationstring >= 0.4', # py3 compat
48 'PasteDeploy >= 1.5.0', # py3 compat
49 ]
50
51 tests_require = [
52 'WebTest >= 1.3.1', # py3 compat
53 ]
54
55 if not PY3:
56 tests_require.append('zope.component>=3.11.0')
57
58 docs_extras = [
59 'Sphinx >= 1.3.1',
60 'docutils',
61 'repoze.sphinx.autointerface',
62 'pylons_sphinx_latesturl',
63 'pylons-sphinx-themes',
64 'sphinxcontrib-programoutput',
65 ]
66
67 testing_extras = tests_require + [
68 'nose',
69 'coverage',
70 'virtualenv', # for scaffolding tests
71 ]
72
73 setup(name='pyramid',
74 version='1.5.8',
75 description='The Pyramid Web Framework, a Pylons project',
76 long_description=README + '\n\n' + CHANGES,
77 classifiers=[
78 "Intended Audience :: Developers",
79 "Programming Language :: Python",
80 "Programming Language :: Python :: 2.6",
81 "Programming Language :: Python :: 2.7",
82 "Programming Language :: Python :: 3",
83 "Programming Language :: Python :: 3.2",
84 "Programming Language :: Python :: 3.3",
85 "Programming Language :: Python :: 3.4",
86 "Programming Language :: Python :: 3.5",
87 "Programming Language :: Python :: Implementation :: CPython",
88 "Programming Language :: Python :: Implementation :: PyPy",
89 "Framework :: Pyramid",
90 "Topic :: Internet :: WWW/HTTP",
91 "Topic :: Internet :: WWW/HTTP :: WSGI",
92 "License :: Repoze Public License",
93 ],
94 keywords='web wsgi pylons pyramid',
95 author="Chris McDonough, Agendaless Consulting",
96 author_email="[email protected]",
97 url="http://docs.pylonsproject.org/en/latest/docs/pyramid.html",
98 license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
99 packages=find_packages(),
100 include_package_data=True,
101 zip_safe=False,
102 install_requires = install_requires,
103 extras_require = {
104 'testing':testing_extras,
105 'docs':docs_extras,
106 },
107 tests_require = tests_require,
108 test_suite="pyramid.tests",
109 entry_points = """\
110 [pyramid.scaffold]
111 starter=pyramid.scaffolds:StarterProjectTemplate
112 zodb=pyramid.scaffolds:ZODBProjectTemplate
113 alchemy=pyramid.scaffolds:AlchemyProjectTemplate
114 [console_scripts]
115 pcreate = pyramid.scripts.pcreate:main
116 pserve = pyramid.scripts.pserve:main
117 pshell = pyramid.scripts.pshell:main
118 proutes = pyramid.scripts.proutes:main
119 pviews = pyramid.scripts.pviews:main
120 ptweens = pyramid.scripts.ptweens:main
121 prequest = pyramid.scripts.prequest:main
122 pdistreport = pyramid.scripts.pdistreport:main
123 [paste.server_runner]
124 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner
125 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner
126 """
127 )
128
129
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
tests_require.append('zope.component>=3.11.0')
docs_extras = [
- 'Sphinx >= 1.3.1',
+ 'Sphinx >= 1.3.4',
'docutils',
'repoze.sphinx.autointerface',
'pylons_sphinx_latesturl',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n tests_require.append('zope.component>=3.11.0')\n \n docs_extras = [\n- 'Sphinx >= 1.3.1',\n+ 'Sphinx >= 1.3.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Update to Sphinx 1.3.4 when released\nThere is a [bug in Sphinx 1.3.3 and 1.3.1](https://github.com/sphinx-doc/sphinx/issues/2189) (I haven't tried 1.3.2) where next and previous links in Sphinx documentation are broken when going into children and across sibling directories.\n\nWhen 1.3.4 is released, we need to pin sphinx to 1.3.4, which will include the commit made 8 days after the 1.3.3 release.\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\npy_version = sys.version_info[:2]\n\nPY3 = py_version[0] == 3\n\nif PY3:\n if py_version < (3, 2):\n raise RuntimeError('On Python 3, Pyramid requires Python 3.2 or better')\nelse:\n if py_version < (2, 6):\n raise RuntimeError('On Python 2, Pyramid requires Python 2.6 or better')\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires=[\n 'setuptools',\n 'WebOb >= 1.3.1', # request.domain and CookieProfile\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n ]\n\nif not PY3:\n tests_require.append('zope.component>=3.11.0')\n\ndocs_extras = [\n 'Sphinx >= 1.3.1',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-programoutput',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.5.8',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic 
:: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"http://docs.pylonsproject.org/en/latest/docs/pyramid.html\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = install_requires,\n extras_require = {\n 'testing':testing_extras,\n 'docs':docs_extras,\n },\n tests_require = tests_require,\n test_suite=\"pyramid.tests\",\n entry_points = \"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n\n", "path": "setup.py"}]} | 2,055 | 106 |
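The fix above is only a dependency pin; if you want to confirm at runtime that the Sphinx in the current environment satisfies it, a small illustrative check with setuptools' pkg_resources (not part of the Pyramid patch):

```python
import pkg_resources

required = pkg_resources.parse_version("1.3.4")
installed = pkg_resources.get_distribution("Sphinx").parsed_version

print("Sphinx", installed, "satisfies 'Sphinx >= 1.3.4':", installed >= required)
```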
gh_patches_debug_9329 | rasdani/github-patches | git_diff | pypa__pip-481 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad error message on malformed VCS string
The item in my requirements.txt:
```
git://github.com/alex/django-fixture-generator.git#egg=fixture_generator
```
The resulting error message:
``` python
Downloading/unpacking fixture-generator from git://github.com/alex/django-fixture-generator.git (from -r requirements/development.txt (line 3))
Exception:
Traceback (most recent call last):
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/basecommand.py", line 126, in main
self.run(options, args)
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/commands/install.py", line 223, in run
requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/req.py", line 961, in prepare_files
self.unpack_url(url, location, self.is_download)
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/req.py", line 1073, in unpack_url
return unpack_vcs_link(link, location, only_download)
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/download.py", line 293, in unpack_vcs_link
vcs_backend.unpack(location)
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py", line 225, in unpack
self.obtain(location)
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/git.py", line 97, in obtain
url, rev = self.get_url_rev()
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/git.py", line 183, in get_url_rev
url, rev = super(Git, self).get_url_rev()
File "/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py", line 117, in get_url_rev
url = self.url.split('+', 1)[1]
IndexError: list index out of range
```
</issue>
<code>
[start of pip/vcs/__init__.py]
1 """Handles all VCS (version control) support"""
2
3 import os
4 import shutil
5
6 from pip.backwardcompat import urlparse, urllib
7 from pip.log import logger
8 from pip.util import (display_path, backup_dir, find_command,
9 ask, rmtree, ask_path_exists)
10
11
12 __all__ = ['vcs', 'get_src_requirement']
13
14
15 class VcsSupport(object):
16 _registry = {}
17 schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
18
19 def __init__(self):
20 # Register more schemes with urlparse for various version control systems
21 urlparse.uses_netloc.extend(self.schemes)
22 urlparse.uses_fragment.extend(self.schemes)
23 super(VcsSupport, self).__init__()
24
25 def __iter__(self):
26 return self._registry.__iter__()
27
28 @property
29 def backends(self):
30 return list(self._registry.values())
31
32 @property
33 def dirnames(self):
34 return [backend.dirname for backend in self.backends]
35
36 @property
37 def all_schemes(self):
38 schemes = []
39 for backend in self.backends:
40 schemes.extend(backend.schemes)
41 return schemes
42
43 def register(self, cls):
44 if not hasattr(cls, 'name'):
45 logger.warn('Cannot register VCS %s' % cls.__name__)
46 return
47 if cls.name not in self._registry:
48 self._registry[cls.name] = cls
49
50 def unregister(self, cls=None, name=None):
51 if name in self._registry:
52 del self._registry[name]
53 elif cls in self._registry.values():
54 del self._registry[cls.name]
55 else:
56 logger.warn('Cannot unregister because no class or name given')
57
58 def get_backend_name(self, location):
59 """
60 Return the name of the version control backend if found at given
61 location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')
62 """
63 for vc_type in self._registry.values():
64 path = os.path.join(location, vc_type.dirname)
65 if os.path.exists(path):
66 return vc_type.name
67 return None
68
69 def get_backend(self, name):
70 name = name.lower()
71 if name in self._registry:
72 return self._registry[name]
73
74 def get_backend_from_location(self, location):
75 vc_type = self.get_backend_name(location)
76 if vc_type:
77 return self.get_backend(vc_type)
78 return None
79
80
81 vcs = VcsSupport()
82
83
84 class VersionControl(object):
85 name = ''
86 dirname = ''
87
88 def __init__(self, url=None, *args, **kwargs):
89 self.url = url
90 self._cmd = None
91 super(VersionControl, self).__init__(*args, **kwargs)
92
93 def _filter(self, line):
94 return (logger.INFO, line)
95
96 def _is_local_repository(self, repo):
97 """
98 posix absolute paths start with os.path.sep,
99 win32 ones ones start with drive (like c:\\folder)
100 """
101 drive, tail = os.path.splitdrive(repo)
102 return repo.startswith(os.path.sep) or drive
103
104 @property
105 def cmd(self):
106 if self._cmd is not None:
107 return self._cmd
108 command = find_command(self.name)
109 logger.info('Found command %r at %r' % (self.name, command))
110 self._cmd = command
111 return command
112
113 def get_url_rev(self):
114 """
115 Returns the correct repository URL and revision by parsing the given
116 repository URL
117 """
118 url = self.url.split('+', 1)[1]
119 scheme, netloc, path, query, frag = urlparse.urlsplit(url)
120 rev = None
121 if '@' in path:
122 path, rev = path.rsplit('@', 1)
123 url = urlparse.urlunsplit((scheme, netloc, path, query, ''))
124 return url, rev
125
126 def get_info(self, location):
127 """
128 Returns (url, revision), where both are strings
129 """
130 assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
131 return self.get_url(location), self.get_revision(location)
132
133 def normalize_url(self, url):
134 """
135 Normalize a URL for comparison by unquoting it and removing any trailing slash.
136 """
137 return urllib.unquote(url).rstrip('/')
138
139 def compare_urls(self, url1, url2):
140 """
141 Compare two repo URLs for identity, ignoring incidental differences.
142 """
143 return (self.normalize_url(url1) == self.normalize_url(url2))
144
145 def parse_vcs_bundle_file(self, content):
146 """
147 Takes the contents of the bundled text file that explains how to revert
148 the stripped off version control data of the given package and returns
149 the URL and revision of it.
150 """
151 raise NotImplementedError
152
153 def obtain(self, dest):
154 """
155 Called when installing or updating an editable package, takes the
156 source path of the checkout.
157 """
158 raise NotImplementedError
159
160 def switch(self, dest, url, rev_options):
161 """
162 Switch the repo at ``dest`` to point to ``URL``.
163 """
164 raise NotImplemented
165
166 def update(self, dest, rev_options):
167 """
168 Update an already-existing repo to the given ``rev_options``.
169 """
170 raise NotImplementedError
171
172 def check_destination(self, dest, url, rev_options, rev_display):
173 """
174 Prepare a location to receive a checkout/clone.
175
176 Return True if the location is ready for (and requires) a
177 checkout/clone, False otherwise.
178 """
179 checkout = True
180 prompt = False
181 if os.path.exists(dest):
182 checkout = False
183 if os.path.exists(os.path.join(dest, self.dirname)):
184 existing_url = self.get_url(dest)
185 if self.compare_urls(existing_url, url):
186 logger.info('%s in %s exists, and has correct URL (%s)' %
187 (self.repo_name.title(), display_path(dest),
188 url))
189 logger.notify('Updating %s %s%s' %
190 (display_path(dest), self.repo_name,
191 rev_display))
192 self.update(dest, rev_options)
193 else:
194 logger.warn('%s %s in %s exists with URL %s' %
195 (self.name, self.repo_name,
196 display_path(dest), existing_url))
197 prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
198 ('s', 'i', 'w', 'b'))
199 else:
200 logger.warn('Directory %s already exists, '
201 'and is not a %s %s.' %
202 (dest, self.name, self.repo_name))
203 prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
204 if prompt:
205 logger.warn('The plan is to install the %s repository %s' %
206 (self.name, url))
207 response = ask_path_exists('What to do? %s' % prompt[0],
208 prompt[1])
209
210 if response == 's':
211 logger.notify('Switching %s %s to %s%s' %
212 (self.repo_name, display_path(dest), url,
213 rev_display))
214 self.switch(dest, url, rev_options)
215 elif response == 'i':
216 # do nothing
217 pass
218 elif response == 'w':
219 logger.warn('Deleting %s' % display_path(dest))
220 rmtree(dest)
221 checkout = True
222 elif response == 'b':
223 dest_dir = backup_dir(dest)
224 logger.warn('Backing up %s to %s'
225 % (display_path(dest), dest_dir))
226 shutil.move(dest, dest_dir)
227 checkout = True
228 return checkout
229
230 def unpack(self, location):
231 if os.path.exists(location):
232 rmtree(location)
233 self.obtain(location)
234
235 def get_src_requirement(self, dist, location, find_tags=False):
236 raise NotImplementedError
237
238
239 def get_src_requirement(dist, location, find_tags):
240 version_control = vcs.get_backend_from_location(location)
241 if version_control:
242 return version_control().get_src_requirement(dist, location, find_tags)
243 logger.warn('cannot determine version of editable source in %s (is not SVN checkout, Git clone, Mercurial clone or Bazaar branch)' % location)
244 return dist.as_requirement()
245
[end of pip/vcs/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pip/vcs/__init__.py b/pip/vcs/__init__.py
--- a/pip/vcs/__init__.py
+++ b/pip/vcs/__init__.py
@@ -115,6 +115,9 @@
Returns the correct repository URL and revision by parsing the given
repository URL
"""
+ error_message= "Sorry, '{}' is a malformed url. In requirements files, the \
+format is <vcs>+<protocol>://<url>, e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
+ assert '+' in self.url, error_message.format(self.url)
url = self.url.split('+', 1)[1]
scheme, netloc, path, query, frag = urlparse.urlsplit(url)
rev = None
| {"golden_diff": "diff --git a/pip/vcs/__init__.py b/pip/vcs/__init__.py\n--- a/pip/vcs/__init__.py\n+++ b/pip/vcs/__init__.py\n@@ -115,6 +115,9 @@\n Returns the correct repository URL and revision by parsing the given\n repository URL\n \"\"\"\n+ error_message= \"Sorry, '{}' is a malformed url. In requirements files, the \\\n+format is <vcs>+<protocol>://<url>, e.g. svn+http://myrepo/svn/MyApp#egg=MyApp\"\n+ assert '+' in self.url, error_message.format(self.url)\n url = self.url.split('+', 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n", "issue": "Bad error message on malformed VCS string\nThe item in my requirements.txt:\n\n```\ngit://github.com/alex/django-fixture-generator.git#egg=fixture_generator\n```\n\nThe resulting error message:\n\n``` python\nDownloading/unpacking fixture-generator from git://github.com/alex/django-fixture-generator.git (from -r requirements/development.txt (line 3))\nException:\nTraceback (most recent call last):\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/basecommand.py\", line 126, in main\n self.run(options, args)\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/commands/install.py\", line 223, in run\n requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/req.py\", line 961, in prepare_files\n self.unpack_url(url, location, self.is_download)\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/req.py\", line 1073, in unpack_url\n return unpack_vcs_link(link, location, only_download)\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/download.py\", line 293, in unpack_vcs_link\n vcs_backend.unpack(location)\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py\", line 225, in unpack\n self.obtain(location)\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/git.py\", line 97, in obtain\n url, rev = self.get_url_rev()\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/git.py\", line 183, in get_url_rev\n url, rev = super(Git, self).get_url_rev()\n File \"/home/alex/.virtualenvs/tracebin/local/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/vcs/__init__.py\", line 117, in get_url_rev\n url = self.url.split('+', 1)[1]\nIndexError: list index out of range\n```\n\n", "before_files": [{"content": "\"\"\"Handles all VCS (version control) support\"\"\"\n\nimport os\nimport shutil\n\nfrom pip.backwardcompat import urlparse, urllib\nfrom pip.log import logger\nfrom pip.util import (display_path, backup_dir, find_command,\n ask, rmtree, ask_path_exists)\n\n\n__all__ = ['vcs', 'get_src_requirement']\n\n\nclass VcsSupport(object):\n _registry = {}\n schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']\n\n def __init__(self):\n # Register more schemes with urlparse for various version control systems\n urlparse.uses_netloc.extend(self.schemes)\n urlparse.uses_fragment.extend(self.schemes)\n super(VcsSupport, self).__init__()\n\n def __iter__(self):\n return self._registry.__iter__()\n\n @property\n def backends(self):\n return list(self._registry.values())\n\n @property\n def dirnames(self):\n return [backend.dirname for backend in 
self.backends]\n\n @property\n def all_schemes(self):\n schemes = []\n for backend in self.backends:\n schemes.extend(backend.schemes)\n return schemes\n\n def register(self, cls):\n if not hasattr(cls, 'name'):\n logger.warn('Cannot register VCS %s' % cls.__name__)\n return\n if cls.name not in self._registry:\n self._registry[cls.name] = cls\n\n def unregister(self, cls=None, name=None):\n if name in self._registry:\n del self._registry[name]\n elif cls in self._registry.values():\n del self._registry[cls.name]\n else:\n logger.warn('Cannot unregister because no class or name given')\n\n def get_backend_name(self, location):\n \"\"\"\n Return the name of the version control backend if found at given\n location, e.g. vcs.get_backend_name('/path/to/vcs/checkout')\n \"\"\"\n for vc_type in self._registry.values():\n path = os.path.join(location, vc_type.dirname)\n if os.path.exists(path):\n return vc_type.name\n return None\n\n def get_backend(self, name):\n name = name.lower()\n if name in self._registry:\n return self._registry[name]\n\n def get_backend_from_location(self, location):\n vc_type = self.get_backend_name(location)\n if vc_type:\n return self.get_backend(vc_type)\n return None\n\n\nvcs = VcsSupport()\n\n\nclass VersionControl(object):\n name = ''\n dirname = ''\n\n def __init__(self, url=None, *args, **kwargs):\n self.url = url\n self._cmd = None\n super(VersionControl, self).__init__(*args, **kwargs)\n\n def _filter(self, line):\n return (logger.INFO, line)\n\n def _is_local_repository(self, repo):\n \"\"\"\n posix absolute paths start with os.path.sep,\n win32 ones ones start with drive (like c:\\\\folder)\n \"\"\"\n drive, tail = os.path.splitdrive(repo)\n return repo.startswith(os.path.sep) or drive\n\n @property\n def cmd(self):\n if self._cmd is not None:\n return self._cmd\n command = find_command(self.name)\n logger.info('Found command %r at %r' % (self.name, command))\n self._cmd = command\n return command\n\n def get_url_rev(self):\n \"\"\"\n Returns the correct repository URL and revision by parsing the given\n repository URL\n \"\"\"\n url = self.url.split('+', 1)[1]\n scheme, netloc, path, query, frag = urlparse.urlsplit(url)\n rev = None\n if '@' in path:\n path, rev = path.rsplit('@', 1)\n url = urlparse.urlunsplit((scheme, netloc, path, query, ''))\n return url, rev\n\n def get_info(self, location):\n \"\"\"\n Returns (url, revision), where both are strings\n \"\"\"\n assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location\n return self.get_url(location), self.get_revision(location)\n\n def normalize_url(self, url):\n \"\"\"\n Normalize a URL for comparison by unquoting it and removing any trailing slash.\n \"\"\"\n return urllib.unquote(url).rstrip('/')\n\n def compare_urls(self, url1, url2):\n \"\"\"\n Compare two repo URLs for identity, ignoring incidental differences.\n \"\"\"\n return (self.normalize_url(url1) == self.normalize_url(url2))\n\n def parse_vcs_bundle_file(self, content):\n \"\"\"\n Takes the contents of the bundled text file that explains how to revert\n the stripped off version control data of the given package and returns\n the URL and revision of it.\n \"\"\"\n raise NotImplementedError\n\n def obtain(self, dest):\n \"\"\"\n Called when installing or updating an editable package, takes the\n source path of the checkout.\n \"\"\"\n raise NotImplementedError\n\n def switch(self, dest, url, rev_options):\n \"\"\"\n Switch the repo at ``dest`` to point to ``URL``.\n \"\"\"\n raise NotImplemented\n\n def 
update(self, dest, rev_options):\n \"\"\"\n Update an already-existing repo to the given ``rev_options``.\n \"\"\"\n raise NotImplementedError\n\n def check_destination(self, dest, url, rev_options, rev_display):\n \"\"\"\n Prepare a location to receive a checkout/clone.\n\n Return True if the location is ready for (and requires) a\n checkout/clone, False otherwise.\n \"\"\"\n checkout = True\n prompt = False\n if os.path.exists(dest):\n checkout = False\n if os.path.exists(os.path.join(dest, self.dirname)):\n existing_url = self.get_url(dest)\n if self.compare_urls(existing_url, url):\n logger.info('%s in %s exists, and has correct URL (%s)' %\n (self.repo_name.title(), display_path(dest),\n url))\n logger.notify('Updating %s %s%s' %\n (display_path(dest), self.repo_name,\n rev_display))\n self.update(dest, rev_options)\n else:\n logger.warn('%s %s in %s exists with URL %s' %\n (self.name, self.repo_name,\n display_path(dest), existing_url))\n prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',\n ('s', 'i', 'w', 'b'))\n else:\n logger.warn('Directory %s already exists, '\n 'and is not a %s %s.' %\n (dest, self.name, self.repo_name))\n prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))\n if prompt:\n logger.warn('The plan is to install the %s repository %s' %\n (self.name, url))\n response = ask_path_exists('What to do? %s' % prompt[0],\n prompt[1])\n\n if response == 's':\n logger.notify('Switching %s %s to %s%s' %\n (self.repo_name, display_path(dest), url,\n rev_display))\n self.switch(dest, url, rev_options)\n elif response == 'i':\n # do nothing\n pass\n elif response == 'w':\n logger.warn('Deleting %s' % display_path(dest))\n rmtree(dest)\n checkout = True\n elif response == 'b':\n dest_dir = backup_dir(dest)\n logger.warn('Backing up %s to %s'\n % (display_path(dest), dest_dir))\n shutil.move(dest, dest_dir)\n checkout = True\n return checkout\n\n def unpack(self, location):\n if os.path.exists(location):\n rmtree(location)\n self.obtain(location)\n\n def get_src_requirement(self, dist, location, find_tags=False):\n raise NotImplementedError\n\n\ndef get_src_requirement(dist, location, find_tags):\n version_control = vcs.get_backend_from_location(location)\n if version_control:\n return version_control().get_src_requirement(dist, location, find_tags)\n logger.warn('cannot determine version of editable source in %s (is not SVN checkout, Git clone, Mercurial clone or Bazaar branch)' % location)\n return dist.as_requirement()\n", "path": "pip/vcs/__init__.py"}]} | 3,646 | 180 |
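To see why the original `get_url_rev` raised a bare IndexError for the issue's URL and how the added assertion changes that, here is a standalone reconstruction; the parsing lines mirror the method above (pip 1.x resolved `urlparse` through its backwardcompat shim), while the wrapper function and sample calls are illustrative:

```python
from urllib.parse import urlsplit, urlunsplit

def get_url_rev(url):
    # Patched behaviour: fail with a readable message instead of an IndexError.
    error_message = (
        "Sorry, '{}' is a malformed url. In requirements files, the format is "
        "<vcs>+<protocol>://<url>, e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
    ).format(url)
    assert '+' in url, error_message
    url = url.split('+', 1)[1]        # the line that raised IndexError before the patch
    scheme, netloc, path, query, frag = urlsplit(url)
    rev = None
    if '@' in path:
        path, rev = path.rsplit('@', 1)
    return urlunsplit((scheme, netloc, path, query, '')), rev

# A correctly prefixed requirement parses fine:
print(get_url_rev('git+git://github.com/alex/django-fixture-generator.git#egg=fixture_generator'))

# The unprefixed URL from the issue now trips the assertion with a clear message:
# get_url_rev('git://github.com/alex/django-fixture-generator.git#egg=fixture_generator')
```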
gh_patches_debug_20865 | rasdani/github-patches | git_diff | nilearn__nilearn-742 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve task mask pattern in examples
``` python
classification_target = stimuli[
np.logical_not(resting_state)] == category
```
From #740, @AlexandreAbraham said:
> I already found this line hard to understand before (I particularly dislike playing with the priority of operators = and ==) but now the new_line make it even more obscure. Maybe we could change by:
>
> ```
> task_mask = np.logical_not(resting_state) # or sample_mask?
> classification_target = (stimuli[task_mask] == category)
> ```
Quickly git-grepping it looks like this pattern is both present in:
examples/decoding/plot_haxby_different_estimators.py
examples/decoding/plot_haxby_full_analysis.py
They may be other places where this is used.
</issue>
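For illustration, a minimal sketch of the refactor suggested in the quoted comment (the array values below are invented purely to make the snippet runnable; only the `task_mask` pattern itself comes from the issue):

```python
import numpy as np

# Hypothetical stand-ins for the Haxby labels used in the examples below.
stimuli = np.array([b'rest', b'face', b'house', b'rest', b'face'])
resting_state = stimuli == b'rest'

# Name the mask once, then index with it, instead of nesting the call
# inside the comparison.
task_mask = np.logical_not(resting_state)
classification_target = (stimuli[task_mask] == b'face')
print(classification_target)  # [ True False  True]
```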
<code>
[start of examples/decoding/plot_haxby_different_estimators.py]
1 """
2 Different classifiers in decoding the Haxby dataset
3 =====================================================
4
5 Here we compare different classifiers on a visual object recognition
6 decoding task.
7 """
8
9 import time
10
11 ### Fetch data using nilearn dataset fetcher ################################
12 from nilearn import datasets
13 haxby_dataset = datasets.fetch_haxby(n_subjects=1)
14
15 # print basic information on the dataset
16 print('First subject anatomical nifti image (3D) located is at: %s' %
17 haxby_dataset.anat[0])
18 print('First subject functional nifti image (4D) is located at: %s' %
19 haxby_dataset.func[0])
20
21 # load labels
22 import numpy as np
23 labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
24 stimuli = labels['labels']
25 # identify resting state labels in order to be able to remove them
26 resting_state = stimuli == b'rest'
27
28 # find names of remaining active labels
29 categories = np.unique(stimuli[np.logical_not(resting_state)])
30
31 # extract tags indicating to which acquisition run a tag belongs
32 session_labels = labels["chunks"][np.logical_not(resting_state)]
33
34 # Load the fMRI data
35 from nilearn.input_data import NiftiMasker
36
37 # For decoding, standardizing is often very important
38 mask_filename = haxby_dataset.mask_vt[0]
39 masker = NiftiMasker(mask_img=mask_filename, standardize=True)
40 func_filename = haxby_dataset.func[0]
41 masked_timecourses = masker.fit_transform(
42 func_filename)[np.logical_not(resting_state)]
43
44 # Classifiers definition
45
46 # A support vector classifier
47 from sklearn.svm import SVC
48 svm = SVC(C=1., kernel="linear")
49
50 from sklearn.grid_search import GridSearchCV
51 # GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that
52 # can significantly speed up the fitting process on computers with
53 # multiple cores
54 svm_cv = GridSearchCV(SVC(C=1., kernel="linear"),
55 param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
56 scoring='f1')
57
58 # The logistic regression
59 from sklearn.linear_model import LogisticRegression, RidgeClassifier, \
60 RidgeClassifierCV
61 logistic = LogisticRegression(C=1., penalty="l1")
62 logistic_50 = LogisticRegression(C=50., penalty="l1")
63 logistic_l2 = LogisticRegression(C=1., penalty="l2")
64
65 logistic_cv = GridSearchCV(LogisticRegression(C=1., penalty="l1"),
66 param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
67 scoring='f1')
68 logistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty="l2"),
69 param_grid={
70 'C': [.1, .5, 1., 5., 10., 50., 100.]},
71 scoring='f1')
72
73 ridge = RidgeClassifier()
74 ridge_cv = RidgeClassifierCV()
75
76
77 # Make a data splitting object for cross validation
78 from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
79 cv = LeaveOneLabelOut(session_labels)
80
81 classifiers = {'SVC': svm,
82 'SVC cv': svm_cv,
83 'log l1': logistic,
84 'log l1 50': logistic_50,
85 'log l1 cv': logistic_cv,
86 'log l2': logistic_l2,
87 'log l2 cv': logistic_l2_cv,
88 'ridge': ridge,
89 'ridge cv': ridge_cv}
90
91 classifiers_scores = {}
92
93 for classifier_name, classifier in sorted(classifiers.items()):
94 classifiers_scores[classifier_name] = {}
95 print(70 * '_')
96
97 for category in categories:
98 classification_target = stimuli[
99 np.logical_not(resting_state)] == category
100 t0 = time.time()
101 classifiers_scores[classifier_name][category] = cross_val_score(
102 classifier,
103 masked_timecourses,
104 classification_target,
105 cv=cv, scoring="f1")
106
107 print("%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs" % (
108 classifier_name, category,
109 classifiers_scores[classifier_name][category].mean(),
110 classifiers_scores[classifier_name][category].std(),
111 time.time() - t0))
112
113 ###############################################################################
114 # make a rudimentary diagram
115 import matplotlib.pyplot as plt
116 plt.figure()
117
118 tick_position = np.arange(len(categories))
119 plt.xticks(tick_position, categories, rotation=45)
120
121 for color, classifier_name in zip(
122 ['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'],
123 sorted(classifiers)):
124 score_means = [classifiers_scores[classifier_name][category].mean()
125 for category in categories]
126 plt.bar(tick_position, score_means, label=classifier_name,
127 width=.11, color=color)
128 tick_position = tick_position + .09
129
130 plt.ylabel('Classification accurancy (f1 score)')
131 plt.xlabel('Visual stimuli category')
132 plt.ylim(ymin=0)
133 plt.legend(loc='lower center', ncol=3)
134 plt.title('Category-specific classification accuracy for different classifiers')
135 plt.tight_layout()
136
137 ###############################################################################
138 # Plot the face vs house map for the different estimators
139
140 # use the average EPI as a background
141 from nilearn import image
142 mean_epi_img = image.mean_img(func_filename)
143
144 # Restrict the decoding to face vs house
145 condition_mask = np.logical_or(stimuli == b'face', stimuli == b'house')
146 masked_timecourses = masked_timecourses[
147 condition_mask[np.logical_not(resting_state)]]
148 stimuli = stimuli[condition_mask]
149 # Transform the stimuli to binary values
150 stimuli = (stimuli == b'face').astype(np.int)
151
152 from nilearn.plotting import plot_stat_map, show
153
154 for classifier_name, classifier in sorted(classifiers.items()):
155 classifier.fit(masked_timecourses, stimuli)
156
157 if hasattr(classifier, 'coef_'):
158 weights = classifier.coef_[0]
159 elif hasattr(classifier, 'best_estimator_'):
160 weights = classifier.best_estimator_.coef_[0]
161 else:
162 continue
163 weight_img = masker.inverse_transform(weights)
164 weight_map = weight_img.get_data()
165 threshold = np.max(np.abs(weight_map)) * 1e-3
166 plot_stat_map(weight_img, bg_img=mean_epi_img,
167 display_mode='z', cut_coords=[-17],
168 threshold=threshold,
169 title='%s: face vs house' % classifier_name)
170
171 show()
172
[end of examples/decoding/plot_haxby_different_estimators.py]
[start of examples/decoding/plot_haxby_full_analysis.py]
1 """
2 ROI-based decoding analysis in Haxby et al. dataset
3 =====================================================
4
5 In this script we reproduce the data analysis conducted by
6 Haxby et al. in "Distributed and Overlapping Representations of Faces and
7 Objects in Ventral Temporal Cortex".
8
9 Specifically, we look at decoding accuracy for different objects in
10 three different masks: the full ventral stream (mask_vt), the house
11 selective areas (mask_house) and the face selective areas (mask_face),
12 that have been defined via a standard GLM-based analysis.
13
14 """
15
16
17 ### Fetch data using nilearn dataset fetcher ################################
18 from nilearn import datasets
19 haxby_dataset = datasets.fetch_haxby(n_subjects=1)
20
21 # print basic information on the dataset
22 print('First subject anatomical nifti image (3D) located is at: %s' %
23 haxby_dataset.anat[0])
24 print('First subject functional nifti image (4D) is located at: %s' %
25 haxby_dataset.func[0])
26
27 # Load nilearn NiftiMasker, the practical masking and unmasking tool
28 from nilearn.input_data import NiftiMasker
29
30 # load labels
31 import numpy as np
32 labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
33 stimuli = labels['labels']
34
35 # identify resting state labels in order to be able to remove them
36 resting_state = stimuli == b"rest"
37
38 # find names of remaining active labels
39 categories = np.unique(stimuli[np.logical_not(resting_state)])
40
41 # extract tags indicating to which acquisition run a tag belongs
42 session_labels = labels["chunks"][np.logical_not(resting_state)]
43
44 # The classifier: a support vector classifier
45 from sklearn.svm import SVC
46 classifier = SVC(C=1., kernel="linear")
47
48 # A classifier to set the chance level
49 from sklearn.dummy import DummyClassifier
50 dummy_classifier = DummyClassifier()
51
52 # Make a data splitting object for cross validation
53 from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
54 cv = LeaveOneLabelOut(session_labels)
55
56 func_filename = haxby_dataset.func[0]
57 mask_names = ['mask_vt', 'mask_face', 'mask_house']
58
59 mask_scores = {}
60 mask_chance_scores = {}
61
62 for mask_name in mask_names:
63 print("Working on mask %s" % mask_name)
64 # For decoding, standardizing is often very important
65 mask_filename = haxby_dataset[mask_name][0]
66 masker = NiftiMasker(mask_img=mask_filename, standardize=True)
67 masked_timecourses = masker.fit_transform(
68 func_filename)[np.logical_not(resting_state)]
69
70 mask_scores[mask_name] = {}
71 mask_chance_scores[mask_name] = {}
72
73 for category in categories:
74 print("Processing %s %s" % (mask_name, category))
75 classification_target = stimuli[
76 np.logical_not(resting_state)] == category
77 mask_scores[mask_name][category] = cross_val_score(
78 classifier,
79 masked_timecourses,
80 classification_target,
81 cv=cv, scoring="f1")
82
83 mask_chance_scores[mask_name][category] = cross_val_score(
84 dummy_classifier,
85 masked_timecourses,
86 classification_target,
87 cv=cv, scoring="f1")
88
89 print("Scores: %1.2f +- %1.2f" % (
90 mask_scores[mask_name][category].mean(),
91 mask_scores[mask_name][category].std()))
92
93 # make a rudimentary diagram
94 import matplotlib.pyplot as plt
95 plt.figure()
96
97 tick_position = np.arange(len(categories))
98 plt.xticks(tick_position, categories, rotation=45)
99
100 for color, mask_name in zip('rgb', mask_names):
101 score_means = [mask_scores[mask_name][category].mean()
102 for category in categories]
103 plt.bar(tick_position, score_means, label=mask_name,
104 width=.25, color=color)
105
106 score_chance = [mask_chance_scores[mask_name][category].mean()
107 for category in categories]
108 plt.bar(tick_position, score_chance,
109 width=.25, edgecolor='k', facecolor='none')
110
111 tick_position = tick_position + .2
112
113 plt.ylabel('Classification accurancy (f1 score)')
114 plt.xlabel('Visual stimuli category')
115 plt.legend(loc='best')
116 plt.title('Category-specific classification accuracy for different masks')
117 plt.tight_layout()
118
119
120 plt.show()
121
[end of examples/decoding/plot_haxby_full_analysis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/decoding/plot_haxby_different_estimators.py b/examples/decoding/plot_haxby_different_estimators.py
--- a/examples/decoding/plot_haxby_different_estimators.py
+++ b/examples/decoding/plot_haxby_different_estimators.py
@@ -95,8 +95,8 @@
print(70 * '_')
for category in categories:
- classification_target = stimuli[
- np.logical_not(resting_state)] == category
+ task_mask = np.logical_not(resting_state)
+ classification_target = (stimuli[task_mask] == category)
t0 = time.time()
classifiers_scores[classifier_name][category] = cross_val_score(
classifier,
diff --git a/examples/decoding/plot_haxby_full_analysis.py b/examples/decoding/plot_haxby_full_analysis.py
--- a/examples/decoding/plot_haxby_full_analysis.py
+++ b/examples/decoding/plot_haxby_full_analysis.py
@@ -72,8 +72,8 @@
for category in categories:
print("Processing %s %s" % (mask_name, category))
- classification_target = stimuli[
- np.logical_not(resting_state)] == category
+ task_mask = np.logical_not(resting_state)
+ classification_target = (stimuli[task_mask] == category)
mask_scores[mask_name][category] = cross_val_score(
classifier,
masked_timecourses,
| {"golden_diff": "diff --git a/examples/decoding/plot_haxby_different_estimators.py b/examples/decoding/plot_haxby_different_estimators.py\n--- a/examples/decoding/plot_haxby_different_estimators.py\n+++ b/examples/decoding/plot_haxby_different_estimators.py\n@@ -95,8 +95,8 @@\n print(70 * '_')\n \n for category in categories:\n- classification_target = stimuli[\n- np.logical_not(resting_state)] == category\n+ task_mask = np.logical_not(resting_state)\n+ classification_target = (stimuli[task_mask] == category)\n t0 = time.time()\n classifiers_scores[classifier_name][category] = cross_val_score(\n classifier,\ndiff --git a/examples/decoding/plot_haxby_full_analysis.py b/examples/decoding/plot_haxby_full_analysis.py\n--- a/examples/decoding/plot_haxby_full_analysis.py\n+++ b/examples/decoding/plot_haxby_full_analysis.py\n@@ -72,8 +72,8 @@\n \n for category in categories:\n print(\"Processing %s %s\" % (mask_name, category))\n- classification_target = stimuli[\n- np.logical_not(resting_state)] == category\n+ task_mask = np.logical_not(resting_state)\n+ classification_target = (stimuli[task_mask] == category)\n mask_scores[mask_name][category] = cross_val_score(\n classifier,\n masked_timecourses,\n", "issue": "Improve task mask pattern in examples\n``` python\nclassification_target = stimuli[\n np.logical_not(resting_state)] == category\n```\n\nFrom #740, @AlexandreAbraham said:\n\n> I already found this line hard to understand before (I particularly dislike playing with the priority of operators = and ==) but now the new_line make it even more obscure. Maybe we could change by:\n> \n> ```\n> task_mask = np.logical_not(resting_state) # or sample_mask?\n> classification_target = (stimuli[task_mask] == category)\n> ```\n\nQuickly git-grepping it looks like this pattern is both present in:\nexamples/decoding/plot_haxby_different_estimators.py\nexamples/decoding/plot_haxby_full_analysis.py\n\nThey may be other places where this is used.\n\n", "before_files": [{"content": "\"\"\"\nDifferent classifiers in decoding the Haxby dataset\n=====================================================\n\nHere we compare different classifiers on a visual object recognition\ndecoding task.\n\"\"\"\n\nimport time\n\n### Fetch data using nilearn dataset fetcher ################################\nfrom nilearn import datasets\nhaxby_dataset = datasets.fetch_haxby(n_subjects=1)\n\n# print basic information on the dataset\nprint('First subject anatomical nifti image (3D) located is at: %s' %\n haxby_dataset.anat[0])\nprint('First subject functional nifti image (4D) is located at: %s' %\n haxby_dataset.func[0])\n\n# load labels\nimport numpy as np\nlabels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=\" \")\nstimuli = labels['labels']\n# identify resting state labels in order to be able to remove them\nresting_state = stimuli == b'rest'\n\n# find names of remaining active labels\ncategories = np.unique(stimuli[np.logical_not(resting_state)])\n\n# extract tags indicating to which acquisition run a tag belongs\nsession_labels = labels[\"chunks\"][np.logical_not(resting_state)]\n\n# Load the fMRI data\nfrom nilearn.input_data import NiftiMasker\n\n# For decoding, standardizing is often very important\nmask_filename = haxby_dataset.mask_vt[0]\nmasker = NiftiMasker(mask_img=mask_filename, standardize=True)\nfunc_filename = haxby_dataset.func[0]\nmasked_timecourses = masker.fit_transform(\n func_filename)[np.logical_not(resting_state)]\n\n# Classifiers definition\n\n# A support vector classifier\nfrom sklearn.svm 
import SVC\nsvm = SVC(C=1., kernel=\"linear\")\n\nfrom sklearn.grid_search import GridSearchCV\n# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that\n# can significantly speed up the fitting process on computers with\n# multiple cores\nsvm_cv = GridSearchCV(SVC(C=1., kernel=\"linear\"),\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1')\n\n# The logistic regression\nfrom sklearn.linear_model import LogisticRegression, RidgeClassifier, \\\n RidgeClassifierCV\nlogistic = LogisticRegression(C=1., penalty=\"l1\")\nlogistic_50 = LogisticRegression(C=50., penalty=\"l1\")\nlogistic_l2 = LogisticRegression(C=1., penalty=\"l2\")\n\nlogistic_cv = GridSearchCV(LogisticRegression(C=1., penalty=\"l1\"),\n param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1')\nlogistic_l2_cv = GridSearchCV(LogisticRegression(C=1., penalty=\"l2\"),\n param_grid={\n 'C': [.1, .5, 1., 5., 10., 50., 100.]},\n scoring='f1')\n\nridge = RidgeClassifier()\nridge_cv = RidgeClassifierCV()\n\n\n# Make a data splitting object for cross validation\nfrom sklearn.cross_validation import LeaveOneLabelOut, cross_val_score\ncv = LeaveOneLabelOut(session_labels)\n\nclassifiers = {'SVC': svm,\n 'SVC cv': svm_cv,\n 'log l1': logistic,\n 'log l1 50': logistic_50,\n 'log l1 cv': logistic_cv,\n 'log l2': logistic_l2,\n 'log l2 cv': logistic_l2_cv,\n 'ridge': ridge,\n 'ridge cv': ridge_cv}\n\nclassifiers_scores = {}\n\nfor classifier_name, classifier in sorted(classifiers.items()):\n classifiers_scores[classifier_name] = {}\n print(70 * '_')\n\n for category in categories:\n classification_target = stimuli[\n np.logical_not(resting_state)] == category\n t0 = time.time()\n classifiers_scores[classifier_name][category] = cross_val_score(\n classifier,\n masked_timecourses,\n classification_target,\n cv=cv, scoring=\"f1\")\n\n print(\"%10s: %14s -- scores: %1.2f +- %1.2f, time %.2fs\" % (\n classifier_name, category,\n classifiers_scores[classifier_name][category].mean(),\n classifiers_scores[classifier_name][category].std(),\n time.time() - t0))\n\n###############################################################################\n# make a rudimentary diagram\nimport matplotlib.pyplot as plt\nplt.figure()\n\ntick_position = np.arange(len(categories))\nplt.xticks(tick_position, categories, rotation=45)\n\nfor color, classifier_name in zip(\n ['b', 'c', 'm', 'g', 'y', 'k', '.5', 'r', '#ffaaaa'],\n sorted(classifiers)):\n score_means = [classifiers_scores[classifier_name][category].mean()\n for category in categories]\n plt.bar(tick_position, score_means, label=classifier_name,\n width=.11, color=color)\n tick_position = tick_position + .09\n\nplt.ylabel('Classification accurancy (f1 score)')\nplt.xlabel('Visual stimuli category')\nplt.ylim(ymin=0)\nplt.legend(loc='lower center', ncol=3)\nplt.title('Category-specific classification accuracy for different classifiers')\nplt.tight_layout()\n\n###############################################################################\n# Plot the face vs house map for the different estimators\n\n# use the average EPI as a background\nfrom nilearn import image\nmean_epi_img = image.mean_img(func_filename)\n\n# Restrict the decoding to face vs house\ncondition_mask = np.logical_or(stimuli == b'face', stimuli == b'house')\nmasked_timecourses = masked_timecourses[\n condition_mask[np.logical_not(resting_state)]]\nstimuli = stimuli[condition_mask]\n# Transform the stimuli to binary values\nstimuli = (stimuli == b'face').astype(np.int)\n\nfrom nilearn.plotting import 
plot_stat_map, show\n\nfor classifier_name, classifier in sorted(classifiers.items()):\n classifier.fit(masked_timecourses, stimuli)\n\n if hasattr(classifier, 'coef_'):\n weights = classifier.coef_[0]\n elif hasattr(classifier, 'best_estimator_'):\n weights = classifier.best_estimator_.coef_[0]\n else:\n continue\n weight_img = masker.inverse_transform(weights)\n weight_map = weight_img.get_data()\n threshold = np.max(np.abs(weight_map)) * 1e-3\n plot_stat_map(weight_img, bg_img=mean_epi_img,\n display_mode='z', cut_coords=[-17],\n threshold=threshold,\n title='%s: face vs house' % classifier_name)\n\nshow()\n", "path": "examples/decoding/plot_haxby_different_estimators.py"}, {"content": "\"\"\"\nROI-based decoding analysis in Haxby et al. dataset\n=====================================================\n\nIn this script we reproduce the data analysis conducted by\nHaxby et al. in \"Distributed and Overlapping Representations of Faces and\nObjects in Ventral Temporal Cortex\".\n\nSpecifically, we look at decoding accuracy for different objects in\nthree different masks: the full ventral stream (mask_vt), the house\nselective areas (mask_house) and the face selective areas (mask_face),\nthat have been defined via a standard GLM-based analysis.\n\n\"\"\"\n\n\n### Fetch data using nilearn dataset fetcher ################################\nfrom nilearn import datasets\nhaxby_dataset = datasets.fetch_haxby(n_subjects=1)\n\n# print basic information on the dataset\nprint('First subject anatomical nifti image (3D) located is at: %s' %\n haxby_dataset.anat[0])\nprint('First subject functional nifti image (4D) is located at: %s' %\n haxby_dataset.func[0])\n\n# Load nilearn NiftiMasker, the practical masking and unmasking tool\nfrom nilearn.input_data import NiftiMasker\n\n# load labels\nimport numpy as np\nlabels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=\" \")\nstimuli = labels['labels']\n\n# identify resting state labels in order to be able to remove them\nresting_state = stimuli == b\"rest\"\n\n# find names of remaining active labels\ncategories = np.unique(stimuli[np.logical_not(resting_state)])\n\n# extract tags indicating to which acquisition run a tag belongs\nsession_labels = labels[\"chunks\"][np.logical_not(resting_state)]\n\n# The classifier: a support vector classifier\nfrom sklearn.svm import SVC\nclassifier = SVC(C=1., kernel=\"linear\")\n\n# A classifier to set the chance level\nfrom sklearn.dummy import DummyClassifier\ndummy_classifier = DummyClassifier()\n\n# Make a data splitting object for cross validation\nfrom sklearn.cross_validation import LeaveOneLabelOut, cross_val_score\ncv = LeaveOneLabelOut(session_labels)\n\nfunc_filename = haxby_dataset.func[0]\nmask_names = ['mask_vt', 'mask_face', 'mask_house']\n\nmask_scores = {}\nmask_chance_scores = {}\n\nfor mask_name in mask_names:\n print(\"Working on mask %s\" % mask_name)\n # For decoding, standardizing is often very important\n mask_filename = haxby_dataset[mask_name][0]\n masker = NiftiMasker(mask_img=mask_filename, standardize=True)\n masked_timecourses = masker.fit_transform(\n func_filename)[np.logical_not(resting_state)]\n\n mask_scores[mask_name] = {}\n mask_chance_scores[mask_name] = {}\n\n for category in categories:\n print(\"Processing %s %s\" % (mask_name, category))\n classification_target = stimuli[\n np.logical_not(resting_state)] == category\n mask_scores[mask_name][category] = cross_val_score(\n classifier,\n masked_timecourses,\n classification_target,\n cv=cv, scoring=\"f1\")\n\n 
mask_chance_scores[mask_name][category] = cross_val_score(\n dummy_classifier,\n masked_timecourses,\n classification_target,\n cv=cv, scoring=\"f1\")\n\n print(\"Scores: %1.2f +- %1.2f\" % (\n mask_scores[mask_name][category].mean(),\n mask_scores[mask_name][category].std()))\n\n# make a rudimentary diagram\nimport matplotlib.pyplot as plt\nplt.figure()\n\ntick_position = np.arange(len(categories))\nplt.xticks(tick_position, categories, rotation=45)\n\nfor color, mask_name in zip('rgb', mask_names):\n score_means = [mask_scores[mask_name][category].mean()\n for category in categories]\n plt.bar(tick_position, score_means, label=mask_name,\n width=.25, color=color)\n\n score_chance = [mask_chance_scores[mask_name][category].mean()\n for category in categories]\n plt.bar(tick_position, score_chance,\n width=.25, edgecolor='k', facecolor='none')\n\n tick_position = tick_position + .2\n\nplt.ylabel('Classification accurancy (f1 score)')\nplt.xlabel('Visual stimuli category')\nplt.legend(loc='best')\nplt.title('Category-specific classification accuracy for different masks')\nplt.tight_layout()\n\n\nplt.show()\n", "path": "examples/decoding/plot_haxby_full_analysis.py"}]} | 3,808 | 319 |
gh_patches_debug_49140 | rasdani/github-patches | git_diff | horovod__horovod-2121 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in computing gradients when using allgather
**Environment:**
1. Framework: TensorFlow
2. Framework version: 2.0
3. Horovod version: 0.18.2
I am trying to get the median of a tensor computed across all batches and all processes. However, I got an error: `TypeError: Expected int32, got None of type 'NoneType' instead`. It seems that computing gradients does not work well with Horovod's allgather operation. A simple illustration of what I would like to achieve is as follows:
```python
with tf.GradientTape() as tape:
    my_tensor = compute_my_tensor()
    gathered_my_tensor = hvd.allgather(my_tensor)
    median = get_median(gathered_my_tensor)
    loss = get_loss(my_tensor, median, training=True)
tape = hvd.DistributedGradientTape(tape)
grads = tape.gradient(loss, trainable_variables)
optimizer.apply_gradients(zip(grads, trainable_variables))
```
By the way, when I use TensorFlow's eager mode, there is no error.
</issue>
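The error message points at the gradient of `HorovodAllgather`: in graph mode the static shape of the op's input can have an unknown leading dimension, so `get_shape().as_list()[0]` returns `None` and `tf.convert_to_tensor([None])` fails. A minimal sketch of the difference between the static and the runtime shape, using plain TensorFlow only (no Horovod is needed to reproduce the `None`):

```python
import tensorflow as tf

@tf.function
def leading_dim(x):
    static_dim = x.get_shape().as_list()[0]   # None when the size is unknown at trace time
    print("static leading dim at trace time:", static_dim)
    return tf.shape(x)[0]                     # evaluated at run time, always an int32 tensor

spec = tf.TensorSpec(shape=[None, 3], dtype=tf.float32)
fn = leading_dim.get_concrete_function(spec)  # prints: static leading dim at trace time: None
print(fn(tf.ones([4, 3])))                    # tf.Tensor(4, shape=(), dtype=int32)
```

Deriving the per-rank split sizes from `tf.shape(x)` instead of the static shape avoids the `NoneType` conversion in `_allgather_grad`.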
<code>
[start of horovod/tensorflow/mpi_ops.py]
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 # Modifications copyright (C) 2019 Uber Technologies, Inc.
3 # Modifications copyright Microsoft
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # =============================================================================
17 """Inter-process communication using MPI."""
18
19 import re
20 import tensorflow as tf
21 from tensorflow.python.framework import load_library
22 from tensorflow.python.framework import ops
23 from tensorflow.python.platform import resource_loader
24
25 from horovod.common.util import get_ext_suffix, get_average_backwards_compatibility_fun, gpu_available, \
26 num_rank_is_power_2
27 from horovod.common.basics import HorovodBasics as _HorovodBasics
28 from horovod.tensorflow.util import _executing_eagerly
29
30
31 def _load_library(name):
32 """Loads a .so file containing the specified operators.
33
34 Args:
35 name: The name of the .so file to load.
36
37 Raises:
38 NotFoundError if were not able to load .so file.
39 """
40 filename = resource_loader.get_path_to_datafile(name)
41 library = load_library.load_op_library(filename)
42 return library
43
44
45 MPI_LIB = _load_library('mpi_lib' + get_ext_suffix())
46
47 _basics = _HorovodBasics(__file__, 'mpi_lib')
48
49 # import basic methods
50 init = _basics.init
51 shutdown = _basics.shutdown
52 size = _basics.size
53 local_size = _basics.local_size
54 rank = _basics.rank
55 local_rank = _basics.local_rank
56 mpi_threads_supported = _basics.mpi_threads_supported
57 mpi_enabled = _basics.mpi_enabled
58 mpi_built = _basics.mpi_built
59 gloo_enabled = _basics.gloo_enabled
60 gloo_built = _basics.gloo_built
61 nccl_built = _basics.nccl_built
62 ddl_built = _basics.ddl_built
63 ccl_built = _basics.ccl_built
64
65 # import reduction op values
66 Average = _basics.Average
67 Sum = _basics.Sum
68 Adasum = _basics.Adasum
69
70 is_homogeneous = _basics.is_homogeneous
71
72 handle_average_backwards_compatibility = get_average_backwards_compatibility_fun(_basics)
73
74 check_num_rank_power_of_2 = num_rank_is_power_2
75
76
77 # This function will create a default device map which includes all visible devices.
78 # Please run this function in a subprocess
79 def _check_has_gpu():
80 import tensorflow as tf
81 return tf.test.is_gpu_available()
82
83
84 def _normalize_name(name):
85 """Normalizes operation name to TensorFlow rules."""
86 return re.sub('[^a-zA-Z0-9_]', '_', name)
87
88
89 def _allreduce(tensor, name=None, op=Sum):
90 """An op which reduces an input tensor over all the Horovod processes. The
91 default reduction is a sum.
92
93 The reduction operation is keyed by the name of the op. The tensor type and
94 shape must be the same on all Horovod processes for a given name. The reduction
95 will not start until all processes are ready to send and receive the tensor.
96
97 Returns:
98 A tensor of the same shape and type as `tensor`, summed across all
99 processes.
100 """
101 if name is None and not _executing_eagerly():
102 name = 'HorovodAllreduce_%s' % _normalize_name(tensor.name)
103 return MPI_LIB.horovod_allreduce(tensor, name=name, reduce_op=op)
104
105
106 @ops.RegisterGradient('HorovodAllreduce')
107 def _allreduce_grad(op, grad):
108 """Gradient for allreduce op.
109
110 Args:
111 op: An operation.
112 grad: `Tensor` gradient with respect to the output of the op.
113
114 Returns:
115 The gradient with respect to the input of the op.
116 """
117 reduce_op = op.get_attr('reduce_op')
118 return _allreduce(grad, op=reduce_op)
119
120
121 def allgather(tensor, name=None):
122 """An op which concatenates the input tensor with the same input tensor on
123 all other Horovod processes.
124
125 The concatenation is done on the first dimension, so the input tensors on the
126 different processes must have the same rank and shape, except for the first
127 dimension, which is allowed to be different.
128
129 Returns:
130 A tensor of the same type as `tensor`, concatenated on dimension zero
131 across all processes. The shape is identical to the input shape, except for
132 the first dimension, which may be greater and is the sum of all first
133 dimensions of the tensors in different Horovod processes.
134 """
135 if name is None and not _executing_eagerly():
136 name = 'HorovodAllgather_%s' % _normalize_name(tensor.name)
137 return MPI_LIB.horovod_allgather(tensor, name=name)
138
139
140 @ops.RegisterGradient('HorovodAllgather')
141 def _allgather_grad(op, grad):
142 """Gradient for allgather op.
143
144 Args:
145 op: An operation.
146 grad: `Tensor` gradient with respect to the output of the op.
147
148 Returns:
149 The gradient with respect to the input of the op.
150 """
151 grad = _allreduce(grad)
152
153 with tf.device('/cpu:0'):
154 # Keep the tensor of split sizes on CPU.
155 x = op.inputs[0]
156 d0 = x.get_shape().as_list()[0]
157 d = tf.convert_to_tensor([d0], dtype=tf.int32)
158
159 s = size()
160 d = tf.reshape(allgather(d), [s])
161
162 splits = tf.split(grad, num_or_size_splits=d, axis=0)
163 return splits[rank()]
164
165
166 def broadcast(tensor, root_rank, name=None):
167 """An op which broadcasts the input tensor on root rank to the same input tensor
168 on all other Horovod processes.
169
170 The broadcast operation is keyed by the name of the op. The tensor type and
171 shape must be the same on all Horovod processes for a given name. The broadcast
172 will not start until all processes are ready to send and receive the tensor.
173
174 Returns:
175 A tensor of the same shape and type as `tensor`, with the value broadcasted
176 from root rank.
177 """
178 if name is None and not _executing_eagerly():
179 name = 'HorovodBroadcast_%s' % _normalize_name(tensor.name)
180 return MPI_LIB.horovod_broadcast(tensor, name=name, root_rank=root_rank)
181
182
183 @ops.RegisterGradient('HorovodBroadcast')
184 def _broadcast_grad(op, grad):
185 """Gradient for broadcast op.
186
187 Args:
188 op: An operation.
189 grad: `Tensor` gradient with respect to the output of the op.
190
191 Returns:
192 The gradient with respect to the input of the op.
193 """
194 root_rank = op.get_attr('root_rank')
195 grad_reduced = _allreduce(grad)
196 if rank() != root_rank:
197 return grad_reduced * 0
198 return grad_reduced
199
200
201 def join():
202 return MPI_LIB.horovod_join()
203
204
205 def size_op(name=None):
206 """An op that returns the number of Horovod processes.
207
208 This operation determines the return value at the graph execution time,
209 rather than at the graph construction time, and so allows for a graph to be
210 constructed in a different environment than where it will be executed.
211
212 Returns:
213 An integer scalar containing the number of Horovod processes.
214 """
215 return MPI_LIB.horovod_size(name=name)
216
217
218 ops.NotDifferentiable('HorovodSize')
219
220
221 def local_size_op(name=None):
222 """An op that returns the number of Horovod processes within the
223 node the current process is running on.
224
225 This operation determines the return value at the graph execution time,
226 rather than at the graph construction time, and so allows for a graph to be
227 constructed in a different environment than where it will be executed.
228
229 Returns:
230 An integer scalar containing the number of local Horovod processes.
231 """
232 return MPI_LIB.horovod_local_size(name=name)
233
234
235 ops.NotDifferentiable('HorovodLocalSize')
236
237
238 def rank_op(name=None):
239 """An op that returns the Horovod rank of the calling process.
240
241 This operation determines the return value at the graph execution time,
242 rather than at the graph construction time, and so allows for a graph to be
243 constructed in a different environment than where it will be executed.
244
245 Returns:
246 An integer scalar with the Horovod rank of the calling process.
247 """
248 return MPI_LIB.horovod_rank(name=name)
249
250
251 ops.NotDifferentiable('HorovodRank')
252
253
254 def local_rank_op(name=None):
255 """An op that returns the local Horovod rank of the calling process, within the
256 node that it is running on. For example, if there are seven processes running
257 on a node, their local ranks will be zero through six, inclusive.
258
259 This operation determines the return value at the graph execution time,
260 rather than at the graph construction time, and so allows for a graph to be
261 constructed in a different environment than where it will be executed.
262
263 Returns:
264 An integer scalar with the local Horovod rank of the calling process.
265 """
266 return MPI_LIB.horovod_rank(name=name)
267
268
269 ops.NotDifferentiable('HorovodLocalRank')
270
[end of horovod/tensorflow/mpi_ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/horovod/tensorflow/mpi_ops.py b/horovod/tensorflow/mpi_ops.py
--- a/horovod/tensorflow/mpi_ops.py
+++ b/horovod/tensorflow/mpi_ops.py
@@ -152,8 +152,8 @@
with tf.device('/cpu:0'):
# Keep the tensor of split sizes on CPU.
x = op.inputs[0]
- d0 = x.get_shape().as_list()[0]
- d = tf.convert_to_tensor([d0], dtype=tf.int32)
+ d = tf.shape(x)
+ d = tf.reshape(d[0], [1])
s = size()
d = tf.reshape(allgather(d), [s])
| {"golden_diff": "diff --git a/horovod/tensorflow/mpi_ops.py b/horovod/tensorflow/mpi_ops.py\n--- a/horovod/tensorflow/mpi_ops.py\n+++ b/horovod/tensorflow/mpi_ops.py\n@@ -152,8 +152,8 @@\n with tf.device('/cpu:0'):\n # Keep the tensor of split sizes on CPU.\n x = op.inputs[0]\n- d0 = x.get_shape().as_list()[0]\n- d = tf.convert_to_tensor([d0], dtype=tf.int32)\n+ d = tf.shape(x)\n+ d = tf.reshape(d[0], [1])\n \n s = size()\n d = tf.reshape(allgather(d), [s])\n", "issue": "Error in computing gradients when using allgather\n**Environment:**\r\n1. Framework: TensorFlow\r\n2. Framework version: 2.0\r\n3. Horovod version: 0.18.2\r\n\r\nI am trying to get the median of a tensor computed across all batches and all processes. However, I got an error TypeError: Expected int32, got None of type 'NoneType' instead.It seems that computing gradients does not work well with horovod's allgather operation. A simple illustration of what I would like to achieve is as follows:\r\n\r\n>with tf.GradientTape() as tape: \r\n    my_tensor = compute_my_tensor() \r\n    gathered_my_tensor = hvd.allgather(my_tensor) \r\n    median = get_median(gathered_my_tensor)\r\n    loss = get_loss(my_tensor, median, training=True)\r\ntape = hvd.DistributedGradientTape(tape)\r\ngrads = tape.gradient(loss, trainable_variables)\r\noptimizer.apply_gradients(zip(grads, trainable_variables))\r\n\r\nBTW, when I use eager mode of tensorflow, there will be no error\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications copyright (C) 2019 Uber Technologies, Inc.\n# Modifications copyright Microsoft\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Inter-process communication using MPI.\"\"\"\n\nimport re\nimport tensorflow as tf\nfrom tensorflow.python.framework import load_library\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import resource_loader\n\nfrom horovod.common.util import get_ext_suffix, get_average_backwards_compatibility_fun, gpu_available, \\\n num_rank_is_power_2\nfrom horovod.common.basics import HorovodBasics as _HorovodBasics\nfrom horovod.tensorflow.util import _executing_eagerly\n\n\ndef _load_library(name):\n \"\"\"Loads a .so file containing the specified operators.\n\n Args:\n name: The name of the .so file to load.\n\n Raises:\n NotFoundError if were not able to load .so file.\n \"\"\"\n filename = resource_loader.get_path_to_datafile(name)\n library = load_library.load_op_library(filename)\n return library\n\n\nMPI_LIB = _load_library('mpi_lib' + get_ext_suffix())\n\n_basics = _HorovodBasics(__file__, 'mpi_lib')\n\n# import basic methods\ninit = _basics.init\nshutdown = _basics.shutdown\nsize = _basics.size\nlocal_size = _basics.local_size\nrank = _basics.rank\nlocal_rank = _basics.local_rank\nmpi_threads_supported = _basics.mpi_threads_supported\nmpi_enabled = _basics.mpi_enabled\nmpi_built = _basics.mpi_built\ngloo_enabled = 
_basics.gloo_enabled\ngloo_built = _basics.gloo_built\nnccl_built = _basics.nccl_built\nddl_built = _basics.ddl_built\nccl_built = _basics.ccl_built\n\n# import reduction op values\nAverage = _basics.Average\nSum = _basics.Sum\nAdasum = _basics.Adasum\n\nis_homogeneous = _basics.is_homogeneous\n\nhandle_average_backwards_compatibility = get_average_backwards_compatibility_fun(_basics)\n\ncheck_num_rank_power_of_2 = num_rank_is_power_2\n\n\n# This function will create a default device map which includes all visible devices.\n# Please run this function in a subprocess\ndef _check_has_gpu():\n import tensorflow as tf\n return tf.test.is_gpu_available()\n\n\ndef _normalize_name(name):\n \"\"\"Normalizes operation name to TensorFlow rules.\"\"\"\n return re.sub('[^a-zA-Z0-9_]', '_', name)\n\n\ndef _allreduce(tensor, name=None, op=Sum):\n \"\"\"An op which reduces an input tensor over all the Horovod processes. The\n default reduction is a sum.\n\n The reduction operation is keyed by the name of the op. The tensor type and\n shape must be the same on all Horovod processes for a given name. The reduction\n will not start until all processes are ready to send and receive the tensor.\n\n Returns:\n A tensor of the same shape and type as `tensor`, summed across all\n processes.\n \"\"\"\n if name is None and not _executing_eagerly():\n name = 'HorovodAllreduce_%s' % _normalize_name(tensor.name)\n return MPI_LIB.horovod_allreduce(tensor, name=name, reduce_op=op)\n\n\[email protected]('HorovodAllreduce')\ndef _allreduce_grad(op, grad):\n \"\"\"Gradient for allreduce op.\n\n Args:\n op: An operation.\n grad: `Tensor` gradient with respect to the output of the op.\n\n Returns:\n The gradient with respect to the input of the op.\n \"\"\"\n reduce_op = op.get_attr('reduce_op')\n return _allreduce(grad, op=reduce_op)\n\n\ndef allgather(tensor, name=None):\n \"\"\"An op which concatenates the input tensor with the same input tensor on\n all other Horovod processes.\n\n The concatenation is done on the first dimension, so the input tensors on the\n different processes must have the same rank and shape, except for the first\n dimension, which is allowed to be different.\n\n Returns:\n A tensor of the same type as `tensor`, concatenated on dimension zero\n across all processes. The shape is identical to the input shape, except for\n the first dimension, which may be greater and is the sum of all first\n dimensions of the tensors in different Horovod processes.\n \"\"\"\n if name is None and not _executing_eagerly():\n name = 'HorovodAllgather_%s' % _normalize_name(tensor.name)\n return MPI_LIB.horovod_allgather(tensor, name=name)\n\n\[email protected]('HorovodAllgather')\ndef _allgather_grad(op, grad):\n \"\"\"Gradient for allgather op.\n\n Args:\n op: An operation.\n grad: `Tensor` gradient with respect to the output of the op.\n\n Returns:\n The gradient with respect to the input of the op.\n \"\"\"\n grad = _allreduce(grad)\n\n with tf.device('/cpu:0'):\n # Keep the tensor of split sizes on CPU.\n x = op.inputs[0]\n d0 = x.get_shape().as_list()[0]\n d = tf.convert_to_tensor([d0], dtype=tf.int32)\n\n s = size()\n d = tf.reshape(allgather(d), [s])\n\n splits = tf.split(grad, num_or_size_splits=d, axis=0)\n return splits[rank()]\n\n\ndef broadcast(tensor, root_rank, name=None):\n \"\"\"An op which broadcasts the input tensor on root rank to the same input tensor\n on all other Horovod processes.\n\n The broadcast operation is keyed by the name of the op. 
The tensor type and\n shape must be the same on all Horovod processes for a given name. The broadcast\n will not start until all processes are ready to send and receive the tensor.\n\n Returns:\n A tensor of the same shape and type as `tensor`, with the value broadcasted\n from root rank.\n \"\"\"\n if name is None and not _executing_eagerly():\n name = 'HorovodBroadcast_%s' % _normalize_name(tensor.name)\n return MPI_LIB.horovod_broadcast(tensor, name=name, root_rank=root_rank)\n\n\[email protected]('HorovodBroadcast')\ndef _broadcast_grad(op, grad):\n \"\"\"Gradient for broadcast op.\n\n Args:\n op: An operation.\n grad: `Tensor` gradient with respect to the output of the op.\n\n Returns:\n The gradient with respect to the input of the op.\n \"\"\"\n root_rank = op.get_attr('root_rank')\n grad_reduced = _allreduce(grad)\n if rank() != root_rank:\n return grad_reduced * 0\n return grad_reduced\n\n\ndef join():\n return MPI_LIB.horovod_join()\n\n\ndef size_op(name=None):\n \"\"\"An op that returns the number of Horovod processes.\n\n This operation determines the return value at the graph execution time,\n rather than at the graph construction time, and so allows for a graph to be\n constructed in a different environment than where it will be executed.\n\n Returns:\n An integer scalar containing the number of Horovod processes.\n \"\"\"\n return MPI_LIB.horovod_size(name=name)\n\n\nops.NotDifferentiable('HorovodSize')\n\n\ndef local_size_op(name=None):\n \"\"\"An op that returns the number of Horovod processes within the\n node the current process is running on.\n\n This operation determines the return value at the graph execution time,\n rather than at the graph construction time, and so allows for a graph to be\n constructed in a different environment than where it will be executed.\n\n Returns:\n An integer scalar containing the number of local Horovod processes.\n \"\"\"\n return MPI_LIB.horovod_local_size(name=name)\n\n\nops.NotDifferentiable('HorovodLocalSize')\n\n\ndef rank_op(name=None):\n \"\"\"An op that returns the Horovod rank of the calling process.\n\n This operation determines the return value at the graph execution time,\n rather than at the graph construction time, and so allows for a graph to be\n constructed in a different environment than where it will be executed.\n\n Returns:\n An integer scalar with the Horovod rank of the calling process.\n \"\"\"\n return MPI_LIB.horovod_rank(name=name)\n\n\nops.NotDifferentiable('HorovodRank')\n\n\ndef local_rank_op(name=None):\n \"\"\"An op that returns the local Horovod rank of the calling process, within the\n node that it is running on. For example, if there are seven processes running\n on a node, their local ranks will be zero through six, inclusive.\n\n This operation determines the return value at the graph execution time,\n rather than at the graph construction time, and so allows for a graph to be\n constructed in a different environment than where it will be executed.\n\n Returns:\n An integer scalar with the local Horovod rank of the calling process.\n \"\"\"\n return MPI_LIB.horovod_rank(name=name)\n\n\nops.NotDifferentiable('HorovodLocalRank')\n", "path": "horovod/tensorflow/mpi_ops.py"}]} | 3,672 | 169 |
gh_patches_debug_33923 | rasdani/github-patches | git_diff | fidals__shopelectro-861 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update_pack.py:17: Update Product.in_pack and render...
The puzzle `827-907829af` from #827 has to be resolved:
https://github.com/fidals/shopelectro/blob/39281ed9b9d945b4518b411769db4a3f454f2916/shopelectro/management/commands/_update_catalog/update_pack.py#L17-L17
The puzzle was created by Artemiy on 30-May-19.
Estimate: 60 minutes, role: DEV.
If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
</issue>
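For orientation, a rough sketch of the price-update half of this task, assuming the `Product` price fields listed in the code below and Django `F` expressions; the pack-quantity parsing from `Tag.name` is the part the puzzle leaves open:

```python
from django.db import models, transaction

# Field names taken from shopelectro.models.Product below; 'price' is assumed
# to be inherited from catalog_models.AbstractProduct.
PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']


def update_prices(products):
    """Multiply per-unit prices by the already-parsed pack quantity."""
    fields_to_update = {
        price: models.F(price) * models.F('in_pack')
        for price in PRICES
    }
    with transaction.atomic():
        products.update(**fields_to_update)
```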
<code>
[start of shopelectro/models.py]
1 import enum
2 import random
3 import string
4 import typing
5 from uuid import uuid4
6
7 from django.conf import settings
8 from django.db import models
9 from django.urls import reverse
10 from django.utils.translation import ugettext_lazy as _
11
12 from catalog import models as catalog_models
13 from ecommerce import models as ecommerce_models
14 from pages import models as pages_models
15
16
17 def randomize_slug(slug: str) -> str:
18 slug_hash = ''.join(
19 random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)
20 )
21 return f'{slug}_{slug_hash}'
22
23
24 class SECategoryQuerySet(catalog_models.CategoryQuerySet):
25 def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':
26 categories_with_pictures = (
27 self
28 .filter(products__page__images__isnull=False)
29 .distinct()
30 )
31
32 return categories_with_pictures.get_ancestors(include_self=True)
33
34
35 class SECategoryManager(
36 catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)
37 ):
38 pass
39
40
41 class Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):
42
43 objects = SECategoryManager()
44 uuid = models.UUIDField(default=uuid4, editable=False)
45
46 @classmethod
47 def get_default_parent(cls):
48 return pages_models.CustomPage.objects.filter(slug='catalog').first()
49
50 @property
51 def image(self):
52 products = self.products.all()
53 return products[0].image if products else None
54
55 def get_absolute_url(self):
56 return reverse('category', args=(self.page.slug,))
57
58
59 class Product(
60 catalog_models.AbstractProduct,
61 catalog_models.AbstractPosition,
62 pages_models.SyncPageMixin
63 ):
64
65 # That's why we are needed to explicitly add objects manager here
66 # because of Django special managers behaviour.
67 # Se se#480 for details.
68 objects = catalog_models.ProductManager()
69
70 category = models.ForeignKey(
71 Category,
72 on_delete=models.CASCADE,
73 null=True,
74 related_name='products',
75 verbose_name=_('category'),
76 )
77
78 tags = models.ManyToManyField(
79 'Tag',
80 related_name='products',
81 blank=True,
82 verbose_name=_('tags'),
83 )
84
85 vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))
86 uuid = models.UUIDField(default=uuid4, editable=False)
87 purchase_price = models.FloatField(
88 default=0, verbose_name=_('purchase_price'))
89 wholesale_small = models.FloatField(
90 default=0, verbose_name=_('wholesale_small'))
91 wholesale_medium = models.FloatField(
92 default=0, verbose_name=_('wholesale_medium'))
93 wholesale_large = models.FloatField(
94 default=0, verbose_name=_('wholesale_large'))
95
96 in_pack = models.PositiveSmallIntegerField(
97 default=1,
98 verbose_name=_('in pack'),
99 )
100
101 def get_absolute_url(self):
102 return reverse('product', args=(self.vendor_code,))
103
104 @property
105 def average_rate(self):
106 """Return rounded to first decimal averaged rating."""
107 rating = self.product_feedbacks.aggregate(
108 avg=models.Avg('rating')).get('avg', 0)
109 return round(rating, 1)
110
111 @property
112 def feedback_count(self):
113 return self.product_feedbacks.count()
114
115 @property
116 def feedback(self):
117 return self.product_feedbacks.all().order_by('-date')
118
119 def get_params(self):
120 return Tag.objects.filter_by_products([self]).group_tags()
121
122 def get_brand_name(self) -> str:
123 brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
124 return brand.name if brand else ''
125
126
127 class ProductFeedback(models.Model):
128 product = models.ForeignKey(
129 Product, on_delete=models.CASCADE, null=True,
130 related_name='product_feedbacks'
131 )
132
133 date = models.DateTimeField(
134 auto_now=True, db_index=True, verbose_name=_('date'))
135 name = models.CharField(
136 max_length=255, db_index=True, verbose_name=_('name'))
137 rating = models.PositiveSmallIntegerField(
138 default=1, db_index=True, verbose_name=_('rating'))
139 dignities = models.TextField(
140 default='', blank=True, verbose_name=_('dignities'))
141 limitations = models.TextField(
142 default='', blank=True, verbose_name=_('limitations'))
143 general = models.TextField(
144 default='', blank=True, verbose_name=_('limitations'))
145
146
147 class ItemsEnum(enum.EnumMeta):
148 """
149 Provide dict-like `items` method.
150
151 https://docs.python.org/3/library/enum.html#enum-classes
152 """
153
154 def items(self):
155 return [(i.name, i.value) for i in self]
156
157 def __repr__(self):
158 fields = ', '.join(i.name for i in self)
159 return f"<enum '{self.__name__}: {fields}'>"
160
161
162 class PaymentOptions(enum.Enum, metaclass=ItemsEnum):
163 cash = 'Наличные'
164 cashless = 'Безналичные и денежные переводы'
165 AC = 'Банковская карта'
166 PC = 'Яндекс.Деньги'
167 GP = 'Связной (терминал)'
168 AB = 'Альфа-Клик'
169
170 @staticmethod
171 def default():
172 return PaymentOptions.cash
173
174
175 class Order(ecommerce_models.Order):
176 address = models.TextField(blank=True, default='')
177 payment_type = models.CharField(
178 max_length=255,
179 choices=PaymentOptions.items(),
180 default=PaymentOptions.default().name,
181 )
182 comment = models.TextField(blank=True, default='')
183 # total price - total purchase price
184 revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))
185
186 @property
187 def payment_type_label(self):
188 """Return label for an order's payment option."""
189 return PaymentOptions[self.payment_type].value
190
191 def set_positions(self, cart):
192 """
193 Save cart's state into Order instance.
194
195 @todo #589:60m Create Cart model.
196 See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672
197 """
198 self.revenue = cart.total_revenue()
199 self.save()
200 for id_, position in cart:
201 self.positions.create(
202 order=self,
203 product_id=id_,
204 vendor_code=position['vendor_code'],
205 name=position['name'],
206 price=position['price'],
207 quantity=position['quantity'],
208 )
209 return self
210
211
212 class CategoryPage(pages_models.ModelPage):
213 """Create proxy model for Admin."""
214
215 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
216 proxy = True
217
218 # noinspection PyTypeChecker
219 objects = pages_models.ModelPage.create_model_page_managers(Category)
220
221
222 class ProductPage(pages_models.ModelPage):
223 """Create proxy model for Admin."""
224
225 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
226 proxy = True
227
228 # noinspection PyTypeChecker
229 objects = (
230 pages_models.ModelPage
231 .create_model_page_managers(Product)
232 )
233
234
235 class TagGroup(catalog_models.TagGroup):
236 pass
237
238
239 class TagQuerySet(catalog_models.TagQuerySet):
240 pass
241
242
243 class Tag(catalog_models.Tag):
244 group = models.ForeignKey(
245 TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
246 )
247
[end of shopelectro/models.py]
[start of shopelectro/management/commands/_update_catalog/update_pack.py]
1 import logging
2
3 from django.db import transaction
4
5 from shopelectro.models import TagGroup
6
7 logger = logging.getLogger(__name__)
8
9
10 def main(*args, kwargs):
11 uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
12 pack_group = TagGroup.objects.filter(uuid=uuid).first()
13 if not pack_group:
14 logger.error(f'Couldn\'t find "Упаковка" tag group with uuid = "{uuid}".')
15 return
16
17 # @todo #827:60m Update Product.in_pack and render prices properly.
18
19 return
20 packs = pack_group.tags.all().prefetch_related('products')
21 with transaction.atomic():
22 for pack in packs:
23 ...
24
[end of shopelectro/management/commands/_update_catalog/update_pack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py
--- a/shopelectro/management/commands/_update_catalog/update_pack.py
+++ b/shopelectro/management/commands/_update_catalog/update_pack.py
@@ -1,10 +1,33 @@
+"""
+Update Product.in_pack and prices.
+
+The update_catalog command always resets product prices to per unit format, so:
+1. Parse in pack quantity from Tag.name and save it to Product.in_pack
+2. Multiply product prices by in_pack value and save.
+"""
import logging
-from django.db import transaction
+from django.db import models, transaction
-from shopelectro.models import TagGroup
+from shopelectro.models import TagQuerySet, TagGroup
logger = logging.getLogger(__name__)
+PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']
+
+
+def update_in_packs(packs: TagQuerySet):
+ """Parse and save in pack quantity values."""
+ # @todo #859:60m Implement update_pack and render prices properly.
+
+
+def update_prices(packs: TagQuerySet):
+ """Multiply product prices on in pack quantity."""
+ fields_to_update = {}
+ for price in PRICES:
+ fields_to_update[price] = models.F(price) * models.F('in_pack')
+
+ with transaction.atomic():
+ packs.products().update(**fields_to_update)
def main(*args, kwargs):
@@ -14,10 +37,8 @@
logger.error(f'Couldn\'t find "Упаковка" tag group with uuid = "{uuid}".')
return
- # @todo #827:60m Update Product.in_pack and render prices properly.
-
return
+
packs = pack_group.tags.all().prefetch_related('products')
- with transaction.atomic():
- for pack in packs:
- ...
+ update_in_packs(packs)
+ update_prices(packs)
diff --git a/shopelectro/models.py b/shopelectro/models.py
--- a/shopelectro/models.py
+++ b/shopelectro/models.py
@@ -237,6 +237,13 @@
class TagQuerySet(catalog_models.TagQuerySet):
+
+ def products(self):
+ ids = Tag.objects.all().values_list('products__id', flat=True)
+ return Product.objects.filter(id__in=ids).distinct()
+
+
+class TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):
pass
@@ -244,3 +251,5 @@
group = models.ForeignKey(
TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
)
+
+ objects = TagManager()
| {"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -1,10 +1,33 @@\n+\"\"\"\n+Update Product.in_pack and prices.\n+\n+The update_catalog command always resets product prices to per unit format, so:\n+1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n+2. Multiply product prices by in_pack value and save.\n+\"\"\"\n import logging\n \n-from django.db import transaction\n+from django.db import models, transaction\n \n-from shopelectro.models import TagGroup\n+from shopelectro.models import TagQuerySet, TagGroup\n \n logger = logging.getLogger(__name__)\n+PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n+\n+\n+def update_in_packs(packs: TagQuerySet):\n+ \"\"\"Parse and save in pack quantity values.\"\"\"\n+ # @todo #859:60m Implement update_pack and render prices properly.\n+\n+\n+def update_prices(packs: TagQuerySet):\n+ \"\"\"Multiply product prices on in pack quantity.\"\"\"\n+ fields_to_update = {}\n+ for price in PRICES:\n+ fields_to_update[price] = models.F(price) * models.F('in_pack')\n+\n+ with transaction.atomic():\n+ packs.products().update(**fields_to_update)\n \n \n def main(*args, kwargs):\n@@ -14,10 +37,8 @@\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n \n- # @todo #827:60m Update Product.in_pack and render prices properly.\n-\n return\n+\n packs = pack_group.tags.all().prefetch_related('products')\n- with transaction.atomic():\n- for pack in packs:\n- ...\n+ update_in_packs(packs)\n+ update_prices(packs)\ndiff --git a/shopelectro/models.py b/shopelectro/models.py\n--- a/shopelectro/models.py\n+++ b/shopelectro/models.py\n@@ -237,6 +237,13 @@\n \n \n class TagQuerySet(catalog_models.TagQuerySet):\n+\n+ def products(self):\n+ ids = Tag.objects.all().values_list('products__id', flat=True)\n+ return Product.objects.filter(id__in=ids).distinct()\n+\n+\n+class TagManager(catalog_models.TagManager.from_queryset(TagQuerySet)):\n pass\n \n \n@@ -244,3 +251,5 @@\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n+\n+ objects = TagManager()\n", "issue": "update_pack.py:17: Update Product.in_pack and render...\nThe puzzle `827-907829af` from #827 has to be resolved: \n\nhttps://github.com/fidals/shopelectro/blob/39281ed9b9d945b4518b411769db4a3f454f2916/shopelectro/management/commands/_update_catalog/update_pack.py#L17-L17\n\nThe puzzle was created by Artemiy on 30-May-19. \n\nEstimate: 60 minutes, role: DEV. \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \\\"done\\\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html). 
\n\n", "before_files": [{"content": "import enum\nimport random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(\n catalog_models.AbstractProduct,\n catalog_models.AbstractPosition,\n pages_models.SyncPageMixin\n):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n in_pack = models.PositiveSmallIntegerField(\n default=1,\n verbose_name=_('in pack'),\n )\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).group_tags()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n 
auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\nclass ItemsEnum(enum.EnumMeta):\n \"\"\"\n Provide dict-like `items` method.\n\n https://docs.python.org/3/library/enum.html#enum-classes\n \"\"\"\n\n def items(self):\n return [(i.name, i.value) for i in self]\n\n def __repr__(self):\n fields = ', '.join(i.name for i in self)\n return f\"<enum '{self.__name__}: {fields}'>\"\n\n\nclass PaymentOptions(enum.Enum, metaclass=ItemsEnum):\n cash = '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'\n cashless = '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'\n AC = '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'\n PC = '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'\n GP = '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'\n AB = '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'\n\n @staticmethod\n def default():\n return PaymentOptions.cash\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=PaymentOptions.items(),\n default=PaymentOptions.default().name,\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_label(self):\n \"\"\"Return label for an order's payment option.\"\"\"\n return PaymentOptions[self.payment_type].value\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n", "path": "shopelectro/models.py"}, {"content": "import logging\n\nfrom django.db import 
transaction\n\nfrom shopelectro.models import TagGroup\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(*args, kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n # @todo #827:60m Update Product.in_pack and render prices properly.\n\n return\n packs = pack_group.tags.all().prefetch_related('products')\n with transaction.atomic():\n for pack in packs:\n ...\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]} | 3,269 | 632 |
gh_patches_debug_40767 | rasdani/github-patches | git_diff | run-house__runhouse-53 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider adding `-y` option to `runhouse login` CLI command
## Feature
A simple use case is logging in with a system (shell) command instead of the Python API:
```
!runhouse login [TOKEN]
```
Currently, the CLI is hardcoded with `interactive=True`:
https://github.com/run-house/runhouse/blob/560a52880a333e17e8a1aca01c1048f4527fc375/runhouse/main.py#L27
## Motivation
It's a minor quality of life improvement.
## Ideal Solution
See above
## Additional context
Excited to get Runhouse integration up on NatML 😄
</issue>
<code>
[start of runhouse/rns/login.py]
1 import logging
2 from typing import Union
3
4 import typer
5
6 from runhouse.rh_config import configs, rns_client
7
8 logger = logging.getLogger(__name__)
9
10
11 def is_interactive():
12 import __main__ as main
13
14 return not hasattr(main, "__file__")
15
16
17 def login(
18 token: str = None,
19 download_config: bool = None,
20 upload_config: bool = None,
21 download_secrets: bool = None,
22 upload_secrets: bool = None,
23 ret_token: bool = False,
24 interactive: bool = False,
25 ):
26 """Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between
27 local environment and Runhouse / Vault.
28 """
29 from runhouse import Secrets
30
31 if is_interactive() or interactive:
32 from getpass import getpass
33
34 from rich.console import Console
35
36 console = Console()
37 console.print(
38 """
39 ____ __ @ @ @
40 / __ \__ ______ / /_ ____ __ __________ []___
41 / /_/ / / / / __ \/ __ \/ __ \/ / / / ___/ _ \ / /\____ @@
42 / _, _/ /_/ / / / / / / / /_/ / /_/ (__ ) __/ /_/\_//____/\ @@@@
43 /_/ |_|\__,_/_/ /_/_/ /_/\____/\__,_/____/\___/ | || |||__||| ||
44 """
45 )
46 link = (
47 f'[link={configs.get("api_server_url")}/dashboard/?option=token]https://api.run.house[/link]'
48 if is_interactive()
49 else f'{configs.get("api_server_url")}/dashboard/?option=token'
50 )
51 console.print(
52 f"Retrieve your token :key: here to use :person_running: :house: Runhouse for "
53 f"secrets and artifact management: {link}",
54 style="bold yellow",
55 )
56 if not token:
57 token = getpass("Token: ")
58
59 download_config = (
60 download_config
61 if download_config is not None
62 else typer.confirm(
63 "Download config from Runhouse to your local .rh folder?"
64 )
65 )
66 download_secrets = (
67 download_secrets
68 if download_secrets is not None
69 else typer.confirm(
70 "Download secrets from Vault to your local Runhouse config?"
71 )
72 )
73 upload_config = (
74 upload_config
75 if upload_config is not None
76 else typer.confirm("Upload your local config to Runhouse?")
77 )
78 upload_secrets = (
79 upload_secrets
80 if upload_secrets is not None
81 else typer.confirm("Upload your enabled cloud provider secrets to Vault?")
82 )
83
84 if token:
85 configs.set("token", token)
86
87 if download_config:
88 configs.download_and_save_defaults()
89 # We need to fresh the RNSClient to use the newly loaded configs
90 rns_client.refresh_defaults()
91 elif upload_config:
92 configs.upload_defaults(defaults=configs.defaults_cache)
93 else:
94 # If we are not downloading or uploading config, we still want to make sure the token is valid
95 try:
96 configs.download_defaults()
97 except:
98 logger.error("Failed to validate token")
99 return None
100
101 if download_secrets:
102 Secrets.download_into_env()
103
104 if upload_secrets:
105 Secrets.extract_and_upload(interactive=interactive)
106
107 logger.info("Successfully logged into Runhouse.")
108 if ret_token:
109 return token
110
111
112 def logout(
113 delete_loaded_secrets: bool = None,
114 delete_rh_config_file: bool = None,
115 interactive: bool = None,
116 ):
117 """Logout from Runhouse. Provides option to delete credentials from the Runhouse config and the underlying
118 credentials file. Token is also deleted from the config.
119
120 Args:
121 delete_loaded_secrets (bool, optional): If True, deletes the provider credentials file. Defaults to None.
122 delete_rh_config_file (bool, optional): If True, deletes the rh config file. Defaults to None.
123 interactive (bool, optional): If True, runs the logout process in interactive mode. Defaults to None.
124
125 Returns:
126 None
127 """
128 from runhouse import Secrets
129
130 interactive_session: bool = (
131 interactive if interactive is not None else is_interactive()
132 )
133 for provider in Secrets.enabled_providers():
134 provider_name: str = provider.PROVIDER_NAME
135 provider_creds_path: Union[str, tuple] = provider.default_credentials_path()
136
137 if interactive_session:
138 delete_loaded_secrets = typer.confirm(
139 f"Delete credentials file for {provider_name}?"
140 )
141
142 configs.delete(provider_name)
143
144 if delete_loaded_secrets:
145 provider.delete_secrets_file(provider_creds_path)
146 logger.info(
147 f"Deleted {provider_name} credentials file from path: {provider_creds_path}"
148 )
149
150 # Delete token from rh config file
151 configs.delete(key="token")
152
153 rh_config_path = configs.CONFIG_PATH
154 if not delete_rh_config_file and interactive_session:
155 delete_rh_config_file = typer.confirm("Delete your local Runhouse config file?")
156
157 if delete_rh_config_file:
158 # Delete the credentials file on the file system
159 configs.delete_defaults(rh_config_path)
160 logger.info(f"Deleted Runhouse config file from path: {rh_config_path}")
161
162 logger.info("Successfully logged out of Runhouse.")
163
[end of runhouse/rns/login.py]
[start of runhouse/main.py]
1 import subprocess
2 import webbrowser
3 from typing import Optional
4
5 import pkg_resources
6 import typer
7 from rich.console import Console
8
9 from runhouse import cluster, configs
10 from runhouse.rns import ( # Need to rename it because it conflicts with the login command
11 login as login_module,
12 )
13
14 # create an explicit Typer application
15 app = typer.Typer(add_completion=False)
16 state = {"verbose": False}
17
18 # For printing with typer
19 console = Console()
20
21
22 @app.command()
23 def login(token: Optional[str] = typer.Argument(None, help="Your Runhouse API token")):
24 """Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between
25 local environment and Runhouse / Vault.
26 """
27 valid_token: str = login_module.login(token=token, interactive=True, ret_token=True)
28 if valid_token:
29 webbrowser.open(
30 f"{configs.get('api_server_url')}/dashboard?token={valid_token}"
31 )
32 raise typer.Exit()
33 else:
34 raise typer.Exit(code=1)
35
36
37 @app.command()
38 def logout():
39 """Logout of Runhouse. Provides options to delete locally configured secrets and local Runhouse configs"""
40 login_module.logout(interactive=True)
41 raise typer.Exit()
42
43
44 @app.command()
45 def notebook(
46 cluster_name: str, up: bool = typer.Option(False, help="Start the cluster")
47 ):
48 """Open a Jupyter notebook on a cluster."""
49 c = cluster(name=cluster_name)
50 if up:
51 c.up_if_not()
52 if not c.is_up():
53 console.print(
54 f"Cluster {cluster_name} is not up. Please run `runhouse notebook {cluster_name} --up`."
55 )
56 raise typer.Exit(1)
57 c.notebook()
58
59
60 @app.command()
61 def ssh(cluster_name: str, up: bool = typer.Option(False, help="Start the cluster")):
62 """SSH into a cluster created elsewhere (so `ssh cluster` doesn't work out of the box) or not yet up."""
63 c = cluster(name=cluster_name)
64 if up:
65 c.up_if_not()
66 if not c.is_up():
67 console.print(
68 f"Cluster {cluster_name} is not up. Please run `runhouse ssh {cluster_name} --up`."
69 )
70 raise typer.Exit(1)
71 subprocess.call(f"ssh {c.name}", shell=True)
72
73
74 @app.command()
75 def cancel(
76 cluster_name: str,
77 run_key: str,
78 force: Optional[bool] = typer.Option(False, help="Force cancel"),
79 all: Optional[bool] = typer.Option(False, help="Cancel all jobs"),
80 ):
81 """Cancel a run on a cluster."""
82 c = cluster(name=cluster_name)
83 c.cancel(run_key, force=force, all=all)
84
85
86 @app.command()
87 def logs(
88 cluster_name: str,
89 run_key: str,
90 print_results: Optional[bool] = typer.Option(False, help="Print results"),
91 ):
92 """Get logs from a run on a cluster."""
93 c = cluster(name=cluster_name)
94 res = c.get(run_key, stream_logs=True)
95 if print_results:
96 console.print(res)
97
98
99 def load_cluster(cluster_name: str):
100 """Load a cluster from RNS into the local environment, e.g. to be able to ssh."""
101 c = cluster(name=cluster_name)
102 if not c.address:
103 c.update_from_sky_status(dryrun=True)
104
105
106 @app.command()
107 def restart_grpc(
108 cluster_name: str,
109 restart_ray: bool = typer.Option(False, help="Restart the Ray runtime"),
110 resync_rh: bool = typer.Option(False, help="Resync the Runhouse package"),
111 ):
112 """Restart the gRPC server on a cluster."""
113 c = cluster(name=cluster_name)
114 c.restart_grpc_server(resync_rh=resync_rh, restart_ray=restart_ray)
115
116
117 @app.callback()
118 def main(verbose: bool = False):
119 """
120 Runhouse CLI
121 """
122 if verbose:
123 name = "runhouse"
124 version = pkg_resources.get_distribution(name).version
125 console.print(f"{name}=={version}", style="bold green")
126 state["verbose"] = True
127
[end of runhouse/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/runhouse/main.py b/runhouse/main.py
--- a/runhouse/main.py
+++ b/runhouse/main.py
@@ -20,11 +20,27 @@
@app.command()
-def login(token: Optional[str] = typer.Argument(None, help="Your Runhouse API token")):
+def login(
+ token: Optional[str] = typer.Argument(None, help="Your Runhouse API token"),
+ yes: Optional[bool] = typer.Option(
+ False, "--yes", "-y", help="Sets any confirmations to 'yes' automatically."
+ ),
+):
"""Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between
local environment and Runhouse / Vault.
"""
- valid_token: str = login_module.login(token=token, interactive=True, ret_token=True)
+ valid_token: str = (
+ login_module.login(
+ token=token,
+ download_config=True,
+ upload_config=True,
+ download_secrets=True,
+ upload_secrets=True,
+ )
+ if yes
+ else login_module.login(token=token, interactive=True, ret_token=True)
+ )
+
if valid_token:
webbrowser.open(
f"{configs.get('api_server_url')}/dashboard?token={valid_token}"
diff --git a/runhouse/rns/login.py b/runhouse/rns/login.py
--- a/runhouse/rns/login.py
+++ b/runhouse/rns/login.py
@@ -21,14 +21,24 @@
download_secrets: bool = None,
upload_secrets: bool = None,
ret_token: bool = False,
- interactive: bool = False,
+ interactive: bool = None,
):
"""Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between
local environment and Runhouse / Vault.
"""
from runhouse import Secrets
- if is_interactive() or interactive:
+ all_options_set = token and not any(
+ arg is None
+ for arg in (download_config, upload_config, download_secrets, upload_secrets)
+ )
+
+ if interactive is False and not token:
+ raise Exception(
+ "`interactive` can only be set to `False` if token is provided."
+ )
+
+ if interactive or (interactive is None and not all_options_set):
from getpass import getpass
from rich.console import Console
@@ -48,12 +58,12 @@
if is_interactive()
else f'{configs.get("api_server_url")}/dashboard/?option=token'
)
- console.print(
- f"Retrieve your token :key: here to use :person_running: :house: Runhouse for "
- f"secrets and artifact management: {link}",
- style="bold yellow",
- )
if not token:
+ console.print(
+ f"Retrieve your token :key: here to use :person_running: :house: Runhouse for "
+ f"secrets and artifact management: {link}",
+ style="bold yellow",
+ )
token = getpass("Token: ")
download_config = (
@@ -132,6 +142,8 @@
)
for provider in Secrets.enabled_providers():
provider_name: str = provider.PROVIDER_NAME
+ if provider_name == "ssh":
+ continue
provider_creds_path: Union[str, tuple] = provider.default_credentials_path()
if interactive_session:
| {"golden_diff": "diff --git a/runhouse/main.py b/runhouse/main.py\n--- a/runhouse/main.py\n+++ b/runhouse/main.py\n@@ -20,11 +20,27 @@\n \n \n @app.command()\n-def login(token: Optional[str] = typer.Argument(None, help=\"Your Runhouse API token\")):\n+def login(\n+ token: Optional[str] = typer.Argument(None, help=\"Your Runhouse API token\"),\n+ yes: Optional[bool] = typer.Option(\n+ False, \"--yes\", \"-y\", help=\"Sets any confirmations to 'yes' automatically.\"\n+ ),\n+):\n \"\"\"Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between\n local environment and Runhouse / Vault.\n \"\"\"\n- valid_token: str = login_module.login(token=token, interactive=True, ret_token=True)\n+ valid_token: str = (\n+ login_module.login(\n+ token=token,\n+ download_config=True,\n+ upload_config=True,\n+ download_secrets=True,\n+ upload_secrets=True,\n+ )\n+ if yes\n+ else login_module.login(token=token, interactive=True, ret_token=True)\n+ )\n+\n if valid_token:\n webbrowser.open(\n f\"{configs.get('api_server_url')}/dashboard?token={valid_token}\"\ndiff --git a/runhouse/rns/login.py b/runhouse/rns/login.py\n--- a/runhouse/rns/login.py\n+++ b/runhouse/rns/login.py\n@@ -21,14 +21,24 @@\n download_secrets: bool = None,\n upload_secrets: bool = None,\n ret_token: bool = False,\n- interactive: bool = False,\n+ interactive: bool = None,\n ):\n \"\"\"Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between\n local environment and Runhouse / Vault.\n \"\"\"\n from runhouse import Secrets\n \n- if is_interactive() or interactive:\n+ all_options_set = token and not any(\n+ arg is None\n+ for arg in (download_config, upload_config, download_secrets, upload_secrets)\n+ )\n+\n+ if interactive is False and not token:\n+ raise Exception(\n+ \"`interactive` can only be set to `False` if token is provided.\"\n+ )\n+\n+ if interactive or (interactive is None and not all_options_set):\n from getpass import getpass\n \n from rich.console import Console\n@@ -48,12 +58,12 @@\n if is_interactive()\n else f'{configs.get(\"api_server_url\")}/dashboard/?option=token'\n )\n- console.print(\n- f\"Retrieve your token :key: here to use :person_running: :house: Runhouse for \"\n- f\"secrets and artifact management: {link}\",\n- style=\"bold yellow\",\n- )\n if not token:\n+ console.print(\n+ f\"Retrieve your token :key: here to use :person_running: :house: Runhouse for \"\n+ f\"secrets and artifact management: {link}\",\n+ style=\"bold yellow\",\n+ )\n token = getpass(\"Token: \")\n \n download_config = (\n@@ -132,6 +142,8 @@\n )\n for provider in Secrets.enabled_providers():\n provider_name: str = provider.PROVIDER_NAME\n+ if provider_name == \"ssh\":\n+ continue\n provider_creds_path: Union[str, tuple] = provider.default_credentials_path()\n \n if interactive_session:\n", "issue": "Consider adding `-y` option to `runhouse login` CLI command\n## Feature\r\nSimple use case is logging in with `system` command instead of Python API:\r\n```\r\n!runhouse login [TOKEN]\r\n```\r\nCurrently, the CLI is hardcoded with `interactive=True`:\r\nhttps://github.com/run-house/runhouse/blob/560a52880a333e17e8a1aca01c1048f4527fc375/runhouse/main.py#L27\r\n\r\n## Motivation\r\nIt's a minor quality of life improvement.\r\n\r\n## Ideal Solution\r\nSee above\r\n\r\n## Additional context\r\nExcited to get Runhouse integration up on NatML \ud83d\ude04 \r\n\n", "before_files": [{"content": "import logging\nfrom typing import Union\n\nimport 
typer\n\nfrom runhouse.rh_config import configs, rns_client\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_interactive():\n import __main__ as main\n\n return not hasattr(main, \"__file__\")\n\n\ndef login(\n token: str = None,\n download_config: bool = None,\n upload_config: bool = None,\n download_secrets: bool = None,\n upload_secrets: bool = None,\n ret_token: bool = False,\n interactive: bool = False,\n):\n \"\"\"Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between\n local environment and Runhouse / Vault.\n \"\"\"\n from runhouse import Secrets\n\n if is_interactive() or interactive:\n from getpass import getpass\n\n from rich.console import Console\n\n console = Console()\n console.print(\n \"\"\"\n ____ __ @ @ @\n / __ \\__ ______ / /_ ____ __ __________ []___\n / /_/ / / / / __ \\/ __ \\/ __ \\/ / / / ___/ _ \\ / /\\____ @@\n / _, _/ /_/ / / / / / / / /_/ / /_/ (__ ) __/ /_/\\_//____/\\ @@@@\n /_/ |_|\\__,_/_/ /_/_/ /_/\\____/\\__,_/____/\\___/ | || |||__||| ||\n \"\"\"\n )\n link = (\n f'[link={configs.get(\"api_server_url\")}/dashboard/?option=token]https://api.run.house[/link]'\n if is_interactive()\n else f'{configs.get(\"api_server_url\")}/dashboard/?option=token'\n )\n console.print(\n f\"Retrieve your token :key: here to use :person_running: :house: Runhouse for \"\n f\"secrets and artifact management: {link}\",\n style=\"bold yellow\",\n )\n if not token:\n token = getpass(\"Token: \")\n\n download_config = (\n download_config\n if download_config is not None\n else typer.confirm(\n \"Download config from Runhouse to your local .rh folder?\"\n )\n )\n download_secrets = (\n download_secrets\n if download_secrets is not None\n else typer.confirm(\n \"Download secrets from Vault to your local Runhouse config?\"\n )\n )\n upload_config = (\n upload_config\n if upload_config is not None\n else typer.confirm(\"Upload your local config to Runhouse?\")\n )\n upload_secrets = (\n upload_secrets\n if upload_secrets is not None\n else typer.confirm(\"Upload your enabled cloud provider secrets to Vault?\")\n )\n\n if token:\n configs.set(\"token\", token)\n\n if download_config:\n configs.download_and_save_defaults()\n # We need to fresh the RNSClient to use the newly loaded configs\n rns_client.refresh_defaults()\n elif upload_config:\n configs.upload_defaults(defaults=configs.defaults_cache)\n else:\n # If we are not downloading or uploading config, we still want to make sure the token is valid\n try:\n configs.download_defaults()\n except:\n logger.error(\"Failed to validate token\")\n return None\n\n if download_secrets:\n Secrets.download_into_env()\n\n if upload_secrets:\n Secrets.extract_and_upload(interactive=interactive)\n\n logger.info(\"Successfully logged into Runhouse.\")\n if ret_token:\n return token\n\n\ndef logout(\n delete_loaded_secrets: bool = None,\n delete_rh_config_file: bool = None,\n interactive: bool = None,\n):\n \"\"\"Logout from Runhouse. Provides option to delete credentials from the Runhouse config and the underlying\n credentials file. Token is also deleted from the config.\n\n Args:\n delete_loaded_secrets (bool, optional): If True, deletes the provider credentials file. Defaults to None.\n delete_rh_config_file (bool, optional): If True, deletes the rh config file. Defaults to None.\n interactive (bool, optional): If True, runs the logout process in interactive mode. 
Defaults to None.\n\n Returns:\n None\n \"\"\"\n from runhouse import Secrets\n\n interactive_session: bool = (\n interactive if interactive is not None else is_interactive()\n )\n for provider in Secrets.enabled_providers():\n provider_name: str = provider.PROVIDER_NAME\n provider_creds_path: Union[str, tuple] = provider.default_credentials_path()\n\n if interactive_session:\n delete_loaded_secrets = typer.confirm(\n f\"Delete credentials file for {provider_name}?\"\n )\n\n configs.delete(provider_name)\n\n if delete_loaded_secrets:\n provider.delete_secrets_file(provider_creds_path)\n logger.info(\n f\"Deleted {provider_name} credentials file from path: {provider_creds_path}\"\n )\n\n # Delete token from rh config file\n configs.delete(key=\"token\")\n\n rh_config_path = configs.CONFIG_PATH\n if not delete_rh_config_file and interactive_session:\n delete_rh_config_file = typer.confirm(\"Delete your local Runhouse config file?\")\n\n if delete_rh_config_file:\n # Delete the credentials file on the file system\n configs.delete_defaults(rh_config_path)\n logger.info(f\"Deleted Runhouse config file from path: {rh_config_path}\")\n\n logger.info(\"Successfully logged out of Runhouse.\")\n", "path": "runhouse/rns/login.py"}, {"content": "import subprocess\nimport webbrowser\nfrom typing import Optional\n\nimport pkg_resources\nimport typer\nfrom rich.console import Console\n\nfrom runhouse import cluster, configs\nfrom runhouse.rns import ( # Need to rename it because it conflicts with the login command\n login as login_module,\n)\n\n# create an explicit Typer application\napp = typer.Typer(add_completion=False)\nstate = {\"verbose\": False}\n\n# For printing with typer\nconsole = Console()\n\n\[email protected]()\ndef login(token: Optional[str] = typer.Argument(None, help=\"Your Runhouse API token\")):\n \"\"\"Login to Runhouse. Validates token provided, with options to upload or download stored secrets or config between\n local environment and Runhouse / Vault.\n \"\"\"\n valid_token: str = login_module.login(token=token, interactive=True, ret_token=True)\n if valid_token:\n webbrowser.open(\n f\"{configs.get('api_server_url')}/dashboard?token={valid_token}\"\n )\n raise typer.Exit()\n else:\n raise typer.Exit(code=1)\n\n\[email protected]()\ndef logout():\n \"\"\"Logout of Runhouse. Provides options to delete locally configured secrets and local Runhouse configs\"\"\"\n login_module.logout(interactive=True)\n raise typer.Exit()\n\n\[email protected]()\ndef notebook(\n cluster_name: str, up: bool = typer.Option(False, help=\"Start the cluster\")\n):\n \"\"\"Open a Jupyter notebook on a cluster.\"\"\"\n c = cluster(name=cluster_name)\n if up:\n c.up_if_not()\n if not c.is_up():\n console.print(\n f\"Cluster {cluster_name} is not up. Please run `runhouse notebook {cluster_name} --up`.\"\n )\n raise typer.Exit(1)\n c.notebook()\n\n\[email protected]()\ndef ssh(cluster_name: str, up: bool = typer.Option(False, help=\"Start the cluster\")):\n \"\"\"SSH into a cluster created elsewhere (so `ssh cluster` doesn't work out of the box) or not yet up.\"\"\"\n c = cluster(name=cluster_name)\n if up:\n c.up_if_not()\n if not c.is_up():\n console.print(\n f\"Cluster {cluster_name} is not up. 
Please run `runhouse ssh {cluster_name} --up`.\"\n )\n raise typer.Exit(1)\n subprocess.call(f\"ssh {c.name}\", shell=True)\n\n\[email protected]()\ndef cancel(\n cluster_name: str,\n run_key: str,\n force: Optional[bool] = typer.Option(False, help=\"Force cancel\"),\n all: Optional[bool] = typer.Option(False, help=\"Cancel all jobs\"),\n):\n \"\"\"Cancel a run on a cluster.\"\"\"\n c = cluster(name=cluster_name)\n c.cancel(run_key, force=force, all=all)\n\n\[email protected]()\ndef logs(\n cluster_name: str,\n run_key: str,\n print_results: Optional[bool] = typer.Option(False, help=\"Print results\"),\n):\n \"\"\"Get logs from a run on a cluster.\"\"\"\n c = cluster(name=cluster_name)\n res = c.get(run_key, stream_logs=True)\n if print_results:\n console.print(res)\n\n\ndef load_cluster(cluster_name: str):\n \"\"\"Load a cluster from RNS into the local environment, e.g. to be able to ssh.\"\"\"\n c = cluster(name=cluster_name)\n if not c.address:\n c.update_from_sky_status(dryrun=True)\n\n\[email protected]()\ndef restart_grpc(\n cluster_name: str,\n restart_ray: bool = typer.Option(False, help=\"Restart the Ray runtime\"),\n resync_rh: bool = typer.Option(False, help=\"Resync the Runhouse package\"),\n):\n \"\"\"Restart the gRPC server on a cluster.\"\"\"\n c = cluster(name=cluster_name)\n c.restart_grpc_server(resync_rh=resync_rh, restart_ray=restart_ray)\n\n\[email protected]()\ndef main(verbose: bool = False):\n \"\"\"\n Runhouse CLI\n \"\"\"\n if verbose:\n name = \"runhouse\"\n version = pkg_resources.get_distribution(name).version\n console.print(f\"{name}=={version}\", style=\"bold green\")\n state[\"verbose\"] = True\n", "path": "runhouse/main.py"}]} | 3,470 | 781 |
gh_patches_debug_29072 | rasdani/github-patches | git_diff | evennia__evennia-2922 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG - Develop] Global Scripts loading doesn't catch parse errors
#### Describe the bug
When there's an error with loading a global scripts module, e.g. it imports the `Room` class but that class has a syntax error, the loading error isn't caught and the traceback points to [this line](https://github.com/evennia/evennia/blob/251a70275bbd98a3e157cbb4c025597a4bb24ac9/evennia/utils/containers.py#L135) with a KeyError
#### To Reproduce
Steps to reproduce the behavior:
1. Create a simple global script which imports a class such as `Room` from your game dir, and add it to `settings.py`
2. Introduce a syntax error to your `Room` class, such as an extra space in an indent.
3. Reload the server.
4. See error
#### Expected behavior
It'd be more useful if there was an error thrown which indicated that the script had failed to load (and, ideally, why), as the current error invariably leads people to think there is a problem with the global scripts dict in `settings.py`.
#### Develop-branch commit
f093c8bcb
#### Additional context
I feel like it should be catching that kind of an error in [`class_from_module`](https://github.com/evennia/evennia/blob/f093c8bcb1321a44e33003cf91cf9f565e028de7/evennia/utils/utils.py#L1524) or it should at least be throwing an exception in [`load_data`](https://github.com/evennia/evennia/blob/f093c8bcb1321a44e33003cf91cf9f565e028de7/evennia/utils/containers.py#L202) but neither seem to be doing so. Instead, they just silently don't load the script.
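For illustration, the kind of change I mean in `load_data` would be something along these lines (rough sketch only -- the exact exception handling is just my assumption, not a proposed patch):

```py
# Rough sketch: make a broken GLOBAL_SCRIPTS entry fail loudly instead of silently.
if self.typeclass_storage is None:
    self.typeclass_storage = {}
    for key, data in list(self.loaded_data.items()):
        typeclass = data.get("typeclass", settings.BASE_SCRIPT_TYPECLASS)
        try:
            self.typeclass_storage[key] = class_from_module(typeclass)
        except Exception:
            # e.g. a SyntaxError in a module the typeclass imports ends up here
            logger.log_trace(f"GLOBAL_SCRIPTS: failed to import '{typeclass}' for '{key}'.")
            raise
```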
[BUG - Develop] Evennia can no longer create global scripts from settings.py
#### Describe the bug
As of merging #2882 any attempt to create a new game with global scripts defined in settings does not create the script. It simply fails the assertion check and moves on without attempting to create them.
#### To Reproduce
Steps to reproduce the behavior:
1. Set up a new freshly-made game with `GLOBAL_SCRIPTS` defined in the settings. (I included what I used at the bottom.)
2. Attempt to start the game.
4. See error
#### Expected behavior
It's supposed to automatically create the script if it doesn't exist.
#### Develop-branch commit
b0f24f997
#### Additional context
```py
# added to typeclasses/scripts.py
from evennia.utils import logger
class TestScript(DefaultScript):
key = "global_test_script"
def at_repeat(self):
logger.log_msg("This just prints a nice message.")
```
```py
# added to server/conf/settings.py
GLOBAL_SCRIPTS = {
"global_test_script": {
"typeclass": "typeclasses.scripts.TestScript",
"persistent": True,
"interval": 60,
"desc": "A simple script to test creation.",
},
}
```
```
2022-10-08 04:42:58 [!!] Traceback (most recent call last):
2022-10-08 04:42:58 [!!] File "./evgames/evennia/evennia/utils/containers.py", line 214, in load_data
2022-10-08 04:42:58 [!!] assert issubclass(script_typeclass, _BASE_SCRIPT_TYPECLASS)
2022-10-08 04:42:58 [!!] AssertionError
2022-10-08 04:42:58 [!!] GlobalScriptContainer could not start import global script global_test_script. It will be removed (skipped).
```
</issue>
<code>
[start of evennia/utils/containers.py]
1 """
2 Containers
3
4 Containers are storage classes usually initialized from a setting. They
5 represent Singletons and acts as a convenient place to find resources (
6 available as properties on the singleton)
7
8 evennia.GLOBAL_SCRIPTS
9 evennia.OPTION_CLASSES
10
11 """
12
13
14 from pickle import dumps
15
16 from django.conf import settings
17 from django.db.utils import OperationalError, ProgrammingError
18 from evennia.utils import logger
19 from evennia.utils.utils import callables_from_module, class_from_module
20
21 SCRIPTDB = None
22 _BASE_SCRIPT_TYPECLASS = None
23
24
25 class Container:
26 """
27 Base container class. A container is simply a storage object whose
28 properties can be acquired as a property on it. This is generally
29 considered a read-only affair.
30
31 The container is initialized by a list of modules containing callables.
32
33 """
34
35 storage_modules = []
36
37 def __init__(self):
38 """
39 Read data from module.
40
41 """
42 self.loaded_data = None
43
44 def load_data(self):
45 """
46 Delayed import to avoid eventual circular imports from inside
47 the storage modules.
48
49 """
50 if self.loaded_data is None:
51 self.loaded_data = {}
52 for module in self.storage_modules:
53 self.loaded_data.update(callables_from_module(module))
54
55 def __getattr__(self, key):
56 return self.get(key)
57
58 def get(self, key, default=None):
59 """
60 Retrive data by key (in case of not knowing it beforehand).
61
62 Args:
63 key (str): The name of the script.
64 default (any, optional): Value to return if key is not found.
65
66 Returns:
67 any (any): The data loaded on this container.
68
69 """
70 self.load_data()
71 return self.loaded_data.get(key, default)
72
73 def all(self):
74 """
75 Get all stored data
76
77 Returns:
78 scripts (list): All global script objects stored on the container.
79
80 """
81 self.load_data()
82 return list(self.loaded_data.values())
83
84
85 class OptionContainer(Container):
86 """
87 Loads and stores the final list of OPTION CLASSES.
88
89 Can access these as properties or dictionary-contents.
90 """
91
92 storage_modules = settings.OPTION_CLASS_MODULES
93
94
95 class GlobalScriptContainer(Container):
96 """
97 Simple Handler object loaded by the Evennia API to contain and manage a
98 game's Global Scripts. This will list global Scripts created on their own
99 but will also auto-(re)create scripts defined in `settings.GLOBAL_SCRIPTS`.
100
101 Example:
102 import evennia
103 evennia.GLOBAL_SCRIPTS.scriptname
104
105 Note:
106 This does not use much of the BaseContainer since it's not loading
107 callables from settings but a custom dict of tuples.
108
109 """
110
111 def __init__(self):
112 """
113 Note: We must delay loading of typeclasses since this module may get
114 initialized before Scripts are actually initialized.
115
116 """
117 self.typeclass_storage = None
118 self.loaded_data = {
119 key: {} if data is None else data for key, data in settings.GLOBAL_SCRIPTS.items()
120 }
121
122 def _get_scripts(self, key=None, default=None):
123 global SCRIPTDB
124 if not SCRIPTDB:
125 from evennia.scripts.models import ScriptDB as SCRIPTDB
126 if key:
127 try:
128 return SCRIPTDB.objects.get(db_key__exact=key, db_obj__isnull=True)
129 except SCRIPTDB.DoesNotExist:
130 return default
131 else:
132 return SCRIPTDB.objects.filter(db_obj__isnull=True)
133
134 def _load_script(self, key):
135 self.load_data()
136
137 typeclass = self.typeclass_storage[key]
138 script = typeclass.objects.filter(
139 db_key=key, db_account__isnull=True, db_obj__isnull=True
140 ).first()
141
142 kwargs = {**self.loaded_data[key]}
143 kwargs["key"] = key
144 kwargs["persistent"] = kwargs.get("persistent", True)
145
146 compare_hash = str(dumps(kwargs, protocol=4))
147
148 if script:
149 script_hash = script.attributes.get("global_script_settings", category="settings_hash")
150 if script_hash is None:
151 # legacy - store the hash anew and assume no change
152 script.attributes.add(
153 "global_script_settings", compare_hash, category="settings_hash"
154 )
155 elif script_hash != compare_hash:
156 # wipe the old version and create anew
157 logger.log_info(f"GLOBAL_SCRIPTS: Settings changed for {key} ({typeclass}).")
158 script.stop()
159 script.delete()
160 script = None
161
162 if not script:
163 logger.log_info(f"GLOBAL_SCRIPTS: (Re)creating {key} ({typeclass}).")
164
165 script, errors = typeclass.create(**kwargs)
166 if errors:
167 logger.log_err("\n".join(errors))
168 return None
169
170 # store a hash representation of the setup
171 script.attributes.add("_global_script_settings", compare_hash, category="settings_hash")
172
173 return script
174
175 def start(self):
176 """
177 Called last in evennia.__init__ to initialize the container late
178 (after script typeclasses have finished loading).
179
180 We include all global scripts in the handler and
181 make sure to auto-load time-based scripts.
182
183 """
184 # populate self.typeclass_storage
185 self.load_data()
186
187 # make sure settings-defined scripts are loaded
188 for key in self.loaded_data:
189 self._load_script(key)
190 # start all global scripts
191 try:
192 for script in self._get_scripts():
193 script.start()
194 except (OperationalError, ProgrammingError):
195 # this can happen if db is not loaded yet (such as when building docs)
196 pass
197
198 def load_data(self):
199 """
200 This delayed import avoids trying to load Scripts before they are
201 initialized.
202
203 """
204 if self.loaded_data:
205 # we don't always load this, it collides with doc generation
206 global _BASE_SCRIPT_TYPECLASS
207 if not _BASE_SCRIPT_TYPECLASS:
208 _BASE_SCRIPT_TYPECLASS = class_from_module(settings.BASE_SCRIPT_TYPECLASS)
209
210 if self.typeclass_storage is None:
211 self.typeclass_storage = {}
212 for key, data in list(self.loaded_data.items()):
213 try:
214 typeclass = data.get("typeclass", settings.BASE_SCRIPT_TYPECLASS)
215 script_typeclass = class_from_module(typeclass)
216 assert issubclass(script_typeclass, _BASE_SCRIPT_TYPECLASS)
217 self.typeclass_storage[key] = script_typeclass
218 except Exception:
219 logger.log_trace(
220 f"GlobalScriptContainer could not start import global script {key}. "
221 "It will be removed (skipped)."
222 )
223 # Let's remove this key/value. We want to let other scripts load.
224 self.loaded_data.pop(key)
225
226 def get(self, key, default=None):
227 """
228 Retrive data by key (in case of not knowing it beforehand). Any
229 scripts that are in settings.GLOBAL_SCRIPTS that are not found
230 will be recreated on-demand.
231
232 Args:
233 key (str): The name of the script.
234 default (any, optional): Value to return if key is not found
235 at all on this container (i.e it cannot be loaded at all).
236
237 Returns:
238 any (any): The data loaded on this container.
239 """
240 res = self._get_scripts(key)
241 if not res:
242 if key in self.loaded_data:
243 # recreate if we have the info
244 return self._load_script(key) or default
245 return default
246 return res
247
248 def all(self):
249 """
250 Get all global scripts. Note that this will not auto-start
251 scripts defined in settings.
252
253 Returns:
254 scripts (list): All global script objects stored on the container.
255
256 """
257 self.typeclass_storage = None
258 self.load_data()
259 for key in self.loaded_data:
260 self._load_script(key)
261 return self._get_scripts(None)
262
263
264 # Create all singletons
265
266 GLOBAL_SCRIPTS = GlobalScriptContainer()
267 OPTION_CLASSES = OptionContainer()
268
[end of evennia/utils/containers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evennia/utils/containers.py b/evennia/utils/containers.py
--- a/evennia/utils/containers.py
+++ b/evennia/utils/containers.py
@@ -19,8 +19,6 @@
from evennia.utils.utils import callables_from_module, class_from_module
SCRIPTDB = None
-_BASE_SCRIPT_TYPECLASS = None
-
class Container:
"""
@@ -201,27 +199,11 @@
initialized.
"""
- if self.loaded_data:
- # we don't always load this, it collides with doc generation
- global _BASE_SCRIPT_TYPECLASS
- if not _BASE_SCRIPT_TYPECLASS:
- _BASE_SCRIPT_TYPECLASS = class_from_module(settings.BASE_SCRIPT_TYPECLASS)
-
if self.typeclass_storage is None:
self.typeclass_storage = {}
for key, data in list(self.loaded_data.items()):
- try:
- typeclass = data.get("typeclass", settings.BASE_SCRIPT_TYPECLASS)
- script_typeclass = class_from_module(typeclass)
- assert issubclass(script_typeclass, _BASE_SCRIPT_TYPECLASS)
- self.typeclass_storage[key] = script_typeclass
- except Exception:
- logger.log_trace(
- f"GlobalScriptContainer could not start import global script {key}. "
- "It will be removed (skipped)."
- )
- # Let's remove this key/value. We want to let other scripts load.
- self.loaded_data.pop(key)
+ typeclass = data.get("typeclass", settings.BASE_SCRIPT_TYPECLASS)
+ self.typeclass_storage[key] = class_from_module(typeclass, fallback=settings.BASE_SCRIPT_TYPECLASS)
def get(self, key, default=None):
"""
| {"golden_diff": "diff --git a/evennia/utils/containers.py b/evennia/utils/containers.py\n--- a/evennia/utils/containers.py\n+++ b/evennia/utils/containers.py\n@@ -19,8 +19,6 @@\n from evennia.utils.utils import callables_from_module, class_from_module\n \n SCRIPTDB = None\n-_BASE_SCRIPT_TYPECLASS = None\n-\n \n class Container:\n \"\"\"\n@@ -201,27 +199,11 @@\n initialized.\n \n \"\"\"\n- if self.loaded_data:\n- # we don't always load this, it collides with doc generation\n- global _BASE_SCRIPT_TYPECLASS\n- if not _BASE_SCRIPT_TYPECLASS:\n- _BASE_SCRIPT_TYPECLASS = class_from_module(settings.BASE_SCRIPT_TYPECLASS)\n-\n if self.typeclass_storage is None:\n self.typeclass_storage = {}\n for key, data in list(self.loaded_data.items()):\n- try:\n- typeclass = data.get(\"typeclass\", settings.BASE_SCRIPT_TYPECLASS)\n- script_typeclass = class_from_module(typeclass)\n- assert issubclass(script_typeclass, _BASE_SCRIPT_TYPECLASS)\n- self.typeclass_storage[key] = script_typeclass\n- except Exception:\n- logger.log_trace(\n- f\"GlobalScriptContainer could not start import global script {key}. \"\n- \"It will be removed (skipped).\"\n- )\n- # Let's remove this key/value. We want to let other scripts load.\n- self.loaded_data.pop(key)\n+ typeclass = data.get(\"typeclass\", settings.BASE_SCRIPT_TYPECLASS)\n+ self.typeclass_storage[key] = class_from_module(typeclass, fallback=settings.BASE_SCRIPT_TYPECLASS)\n \n def get(self, key, default=None):\n \"\"\"\n", "issue": "[BUG - Develop] Global Scripts loading doesn't catch parse errors\n#### Describe the bug\r\nWhen there's an error with loading a global scripts module, e.g. it imports the `Room` class but that class has a syntax error, the loading error isn't caught and the traceback points to [this line](https://github.com/evennia/evennia/blob/251a70275bbd98a3e157cbb4c025597a4bb24ac9/evennia/utils/containers.py#L135) with a KeyError\r\n\r\n#### To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Create a simple global script which imports a class such as `Room` from your game dir, and add it to `settings.py`\r\n2. Introduce a syntax error to your `Room` class, such as an extra space in an indent.\r\n3. Reload the server.\r\n4. See error\r\n\r\n#### Expected behavior\r\nIt'd be more useful if there was an error thrown which indicated that the script had failed being loaded (and, ideally, why), as the current error invariably leads people to think there is a problem with the global scripts dict in `settings.py`.\r\n\r\n#### Develop-branch commit\r\nf093c8bcb\r\n\r\n#### Additional context\r\nI feel like it should be catching that kind of an error in [`class_from_module`](https://github.com/evennia/evennia/blob/f093c8bcb1321a44e33003cf91cf9f565e028de7/evennia/utils/utils.py#L1524) or it should at least be throwing an exception in [`load_data`](https://github.com/evennia/evennia/blob/f093c8bcb1321a44e33003cf91cf9f565e028de7/evennia/utils/containers.py#L202) but neither seem to be doing so. Instead, they just silently don't load the script.\n[BUG - Develop] Evennia can no longer create global scripts from settings.py\n#### Describe the bug\r\nAs of merging #2882 any attempt to create a new game with global scripts defined in settings does not create the script. It simply fails the assertion check and moves on without attempting to create them.\r\n\r\n#### To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Set up a new freshly-made game with `GLOBAL_SCRIPTS` defined in the settings. (I included what I used at the bottom.)\r\n2. 
Attempt to start the game.\r\n4. See error\r\n\r\n#### Expected behavior\r\nIt's supposed to automatically create the script if it doesn't exist.\r\n\r\n#### Develop-branch commit\r\nb0f24f997\r\n\r\n#### Additional context\r\n```py\r\n# added to typeclasses/scripts.py\r\nfrom evennia.utils import logger\r\n\r\nclass TestScript(DefaultScript):\r\n key = \"global_test_script\"\r\n\r\n def at_repeat(self):\r\n logger.log_msg(\"This just prints a nice message.\")\r\n```\r\n\r\n```py\r\n# added to server/conf/settings.py\r\nGLOBAL_SCRIPTS = {\r\n \"global_test_script\": {\r\n \"typeclass\": \"typeclasses.scripts.TestScript\",\r\n \"persistent\": True,\r\n \"interval\": 60,\r\n \"desc\": \"A simple script to test creation.\",\r\n },\r\n}\r\n```\r\n\r\n```\r\n2022-10-08 04:42:58 [!!] Traceback (most recent call last):\r\n2022-10-08 04:42:58 [!!] File \"./evgames/evennia/evennia/utils/containers.py\", line 214, in load_data\r\n2022-10-08 04:42:58 [!!] assert issubclass(script_typeclass, _BASE_SCRIPT_TYPECLASS)\r\n2022-10-08 04:42:58 [!!] AssertionError\r\n2022-10-08 04:42:58 [!!] GlobalScriptContainer could not start import global script global_test_script. It will be removed (skipped).\r\n```\n", "before_files": [{"content": "\"\"\"\nContainers\n\nContainers are storage classes usually initialized from a setting. They\nrepresent Singletons and acts as a convenient place to find resources (\navailable as properties on the singleton)\n\nevennia.GLOBAL_SCRIPTS\nevennia.OPTION_CLASSES\n\n\"\"\"\n\n\nfrom pickle import dumps\n\nfrom django.conf import settings\nfrom django.db.utils import OperationalError, ProgrammingError\nfrom evennia.utils import logger\nfrom evennia.utils.utils import callables_from_module, class_from_module\n\nSCRIPTDB = None\n_BASE_SCRIPT_TYPECLASS = None\n\n\nclass Container:\n \"\"\"\n Base container class. A container is simply a storage object whose\n properties can be acquired as a property on it. This is generally\n considered a read-only affair.\n\n The container is initialized by a list of modules containing callables.\n\n \"\"\"\n\n storage_modules = []\n\n def __init__(self):\n \"\"\"\n Read data from module.\n\n \"\"\"\n self.loaded_data = None\n\n def load_data(self):\n \"\"\"\n Delayed import to avoid eventual circular imports from inside\n the storage modules.\n\n \"\"\"\n if self.loaded_data is None:\n self.loaded_data = {}\n for module in self.storage_modules:\n self.loaded_data.update(callables_from_module(module))\n\n def __getattr__(self, key):\n return self.get(key)\n\n def get(self, key, default=None):\n \"\"\"\n Retrive data by key (in case of not knowing it beforehand).\n\n Args:\n key (str): The name of the script.\n default (any, optional): Value to return if key is not found.\n\n Returns:\n any (any): The data loaded on this container.\n\n \"\"\"\n self.load_data()\n return self.loaded_data.get(key, default)\n\n def all(self):\n \"\"\"\n Get all stored data\n\n Returns:\n scripts (list): All global script objects stored on the container.\n\n \"\"\"\n self.load_data()\n return list(self.loaded_data.values())\n\n\nclass OptionContainer(Container):\n \"\"\"\n Loads and stores the final list of OPTION CLASSES.\n\n Can access these as properties or dictionary-contents.\n \"\"\"\n\n storage_modules = settings.OPTION_CLASS_MODULES\n\n\nclass GlobalScriptContainer(Container):\n \"\"\"\n Simple Handler object loaded by the Evennia API to contain and manage a\n game's Global Scripts. 
This will list global Scripts created on their own\n but will also auto-(re)create scripts defined in `settings.GLOBAL_SCRIPTS`.\n\n Example:\n import evennia\n evennia.GLOBAL_SCRIPTS.scriptname\n\n Note:\n This does not use much of the BaseContainer since it's not loading\n callables from settings but a custom dict of tuples.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Note: We must delay loading of typeclasses since this module may get\n initialized before Scripts are actually initialized.\n\n \"\"\"\n self.typeclass_storage = None\n self.loaded_data = {\n key: {} if data is None else data for key, data in settings.GLOBAL_SCRIPTS.items()\n }\n\n def _get_scripts(self, key=None, default=None):\n global SCRIPTDB\n if not SCRIPTDB:\n from evennia.scripts.models import ScriptDB as SCRIPTDB\n if key:\n try:\n return SCRIPTDB.objects.get(db_key__exact=key, db_obj__isnull=True)\n except SCRIPTDB.DoesNotExist:\n return default\n else:\n return SCRIPTDB.objects.filter(db_obj__isnull=True)\n\n def _load_script(self, key):\n self.load_data()\n\n typeclass = self.typeclass_storage[key]\n script = typeclass.objects.filter(\n db_key=key, db_account__isnull=True, db_obj__isnull=True\n ).first()\n\n kwargs = {**self.loaded_data[key]}\n kwargs[\"key\"] = key\n kwargs[\"persistent\"] = kwargs.get(\"persistent\", True)\n\n compare_hash = str(dumps(kwargs, protocol=4))\n\n if script:\n script_hash = script.attributes.get(\"global_script_settings\", category=\"settings_hash\")\n if script_hash is None:\n # legacy - store the hash anew and assume no change\n script.attributes.add(\n \"global_script_settings\", compare_hash, category=\"settings_hash\"\n )\n elif script_hash != compare_hash:\n # wipe the old version and create anew\n logger.log_info(f\"GLOBAL_SCRIPTS: Settings changed for {key} ({typeclass}).\")\n script.stop()\n script.delete()\n script = None\n\n if not script:\n logger.log_info(f\"GLOBAL_SCRIPTS: (Re)creating {key} ({typeclass}).\")\n\n script, errors = typeclass.create(**kwargs)\n if errors:\n logger.log_err(\"\\n\".join(errors))\n return None\n\n # store a hash representation of the setup\n script.attributes.add(\"_global_script_settings\", compare_hash, category=\"settings_hash\")\n\n return script\n\n def start(self):\n \"\"\"\n Called last in evennia.__init__ to initialize the container late\n (after script typeclasses have finished loading).\n\n We include all global scripts in the handler and\n make sure to auto-load time-based scripts.\n\n \"\"\"\n # populate self.typeclass_storage\n self.load_data()\n\n # make sure settings-defined scripts are loaded\n for key in self.loaded_data:\n self._load_script(key)\n # start all global scripts\n try:\n for script in self._get_scripts():\n script.start()\n except (OperationalError, ProgrammingError):\n # this can happen if db is not loaded yet (such as when building docs)\n pass\n\n def load_data(self):\n \"\"\"\n This delayed import avoids trying to load Scripts before they are\n initialized.\n\n \"\"\"\n if self.loaded_data:\n # we don't always load this, it collides with doc generation\n global _BASE_SCRIPT_TYPECLASS\n if not _BASE_SCRIPT_TYPECLASS:\n _BASE_SCRIPT_TYPECLASS = class_from_module(settings.BASE_SCRIPT_TYPECLASS)\n\n if self.typeclass_storage is None:\n self.typeclass_storage = {}\n for key, data in list(self.loaded_data.items()):\n try:\n typeclass = data.get(\"typeclass\", settings.BASE_SCRIPT_TYPECLASS)\n script_typeclass = class_from_module(typeclass)\n assert issubclass(script_typeclass, _BASE_SCRIPT_TYPECLASS)\n 
self.typeclass_storage[key] = script_typeclass\n except Exception:\n logger.log_trace(\n f\"GlobalScriptContainer could not start import global script {key}. \"\n \"It will be removed (skipped).\"\n )\n # Let's remove this key/value. We want to let other scripts load.\n self.loaded_data.pop(key)\n\n def get(self, key, default=None):\n \"\"\"\n Retrive data by key (in case of not knowing it beforehand). Any\n scripts that are in settings.GLOBAL_SCRIPTS that are not found\n will be recreated on-demand.\n\n Args:\n key (str): The name of the script.\n default (any, optional): Value to return if key is not found\n at all on this container (i.e it cannot be loaded at all).\n\n Returns:\n any (any): The data loaded on this container.\n \"\"\"\n res = self._get_scripts(key)\n if not res:\n if key in self.loaded_data:\n # recreate if we have the info\n return self._load_script(key) or default\n return default\n return res\n\n def all(self):\n \"\"\"\n Get all global scripts. Note that this will not auto-start\n scripts defined in settings.\n\n Returns:\n scripts (list): All global script objects stored on the container.\n\n \"\"\"\n self.typeclass_storage = None\n self.load_data()\n for key in self.loaded_data:\n self._load_script(key)\n return self._get_scripts(None)\n\n\n# Create all singletons\n\nGLOBAL_SCRIPTS = GlobalScriptContainer()\nOPTION_CLASSES = OptionContainer()\n", "path": "evennia/utils/containers.py"}]} | 3,859 | 380 |
gh_patches_debug_43353 | rasdani/github-patches | git_diff | conan-io__conan-center-index-3151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ixwebsocket: add 10.2.5 + several improvements
Specify library name and version: **ixwebsocket/10.2.5**
- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.
- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.
- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.
- [x] I've tried at least one configuration locally with the
[conan-center hook](https://github.com/conan-io/hooks.git) activated.
Other modifications:
- use transparent cmake integration in test_package
- optional zlib for version >= 10.1.5 (see the sketch after this list)
- don't force PIC for version >= 9.5.7
- add public definitions in cpp_info
- fix system libs on Windows
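
The "optional zlib" item above follows the usual Conan pattern of a package option plus a conditional requirement. A minimal sketch of that pattern (class and option names and the zlib version are assumed for illustration; this is not the actual recipe code):

```python
from conans import ConanFile


class IxwebsocketConan(ConanFile):
    name = "ixwebsocket"
    settings = "os", "arch", "compiler", "build_type"
    options = {"with_zlib": [True, False]}
    default_options = {"with_zlib": True}

    def requirements(self):
        # only pull in zlib when the option is enabled
        if self.options.with_zlib:
            self.requires("zlib/1.2.11")
```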
poco: fix hooks
Specify library name and version: **poco/all**
- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.
- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.
- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.
- [x] I've tried at least one configuration locally with the
[conan-center hook](https://github.com/conan-io/hooks.git) activated.
</issue>
<code>
[start of recipes/poco/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanException, ConanInvalidConfiguration
3 from collections import namedtuple, OrderedDict
4 import os
5
6
7 class PocoConan(ConanFile):
8 name = "poco"
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://pocoproject.org"
11 topics = ("conan", "poco", "building", "networking", "server", "mobile", "embedded")
12 exports_sources = "CMakeLists.txt", "patches/**"
13 generators = "cmake", "cmake_find_package"
14 settings = "os", "arch", "compiler", "build_type"
15 license = "BSL-1.0"
16 description = "Modern, powerful open source C++ class libraries for building network- and internet-based " \
17 "applications that run on desktop, server, mobile and embedded systems."
18 options = {
19 "shared": [True, False],
20 "fPIC": [True, False],
21 }
22 default_options = {
23 "shared": False,
24 "fPIC": True,
25 }
26
27 _PocoComponent = namedtuple("_PocoComponent", ("option", "default_option", "dependencies", "is_lib"))
28 _poco_component_tree = {
29 "mod_poco": _PocoComponent("enable_apacheconnector", False, ("PocoUtil", "PocoNet", ), False), # also external apr and apr-util
30 "PocoCppParser": _PocoComponent("enable_cppparser", False, ("PocoFoundation", ), False),
31 # "PocoCppUnit": _PocoComponent("enable_cppunit", False, ("PocoFoundation", ), False)),
32 "PocoCrypto": _PocoComponent("enable_crypto", True, ("PocoFoundation", ), True), # also external openssl
33 "PocoData": _PocoComponent("enable_data", True, ("PocoFoundation", ), True),
34 "PocoDataMySQL": _PocoComponent("enable_data_mysql", False, ("PocoData", ), True),
35 "PocoDataODBC": _PocoComponent("enable_data_odbc", False, ("PocoData", ), True),
36 "PocoDataPostgreSQL": _PocoComponent("enable_data_postgresql", False, ("PocoData", ), True), # also external postgresql
37 "PocoDataSQLite": _PocoComponent("enable_data_sqlite", True, ("PocoData", ), True), # also external sqlite3
38 "PocoEncodings": _PocoComponent("enable_encodings", True, ("PocoFoundation", ), True),
39 # "PocoEncodingsCompiler": _PocoComponent("enable_encodingscompiler", False, ("PocoNet", "PocoUtil", ), False),
40 "PocoFoundation": _PocoComponent(None, "PocoFoundation", (), True),
41 "PocoJSON": _PocoComponent("enable_json", True, ("PocoFoundation", ), True),
42 "PocoJWT": _PocoComponent("enable_jwt", True, ("PocoJSON", "PocoCrypto", ), True),
43 "PocoMongoDB": _PocoComponent("enable_mongodb", True, ("PocoNet", ), True),
44 "PocoNet": _PocoComponent("enable_net", True, ("PocoFoundation", ), True),
45 "PocoNetSSL": _PocoComponent("enable_netssl", True, ("PocoCrypto", "PocoUtil", "PocoNet", ), True), # also external openssl
46 "PocoNetSSLWin": _PocoComponent("enable_netssl_win", True, ("PocoNet", "PocoUtil", ), True),
47 "PocoPDF": _PocoComponent("enable_pdf", False, ("PocoXML", "PocoUtil", ), True),
48 "PocoPageCompiler": _PocoComponent("enable_pagecompiler", False, ("PocoNet", "PocoUtil", ), False),
49 "PocoFile2Page": _PocoComponent("enable_pagecompiler_file2page", False, ("PocoNet", "PocoUtil", "PocoXML", "PocoJSON", ), False),
50 "PocoPocoDoc": _PocoComponent("enable_pocodoc", False, ("PocoUtil", "PocoXML", "PocoCppParser", ), False),
51 "PocoRedis": _PocoComponent("enable_redis", True, ("PocoNet", ), True),
52 "PocoSevenZip": _PocoComponent("enable_sevenzip", False, ("PocoUtil", "PocoXML", ), True),
53 "PocoUtil": _PocoComponent("enable_util", True, ("PocoFoundation", "PocoXML", "PocoJSON", ), True),
54 "PocoXML": _PocoComponent("enable_xml", True, ("PocoFoundation", ), True),
55 "PocoZip": _PocoComponent("enable_zip", True, ("PocoUtil", "PocoXML", ), True),
56 }
57
58 for comp in _poco_component_tree.values():
59 if comp.option:
60 options[comp.option] = [True, False]
61 default_options[comp.option] = comp.default_option
62 del comp
63
64 @property
65 def _poco_ordered_components(self):
66 remaining_components = dict((compname, set(compopts.dependencies)) for compname, compopts in self._poco_component_tree.items())
67 ordered_components = []
68 while remaining_components:
69 components_no_deps = set(compname for compname, compopts in remaining_components.items() if not compopts)
70 if not components_no_deps:
71 raise ConanException("The poco dependency tree is invalid and contains a cycle")
72 for c in components_no_deps:
73 remaining_components.pop(c)
74 ordered_components.extend(components_no_deps)
75 for rname in remaining_components.keys():
76 remaining_components[rname] = remaining_components[rname].difference(components_no_deps)
77 ordered_components.reverse()
78 return ordered_components
79
80 _cmake = None
81
82 @property
83 def _source_subfolder(self):
84 return "source_subfolder"
85
86 @property
87 def _build_subfolder(self):
88 return "build_subfolder"
89
90 def source(self):
91 tools.get(**self.conan_data["sources"][self.version])
92 extracted_folder = "poco-poco-{}-release".format(self.version)
93 os.rename(extracted_folder, self._source_subfolder)
94
95 def config_options(self):
96 if self.settings.os == "Windows":
97 del self.options.fPIC
98 else:
99 del self.options.enable_netssl_win
100 if tools.Version(self.version) < "1.9":
101 del self.options.enable_encodings
102 if tools.Version(self.version) < "1.10":
103 del self.options.enable_data_postgresql
104 del self.options.enable_jwt
105
106 def configure(self):
107 if self.options.enable_apacheconnector:
108 raise ConanInvalidConfiguration("Apache connector not supported: https://github.com/pocoproject/poco/issues/1764")
109 if self.options.enable_data_mysql:
110 raise ConanInvalidConfiguration("MySQL not supported yet, open an issue here please: %s" % self.url)
111 if self.options.get_safe("enable_data_postgresql", False):
112 raise ConanInvalidConfiguration("PostgreSQL not supported yet, open an issue here please: %s" % self.url)
113 for compopt in self._poco_component_tree.values():
114 if not compopt.option:
115 continue
116 if self.options.get_safe(compopt.option, False):
117 for compdep in compopt.dependencies:
118 if not self._poco_component_tree[compdep].option:
119 continue
120 if not self.options.get_safe(self._poco_component_tree[compdep].option, False):
121 raise ConanInvalidConfiguration("option {} requires also option {}".format(compopt.option, self._poco_component_tree[compdep].option))
122
123 def requirements(self):
124 self.requires("pcre/8.41")
125 self.requires("zlib/1.2.11")
126 if self.options.enable_xml:
127 self.requires("expat/2.2.9")
128 if self.options.enable_data_sqlite:
129 self.requires("sqlite3/3.31.1")
130 if self.options.enable_apacheconnector:
131 self.requires("apr/1.7.0")
132 self.requires("apr-util/1.6.1")
133 raise ConanInvalidConfiguration("apache2 is not (yet) available on CCI")
134 self.requires("apache2/x.y.z")
135 if self.options.enable_netssl or \
136 self.options.enable_crypto or \
137 self.options.get_safe("enable_jwt", False):
138 self.requires("openssl/1.1.1g")
139
140 def _patch_sources(self):
141 for patch in self.conan_data.get("patches", {}).get(self.version, []):
142 tools.patch(**patch)
143
144 def _configure_cmake(self):
145 if self._cmake:
146 return self._cmake
147 self._cmake = CMake(self)
148 if tools.Version(self.version) < "1.10.1":
149 self._cmake.definitions["POCO_STATIC"] = not self.options.shared
150 for comp in self._poco_component_tree.values():
151 if not comp.option:
152 continue
153 self._cmake.definitions[comp.option.upper()] = self.options.get_safe(comp.option, False)
154 self._cmake.definitions["POCO_UNBUNDLED"] = True
155 self._cmake.definitions["CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP"] = True
156 if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio": # MT or MTd
157 self._cmake.definitions["POCO_MT"] = "ON" if "MT" in str(self.settings.compiler.runtime) else "OFF"
158 self.output.info(self._cmake.definitions)
159 # On Windows, Poco needs a message (MC) compiler.
160 with tools.vcvars(self.settings) if self.settings.compiler == "Visual Studio" else tools.no_op():
161 self._cmake.configure(build_dir=self._build_subfolder)
162 return self._cmake
163
164 def build(self):
165 if self.options.enable_data_sqlite:
166 if self.options["sqlite3"].threadsafe == 0:
167 raise ConanInvalidConfiguration("sqlite3 must be built with threadsafe enabled")
168 self._patch_sources()
169 cmake = self._configure_cmake()
170 cmake.build()
171
172 def package(self):
173 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
174 cmake = self._configure_cmake()
175 cmake.install()
176 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
177 tools.rmdir(os.path.join(self.package_folder, "cmake"))
178
179 @property
180 def _ordered_libs(self):
181 libs = []
182 for compname in self._poco_ordered_components:
183 comp_options = self._poco_component_tree[compname]
184 if comp_options.is_lib:
185 if not comp_options.option:
186 libs.append(compname)
187 elif self.options.get_safe(comp_options.option, False):
188 libs.append(compname)
189 return libs
190
191 def package_info(self):
192 suffix = str(self.settings.compiler.runtime).lower() \
193 if self.settings.compiler == "Visual Studio" and not self.options.shared \
194 else ("d" if self.settings.build_type == "Debug" else "")
195
196 self.cpp_info.libs = list("{}{}".format(lib, suffix) for lib in self._ordered_libs)
197
198 if self.settings.os == "Linux":
199 self.cpp_info.system_libs.extend(["pthread", "dl", "rt"])
200
201 if self.settings.compiler == "Visual Studio":
202 self.cpp_info.defines.append("POCO_NO_AUTOMATIC_LIBS")
203 if not self.options.shared:
204 self.cpp_info.defines.append("POCO_STATIC=ON")
205 if self.settings.compiler == "Visual Studio":
206 self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi", "crypt32"])
207 self.cpp_info.names["cmake_find_package"] = "Poco"
208 self.cpp_info.names["cmake_find_package_multi"] = "Poco"
209
[end of recipes/poco/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/poco/all/conanfile.py b/recipes/poco/all/conanfile.py
--- a/recipes/poco/all/conanfile.py
+++ b/recipes/poco/all/conanfile.py
@@ -54,7 +54,7 @@
"PocoXML": _PocoComponent("enable_xml", True, ("PocoFoundation", ), True),
"PocoZip": _PocoComponent("enable_zip", True, ("PocoUtil", "PocoXML", ), True),
}
-
+
for comp in _poco_component_tree.values():
if comp.option:
options[comp.option] = [True, False]
@@ -108,6 +108,9 @@
raise ConanInvalidConfiguration("Apache connector not supported: https://github.com/pocoproject/poco/issues/1764")
if self.options.enable_data_mysql:
raise ConanInvalidConfiguration("MySQL not supported yet, open an issue here please: %s" % self.url)
+ if self.settings.compiler == "Visual Studio":
+ if self.options.shared and "MT" in str(self.settings.compiler.runtime):
+ raise ConanInvalidConfiguration("Cannot build shared poco libraries with MT(d) runtime")
if self.options.get_safe("enable_data_postgresql", False):
raise ConanInvalidConfiguration("PostgreSQL not supported yet, open an issue here please: %s" % self.url)
for compopt in self._poco_component_tree.values():
@@ -124,18 +127,18 @@
self.requires("pcre/8.41")
self.requires("zlib/1.2.11")
if self.options.enable_xml:
- self.requires("expat/2.2.9")
+ self.requires("expat/2.2.10")
if self.options.enable_data_sqlite:
- self.requires("sqlite3/3.31.1")
+ self.requires("sqlite3/3.33.0")
if self.options.enable_apacheconnector:
self.requires("apr/1.7.0")
self.requires("apr-util/1.6.1")
+ # FIXME: missing apache2 recipe
raise ConanInvalidConfiguration("apache2 is not (yet) available on CCI")
- self.requires("apache2/x.y.z")
if self.options.enable_netssl or \
self.options.enable_crypto or \
self.options.get_safe("enable_jwt", False):
- self.requires("openssl/1.1.1g")
+ self.requires("openssl/1.1.1h")
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
@@ -194,7 +197,7 @@
else ("d" if self.settings.build_type == "Debug" else "")
self.cpp_info.libs = list("{}{}".format(lib, suffix) for lib in self._ordered_libs)
-
+
if self.settings.os == "Linux":
self.cpp_info.system_libs.extend(["pthread", "dl", "rt"])
@@ -202,7 +205,7 @@
self.cpp_info.defines.append("POCO_NO_AUTOMATIC_LIBS")
if not self.options.shared:
self.cpp_info.defines.append("POCO_STATIC=ON")
- if self.settings.compiler == "Visual Studio":
+ if self.settings.os == "Windows":
self.cpp_info.system_libs.extend(["ws2_32", "iphlpapi", "crypt32"])
self.cpp_info.names["cmake_find_package"] = "Poco"
self.cpp_info.names["cmake_find_package_multi"] = "Poco"
| {"golden_diff": "diff --git a/recipes/poco/all/conanfile.py b/recipes/poco/all/conanfile.py\n--- a/recipes/poco/all/conanfile.py\n+++ b/recipes/poco/all/conanfile.py\n@@ -54,7 +54,7 @@\n \"PocoXML\": _PocoComponent(\"enable_xml\", True, (\"PocoFoundation\", ), True),\n \"PocoZip\": _PocoComponent(\"enable_zip\", True, (\"PocoUtil\", \"PocoXML\", ), True),\n }\n- \n+\n for comp in _poco_component_tree.values():\n if comp.option:\n options[comp.option] = [True, False]\n@@ -108,6 +108,9 @@\n raise ConanInvalidConfiguration(\"Apache connector not supported: https://github.com/pocoproject/poco/issues/1764\")\n if self.options.enable_data_mysql:\n raise ConanInvalidConfiguration(\"MySQL not supported yet, open an issue here please: %s\" % self.url)\n+ if self.settings.compiler == \"Visual Studio\":\n+ if self.options.shared and \"MT\" in str(self.settings.compiler.runtime):\n+ raise ConanInvalidConfiguration(\"Cannot build shared poco libraries with MT(d) runtime\")\n if self.options.get_safe(\"enable_data_postgresql\", False):\n raise ConanInvalidConfiguration(\"PostgreSQL not supported yet, open an issue here please: %s\" % self.url)\n for compopt in self._poco_component_tree.values():\n@@ -124,18 +127,18 @@\n self.requires(\"pcre/8.41\")\n self.requires(\"zlib/1.2.11\")\n if self.options.enable_xml:\n- self.requires(\"expat/2.2.9\")\n+ self.requires(\"expat/2.2.10\")\n if self.options.enable_data_sqlite:\n- self.requires(\"sqlite3/3.31.1\")\n+ self.requires(\"sqlite3/3.33.0\")\n if self.options.enable_apacheconnector:\n self.requires(\"apr/1.7.0\")\n self.requires(\"apr-util/1.6.1\")\n+ # FIXME: missing apache2 recipe\n raise ConanInvalidConfiguration(\"apache2 is not (yet) available on CCI\")\n- self.requires(\"apache2/x.y.z\")\n if self.options.enable_netssl or \\\n self.options.enable_crypto or \\\n self.options.get_safe(\"enable_jwt\", False):\n- self.requires(\"openssl/1.1.1g\")\n+ self.requires(\"openssl/1.1.1h\")\n \n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n@@ -194,7 +197,7 @@\n else (\"d\" if self.settings.build_type == \"Debug\" else \"\")\n \n self.cpp_info.libs = list(\"{}{}\".format(lib, suffix) for lib in self._ordered_libs)\n- \n+\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\", \"rt\"])\n \n@@ -202,7 +205,7 @@\n self.cpp_info.defines.append(\"POCO_NO_AUTOMATIC_LIBS\")\n if not self.options.shared:\n self.cpp_info.defines.append(\"POCO_STATIC=ON\")\n- if self.settings.compiler == \"Visual Studio\":\n+ if self.settings.os == \"Windows\":\n self.cpp_info.system_libs.extend([\"ws2_32\", \"iphlpapi\", \"crypt32\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Poco\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Poco\"\n", "issue": "ixwebsocket: add 10.2.5 + several improvements\nSpecify library name and version: **ixwebsocket/10.2.5**\r\n\r\n- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.\r\n- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.\r\n- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.\r\n- [x] I've tried at least one configuration locally with the\r\n [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\r\nOthers modifications:\r\n- use transparent cmake integration in test_package\r\n- optional zlib for version >= 
10.1.5\r\n- don't force PIC for version >= 9.5.7\r\n- add public definitions in cpp_info\r\n- fix system libs on Windows\npoco: fix hooks\nSpecify library name and version: **poco/all**\r\n\r\n- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.\r\n- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.\r\n- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.\r\n- [x] I've tried at least one configuration locally with the\r\n [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\nfrom collections import namedtuple, OrderedDict\nimport os\n\n\nclass PocoConan(ConanFile):\n name = \"poco\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://pocoproject.org\"\n topics = (\"conan\", \"poco\", \"building\", \"networking\", \"server\", \"mobile\", \"embedded\")\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n license = \"BSL-1.0\"\n description = \"Modern, powerful open source C++ class libraries for building network- and internet-based \" \\\n \"applications that run on desktop, server, mobile and embedded systems.\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _PocoComponent = namedtuple(\"_PocoComponent\", (\"option\", \"default_option\", \"dependencies\", \"is_lib\"))\n _poco_component_tree = {\n \"mod_poco\": _PocoComponent(\"enable_apacheconnector\", False, (\"PocoUtil\", \"PocoNet\", ), False), # also external apr and apr-util\n \"PocoCppParser\": _PocoComponent(\"enable_cppparser\", False, (\"PocoFoundation\", ), False),\n # \"PocoCppUnit\": _PocoComponent(\"enable_cppunit\", False, (\"PocoFoundation\", ), False)),\n \"PocoCrypto\": _PocoComponent(\"enable_crypto\", True, (\"PocoFoundation\", ), True), # also external openssl\n \"PocoData\": _PocoComponent(\"enable_data\", True, (\"PocoFoundation\", ), True),\n \"PocoDataMySQL\": _PocoComponent(\"enable_data_mysql\", False, (\"PocoData\", ), True),\n \"PocoDataODBC\": _PocoComponent(\"enable_data_odbc\", False, (\"PocoData\", ), True),\n \"PocoDataPostgreSQL\": _PocoComponent(\"enable_data_postgresql\", False, (\"PocoData\", ), True), # also external postgresql\n \"PocoDataSQLite\": _PocoComponent(\"enable_data_sqlite\", True, (\"PocoData\", ), True), # also external sqlite3\n \"PocoEncodings\": _PocoComponent(\"enable_encodings\", True, (\"PocoFoundation\", ), True),\n # \"PocoEncodingsCompiler\": _PocoComponent(\"enable_encodingscompiler\", False, (\"PocoNet\", \"PocoUtil\", ), False),\n \"PocoFoundation\": _PocoComponent(None, \"PocoFoundation\", (), True),\n \"PocoJSON\": _PocoComponent(\"enable_json\", True, (\"PocoFoundation\", ), True),\n \"PocoJWT\": _PocoComponent(\"enable_jwt\", True, (\"PocoJSON\", \"PocoCrypto\", ), True),\n \"PocoMongoDB\": _PocoComponent(\"enable_mongodb\", True, (\"PocoNet\", ), True),\n \"PocoNet\": _PocoComponent(\"enable_net\", True, (\"PocoFoundation\", ), True),\n \"PocoNetSSL\": _PocoComponent(\"enable_netssl\", True, (\"PocoCrypto\", \"PocoUtil\", \"PocoNet\", ), True), # also external openssl\n 
\"PocoNetSSLWin\": _PocoComponent(\"enable_netssl_win\", True, (\"PocoNet\", \"PocoUtil\", ), True),\n \"PocoPDF\": _PocoComponent(\"enable_pdf\", False, (\"PocoXML\", \"PocoUtil\", ), True),\n \"PocoPageCompiler\": _PocoComponent(\"enable_pagecompiler\", False, (\"PocoNet\", \"PocoUtil\", ), False),\n \"PocoFile2Page\": _PocoComponent(\"enable_pagecompiler_file2page\", False, (\"PocoNet\", \"PocoUtil\", \"PocoXML\", \"PocoJSON\", ), False),\n \"PocoPocoDoc\": _PocoComponent(\"enable_pocodoc\", False, (\"PocoUtil\", \"PocoXML\", \"PocoCppParser\", ), False),\n \"PocoRedis\": _PocoComponent(\"enable_redis\", True, (\"PocoNet\", ), True),\n \"PocoSevenZip\": _PocoComponent(\"enable_sevenzip\", False, (\"PocoUtil\", \"PocoXML\", ), True),\n \"PocoUtil\": _PocoComponent(\"enable_util\", True, (\"PocoFoundation\", \"PocoXML\", \"PocoJSON\", ), True),\n \"PocoXML\": _PocoComponent(\"enable_xml\", True, (\"PocoFoundation\", ), True),\n \"PocoZip\": _PocoComponent(\"enable_zip\", True, (\"PocoUtil\", \"PocoXML\", ), True),\n }\n \n for comp in _poco_component_tree.values():\n if comp.option:\n options[comp.option] = [True, False]\n default_options[comp.option] = comp.default_option\n del comp\n\n @property\n def _poco_ordered_components(self):\n remaining_components = dict((compname, set(compopts.dependencies)) for compname, compopts in self._poco_component_tree.items())\n ordered_components = []\n while remaining_components:\n components_no_deps = set(compname for compname, compopts in remaining_components.items() if not compopts)\n if not components_no_deps:\n raise ConanException(\"The poco dependency tree is invalid and contains a cycle\")\n for c in components_no_deps:\n remaining_components.pop(c)\n ordered_components.extend(components_no_deps)\n for rname in remaining_components.keys():\n remaining_components[rname] = remaining_components[rname].difference(components_no_deps)\n ordered_components.reverse()\n return ordered_components\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_folder = \"poco-poco-{}-release\".format(self.version)\n os.rename(extracted_folder, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n else:\n del self.options.enable_netssl_win\n if tools.Version(self.version) < \"1.9\":\n del self.options.enable_encodings\n if tools.Version(self.version) < \"1.10\":\n del self.options.enable_data_postgresql\n del self.options.enable_jwt\n\n def configure(self):\n if self.options.enable_apacheconnector:\n raise ConanInvalidConfiguration(\"Apache connector not supported: https://github.com/pocoproject/poco/issues/1764\")\n if self.options.enable_data_mysql:\n raise ConanInvalidConfiguration(\"MySQL not supported yet, open an issue here please: %s\" % self.url)\n if self.options.get_safe(\"enable_data_postgresql\", False):\n raise ConanInvalidConfiguration(\"PostgreSQL not supported yet, open an issue here please: %s\" % self.url)\n for compopt in self._poco_component_tree.values():\n if not compopt.option:\n continue\n if self.options.get_safe(compopt.option, False):\n for compdep in compopt.dependencies:\n if not self._poco_component_tree[compdep].option:\n continue\n if not self.options.get_safe(self._poco_component_tree[compdep].option, False):\n raise ConanInvalidConfiguration(\"option {} requires also 
option {}\".format(compopt.option, self._poco_component_tree[compdep].option))\n\n def requirements(self):\n self.requires(\"pcre/8.41\")\n self.requires(\"zlib/1.2.11\")\n if self.options.enable_xml:\n self.requires(\"expat/2.2.9\")\n if self.options.enable_data_sqlite:\n self.requires(\"sqlite3/3.31.1\")\n if self.options.enable_apacheconnector:\n self.requires(\"apr/1.7.0\")\n self.requires(\"apr-util/1.6.1\")\n raise ConanInvalidConfiguration(\"apache2 is not (yet) available on CCI\")\n self.requires(\"apache2/x.y.z\")\n if self.options.enable_netssl or \\\n self.options.enable_crypto or \\\n self.options.get_safe(\"enable_jwt\", False):\n self.requires(\"openssl/1.1.1g\")\n\n def _patch_sources(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n if tools.Version(self.version) < \"1.10.1\":\n self._cmake.definitions[\"POCO_STATIC\"] = not self.options.shared\n for comp in self._poco_component_tree.values():\n if not comp.option:\n continue\n self._cmake.definitions[comp.option.upper()] = self.options.get_safe(comp.option, False)\n self._cmake.definitions[\"POCO_UNBUNDLED\"] = True\n self._cmake.definitions[\"CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP\"] = True\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\": # MT or MTd\n self._cmake.definitions[\"POCO_MT\"] = \"ON\" if \"MT\" in str(self.settings.compiler.runtime) else \"OFF\"\n self.output.info(self._cmake.definitions)\n # On Windows, Poco needs a message (MC) compiler.\n with tools.vcvars(self.settings) if self.settings.compiler == \"Visual Studio\" else tools.no_op():\n self._cmake.configure(build_dir=self._build_subfolder)\n return self._cmake\n\n def build(self):\n if self.options.enable_data_sqlite:\n if self.options[\"sqlite3\"].threadsafe == 0:\n raise ConanInvalidConfiguration(\"sqlite3 must be built with threadsafe enabled\")\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n\n @property\n def _ordered_libs(self):\n libs = []\n for compname in self._poco_ordered_components:\n comp_options = self._poco_component_tree[compname]\n if comp_options.is_lib:\n if not comp_options.option:\n libs.append(compname)\n elif self.options.get_safe(comp_options.option, False):\n libs.append(compname)\n return libs\n\n def package_info(self):\n suffix = str(self.settings.compiler.runtime).lower() \\\n if self.settings.compiler == \"Visual Studio\" and not self.options.shared \\\n else (\"d\" if self.settings.build_type == \"Debug\" else \"\")\n\n self.cpp_info.libs = list(\"{}{}\".format(lib, suffix) for lib in self._ordered_libs)\n \n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\", \"rt\"])\n\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"POCO_NO_AUTOMATIC_LIBS\")\n if not self.options.shared:\n self.cpp_info.defines.append(\"POCO_STATIC=ON\")\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.system_libs.extend([\"ws2_32\", \"iphlpapi\", \"crypt32\"])\n self.cpp_info.names[\"cmake_find_package\"] = \"Poco\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = 
\"Poco\"\n", "path": "recipes/poco/all/conanfile.py"}]} | 4,073 | 803 |
gh_patches_debug_25558 | rasdani/github-patches | git_diff | interlegis__sapl-2525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No anonymous access to Public Hearing accessory documents
<!--- Provide a general summary of the issue in the title above -->
## Expected Behavior
Anonymous users should be able to access the accessory documents ("Documento Acessório") of Public Hearings (Audiências Públicas).
## Current Behavior
An anonymous user cannot open the "Documento Acessório" section of a Public Hearing: a login is requested because the administrative-document visibility is set to "Restritiva" (restrictive). Public Hearings arguably should not fall under that rule.
## Steps to Reproduce (for bugs)
<!--- Provide a link to an example, or an unambiguous set of steps -->
<!--- to reproduce this bug. Include code to reproduce, if relevant. -->
1. With the Administrative Documents visibility option set to "Restritiva", while logged out: Institucional - Audiências Públicas - open a registered public hearing - click "Documento Acessório"
<!-- ## Images of the Problem -->
## Your Environment
<!--- Include relevant details about the environment in which you experienced the bug. -->
* Version used (_Release_): 3.1.143
* Browser name and version: Chrome
* Operating system name and version (desktop or mobile): Windows 10
</issue>
<code>
[start of sapl/audiencia/views.py]
1 import sapl
2
3 from django.http import HttpResponse
4 from django.core.urlresolvers import reverse
5 from django.views.decorators.clickjacking import xframe_options_exempt
6 from django.views.generic import UpdateView
7 from sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud
8
9 from .forms import AudienciaForm, AnexoAudienciaPublicaForm
10 from .models import AudienciaPublica, AnexoAudienciaPublica
11
12
13 def index(request):
14 return HttpResponse("Audiência Pública")
15
16
17 class AudienciaCrud(Crud):
18 model = AudienciaPublica
19 public = [RP_LIST, RP_DETAIL, ]
20
21 class BaseMixin(Crud.BaseMixin):
22 list_field_names = ['numero', 'nome', 'tipo', 'materia',
23 'data']
24 ordering = 'nome', 'numero', 'tipo', 'data'
25
26 class ListView(Crud.ListView):
27 paginate_by = 10
28
29 def get_context_data(self, **kwargs):
30 context = super().get_context_data(**kwargs)
31
32 audiencia_materia = {}
33 for o in context['object_list']:
34 # indexado pelo numero da audiencia
35 audiencia_materia[str(o.numero)] = o.materia
36
37 for row in context['rows']:
38 coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui
39 if coluna_materia[0]:
40 materia = audiencia_materia[row[0][0]]
41 url_materia = reverse('sapl.materia:materialegislativa_detail',
42 kwargs={'pk': materia.id})
43 row[3] = (coluna_materia[0], url_materia)
44 return context
45
46 class CreateView(Crud.CreateView):
47 form_class = AudienciaForm
48
49 def form_valid(self, form):
50 return super(Crud.CreateView, self).form_valid(form)
51
52 class UpdateView(Crud.UpdateView):
53 form_class = AudienciaForm
54
55 def get_initial(self):
56 initial = super(UpdateView, self).get_initial()
57 if self.object.materia:
58 initial['tipo_materia'] = self.object.materia.tipo.id
59 initial['numero_materia'] = self.object.materia.numero
60 initial['ano_materia'] = self.object.materia.ano
61 return initial
62
63 class DeleteView(Crud.DeleteView):
64 pass
65
66 class DetailView(Crud.DetailView):
67
68 layout_key = 'AudienciaPublicaDetail'
69
70 @xframe_options_exempt
71 def get(self, request, *args, **kwargs):
72 return super().get(request, *args, **kwargs)
73
74
75 class AudienciaPublicaMixin:
76
77 def has_permission(self):
78 app_config = sapl.base.models.AppConfig.objects.last()
79 if app_config and app_config.documentos_administrativos == 'O':
80 return True
81
82 return super().has_permission()
83
84
85 class AnexoAudienciaPublicaCrud(MasterDetailCrud):
86 model = AnexoAudienciaPublica
87 parent_field = 'audiencia'
88 help_topic = 'numeracao_docsacess'
89
90 class BaseMixin(MasterDetailCrud.BaseMixin):
91 list_field_names = ['assunto']
92
93 class CreateView(MasterDetailCrud.CreateView):
94 form_class = AnexoAudienciaPublicaForm
95 layout_key = None
96
97 class UpdateView(MasterDetailCrud.UpdateView):
98 form_class = AnexoAudienciaPublicaForm
99
100 class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView):
101
102 def get_queryset(self):
103 qs = super(MasterDetailCrud.ListView, self).get_queryset()
104 kwargs = {self.crud.parent_field: self.kwargs['pk']}
105 return qs.filter(**kwargs).order_by('-data', '-id')
106
107 class DetailView(AudienciaPublicaMixin,
108 MasterDetailCrud.DetailView):
109 pass
110
[end of sapl/audiencia/views.py]
[start of sapl/audiencia/urls.py]
1 from django.conf.urls import include, url
2 from sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud)
3
4 from .apps import AppConfig
5
6 app_name = AppConfig.name
7
8 urlpatterns = [
9 url(r'^audiencia/', include(AudienciaCrud.get_urls() +
10 AnexoAudienciaPublicaCrud.get_urls())),
11 ]
[end of sapl/audiencia/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sapl/audiencia/urls.py b/sapl/audiencia/urls.py
--- a/sapl/audiencia/urls.py
+++ b/sapl/audiencia/urls.py
@@ -1,11 +1,10 @@
from django.conf.urls import include, url
-from sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud)
+from sapl.audiencia.views import (index, AudienciaCrud, AnexoAudienciaPublicaCrud)
from .apps import AppConfig
app_name = AppConfig.name
urlpatterns = [
- url(r'^audiencia/', include(AudienciaCrud.get_urls() +
- AnexoAudienciaPublicaCrud.get_urls())),
+ url(r'^audiencia/', include(AudienciaCrud.get_urls() + AnexoAudienciaPublicaCrud.get_urls())),
]
\ No newline at end of file
diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py
--- a/sapl/audiencia/views.py
+++ b/sapl/audiencia/views.py
@@ -86,6 +86,7 @@
model = AnexoAudienciaPublica
parent_field = 'audiencia'
help_topic = 'numeracao_docsacess'
+ public = [RP_LIST, RP_DETAIL, ]
class BaseMixin(MasterDetailCrud.BaseMixin):
list_field_names = ['assunto']
@@ -104,7 +105,5 @@
kwargs = {self.crud.parent_field: self.kwargs['pk']}
return qs.filter(**kwargs).order_by('-data', '-id')
- class DetailView(AudienciaPublicaMixin,
- MasterDetailCrud.DetailView):
+ class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView):
pass
-
\ No newline at end of file
| {"golden_diff": "diff --git a/sapl/audiencia/urls.py b/sapl/audiencia/urls.py\n--- a/sapl/audiencia/urls.py\n+++ b/sapl/audiencia/urls.py\n@@ -1,11 +1,10 @@\n from django.conf.urls import include, url\n-from sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud)\n+from sapl.audiencia.views import (index, AudienciaCrud, AnexoAudienciaPublicaCrud)\n \n from .apps import AppConfig\n \n app_name = AppConfig.name\n \n urlpatterns = [\n- url(r'^audiencia/', include(AudienciaCrud.get_urls() +\n- \t\t\t\t\t\t\tAnexoAudienciaPublicaCrud.get_urls())),\n+ url(r'^audiencia/', include(AudienciaCrud.get_urls() + AnexoAudienciaPublicaCrud.get_urls())),\n ]\n\\ No newline at end of file\ndiff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py\n--- a/sapl/audiencia/views.py\n+++ b/sapl/audiencia/views.py\n@@ -86,6 +86,7 @@\n model = AnexoAudienciaPublica\n parent_field = 'audiencia'\n help_topic = 'numeracao_docsacess'\n+ public = [RP_LIST, RP_DETAIL, ]\n \n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['assunto']\n@@ -104,7 +105,5 @@\n kwargs = {self.crud.parent_field: self.kwargs['pk']}\n return qs.filter(**kwargs).order_by('-data', '-id')\n \n- class DetailView(AudienciaPublicaMixin,\n- MasterDetailCrud.DetailView):\n+ class DetailView(AudienciaPublicaMixin, MasterDetailCrud.DetailView):\n pass\n- \n\\ No newline at end of file\n", "issue": "Sem acesso an\u00f4nimo de Documento Acess\u00f3rio de Audiencia P\u00fablica \n<!--- Forne\u00e7a um resumo geral da _issue_ no t\u00edtulo acima -->\r\n\r\n## Comportamento Esperado\r\nUsu\u00e1rios an\u00f4nimos poderem acessar documento acess\u00f3rio das Audi\u00eancias P\u00fablicas \r\n\r\n## Comportamento Atual\r\nUsu\u00e1rio an\u00f4nimo n\u00e3o acessa a parte de \"Documento Acess\u00f3rio\" da Audi\u00eancia P\u00fablica pedindo um login com a aplica\u00e7\u00e3o de documento administrativo \"Restritiva\", acredito que Audi\u00eancia P\u00fablica n\u00e3o deveria se enquadrar nessa regra.\r\n\r\n## Passos para Reproduzir (para bugs)\r\n<!--- Forne\u00e7a um link para um exemplo, ou um conjunto de passos inequ\u00edvocos -->\r\n<!--- para reproduzir esse bug. Inclua c\u00f3digo para reproduzir, se relevante. -->\r\n1. Com op\u00e7\u00e3o de Visibilidade de Documentos Administrativos \"Restritiva\" -Est\u00e1 deslogado - Institucional - Audi\u00eancias P\u00fablicas - Acessar uma audi\u00eancia cadastrada - clicar em Documento Acess\u00f3rio\r\n\r\n<!-- ## Imagens do Ocorrido -->\r\n\r\n## Seu Ambiente\r\n<!--- Inclua detalhes relevantes sobre o ambiente em que voc\u00ea presenciou/experienciou o bug. 
-->\r\n* Vers\u00e3o usada (_Release_): 3.1.143\r\n* Nome e vers\u00e3o do navegador: Chrome\r\n* Nome e vers\u00e3o do Sistema Operacional (desktop ou mobile): Windows 10\r\n\n", "before_files": [{"content": "import sapl\n\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud, MasterDetailCrud\n\nfrom .forms import AudienciaForm, AnexoAudienciaPublicaForm\nfrom .models import AudienciaPublica, AnexoAudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data'] \n ordering = 'nome', 'numero', 'tipo', 'data'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n audiencia_materia = {}\n for o in context['object_list']:\n # indexado pelo numero da audiencia\n audiencia_materia[str(o.numero)] = o.materia\n\n for row in context['rows']:\n coluna_materia = row[3] # se mudar a ordem de listagem mudar aqui\n if coluna_materia[0]:\n materia = audiencia_materia[row[0][0]]\n url_materia = reverse('sapl.materia:materialegislativa_detail',\n kwargs={'pk': materia.id})\n row[3] = (coluna_materia[0], url_materia)\n return context\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n if self.object.materia:\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = self.object.materia.numero\n initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n\nclass AudienciaPublicaMixin:\n\n def has_permission(self):\n app_config = sapl.base.models.AppConfig.objects.last()\n if app_config and app_config.documentos_administrativos == 'O':\n return True\n\n return super().has_permission()\n\n\nclass AnexoAudienciaPublicaCrud(MasterDetailCrud):\n model = AnexoAudienciaPublica\n parent_field = 'audiencia'\n help_topic = 'numeracao_docsacess'\n\n class BaseMixin(MasterDetailCrud.BaseMixin):\n list_field_names = ['assunto']\n\n class CreateView(MasterDetailCrud.CreateView):\n form_class = AnexoAudienciaPublicaForm\n layout_key = None\n\n class UpdateView(MasterDetailCrud.UpdateView):\n form_class = AnexoAudienciaPublicaForm\n\n class ListView(AudienciaPublicaMixin, MasterDetailCrud.ListView):\n\n def get_queryset(self):\n qs = super(MasterDetailCrud.ListView, self).get_queryset()\n kwargs = {self.crud.parent_field: self.kwargs['pk']}\n return qs.filter(**kwargs).order_by('-data', '-id')\n\n class DetailView(AudienciaPublicaMixin,\n MasterDetailCrud.DetailView):\n pass\n ", "path": "sapl/audiencia/views.py"}, {"content": "from django.conf.urls import include, url\nfrom sapl.audiencia.views import (index, AudienciaCrud,AnexoAudienciaPublicaCrud)\n\nfrom .apps import 
AppConfig\n\napp_name = AppConfig.name\n\nurlpatterns = [\n url(r'^audiencia/', include(AudienciaCrud.get_urls() +\n \t\t\t\t\t\t\tAnexoAudienciaPublicaCrud.get_urls())),\n]", "path": "sapl/audiencia/urls.py"}]} | 2,043 | 410 |
gh_patches_debug_23485 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1954 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
selecting from datasource error
Reported by David F.
```
use Postgres_Sample;
SELECT * FROM data.insurance LIMIT 200;
```
error:
```
SQL Error [1149] [42000]: 'str' object has no attribute '__name__'
```
</issue>
<code>
[start of mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py]
1 import pandas as pd
2 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
3
4 from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
5 from mindsdb.utilities.log import log
6
7
8 class IntegrationDataNode(DataNode):
9 type = 'integration'
10
11 def __init__(self, integration_name, data_store, ds_type):
12 self.integration_name = integration_name
13 self.data_store = data_store
14 self.ds_type = ds_type
15
16 def get_type(self):
17 return self.type
18
19 def get_tables(self):
20 return []
21
22 def has_table(self, tableName):
23 return True
24
25 def get_table_columns(self, tableName):
26 return []
27
28 def select(self, query):
29 if self.ds_type in ('postgres', 'snowflake'):
30 dialect = 'postgres'
31 else:
32 dialect = 'mysql'
33 render = SqlalchemyRender(dialect)
34 try:
35 query_str = render.get_string(query, with_failback=False)
36 except Exception as e:
37 log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}")
38 query_str = render.get_string(query, with_failback=True)
39
40 dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})
41 data = dso.df.to_dict(orient='records')
42 column_names = list(dso.df.columns)
43
44 for column_name in column_names:
45 if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):
46 pass_data = dso.df[column_name].dt.to_pydatetime()
47 for i, rec in enumerate(data):
48 rec[column_name] = pass_data[i].timestamp()
49
50 if len(column_names) == 0:
51 column_names = ['dataframe_is_empty']
52
53 return data, column_names
54
[end of mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
@@ -26,16 +26,19 @@
return []
def select(self, query):
- if self.ds_type in ('postgres', 'snowflake'):
- dialect = 'postgres'
+ if isinstance(query, str):
+ query_str = query
else:
- dialect = 'mysql'
- render = SqlalchemyRender(dialect)
- try:
- query_str = render.get_string(query, with_failback=False)
- except Exception as e:
- log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}")
- query_str = render.get_string(query, with_failback=True)
+ if self.ds_type in ('postgres', 'snowflake'):
+ dialect = 'postgres'
+ else:
+ dialect = 'mysql'
+ render = SqlalchemyRender(dialect)
+ try:
+ query_str = render.get_string(query, with_failback=False)
+ except Exception as e:
+ log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}")
+ query_str = render.get_string(query, with_failback=True)
dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})
data = dso.df.to_dict(orient='records')
| {"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py\n--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py\n+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py\n@@ -26,16 +26,19 @@\n return []\n \n def select(self, query):\n- if self.ds_type in ('postgres', 'snowflake'):\n- dialect = 'postgres'\n+ if isinstance(query, str):\n+ query_str = query\n else:\n- dialect = 'mysql'\n- render = SqlalchemyRender(dialect)\n- try:\n- query_str = render.get_string(query, with_failback=False)\n- except Exception as e:\n- log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}\")\n- query_str = render.get_string(query, with_failback=True)\n+ if self.ds_type in ('postgres', 'snowflake'):\n+ dialect = 'postgres'\n+ else:\n+ dialect = 'mysql'\n+ render = SqlalchemyRender(dialect)\n+ try:\n+ query_str = render.get_string(query, with_failback=False)\n+ except Exception as e:\n+ log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}\")\n+ query_str = render.get_string(query, with_failback=True)\n \n dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})\n data = dso.df.to_dict(orient='records')\n", "issue": "selecting from datasource error\nRepoted by David F.\r\n```\r\nuse Postgres_Sample;\r\n\r\nSELECT * FROM data.insurance LIMIT 200;\r\n```\r\nerror:\r\n```\r\nSQL Error [1149] [42000]: 'str' object has no attribute '__name__'\r\n```\n", "before_files": [{"content": "import pandas as pd\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode\nfrom mindsdb.utilities.log import log\n\n\nclass IntegrationDataNode(DataNode):\n type = 'integration'\n\n def __init__(self, integration_name, data_store, ds_type):\n self.integration_name = integration_name\n self.data_store = data_store\n self.ds_type = ds_type\n\n def get_type(self):\n return self.type\n\n def get_tables(self):\n return []\n\n def has_table(self, tableName):\n return True\n\n def get_table_columns(self, tableName):\n return []\n\n def select(self, query):\n if self.ds_type in ('postgres', 'snowflake'):\n dialect = 'postgres'\n else:\n dialect = 'mysql'\n render = SqlalchemyRender(dialect)\n try:\n query_str = render.get_string(query, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}\")\n query_str = render.get_string(query, with_failback=True)\n\n dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})\n data = dso.df.to_dict(orient='records')\n column_names = list(dso.df.columns)\n\n for column_name in column_names:\n if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):\n pass_data = dso.df[column_name].dt.to_pydatetime()\n for i, rec in enumerate(data):\n rec[column_name] = pass_data[i].timestamp()\n\n if len(column_names) == 0:\n column_names = ['dataframe_is_empty']\n\n return data, column_names\n", "path": "mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py"}]} | 1,123 | 381 |
gh_patches_debug_40307 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-1946 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `strict_shape` option to CheckpointLoader
**Is your feature request related to a problem? Please describe.**
Currently, we don't support the transfer-learning case of loading a checkpoint with the same layer names but different shapes.
We can refer to the code below:
```py
model_3 = get_model_with_3_classes()
state_dict_model_4 = torch.load("best_model_4.pt")
@trainer.on(Events.STARTED, model_3, state_dict_model_4)
def permissive_model_loader(model, state_dict):
this_state_dict = model.state_dict()
matched_state_dict = {
k: v for k, v in state_dict.items()
if k in this_state_dict and v.shape == this_state_dict[k].shape
}
model.load_state_dict(matched_state_dict, strict=False)
trainer.run(...)
```
Thanks.
</issue>
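For orientation, a minimal sketch of how the requested option might be used once it exists. The constructor arguments other than `strict_shape` follow the `CheckpointLoader` listing below; `strict_shape` itself is the flag proposed in this issue, so its exact name and behaviour are assumptions rather than confirmed MONAI API:

```py
# Hypothetical usage sketch of the requested flag, not confirmed MONAI API.
loader = CheckpointLoader(
    load_path="best_model_4.pt",
    load_dict={"network": model_3},
    strict=False,        # tolerate missing/unexpected keys
    strict_shape=False,  # proposed: skip layers whose shapes differ from the checkpoint
)
loader.attach(trainer)  # loads at Events.STARTED, as the handler below does
```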
<code>
[start of monai/handlers/checkpoint_loader.py]
1 # Copyright 2020 - 2021 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import logging
13 from typing import TYPE_CHECKING, Dict, Optional
14
15 import torch
16
17 from monai.utils import exact_version, optional_import
18
19 Events, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Events")
20 Checkpoint, _ = optional_import("ignite.handlers", "0.4.4", exact_version, "Checkpoint")
21 if TYPE_CHECKING:
22 from ignite.engine import Engine
23 else:
24 Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
25
26
27 class CheckpointLoader:
28 """
29 CheckpointLoader acts as an Ignite handler to load checkpoint data from file.
30 It can load variables for network, optimizer, lr_scheduler, etc.
31 If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead
32 as PyTorch recommended and then use this loader to load the model.
33
34 Args:
35 load_path: the file path of checkpoint, it should be a PyTorch `pth` file.
36 load_dict: target objects that load checkpoint to. examples::
37
38 {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}
39
40 name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
41 map_location: when loading the module for distributed training/evaluation,
42 need to provide an appropriate map_location argument to prevent a process
43 to step into others’ devices. If map_location is missing, torch.load will
44 first load the module to CPU and then copy each parameter to where it was
45 saved, which would result in all processes on the same machine using the
46 same set of devices.
47 strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys
48 returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
49
50 """
51
52 def __init__(
53 self,
54 load_path: str,
55 load_dict: Dict,
56 name: Optional[str] = None,
57 map_location: Optional[Dict] = None,
58 strict: bool = True,
59 ) -> None:
60 if load_path is None:
61 raise AssertionError("must provide clear path to load checkpoint.")
62 self.load_path = load_path
63 if not (load_dict is not None and len(load_dict) > 0):
64 raise AssertionError("must provide target objects to load.")
65 self.logger = logging.getLogger(name)
66 self.load_dict = load_dict
67 self._name = name
68 self.map_location = map_location
69 self.strict = strict
70
71 def attach(self, engine: Engine) -> None:
72 """
73 Args:
74 engine: Ignite Engine, it can be a trainer, validator or evaluator.
75 """
76 if self._name is None:
77 self.logger = engine.logger
78 engine.add_event_handler(Events.STARTED, self)
79
80 def __call__(self, engine: Engine) -> None:
81 """
82 Args:
83 engine: Ignite Engine, it can be a trainer, validator or evaluator.
84 """
85 checkpoint = torch.load(self.load_path, map_location=self.map_location)
86
87 # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint
88 prior_max_epochs = engine.state.max_epochs
89 Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)
90 if engine.state.epoch > prior_max_epochs:
91 raise ValueError(
92 f"Epoch count ({engine.state.epoch}) in checkpoint is larger than "
93 f"the `engine.state.max_epochs` ({prior_max_epochs}) of engine. To further train from checkpoint, "
94 "construct trainer with `max_epochs` larger than checkpoint's epoch count. "
95 "To use checkpoint for inference, no need to load state_dict for the engine."
96 )
97 engine.state.max_epochs = prior_max_epochs
98
99 self.logger.info(f"Restored all variables from {self.load_path}")
100
[end of monai/handlers/checkpoint_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py
--- a/monai/handlers/checkpoint_loader.py
+++ b/monai/handlers/checkpoint_loader.py
@@ -13,6 +13,7 @@
from typing import TYPE_CHECKING, Dict, Optional
import torch
+import torch.nn as nn
from monai.utils import exact_version, optional_import
@@ -44,8 +45,12 @@
first load the module to CPU and then copy each parameter to where it was
saved, which would result in all processes on the same machine using the
same set of devices.
- strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys
- returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
+ strict: whether to strictly enforce that the keys in `state_dict` match the keys
+ returned by `torch.nn.Module.state_dict` function. default to `True`.
+ strict_shape: whether to enforce the data shape of the matched layers in the checkpoint,
+ `if `False`, it will skip the layers that have different data shape with checkpoint content.
+ This can be useful advanced feature for transfer learning. users should totally
+ understand which layers will have different shape. default to `True`.
"""
@@ -56,6 +61,7 @@
name: Optional[str] = None,
map_location: Optional[Dict] = None,
strict: bool = True,
+ strict_shape: bool = True,
) -> None:
if load_path is None:
raise AssertionError("must provide clear path to load checkpoint.")
@@ -67,6 +73,7 @@
self._name = name
self.map_location = map_location
self.strict = strict
+ self.strict_shape = strict_shape
def attach(self, engine: Engine) -> None:
"""
@@ -84,6 +91,20 @@
"""
checkpoint = torch.load(self.load_path, map_location=self.map_location)
+ if not self.strict_shape:
+ k, _ = list(self.load_dict.items())[0]
+ # single object and checkpoint is directly a state_dict
+ if len(self.load_dict) == 1 and k not in checkpoint:
+ checkpoint = {k: checkpoint}
+
+ # skip items that don't match data shape
+ for k, obj in self.load_dict.items():
+ if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
+ obj = obj.module
+ if isinstance(obj, torch.nn.Module):
+ d = obj.state_dict()
+ checkpoint[k] = {k: v for k, v in checkpoint[k].items() if k in d and v.shape == d[k].shape}
+
# save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint
prior_max_epochs = engine.state.max_epochs
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)
| {"golden_diff": "diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py\n--- a/monai/handlers/checkpoint_loader.py\n+++ b/monai/handlers/checkpoint_loader.py\n@@ -13,6 +13,7 @@\n from typing import TYPE_CHECKING, Dict, Optional\n \n import torch\n+import torch.nn as nn\n \n from monai.utils import exact_version, optional_import\n \n@@ -44,8 +45,12 @@\n first load the module to CPU and then copy each parameter to where it was\n saved, which would result in all processes on the same machine using the\n same set of devices.\n- strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys\n- returned by this module's :meth:`~torch.nn.Module.state_dict` function. Default: ``True``\n+ strict: whether to strictly enforce that the keys in `state_dict` match the keys\n+ returned by `torch.nn.Module.state_dict` function. default to `True`.\n+ strict_shape: whether to enforce the data shape of the matched layers in the checkpoint,\n+ `if `False`, it will skip the layers that have different data shape with checkpoint content.\n+ This can be useful advanced feature for transfer learning. users should totally\n+ understand which layers will have different shape. default to `True`.\n \n \"\"\"\n \n@@ -56,6 +61,7 @@\n name: Optional[str] = None,\n map_location: Optional[Dict] = None,\n strict: bool = True,\n+ strict_shape: bool = True,\n ) -> None:\n if load_path is None:\n raise AssertionError(\"must provide clear path to load checkpoint.\")\n@@ -67,6 +73,7 @@\n self._name = name\n self.map_location = map_location\n self.strict = strict\n+ self.strict_shape = strict_shape\n \n def attach(self, engine: Engine) -> None:\n \"\"\"\n@@ -84,6 +91,20 @@\n \"\"\"\n checkpoint = torch.load(self.load_path, map_location=self.map_location)\n \n+ if not self.strict_shape:\n+ k, _ = list(self.load_dict.items())[0]\n+ # single object and checkpoint is directly a state_dict\n+ if len(self.load_dict) == 1 and k not in checkpoint:\n+ checkpoint = {k: checkpoint}\n+\n+ # skip items that don't match data shape\n+ for k, obj in self.load_dict.items():\n+ if isinstance(obj, (nn.DataParallel, nn.parallel.DistributedDataParallel)):\n+ obj = obj.module\n+ if isinstance(obj, torch.nn.Module):\n+ d = obj.state_dict()\n+ checkpoint[k] = {k: v for k, v in checkpoint[k].items() if k in d and v.shape == d[k].shape}\n+\n # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint\n prior_max_epochs = engine.state.max_epochs\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)\n", "issue": "Add `strict_shape` option to CheckpointLoader\n**Is your feature request related to a problem? 
Please describe.**\r\nCurrently, we don't support the transfer-learning case that load a checkpoint with same layer names but different shape.\r\nWe can refer to below code:\r\n```py\r\nmodel_3 = get_model_with_3_classes()\r\nstate_dict_model_4 = torch.load(\"best_model_4.pt\")\r\n\r\[email protected](Events.STARTED, model_3, state_dict_model_4)\r\ndef permissive_model_loader(model, state_dict):\r\n this_state_dict = model.state_dict()\r\n matched_state_dict = {\r\n k: v for k, v in state_dict.items()\r\n if k in this_state_dict and v.shape == this_state_dict[k].shape\r\n }\r\n model.load_state_dict(matched_state_dict, strict=False)\r\n\r\ntrainer.run(...)\r\n```\r\n\r\nThanks.\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nimport torch\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"Events\")\nCheckpoint, _ = optional_import(\"ignite.handlers\", \"0.4.4\", exact_version, \"Checkpoint\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.4.4\", exact_version, \"Engine\")\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler, etc.\n If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n as PyTorch recommended and then use this loader to load the model.\n\n Args:\n load_path: the file path of checkpoint, it should be a PyTorch `pth` file.\n load_dict: target objects that load checkpoint to. examples::\n\n {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n map_location: when loading the module for distributed training/evaluation,\n need to provide an appropriate map_location argument to prevent a process\n to step into others\u2019 devices. If map_location is missing, torch.load will\n first load the module to CPU and then copy each parameter to where it was\n saved, which would result in all processes on the same machine using the\n same set of devices.\n strict: whether to strictly enforce that the keys in :attr:`state_dict` match the keys\n returned by this module's :meth:`~torch.nn.Module.state_dict` function. 
Default: ``True``\n\n \"\"\"\n\n def __init__(\n self,\n load_path: str,\n load_dict: Dict,\n name: Optional[str] = None,\n map_location: Optional[Dict] = None,\n strict: bool = True,\n ) -> None:\n if load_path is None:\n raise AssertionError(\"must provide clear path to load checkpoint.\")\n self.load_path = load_path\n if not (load_dict is not None and len(load_dict) > 0):\n raise AssertionError(\"must provide target objects to load.\")\n self.logger = logging.getLogger(name)\n self.load_dict = load_dict\n self._name = name\n self.map_location = map_location\n self.strict = strict\n\n def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n checkpoint = torch.load(self.load_path, map_location=self.map_location)\n\n # save current max epochs setting in the engine, don't overwrite it if larger than max_epochs in checkpoint\n prior_max_epochs = engine.state.max_epochs\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint, strict=self.strict)\n if engine.state.epoch > prior_max_epochs:\n raise ValueError(\n f\"Epoch count ({engine.state.epoch}) in checkpoint is larger than \"\n f\"the `engine.state.max_epochs` ({prior_max_epochs}) of engine. To further train from checkpoint, \"\n \"construct trainer with `max_epochs` larger than checkpoint's epoch count. \"\n \"To use checkpoint for inference, no need to load state_dict for the engine.\"\n )\n engine.state.max_epochs = prior_max_epochs\n\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "path": "monai/handlers/checkpoint_loader.py"}]} | 1,892 | 680 |
gh_patches_debug_22520 | rasdani/github-patches | git_diff | fossasia__open-event-server-5102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use identifier instead of id to send order receipts
**Is your feature request related to a problem? Please describe.**
Currently, the endpoint to send order receipts uses order ids, but purchasers do not know about order ids, instead they know of order identifiers. Thus, it's more appropriate to use order identifiers instead of ids in that endpoint.
**Describe the solution you'd like**
Use order identifiers instead of order ids in the receipt endpoint.
</issue>
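As a rough sketch of the requested change (the names `db`, `Order`, `NoResultFound` and `ObjectNotFound` are taken from the module listing below; the `order-identifier` request key is an assumption mirroring the existing `order-id` key), the lookup would move from the numeric primary key to the public identifier:

```py
# Sketch only, not a drop-in patch for app/api/attendees.py.
order_identifier = request.json.get('order-identifier')  # public identifier, not the numeric id
try:
    order = db.session.query(Order).filter_by(identifier=order_identifier).one()
except NoResultFound:
    raise ObjectNotFound({'parameter': '{identifier}'}, "Order not found")
```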
<code>
[start of app/api/attendees.py]
1 from flask import Blueprint, request, jsonify, abort, make_response
2 from flask_jwt import current_identity, jwt_required
3 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
4 from flask_rest_jsonapi.exceptions import ObjectNotFound
5 from sqlalchemy.orm.exc import NoResultFound
6
7 from app.api.bootstrap import api
8 from app.api.helpers.db import safe_query, get_count
9 from app.api.helpers.exceptions import (
10 ConflictException,
11 ForbiddenException,
12 UnprocessableEntity,
13 )
14 from app.api.helpers.mail import send_email_to_attendees
15 from app.api.helpers.permission_manager import has_access
16 from app.api.helpers.permissions import jwt_required
17 from app.api.helpers.query import event_query
18 from app.api.helpers.utilities import require_relationship
19 from app.api.schema.attendees import AttendeeSchema
20 from app.models import db
21 from app.models.order import Order
22 from app.models.ticket import Ticket
23 from app.models.ticket_holder import TicketHolder
24 from app.models.user import User
25
26 attendee_misc_routes = Blueprint('attendee_misc', __name__, url_prefix='/v1')
27
28 class AttendeeListPost(ResourceList):
29 """
30 List and create Attendees through direct URL
31 """
32
33 def before_post(self, args, kwargs, data):
34 """
35 Before post method to check for required relationship and proper permissions
36 :param args:
37 :param kwargs:
38 :param data:
39 :return:
40 """
41 require_relationship(['ticket', 'event'], data)
42
43 ticket = db.session.query(Ticket).filter_by(
44 id=int(data['ticket']), deleted_at=None
45 ).first()
46 if ticket is None:
47 raise UnprocessableEntity(
48 {'pointer': '/data/relationships/ticket'}, "Invalid Ticket"
49 )
50 if ticket.event_id != int(data['event']):
51 raise UnprocessableEntity(
52 {'pointer': '/data/relationships/ticket'},
53 "Ticket belongs to a different Event"
54 )
55 # Check if the ticket is already sold out or not.
56 if get_count(db.session.query(TicketHolder.id).
57 filter_by(ticket_id=int(data['ticket']), deleted_at=None)) >= ticket.quantity:
58 raise ConflictException(
59 {'pointer': '/data/attributes/ticket_id'},
60 "Ticket already sold out"
61 )
62
63 decorators = (jwt_required,)
64 methods = ['POST']
65 schema = AttendeeSchema
66 data_layer = {'session': db.session,
67 'model': TicketHolder}
68
69
70 class AttendeeList(ResourceList):
71 """
72 List Attendees
73 """
74 def query(self, view_kwargs):
75 """
76 query method for Attendees List
77 :param view_kwargs:
78 :return:
79 """
80 query_ = self.session.query(TicketHolder)
81
82 if view_kwargs.get('order_identifier'):
83 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
84 if not has_access('is_registrar', event_id=order.event_id) and not has_access('is_user_itself',
85 user_id=order.user_id):
86 raise ForbiddenException({'source': ''}, 'Access Forbidden')
87 query_ = query_.join(Order).filter(Order.id == order.id)
88
89 if view_kwargs.get('ticket_id'):
90 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')
91 if not has_access('is_registrar', event_id=ticket.event_id):
92 raise ForbiddenException({'source': ''}, 'Access Forbidden')
93 query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)
94
95 if view_kwargs.get('user_id'):
96 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
97 if not has_access('is_user_itself', user_id=user.id):
98 raise ForbiddenException({'source': ''}, 'Access Forbidden')
99 query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)
100
101 query_ = event_query(self, query_, view_kwargs, permission='is_registrar')
102 return query_
103
104 view_kwargs = True
105 methods = ['GET', ]
106 schema = AttendeeSchema
107 data_layer = {'session': db.session,
108 'model': TicketHolder,
109 'methods': {
110 'query': query
111 }}
112
113
114 class AttendeeDetail(ResourceDetail):
115 """
116 Attendee detail by id
117 """
118 def before_get_object(self, view_kwargs):
119 """
120 before get object method for attendee detail
121 :param view_kwargs:
122 :return:
123 """
124 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')
125 if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):
126 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
127
128 def before_delete_object(self, obj, kwargs):
129 """
130 before delete object method for attendee detail
131 :param obj:
132 :param kwargs:
133 :return:
134 """
135 if not has_access('is_registrar', event_id=obj.event_id):
136 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
137
138 def before_update_object(self, obj, data, kwargs):
139 """
140 before update object method for attendee detail
141 :param obj:
142 :param data:
143 :param kwargs:
144 :return:
145 """
146 if not has_access('is_registrar', event_id=obj.event_id):
147 raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')
148
149 if 'is_checked_in' in data and data['is_checked_in']:
150 if 'checkin_times' not in data:
151 raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},
152 "Check in time missing while trying to check in attendee")
153 else:
154 if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(","):
155 data['checkin_times'] = '{},{}'.format(obj.checkin_times, data['checkin_times'])
156
157 if 'attendee_notes' in data:
158 if obj.attendee_notes and data['attendee_notes'] not in obj.attendee_notes.split(","):
159 data['attendee_notes'] = '{},{}'.format(obj.attendee_notes, data['attendee_notes'])
160
161 decorators = (jwt_required,)
162 schema = AttendeeSchema
163 data_layer = {'session': db.session,
164 'model': TicketHolder,
165 'methods': {
166 'before_get_object': before_get_object,
167 'before_update_object': before_update_object,
168 'before_delete_object': before_delete_object
169 }}
170
171
172 class AttendeeRelationshipRequired(ResourceRelationship):
173 """
174 Attendee Relationship (Required)
175 """
176 decorators = (jwt_required,)
177 methods = ['GET', 'PATCH']
178 schema = AttendeeSchema
179 data_layer = {'session': db.session,
180 'model': TicketHolder}
181
182
183 class AttendeeRelationshipOptional(ResourceRelationship):
184 """
185 Attendee Relationship(Optional)
186 """
187 decorators = (api.has_permission('is_user_itself', fetch="user_id", fetch_as="id", model=TicketHolder),)
188 schema = AttendeeSchema
189 data_layer = {'session': db.session,
190 'model': TicketHolder}
191
192
193 @attendee_misc_routes.route('/attendees/send-receipt', methods=['POST'])
194 @jwt_required
195 def send_receipt():
196 order_id = request.json.get('order-id')
197 if order_id:
198 try:
199 order = db.session.query(Order).filter_by(id=int(order_id)).one()
200 except NoResultFound:
201 raise ObjectNotFound({'parameter': '{id}'}, "Order not found")
202
203 if order.user_id != current_identity.id:
204 abort(
205 make_response(jsonify(error="You cannot send reciept for an order not created by you"), 403)
206 )
207 elif order.status != 'completed':
208 abort(
209 make_response(jsonify(error="Cannot send receipt for an incomplete order"), 409)
210 )
211 else:
212 send_email_to_attendees(order, current_identity.id)
213 return jsonify(message="receipt sent to attendees")
214 else:
215 abort(
216 make_response(jsonify(error="Order id missing"), 422)
217 )
218
[end of app/api/attendees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/attendees.py b/app/api/attendees.py
--- a/app/api/attendees.py
+++ b/app/api/attendees.py
@@ -193,12 +193,14 @@
@attendee_misc_routes.route('/attendees/send-receipt', methods=['POST'])
@jwt_required
def send_receipt():
- order_id = request.json.get('order-id')
- if order_id:
+ # Function to send receipts to attendees related to the provided order.
+
+ order_identifier = request.json.get('order-identifier')
+ if order_identifier:
try:
- order = db.session.query(Order).filter_by(id=int(order_id)).one()
+ order = db.session.query(Order).filter_by(identifier=order_identifier).one()
except NoResultFound:
- raise ObjectNotFound({'parameter': '{id}'}, "Order not found")
+ raise ObjectNotFound({'parameter': '{identifier}'}, "Order not found")
if order.user_id != current_identity.id:
abort(
@@ -213,5 +215,5 @@
return jsonify(message="receipt sent to attendees")
else:
abort(
- make_response(jsonify(error="Order id missing"), 422)
+ make_response(jsonify(error="Order identifier missing"), 422)
)
| {"golden_diff": "diff --git a/app/api/attendees.py b/app/api/attendees.py\n--- a/app/api/attendees.py\n+++ b/app/api/attendees.py\n@@ -193,12 +193,14 @@\n @attendee_misc_routes.route('/attendees/send-receipt', methods=['POST'])\n @jwt_required\n def send_receipt():\n- order_id = request.json.get('order-id')\n- if order_id:\n+ # Function to send receipts to attendees related to the provided order.\n+\n+ order_identifier = request.json.get('order-identifier')\n+ if order_identifier:\n try:\n- order = db.session.query(Order).filter_by(id=int(order_id)).one()\n+ order = db.session.query(Order).filter_by(identifier=order_identifier).one()\n except NoResultFound:\n- raise ObjectNotFound({'parameter': '{id}'}, \"Order not found\")\n+ raise ObjectNotFound({'parameter': '{identifier}'}, \"Order not found\")\n \n if order.user_id != current_identity.id:\n abort(\n@@ -213,5 +215,5 @@\n return jsonify(message=\"receipt sent to attendees\")\n else:\n abort(\n- make_response(jsonify(error=\"Order id missing\"), 422)\n+ make_response(jsonify(error=\"Order identifier missing\"), 422)\n )\n", "issue": "Use identifier instead of id to send order receipts\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\nCurrently, the endpoint to send order receipts uses order ids, but purchasers do not know about order ids, instead they know of order identifiers. Thus, it's more appropriate to use order identifiers instead of ids in that endpoint.\r\n\r\n**Describe the solution you'd like**\r\nUse order identifiers instead of order endpoints.\n", "before_files": [{"content": "from flask import Blueprint, request, jsonify, abort, make_response\nfrom flask_jwt import current_identity, jwt_required\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query, get_count\nfrom app.api.helpers.exceptions import (\n ConflictException,\n ForbiddenException,\n UnprocessableEntity,\n)\nfrom app.api.helpers.mail import send_email_to_attendees\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.permissions import jwt_required\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.attendees import AttendeeSchema\nfrom app.models import db\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_holder import TicketHolder\nfrom app.models.user import User\n\nattendee_misc_routes = Blueprint('attendee_misc', __name__, url_prefix='/v1')\n\nclass AttendeeListPost(ResourceList):\n \"\"\"\n List and create Attendees through direct URL\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n Before post method to check for required relationship and proper permissions\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['ticket', 'event'], data)\n\n ticket = db.session.query(Ticket).filter_by(\n id=int(data['ticket']), deleted_at=None\n ).first()\n if ticket is None:\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/ticket'}, \"Invalid Ticket\"\n )\n if ticket.event_id != int(data['event']):\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/ticket'},\n \"Ticket belongs to a different Event\"\n )\n # 
Check if the ticket is already sold out or not.\n if get_count(db.session.query(TicketHolder.id).\n filter_by(ticket_id=int(data['ticket']), deleted_at=None)) >= ticket.quantity:\n raise ConflictException(\n {'pointer': '/data/attributes/ticket_id'},\n \"Ticket already sold out\"\n )\n\n decorators = (jwt_required,)\n methods = ['POST']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeList(ResourceList):\n \"\"\"\n List Attendees\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for Attendees List\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketHolder)\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n if not has_access('is_registrar', event_id=order.event_id) and not has_access('is_user_itself',\n user_id=order.user_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Order).filter(Order.id == order.id)\n\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n if not has_access('is_registrar', event_id=ticket.event_id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(Ticket).filter(Ticket.id == ticket.id)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n if not has_access('is_user_itself', user_id=user.id):\n raise ForbiddenException({'source': ''}, 'Access Forbidden')\n query_ = query_.join(User, User.email == TicketHolder.email).filter(User.id == user.id)\n\n query_ = event_query(self, query_, view_kwargs, permission='is_registrar')\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'query': query\n }}\n\n\nclass AttendeeDetail(ResourceDetail):\n \"\"\"\n Attendee detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method for attendee detail\n :param view_kwargs:\n :return:\n \"\"\"\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['id'], 'attendee_id')\n if not has_access('is_registrar_or_user_itself', user_id=current_identity.id, event_id=attendee.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_delete_object(self, obj, kwargs):\n \"\"\"\n before delete object method for attendee detail\n :param obj:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n def before_update_object(self, obj, data, kwargs):\n \"\"\"\n before update object method for attendee detail\n :param obj:\n :param data:\n :param kwargs:\n :return:\n \"\"\"\n if not has_access('is_registrar', event_id=obj.event_id):\n raise ForbiddenException({'source': 'User'}, 'You are not authorized to access this.')\n\n if 'is_checked_in' in data and data['is_checked_in']:\n if 'checkin_times' not in data:\n raise UnprocessableEntity({'pointer': '/data/attributes/checkin_times'},\n \"Check in time missing while trying to check in attendee\")\n else:\n if obj.checkin_times and data['checkin_times'] not in obj.checkin_times.split(\",\"):\n data['checkin_times'] = '{},{}'.format(obj.checkin_times, data['checkin_times'])\n\n if 'attendee_notes' in data:\n if 
obj.attendee_notes and data['attendee_notes'] not in obj.attendee_notes.split(\",\"):\n data['attendee_notes'] = '{},{}'.format(obj.attendee_notes, data['attendee_notes'])\n\n decorators = (jwt_required,)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object,\n 'before_delete_object': before_delete_object\n }}\n\n\nclass AttendeeRelationshipRequired(ResourceRelationship):\n \"\"\"\n Attendee Relationship (Required)\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\nclass AttendeeRelationshipOptional(ResourceRelationship):\n \"\"\"\n Attendee Relationship(Optional)\n \"\"\"\n decorators = (api.has_permission('is_user_itself', fetch=\"user_id\", fetch_as=\"id\", model=TicketHolder),)\n schema = AttendeeSchema\n data_layer = {'session': db.session,\n 'model': TicketHolder}\n\n\n@attendee_misc_routes.route('/attendees/send-receipt', methods=['POST'])\n@jwt_required\ndef send_receipt():\n order_id = request.json.get('order-id')\n if order_id:\n try:\n order = db.session.query(Order).filter_by(id=int(order_id)).one()\n except NoResultFound:\n raise ObjectNotFound({'parameter': '{id}'}, \"Order not found\")\n\n if order.user_id != current_identity.id:\n abort(\n make_response(jsonify(error=\"You cannot send reciept for an order not created by you\"), 403)\n )\n elif order.status != 'completed':\n abort(\n make_response(jsonify(error=\"Cannot send receipt for an incomplete order\"), 409)\n )\n else:\n send_email_to_attendees(order, current_identity.id)\n return jsonify(message=\"receipt sent to attendees\")\n else:\n abort(\n make_response(jsonify(error=\"Order id missing\"), 422)\n )\n", "path": "app/api/attendees.py"}]} | 2,942 | 289 |
gh_patches_debug_23199 | rasdani/github-patches | git_diff | great-expectations__great_expectations-7252 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
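A minimal illustration of the preference stated above, plain Python with no project-specific code:

```py
# With the future import, "/" is true division even on Python 2,
# so the 1.*x/y workaround is unnecessary.
from __future__ import division


def ratio(x, y):
    return x / y


assert ratio(5, 2) == 2.5
```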
<code>
[start of contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py]
1 from typing import Optional
2
3 import numpy as np
4
5 from great_expectations.core.expectation_configuration import ExpectationConfiguration
6 from great_expectations.execution_engine import PandasExecutionEngine
7 from great_expectations.expectations.expectation import MulticolumnMapExpectation
8 from great_expectations.expectations.metrics.map_metric_provider import (
9 MulticolumnMapMetricProvider,
10 multicolumn_condition_partial,
11 )
12
13
14 # This class defines a Metric to support your Expectation.
15 # For most MulticolumnMapExpectations, the main business logic for calculation will live in this class.
16 class MulticolumnValuesNotAllNull(MulticolumnMapMetricProvider):
17
18 # This is the id string that will be used to reference your metric.
19
20 condition_metric_name = "multicolumn_values.not_all_null"
21
22 # These point your metric at the provided keys to facilitate calculation
23 condition_domain_keys = (
24 "batch_id",
25 "table",
26 "column_list",
27 "row_condition",
28 "condition_parser",
29 "ignore_row_if",
30 )
31 condition_value_keys = ()
32
33 # This method implements the core logic for the PandasExecutionEngine
34
35 @multicolumn_condition_partial(engine=PandasExecutionEngine)
36 def _pandas(cls, column_list, **kwargs):
37 row_wise_cond = column_list.isna().sum(axis=1) < len(column_list)
38 return row_wise_cond
39
40 # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
41 # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
42 # def _sqlalchemy(cls, column_list, **kwargs):
43 # raise NotImplementedError
44
45 # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
46 # @multicolumn_condition_partial(engine=SparkDFExecutionEngine)
47 # def _spark(cls, column_list, **kwargs):
48 # raise NotImplementedError
49
50
51 # This class defines the Expectation itself
52 class ExpectMulticolumnValuesNotToBeAllNull(MulticolumnMapExpectation):
53 """Expect the certain set of columns not to be null at the same time."""
54
55 # These examples will be shown in the public gallery.
56 # They will also be executed as unit tests for your Expectation.
57 examples = [
58 {
59 "data": {
60 "no_nulls": [5, 6, 5, 12, -3],
61 "some_nulls": [np.nan, -3, np.nan, np.nan, -9],
62 "one_non_null": [np.nan, 2, np.nan, np.nan, np.nan],
63 "all_nulls": [np.nan, np.nan, np.nan, np.nan, np.nan],
64 },
65 "tests": [
66 {
67 "title": "basic_positive_test",
68 "exact_match_out": False,
69 "include_in_gallery": True,
70 "in": {"column_list": ["no_nulls", "some_nulls"]},
71 "out": {
72 "success": True,
73 },
74 },
75 {
76 "title": "basic_positive_test",
77 "exact_match_out": False,
78 "include_in_gallery": True,
79 "in": {
80 "column_list": ["some_nulls", "one_non_null"],
81 "mostly": 0.4,
82 },
83 "out": {
84 "success": True,
85 },
86 },
87 {
88 "title": "basic_negative_test",
89 "exact_match_out": False,
90 "include_in_gallery": True,
91 "in": {
92 "column_list": ["some_nulls", "one_non_null", "all_nulls"],
93 "mostly": 1,
94 },
95 "out": {
96 "success": False,
97 },
98 },
99 ],
100 "test_backends": [
101 {
102 "backend": "pandas",
103 "dialects": None,
104 },
105 ],
106 }
107 ]
108
109 # This is the id string of the Metric used by this Expectation.
110 # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
111
112 map_metric = "multicolumn_values.not_all_null"
113
114 # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
115 success_keys = (
116 "column_list",
117 "mostly",
118 )
119
120 # This dictionary contains default values for any parameters that should have default values
121 default_kwarg_values = {}
122
123 def validate_configuration(
124 self, configuration: Optional[ExpectationConfiguration] = None
125 ) -> None:
126 """
127 Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
128 necessary configuration arguments have been provided for the validation of the expectation.
129
130 Args:
131 configuration (OPTIONAL[ExpectationConfiguration]): \
132 An optional Expectation Configuration entry that will be used to configure the expectation
133 Returns:
134 None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
135 """
136
137 super().validate_configuration(configuration)
138 configuration = configuration or self.configuration
139
140 # # Check other things in configuration.kwargs and raise Exceptions if needed
141 # try:
142 # assert (
143 # ...
144 # ), "message"
145 # assert (
146 # ...
147 # ), "message"
148 # except AssertionError as e:
149 # raise InvalidExpectationConfigurationError(str(e))
150
151 # This object contains metadata for display in the public Gallery
152
153 library_metadata = {
154 "tags": ["null_check"], # Tags for this Expectation in the Gallery
155 "contributors": [ # Github handles for all contributors to this Expectation.
156 "@liyusa", # Don't forget to add your github handle here!
157 ],
158 }
159
160
161 if __name__ == "__main__":
162
163 ExpectMulticolumnValuesNotToBeAllNull().print_diagnostic_checklist()
164
[end of contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py
--- a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py
+++ b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py
@@ -34,8 +34,7 @@
@multicolumn_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_list, **kwargs):
- row_wise_cond = column_list.isna().sum(axis=1) < len(column_list)
- return row_wise_cond
+ return column_list.notna().any(axis=1)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
@@ -118,7 +117,7 @@
)
# This dictionary contains default values for any parameters that should have default values
- default_kwarg_values = {}
+ default_kwarg_values = {"ignore_row_if": "never"}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
| {"golden_diff": "diff --git a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py\n--- a/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py\n+++ b/contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py\n@@ -34,8 +34,7 @@\n \n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n- row_wise_cond = column_list.isna().sum(axis=1) < len(column_list)\n- return row_wise_cond\n+ return column_list.notna().any(axis=1)\n \n # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine\n # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n@@ -118,7 +117,7 @@\n )\n \n # This dictionary contains default values for any parameters that should have default values\n- default_kwarg_values = {}\n+ default_kwarg_values = {\"ignore_row_if\": \"never\"}\n \n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Optional\n\nimport numpy as np\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.expectation import MulticolumnMapExpectation\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\n\n# This class defines a Metric to support your Expectation.\n# For most MulticolumnMapExpectations, the main business logic for calculation will live in this class.\nclass MulticolumnValuesNotAllNull(MulticolumnMapMetricProvider):\n\n # This is the id string that will be used to reference your metric.\n\n condition_metric_name = \"multicolumn_values.not_all_null\"\n\n # These point your metric at the provided keys to facilitate calculation\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = ()\n\n # This method implements the core logic for the PandasExecutionEngine\n\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n row_wise_cond = column_list.isna().sum(axis=1) < len(column_list)\n return row_wise_cond\n\n # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine\n # @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n # def _sqlalchemy(cls, column_list, **kwargs):\n # raise NotImplementedError\n\n # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine\n # @multicolumn_condition_partial(engine=SparkDFExecutionEngine)\n # def _spark(cls, column_list, **kwargs):\n # raise NotImplementedError\n\n\n# This class defines the Expectation itself\nclass ExpectMulticolumnValuesNotToBeAllNull(MulticolumnMapExpectation):\n \"\"\"Expect the certain set of columns not to be null at the same time.\"\"\"\n\n # These examples will be shown in the public gallery.\n # They will also be executed as 
unit tests for your Expectation.\n examples = [\n {\n \"data\": {\n \"no_nulls\": [5, 6, 5, 12, -3],\n \"some_nulls\": [np.nan, -3, np.nan, np.nan, -9],\n \"one_non_null\": [np.nan, 2, np.nan, np.nan, np.nan],\n \"all_nulls\": [np.nan, np.nan, np.nan, np.nan, np.nan],\n },\n \"tests\": [\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\"column_list\": [\"no_nulls\", \"some_nulls\"]},\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column_list\": [\"some_nulls\", \"one_non_null\"],\n \"mostly\": 0.4,\n },\n \"out\": {\n \"success\": True,\n },\n },\n {\n \"title\": \"basic_negative_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column_list\": [\"some_nulls\", \"one_non_null\", \"all_nulls\"],\n \"mostly\": 1,\n },\n \"out\": {\n \"success\": False,\n },\n },\n ],\n \"test_backends\": [\n {\n \"backend\": \"pandas\",\n \"dialects\": None,\n },\n ],\n }\n ]\n\n # This is the id string of the Metric used by this Expectation.\n # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.\n\n map_metric = \"multicolumn_values.not_all_null\"\n\n # This is a list of parameter names that can affect whether the Expectation evaluates to True or False\n success_keys = (\n \"column_list\",\n \"mostly\",\n )\n\n # This dictionary contains default values for any parameters that should have default values\n default_kwarg_values = {}\n\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration] = None\n ) -> None:\n \"\"\"\n Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that\n necessary configuration arguments have been provided for the validation of the expectation.\n\n Args:\n configuration (OPTIONAL[ExpectationConfiguration]): \\\n An optional Expectation Configuration entry that will be used to configure the expectation\n Returns:\n None. Raises InvalidExpectationConfigurationError if the config is not validated successfully\n \"\"\"\n\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))\n\n # This object contains metadata for display in the public Gallery\n\n library_metadata = {\n \"tags\": [\"null_check\"], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@liyusa\", # Don't forget to add your github handle here!\n ],\n }\n\n\nif __name__ == \"__main__\":\n\n ExpectMulticolumnValuesNotToBeAllNull().print_diagnostic_checklist()\n", "path": "contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py"}]} | 2,252 | 306 |
gh_patches_debug_22178 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Empty hook file causes cryptic error
If you have a pre_gen_project.sh or a post_gen_project.sh file with no data in it, cookiecutter fails with an unhelpful traceback.
```
Traceback (most recent call last):
File "/usr/local/bin/cookiecutter", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 696, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 534, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/cli.py", line 100, in main
config_file=user_config
File "/usr/local/lib/python2.7/site-packages/cookiecutter/main.py", line 140, in cookiecutter
output_dir=output_dir
File "/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py", line 273, in generate_files
_run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py", line 232, in _run_hook_from_repo_dir
run_hook(hook_name, project_dir, context)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 116, in run_hook
run_script_with_context(script, project_dir, context)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 101, in run_script_with_context
run_script(temp.name, cwd)
File "/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py", line 73, in run_script
cwd=cwd
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py", line 656, in __init__
_cleanup()
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py", line 1335, in _execute_child
raise child_exception
OSError: [Errno 8] Exec format error
```
</issue>
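For context, the failure is easy to reproduce outside cookiecutter on a POSIX system: executing a zero-byte script (no shebang, no recognizable header) makes `exec` fail with `ENOEXEC`, which surfaces as `OSError: [Errno 8] Exec format error`. A standard-library-only sketch, independent of cookiecutter's own code:

```py
# Minimal reproduction sketch (POSIX): an empty executable file has no shebang,
# so subprocess cannot exec it and raises OSError with errno ENOEXEC (8).
import errno
import os
import subprocess
import tempfile

with tempfile.NamedTemporaryFile(suffix='.sh', delete=False) as f:
    hook_path = f.name  # zero-byte "hook" script

os.chmod(hook_path, 0o755)
try:
    subprocess.Popen([hook_path]).wait()
except OSError as err:
    print(err.errno == errno.ENOEXEC)  # True
```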
<code>
[start of cookiecutter/hooks.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.hooks
6 ------------------
7
8 Functions for discovering and executing various cookiecutter hooks.
9 """
10
11 import io
12 import logging
13 import os
14 import subprocess
15 import sys
16 import tempfile
17
18 from jinja2 import Template
19
20 from cookiecutter import utils
21 from .exceptions import FailedHookException
22
23
24 _HOOKS = [
25 'pre_gen_project',
26 'post_gen_project',
27 # TODO: other hooks should be listed here
28 ]
29 EXIT_SUCCESS = 0
30
31
32 def find_hooks():
33 """
34 Must be called with the project template as the current working directory.
35 Returns a dict of all hook scripts provided.
36 Dict's key will be the hook/script's name, without extension, while
37 values will be the absolute path to the script.
38 Missing scripts will not be included in the returned dict.
39 """
40 hooks_dir = 'hooks'
41 r = {}
42 logging.debug('hooks_dir is {0}'.format(hooks_dir))
43 if not os.path.isdir(hooks_dir):
44 logging.debug('No hooks/ dir in template_dir')
45 return r
46 for f in os.listdir(hooks_dir):
47 basename = os.path.splitext(os.path.basename(f))[0]
48 if basename in _HOOKS:
49 r[basename] = os.path.abspath(os.path.join(hooks_dir, f))
50 return r
51
52
53 def run_script(script_path, cwd='.'):
54 """
55 Executes a script from a working directory.
56
57 :param script_path: Absolute path to the script to run.
58 :param cwd: The directory to run the script from.
59 """
60 run_thru_shell = sys.platform.startswith('win')
61 if script_path.endswith('.py'):
62 script_command = [sys.executable, script_path]
63 else:
64 script_command = [script_path]
65
66 utils.make_executable(script_path)
67
68 proc = subprocess.Popen(
69 script_command,
70 shell=run_thru_shell,
71 cwd=cwd
72 )
73 exit_status = proc.wait()
74 if exit_status != EXIT_SUCCESS:
75 raise FailedHookException(
76 "Hook script failed (exit status: %d)" % exit_status)
77
78
79 def run_script_with_context(script_path, cwd, context):
80 """
81 Executes a script after rendering with it Jinja.
82
83 :param script_path: Absolute path to the script to run.
84 :param cwd: The directory to run the script from.
85 :param context: Cookiecutter project template context.
86 """
87 _, extension = os.path.splitext(script_path)
88
89 contents = io.open(script_path, 'r', encoding='utf-8').read()
90
91 with tempfile.NamedTemporaryFile(
92 delete=False,
93 mode='wb',
94 suffix=extension
95 ) as temp:
96 output = Template(contents).render(**context)
97 temp.write(output.encode('utf-8'))
98
99 run_script(temp.name, cwd)
100
101
102 def run_hook(hook_name, project_dir, context):
103 """
104 Try to find and execute a hook from the specified project directory.
105
106 :param hook_name: The hook to execute.
107 :param project_dir: The directory to execute the script from.
108 :param context: Cookiecutter project context.
109 """
110 script = find_hooks().get(hook_name)
111 if script is None:
112 logging.debug('No hooks found')
113 return
114 run_script_with_context(script, project_dir, context)
115
[end of cookiecutter/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py
--- a/cookiecutter/hooks.py
+++ b/cookiecutter/hooks.py
@@ -8,6 +8,7 @@
Functions for discovering and executing various cookiecutter hooks.
"""
+import errno
import io
import logging
import os
@@ -65,15 +66,23 @@
utils.make_executable(script_path)
- proc = subprocess.Popen(
- script_command,
- shell=run_thru_shell,
- cwd=cwd
- )
- exit_status = proc.wait()
- if exit_status != EXIT_SUCCESS:
+ try:
+ proc = subprocess.Popen(
+ script_command,
+ shell=run_thru_shell,
+ cwd=cwd
+ )
+ exit_status = proc.wait()
+ if exit_status != EXIT_SUCCESS:
+ raise FailedHookException(
+ "Hook script failed (exit status: %d)" % exit_status)
+ except OSError as oe:
+ if oe.errno == errno.ENOEXEC:
+ raise FailedHookException(
+ "Hook script failed, might be an "
+ "empty file or missing a shebang")
raise FailedHookException(
- "Hook script failed (exit status: %d)" % exit_status)
+ "Hook script failed (error: %s)" % oe)
def run_script_with_context(script_path, cwd, context):
| {"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -8,6 +8,7 @@\n Functions for discovering and executing various cookiecutter hooks.\n \"\"\"\n \n+import errno\n import io\n import logging\n import os\n@@ -65,15 +66,23 @@\n \n utils.make_executable(script_path)\n \n- proc = subprocess.Popen(\n- script_command,\n- shell=run_thru_shell,\n- cwd=cwd\n- )\n- exit_status = proc.wait()\n- if exit_status != EXIT_SUCCESS:\n+ try:\n+ proc = subprocess.Popen(\n+ script_command,\n+ shell=run_thru_shell,\n+ cwd=cwd\n+ )\n+ exit_status = proc.wait()\n+ if exit_status != EXIT_SUCCESS:\n+ raise FailedHookException(\n+ \"Hook script failed (exit status: %d)\" % exit_status)\n+ except OSError as oe:\n+ if oe.errno == errno.ENOEXEC:\n+ raise FailedHookException(\n+ \"Hook script failed, might be an \"\n+ \"empty file or missing a shebang\")\n raise FailedHookException(\n- \"Hook script failed (exit status: %d)\" % exit_status)\n+ \"Hook script failed (error: %s)\" % oe)\n \n \n def run_script_with_context(script_path, cwd, context):\n", "issue": "Empty hook file causes cryptic error\nIf you have a pre_gen_project.sh or a post_gen_project.sh file with no data in it, cookiecutter fails with an unhelpful traceback.\n\n```\nTraceback (most recent call last):\n File \"/usr/local/bin/cookiecutter\", line 11, in <module>\n sys.exit(main())\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 716, in __call__\n return self.main(*args, **kwargs)\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 696, in main\n rv = self.invoke(ctx)\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 889, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/usr/local/lib/python2.7/site-packages/click/core.py\", line 534, in invoke\n return callback(*args, **kwargs)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/cli.py\", line 100, in main\n config_file=user_config\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/main.py\", line 140, in cookiecutter\n output_dir=output_dir\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py\", line 273, in generate_files\n _run_hook_from_repo_dir(repo_dir, 'pre_gen_project', project_dir, context)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/generate.py\", line 232, in _run_hook_from_repo_dir\n run_hook(hook_name, project_dir, context)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py\", line 116, in run_hook\n run_script_with_context(script, project_dir, context)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py\", line 101, in run_script_with_context\n run_script(temp.name, cwd)\n File \"/usr/local/lib/python2.7/site-packages/cookiecutter/hooks.py\", line 73, in run_script\n cwd=cwd\n File \"/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py\", line 656, in __init__\n _cleanup()\n File \"/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/subprocess.py\", line 1335, in _execute_child\n raise child_exception\nOSError: [Errno 8] Exec format error\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom 
jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]} | 2,118 | 317 |
gh_patches_debug_9282 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4165 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
[BUG]: colossalai run is stuck during multi-nodes training
### 🐛 Describe the bug
When using colossalai run for multi-node training, it gets stuck before initializing the distributed process group.

This is likely caused by a potentially wrong launch command.
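
The difference between the two torchrun launch styles in play (the c10d dynamic rendezvous versus the static master address/port form) can be sketched as below; host, port, and script names are assumed for illustration and are not taken from this report.

```python
# Minimal sketch of the two launch commands (illustrative values only).
rdzv_style = [
    "torchrun", "--nproc_per_node=8", "--nnodes=2", "--node_rank=0",
    "--rdzv_backend=c10d", "--rdzv_endpoint=10.0.0.1:29500",
    "--rdzv_id=colossalai-default-job", "train.py",
]
static_style = [
    "torchrun", "--nproc_per_node=8", "--nnodes=2", "--node_rank=0",
    "--master_addr=10.0.0.1", "--master_port=29500", "train.py",
]
print(" ".join(rdzv_style))    # the c10d-rendezvous form of the launch command
print(" ".join(static_style))  # the static master_addr/master_port form
```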
### Environment
Python 3.8.0
torch 1.12.1+cu113
CUDA 11.4
</issue>
<code>
[start of colossalai/cli/launcher/run.py]
1 import os
2 import sys
3 from typing import List
4
5 import click
6 import torch
7 from packaging import version
8
9 from colossalai.context import Config
10
11 from .hostinfo import HostInfo, HostInfoList
12 from .multinode_runner import MultiNodeRunner
13
14 # Constants that define our syntax
15 NODE_SEP = ','
16
17
18 def fetch_hostfile(hostfile_path: str, ssh_port: int) -> HostInfoList:
19 """
20 Parse the hostfile to obtain a list of hosts.
21
22 A hostfile should look like:
23 worker-0
24 worker-1
25 worker-2
26 ...
27
28 Args:
29 hostfile_path (str): the path to the hostfile
30 ssh_port (int): the port to connect to the host
31 """
32
33 if not os.path.isfile(hostfile_path):
34 click.echo(f"Error: Unable to find the hostfile, no such file: {hostfile_path}")
35 exit()
36
37 with open(hostfile_path, 'r') as fd:
38 device_pool = HostInfoList()
39
40 for line in fd.readlines():
41 line = line.strip()
42 if line == '':
43 # skip empty lines
44 continue
45
46 # build the HostInfo object
47 hostname = line.strip()
48 hostinfo = HostInfo(hostname=hostname, port=ssh_port)
49
50 if device_pool.has(hostname):
51 click.echo(f"Error: found duplicate host {hostname} in the hostfile")
52 exit()
53
54 device_pool.append(hostinfo)
55 return device_pool
56
57
58 def parse_device_filter(device_pool: HostInfoList, include_str=None, exclude_str=None) -> HostInfoList:
59 '''Parse an inclusion or exclusion string and filter a hostfile dictionary.
60
61 Examples:
62 include_str="worker-0,worker-1" will execute jobs only on worker-0 and worker-1.
63 exclude_str="worker-1" will use all available devices except worker-1.
64
65 Args:
66 device_pool (HostInfoList): a list of HostInfo objects
67 include_str (str): --include option passed by user, default None
68 exclude_str (str): --exclude option passed by user, default None
69
70 Returns:
71 filtered_hosts (HostInfoList): filtered hosts after inclusion/exclusion
72 '''
73
74 # Ensure include/exclude are mutually exclusive
75 if include_str and exclude_str:
76 click.echo("--include and --exclude are mutually exclusive, only one can be used")
77 exit()
78
79 # no-op
80 if include_str is None and exclude_str is None:
81 return device_pool
82
83 # Either build from scratch or remove items
84 if include_str:
85 parse_str = include_str
86 filtered_hosts = HostInfoList()
87 elif exclude_str:
88 parse_str = exclude_str
89 filtered_hosts = device_pool
90
91 # foreach node in the list
92 for node_config in parse_str.split(NODE_SEP):
93 hostname = node_config
94 hostinfo = device_pool.get_hostinfo(hostname)
95 # sanity check hostname
96 if not device_pool.has(hostname):
97 click.echo(f"Error: Hostname '{hostname}' not found in hostfile")
98 exit()
99
100 if include_str:
101 filtered_hosts.append(hostinfo)
102 elif exclude_str:
103 filtered_hosts.remove(hostname)
104
105 return filtered_hosts
106
107
108 def get_launch_command(
109 master_addr: str,
110 master_port: int,
111 nproc_per_node: int,
112 user_script: str,
113 user_args: List[str],
114 node_rank: int,
115 num_nodes: int,
116 extra_launch_args: str = None,
117 ) -> str:
118 """
119 Generate a command for distributed training.
120
121 Args:
122 master_addr (str): the host of the master node
123 master_port (str): the port of the master node
124 nproc_per_node (str): the number of processes to launch on each node
125 user_script (str): the user Python file
126 user_args (str): the arguments for the user script
127 node_rank (int): the unique ID for the node
128 num_nodes (int): the number of nodes to execute jobs
129
130 Returns:
131 cmd (str): the command the start distributed training
132 """
133
134 def _arg_dict_to_list(arg_dict):
135 ret = []
136
137 for k, v in arg_dict.items():
138 if v:
139 ret.append(f'--{k}={v}')
140 else:
141 ret.append(f'--{k}')
142 return ret
143
144 if extra_launch_args:
145 extra_launch_args_dict = dict()
146 for arg in extra_launch_args.split(','):
147 if '=' in arg:
148 k, v = arg.split('=')
149 extra_launch_args_dict[k] = v
150 else:
151 extra_launch_args_dict[arg] = None
152 extra_launch_args = extra_launch_args_dict
153 else:
154 extra_launch_args = dict()
155
156 torch_version = version.parse(torch.__version__)
157 assert torch_version.major >= 1
158
159 if torch_version.minor < 9:
160 cmd = [
161 sys.executable, "-m", "torch.distributed.launch", f"--nproc_per_node={nproc_per_node}",
162 f"--master_addr={master_addr}", f"--master_port={master_port}", f"--nnodes={num_nodes}",
163 f"--node_rank={node_rank}"
164 ]
165 else:
166 # extra launch args for torch distributed launcher with torch >= 1.9
167 default_torchrun_rdzv_args = dict(rdzv_backend="c10d",
168 rdzv_endpoint=f"{master_addr}:{master_port}",
169 rdzv_id="colossalai-default-job")
170
171 # update rdzv arguments
172 for key in default_torchrun_rdzv_args.keys():
173 if key in extra_launch_args:
174 value = extra_launch_args.pop(key)
175 default_torchrun_rdzv_args[key] = value
176
177 if torch_version.minor < 10:
178 cmd = [
179 sys.executable, "-m", "torch.distributed.run", f"--nproc_per_node={nproc_per_node}",
180 f"--nnodes={num_nodes}", f"--node_rank={node_rank}"
181 ]
182 else:
183 cmd = [
184 "torchrun", f"--nproc_per_node={nproc_per_node}", f"--nnodes={num_nodes}", f"--node_rank={node_rank}"
185 ]
186 cmd += _arg_dict_to_list(default_torchrun_rdzv_args)
187
188 cmd += _arg_dict_to_list(extra_launch_args) + [user_script] + user_args
189 cmd = ' '.join(cmd)
190 return cmd
191
192
193 def launch_multi_processes(args: Config) -> None:
194 """
195 Launch multiple processes on a single node or multiple nodes.
196
197 The overall logic can be summarized as the pseudo code below:
198
199 if hostfile given:
200 hostinfo = parse_hostfile(hostfile)
201 hostinfo = include_or_exclude_hosts(hostinfo)
202 launch_on_multi_nodes(hostinfo)
203 elif hosts given:
204 hostinfo = parse_hosts(hosts)
205 launch_on_multi_nodes(hostinfo)
206 else:
207 launch_on_current_node()
208
209 Args:
210 args (Config): the arguments taken from command line
211
212 """
213 assert isinstance(args, Config)
214
215 if args.nproc_per_node is None:
216 click.echo("--nproc_per_node did not receive any value")
217 exit()
218
219 # cannot accept hosts and hostfile at the same time
220 if args.host and args.hostfile:
221 click.echo("Error: hostfile and hosts are mutually exclusive, only one is required")
222
223 # check if hostfile is given
224 if args.hostfile:
225 device_pool = fetch_hostfile(args.hostfile, ssh_port=args.ssh_port)
226 active_device_pool = parse_device_filter(device_pool, args.include, args.exclude)
227
228 if args.num_nodes > 0:
229 # only keep the first num_nodes to execute jobs
230 updated_active_device_pool = HostInfoList()
231 for count, hostinfo in enumerate(active_device_pool):
232 if args.num_nodes == count:
233 break
234 updated_active_device_pool.append(hostinfo)
235 active_device_pool = updated_active_device_pool
236 else:
237 active_device_pool = None
238
239 env = os.environ.copy()
240
241 # use hosts if hostfile is not given
242 if args.host and active_device_pool is None:
243 active_device_pool = HostInfoList()
244 host_list = args.host.strip().split(NODE_SEP)
245 for hostname in host_list:
246 hostinfo = HostInfo(hostname=hostname, port=args.ssh_port)
247 active_device_pool.append(hostinfo)
248
249 if not active_device_pool:
250 # run on local node if not hosts or hostfile is given
251 # add local node to host info list
252 active_device_pool = HostInfoList()
253 localhost_info = HostInfo(hostname='127.0.0.1', port=args.ssh_port)
254 active_device_pool.append(localhost_info)
255
256 # launch distributed processes
257 runner = MultiNodeRunner()
258 curr_path = os.path.abspath('.')
259
260 # collect current path env
261 env = dict()
262 for k, v in os.environ.items():
263 # do not support multi-line env var
264 if v and '\n' not in v:
265 env[k] = v
266
267 # establish remote connection
268 runner.connect(host_info_list=active_device_pool, workdir=curr_path, env=env)
269
270 # execute distributed launching command
271 for node_id, hostinfo in enumerate(active_device_pool):
272 cmd = get_launch_command(master_addr=args.master_addr,
273 master_port=args.master_port,
274 nproc_per_node=args.nproc_per_node,
275 user_script=args.user_script,
276 user_args=args.user_args,
277 node_rank=node_id,
278 num_nodes=len(active_device_pool),
279 extra_launch_args=args.extra_launch_args)
280 runner.send(hostinfo=hostinfo, cmd=cmd)
281
282 # start training
283 msg_from_node = runner.recv_from_all()
284 has_error = False
285
286 # print node status
287 click.echo("\n====== Training on All Nodes =====")
288 for hostname, msg in msg_from_node.items():
289 click.echo(f"{hostname}: {msg}")
290
291 # check if a process failed
292 if msg == "failure":
293 has_error = True
294
295 # stop all nodes
296 runner.stop_all()
297
298 # receive the stop status
299 msg_from_node = runner.recv_from_all()
300
301 # print node status
302 click.echo("\n====== Stopping All Nodes =====")
303 for hostname, msg in msg_from_node.items():
304 click.echo(f"{hostname}: {msg}")
305
306 # give the process an exit code
307 # so that it behaves like a normal process
308 if has_error:
309 sys.exit(1)
310 else:
311 sys.exit(0)
312
[end of colossalai/cli/launcher/run.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/cli/launcher/run.py b/colossalai/cli/launcher/run.py
--- a/colossalai/cli/launcher/run.py
+++ b/colossalai/cli/launcher/run.py
@@ -164,9 +164,7 @@
]
else:
# extra launch args for torch distributed launcher with torch >= 1.9
- default_torchrun_rdzv_args = dict(rdzv_backend="c10d",
- rdzv_endpoint=f"{master_addr}:{master_port}",
- rdzv_id="colossalai-default-job")
+ default_torchrun_rdzv_args = dict(master_addr=master_addr, master_port=master_port)
# update rdzv arguments
for key in default_torchrun_rdzv_args.keys():
| {"golden_diff": "diff --git a/colossalai/cli/launcher/run.py b/colossalai/cli/launcher/run.py\n--- a/colossalai/cli/launcher/run.py\n+++ b/colossalai/cli/launcher/run.py\n@@ -164,9 +164,7 @@\n ]\n else:\n # extra launch args for torch distributed launcher with torch >= 1.9\n- default_torchrun_rdzv_args = dict(rdzv_backend=\"c10d\",\n- rdzv_endpoint=f\"{master_addr}:{master_port}\",\n- rdzv_id=\"colossalai-default-job\")\n+ default_torchrun_rdzv_args = dict(master_addr=master_addr, master_port=master_port)\n \n # update rdzv arguments\n for key in default_torchrun_rdzv_args.keys():\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: colossalai run is stuck during multi-nodes training\n### \ud83d\udc1b Describe the bug\n\nWhen using colossalai run during multi-nodes training, it's stuck before initializing distributed process group.\r\n\r\nThis is because potentially wrong launch command.\n\n### Environment\n\nPython 3.8.0\r\ntorch 1.12.1+cu113\r\nCUDA 11.4\n", "before_files": [{"content": "import os\nimport sys\nfrom typing import List\n\nimport click\nimport torch\nfrom packaging import version\n\nfrom colossalai.context import Config\n\nfrom .hostinfo import HostInfo, HostInfoList\nfrom .multinode_runner import MultiNodeRunner\n\n# Constants that define our syntax\nNODE_SEP = ','\n\n\ndef fetch_hostfile(hostfile_path: str, ssh_port: int) -> HostInfoList:\n \"\"\"\n Parse the hostfile to obtain a list of hosts.\n\n A hostfile should look like:\n worker-0\n worker-1\n worker-2\n ...\n\n Args:\n hostfile_path (str): the path to the hostfile\n ssh_port (int): the port to connect to the host\n \"\"\"\n\n if not os.path.isfile(hostfile_path):\n click.echo(f\"Error: Unable to find the hostfile, no such file: {hostfile_path}\")\n exit()\n\n with open(hostfile_path, 'r') as fd:\n device_pool = HostInfoList()\n\n for line in fd.readlines():\n line = line.strip()\n if line == '':\n # skip empty lines\n continue\n\n # build the HostInfo object\n hostname = line.strip()\n hostinfo = HostInfo(hostname=hostname, port=ssh_port)\n\n if device_pool.has(hostname):\n click.echo(f\"Error: found duplicate host {hostname} in the hostfile\")\n exit()\n\n device_pool.append(hostinfo)\n return device_pool\n\n\ndef parse_device_filter(device_pool: HostInfoList, include_str=None, exclude_str=None) -> HostInfoList:\n '''Parse an inclusion or exclusion string and filter a hostfile dictionary.\n\n Examples:\n include_str=\"worker-0,worker-1\" will execute jobs only on worker-0 and worker-1.\n exclude_str=\"worker-1\" will use all available devices except worker-1.\n\n Args:\n device_pool (HostInfoList): a list of HostInfo objects\n include_str (str): --include option passed by user, default None\n exclude_str (str): --exclude option passed by user, default None\n\n Returns:\n filtered_hosts (HostInfoList): filtered hosts after inclusion/exclusion\n '''\n\n # Ensure include/exclude are mutually exclusive\n if include_str and exclude_str:\n click.echo(\"--include and --exclude are mutually exclusive, only one can be used\")\n exit()\n\n # no-op\n if include_str is None and exclude_str is None:\n return device_pool\n\n # Either build from scratch or remove items\n if include_str:\n parse_str = include_str\n filtered_hosts = HostInfoList()\n elif exclude_str:\n parse_str = exclude_str\n filtered_hosts = device_pool\n\n # foreach node in the list\n for node_config in parse_str.split(NODE_SEP):\n hostname = node_config\n hostinfo = 
device_pool.get_hostinfo(hostname)\n # sanity check hostname\n if not device_pool.has(hostname):\n click.echo(f\"Error: Hostname '{hostname}' not found in hostfile\")\n exit()\n\n if include_str:\n filtered_hosts.append(hostinfo)\n elif exclude_str:\n filtered_hosts.remove(hostname)\n\n return filtered_hosts\n\n\ndef get_launch_command(\n master_addr: str,\n master_port: int,\n nproc_per_node: int,\n user_script: str,\n user_args: List[str],\n node_rank: int,\n num_nodes: int,\n extra_launch_args: str = None,\n) -> str:\n \"\"\"\n Generate a command for distributed training.\n\n Args:\n master_addr (str): the host of the master node\n master_port (str): the port of the master node\n nproc_per_node (str): the number of processes to launch on each node\n user_script (str): the user Python file\n user_args (str): the arguments for the user script\n node_rank (int): the unique ID for the node\n num_nodes (int): the number of nodes to execute jobs\n\n Returns:\n cmd (str): the command the start distributed training\n \"\"\"\n\n def _arg_dict_to_list(arg_dict):\n ret = []\n\n for k, v in arg_dict.items():\n if v:\n ret.append(f'--{k}={v}')\n else:\n ret.append(f'--{k}')\n return ret\n\n if extra_launch_args:\n extra_launch_args_dict = dict()\n for arg in extra_launch_args.split(','):\n if '=' in arg:\n k, v = arg.split('=')\n extra_launch_args_dict[k] = v\n else:\n extra_launch_args_dict[arg] = None\n extra_launch_args = extra_launch_args_dict\n else:\n extra_launch_args = dict()\n\n torch_version = version.parse(torch.__version__)\n assert torch_version.major >= 1\n\n if torch_version.minor < 9:\n cmd = [\n sys.executable, \"-m\", \"torch.distributed.launch\", f\"--nproc_per_node={nproc_per_node}\",\n f\"--master_addr={master_addr}\", f\"--master_port={master_port}\", f\"--nnodes={num_nodes}\",\n f\"--node_rank={node_rank}\"\n ]\n else:\n # extra launch args for torch distributed launcher with torch >= 1.9\n default_torchrun_rdzv_args = dict(rdzv_backend=\"c10d\",\n rdzv_endpoint=f\"{master_addr}:{master_port}\",\n rdzv_id=\"colossalai-default-job\")\n\n # update rdzv arguments\n for key in default_torchrun_rdzv_args.keys():\n if key in extra_launch_args:\n value = extra_launch_args.pop(key)\n default_torchrun_rdzv_args[key] = value\n\n if torch_version.minor < 10:\n cmd = [\n sys.executable, \"-m\", \"torch.distributed.run\", f\"--nproc_per_node={nproc_per_node}\",\n f\"--nnodes={num_nodes}\", f\"--node_rank={node_rank}\"\n ]\n else:\n cmd = [\n \"torchrun\", f\"--nproc_per_node={nproc_per_node}\", f\"--nnodes={num_nodes}\", f\"--node_rank={node_rank}\"\n ]\n cmd += _arg_dict_to_list(default_torchrun_rdzv_args)\n\n cmd += _arg_dict_to_list(extra_launch_args) + [user_script] + user_args\n cmd = ' '.join(cmd)\n return cmd\n\n\ndef launch_multi_processes(args: Config) -> None:\n \"\"\"\n Launch multiple processes on a single node or multiple nodes.\n\n The overall logic can be summarized as the pseudo code below:\n\n if hostfile given:\n hostinfo = parse_hostfile(hostfile)\n hostinfo = include_or_exclude_hosts(hostinfo)\n launch_on_multi_nodes(hostinfo)\n elif hosts given:\n hostinfo = parse_hosts(hosts)\n launch_on_multi_nodes(hostinfo)\n else:\n launch_on_current_node()\n\n Args:\n args (Config): the arguments taken from command line\n\n \"\"\"\n assert isinstance(args, Config)\n\n if args.nproc_per_node is None:\n click.echo(\"--nproc_per_node did not receive any value\")\n exit()\n\n # cannot accept hosts and hostfile at the same time\n if args.host and args.hostfile:\n click.echo(\"Error: 
hostfile and hosts are mutually exclusive, only one is required\")\n\n # check if hostfile is given\n if args.hostfile:\n device_pool = fetch_hostfile(args.hostfile, ssh_port=args.ssh_port)\n active_device_pool = parse_device_filter(device_pool, args.include, args.exclude)\n\n if args.num_nodes > 0:\n # only keep the first num_nodes to execute jobs\n updated_active_device_pool = HostInfoList()\n for count, hostinfo in enumerate(active_device_pool):\n if args.num_nodes == count:\n break\n updated_active_device_pool.append(hostinfo)\n active_device_pool = updated_active_device_pool\n else:\n active_device_pool = None\n\n env = os.environ.copy()\n\n # use hosts if hostfile is not given\n if args.host and active_device_pool is None:\n active_device_pool = HostInfoList()\n host_list = args.host.strip().split(NODE_SEP)\n for hostname in host_list:\n hostinfo = HostInfo(hostname=hostname, port=args.ssh_port)\n active_device_pool.append(hostinfo)\n\n if not active_device_pool:\n # run on local node if not hosts or hostfile is given\n # add local node to host info list\n active_device_pool = HostInfoList()\n localhost_info = HostInfo(hostname='127.0.0.1', port=args.ssh_port)\n active_device_pool.append(localhost_info)\n\n # launch distributed processes\n runner = MultiNodeRunner()\n curr_path = os.path.abspath('.')\n\n # collect current path env\n env = dict()\n for k, v in os.environ.items():\n # do not support multi-line env var\n if v and '\\n' not in v:\n env[k] = v\n\n # establish remote connection\n runner.connect(host_info_list=active_device_pool, workdir=curr_path, env=env)\n\n # execute distributed launching command\n for node_id, hostinfo in enumerate(active_device_pool):\n cmd = get_launch_command(master_addr=args.master_addr,\n master_port=args.master_port,\n nproc_per_node=args.nproc_per_node,\n user_script=args.user_script,\n user_args=args.user_args,\n node_rank=node_id,\n num_nodes=len(active_device_pool),\n extra_launch_args=args.extra_launch_args)\n runner.send(hostinfo=hostinfo, cmd=cmd)\n\n # start training\n msg_from_node = runner.recv_from_all()\n has_error = False\n\n # print node status\n click.echo(\"\\n====== Training on All Nodes =====\")\n for hostname, msg in msg_from_node.items():\n click.echo(f\"{hostname}: {msg}\")\n\n # check if a process failed\n if msg == \"failure\":\n has_error = True\n\n # stop all nodes\n runner.stop_all()\n\n # receive the stop status\n msg_from_node = runner.recv_from_all()\n\n # print node status\n click.echo(\"\\n====== Stopping All Nodes =====\")\n for hostname, msg in msg_from_node.items():\n click.echo(f\"{hostname}: {msg}\")\n\n # give the process an exit code\n # so that it behaves like a normal process\n if has_error:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "colossalai/cli/launcher/run.py"}]} | 3,770 | 180 |
gh_patches_debug_48963 | rasdani/github-patches | git_diff | scverse__scanpy-1948 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sphinx 4.1.0 doesn't like ScanpyConfig
Update:
Docs don't build with sphinx 4.1.0 due to an error triggered by `scanpydoc`. Sphinx will be pinned until this is solved (which is when this issue should be closed). It's not obvious to me at the moment whether sphinx or scanpydoc is at fault.
---------------
Trying to build the docs with Sphinx 4.1.0 fails with the following output:
<details>
<summary> </summary>
```sh
$ make html
Running Sphinx v4.1.0
loading intersphinx inventory from https://anndata.readthedocs.io/en/stable/objects.inv...
loading intersphinx inventory from https://bbknn.readthedocs.io/en/latest/objects.inv...
loading intersphinx inventory from https://matplotlib.org/cycler/objects.inv...
loading intersphinx inventory from http://docs.h5py.org/en/stable/objects.inv...
loading intersphinx inventory from https://ipython.readthedocs.io/en/stable/objects.inv...
loading intersphinx inventory from https://leidenalg.readthedocs.io/en/latest/objects.inv...
loading intersphinx inventory from https://louvain-igraph.readthedocs.io/en/latest/objects.inv...
loading intersphinx inventory from https://matplotlib.org/objects.inv...
loading intersphinx inventory from https://networkx.github.io/documentation/networkx-1.10/objects.inv...
loading intersphinx inventory from https://docs.scipy.org/doc/numpy/objects.inv...
loading intersphinx inventory from https://pandas.pydata.org/pandas-docs/stable/objects.inv...
loading intersphinx inventory from https://docs.pytest.org/en/latest/objects.inv...
loading intersphinx inventory from https://docs.python.org/3/objects.inv...
loading intersphinx inventory from https://docs.scipy.org/doc/scipy/reference/objects.inv...
loading intersphinx inventory from https://seaborn.pydata.org/objects.inv...
loading intersphinx inventory from https://scikit-learn.org/stable/objects.inv...
loading intersphinx inventory from https://scanpy-tutorials.readthedocs.io/en/latest/objects.inv...
intersphinx inventory has moved: https://networkx.github.io/documentation/networkx-1.10/objects.inv -> https://networkx.org/documentation/networkx-1.10/objects.inv
intersphinx inventory has moved: https://docs.scipy.org/doc/numpy/objects.inv -> https://numpy.org/doc/stable/objects.inv
intersphinx inventory has moved: http://docs.h5py.org/en/stable/objects.inv -> https://docs.h5py.org/en/stable/objects.inv
[autosummary] generating autosummary for: _key_contributors.rst, api.rst, basic_usage.rst, community.rst, contributors.rst, dev/ci.rst, dev/code.rst, dev/documentation.rst, dev/external-tools.rst, dev/getting-set-up.rst, ..., release-notes/1.7.1.rst, release-notes/1.7.2.rst, release-notes/1.8.0.rst, release-notes/1.8.1.rst, release-notes/1.8.2.rst, release-notes/1.9.0.rst, release-notes/index.rst, release-notes/release-latest.rst, tutorials.rst, usage-principles.rst
Error in github_url('scanpy._settings.ScanpyConfig.N_PCS'):
Extension error (sphinx.ext.autosummary):
Handler <function process_generate_options at 0x139c4a940> for event 'builder-inited' threw an exception (exception: type object 'ScanpyConfig' has no attribute 'N_PCS')
make: *** [html] Error 2
```
</details>
However, I'm not entirely sure if this is Sphinx's fault or our own. Currently the [N_PCS parameter isn't in the rendered documentation](https://scanpy.readthedocs.io/en/stable/generated/scanpy._settings.ScanpyConfig.html#scanpy._settings.ScanpyConfig). I think it should be, and am not sure why it's not showing up here.
To summarize:
* Previous versions of our doc builds didn't seem to be including attribute docstrings for `ScanpyConfig`.
* Sphinx 4.1.0 raises an error when it hits this attribute
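
A minimal, hypothetical illustration of the failing lookup (simplified, not the actual scanpy source): `N_PCS` is assigned in `__init__`, so it exists on instances but not on the class itself, and the class-level lookup reported in the traceback above then fails.

```python
# Hypothetical, simplified sketch; the real ScanpyConfig is more involved.
class ScanpyConfig:
    """Config manager for scanpy."""

    def __init__(self):
        self.N_PCS = 50  # set on the instance only, never on the class

try:
    getattr(ScanpyConfig, "N_PCS")  # class-level lookup, as in the traceback above
except AttributeError as err:
    print(err)  # type object 'ScanpyConfig' has no attribute 'N_PCS'
```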
</issue>
<code>
[start of docs/conf.py]
1 import os
2 import sys
3 from pathlib import Path
4 from datetime import datetime
5
6 import matplotlib # noqa
7
8 # Don’t use tkinter agg when importing scanpy → … → matplotlib
9 matplotlib.use('agg')
10
11 HERE = Path(__file__).parent
12 sys.path[:0] = [str(HERE.parent), str(HERE / 'extensions')]
13 import scanpy # noqa
14
15 on_rtd = os.environ.get('READTHEDOCS') == 'True'
16
17 # -- General configuration ------------------------------------------------
18
19
20 nitpicky = True # Warn about broken links. This is here for a reason: Do not change.
21 needs_sphinx = '2.0' # Nicer param docs
22 suppress_warnings = ['ref.citation']
23
24 # General information
25 project = 'Scanpy'
26 author = scanpy.__author__
27 copyright = f'{datetime.now():%Y}, {author}.'
28 version = scanpy.__version__.replace('.dirty', '')
29 release = version
30
31 # default settings
32 templates_path = ['_templates']
33 source_suffix = '.rst'
34 master_doc = 'index'
35 default_role = 'literal'
36 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
37 pygments_style = 'sphinx'
38
39 extensions = [
40 'sphinx.ext.autodoc',
41 'sphinx.ext.intersphinx',
42 'sphinx.ext.doctest',
43 'sphinx.ext.coverage',
44 'sphinx.ext.mathjax',
45 'sphinx.ext.napoleon',
46 'sphinx.ext.autosummary',
47 # 'plot_generator',
48 'matplotlib.sphinxext.plot_directive',
49 'sphinx_autodoc_typehints', # needs to be after napoleon
50 # 'ipython_directive',
51 # 'ipython_console_highlighting',
52 'scanpydoc',
53 *[p.stem for p in (HERE / 'extensions').glob('*.py')],
54 ]
55
56 # Generate the API documentation when building
57 autosummary_generate = True
58 autodoc_member_order = 'bysource'
59 # autodoc_default_flags = ['members']
60 napoleon_google_docstring = False
61 napoleon_numpy_docstring = True
62 napoleon_include_init_with_doc = False
63 napoleon_use_rtype = True # having a separate entry generally helps readability
64 napoleon_use_param = True
65 napoleon_custom_sections = [('Params', 'Parameters')]
66 todo_include_todos = False
67 api_dir = HERE / 'api' # function_images
68
69 scanpy_tutorials_url = 'https://scanpy-tutorials.readthedocs.io/en/latest/'
70
71 intersphinx_mapping = dict(
72 anndata=('https://anndata.readthedocs.io/en/stable/', None),
73 bbknn=('https://bbknn.readthedocs.io/en/latest/', None),
74 cycler=('https://matplotlib.org/cycler/', None),
75 h5py=('http://docs.h5py.org/en/stable/', None),
76 ipython=('https://ipython.readthedocs.io/en/stable/', None),
77 leidenalg=('https://leidenalg.readthedocs.io/en/latest/', None),
78 louvain=('https://louvain-igraph.readthedocs.io/en/latest/', None),
79 matplotlib=('https://matplotlib.org/', None),
80 networkx=('https://networkx.github.io/documentation/networkx-1.10/', None),
81 numpy=('https://docs.scipy.org/doc/numpy/', None),
82 pandas=('https://pandas.pydata.org/pandas-docs/stable/', None),
83 pytest=('https://docs.pytest.org/en/latest/', None),
84 python=('https://docs.python.org/3', None),
85 scipy=('https://docs.scipy.org/doc/scipy/reference/', None),
86 seaborn=('https://seaborn.pydata.org/', None),
87 sklearn=('https://scikit-learn.org/stable/', None),
88 scanpy_tutorials=(scanpy_tutorials_url, None),
89 )
90
91
92 # -- Options for HTML output ----------------------------------------------
93
94
95 html_theme = 'scanpydoc'
96 html_theme_options = dict(
97 navigation_depth=4,
98 logo_only=True,
99 docsearch_index='scanpy',
100 docsearch_key='fa4304eb95d2134997e3729553a674b2',
101 )
102 html_context = dict(
103 display_github=True, # Integrate GitHub
104 github_user='theislab', # Username
105 github_repo='scanpy', # Repo name
106 github_version='master', # Version
107 conf_py_path='/docs/', # Path in the checkout to the docs root
108 )
109 html_static_path = ['_static']
110 html_show_sphinx = False
111 html_logo = '_static/img/Scanpy_Logo_BrightFG.svg'
112
113
114 def setup(app):
115 app.warningiserror = on_rtd
116
117
118 # -- Options for other output formats ------------------------------------------
119
120 htmlhelp_basename = f'{project}doc'
121 doc_title = f'{project} Documentation'
122 latex_documents = [(master_doc, f'{project}.tex', doc_title, author, 'manual')]
123 man_pages = [(master_doc, project, doc_title, [author], 1)]
124 texinfo_documents = [
125 (
126 master_doc,
127 project,
128 doc_title,
129 author,
130 project,
131 'One line description of project.',
132 'Miscellaneous',
133 )
134 ]
135
136
137 # -- Suppress link warnings ----------------------------------------------------
138
139 qualname_overrides = {
140 "sklearn.neighbors._dist_metrics.DistanceMetric": "sklearn.neighbors.DistanceMetric",
141 # If the docs are built with an old version of numpy, this will make it work:
142 "numpy.random.RandomState": "numpy.random.mtrand.RandomState",
143 "scanpy.plotting._matrixplot.MatrixPlot": "scanpy.pl.MatrixPlot",
144 "scanpy.plotting._dotplot.DotPlot": "scanpy.pl.DotPlot",
145 "scanpy.plotting._stacked_violin.StackedViolin": "scanpy.pl.StackedViolin",
146 "pandas.core.series.Series": "pandas.Series",
147 }
148
149 nitpick_ignore = [
150 # Will probably be documented
151 ('py:class', 'scanpy._settings.Verbosity'),
152 # Currently undocumented: https://github.com/mwaskom/seaborn/issues/1810
153 ('py:class', 'seaborn.ClusterGrid'),
154 # Won’t be documented
155 ('py:class', 'scanpy.plotting._utils._AxesSubplot'),
156 ('py:class', 'scanpy._utils.Empty'),
157 ('py:class', 'numpy.random.mtrand.RandomState'),
158 ]
159
160 # Options for plot examples
161
162 plot_include_source = True
163 plot_formats = [("png", 90)]
164 plot_html_show_formats = False
165 plot_html_show_source_link = False
166 plot_working_directory = HERE.parent # Project root
167
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -155,6 +155,9 @@
('py:class', 'scanpy.plotting._utils._AxesSubplot'),
('py:class', 'scanpy._utils.Empty'),
('py:class', 'numpy.random.mtrand.RandomState'),
+ # Will work once scipy 1.8 is released
+ ('py:class', 'scipy.sparse.base.spmatrix'),
+ ('py:class', 'scipy.sparse.csr.csr_matrix'),
]
# Options for plot examples
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -155,6 +155,9 @@\n ('py:class', 'scanpy.plotting._utils._AxesSubplot'),\n ('py:class', 'scanpy._utils.Empty'),\n ('py:class', 'numpy.random.mtrand.RandomState'),\n+ # Will work once scipy 1.8 is released\n+ ('py:class', 'scipy.sparse.base.spmatrix'),\n+ ('py:class', 'scipy.sparse.csr.csr_matrix'),\n ]\n \n # Options for plot examples\n", "issue": "Sphinx 4.1.0 doesn't like ScanpyConfig\nUpdate:\r\n\r\nDocs don't build with sphinx 4.1.0 due to a error triggered by `scanpydoc`. Sphinx will be pinned until this is solved (which is when this issue should be closed). It's not obvious to me at the moment whether sphinx or scanpydoc is at fault.\r\n\r\n---------------\r\n\r\nTrying to build the docs with Sphinx 4.1.0 fails with the following output:\r\n\r\n<details>\r\n<summary> </summary>\r\n\r\n```sh\r\n$ make html\r\nRunning Sphinx v4.1.0\r\nloading intersphinx inventory from https://anndata.readthedocs.io/en/stable/objects.inv...\r\nloading intersphinx inventory from https://bbknn.readthedocs.io/en/latest/objects.inv...\r\nloading intersphinx inventory from https://matplotlib.org/cycler/objects.inv...\r\nloading intersphinx inventory from http://docs.h5py.org/en/stable/objects.inv...\r\nloading intersphinx inventory from https://ipython.readthedocs.io/en/stable/objects.inv...\r\nloading intersphinx inventory from https://leidenalg.readthedocs.io/en/latest/objects.inv...\r\nloading intersphinx inventory from https://louvain-igraph.readthedocs.io/en/latest/objects.inv...\r\nloading intersphinx inventory from https://matplotlib.org/objects.inv...\r\nloading intersphinx inventory from https://networkx.github.io/documentation/networkx-1.10/objects.inv...\r\nloading intersphinx inventory from https://docs.scipy.org/doc/numpy/objects.inv...\r\nloading intersphinx inventory from https://pandas.pydata.org/pandas-docs/stable/objects.inv...\r\nloading intersphinx inventory from https://docs.pytest.org/en/latest/objects.inv...\r\nloading intersphinx inventory from https://docs.python.org/3/objects.inv...\r\nloading intersphinx inventory from https://docs.scipy.org/doc/scipy/reference/objects.inv...\r\nloading intersphinx inventory from https://seaborn.pydata.org/objects.inv...\r\nloading intersphinx inventory from https://scikit-learn.org/stable/objects.inv...\r\nloading intersphinx inventory from https://scanpy-tutorials.readthedocs.io/en/latest/objects.inv...\r\nintersphinx inventory has moved: https://networkx.github.io/documentation/networkx-1.10/objects.inv -> https://networkx.org/documentation/networkx-1.10/objects.inv\r\nintersphinx inventory has moved: https://docs.scipy.org/doc/numpy/objects.inv -> https://numpy.org/doc/stable/objects.inv\r\nintersphinx inventory has moved: http://docs.h5py.org/en/stable/objects.inv -> https://docs.h5py.org/en/stable/objects.inv\r\n[autosummary] generating autosummary for: _key_contributors.rst, api.rst, basic_usage.rst, community.rst, contributors.rst, dev/ci.rst, dev/code.rst, dev/documentation.rst, dev/external-tools.rst, dev/getting-set-up.rst, ..., release-notes/1.7.1.rst, release-notes/1.7.2.rst, release-notes/1.8.0.rst, release-notes/1.8.1.rst, release-notes/1.8.2.rst, release-notes/1.9.0.rst, release-notes/index.rst, release-notes/release-latest.rst, tutorials.rst, usage-principles.rst\r\nError in github_url('scanpy._settings.ScanpyConfig.N_PCS'):\r\n\r\nExtension error (sphinx.ext.autosummary):\r\nHandler <function process_generate_options at 
0x139c4a940> for event 'builder-inited' threw an exception (exception: type object 'ScanpyConfig' has no attribute 'N_PCS')\r\nmake: *** [html] Error 2\r\n```\r\n\r\n</details>\r\n\r\nHowever, I'm entirely sure if this is Sphinx's fault, or our own. Currently the [N_PCS parameter isn't in the rendered documentation](https://scanpy.readthedocs.io/en/stable/generated/scanpy._settings.ScanpyConfig.html#scanpy._settings.ScanpyConfig). I think it should be, and am not sure why it's not showing up here.\r\n\r\nTo summarize:\r\n\r\n* Previous versions of our doc builds didn't seem to be including attribute docstrings for `ScanpyConfig`.\r\n* Sphinx 4.1.0 raises an error when it hits this attribute\n", "before_files": [{"content": "import os\nimport sys\nfrom pathlib import Path\nfrom datetime import datetime\n\nimport matplotlib # noqa\n\n# Don\u2019t use tkinter agg when importing scanpy \u2192 \u2026 \u2192 matplotlib\nmatplotlib.use('agg')\n\nHERE = Path(__file__).parent\nsys.path[:0] = [str(HERE.parent), str(HERE / 'extensions')]\nimport scanpy # noqa\n\non_rtd = os.environ.get('READTHEDOCS') == 'True'\n\n# -- General configuration ------------------------------------------------\n\n\nnitpicky = True # Warn about broken links. This is here for a reason: Do not change.\nneeds_sphinx = '2.0' # Nicer param docs\nsuppress_warnings = ['ref.citation']\n\n# General information\nproject = 'Scanpy'\nauthor = scanpy.__author__\ncopyright = f'{datetime.now():%Y}, {author}.'\nversion = scanpy.__version__.replace('.dirty', '')\nrelease = version\n\n# default settings\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\ndefault_role = 'literal'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\npygments_style = 'sphinx'\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.doctest',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n # 'plot_generator',\n 'matplotlib.sphinxext.plot_directive',\n 'sphinx_autodoc_typehints', # needs to be after napoleon\n # 'ipython_directive',\n # 'ipython_console_highlighting',\n 'scanpydoc',\n *[p.stem for p in (HERE / 'extensions').glob('*.py')],\n]\n\n# Generate the API documentation when building\nautosummary_generate = True\nautodoc_member_order = 'bysource'\n# autodoc_default_flags = ['members']\nnapoleon_google_docstring = False\nnapoleon_numpy_docstring = True\nnapoleon_include_init_with_doc = False\nnapoleon_use_rtype = True # having a separate entry generally helps readability\nnapoleon_use_param = True\nnapoleon_custom_sections = [('Params', 'Parameters')]\ntodo_include_todos = False\napi_dir = HERE / 'api' # function_images\n\nscanpy_tutorials_url = 'https://scanpy-tutorials.readthedocs.io/en/latest/'\n\nintersphinx_mapping = dict(\n anndata=('https://anndata.readthedocs.io/en/stable/', None),\n bbknn=('https://bbknn.readthedocs.io/en/latest/', None),\n cycler=('https://matplotlib.org/cycler/', None),\n h5py=('http://docs.h5py.org/en/stable/', None),\n ipython=('https://ipython.readthedocs.io/en/stable/', None),\n leidenalg=('https://leidenalg.readthedocs.io/en/latest/', None),\n louvain=('https://louvain-igraph.readthedocs.io/en/latest/', None),\n matplotlib=('https://matplotlib.org/', None),\n networkx=('https://networkx.github.io/documentation/networkx-1.10/', None),\n numpy=('https://docs.scipy.org/doc/numpy/', None),\n pandas=('https://pandas.pydata.org/pandas-docs/stable/', None),\n pytest=('https://docs.pytest.org/en/latest/', None),\n 
python=('https://docs.python.org/3', None),\n scipy=('https://docs.scipy.org/doc/scipy/reference/', None),\n seaborn=('https://seaborn.pydata.org/', None),\n sklearn=('https://scikit-learn.org/stable/', None),\n scanpy_tutorials=(scanpy_tutorials_url, None),\n)\n\n\n# -- Options for HTML output ----------------------------------------------\n\n\nhtml_theme = 'scanpydoc'\nhtml_theme_options = dict(\n navigation_depth=4,\n logo_only=True,\n docsearch_index='scanpy',\n docsearch_key='fa4304eb95d2134997e3729553a674b2',\n)\nhtml_context = dict(\n display_github=True, # Integrate GitHub\n github_user='theislab', # Username\n github_repo='scanpy', # Repo name\n github_version='master', # Version\n conf_py_path='/docs/', # Path in the checkout to the docs root\n)\nhtml_static_path = ['_static']\nhtml_show_sphinx = False\nhtml_logo = '_static/img/Scanpy_Logo_BrightFG.svg'\n\n\ndef setup(app):\n app.warningiserror = on_rtd\n\n\n# -- Options for other output formats ------------------------------------------\n\nhtmlhelp_basename = f'{project}doc'\ndoc_title = f'{project} Documentation'\nlatex_documents = [(master_doc, f'{project}.tex', doc_title, author, 'manual')]\nman_pages = [(master_doc, project, doc_title, [author], 1)]\ntexinfo_documents = [\n (\n master_doc,\n project,\n doc_title,\n author,\n project,\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n\n# -- Suppress link warnings ----------------------------------------------------\n\nqualname_overrides = {\n \"sklearn.neighbors._dist_metrics.DistanceMetric\": \"sklearn.neighbors.DistanceMetric\",\n # If the docs are built with an old version of numpy, this will make it work:\n \"numpy.random.RandomState\": \"numpy.random.mtrand.RandomState\",\n \"scanpy.plotting._matrixplot.MatrixPlot\": \"scanpy.pl.MatrixPlot\",\n \"scanpy.plotting._dotplot.DotPlot\": \"scanpy.pl.DotPlot\",\n \"scanpy.plotting._stacked_violin.StackedViolin\": \"scanpy.pl.StackedViolin\",\n \"pandas.core.series.Series\": \"pandas.Series\",\n}\n\nnitpick_ignore = [\n # Will probably be documented\n ('py:class', 'scanpy._settings.Verbosity'),\n # Currently undocumented: https://github.com/mwaskom/seaborn/issues/1810\n ('py:class', 'seaborn.ClusterGrid'),\n # Won\u2019t be documented\n ('py:class', 'scanpy.plotting._utils._AxesSubplot'),\n ('py:class', 'scanpy._utils.Empty'),\n ('py:class', 'numpy.random.mtrand.RandomState'),\n]\n\n# Options for plot examples\n\nplot_include_source = True\nplot_formats = [(\"png\", 90)]\nplot_html_show_formats = False\nplot_html_show_source_link = False\nplot_working_directory = HERE.parent # Project root\n", "path": "docs/conf.py"}]} | 3,284 | 133 |
gh_patches_debug_16799 | rasdani/github-patches | git_diff | kserve__kserve-3424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FileNotFoundError when using an s3 bucket as the model_dir with HuggingFace model server
/kind bug
First of all, I'd like to say thank you for the work on KServe! It's been delightful so far playing around with KServe. But we found a small bug while testing out the HuggingFace model server (which we're aware is a very new addition as well).
**What steps did you take and what happened:**
1. Created an InferenceService using the HuggingFace model server (yaml pasted below)
2. Specified an s3 bucket as the `model_dir` (I suspect this might happen for anything that's not a local dir)
3. Observed that the model is successfully downloaded to a tmp directory and loaded, but then encountered the `FileNotFoundError` right after
Logs:
```
% k logs huggingface-predictor-00003-deployment-8659bb8b9-m945b
Defaulted container "kserve-container" out of: kserve-container, queue-proxy
INFO:root:Copying contents of s3://kserve-test-models/classifier to local
INFO:root:Downloaded object classifier/config.json to /tmp/tmpckx_trr1/config.json
...
INFO:root:Successfully copied s3://kserve-test-models/classifier to /tmp/tmpckx_trr1
INFO:kserve:successfully loaded tokenizer for task: 4
INFO:kserve:successfully loaded huggingface model from path /tmp/tmpckx_trr1
Traceback (most recent call last):
File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/huggingfaceserver/huggingfaceserver/__main__.py", line 69, in <module>
kserve.ModelServer(registered_models=HuggingfaceModelRepository(args.model_dir)).start(
File "/huggingfaceserver/huggingfaceserver/huggingface_model_repository.py", line 24, in __init__
self.load_models()
File "/kserve/kserve/model_repository.py", line 37, in load_models
for name in os.listdir(self.models_dir):
FileNotFoundError: [Errno 2] No such file or directory: 's3://kserve-test-models/spam-classifier'
```
**What did you expect to happen:**
I expected that this would work, as the model was successfully downloaded and loaded. But I did find a temporary workaround below, and I think I know where the issue is!
**What's the InferenceService yaml:**
```yaml
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
name: huggingface
spec:
predictor:
serviceAccountName: huggingface-sa
containers:
- args:
- --model_name=spam-classifier
# - --model_id=xyz (see workaround below)
- --model_dir=s3://kserve-test-models/classifier
- --tensor_input_names=input_ids
image: kserve/huggingfaceserver:latest
name: kserve-container
```
**Anything else you would like to add:**
A temporary workaround I found is to supply the `model_id` argument. It can have any value, as the `model_dir` will override it anyway during loading:
https://github.com/kserve/kserve/blob/5172dc80b0a23d2263757de70a02eeef08b18811/python/huggingfaceserver/huggingfaceserver/model.py#L91-L94
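
For context, the linked lines amount to roughly the following precedence; this is a paraphrased sketch with simplified names, not the exact source.

```python
from typing import Optional

# Paraphrased sketch of the precedence linked above (names simplified, hypothetical).
def resolve_model_source(model_id: Optional[str], model_dir: Optional[str]) -> str:
    source = model_id or ""
    if model_dir:
        # model_dir wins over model_id, so a dummy model_id still loads the right model;
        # the real server first downloads the URI to a local temp dir.
        source = model_dir
    return source

print(resolve_model_source(model_id="xyz", model_dir="s3://kserve-test-models/classifier"))
```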
<details>
<summary>I have verified that this workaround works (expand to see logs).</summary>
```
% k logs huggingface-predictor-00004-deployment-946b4d6c8-pk5nj -f
Defaulted container "kserve-container" out of: kserve-container, queue-proxy
INFO:root:Copying contents of s3://kserve-test-models/classifier to local
INFO:root:Downloaded object classifier/config.json to /tmp/tmppwjsica7/config.json
...
INFO:kserve:successfully loaded tokenizer for task: 4
INFO:kserve:successfully loaded huggingface model from path /tmp/tmppwjsica7
INFO:kserve:Registering model: classifier
INFO:kserve:Setting max asyncio worker threads as 5
INFO:kserve:Starting uvicorn with 1 workers
2024-02-09 18:57:33.228 uvicorn.error INFO: Started server process [1]
2024-02-09 18:57:33.229 uvicorn.error INFO: Waiting for application startup.
2024-02-09 18:57:33.234 1 kserve INFO [start():62] Starting gRPC server on [::]:8081
2024-02-09 18:57:33.234 uvicorn.error INFO: Application startup complete.
2024-02-09 18:57:33.235 uvicorn.error INFO: Uvicorn running on http://0.0.0.0:8080 (Press CTRL+C to quit)
```
</details>
I think the issue is here:
https://github.com/kserve/kserve/blob/5172dc80b0a23d2263757de70a02eeef08b18811/python/huggingfaceserver/huggingfaceserver/__main__.py#L63-L72
1. `model.load()` will succeed, so we jump to line 68
2. It checks for `args.model_id`, which is empty, so we go inside the if block
3. It will try to instantiate `HuggingfaceModelRepository` with `model_dir`, which is pointing to an s3 bucket and not a local directory, thus causing the `FileNotFoundError`
4. This is how I came up with the workaround of passing `model_id`, so that the else block is executed instead (because the model did load successfully, so doing `kserve.ModelServer().start([model] if model.ready else [])` won't be a problem)
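
The net effect of steps 2-4 is an `os.listdir()` call on the raw URI rather than on a local directory, which matches the traceback; a minimal illustration (bucket name taken from the logs above):

```python
import os

model_dir = "s3://kserve-test-models/classifier"  # the URI from the logs, not a local path

try:
    os.listdir(model_dir)  # what HuggingfaceModelRepository.load_models() effectively does
except FileNotFoundError as err:
    print(err)  # [Errno 2] No such file or directory: 's3://kserve-test-models/classifier'
```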
**Environment:**
- Cloud Environment: aws
- Kubernetes version: (use `kubectl version`): v1.27.9-eks-5e0fdde
- OS (e.g. from `/etc/os-release`): Ubuntu 22.04.3 LTS
</issue>
<code>
[start of python/huggingfaceserver/huggingfaceserver/__main__.py]
1 # Copyright 2024 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import logging
17
18 from kserve.model import PredictorConfig
19 from . import HuggingfaceModel, HuggingfaceModelRepository
20 import kserve
21 from kserve.errors import ModelMissingError
22
23
24 def list_of_strings(arg):
25 return arg.split(',')
26
27
28 parser = argparse.ArgumentParser(parents=[kserve.model_server.parser])
29
30 parser.add_argument('--model_dir', required=False, default=None,
31 help='A URI pointer to the model binary')
32 parser.add_argument('--model_id', required=False,
33 help='Huggingface model id')
34 parser.add_argument('--tensor_parallel_degree', type=int, default=-1,
35 help='tensor parallel degree')
36 parser.add_argument('--max_length', type=int, default=None,
37 help='max sequence length for the tokenizer')
38 parser.add_argument('--do_lower_case', type=bool, default=True,
39 help='do lower case for the tokenizer')
40 parser.add_argument('--add_special_tokens', type=bool, default=True,
41 help='the sequences will be encoded with the special tokens relative to their model')
42 parser.add_argument('--tensor_input_names', type=list_of_strings, default=None,
43 help='the tensor input names passed to the model')
44 parser.add_argument('--task', required=False, help="The ML task name")
45
46 try:
47 from vllm.engine.arg_utils import AsyncEngineArgs
48
49 parser = AsyncEngineArgs.add_cli_args(parser)
50 _vllm = True
51 except ImportError:
52 _vllm = False
53 args, _ = parser.parse_known_args()
54
55 if __name__ == "__main__":
56 engine_args = AsyncEngineArgs.from_cli_args(args) if _vllm else None
57 predictor_config = PredictorConfig(args.predictor_host, args.predictor_protocol,
58 args.predictor_use_ssl,
59 args.predictor_request_timeout_seconds)
60 model = HuggingfaceModel(args.model_name,
61 predictor_config=predictor_config,
62 kwargs=vars(args), engine_args=engine_args)
63 try:
64 model.load()
65 except ModelMissingError:
66 logging.error(f"fail to locate model file for model {args.model_name} under dir {args.model_dir},"
67 f"trying loading from model repository.")
68 if not args.model_id:
69 kserve.ModelServer(registered_models=HuggingfaceModelRepository(args.model_dir)).start(
70 [model] if model.ready else [])
71 else:
72 kserve.ModelServer().start([model] if model.ready else [])
73
[end of python/huggingfaceserver/huggingfaceserver/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/huggingfaceserver/huggingfaceserver/__main__.py b/python/huggingfaceserver/huggingfaceserver/__main__.py
--- a/python/huggingfaceserver/huggingfaceserver/__main__.py
+++ b/python/huggingfaceserver/huggingfaceserver/__main__.py
@@ -62,11 +62,9 @@
kwargs=vars(args), engine_args=engine_args)
try:
model.load()
+ kserve.ModelServer().start([model] if model.ready else [])
except ModelMissingError:
logging.error(f"fail to locate model file for model {args.model_name} under dir {args.model_dir},"
f"trying loading from model repository.")
- if not args.model_id:
kserve.ModelServer(registered_models=HuggingfaceModelRepository(args.model_dir)).start(
[model] if model.ready else [])
- else:
- kserve.ModelServer().start([model] if model.ready else [])
| {"golden_diff": "diff --git a/python/huggingfaceserver/huggingfaceserver/__main__.py b/python/huggingfaceserver/huggingfaceserver/__main__.py\n--- a/python/huggingfaceserver/huggingfaceserver/__main__.py\n+++ b/python/huggingfaceserver/huggingfaceserver/__main__.py\n@@ -62,11 +62,9 @@\n kwargs=vars(args), engine_args=engine_args)\n try:\n model.load()\n+ kserve.ModelServer().start([model] if model.ready else [])\n except ModelMissingError:\n logging.error(f\"fail to locate model file for model {args.model_name} under dir {args.model_dir},\"\n f\"trying loading from model repository.\")\n- if not args.model_id:\n kserve.ModelServer(registered_models=HuggingfaceModelRepository(args.model_dir)).start(\n [model] if model.ready else [])\n- else:\n- kserve.ModelServer().start([model] if model.ready else [])\n", "issue": "FileNotFoundError when using an s3 bucket as the model_dir with HuggingFace model server\n/kind bug\r\n\r\nFirst of all, I'd like to say thank you for the work on KServe! It's been delightful so far playing around with KServe. But we found a small bug while testing out the HuggingFace model server (which we're aware is a very new addition as well).\r\n\r\n**What steps did you take and what happened:**\r\n1. Created an InferenceService using the HuggingFace model server (yaml pasted below)\r\n2. Specified an s3 bucket as the `model_dir` (I suspect this might happen for anything that's not a local dir)\r\n3. Observed that the model is succesfully downloaded to a tmp directory and loaded, but then encountered the `FileNotFoundError` right after\r\n\r\nLogs:\r\n```\r\n% k logs huggingface-predictor-00003-deployment-8659bb8b9-m945b\r\nDefaulted container \"kserve-container\" out of: kserve-container, queue-proxy\r\nINFO:root:Copying contents of s3://kserve-test-models/classifier to local\r\nINFO:root:Downloaded object classifier/config.json to /tmp/tmpckx_trr1/config.json\r\n...\r\nINFO:root:Successfully copied s3://kserve-test-models/classifier to /tmp/tmpckx_trr1\r\nINFO:kserve:successfully loaded tokenizer for task: 4\r\nINFO:kserve:successfully loaded huggingface model from path /tmp/tmpckx_trr1\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/lib/python3.10/runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"/huggingfaceserver/huggingfaceserver/__main__.py\", line 69, in <module>\r\n kserve.ModelServer(registered_models=HuggingfaceModelRepository(args.model_dir)).start(\r\n File \"/huggingfaceserver/huggingfaceserver/huggingface_model_repository.py\", line 24, in __init__\r\n self.load_models()\r\n File \"/kserve/kserve/model_repository.py\", line 37, in load_models\r\n for name in os.listdir(self.models_dir):\r\nFileNotFoundError: [Errno 2] No such file or directory: 's3://kserve-test-models/spam-classifier'\r\n```\r\n\r\n\r\n**What did you expect to happen:**\r\n\r\nI expected that this would work, as the model was successfully downloaded and loaded. 
But I did find a tmp workaround below and I think I know where the issue is!\r\n\r\n**What's the InferenceService yaml:**\r\n```yaml\r\napiVersion: serving.kserve.io/v1beta1\r\nkind: InferenceService\r\nmetadata:\r\n name: huggingface\r\nspec:\r\n predictor:\r\n serviceAccountName: huggingface-sa\r\n containers:\r\n - args:\r\n - --model_name=spam-classifier\r\n # - --model_id=xyz (see workaround below)\r\n - --model_dir=s3://kserve-test-models/classifier\r\n - --tensor_input_names=input_ids\r\n image: kserve/huggingfaceserver:latest\r\n name: kserve-container\r\n```\r\n\r\n**Anything else you would like to add:**\r\n\r\nA temporary workaround I found is to supply the `model_id` argument. It can have any value, as the `model_dir` will override it anyway during loading:\r\n\r\nhttps://github.com/kserve/kserve/blob/5172dc80b0a23d2263757de70a02eeef08b18811/python/huggingfaceserver/huggingfaceserver/model.py#L91-L94\r\n\r\n<details>\r\n <summary>I have verified that this workaround works (expand to see logs).</summary>\r\n\r\n```\r\n% k logs huggingface-predictor-00004-deployment-946b4d6c8-pk5nj -f\r\nDefaulted container \"kserve-container\" out of: kserve-container, queue-proxy\r\nINFO:root:Copying contents of s3://kserve-test-models/classifier to local\r\nINFO:root:Downloaded object classifier/config.json to /tmp/tmppwjsica7/config.json\r\n...\r\nINFO:kserve:successfully loaded tokenizer for task: 4\r\nINFO:kserve:successfully loaded huggingface model from path /tmp/tmppwjsica7\r\nINFO:kserve:Registering model: classifier\r\nINFO:kserve:Setting max asyncio worker threads as 5\r\nINFO:kserve:Starting uvicorn with 1 workers\r\n2024-02-09 18:57:33.228 uvicorn.error INFO: Started server process [1]\r\n2024-02-09 18:57:33.229 uvicorn.error INFO: Waiting for application startup.\r\n2024-02-09 18:57:33.234 1 kserve INFO [start():62] Starting gRPC server on [::]:8081\r\n2024-02-09 18:57:33.234 uvicorn.error INFO: Application startup complete.\r\n2024-02-09 18:57:33.235 uvicorn.error INFO: Uvicorn running on http://0.0.0.0:8080 (Press CTRL+C to quit)\r\n```\r\n</details>\r\n\r\nI think the issue is here:\r\nhttps://github.com/kserve/kserve/blob/5172dc80b0a23d2263757de70a02eeef08b18811/python/huggingfaceserver/huggingfaceserver/__main__.py#L63-L72\r\n\r\n1. `model.load()` will succeed, so we jump to line 68\r\n2. It checks for `args.model_id`, which is empty, so we go inside the if block\r\n3. It will try to instantiate `HuggingfaceModelRepository` with `model_dir`, which is pointing to an s3 bucket and not a local directory, thus causing the `FileNotFoundError`\r\n4. This is how I came up with the workaround of passing `model_id`, so that the else block is executed instead (because the model did load succesfully, so doing `kserve.ModelServer().start([model] if model.ready else [])` won't be a problem)\r\n\r\n**Environment:**\r\n\r\n- Cloud Environment: aws\r\n- Kubernetes version: (use `kubectl version`): v1.27.9-eks-5e0fdde\r\n- OS (e.g. 
from `/etc/os-release`): Ubuntu 22.04.3 LTS\r\n\n", "before_files": [{"content": "# Copyright 2024 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\n\nfrom kserve.model import PredictorConfig\nfrom . import HuggingfaceModel, HuggingfaceModelRepository\nimport kserve\nfrom kserve.errors import ModelMissingError\n\n\ndef list_of_strings(arg):\n return arg.split(',')\n\n\nparser = argparse.ArgumentParser(parents=[kserve.model_server.parser])\n\nparser.add_argument('--model_dir', required=False, default=None,\n help='A URI pointer to the model binary')\nparser.add_argument('--model_id', required=False,\n help='Huggingface model id')\nparser.add_argument('--tensor_parallel_degree', type=int, default=-1,\n help='tensor parallel degree')\nparser.add_argument('--max_length', type=int, default=None,\n help='max sequence length for the tokenizer')\nparser.add_argument('--do_lower_case', type=bool, default=True,\n help='do lower case for the tokenizer')\nparser.add_argument('--add_special_tokens', type=bool, default=True,\n help='the sequences will be encoded with the special tokens relative to their model')\nparser.add_argument('--tensor_input_names', type=list_of_strings, default=None,\n help='the tensor input names passed to the model')\nparser.add_argument('--task', required=False, help=\"The ML task name\")\n\ntry:\n from vllm.engine.arg_utils import AsyncEngineArgs\n\n parser = AsyncEngineArgs.add_cli_args(parser)\n _vllm = True\nexcept ImportError:\n _vllm = False\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n engine_args = AsyncEngineArgs.from_cli_args(args) if _vllm else None\n predictor_config = PredictorConfig(args.predictor_host, args.predictor_protocol,\n args.predictor_use_ssl,\n args.predictor_request_timeout_seconds)\n model = HuggingfaceModel(args.model_name,\n predictor_config=predictor_config,\n kwargs=vars(args), engine_args=engine_args)\n try:\n model.load()\n except ModelMissingError:\n logging.error(f\"fail to locate model file for model {args.model_name} under dir {args.model_dir},\"\n f\"trying loading from model repository.\")\n if not args.model_id:\n kserve.ModelServer(registered_models=HuggingfaceModelRepository(args.model_dir)).start(\n [model] if model.ready else [])\n else:\n kserve.ModelServer().start([model] if model.ready else [])\n", "path": "python/huggingfaceserver/huggingfaceserver/__main__.py"}]} | 2,823 | 209 |
gh_patches_debug_11771 | rasdani/github-patches | git_diff | google__timesketch-268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong app context for CSV task
We need to run the CSV importer task in the correct context.
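
For background, the usual pattern when a Celery task needs the Flask-SQLAlchemy session is to run the database code inside an explicit application context. The sketch below uses a hypothetical `flask_app` object; in Timesketch the real app is whatever `create_celery_app()` is bound to, and the body stands in for the `SearchIndex`/`db_session` code in `run_csv()` further down.

```python
from flask import Flask

flask_app = Flask(__name__)  # hypothetical stand-in for the Timesketch app


def mark_index_ready(index_name):
    """Sketch: ORM access from a Celery task wrapped in an app context."""
    with flask_app.app_context():
        # In run_csv() this is where SearchIndex would be queried, the
        # processing status removed and db_session committed.
        print('finished import for index {0:s}'.format(index_name))
```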
</issue>
<code>
[start of timesketch/lib/tasks.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Celery task for processing Plaso storage files."""
15
16 import os
17 import logging
18 import sys
19
20 from flask import current_app
21 # We currently don't have plaso in our Travis setup. This is a workaround
22 # for that until we fix the Travis environment.
23 # TODO: Add Plaso to our Travis environment we are running our tests in.
24 try:
25 from plaso.frontend import psort
26 except ImportError:
27 pass
28
29 from timesketch import create_celery_app
30 from timesketch.lib.datastores.elastic import ElasticsearchDataStore
31 from timesketch.lib.utils import read_and_validate_csv
32 from timesketch.models import db_session
33 from timesketch.models.sketch import SearchIndex
34
35 celery = create_celery_app()
36
37
38 def get_data_location():
39 """Path to the plaso data directory.
40
41 Returns:
42 The path to where the plaso data directory is or None if not existing.
43 """
44 data_location = current_app.config.get(u'PLASO_DATA_LOCATION', None)
45 if not data_location:
46 data_location = os.path.join(sys.prefix, u'share', u'plaso')
47 if not os.path.exists(data_location):
48 data_location = None
49 return data_location
50
51
52 @celery.task(track_started=True)
53 def run_plaso(source_file_path, timeline_name, index_name, username=None):
54 """Create a Celery task for processing Plaso storage file.
55
56 Args:
57 source_file_path: Path to plaso storage file.
58 timeline_name: Name of the Timesketch timeline.
59 index_name: Name of the datastore index.
60 username: Username of the user who will own the timeline.
61
62 Returns:
63 Dictionary with count of processed events.
64 """
65 plaso_data_location = get_data_location()
66 flush_interval = 1000 # events to queue before bulk index
67 doc_type = u'plaso_event' # Document type for Elasticsearch
68
69 # Use Plaso psort frontend tool.
70 frontend = psort.PsortFrontend()
71 frontend.SetDataLocation(plaso_data_location)
72 storage_reader = frontend.CreateStorageReader(source_file_path)
73
74 # Setup the Timesketch output module.
75 output_module = frontend.CreateOutputModule(u'timesketch')
76 output_module.SetIndexName(index_name)
77 output_module.SetTimelineName(timeline_name)
78 output_module.SetFlushInterval(flush_interval)
79 output_module.SetDocType(doc_type)
80 if username:
81 output_module.SetUserName(username)
82
83 # Start process the Plaso storage file.
84 counter = frontend.ExportEvents(storage_reader, output_module)
85
86 return dict(counter)
87
88
89 @celery.task(track_started=True)
90 def run_csv(source_file_path, timeline_name, index_name, username=None):
91 """Create a Celery task for processing a CSV file.
92
93 Args:
94 source_file_path: Path to CSV file.
95 timeline_name: Name of the Timesketch timeline.
96 index_name: Name of the datastore index.
97
98 Returns:
99 Dictionary with count of processed events.
100 """
101 flush_interval = 1000 # events to queue before bulk index
102 event_type = u'generic_event' # Document type for Elasticsearch
103
104 # Log information to Celery
105 logging.info(u'Index name: %s', index_name)
106 logging.info(u'Timeline name: %s', timeline_name)
107 logging.info(u'Flush interval: %d', flush_interval)
108 logging.info(u'Document type: %s', event_type)
109 logging.info(u'Owner: %s', username)
110
111 es = ElasticsearchDataStore(
112 host=current_app.config[u'ELASTIC_HOST'],
113 port=current_app.config[u'ELASTIC_PORT'])
114
115 es.create_index(index_name=index_name, doc_type=event_type)
116 for event in read_and_validate_csv(source_file_path):
117 es.import_event(
118 flush_interval, index_name, event_type, event)
119
120 # Import the remaining events
121 total_events = es.import_event(flush_interval, index_name, event_type)
122
123 # We are done so let's remove the processing status flag
124 search_index = SearchIndex.query.filter_by(index_name=index_name).first()
125 search_index.status.remove(search_index.status[0])
126 db_session.add(search_index)
127 db_session.commit()
128
129 return {u'Events processed': total_events}
130
[end of timesketch/lib/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py
--- a/timesketch/lib/tasks.py
+++ b/timesketch/lib/tasks.py
@@ -121,9 +121,10 @@
total_events = es.import_event(flush_interval, index_name, event_type)
# We are done so let's remove the processing status flag
- search_index = SearchIndex.query.filter_by(index_name=index_name).first()
- search_index.status.remove(search_index.status[0])
- db_session.add(search_index)
- db_session.commit()
+ with celery.app.app_context():
+ search_index = SearchIndex.query.filter_by(index_name=index_name).first()
+ search_index.status.remove(search_index.status[0])
+ db_session.add(search_index)
+ db_session.commit()
return {u'Events processed': total_events}
| {"golden_diff": "diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py\n--- a/timesketch/lib/tasks.py\n+++ b/timesketch/lib/tasks.py\n@@ -121,9 +121,10 @@\n total_events = es.import_event(flush_interval, index_name, event_type)\n \n # We are done so let's remove the processing status flag\n- search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n- search_index.status.remove(search_index.status[0])\n- db_session.add(search_index)\n- db_session.commit()\n+ with celery.app.app_context():\n+ search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n+ search_index.status.remove(search_index.status[0])\n+ db_session.add(search_index)\n+ db_session.commit()\n \n return {u'Events processed': total_events}\n", "issue": "Wrong app context for CSV task\nWe need to run the CSV importer task in the correct context.\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Celery task for processing Plaso storage files.\"\"\"\n\nimport os\nimport logging\nimport sys\n\nfrom flask import current_app\n# We currently don't have plaso in our Travis setup. This is a workaround\n# for that until we fix the Travis environment.\n# TODO: Add Plaso to our Travis environment we are running our tests in.\ntry:\n from plaso.frontend import psort\nexcept ImportError:\n pass\n\nfrom timesketch import create_celery_app\nfrom timesketch.lib.datastores.elastic import ElasticsearchDataStore\nfrom timesketch.lib.utils import read_and_validate_csv\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\n\ncelery = create_celery_app()\n\n\ndef get_data_location():\n \"\"\"Path to the plaso data directory.\n\n Returns:\n The path to where the plaso data directory is or None if not existing.\n \"\"\"\n data_location = current_app.config.get(u'PLASO_DATA_LOCATION', None)\n if not data_location:\n data_location = os.path.join(sys.prefix, u'share', u'plaso')\n if not os.path.exists(data_location):\n data_location = None\n return data_location\n\n\[email protected](track_started=True)\ndef run_plaso(source_file_path, timeline_name, index_name, username=None):\n \"\"\"Create a Celery task for processing Plaso storage file.\n\n Args:\n source_file_path: Path to plaso storage file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n username: Username of the user who will own the timeline.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n plaso_data_location = get_data_location()\n flush_interval = 1000 # events to queue before bulk index\n doc_type = u'plaso_event' # Document type for Elasticsearch\n\n # Use Plaso psort frontend tool.\n frontend = psort.PsortFrontend()\n frontend.SetDataLocation(plaso_data_location)\n storage_reader = frontend.CreateStorageReader(source_file_path)\n\n # Setup the Timesketch output module.\n output_module = frontend.CreateOutputModule(u'timesketch')\n 
output_module.SetIndexName(index_name)\n output_module.SetTimelineName(timeline_name)\n output_module.SetFlushInterval(flush_interval)\n output_module.SetDocType(doc_type)\n if username:\n output_module.SetUserName(username)\n\n # Start process the Plaso storage file.\n counter = frontend.ExportEvents(storage_reader, output_module)\n\n return dict(counter)\n\n\[email protected](track_started=True)\ndef run_csv(source_file_path, timeline_name, index_name, username=None):\n \"\"\"Create a Celery task for processing a CSV file.\n\n Args:\n source_file_path: Path to CSV file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n flush_interval = 1000 # events to queue before bulk index\n event_type = u'generic_event' # Document type for Elasticsearch\n\n # Log information to Celery\n logging.info(u'Index name: %s', index_name)\n logging.info(u'Timeline name: %s', timeline_name)\n logging.info(u'Flush interval: %d', flush_interval)\n logging.info(u'Document type: %s', event_type)\n logging.info(u'Owner: %s', username)\n\n es = ElasticsearchDataStore(\n host=current_app.config[u'ELASTIC_HOST'],\n port=current_app.config[u'ELASTIC_PORT'])\n\n es.create_index(index_name=index_name, doc_type=event_type)\n for event in read_and_validate_csv(source_file_path):\n es.import_event(\n flush_interval, index_name, event_type, event)\n\n # Import the remaining events\n total_events = es.import_event(flush_interval, index_name, event_type)\n\n # We are done so let's remove the processing status flag\n search_index = SearchIndex.query.filter_by(index_name=index_name).first()\n search_index.status.remove(search_index.status[0])\n db_session.add(search_index)\n db_session.commit()\n\n return {u'Events processed': total_events}\n", "path": "timesketch/lib/tasks.py"}]} | 1,898 | 193 |
gh_patches_debug_41329 | rasdani/github-patches | git_diff | tensorflow__addons-2008 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keras model save using WeightedKappaLoss errors, not json serializable
**Describe the bug**
A Keras model compiled with WeightedKappaLoss fails on save with "TypeError: ('Not JSON Serializable:', tf.float32)".
**Code to reproduce the issue**
```
model = Sequential()
model._set_inputs(tf.keras.Input((256,256,3)))
model.add(layers.Dense(6, activation='softmax'))
model.compile(Adam(lr=1e-3), tfa.losses.WeightedKappaLoss(num_classes=6, weightage='quadratic'))
model.save('test')
```
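
The error appears to come from serializing the loss configuration: `get_config()` in the file below returns the raw `tf.DType` object under the `"dtype"` key, and `tf.float32` cannot be JSON-encoded. A minimal, standalone sketch of that distinction (not the library's fix):

```python
import json

import tensorflow as tf

serializable = {"dtype": tf.float32.name}  # "float32": a plain string, encodes fine
print(json.dumps(serializable))

try:
    json.dumps({"dtype": tf.float32})  # raw DType object
except TypeError as exc:
    # Same root cause as the reported "Not JSON Serializable" failure.
    print(exc)
```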
</issue>
<code>
[start of tensorflow_addons/losses/kappa_loss.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implements Weighted kappa loss."""
16
17 import tensorflow as tf
18 from tensorflow_addons.utils.types import Number
19 from typeguard import typechecked
20 from typing import Optional
21
22
23 @tf.keras.utils.register_keras_serializable(package="Addons")
24 class WeightedKappaLoss(tf.keras.losses.Loss):
25 """Implements the Weighted Kappa loss function.
26
27 Weighted Kappa loss was introduced in the
28 [Weighted kappa loss function for multi-class classification
29 of ordinal data in deep learning]
30 (https://www.sciencedirect.com/science/article/abs/pii/S0167865517301666).
31 Weighted Kappa is widely used in Ordinal Classification Problems.
32 The loss value lies in [-inf, log 2], where log 2
33 means the random prediction.
34
35 Usage:
36
37 ```python
38 kappa_loss = WeightedKappaLoss(num_classes=4)
39 y_true = tf.constant([[0, 0, 1, 0], [0, 1, 0, 0],
40 [1, 0, 0, 0], [0, 0, 0, 1]])
41 y_pred = tf.constant([[0.1, 0.2, 0.6, 0.1], [0.1, 0.5, 0.3, 0.1],
42 [0.8, 0.05, 0.05, 0.1], [0.01, 0.09, 0.1, 0.8]])
43 loss = kappa_loss(y_true, y_pred)
44 print('Loss: ', loss.numpy()) # Loss: -1.1611923
45 ```
46
47 Usage with `tf.keras` API:
48 ```python
49 # outputs should be softmax results
50 # if you want to weight the samples, just multiply the outputs
51 # by the sample weight.
52 model = tf.keras.Model(inputs, outputs)
53 model.compile('sgd', loss=tfa.losses.WeightedKappa(num_classes=4))
54 ```
55 """
56
57 @typechecked
58 def __init__(
59 self,
60 num_classes: int,
61 weightage: Optional[str] = "quadratic",
62 name: Optional[str] = "cohen_kappa_loss",
63 epsilon: Optional[Number] = 1e-6,
64 dtype: Optional[tf.DType] = tf.float32,
65 reduction: str = tf.keras.losses.Reduction.NONE,
66 ):
67 """Creates a `WeightedKappa` instance.
68
69 Args:
70 num_classes: Number of unique classes in your dataset.
71 weightage: (Optional) Weighting to be considered for calculating
72 kappa statistics. A valid value is one of
73 ['linear', 'quadratic']. Defaults to `quadratic` since it's
74 mostly used.
75 name: (Optional) String name of the metric instance.
76 epsilon: (Optional) increment to avoid log zero,
77 so the loss will be log(1 - k + epsilon), where k belongs to
78 [-1, 1], usually you can use the default value which is 1e-6.
79 dtype: (Optional) Data type of the metric result.
80 Defaults to `tf.float32`.
81 Raises:
82 ValueError: If the value passed for `weightage` is invalid
83 i.e. not any one of ['linear', 'quadratic']
84 """
85
86 super().__init__(name=name, reduction=reduction)
87
88 if weightage not in ("linear", "quadratic"):
89 raise ValueError("Unknown kappa weighting type.")
90
91 self.weightage = weightage
92 self.num_classes = num_classes
93 self.epsilon = epsilon
94 self.dtype = dtype
95 label_vec = tf.range(num_classes, dtype=dtype)
96 self.row_label_vec = tf.reshape(label_vec, [1, num_classes])
97 self.col_label_vec = tf.reshape(label_vec, [num_classes, 1])
98 col_mat = tf.tile(self.col_label_vec, [1, num_classes])
99 row_mat = tf.tile(self.row_label_vec, [num_classes, 1])
100 if weightage == "linear":
101 self.weight_mat = tf.abs(col_mat - row_mat)
102 else:
103 self.weight_mat = (col_mat - row_mat) ** 2
104
105 def call(self, y_true, y_pred):
106 y_true = tf.cast(y_true, dtype=self.dtype)
107 batch_size = tf.shape(y_true)[0]
108 cat_labels = tf.matmul(y_true, self.col_label_vec)
109 cat_label_mat = tf.tile(cat_labels, [1, self.num_classes])
110 row_label_mat = tf.tile(self.row_label_vec, [batch_size, 1])
111 if self.weightage == "linear":
112 weight = tf.abs(cat_label_mat - row_label_mat)
113 else:
114 weight = (cat_label_mat - row_label_mat) ** 2
115 numerator = tf.reduce_sum(weight * y_pred)
116 label_dist = tf.reduce_sum(y_true, axis=0, keepdims=True)
117 pred_dist = tf.reduce_sum(y_pred, axis=0, keepdims=True)
118 w_pred_dist = tf.matmul(self.weight_mat, pred_dist, transpose_b=True)
119 denominator = tf.reduce_sum(tf.matmul(label_dist, w_pred_dist))
120 denominator /= tf.cast(batch_size, dtype=self.dtype)
121 loss = tf.math.divide_no_nan(numerator, denominator)
122 return tf.math.log(loss + self.epsilon)
123
124 def get_config(self):
125 config = {
126 "num_classes": self.num_classes,
127 "weightage": self.weightage,
128 "epsilon": self.epsilon,
129 "dtype": self.dtype,
130 }
131 base_config = super().get_config()
132 return {**base_config, **config}
133
[end of tensorflow_addons/losses/kappa_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/losses/kappa_loss.py b/tensorflow_addons/losses/kappa_loss.py
--- a/tensorflow_addons/losses/kappa_loss.py
+++ b/tensorflow_addons/losses/kappa_loss.py
@@ -14,10 +14,13 @@
# ==============================================================================
"""Implements Weighted kappa loss."""
+import warnings
+from typing import Optional
+
import tensorflow as tf
-from tensorflow_addons.utils.types import Number
from typeguard import typechecked
-from typing import Optional
+
+from tensorflow_addons.utils.types import Number
@tf.keras.utils.register_keras_serializable(package="Addons")
@@ -85,14 +88,20 @@
super().__init__(name=name, reduction=reduction)
+ warnings.warn(
+ "The data type for `WeightedKappaLoss` defaults to "
+ "`tf.keras.backend.floatx()`."
+ "The argument `dtype` will be removed in Addons `0.12`.",
+ DeprecationWarning,
+ )
+
if weightage not in ("linear", "quadratic"):
raise ValueError("Unknown kappa weighting type.")
self.weightage = weightage
self.num_classes = num_classes
- self.epsilon = epsilon
- self.dtype = dtype
- label_vec = tf.range(num_classes, dtype=dtype)
+ self.epsilon = epsilon or tf.keras.backend.epsilon()
+ label_vec = tf.range(num_classes, dtype=tf.keras.backend.floatx())
self.row_label_vec = tf.reshape(label_vec, [1, num_classes])
self.col_label_vec = tf.reshape(label_vec, [num_classes, 1])
col_mat = tf.tile(self.col_label_vec, [1, num_classes])
@@ -103,7 +112,8 @@
self.weight_mat = (col_mat - row_mat) ** 2
def call(self, y_true, y_pred):
- y_true = tf.cast(y_true, dtype=self.dtype)
+ y_true = tf.cast(y_true, dtype=self.col_label_vec.dtype)
+ y_pred = tf.cast(y_pred, dtype=self.weight_mat.dtype)
batch_size = tf.shape(y_true)[0]
cat_labels = tf.matmul(y_true, self.col_label_vec)
cat_label_mat = tf.tile(cat_labels, [1, self.num_classes])
@@ -117,7 +127,7 @@
pred_dist = tf.reduce_sum(y_pred, axis=0, keepdims=True)
w_pred_dist = tf.matmul(self.weight_mat, pred_dist, transpose_b=True)
denominator = tf.reduce_sum(tf.matmul(label_dist, w_pred_dist))
- denominator /= tf.cast(batch_size, dtype=self.dtype)
+ denominator /= tf.cast(batch_size, dtype=denominator.dtype)
loss = tf.math.divide_no_nan(numerator, denominator)
return tf.math.log(loss + self.epsilon)
@@ -126,7 +136,6 @@
"num_classes": self.num_classes,
"weightage": self.weightage,
"epsilon": self.epsilon,
- "dtype": self.dtype,
}
base_config = super().get_config()
return {**base_config, **config}
| {"golden_diff": "diff --git a/tensorflow_addons/losses/kappa_loss.py b/tensorflow_addons/losses/kappa_loss.py\n--- a/tensorflow_addons/losses/kappa_loss.py\n+++ b/tensorflow_addons/losses/kappa_loss.py\n@@ -14,10 +14,13 @@\n # ==============================================================================\n \"\"\"Implements Weighted kappa loss.\"\"\"\n \n+import warnings\n+from typing import Optional\n+\n import tensorflow as tf\n-from tensorflow_addons.utils.types import Number\n from typeguard import typechecked\n-from typing import Optional\n+\n+from tensorflow_addons.utils.types import Number\n \n \n @tf.keras.utils.register_keras_serializable(package=\"Addons\")\n@@ -85,14 +88,20 @@\n \n super().__init__(name=name, reduction=reduction)\n \n+ warnings.warn(\n+ \"The data type for `WeightedKappaLoss` defaults to \"\n+ \"`tf.keras.backend.floatx()`.\"\n+ \"The argument `dtype` will be removed in Addons `0.12`.\",\n+ DeprecationWarning,\n+ )\n+\n if weightage not in (\"linear\", \"quadratic\"):\n raise ValueError(\"Unknown kappa weighting type.\")\n \n self.weightage = weightage\n self.num_classes = num_classes\n- self.epsilon = epsilon\n- self.dtype = dtype\n- label_vec = tf.range(num_classes, dtype=dtype)\n+ self.epsilon = epsilon or tf.keras.backend.epsilon()\n+ label_vec = tf.range(num_classes, dtype=tf.keras.backend.floatx())\n self.row_label_vec = tf.reshape(label_vec, [1, num_classes])\n self.col_label_vec = tf.reshape(label_vec, [num_classes, 1])\n col_mat = tf.tile(self.col_label_vec, [1, num_classes])\n@@ -103,7 +112,8 @@\n self.weight_mat = (col_mat - row_mat) ** 2\n \n def call(self, y_true, y_pred):\n- y_true = tf.cast(y_true, dtype=self.dtype)\n+ y_true = tf.cast(y_true, dtype=self.col_label_vec.dtype)\n+ y_pred = tf.cast(y_pred, dtype=self.weight_mat.dtype)\n batch_size = tf.shape(y_true)[0]\n cat_labels = tf.matmul(y_true, self.col_label_vec)\n cat_label_mat = tf.tile(cat_labels, [1, self.num_classes])\n@@ -117,7 +127,7 @@\n pred_dist = tf.reduce_sum(y_pred, axis=0, keepdims=True)\n w_pred_dist = tf.matmul(self.weight_mat, pred_dist, transpose_b=True)\n denominator = tf.reduce_sum(tf.matmul(label_dist, w_pred_dist))\n- denominator /= tf.cast(batch_size, dtype=self.dtype)\n+ denominator /= tf.cast(batch_size, dtype=denominator.dtype)\n loss = tf.math.divide_no_nan(numerator, denominator)\n return tf.math.log(loss + self.epsilon)\n \n@@ -126,7 +136,6 @@\n \"num_classes\": self.num_classes,\n \"weightage\": self.weightage,\n \"epsilon\": self.epsilon,\n- \"dtype\": self.dtype,\n }\n base_config = super().get_config()\n return {**base_config, **config}\n", "issue": "Keras model save using WeightedKappaLoss errors, not json serializable\n**Describe the bug**\r\n\r\nKeras model compiled with WeightedKappaLoss errors when saving, \"TypeError: ('Not JSON Serializable:', tf.float32)\"\r\n\r\n**Code to reproduce the issue**\r\n\r\n```\r\nmodel = Sequential()\r\n\r\nmodel._set_inputs(tf.keras.Input((256,256,3)))\r\nmodel.add(layers.Dense(6, activation='softmax'))\r\n\r\nmodel.compile(Adam(lr=1e-3), tfa.losses.WeightedKappaLoss(num_classes=6, weightage='quadratic'))\r\nmodel.save('test')\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Weighted kappa loss.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_addons.utils.types import Number\nfrom typeguard import typechecked\nfrom typing import Optional\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass WeightedKappaLoss(tf.keras.losses.Loss):\n \"\"\"Implements the Weighted Kappa loss function.\n\n Weighted Kappa loss was introduced in the\n [Weighted kappa loss function for multi-class classification\n of ordinal data in deep learning]\n (https://www.sciencedirect.com/science/article/abs/pii/S0167865517301666).\n Weighted Kappa is widely used in Ordinal Classification Problems.\n The loss value lies in [-inf, log 2], where log 2\n means the random prediction.\n\n Usage:\n\n ```python\n kappa_loss = WeightedKappaLoss(num_classes=4)\n y_true = tf.constant([[0, 0, 1, 0], [0, 1, 0, 0],\n [1, 0, 0, 0], [0, 0, 0, 1]])\n y_pred = tf.constant([[0.1, 0.2, 0.6, 0.1], [0.1, 0.5, 0.3, 0.1],\n [0.8, 0.05, 0.05, 0.1], [0.01, 0.09, 0.1, 0.8]])\n loss = kappa_loss(y_true, y_pred)\n print('Loss: ', loss.numpy()) # Loss: -1.1611923\n ```\n\n Usage with `tf.keras` API:\n ```python\n # outputs should be softmax results\n # if you want to weight the samples, just multiply the outputs\n # by the sample weight.\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tfa.losses.WeightedKappa(num_classes=4))\n ```\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n num_classes: int,\n weightage: Optional[str] = \"quadratic\",\n name: Optional[str] = \"cohen_kappa_loss\",\n epsilon: Optional[Number] = 1e-6,\n dtype: Optional[tf.DType] = tf.float32,\n reduction: str = tf.keras.losses.Reduction.NONE,\n ):\n \"\"\"Creates a `WeightedKappa` instance.\n\n Args:\n num_classes: Number of unique classes in your dataset.\n weightage: (Optional) Weighting to be considered for calculating\n kappa statistics. A valid value is one of\n ['linear', 'quadratic']. Defaults to `quadratic` since it's\n mostly used.\n name: (Optional) String name of the metric instance.\n epsilon: (Optional) increment to avoid log zero,\n so the loss will be log(1 - k + epsilon), where k belongs to\n [-1, 1], usually you can use the default value which is 1e-6.\n dtype: (Optional) Data type of the metric result.\n Defaults to `tf.float32`.\n Raises:\n ValueError: If the value passed for `weightage` is invalid\n i.e. 
not any one of ['linear', 'quadratic']\n \"\"\"\n\n super().__init__(name=name, reduction=reduction)\n\n if weightage not in (\"linear\", \"quadratic\"):\n raise ValueError(\"Unknown kappa weighting type.\")\n\n self.weightage = weightage\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.dtype = dtype\n label_vec = tf.range(num_classes, dtype=dtype)\n self.row_label_vec = tf.reshape(label_vec, [1, num_classes])\n self.col_label_vec = tf.reshape(label_vec, [num_classes, 1])\n col_mat = tf.tile(self.col_label_vec, [1, num_classes])\n row_mat = tf.tile(self.row_label_vec, [num_classes, 1])\n if weightage == \"linear\":\n self.weight_mat = tf.abs(col_mat - row_mat)\n else:\n self.weight_mat = (col_mat - row_mat) ** 2\n\n def call(self, y_true, y_pred):\n y_true = tf.cast(y_true, dtype=self.dtype)\n batch_size = tf.shape(y_true)[0]\n cat_labels = tf.matmul(y_true, self.col_label_vec)\n cat_label_mat = tf.tile(cat_labels, [1, self.num_classes])\n row_label_mat = tf.tile(self.row_label_vec, [batch_size, 1])\n if self.weightage == \"linear\":\n weight = tf.abs(cat_label_mat - row_label_mat)\n else:\n weight = (cat_label_mat - row_label_mat) ** 2\n numerator = tf.reduce_sum(weight * y_pred)\n label_dist = tf.reduce_sum(y_true, axis=0, keepdims=True)\n pred_dist = tf.reduce_sum(y_pred, axis=0, keepdims=True)\n w_pred_dist = tf.matmul(self.weight_mat, pred_dist, transpose_b=True)\n denominator = tf.reduce_sum(tf.matmul(label_dist, w_pred_dist))\n denominator /= tf.cast(batch_size, dtype=self.dtype)\n loss = tf.math.divide_no_nan(numerator, denominator)\n return tf.math.log(loss + self.epsilon)\n\n def get_config(self):\n config = {\n \"num_classes\": self.num_classes,\n \"weightage\": self.weightage,\n \"epsilon\": self.epsilon,\n \"dtype\": self.dtype,\n }\n base_config = super().get_config()\n return {**base_config, **config}\n", "path": "tensorflow_addons/losses/kappa_loss.py"}]} | 2,358 | 704 |
gh_patches_debug_6381 | rasdani/github-patches | git_diff | vllm-project__vllm-3578 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The same model cannot be loaded by two different users
As pointed out here, the way lockfiles are created prevents a second user from loading any model that a previous user has ever loaded: https://github.com/vllm-project/vllm/issues/2179
This is still an issue with the only workaround being to force-delete the lockfile created by another user.
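
For background on the locking behaviour: `filelock.FileLock` leaves its lock file on disk after release, so a lock file created by one user can later be unopenable by another user, while `filelock.SoftFileLock` treats the file's existence as the lock and removes it on release. A minimal sketch with a hypothetical path (not vLLM's actual code):

```python
import filelock

lock_path = "/tmp/example-model.lock"  # hypothetical lock file location

# SoftFileLock creates the file on acquire and deletes it on release, so a
# second user is not blocked by a leftover file owned by someone else.
with filelock.SoftFileLock(lock_path):
    print("lock held; the file is removed again when this block exits")
```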
</issue>
<code>
[start of vllm/model_executor/weight_utils.py]
1 """Utilities for downloading and initializing model weights."""
2 import filelock
3 import glob
4 import fnmatch
5 import json
6 import os
7 from collections import defaultdict
8 from typing import Any, Iterator, List, Optional, Tuple
9
10 from huggingface_hub import snapshot_download, HfFileSystem
11 import numpy as np
12 from safetensors.torch import load_file, save_file, safe_open
13 import torch
14 from tqdm.auto import tqdm
15
16 from vllm.config import ModelConfig
17 from vllm.logger import init_logger
18 from vllm.model_executor.layers.quantization import (get_quantization_config,
19 QuantizationConfig)
20
21 logger = init_logger(__name__)
22
23 _xdg_cache_home = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
24 _vllm_filelocks_path = os.path.join(_xdg_cache_home, 'vllm/locks/')
25
26
27 class Disabledtqdm(tqdm):
28
29 def __init__(self, *args, **kwargs):
30 super().__init__(*args, **kwargs, disable=True)
31
32
33 def get_lock(model_name_or_path: str, cache_dir: Optional[str] = None):
34 lock_dir = cache_dir if cache_dir is not None else _vllm_filelocks_path
35 os.makedirs(os.path.dirname(lock_dir), exist_ok=True)
36 lock_file_name = model_name_or_path.replace("/", "-") + ".lock"
37 lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name))
38 return lock
39
40
41 def _shared_pointers(tensors):
42 ptrs = defaultdict(list)
43 for k, v in tensors.items():
44 ptrs[v.data_ptr()].append(k)
45 failing = []
46 for _, names in ptrs.items():
47 if len(names) > 1:
48 failing.append(names)
49 return failing
50
51
52 def convert_bin_to_safetensor_file(
53 pt_filename: str,
54 sf_filename: str,
55 ) -> None:
56 loaded = torch.load(pt_filename, map_location="cpu")
57 if "state_dict" in loaded:
58 loaded = loaded["state_dict"]
59 shared = _shared_pointers(loaded)
60 for shared_weights in shared:
61 for name in shared_weights[1:]:
62 loaded.pop(name)
63
64 # For tensors to be contiguous
65 loaded = {k: v.contiguous() for k, v in loaded.items()}
66
67 dirname = os.path.dirname(sf_filename)
68 os.makedirs(dirname, exist_ok=True)
69 save_file(loaded, sf_filename, metadata={"format": "pt"})
70
71 # check file size
72 sf_size = os.stat(sf_filename).st_size
73 pt_size = os.stat(pt_filename).st_size
74 if (sf_size - pt_size) / pt_size > 0.01:
75 raise RuntimeError(f"""The file size different is more than 1%:
76 - {sf_filename}: {sf_size}
77 - {pt_filename}: {pt_size}
78 """)
79
80 # check if the tensors are the same
81 reloaded = load_file(sf_filename)
82 for k in loaded:
83 pt_tensor = loaded[k]
84 sf_tensor = reloaded[k]
85 if not torch.equal(pt_tensor, sf_tensor):
86 raise RuntimeError(f"The output tensors do not match for key {k}")
87
88
89 # TODO(woosuk): Move this to other place.
90 def get_quant_config(model_config: ModelConfig) -> QuantizationConfig:
91 quant_cls = get_quantization_config(model_config.quantization)
92 # Read the quantization config from the HF model config, if available.
93 hf_quant_config = getattr(model_config.hf_config, "quantization_config",
94 None)
95 if hf_quant_config is not None:
96 return quant_cls.from_config(hf_quant_config)
97 model_name_or_path = model_config.model
98 is_local = os.path.isdir(model_name_or_path)
99 if not is_local:
100 # Download the config files.
101 with get_lock(model_name_or_path, model_config.download_dir):
102 hf_folder = snapshot_download(model_name_or_path,
103 revision=model_config.revision,
104 allow_patterns="*.json",
105 cache_dir=model_config.download_dir,
106 tqdm_class=Disabledtqdm)
107 else:
108 hf_folder = model_name_or_path
109 config_files = glob.glob(os.path.join(hf_folder, "*.json"))
110
111 quant_config_files = [
112 f for f in config_files if any(
113 f.endswith(x) for x in quant_cls.get_config_filenames())
114 ]
115 if len(quant_config_files) == 0:
116 raise ValueError(
117 f"Cannot find the config file for {model_config.quantization}")
118 if len(quant_config_files) > 1:
119 raise ValueError(
120 f"Found multiple config files for {model_config.quantization}: "
121 f"{quant_config_files}")
122
123 quant_config_file = quant_config_files[0]
124 with open(quant_config_file, "r") as f:
125 config = json.load(f)
126 return quant_cls.from_config(config)
127
128
129 def prepare_hf_model_weights(
130 model_name_or_path: str,
131 cache_dir: Optional[str] = None,
132 load_format: str = "auto",
133 fall_back_to_pt: bool = True,
134 revision: Optional[str] = None,
135 ) -> Tuple[str, List[str], bool]:
136 # Download model weights from huggingface.
137 is_local = os.path.isdir(model_name_or_path)
138 use_safetensors = False
139 # Some quantized models use .pt files for storing the weights.
140 if load_format == "auto":
141 allow_patterns = ["*.safetensors", "*.bin"]
142 elif load_format == "safetensors":
143 use_safetensors = True
144 allow_patterns = ["*.safetensors"]
145 elif load_format == "pt":
146 allow_patterns = ["*.pt"]
147 elif load_format == "npcache":
148 allow_patterns = ["*.bin"]
149 else:
150 raise ValueError(f"Unknown load_format: {load_format}")
151
152 if fall_back_to_pt:
153 allow_patterns += ["*.pt"]
154
155 if not is_local:
156 # Before we download we look at that is available:
157 fs = HfFileSystem()
158 file_list = fs.ls(model_name_or_path, detail=False, revision=revision)
159
160 # depending on what is available we download different things
161 for pattern in allow_patterns:
162 matching = fnmatch.filter(file_list, pattern)
163 if len(matching) > 0:
164 allow_patterns = [pattern]
165 break
166
167 logger.info(f"Using model weights format {allow_patterns}")
168 # Use file lock to prevent multiple processes from
169 # downloading the same model weights at the same time.
170 with get_lock(model_name_or_path, cache_dir):
171 hf_folder = snapshot_download(model_name_or_path,
172 allow_patterns=allow_patterns,
173 cache_dir=cache_dir,
174 tqdm_class=Disabledtqdm,
175 revision=revision)
176 else:
177 hf_folder = model_name_or_path
178 hf_weights_files: List[str] = []
179 for pattern in allow_patterns:
180 hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))
181 if len(hf_weights_files) > 0:
182 if pattern == "*.safetensors":
183 use_safetensors = True
184 break
185 if not use_safetensors:
186 # Exclude files that are not needed for inference.
187 # https://github.com/huggingface/transformers/blob/v4.34.0/src/transformers/trainer.py#L227-L233
188 blacklist = [
189 "training_args.bin",
190 "optimizer.bin",
191 "optimizer.pt",
192 "scheduler.pt",
193 "scaler.pt",
194 ]
195 hf_weights_files = [
196 f for f in hf_weights_files
197 if not any(f.endswith(x) for x in blacklist)
198 ]
199
200 if len(hf_weights_files) == 0:
201 raise RuntimeError(
202 f"Cannot find any model weights with `{model_name_or_path}`")
203
204 return hf_folder, hf_weights_files, use_safetensors
205
206
207 def hf_model_weights_iterator(
208 model_name_or_path: str,
209 cache_dir: Optional[str] = None,
210 load_format: str = "auto",
211 revision: Optional[str] = None,
212 fall_back_to_pt: Optional[bool] = True,
213 ) -> Iterator[Tuple[str, torch.Tensor]]:
214 hf_folder, hf_weights_files, use_safetensors = prepare_hf_model_weights(
215 model_name_or_path,
216 cache_dir=cache_dir,
217 load_format=load_format,
218 fall_back_to_pt=fall_back_to_pt,
219 revision=revision)
220
221 if load_format == "npcache":
222 # Currently np_cache only support *.bin checkpoints
223 assert use_safetensors is False
224
225 # Convert the model weights from torch tensors to numpy arrays for
226 # faster loading.
227 np_folder = os.path.join(hf_folder, "np")
228 os.makedirs(np_folder, exist_ok=True)
229 weight_names_file = os.path.join(np_folder, "weight_names.json")
230 # Use file lock to prevent multiple processes from
231 # dumping the same model weights to numpy at the same time.
232 with get_lock(model_name_or_path, cache_dir):
233 if not os.path.exists(weight_names_file):
234 weight_names = []
235 for bin_file in hf_weights_files:
236 state = torch.load(bin_file, map_location="cpu")
237 for name, param in state.items():
238 param_path = os.path.join(np_folder, name)
239 with open(param_path, "wb") as f:
240 np.save(f, param.cpu().detach().numpy())
241 weight_names.append(name)
242 with open(weight_names_file, "w") as f:
243 json.dump(weight_names, f)
244
245 with open(weight_names_file, "r") as f:
246 weight_names = json.load(f)
247
248 for name in weight_names:
249 param_path = os.path.join(np_folder, name)
250 with open(param_path, "rb") as f:
251 param = np.load(f)
252 yield name, torch.from_numpy(param)
253 elif use_safetensors:
254 for st_file in hf_weights_files:
255 with safe_open(st_file, framework="pt") as f:
256 for name in f.keys(): # noqa: SIM118
257 param = f.get_tensor(name)
258 yield name, param
259 else:
260 for bin_file in hf_weights_files:
261 state = torch.load(bin_file, map_location="cpu")
262 for name, param in state.items():
263 yield name, param
264 del state
265 torch.cuda.empty_cache()
266
267
268 def convert_pyslice_to_tensor(x: Any) -> torch.Tensor:
269 """convert PySafeSlice object from safetensors to torch.Tensor
270
271 PySafeSlice object supports indexing, which is done before loading the
272 actual tensor and can reduce the amount of memory being read into the
273 memory. However, it does not support more advanced functionalities
274 like `.view()` or `.t()`. Therefore, if we need to modify the loaded
275 tensor with these more complicated operators, we need to convert to
276 tensor first.
277 """
278 if not isinstance(x, torch.Tensor):
279 x = x[:]
280 return x
281
282
283 def default_weight_loader(param: torch.Tensor,
284 loaded_weight: torch.Tensor) -> None:
285 """Default weight loader."""
286 assert param.size() == loaded_weight.size()
287 param.data.copy_(loaded_weight)
288
289
290 def initialize_dummy_weights(
291 model: torch.nn.Module,
292 low: float = -1e-3,
293 high: float = 1e-3,
294 ) -> None:
295 """Initialize model weights with random values.
296
297 The model weights must be randomly initialized for accurate performance
298 measurements. Additionally, the model weights should not cause NaNs in the
299 forward pass. We empirically found that initializing the weights with
300 values between -1e-3 and 1e-3 works well for most models.
301 """
302 for param in model.state_dict().values():
303 if torch.is_floating_point(param):
304 param.data.uniform_(low, high)
305
[end of vllm/model_executor/weight_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vllm/model_executor/weight_utils.py b/vllm/model_executor/weight_utils.py
--- a/vllm/model_executor/weight_utils.py
+++ b/vllm/model_executor/weight_utils.py
@@ -34,7 +34,7 @@
lock_dir = cache_dir if cache_dir is not None else _vllm_filelocks_path
os.makedirs(os.path.dirname(lock_dir), exist_ok=True)
lock_file_name = model_name_or_path.replace("/", "-") + ".lock"
- lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name))
+ lock = filelock.SoftFileLock(os.path.join(lock_dir, lock_file_name))
return lock
| {"golden_diff": "diff --git a/vllm/model_executor/weight_utils.py b/vllm/model_executor/weight_utils.py\n--- a/vllm/model_executor/weight_utils.py\n+++ b/vllm/model_executor/weight_utils.py\n@@ -34,7 +34,7 @@\n lock_dir = cache_dir if cache_dir is not None else _vllm_filelocks_path\n os.makedirs(os.path.dirname(lock_dir), exist_ok=True)\n lock_file_name = model_name_or_path.replace(\"/\", \"-\") + \".lock\"\n- lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name))\n+ lock = filelock.SoftFileLock(os.path.join(lock_dir, lock_file_name))\n return lock\n", "issue": "The same model cannot be loaded by two different users\nAs pointed out here, the way lockfiles are created prevents the second user from loading any models that a previous user has loaded at any point: https://github.com/vllm-project/vllm/issues/2179\r\n\r\nThis is still an issue with the only workaround being to force-delete the lockfile created by another user.\n", "before_files": [{"content": "\"\"\"Utilities for downloading and initializing model weights.\"\"\"\nimport filelock\nimport glob\nimport fnmatch\nimport json\nimport os\nfrom collections import defaultdict\nfrom typing import Any, Iterator, List, Optional, Tuple\n\nfrom huggingface_hub import snapshot_download, HfFileSystem\nimport numpy as np\nfrom safetensors.torch import load_file, save_file, safe_open\nimport torch\nfrom tqdm.auto import tqdm\n\nfrom vllm.config import ModelConfig\nfrom vllm.logger import init_logger\nfrom vllm.model_executor.layers.quantization import (get_quantization_config,\n QuantizationConfig)\n\nlogger = init_logger(__name__)\n\n_xdg_cache_home = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n_vllm_filelocks_path = os.path.join(_xdg_cache_home, 'vllm/locks/')\n\n\nclass Disabledtqdm(tqdm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs, disable=True)\n\n\ndef get_lock(model_name_or_path: str, cache_dir: Optional[str] = None):\n lock_dir = cache_dir if cache_dir is not None else _vllm_filelocks_path\n os.makedirs(os.path.dirname(lock_dir), exist_ok=True)\n lock_file_name = model_name_or_path.replace(\"/\", \"-\") + \".lock\"\n lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name))\n return lock\n\n\ndef _shared_pointers(tensors):\n ptrs = defaultdict(list)\n for k, v in tensors.items():\n ptrs[v.data_ptr()].append(k)\n failing = []\n for _, names in ptrs.items():\n if len(names) > 1:\n failing.append(names)\n return failing\n\n\ndef convert_bin_to_safetensor_file(\n pt_filename: str,\n sf_filename: str,\n) -> None:\n loaded = torch.load(pt_filename, map_location=\"cpu\")\n if \"state_dict\" in loaded:\n loaded = loaded[\"state_dict\"]\n shared = _shared_pointers(loaded)\n for shared_weights in shared:\n for name in shared_weights[1:]:\n loaded.pop(name)\n\n # For tensors to be contiguous\n loaded = {k: v.contiguous() for k, v in loaded.items()}\n\n dirname = os.path.dirname(sf_filename)\n os.makedirs(dirname, exist_ok=True)\n save_file(loaded, sf_filename, metadata={\"format\": \"pt\"})\n\n # check file size\n sf_size = os.stat(sf_filename).st_size\n pt_size = os.stat(pt_filename).st_size\n if (sf_size - pt_size) / pt_size > 0.01:\n raise RuntimeError(f\"\"\"The file size different is more than 1%:\n - {sf_filename}: {sf_size}\n - {pt_filename}: {pt_size}\n \"\"\")\n\n # check if the tensors are the same\n reloaded = load_file(sf_filename)\n for k in loaded:\n pt_tensor = loaded[k]\n sf_tensor = reloaded[k]\n if not torch.equal(pt_tensor, sf_tensor):\n raise RuntimeError(f\"The 
output tensors do not match for key {k}\")\n\n\n# TODO(woosuk): Move this to other place.\ndef get_quant_config(model_config: ModelConfig) -> QuantizationConfig:\n quant_cls = get_quantization_config(model_config.quantization)\n # Read the quantization config from the HF model config, if available.\n hf_quant_config = getattr(model_config.hf_config, \"quantization_config\",\n None)\n if hf_quant_config is not None:\n return quant_cls.from_config(hf_quant_config)\n model_name_or_path = model_config.model\n is_local = os.path.isdir(model_name_or_path)\n if not is_local:\n # Download the config files.\n with get_lock(model_name_or_path, model_config.download_dir):\n hf_folder = snapshot_download(model_name_or_path,\n revision=model_config.revision,\n allow_patterns=\"*.json\",\n cache_dir=model_config.download_dir,\n tqdm_class=Disabledtqdm)\n else:\n hf_folder = model_name_or_path\n config_files = glob.glob(os.path.join(hf_folder, \"*.json\"))\n\n quant_config_files = [\n f for f in config_files if any(\n f.endswith(x) for x in quant_cls.get_config_filenames())\n ]\n if len(quant_config_files) == 0:\n raise ValueError(\n f\"Cannot find the config file for {model_config.quantization}\")\n if len(quant_config_files) > 1:\n raise ValueError(\n f\"Found multiple config files for {model_config.quantization}: \"\n f\"{quant_config_files}\")\n\n quant_config_file = quant_config_files[0]\n with open(quant_config_file, \"r\") as f:\n config = json.load(f)\n return quant_cls.from_config(config)\n\n\ndef prepare_hf_model_weights(\n model_name_or_path: str,\n cache_dir: Optional[str] = None,\n load_format: str = \"auto\",\n fall_back_to_pt: bool = True,\n revision: Optional[str] = None,\n) -> Tuple[str, List[str], bool]:\n # Download model weights from huggingface.\n is_local = os.path.isdir(model_name_or_path)\n use_safetensors = False\n # Some quantized models use .pt files for storing the weights.\n if load_format == \"auto\":\n allow_patterns = [\"*.safetensors\", \"*.bin\"]\n elif load_format == \"safetensors\":\n use_safetensors = True\n allow_patterns = [\"*.safetensors\"]\n elif load_format == \"pt\":\n allow_patterns = [\"*.pt\"]\n elif load_format == \"npcache\":\n allow_patterns = [\"*.bin\"]\n else:\n raise ValueError(f\"Unknown load_format: {load_format}\")\n\n if fall_back_to_pt:\n allow_patterns += [\"*.pt\"]\n\n if not is_local:\n # Before we download we look at that is available:\n fs = HfFileSystem()\n file_list = fs.ls(model_name_or_path, detail=False, revision=revision)\n\n # depending on what is available we download different things\n for pattern in allow_patterns:\n matching = fnmatch.filter(file_list, pattern)\n if len(matching) > 0:\n allow_patterns = [pattern]\n break\n\n logger.info(f\"Using model weights format {allow_patterns}\")\n # Use file lock to prevent multiple processes from\n # downloading the same model weights at the same time.\n with get_lock(model_name_or_path, cache_dir):\n hf_folder = snapshot_download(model_name_or_path,\n allow_patterns=allow_patterns,\n cache_dir=cache_dir,\n tqdm_class=Disabledtqdm,\n revision=revision)\n else:\n hf_folder = model_name_or_path\n hf_weights_files: List[str] = []\n for pattern in allow_patterns:\n hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))\n if len(hf_weights_files) > 0:\n if pattern == \"*.safetensors\":\n use_safetensors = True\n break\n if not use_safetensors:\n # Exclude files that are not needed for inference.\n # 
https://github.com/huggingface/transformers/blob/v4.34.0/src/transformers/trainer.py#L227-L233\n blacklist = [\n \"training_args.bin\",\n \"optimizer.bin\",\n \"optimizer.pt\",\n \"scheduler.pt\",\n \"scaler.pt\",\n ]\n hf_weights_files = [\n f for f in hf_weights_files\n if not any(f.endswith(x) for x in blacklist)\n ]\n\n if len(hf_weights_files) == 0:\n raise RuntimeError(\n f\"Cannot find any model weights with `{model_name_or_path}`\")\n\n return hf_folder, hf_weights_files, use_safetensors\n\n\ndef hf_model_weights_iterator(\n model_name_or_path: str,\n cache_dir: Optional[str] = None,\n load_format: str = \"auto\",\n revision: Optional[str] = None,\n fall_back_to_pt: Optional[bool] = True,\n) -> Iterator[Tuple[str, torch.Tensor]]:\n hf_folder, hf_weights_files, use_safetensors = prepare_hf_model_weights(\n model_name_or_path,\n cache_dir=cache_dir,\n load_format=load_format,\n fall_back_to_pt=fall_back_to_pt,\n revision=revision)\n\n if load_format == \"npcache\":\n # Currently np_cache only support *.bin checkpoints\n assert use_safetensors is False\n\n # Convert the model weights from torch tensors to numpy arrays for\n # faster loading.\n np_folder = os.path.join(hf_folder, \"np\")\n os.makedirs(np_folder, exist_ok=True)\n weight_names_file = os.path.join(np_folder, \"weight_names.json\")\n # Use file lock to prevent multiple processes from\n # dumping the same model weights to numpy at the same time.\n with get_lock(model_name_or_path, cache_dir):\n if not os.path.exists(weight_names_file):\n weight_names = []\n for bin_file in hf_weights_files:\n state = torch.load(bin_file, map_location=\"cpu\")\n for name, param in state.items():\n param_path = os.path.join(np_folder, name)\n with open(param_path, \"wb\") as f:\n np.save(f, param.cpu().detach().numpy())\n weight_names.append(name)\n with open(weight_names_file, \"w\") as f:\n json.dump(weight_names, f)\n\n with open(weight_names_file, \"r\") as f:\n weight_names = json.load(f)\n\n for name in weight_names:\n param_path = os.path.join(np_folder, name)\n with open(param_path, \"rb\") as f:\n param = np.load(f)\n yield name, torch.from_numpy(param)\n elif use_safetensors:\n for st_file in hf_weights_files:\n with safe_open(st_file, framework=\"pt\") as f:\n for name in f.keys(): # noqa: SIM118\n param = f.get_tensor(name)\n yield name, param\n else:\n for bin_file in hf_weights_files:\n state = torch.load(bin_file, map_location=\"cpu\")\n for name, param in state.items():\n yield name, param\n del state\n torch.cuda.empty_cache()\n\n\ndef convert_pyslice_to_tensor(x: Any) -> torch.Tensor:\n \"\"\"convert PySafeSlice object from safetensors to torch.Tensor\n\n PySafeSlice object supports indexing, which is done before loading the\n actual tensor and can reduce the amount of memory being read into the\n memory. However, it does not support more advanced functionalities\n like `.view()` or `.t()`. 
Therefore, if we need to modify the loaded\n tensor with these more complicated operators, we need to convert to\n tensor first.\n \"\"\"\n if not isinstance(x, torch.Tensor):\n x = x[:]\n return x\n\n\ndef default_weight_loader(param: torch.Tensor,\n loaded_weight: torch.Tensor) -> None:\n \"\"\"Default weight loader.\"\"\"\n assert param.size() == loaded_weight.size()\n param.data.copy_(loaded_weight)\n\n\ndef initialize_dummy_weights(\n model: torch.nn.Module,\n low: float = -1e-3,\n high: float = 1e-3,\n) -> None:\n \"\"\"Initialize model weights with random values.\n\n The model weights must be randomly initialized for accurate performance\n measurements. Additionally, the model weights should not cause NaNs in the\n forward pass. We empirically found that initializing the weights with\n values between -1e-3 and 1e-3 works well for most models.\n \"\"\"\n for param in model.state_dict().values():\n if torch.is_floating_point(param):\n param.data.uniform_(low, high)\n", "path": "vllm/model_executor/weight_utils.py"}]} | 4,000 | 153 |
gh_patches_debug_39306 | rasdani/github-patches | git_diff | piskvorky__gensim-2245 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wiki examples: sample code to get from tfidf doc to wikipedia title/uri and vice versa
I very much like the LSI and LDA [wiki examples](https://github.com/RaRe-Technologies/gensim/blob/develop/docs/src/wiki.rst), but one aspect that I think is missing is: how to get from tf-idf doc vectors (or later LSI / LDA vecs) back to Wikipedia URIs (or titles if easier) and vice versa?
Am I missing something obvious, or do I have to run another pass over the wiki dump, as the titles aren't saved anywhere?
I'll happily make a PR to extend the examples with this...
</issue>
<code>
[start of gensim/scripts/make_wikicorpus.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2010 Radim Rehurek <[email protected]>
5 # Copyright (C) 2012 Lars Buitinck <[email protected]>
6 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
7
8
9 """
10 USAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX [VOCABULARY_SIZE]
11
12 Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
13 bz2-compressed dump of Wikipedia articles, in XML format.
14
15 This actually creates three files:
16
17 * `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
18 * `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
19 Matrix Matrix format
20 * `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
21 * `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
22
23 The output Matrix Market files can then be compressed (e.g., by bzip2) to save
24 disk space; gensim's corpus iterators can work with compressed input, too.
25
26 `VOCABULARY_SIZE` controls how many of the most frequent words to keep (after
27 removing tokens that appear in more than 10%% of all documents). Defaults to
28 100,000.
29
30 If you have the `pattern` package installed, this script will use a fancy
31 lemmatization to get a lemma of each token (instead of plain alphabetic
32 tokenizer). The package is available at https://github.com/clips/pattern .
33
34 Example:
35 python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki
36 """
37
38
39 import logging
40 import os.path
41 import sys
42
43 from gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus
44 from gensim.models import TfidfModel
45
46
47 # Wiki is first scanned for all distinct word types (~7M). The types that
48 # appear in more than 10% of articles are removed and from the rest, the
49 # DEFAULT_DICT_SIZE most frequent types are kept.
50 DEFAULT_DICT_SIZE = 100000
51
52
53 if __name__ == '__main__':
54 program = os.path.basename(sys.argv[0])
55 logger = logging.getLogger(program)
56
57 logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
58 logging.root.setLevel(level=logging.INFO)
59 logger.info("running %s", ' '.join(sys.argv))
60
61 # check and process input arguments
62 if len(sys.argv) < 3:
63 print(globals()['__doc__'] % locals())
64 sys.exit(1)
65 inp, outp = sys.argv[1:3]
66
67 if not os.path.isdir(os.path.dirname(outp)):
68 raise SystemExit("Error: The output directory does not exist. Create the directory and try again.")
69
70 if len(sys.argv) > 3:
71 keep_words = int(sys.argv[3])
72 else:
73 keep_words = DEFAULT_DICT_SIZE
74 online = 'online' in program
75 lemmatize = 'lemma' in program
76 debug = 'nodebug' not in program
77
78 if online:
79 dictionary = HashDictionary(id_range=keep_words, debug=debug)
80 dictionary.allow_update = True # start collecting document frequencies
81 wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
82 # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
83 MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
84 # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
85 dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
86 dictionary.save_as_text(outp + '_wordids.txt.bz2')
87 wiki.save(outp + '_corpus.pkl.bz2')
88 dictionary.allow_update = False
89 else:
90 wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)
91 # only keep the most frequent words (out of total ~8.2m unique tokens)
92 wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
93 # save dictionary and bag-of-words (term-document frequency matrix)
94 MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
95 wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
96 # load back the id->word mapping directly from file
97 # this seems to save more memory, compared to keeping the wiki.dictionary object from above
98 dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')
99 del wiki
100
101 # initialize corpus reader and word->id mapping
102 mm = MmCorpus(outp + '_bow.mm')
103
104 # build tfidf, ~50min
105 tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)
106 tfidf.save(outp + '.tfidf_model')
107
108 # save tfidf vectors in matrix market format
109 # ~4h; result file is 15GB! bzip2'ed down to 4.5GB
110 MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)
111
112 logger.info("finished running %s", program)
113
[end of gensim/scripts/make_wikicorpus.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gensim/scripts/make_wikicorpus.py b/gensim/scripts/make_wikicorpus.py
--- a/gensim/scripts/make_wikicorpus.py
+++ b/gensim/scripts/make_wikicorpus.py
@@ -12,13 +12,15 @@
Convert articles from a Wikipedia dump to (sparse) vectors. The input is a
bz2-compressed dump of Wikipedia articles, in XML format.
-This actually creates three files:
+This actually creates several files:
-* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids
-* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in
- Matrix Matrix format
-* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation
-* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump
+* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids
+* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format
+* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm`
+* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents
+* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matix Market format
+* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm`
+* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model
The output Matrix Market files can then be compressed (e.g., by bzip2) to save
disk space; gensim's corpus iterators can work with compressed input, too.
@@ -80,7 +82,7 @@
dictionary.allow_update = True # start collecting document frequencies
wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)
# ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)
- MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)
+ MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True)
# with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`
dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
dictionary.save_as_text(outp + '_wordids.txt.bz2')
@@ -91,7 +93,7 @@
# only keep the most frequent words (out of total ~8.2m unique tokens)
wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)
# save dictionary and bag-of-words (term-document frequency matrix)
- MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h
+ MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h
wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')
# load back the id->word mapping directly from file
# this seems to save more memory, compared to keeping the wiki.dictionary object from above
| {"golden_diff": "diff --git a/gensim/scripts/make_wikicorpus.py b/gensim/scripts/make_wikicorpus.py\n--- a/gensim/scripts/make_wikicorpus.py\n+++ b/gensim/scripts/make_wikicorpus.py\n@@ -12,13 +12,15 @@\n Convert articles from a Wikipedia dump to (sparse) vectors. The input is a\n bz2-compressed dump of Wikipedia articles, in XML format.\n \n-This actually creates three files:\n+This actually creates several files:\n \n-* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids\n-* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in\n- Matrix Matrix format\n-* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation\n-* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump\n+* `OUTPUT_PREFIX_wordids.txt.bz2`: mapping between words and their integer ids\n+* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation in Matrix Market format\n+* `OUTPUT_PREFIX_bow.mm.index`: index for `OUTPUT_PREFIX_bow.mm`\n+* `OUTPUT_PREFIX_bow.mm.metadata.cpickle`: titles of documents\n+* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation in Matix Market format\n+* `OUTPUT_PREFIX_tfidf.mm.index`: index for `OUTPUT_PREFIX_tfidf.mm`\n+* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model\n \n The output Matrix Market files can then be compressed (e.g., by bzip2) to save\n disk space; gensim's corpus iterators can work with compressed input, too.\n@@ -80,7 +82,7 @@\n dictionary.allow_update = True # start collecting document frequencies\n wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)\n # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)\n- MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)\n+ MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True)\n # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`\n dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n dictionary.save_as_text(outp + '_wordids.txt.bz2')\n@@ -91,7 +93,7 @@\n # only keep the most frequent words (out of total ~8.2m unique tokens)\n wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n # save dictionary and bag-of-words (term-document frequency matrix)\n- MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h\n+ MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000, metadata=True) # another ~9h\n wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')\n # load back the id->word mapping directly from file\n # this seems to save more memory, compared to keeping the wiki.dictionary object from above\n", "issue": "wiki examples: sample code to get from tfidf doc to wikipedia title/uri and vice versa\nI very much like the LSI and LDA [wiki examples](https://github.com/RaRe-Technologies/gensim/blob/develop/docs/src/wiki.rst), but one aspect that i think is missing is: how to get from tf-idf doc vectors (or later LSI / LDA vecs) back to Wikipedia URIs (or titles if easier) and vice versa?\r\n\r\nAm i missing something obvious, or do i have to run another pass over the wiki dump, as the titles aren't saved anywhere?\r\n\r\nI'll happily make a PR to extend the examples with this...\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2010 Radim Rehurek <[email protected]>\n# Copyright (C) 2012 Lars Buitinck <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\n\"\"\"\nUSAGE: %(program)s WIKI_XML_DUMP OUTPUT_PREFIX 
[VOCABULARY_SIZE]\n\nConvert articles from a Wikipedia dump to (sparse) vectors. The input is a\nbz2-compressed dump of Wikipedia articles, in XML format.\n\nThis actually creates three files:\n\n* `OUTPUT_PREFIX_wordids.txt`: mapping between words and their integer ids\n* `OUTPUT_PREFIX_bow.mm`: bag-of-words (word counts) representation, in\n Matrix Matrix format\n* `OUTPUT_PREFIX_tfidf.mm`: TF-IDF representation\n* `OUTPUT_PREFIX.tfidf_model`: TF-IDF model dump\n\nThe output Matrix Market files can then be compressed (e.g., by bzip2) to save\ndisk space; gensim's corpus iterators can work with compressed input, too.\n\n`VOCABULARY_SIZE` controls how many of the most frequent words to keep (after\nremoving tokens that appear in more than 10%% of all documents). Defaults to\n100,000.\n\nIf you have the `pattern` package installed, this script will use a fancy\nlemmatization to get a lemma of each token (instead of plain alphabetic\ntokenizer). The package is available at https://github.com/clips/pattern .\n\nExample:\n python -m gensim.scripts.make_wikicorpus ~/gensim/results/enwiki-latest-pages-articles.xml.bz2 ~/gensim/results/wiki\n\"\"\"\n\n\nimport logging\nimport os.path\nimport sys\n\nfrom gensim.corpora import Dictionary, HashDictionary, MmCorpus, WikiCorpus\nfrom gensim.models import TfidfModel\n\n\n# Wiki is first scanned for all distinct word types (~7M). The types that\n# appear in more than 10% of articles are removed and from the rest, the\n# DEFAULT_DICT_SIZE most frequent types are kept.\nDEFAULT_DICT_SIZE = 100000\n\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')\n logging.root.setLevel(level=logging.INFO)\n logger.info(\"running %s\", ' '.join(sys.argv))\n\n # check and process input arguments\n if len(sys.argv) < 3:\n print(globals()['__doc__'] % locals())\n sys.exit(1)\n inp, outp = sys.argv[1:3]\n\n if not os.path.isdir(os.path.dirname(outp)):\n raise SystemExit(\"Error: The output directory does not exist. 
Create the directory and try again.\")\n\n if len(sys.argv) > 3:\n keep_words = int(sys.argv[3])\n else:\n keep_words = DEFAULT_DICT_SIZE\n online = 'online' in program\n lemmatize = 'lemma' in program\n debug = 'nodebug' not in program\n\n if online:\n dictionary = HashDictionary(id_range=keep_words, debug=debug)\n dictionary.allow_update = True # start collecting document frequencies\n wiki = WikiCorpus(inp, lemmatize=lemmatize, dictionary=dictionary)\n # ~4h on my macbook pro without lemmatization, 3.1m articles (august 2012)\n MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000)\n # with HashDictionary, the token->id mapping is only fully instantiated now, after `serialize`\n dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n dictionary.save_as_text(outp + '_wordids.txt.bz2')\n wiki.save(outp + '_corpus.pkl.bz2')\n dictionary.allow_update = False\n else:\n wiki = WikiCorpus(inp, lemmatize=lemmatize) # takes about 9h on a macbook pro, for 3.5m articles (june 2011)\n # only keep the most frequent words (out of total ~8.2m unique tokens)\n wiki.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=DEFAULT_DICT_SIZE)\n # save dictionary and bag-of-words (term-document frequency matrix)\n MmCorpus.serialize(outp + '_bow.mm', wiki, progress_cnt=10000) # another ~9h\n wiki.dictionary.save_as_text(outp + '_wordids.txt.bz2')\n # load back the id->word mapping directly from file\n # this seems to save more memory, compared to keeping the wiki.dictionary object from above\n dictionary = Dictionary.load_from_text(outp + '_wordids.txt.bz2')\n del wiki\n\n # initialize corpus reader and word->id mapping\n mm = MmCorpus(outp + '_bow.mm')\n\n # build tfidf, ~50min\n tfidf = TfidfModel(mm, id2word=dictionary, normalize=True)\n tfidf.save(outp + '.tfidf_model')\n\n # save tfidf vectors in matrix market format\n # ~4h; result file is 15GB! bzip2'ed down to 4.5GB\n MmCorpus.serialize(outp + '_tfidf.mm', tfidf[mm], progress_cnt=10000)\n\n logger.info(\"finished running %s\", program)\n", "path": "gensim/scripts/make_wikicorpus.py"}]} | 2,182 | 736 |
gh_patches_debug_19043 | rasdani/github-patches | git_diff | keras-team__keras-nlp-818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Call `super.config()` in `BartBackbone`'s `get_config()`
We should call `super().config()` here: https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bart/bart_backbone.py#L238, update the config with `BARTBackbone`-specific keys and return the config. Check this for reference: https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bert/bert_backbone.py#L204.
</issue>
<code>
[start of keras_nlp/models/bart/bart_backbone.py]
1 # Copyright 2022 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """BART backbone model."""
16
17 import copy
18
19 import tensorflow as tf
20 from tensorflow import keras
21
22 from keras_nlp.layers.position_embedding import PositionEmbedding
23 from keras_nlp.layers.transformer_decoder import TransformerDecoder
24 from keras_nlp.layers.transformer_encoder import TransformerEncoder
25 from keras_nlp.models.backbone import Backbone
26 from keras_nlp.models.bart.bart_presets import backbone_presets
27 from keras_nlp.utils.python_utils import classproperty
28
29
30 def bart_kernel_initializer(stddev=0.02):
31 return keras.initializers.TruncatedNormal(stddev=stddev)
32
33
34 @keras.utils.register_keras_serializable(package="keras_nlp")
35 class BartBackbone(Backbone):
36 """BART encoder-decoder network.
37
38 This class implements a Transformer-based encoder-decoder model as
39 described in
40 ["BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"](https://arxiv.org/abs/1910.13461).
41
42 The default constructor gives a fully customizable, randomly initialized BART
43 model with any number of layers, heads, and embedding dimensions. To load
44 preset architectures and weights, use the `from_preset` constructor.
45
46 Disclaimer: Pre-trained models are provided on an "as is" basis, without
47 warranties or conditions of any kind. The underlying model is provided by a
48 third party and subject to a separate license, available
49 [here](https://github.com/facebookresearch/fairseq/).
50
51 Args:
52 vocabulary_size: int. The size of the token vocabulary.
53 num_layers: int. The number of transformer encoder layers and
54 transformer decoder layers.
55 num_heads: int. The number of attention heads for each transformer.
56 The hidden size must be divisible by the number of attention heads.
57 hidden_dim: int. The size of the transformer encoding and pooler layers.
58 intermediate_dim: int. The output dimension of the first Dense layer in
59 a two-layer feedforward network for each transformer.
60 dropout: float. Dropout probability for the Transformer encoder.
61 max_sequence_length: int. The maximum sequence length that this encoder
62 can consume. If None, `max_sequence_length` uses the value from
63 sequence length. This determines the variable shape for positional
64 embeddings.
65
66 Examples:
67 ```python
68 input_data = {
69 "encoder_token_ids": tf.ones(shape=(1, 12), dtype=tf.int64),
70 "encoder_padding_mask": tf.constant(
71 [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
72 ),
73 "decoder_token_ids": tf.ones(shape=(1, 12), dtype=tf.int64),
74 "decoder_padding_mask": tf.constant(
75 [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], shape=(1, 12)
76 ),
77 }
78
79 # Randomly initialized BART encoder-decoder model with a custom config
80 model = keras_nlp.models.BartBackbone(
81 vocabulary_size=50265,
82 num_layers=6,
83 num_heads=12,
84 hidden_dim=768,
85 intermediate_dim=3072,
86 max_sequence_length=12,
87 )
88 output = model(input_data)
89 ```
90 """
91
92 def __init__(
93 self,
94 vocabulary_size,
95 num_layers,
96 num_heads,
97 hidden_dim,
98 intermediate_dim,
99 dropout=0.1,
100 max_sequence_length=1024,
101 **kwargs,
102 ):
103 # Encoder inputs
104 encoder_token_id_input = keras.Input(
105 shape=(None,), dtype="int32", name="encoder_token_ids"
106 )
107 encoder_padding_mask = keras.Input(
108 shape=(None,), dtype="int32", name="encoder_padding_mask"
109 )
110
111 # Decoder inputs.
112 decoder_token_id_input = keras.Input(
113 shape=(None,), dtype="int32", name="decoder_token_ids"
114 )
115 decoder_padding_mask = keras.Input(
116 shape=(None,), dtype="int32", name="decoder_padding_mask"
117 )
118
119 # Token embedding layer. This layer is shared by encoder and decoder.
120 token_embedding_layer = keras.layers.Embedding(
121 input_dim=vocabulary_size,
122 output_dim=hidden_dim,
123 embeddings_initializer=bart_kernel_initializer(),
124 name="token_embedding",
125 )
126
127 # ===== Encoder =====
128
129 # Embed tokens and positions.
130 token_embedding = token_embedding_layer(encoder_token_id_input)
131 # Position embedding parameters are not shared by encode and decoder.
132 position_embedding = PositionEmbedding(
133 initializer=bart_kernel_initializer(),
134 sequence_length=max_sequence_length,
135 name="encoder_position_embedding",
136 )(token_embedding)
137
138 # Sum, normalize and apply dropout to embeddings.
139 x = keras.layers.Add()((token_embedding, position_embedding))
140 x = keras.layers.LayerNormalization(
141 name="encoder_embeddings_layer_norm",
142 axis=-1,
143 epsilon=1e-5,
144 dtype=tf.float32,
145 )(x)
146 x = keras.layers.Dropout(
147 dropout,
148 name="encoder_embeddings_dropout",
149 )(x)
150
151 # Apply successive transformer encoder blocks.
152 for i in range(num_layers):
153 x = TransformerEncoder(
154 num_heads=num_heads,
155 intermediate_dim=intermediate_dim,
156 activation=lambda x: keras.activations.gelu(
157 x, approximate=False
158 ),
159 dropout=dropout,
160 layer_norm_epsilon=1e-5,
161 kernel_initializer=bart_kernel_initializer(),
162 name=f"transformer_encoder_layer_{i}",
163 )(x, padding_mask=encoder_padding_mask)
164
165 encoder_output = x
166
167 # ===== Decoder =====
168
169 # Embed tokens and positions.
170 token_embedding = token_embedding_layer(decoder_token_id_input)
171 # Position embedding parameters are not shared by encode and decoder.
172 position_embedding = PositionEmbedding(
173 initializer=bart_kernel_initializer(),
174 sequence_length=max_sequence_length,
175 name="decoder_position_embedding",
176 )(token_embedding)
177
178 # Sum, normalize and apply dropout to embeddings.
179 x = keras.layers.Add()((token_embedding, position_embedding))
180 x = keras.layers.LayerNormalization(
181 name="decoder_embeddings_layer_norm",
182 axis=-1,
183 epsilon=1e-5,
184 dtype=tf.float32,
185 )(x)
186 x = keras.layers.Dropout(
187 dropout,
188 name="decoder_embeddings_dropout",
189 )(x)
190
191 # Apply successive transformer decoder blocks.
192 for i in range(num_layers):
193 transformer_decoder_layer = TransformerDecoder(
194 intermediate_dim=intermediate_dim,
195 num_heads=num_heads,
196 dropout=dropout,
197 activation=lambda x: keras.activations.gelu(
198 x, approximate=False
199 ),
200 layer_norm_epsilon=1e-5,
201 kernel_initializer=bart_kernel_initializer(),
202 name=f"transformer_decoder_layer_{i}",
203 has_cross_attention=True,
204 )
205 x = transformer_decoder_layer(
206 decoder_sequence=x,
207 encoder_sequence=encoder_output,
208 decoder_padding_mask=decoder_padding_mask,
209 encoder_padding_mask=encoder_padding_mask,
210 )
211
212 decoder_output = x
213
214 # Instantiate using Functional API Model constructor
215 super().__init__(
216 inputs={
217 "encoder_token_ids": encoder_token_id_input,
218 "encoder_padding_mask": encoder_padding_mask,
219 "decoder_token_ids": decoder_token_id_input,
220 "decoder_padding_mask": decoder_padding_mask,
221 },
222 outputs={
223 "encoder_sequence_output": encoder_output,
224 "decoder_sequence_output": decoder_output,
225 },
226 **kwargs,
227 )
228
229 # All references to `self` below this line
230 self.vocabulary_size = vocabulary_size
231 self.num_layers = num_layers
232 self.num_heads = num_heads
233 self.hidden_dim = hidden_dim
234 self.intermediate_dim = intermediate_dim
235 self.dropout = dropout
236 self.max_sequence_length = max_sequence_length
237
238 def get_config(self):
239 return {
240 "vocabulary_size": self.vocabulary_size,
241 "num_layers": self.num_layers,
242 "num_heads": self.num_heads,
243 "hidden_dim": self.hidden_dim,
244 "intermediate_dim": self.intermediate_dim,
245 "dropout": self.dropout,
246 "max_sequence_length": self.max_sequence_length,
247 "name": self.name,
248 "trainable": self.trainable,
249 }
250
251 @property
252 def token_embedding(self):
253 return self.get_layer("token_embedding")
254
255 @classproperty
256 def presets(cls):
257 return copy.deepcopy(backbone_presets)
258
[end of keras_nlp/models/bart/bart_backbone.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras_nlp/models/bart/bart_backbone.py b/keras_nlp/models/bart/bart_backbone.py
--- a/keras_nlp/models/bart/bart_backbone.py
+++ b/keras_nlp/models/bart/bart_backbone.py
@@ -236,17 +236,20 @@
self.max_sequence_length = max_sequence_length
def get_config(self):
- return {
- "vocabulary_size": self.vocabulary_size,
- "num_layers": self.num_layers,
- "num_heads": self.num_heads,
- "hidden_dim": self.hidden_dim,
- "intermediate_dim": self.intermediate_dim,
- "dropout": self.dropout,
- "max_sequence_length": self.max_sequence_length,
- "name": self.name,
- "trainable": self.trainable,
- }
+ config = super().get_config()
+ config.update(
+ {
+ "vocabulary_size": self.vocabulary_size,
+ "num_layers": self.num_layers,
+ "num_heads": self.num_heads,
+ "hidden_dim": self.hidden_dim,
+ "intermediate_dim": self.intermediate_dim,
+ "dropout": self.dropout,
+ "max_sequence_length": self.max_sequence_length,
+ }
+ )
+
+ return config
@property
def token_embedding(self):
| {"golden_diff": "diff --git a/keras_nlp/models/bart/bart_backbone.py b/keras_nlp/models/bart/bart_backbone.py\n--- a/keras_nlp/models/bart/bart_backbone.py\n+++ b/keras_nlp/models/bart/bart_backbone.py\n@@ -236,17 +236,20 @@\n self.max_sequence_length = max_sequence_length\n \n def get_config(self):\n- return {\n- \"vocabulary_size\": self.vocabulary_size,\n- \"num_layers\": self.num_layers,\n- \"num_heads\": self.num_heads,\n- \"hidden_dim\": self.hidden_dim,\n- \"intermediate_dim\": self.intermediate_dim,\n- \"dropout\": self.dropout,\n- \"max_sequence_length\": self.max_sequence_length,\n- \"name\": self.name,\n- \"trainable\": self.trainable,\n- }\n+ config = super().get_config()\n+ config.update(\n+ {\n+ \"vocabulary_size\": self.vocabulary_size,\n+ \"num_layers\": self.num_layers,\n+ \"num_heads\": self.num_heads,\n+ \"hidden_dim\": self.hidden_dim,\n+ \"intermediate_dim\": self.intermediate_dim,\n+ \"dropout\": self.dropout,\n+ \"max_sequence_length\": self.max_sequence_length,\n+ }\n+ )\n+\n+ return config\n \n @property\n def token_embedding(self):\n", "issue": "Call `super.config()` in `BartBackbone`'s `get_config()`\nWe should call `super().config()` here: https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bart/bart_backbone.py#L238, update the config with `BARTBackbone`-specific keys and return the config. Check this for reference: https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bert/bert_backbone.py#L204.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"BART backbone model.\"\"\"\n\nimport copy\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom keras_nlp.layers.position_embedding import PositionEmbedding\nfrom keras_nlp.layers.transformer_decoder import TransformerDecoder\nfrom keras_nlp.layers.transformer_encoder import TransformerEncoder\nfrom keras_nlp.models.backbone import Backbone\nfrom keras_nlp.models.bart.bart_presets import backbone_presets\nfrom keras_nlp.utils.python_utils import classproperty\n\n\ndef bart_kernel_initializer(stddev=0.02):\n return keras.initializers.TruncatedNormal(stddev=stddev)\n\n\[email protected]_keras_serializable(package=\"keras_nlp\")\nclass BartBackbone(Backbone):\n \"\"\"BART encoder-decoder network.\n\n This class implements a Transformer-based encoder-decoder model as\n described in\n [\"BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension\"](https://arxiv.org/abs/1910.13461).\n\n The default constructor gives a fully customizable, randomly initialized BART\n model with any number of layers, heads, and embedding dimensions. To load\n preset architectures and weights, use the `from_preset` constructor.\n\n Disclaimer: Pre-trained models are provided on an \"as is\" basis, without\n warranties or conditions of any kind. 
The underlying model is provided by a\n third party and subject to a separate license, available\n [here](https://github.com/facebookresearch/fairseq/).\n\n Args:\n vocabulary_size: int. The size of the token vocabulary.\n num_layers: int. The number of transformer encoder layers and\n transformer decoder layers.\n num_heads: int. The number of attention heads for each transformer.\n The hidden size must be divisible by the number of attention heads.\n hidden_dim: int. The size of the transformer encoding and pooler layers.\n intermediate_dim: int. The output dimension of the first Dense layer in\n a two-layer feedforward network for each transformer.\n dropout: float. Dropout probability for the Transformer encoder.\n max_sequence_length: int. The maximum sequence length that this encoder\n can consume. If None, `max_sequence_length` uses the value from\n sequence length. This determines the variable shape for positional\n embeddings.\n\n Examples:\n ```python\n input_data = {\n \"encoder_token_ids\": tf.ones(shape=(1, 12), dtype=tf.int64),\n \"encoder_padding_mask\": tf.constant(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)\n ),\n \"decoder_token_ids\": tf.ones(shape=(1, 12), dtype=tf.int64),\n \"decoder_padding_mask\": tf.constant(\n [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], shape=(1, 12)\n ),\n }\n\n # Randomly initialized BART encoder-decoder model with a custom config\n model = keras_nlp.models.BartBackbone(\n vocabulary_size=50265,\n num_layers=6,\n num_heads=12,\n hidden_dim=768,\n intermediate_dim=3072,\n max_sequence_length=12,\n )\n output = model(input_data)\n ```\n \"\"\"\n\n def __init__(\n self,\n vocabulary_size,\n num_layers,\n num_heads,\n hidden_dim,\n intermediate_dim,\n dropout=0.1,\n max_sequence_length=1024,\n **kwargs,\n ):\n # Encoder inputs\n encoder_token_id_input = keras.Input(\n shape=(None,), dtype=\"int32\", name=\"encoder_token_ids\"\n )\n encoder_padding_mask = keras.Input(\n shape=(None,), dtype=\"int32\", name=\"encoder_padding_mask\"\n )\n\n # Decoder inputs.\n decoder_token_id_input = keras.Input(\n shape=(None,), dtype=\"int32\", name=\"decoder_token_ids\"\n )\n decoder_padding_mask = keras.Input(\n shape=(None,), dtype=\"int32\", name=\"decoder_padding_mask\"\n )\n\n # Token embedding layer. 
This layer is shared by encoder and decoder.\n token_embedding_layer = keras.layers.Embedding(\n input_dim=vocabulary_size,\n output_dim=hidden_dim,\n embeddings_initializer=bart_kernel_initializer(),\n name=\"token_embedding\",\n )\n\n # ===== Encoder =====\n\n # Embed tokens and positions.\n token_embedding = token_embedding_layer(encoder_token_id_input)\n # Position embedding parameters are not shared by encode and decoder.\n position_embedding = PositionEmbedding(\n initializer=bart_kernel_initializer(),\n sequence_length=max_sequence_length,\n name=\"encoder_position_embedding\",\n )(token_embedding)\n\n # Sum, normalize and apply dropout to embeddings.\n x = keras.layers.Add()((token_embedding, position_embedding))\n x = keras.layers.LayerNormalization(\n name=\"encoder_embeddings_layer_norm\",\n axis=-1,\n epsilon=1e-5,\n dtype=tf.float32,\n )(x)\n x = keras.layers.Dropout(\n dropout,\n name=\"encoder_embeddings_dropout\",\n )(x)\n\n # Apply successive transformer encoder blocks.\n for i in range(num_layers):\n x = TransformerEncoder(\n num_heads=num_heads,\n intermediate_dim=intermediate_dim,\n activation=lambda x: keras.activations.gelu(\n x, approximate=False\n ),\n dropout=dropout,\n layer_norm_epsilon=1e-5,\n kernel_initializer=bart_kernel_initializer(),\n name=f\"transformer_encoder_layer_{i}\",\n )(x, padding_mask=encoder_padding_mask)\n\n encoder_output = x\n\n # ===== Decoder =====\n\n # Embed tokens and positions.\n token_embedding = token_embedding_layer(decoder_token_id_input)\n # Position embedding parameters are not shared by encode and decoder.\n position_embedding = PositionEmbedding(\n initializer=bart_kernel_initializer(),\n sequence_length=max_sequence_length,\n name=\"decoder_position_embedding\",\n )(token_embedding)\n\n # Sum, normalize and apply dropout to embeddings.\n x = keras.layers.Add()((token_embedding, position_embedding))\n x = keras.layers.LayerNormalization(\n name=\"decoder_embeddings_layer_norm\",\n axis=-1,\n epsilon=1e-5,\n dtype=tf.float32,\n )(x)\n x = keras.layers.Dropout(\n dropout,\n name=\"decoder_embeddings_dropout\",\n )(x)\n\n # Apply successive transformer decoder blocks.\n for i in range(num_layers):\n transformer_decoder_layer = TransformerDecoder(\n intermediate_dim=intermediate_dim,\n num_heads=num_heads,\n dropout=dropout,\n activation=lambda x: keras.activations.gelu(\n x, approximate=False\n ),\n layer_norm_epsilon=1e-5,\n kernel_initializer=bart_kernel_initializer(),\n name=f\"transformer_decoder_layer_{i}\",\n has_cross_attention=True,\n )\n x = transformer_decoder_layer(\n decoder_sequence=x,\n encoder_sequence=encoder_output,\n decoder_padding_mask=decoder_padding_mask,\n encoder_padding_mask=encoder_padding_mask,\n )\n\n decoder_output = x\n\n # Instantiate using Functional API Model constructor\n super().__init__(\n inputs={\n \"encoder_token_ids\": encoder_token_id_input,\n \"encoder_padding_mask\": encoder_padding_mask,\n \"decoder_token_ids\": decoder_token_id_input,\n \"decoder_padding_mask\": decoder_padding_mask,\n },\n outputs={\n \"encoder_sequence_output\": encoder_output,\n \"decoder_sequence_output\": decoder_output,\n },\n **kwargs,\n )\n\n # All references to `self` below this line\n self.vocabulary_size = vocabulary_size\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.hidden_dim = hidden_dim\n self.intermediate_dim = intermediate_dim\n self.dropout = dropout\n self.max_sequence_length = max_sequence_length\n\n def get_config(self):\n return {\n \"vocabulary_size\": self.vocabulary_size,\n 
\"num_layers\": self.num_layers,\n \"num_heads\": self.num_heads,\n \"hidden_dim\": self.hidden_dim,\n \"intermediate_dim\": self.intermediate_dim,\n \"dropout\": self.dropout,\n \"max_sequence_length\": self.max_sequence_length,\n \"name\": self.name,\n \"trainable\": self.trainable,\n }\n\n @property\n def token_embedding(self):\n return self.get_layer(\"token_embedding\")\n\n @classproperty\n def presets(cls):\n return copy.deepcopy(backbone_presets)\n", "path": "keras_nlp/models/bart/bart_backbone.py"}]} | 3,330 | 308 |
gh_patches_debug_28738 | rasdani/github-patches | git_diff | mirumee__ariadne-661 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
snake_case_fallback_resolvers not calling obj.get(attr_name)
**Ariadne version:** 0.13.0
**Python version:** 3.8.11
Hello. I am using the [databases](https://www.encode.io/databases/) package with an [asyncpg](https://magicstack.github.io/asyncpg/current/) backend to interact with a PostgreSQL database. The objects returned from my queries are of the type `databases.backends.postgres.Record`. The desired attributes can only be accessed via the get method. However, when I use `snake_case_fallback_resolvers`, Ariadne has trouble resolving the requested fields and I receive the following error: `Cannot return null for non-nullable field`
If I instead use the regular `fallback_resolvers` (adjusting my schema's naming conventions), Ariadne is able to resolve the requested fields.
Is this a bug or am I doing something wrong? Thank you for your time.
</issue>
<code>
[start of ariadne/utils.py]
1 import asyncio
2 from functools import wraps
3 from typing import Optional, Union, Callable, Dict, Any
4
5 from graphql import GraphQLError, parse
6
7
8 def convert_camel_case_to_snake(graphql_name: str) -> str:
9 # pylint: disable=too-many-boolean-expressions
10 max_index = len(graphql_name) - 1
11 lowered_name = graphql_name.lower()
12
13 python_name = ""
14 for i, c in enumerate(lowered_name):
15 if i > 0 and (
16 # testWord -> test_word
17 (
18 c != graphql_name[i]
19 and graphql_name[i - 1] != "_"
20 and graphql_name[i - 1] == python_name[-1]
21 )
22 # TESTWord -> test_word
23 or (
24 i < max_index
25 and graphql_name[i] != lowered_name[i]
26 and graphql_name[i + 1] == lowered_name[i + 1]
27 )
28 # test134 -> test_134
29 or (c.isdigit() and not graphql_name[i - 1].isdigit())
30 # 134test -> 134_test
31 or (not c.isdigit() and graphql_name[i - 1].isdigit())
32 ):
33 python_name += "_"
34 python_name += c
35 return python_name
36
37
38 def gql(value: str) -> str:
39 parse(value)
40 return value
41
42
43 def unwrap_graphql_error(
44 error: Union[GraphQLError, Optional[Exception]]
45 ) -> Optional[Exception]:
46 if isinstance(error, GraphQLError):
47 return unwrap_graphql_error(error.original_error)
48 return error
49
50
51 def convert_kwargs_to_snake_case(func: Callable) -> Callable:
52 def convert_to_snake_case(d: Dict) -> Dict:
53 converted: Dict = {}
54 for k, v in d.items():
55 if isinstance(v, dict):
56 v = convert_to_snake_case(v)
57 if isinstance(v, list):
58 v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]
59 converted[convert_camel_case_to_snake(k)] = v
60 return converted
61
62 if asyncio.iscoroutinefunction(func):
63
64 @wraps(func)
65 async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
66 return await func(*args, **convert_to_snake_case(kwargs))
67
68 return async_wrapper
69
70 @wraps(func)
71 def wrapper(*args: Any, **kwargs: Any) -> Any:
72 return func(*args, **convert_to_snake_case(kwargs))
73
74 return wrapper
75
[end of ariadne/utils.py]
[start of ariadne/resolvers.py]
1 from typing import Any
2
3 from graphql import default_field_resolver
4 from graphql.type import (
5 GraphQLField,
6 GraphQLObjectType,
7 GraphQLResolveInfo,
8 GraphQLSchema,
9 )
10
11 from .types import Resolver, SchemaBindable
12 from .utils import convert_camel_case_to_snake
13
14
15 class FallbackResolversSetter(SchemaBindable):
16 def bind_to_schema(self, schema: GraphQLSchema) -> None:
17 for type_object in schema.type_map.values():
18 if isinstance(type_object, GraphQLObjectType):
19 self.add_resolvers_to_object_fields(type_object)
20
21 def add_resolvers_to_object_fields(self, type_object) -> None:
22 for field_name, field_object in type_object.fields.items():
23 self.add_resolver_to_field(field_name, field_object)
24
25 def add_resolver_to_field(self, _: str, field_object: GraphQLField) -> None:
26 if field_object.resolve is None:
27 field_object.resolve = default_field_resolver
28
29
30 class SnakeCaseFallbackResolversSetter(FallbackResolversSetter):
31 def add_resolver_to_field(
32 self, field_name: str, field_object: GraphQLField
33 ) -> None:
34 if field_object.resolve is None:
35 field_name = convert_camel_case_to_snake(field_name)
36 field_object.resolve = resolve_to(field_name)
37
38
39 fallback_resolvers = FallbackResolversSetter()
40 snake_case_fallback_resolvers = SnakeCaseFallbackResolversSetter()
41
42
43 def resolve_parent_field(parent: Any, field_name: str) -> Any:
44 if isinstance(parent, dict):
45 return parent.get(field_name)
46 return getattr(parent, field_name, None)
47
48
49 def resolve_to(field_name: str) -> Resolver:
50 def resolver(parent: Any, info: GraphQLResolveInfo, **kwargs) -> Any:
51 value = resolve_parent_field(parent, field_name)
52 if callable(value):
53 return value(info, **kwargs)
54 return value
55
56 # pylint: disable=protected-access
57 resolver._ariadne_alias_resolver = True # type: ignore
58 return resolver
59
60
61 def is_default_resolver(resolver: Resolver) -> bool:
62 # pylint: disable=comparison-with-callable
63 if resolver == default_field_resolver:
64 return True
65 return hasattr(resolver, "_ariadne_alias_resolver")
66
[end of ariadne/resolvers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py
--- a/ariadne/resolvers.py
+++ b/ariadne/resolvers.py
@@ -1,3 +1,4 @@
+from collections.abc import Mapping
from typing import Any
from graphql import default_field_resolver
@@ -41,7 +42,7 @@
def resolve_parent_field(parent: Any, field_name: str) -> Any:
- if isinstance(parent, dict):
+ if isinstance(parent, Mapping):
return parent.get(field_name)
return getattr(parent, field_name, None)
diff --git a/ariadne/utils.py b/ariadne/utils.py
--- a/ariadne/utils.py
+++ b/ariadne/utils.py
@@ -1,4 +1,5 @@
import asyncio
+from collections.abc import Mapping
from functools import wraps
from typing import Optional, Union, Callable, Dict, Any
@@ -49,13 +50,15 @@
def convert_kwargs_to_snake_case(func: Callable) -> Callable:
- def convert_to_snake_case(d: Dict) -> Dict:
+ def convert_to_snake_case(m: Mapping) -> Dict:
converted: Dict = {}
- for k, v in d.items():
- if isinstance(v, dict):
+ for k, v in m.items():
+ if isinstance(v, Mapping):
v = convert_to_snake_case(v)
if isinstance(v, list):
- v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]
+ v = [
+ convert_to_snake_case(i) if isinstance(i, Mapping) else i for i in v
+ ]
converted[convert_camel_case_to_snake(k)] = v
return converted
| {"golden_diff": "diff --git a/ariadne/resolvers.py b/ariadne/resolvers.py\n--- a/ariadne/resolvers.py\n+++ b/ariadne/resolvers.py\n@@ -1,3 +1,4 @@\n+from collections.abc import Mapping\n from typing import Any\n \n from graphql import default_field_resolver\n@@ -41,7 +42,7 @@\n \n \n def resolve_parent_field(parent: Any, field_name: str) -> Any:\n- if isinstance(parent, dict):\n+ if isinstance(parent, Mapping):\n return parent.get(field_name)\n return getattr(parent, field_name, None)\n \ndiff --git a/ariadne/utils.py b/ariadne/utils.py\n--- a/ariadne/utils.py\n+++ b/ariadne/utils.py\n@@ -1,4 +1,5 @@\n import asyncio\n+from collections.abc import Mapping\n from functools import wraps\n from typing import Optional, Union, Callable, Dict, Any\n \n@@ -49,13 +50,15 @@\n \n \n def convert_kwargs_to_snake_case(func: Callable) -> Callable:\n- def convert_to_snake_case(d: Dict) -> Dict:\n+ def convert_to_snake_case(m: Mapping) -> Dict:\n converted: Dict = {}\n- for k, v in d.items():\n- if isinstance(v, dict):\n+ for k, v in m.items():\n+ if isinstance(v, Mapping):\n v = convert_to_snake_case(v)\n if isinstance(v, list):\n- v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]\n+ v = [\n+ convert_to_snake_case(i) if isinstance(i, Mapping) else i for i in v\n+ ]\n converted[convert_camel_case_to_snake(k)] = v\n return converted\n", "issue": "snake_case_fallback_resolvers not calling obj.get(attr_name)\n**Ariadne version:** 0.13.0\r\n**Python version:** 3.8.11\r\n\r\nHello. I am using the [databases](https://www.encode.io/databases/) package with an [asyncpg](https://magicstack.github.io/asyncpg/current/) backend to interact with a PostgreSQL database. The objects returned from my queries are of the type `databases.backends.postgres.Record`. The desired attributes can only can accessed via the get method. However, when I use `snake_case_fallback_resolvers`, Ariadne has trouble resolving the requested fields and I receive the following error: `Cannot return null for non-nullable field`\r\n\r\nIf I instead use the regular `fallback_resolvers` (adjusting my schema's naming conventions), Ariadne is able to resolve the requested fields.\r\n\r\nIs this a bug or am I doing something wrong? 
Thank you for your time.\r\n\n", "before_files": [{"content": "import asyncio\nfrom functools import wraps\nfrom typing import Optional, Union, Callable, Dict, Any\n\nfrom graphql import GraphQLError, parse\n\n\ndef convert_camel_case_to_snake(graphql_name: str) -> str:\n # pylint: disable=too-many-boolean-expressions\n max_index = len(graphql_name) - 1\n lowered_name = graphql_name.lower()\n\n python_name = \"\"\n for i, c in enumerate(lowered_name):\n if i > 0 and (\n # testWord -> test_word\n (\n c != graphql_name[i]\n and graphql_name[i - 1] != \"_\"\n and graphql_name[i - 1] == python_name[-1]\n )\n # TESTWord -> test_word\n or (\n i < max_index\n and graphql_name[i] != lowered_name[i]\n and graphql_name[i + 1] == lowered_name[i + 1]\n )\n # test134 -> test_134\n or (c.isdigit() and not graphql_name[i - 1].isdigit())\n # 134test -> 134_test\n or (not c.isdigit() and graphql_name[i - 1].isdigit())\n ):\n python_name += \"_\"\n python_name += c\n return python_name\n\n\ndef gql(value: str) -> str:\n parse(value)\n return value\n\n\ndef unwrap_graphql_error(\n error: Union[GraphQLError, Optional[Exception]]\n) -> Optional[Exception]:\n if isinstance(error, GraphQLError):\n return unwrap_graphql_error(error.original_error)\n return error\n\n\ndef convert_kwargs_to_snake_case(func: Callable) -> Callable:\n def convert_to_snake_case(d: Dict) -> Dict:\n converted: Dict = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = convert_to_snake_case(v)\n if isinstance(v, list):\n v = [convert_to_snake_case(i) if isinstance(i, dict) else i for i in v]\n converted[convert_camel_case_to_snake(k)] = v\n return converted\n\n if asyncio.iscoroutinefunction(func):\n\n @wraps(func)\n async def async_wrapper(*args: Any, **kwargs: Any) -> Any:\n return await func(*args, **convert_to_snake_case(kwargs))\n\n return async_wrapper\n\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n return func(*args, **convert_to_snake_case(kwargs))\n\n return wrapper\n", "path": "ariadne/utils.py"}, {"content": "from typing import Any\n\nfrom graphql import default_field_resolver\nfrom graphql.type import (\n GraphQLField,\n GraphQLObjectType,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\nfrom .types import Resolver, SchemaBindable\nfrom .utils import convert_camel_case_to_snake\n\n\nclass FallbackResolversSetter(SchemaBindable):\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n for type_object in schema.type_map.values():\n if isinstance(type_object, GraphQLObjectType):\n self.add_resolvers_to_object_fields(type_object)\n\n def add_resolvers_to_object_fields(self, type_object) -> None:\n for field_name, field_object in type_object.fields.items():\n self.add_resolver_to_field(field_name, field_object)\n\n def add_resolver_to_field(self, _: str, field_object: GraphQLField) -> None:\n if field_object.resolve is None:\n field_object.resolve = default_field_resolver\n\n\nclass SnakeCaseFallbackResolversSetter(FallbackResolversSetter):\n def add_resolver_to_field(\n self, field_name: str, field_object: GraphQLField\n ) -> None:\n if field_object.resolve is None:\n field_name = convert_camel_case_to_snake(field_name)\n field_object.resolve = resolve_to(field_name)\n\n\nfallback_resolvers = FallbackResolversSetter()\nsnake_case_fallback_resolvers = SnakeCaseFallbackResolversSetter()\n\n\ndef resolve_parent_field(parent: Any, field_name: str) -> Any:\n if isinstance(parent, dict):\n return parent.get(field_name)\n return getattr(parent, field_name, None)\n\n\ndef resolve_to(field_name: str) -> 
Resolver:\n def resolver(parent: Any, info: GraphQLResolveInfo, **kwargs) -> Any:\n value = resolve_parent_field(parent, field_name)\n if callable(value):\n return value(info, **kwargs)\n return value\n\n # pylint: disable=protected-access\n resolver._ariadne_alias_resolver = True # type: ignore\n return resolver\n\n\ndef is_default_resolver(resolver: Resolver) -> bool:\n # pylint: disable=comparison-with-callable\n if resolver == default_field_resolver:\n return True\n return hasattr(resolver, \"_ariadne_alias_resolver\")\n", "path": "ariadne/resolvers.py"}]} | 2,059 | 400 |
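The ariadne fix recorded just above swaps the `isinstance(parent, dict)` checks for `collections.abc.Mapping`, which is what lets mapping-like database rows resolve by key. A minimal sketch of that difference; `FakeRecord` is an invented stand-in for `databases.backends.postgres.Record`, not the real type:

```python
from collections.abc import Mapping


class FakeRecord(Mapping):
    """Invented mapping-like row object that is not a dict subclass."""

    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)


def resolve_parent_field(parent, field_name):
    # Mirrors the patched resolver: Mapping covers dicts and Record-like rows.
    if isinstance(parent, Mapping):
        return parent.get(field_name)
    return getattr(parent, field_name, None)


row = FakeRecord({"user_id": 1})
print(isinstance(row, dict))                 # False, so a dict-only check misses it
print(resolve_parent_field(row, "user_id"))  # 1
```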
gh_patches_debug_3568 | rasdani/github-patches | git_diff | nf-core__tools-1441 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
All modules with the same prefix being found with nf-core modules update
### Description of the bug
Not sure whether this affects any other `nf-core modules` commands but if I want to update `minia` in my pipeline then it is correctly updated but another module with the same prefix `miniasm` is being installed too as you can see at the end of the console output below. I suspect there is a regex or glob that needs to be updated somewhere in the tools codebase to only find and update the required module.
### Command used and terminal output
```console
$ git clone https://github.com/nf-core/viralrecon.git
Cloning into 'viralrecon'...
remote: Enumerating objects: 10345, done.
remote: Counting objects: 100% (100/100), done.
remote: Compressing objects: 100% (88/88), done.
remote: Total 10345 (delta 22), reused 42 (delta 4), pack-reused 10245
Receiving objects: 100% (10345/10345), 7.96 MiB | 5.16 MiB/s, done.
Resolving deltas: 100% (6536/6536), done.
$ cd viralrecon
$ nf-core modules update minia
,--./,-.
___ __ __ __ ___ /,-._.--~\
|\ | |__ __ / ` / \ |__) |__ } {
| \| | \__, \__/ | \ |___ \`-._,-`-,
`._,._,'
nf-core/tools version 2.2
INFO Updating 'nf-core/modules/minia' update.py:239
INFO Downloaded 4 files to ./modules/nf-core/modules/minia modules_command.py:273
$ git status
On branch master
Your branch is up-to-date with 'origin/master'.
Changes not staged for commit:
(use "git add/rm <file>..." to update what will be committed)
(use "git restore <file>..." to discard changes in working directory)
modified: modules.json
deleted: modules/nf-core/modules/minia/functions.nf
modified: modules/nf-core/modules/minia/main.nf
modified: modules/nf-core/modules/minia/meta.yml
Untracked files:
(use "git add <file>..." to include in what will be committed)
modules/nf-core/modules/miniasm/
no changes added to commit (use "git add" and/or "git commit -a")
```
### System information
_No response_
</issue>
<code>
[start of nf_core/modules/modules_repo.py]
1 import os
2 import requests
3 import base64
4 import sys
5 import logging
6 import nf_core.utils
7
8 log = logging.getLogger(__name__)
9
10
11 class ModulesRepo(object):
12 """
13 An object to store details about the repository being used for modules.
14
15 Used by the `nf-core modules` top-level command with -r and -b flags,
16 so that this can be used in the same way by all sub-commands.
17 """
18
19 def __init__(self, repo="nf-core/modules", branch=None):
20 self.name = repo
21 self.branch = branch
22
23 # Don't bother fetching default branch if we're using nf-core
24 if not self.branch and self.name == "nf-core/modules":
25 self.branch = "master"
26
27 # Verify that the repo seems to be correctly configured
28 if self.name != "nf-core/modules" or self.branch:
29
30 # Get the default branch if not set
31 if not self.branch:
32 self.get_default_branch()
33
34 try:
35 self.verify_modules_repo()
36 except LookupError:
37 raise
38
39 self.owner, self.repo = self.name.split("/")
40 self.modules_file_tree = {}
41 self.modules_avail_module_names = []
42
43 def get_default_branch(self):
44 """Get the default branch for a GitHub repo"""
45 api_url = f"https://api.github.com/repos/{self.name}"
46 response = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())
47 if response.status_code == 200:
48 self.branch = response.json()["default_branch"]
49 log.debug(f"Found default branch to be '{self.branch}'")
50 else:
51 raise LookupError(f"Could not find repository '{self.name}' on GitHub")
52
53 def verify_modules_repo(self):
54
55 # Check if name seems to be well formed
56 if self.name.count("/") != 1:
57 raise LookupError(f"Repository name '{self.name}' should be of the format '<github_user_name>/<repo_name>'")
58
59 # Check if repository exist
60 api_url = f"https://api.github.com/repos/{self.name}/branches"
61 response = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())
62 if response.status_code == 200:
63 branches = [branch["name"] for branch in response.json()]
64 if self.branch not in branches:
65 raise LookupError(f"Branch '{self.branch}' not found in '{self.name}'")
66 else:
67 raise LookupError(f"Repository '{self.name}' is not available on GitHub")
68
69 api_url = f"https://api.github.com/repos/{self.name}/contents?ref={self.branch}"
70 response = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())
71 if response.status_code == 200:
72 dir_names = [entry["name"] for entry in response.json() if entry["type"] == "dir"]
73 if "modules" not in dir_names:
74 err_str = f"Repository '{self.name}' ({self.branch}) does not contain a 'modules/' directory"
75 if "software" in dir_names:
76 err_str += ".\nAs of version 2.0, the 'software/' directory should be renamed to 'modules/'"
77 raise LookupError(err_str)
78 else:
79 raise LookupError(f"Unable to fetch repository information from '{self.name}' ({self.branch})")
80
81 def get_modules_file_tree(self):
82 """
83 Fetch the file list from the repo, using the GitHub API
84
85 Sets self.modules_file_tree
86 self.modules_avail_module_names
87 """
88 api_url = "https://api.github.com/repos/{}/git/trees/{}?recursive=1".format(self.name, self.branch)
89 r = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())
90 if r.status_code == 404:
91 raise LookupError("Repository / branch not found: {} ({})\n{}".format(self.name, self.branch, api_url))
92 elif r.status_code != 200:
93 raise LookupError(
94 "Could not fetch {} ({}) tree: {}\n{}".format(self.name, self.branch, r.status_code, api_url)
95 )
96
97 result = r.json()
98 assert result["truncated"] == False
99
100 self.modules_file_tree = result["tree"]
101 for f in result["tree"]:
102 if f["path"].startswith(f"modules/") and f["path"].endswith("/main.nf") and "/test/" not in f["path"]:
103 # remove modules/ and /main.nf
104 self.modules_avail_module_names.append(f["path"].replace("modules/", "").replace("/main.nf", ""))
105 if len(self.modules_avail_module_names) == 0:
106 raise LookupError(f"Found no modules in '{self.name}'")
107
108 def get_module_file_urls(self, module, commit=""):
109 """Fetch list of URLs for a specific module
110
111 Takes the name of a module and iterates over the GitHub repo file tree.
112 Loops over items that are prefixed with the path 'modules/<module_name>' and ignores
113 anything that's not a blob. Also ignores the test/ subfolder.
114
115 Returns a dictionary with keys as filenames and values as GitHub API URLs.
116 These can be used to then download file contents.
117
118 Args:
119 module (string): Name of module for which to fetch a set of URLs
120
121 Returns:
122 dict: Set of files and associated URLs as follows:
123
124 {
125 'modules/fastqc/main.nf': 'https://api.github.com/repos/nf-core/modules/git/blobs/65ba598119206a2b851b86a9b5880b5476e263c3',
126 'modules/fastqc/meta.yml': 'https://api.github.com/repos/nf-core/modules/git/blobs/0d5afc23ba44d44a805c35902febc0a382b17651'
127 }
128 """
129 results = {}
130 for f in self.modules_file_tree:
131 if not f["path"].startswith("modules/{}".format(module)):
132 continue
133 if f["type"] != "blob":
134 continue
135 if "/test/" in f["path"]:
136 continue
137 results[f["path"]] = f["url"]
138 if commit != "":
139 for path in results:
140 results[path] = f"https://api.github.com/repos/{self.name}/contents/{path}?ref={commit}"
141 return results
142
143 def download_gh_file(self, dl_filename, api_url):
144 """Download a file from GitHub using the GitHub API
145
146 Args:
147 dl_filename (string): Path to save file to
148 api_url (string): GitHub API URL for file
149
150 Raises:
151 If a problem, raises an error
152 """
153
154 # Make target directory if it doesn't already exist
155 dl_directory = os.path.dirname(dl_filename)
156 if not os.path.exists(dl_directory):
157 os.makedirs(dl_directory)
158
159 # Call the GitHub API
160 r = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())
161 if r.status_code != 200:
162 raise LookupError("Could not fetch {} file: {}\n {}".format(self.name, r.status_code, api_url))
163 result = r.json()
164 file_contents = base64.b64decode(result["content"])
165
166 # Write the file contents
167 with open(dl_filename, "wb") as fh:
168 fh.write(file_contents)
169
[end of nf_core/modules/modules_repo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nf_core/modules/modules_repo.py b/nf_core/modules/modules_repo.py
--- a/nf_core/modules/modules_repo.py
+++ b/nf_core/modules/modules_repo.py
@@ -128,7 +128,7 @@
"""
results = {}
for f in self.modules_file_tree:
- if not f["path"].startswith("modules/{}".format(module)):
+ if not f["path"].startswith("modules/{}/".format(module)):
continue
if f["type"] != "blob":
continue
| {"golden_diff": "diff --git a/nf_core/modules/modules_repo.py b/nf_core/modules/modules_repo.py\n--- a/nf_core/modules/modules_repo.py\n+++ b/nf_core/modules/modules_repo.py\n@@ -128,7 +128,7 @@\n \"\"\"\n results = {}\n for f in self.modules_file_tree:\n- if not f[\"path\"].startswith(\"modules/{}\".format(module)):\n+ if not f[\"path\"].startswith(\"modules/{}/\".format(module)):\n continue\n if f[\"type\"] != \"blob\":\n continue\n", "issue": "All modules with the same prefix being found with nf-core modules update\n### Description of the bug\n\nNot sure whether this affects any other `nf-core modules` commands but if I want to update `minia` in my pipeline then it is correctly updated but another module with the same prefix `miniasm` is being installed too as you can see at the end of the console output below. I suspect there is a regex or glob that needs to be updated somewhere in the tools codebase to only find and update the required module.\n\n### Command used and terminal output\n\n```console\n$ git clone https://github.com/nf-core/viralrecon.git \r\nCloning into 'viralrecon'...\r\nremote: Enumerating objects: 10345, done.\r\nremote: Counting objects: 100% (100/100), done.\r\nremote: Compressing objects: 100% (88/88), done.\r\nremote: Total 10345 (delta 22), reused 42 (delta 4), pack-reused 10245\r\nReceiving objects: 100% (10345/10345), 7.96 MiB | 5.16 MiB/s, done.\r\nResolving deltas: 100% (6536/6536), done.\r\n\r\n$ cd viralrecon \r\n\r\n$ nf-core modules update minia\r\n\r\n ,--./,-.\r\n ___ __ __ __ ___ /,-._.--~\\\r\n |\\ | |__ __ / ` / \\ |__) |__ } {\r\n | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-,\r\n `._,._,'\r\n\r\n nf-core/tools version 2.2\r\n\r\n\r\n\r\nINFO Updating 'nf-core/modules/minia' update.py:239\r\nINFO Downloaded 4 files to ./modules/nf-core/modules/minia modules_command.py:273\r\n\r\n$ git status \r\nOn branch master\r\nYour branch is up-to-date with 'origin/master'.\r\n\r\nChanges not staged for commit:\r\n (use \"git add/rm <file>...\" to update what will be committed)\r\n (use \"git restore <file>...\" to discard changes in working directory)\r\n\tmodified: modules.json\r\n\tdeleted: modules/nf-core/modules/minia/functions.nf\r\n\tmodified: modules/nf-core/modules/minia/main.nf\r\n\tmodified: modules/nf-core/modules/minia/meta.yml\r\n\r\nUntracked files:\r\n (use \"git add <file>...\" to include in what will be committed)\r\n\tmodules/nf-core/modules/miniasm/\r\n\r\nno changes added to commit (use \"git add\" and/or \"git commit -a\")\n```\n\n\n### System information\n\n_No response_\n", "before_files": [{"content": "import os\nimport requests\nimport base64\nimport sys\nimport logging\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n\nclass ModulesRepo(object):\n \"\"\"\n An object to store details about the repository being used for modules.\n\n Used by the `nf-core modules` top-level command with -r and -b flags,\n so that this can be used in the same way by all sub-commands.\n \"\"\"\n\n def __init__(self, repo=\"nf-core/modules\", branch=None):\n self.name = repo\n self.branch = branch\n\n # Don't bother fetching default branch if we're using nf-core\n if not self.branch and self.name == \"nf-core/modules\":\n self.branch = \"master\"\n\n # Verify that the repo seems to be correctly configured\n if self.name != \"nf-core/modules\" or self.branch:\n\n # Get the default branch if not set\n if not self.branch:\n self.get_default_branch()\n\n try:\n self.verify_modules_repo()\n except LookupError:\n raise\n\n self.owner, self.repo = 
self.name.split(\"/\")\n self.modules_file_tree = {}\n self.modules_avail_module_names = []\n\n def get_default_branch(self):\n \"\"\"Get the default branch for a GitHub repo\"\"\"\n api_url = f\"https://api.github.com/repos/{self.name}\"\n response = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())\n if response.status_code == 200:\n self.branch = response.json()[\"default_branch\"]\n log.debug(f\"Found default branch to be '{self.branch}'\")\n else:\n raise LookupError(f\"Could not find repository '{self.name}' on GitHub\")\n\n def verify_modules_repo(self):\n\n # Check if name seems to be well formed\n if self.name.count(\"/\") != 1:\n raise LookupError(f\"Repository name '{self.name}' should be of the format '<github_user_name>/<repo_name>'\")\n\n # Check if repository exist\n api_url = f\"https://api.github.com/repos/{self.name}/branches\"\n response = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())\n if response.status_code == 200:\n branches = [branch[\"name\"] for branch in response.json()]\n if self.branch not in branches:\n raise LookupError(f\"Branch '{self.branch}' not found in '{self.name}'\")\n else:\n raise LookupError(f\"Repository '{self.name}' is not available on GitHub\")\n\n api_url = f\"https://api.github.com/repos/{self.name}/contents?ref={self.branch}\"\n response = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())\n if response.status_code == 200:\n dir_names = [entry[\"name\"] for entry in response.json() if entry[\"type\"] == \"dir\"]\n if \"modules\" not in dir_names:\n err_str = f\"Repository '{self.name}' ({self.branch}) does not contain a 'modules/' directory\"\n if \"software\" in dir_names:\n err_str += \".\\nAs of version 2.0, the 'software/' directory should be renamed to 'modules/'\"\n raise LookupError(err_str)\n else:\n raise LookupError(f\"Unable to fetch repository information from '{self.name}' ({self.branch})\")\n\n def get_modules_file_tree(self):\n \"\"\"\n Fetch the file list from the repo, using the GitHub API\n\n Sets self.modules_file_tree\n self.modules_avail_module_names\n \"\"\"\n api_url = \"https://api.github.com/repos/{}/git/trees/{}?recursive=1\".format(self.name, self.branch)\n r = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())\n if r.status_code == 404:\n raise LookupError(\"Repository / branch not found: {} ({})\\n{}\".format(self.name, self.branch, api_url))\n elif r.status_code != 200:\n raise LookupError(\n \"Could not fetch {} ({}) tree: {}\\n{}\".format(self.name, self.branch, r.status_code, api_url)\n )\n\n result = r.json()\n assert result[\"truncated\"] == False\n\n self.modules_file_tree = result[\"tree\"]\n for f in result[\"tree\"]:\n if f[\"path\"].startswith(f\"modules/\") and f[\"path\"].endswith(\"/main.nf\") and \"/test/\" not in f[\"path\"]:\n # remove modules/ and /main.nf\n self.modules_avail_module_names.append(f[\"path\"].replace(\"modules/\", \"\").replace(\"/main.nf\", \"\"))\n if len(self.modules_avail_module_names) == 0:\n raise LookupError(f\"Found no modules in '{self.name}'\")\n\n def get_module_file_urls(self, module, commit=\"\"):\n \"\"\"Fetch list of URLs for a specific module\n\n Takes the name of a module and iterates over the GitHub repo file tree.\n Loops over items that are prefixed with the path 'modules/<module_name>' and ignores\n anything that's not a blob. 
Also ignores the test/ subfolder.\n\n Returns a dictionary with keys as filenames and values as GitHub API URLs.\n These can be used to then download file contents.\n\n Args:\n module (string): Name of module for which to fetch a set of URLs\n\n Returns:\n dict: Set of files and associated URLs as follows:\n\n {\n 'modules/fastqc/main.nf': 'https://api.github.com/repos/nf-core/modules/git/blobs/65ba598119206a2b851b86a9b5880b5476e263c3',\n 'modules/fastqc/meta.yml': 'https://api.github.com/repos/nf-core/modules/git/blobs/0d5afc23ba44d44a805c35902febc0a382b17651'\n }\n \"\"\"\n results = {}\n for f in self.modules_file_tree:\n if not f[\"path\"].startswith(\"modules/{}\".format(module)):\n continue\n if f[\"type\"] != \"blob\":\n continue\n if \"/test/\" in f[\"path\"]:\n continue\n results[f[\"path\"]] = f[\"url\"]\n if commit != \"\":\n for path in results:\n results[path] = f\"https://api.github.com/repos/{self.name}/contents/{path}?ref={commit}\"\n return results\n\n def download_gh_file(self, dl_filename, api_url):\n \"\"\"Download a file from GitHub using the GitHub API\n\n Args:\n dl_filename (string): Path to save file to\n api_url (string): GitHub API URL for file\n\n Raises:\n If a problem, raises an error\n \"\"\"\n\n # Make target directory if it doesn't already exist\n dl_directory = os.path.dirname(dl_filename)\n if not os.path.exists(dl_directory):\n os.makedirs(dl_directory)\n\n # Call the GitHub API\n r = requests.get(api_url, auth=nf_core.utils.github_api_auto_auth())\n if r.status_code != 200:\n raise LookupError(\"Could not fetch {} file: {}\\n {}\".format(self.name, r.status_code, api_url))\n result = r.json()\n file_contents = base64.b64decode(result[\"content\"])\n\n # Write the file contents\n with open(dl_filename, \"wb\") as fh:\n fh.write(file_contents)\n", "path": "nf_core/modules/modules_repo.py"}]} | 3,160 | 117 |
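The one-character change in the nf-core entry above works because a bare `startswith` prefix test lets `modules/miniasm/...` match a query for `minia`; anchoring the prefix with a trailing slash removes the collision. A standalone sketch with invented file paths:

```python
paths = [
    "modules/minia/main.nf",
    "modules/minia/meta.yml",
    "modules/miniasm/main.nf",
]
module = "minia"

# Before the fix: a plain prefix match also picks up the miniasm files.
loose = [p for p in paths if p.startswith("modules/{}".format(module))]

# After the fix: the trailing slash restricts the match to the minia directory.
strict = [p for p in paths if p.startswith("modules/{}/".format(module))]

print(loose)   # includes modules/miniasm/main.nf
print(strict)  # only the modules/minia/ files
```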
gh_patches_debug_42801 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-446 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possible error in how to generate unbiased benchmarks
## 📚 Documentation
(Possible error, possibly me doing it wrong)
https://facebookresearch.github.io/CompilerGym/compiler_gym/datasets.html
The code for `random_benchmark` says:
```python
rng = np.random.default_rng()
finite_datasets = [d for d in env.datasets if len(d) != math.inf]
dataset = rng.choice(
finite_datasets,
p=[len(d) for d in finite_datasets]
)
dataset.random_benchmark(random_state=rng)
```
But, I think that if you call `len` on a generator, it will fail with error:
`TypeError: 'float' object cannot be interpreted as an integer`
Suggest changing it to:
`d.size != math.inf`
</issue>
<code>
[start of compiler_gym/datasets/datasets.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 from collections import deque
6 from typing import Dict, Iterable, Optional, Set, TypeVar
7
8 import numpy as np
9
10 from compiler_gym.datasets.benchmark import Benchmark
11 from compiler_gym.datasets.dataset import Dataset
12 from compiler_gym.datasets.uri import BENCHMARK_URI_RE, resolve_uri_protocol
13
14 T = TypeVar("T")
15
16
17 def round_robin_iterables(iters: Iterable[Iterable[T]]) -> Iterable[T]:
18 """Yield from the given iterators in round robin order."""
19 # Use a queue of iterators to iterate over. Repeatedly pop an iterator from
20 # the queue, yield the next value from it, then put it at the back of the
21 # queue. The iterator is discarded once exhausted.
22 iters = deque(iters)
23 while len(iters) > 1:
24 it = iters.popleft()
25 try:
26 yield next(it)
27 iters.append(it)
28 except StopIteration:
29 pass
30 # Once we have only a single iterator left, return it directly rather
31 # continuing with the round robin.
32 if len(iters) == 1:
33 yield from iters.popleft()
34
35
36 class Datasets:
37 """A collection of datasets.
38
39 This class provides a dictionary-like interface for indexing and iterating
40 over multiple :class:`Dataset <compiler_gym.datasets.Dataset>` objects.
41 Select a dataset by URI using:
42
43 >>> env.datasets["benchmark://cbench-v1"]
44
45 Check whether a dataset exists using:
46
47 >>> "benchmark://cbench-v1" in env.datasets
48 True
49
50 Or iterate over the datasets using:
51
52 >>> for dataset in env.datasets:
53 ... print(dataset.name)
54 benchmark://cbench-v1
55 benchmark://github-v0
56 benchmark://npb-v0
57
58 To select a benchmark from the datasets, use :meth:`benchmark()`:
59
60 >>> env.datasets.benchmark("benchmark://a-v0/a")
61
62 Use the :meth:`benchmarks()` method to iterate over every benchmark in the
63 datasets in a stable round robin order:
64
65 >>> for benchmark in env.datasets.benchmarks():
66 ... print(benchmark)
67 benchmark://cbench-v1/1
68 benchmark://github-v0/1
69 benchmark://npb-v0/1
70 benchmark://cbench-v1/2
71 ...
72
73 If you want to exclude a dataset, delete it:
74
75 >>> del env.datasets["benchmark://b-v0"]
76 """
77
78 def __init__(
79 self,
80 datasets: Iterable[Dataset],
81 ):
82 self._datasets: Dict[str, Dataset] = {d.name: d for d in datasets}
83 self._visible_datasets: Set[str] = set(
84 name for name, dataset in self._datasets.items() if not dataset.deprecated
85 )
86
87 def datasets(self, with_deprecated: bool = False) -> Iterable[Dataset]:
88 """Enumerate the datasets.
89
90 Dataset order is consistent across runs.
91
92 :param with_deprecated: If :code:`True`, include datasets that have been
93 marked as deprecated.
94
95 :return: An iterable sequence of :meth:`Dataset
96 <compiler_gym.datasets.Dataset>` instances.
97 """
98 datasets = self._datasets.values()
99 if not with_deprecated:
100 datasets = (d for d in datasets if not d.deprecated)
101 yield from sorted(datasets, key=lambda d: (d.sort_order, d.name))
102
103 def __iter__(self) -> Iterable[Dataset]:
104 """Iterate over the datasets.
105
106 Dataset order is consistent across runs.
107
108 Equivalent to :meth:`datasets.datasets()
109 <compiler_gym.datasets.Dataset.datasets>`, but without the ability to
110 iterate over the deprecated datasets.
111
112 If the number of benchmarks in any of the datasets is infinite
113 (:code:`len(dataset) == math.inf`), the iterable returned by this method
114 will continue indefinitely.
115
116 :return: An iterable sequence of :meth:`Dataset
117 <compiler_gym.datasets.Dataset>` instances.
118 """
119 return self.datasets()
120
121 def dataset(self, dataset: str) -> Dataset:
122 """Get a dataset.
123
124 Return the corresponding :meth:`Dataset
125 <compiler_gym.datasets.Dataset>`. Name lookup will succeed whether or
126 not the dataset is deprecated.
127
128 :param dataset: A dataset name.
129
130 :return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
131
132 :raises LookupError: If :code:`dataset` is not found.
133 """
134 dataset_name = resolve_uri_protocol(dataset)
135
136 if dataset_name not in self._datasets:
137 raise LookupError(f"Dataset not found: {dataset_name}")
138
139 return self._datasets[dataset_name]
140
141 def __getitem__(self, dataset: str) -> Dataset:
142 """Lookup a dataset.
143
144 :param dataset: A dataset name.
145
146 :return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.
147
148 :raises LookupError: If :code:`dataset` is not found.
149 """
150 return self.dataset(dataset)
151
152 def __setitem__(self, key: str, dataset: Dataset):
153 """Add a dataset to the collection.
154
155 :param key: The name of the dataset.
156 :param dataset: The dataset to add.
157 """
158 dataset_name = resolve_uri_protocol(key)
159
160 self._datasets[dataset_name] = dataset
161 if not dataset.deprecated:
162 self._visible_datasets.add(dataset_name)
163
164 def __delitem__(self, dataset: str):
165 """Remove a dataset from the collection.
166
167 This does not affect any underlying storage used by dataset. See
168 :meth:`uninstall() <compiler_gym.datasets.Datasets.uninstall>` to clean
169 up.
170
171 :param dataset: The name of a dataset.
172
173 :return: :code:`True` if the dataset was removed, :code:`False` if it
174 was already removed.
175 """
176 dataset_name = resolve_uri_protocol(dataset)
177 if dataset_name in self._visible_datasets:
178 self._visible_datasets.remove(dataset_name)
179 del self._datasets[dataset_name]
180
181 def __contains__(self, dataset: str) -> bool:
182 """Returns whether the dataset is contained."""
183 try:
184 self.dataset(dataset)
185 return True
186 except LookupError:
187 return False
188
189 def benchmarks(self, with_deprecated: bool = False) -> Iterable[Benchmark]:
190 """Enumerate the (possibly infinite) benchmarks lazily.
191
192 Benchmarks order is consistent across runs. One benchmark from each
193 dataset is returned in round robin order until all datasets have been
194 fully enumerated. The order of :meth:`benchmarks()
195 <compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
196 <compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
197
198 If the number of benchmarks in any of the datasets is infinite
199 (:code:`len(dataset) == math.inf`), the iterable returned by this method
200 will continue indefinitely.
201
202 :param with_deprecated: If :code:`True`, include benchmarks from
203 datasets that have been marked deprecated.
204
205 :return: An iterable sequence of :class:`Benchmark
206 <compiler_gym.datasets.Benchmark>` instances.
207 """
208 return round_robin_iterables(
209 (d.benchmarks() for d in self.datasets(with_deprecated=with_deprecated))
210 )
211
212 def benchmark_uris(self, with_deprecated: bool = False) -> Iterable[str]:
213 """Enumerate the (possibly infinite) benchmark URIs.
214
215 Benchmark URI order is consistent across runs. URIs from datasets are
216 returned in round robin order. The order of :meth:`benchmarks()
217 <compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()
218 <compiler_gym.datasets.Datasets.benchmark_uris>` is the same.
219
220 If the number of benchmarks in any of the datasets is infinite
221 (:code:`len(dataset) == math.inf`), the iterable returned by this method
222 will continue indefinitely.
223
224 :param with_deprecated: If :code:`True`, include benchmarks from
225 datasets that have been marked deprecated.
226
227 :return: An iterable sequence of benchmark URI strings.
228 """
229 return round_robin_iterables(
230 (d.benchmark_uris() for d in self.datasets(with_deprecated=with_deprecated))
231 )
232
233 def benchmark(self, uri: str) -> Benchmark:
234 """Select a benchmark.
235
236 Returns the corresponding :class:`Benchmark
237 <compiler_gym.datasets.Benchmark>`, regardless of whether the containing
238 dataset is installed or deprecated.
239
240 :param uri: The URI of the benchmark to return.
241
242 :return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
243 instance.
244 """
245 uri = resolve_uri_protocol(uri)
246
247 match = BENCHMARK_URI_RE.match(uri)
248 if not match:
249 raise ValueError(f"Invalid benchmark URI: '{uri}'")
250
251 dataset_name = match.group("dataset")
252 dataset = self._datasets[dataset_name]
253
254 return dataset.benchmark(uri)
255
256 def random_benchmark(
257 self, random_state: Optional[np.random.Generator] = None
258 ) -> Benchmark:
259 """Select a benchmark randomly.
260
261 First, a dataset is selected uniformly randomly using
262 :code:`random_state.choice(list(datasets))`. The
263 :meth:`random_benchmark()
264 <compiler_gym.datasets.Dataset.random_benchmark>` method of that dataset
265 is then called to select a benchmark.
266
267 Note that the distribution of benchmarks selected by this method is not
268 biased by the size of each dataset, since datasets are selected
269 uniformly. This means that datasets with a small number of benchmarks
270 will be overrepresented compared to datasets with many benchmarks. To
271 correct for this bias, use the number of benchmarks in each dataset as
272 a weight for the random selection:
273
274 >>> rng = np.random.default_rng()
275 >>> finite_datasets = [d for d in env.datasets if len(d) != math.inf]
276 >>> dataset = rng.choice(
277 finite_datasets,
278 p=[len(d) for d in finite_datasets]
279 )
280 >>> dataset.random_benchmark(random_state=rng)
281
282 :param random_state: A random number generator. If not provided, a
283 default :code:`np.random.default_rng()` is used.
284
285 :return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
286 instance.
287 """
288 random_state = random_state or np.random.default_rng()
289 dataset = random_state.choice(list(self._visible_datasets))
290 return self[dataset].random_benchmark(random_state=random_state)
291
292 @property
293 def size(self) -> int:
294 return len(self._visible_datasets)
295
296 def __len__(self) -> int:
297 """The number of datasets in the collection."""
298 return self.size
299
[end of compiler_gym/datasets/datasets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/compiler_gym/datasets/datasets.py b/compiler_gym/datasets/datasets.py
--- a/compiler_gym/datasets/datasets.py
+++ b/compiler_gym/datasets/datasets.py
@@ -3,7 +3,7 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
-from typing import Dict, Iterable, Optional, Set, TypeVar
+from typing import Dict, Iterable, List, Optional, Set, TypeVar
import numpy as np
@@ -254,39 +254,64 @@
return dataset.benchmark(uri)
def random_benchmark(
- self, random_state: Optional[np.random.Generator] = None
+ self,
+ random_state: Optional[np.random.Generator] = None,
+ weighted: bool = False,
+ weights: Optional[Dict[str, float]] = None,
) -> Benchmark:
"""Select a benchmark randomly.
- First, a dataset is selected uniformly randomly using
- :code:`random_state.choice(list(datasets))`. The
+ First, a dataset is selected randomly using
+ :code:`random_state.choice(list(datasets))`. Then the
:meth:`random_benchmark()
- <compiler_gym.datasets.Dataset.random_benchmark>` method of that dataset
- is then called to select a benchmark.
-
- Note that the distribution of benchmarks selected by this method is not
- biased by the size of each dataset, since datasets are selected
- uniformly. This means that datasets with a small number of benchmarks
- will be overrepresented compared to datasets with many benchmarks. To
- correct for this bias, use the number of benchmarks in each dataset as
- a weight for the random selection:
-
- >>> rng = np.random.default_rng()
- >>> finite_datasets = [d for d in env.datasets if len(d) != math.inf]
- >>> dataset = rng.choice(
- finite_datasets,
- p=[len(d) for d in finite_datasets]
- )
- >>> dataset.random_benchmark(random_state=rng)
+ <compiler_gym.datasets.Dataset.random_benchmark>` method of the chosen
+ dataset is called to select a benchmark.
+
+ By default datasets are selected uniformly randomly. This means that
+ datasets with a small number of benchmarks will be overrepresented
+ compared to datasets with many benchmarks. To correct for this bias pass
+ the argument :code:`weighted=True`, which weights the dataset choice by
+ the number of benchmarks in each dataset, equivalent to:
+
+ >>> random.choices(datasets, weights=[len(p) for p in datasets])
+
+ Weighting the choice of datasets by their size means that datasets with
+ infinite sizes (such as random program generators) will be excluded from
+ sampling as their size is :code:`0`. To override the weights of datasets
+ pass a :code:`weights` mapping:
+
+ >>> env.datasets.random_benchmark(weighted=True, weights={
+ "benchmark://dataset-v0": 10,
+ "benchmark://another-dataset-v0": 555,
+ })
:param random_state: A random number generator. If not provided, a
default :code:`np.random.default_rng()` is used.
+ :param weighted: If set, weight the choice of dataset by the number of
+ benchmarks in each dataset, or the value specified in the
+ :code:`weights` mapping.
+
+ :param weights: An optional mapping from dataset URI to the weight to
+ use when :code:`weighted=True`. This overrides the default value of
+ using the dataset size.
+
:return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`
instance.
"""
random_state = random_state or np.random.default_rng()
- dataset = random_state.choice(list(self._visible_datasets))
+ datasets: List[str] = list(self._visible_datasets)
+ # Assume weighted=True if weights dictionary is specified.
+ weighted = weighted or weights
+
+ if weighted:
+ weights: Dict[str, float] = weights or {}
+ w: List[float] = np.array(
+ [weights.get(d, self[d].size) for d in datasets], dtype=float
+ )
+ dataset = random_state.choice(datasets, p=w / w.sum())
+ else:
+ dataset = random_state.choice(datasets)
return self[dataset].random_benchmark(random_state=random_state)
@property
| {"golden_diff": "diff --git a/compiler_gym/datasets/datasets.py b/compiler_gym/datasets/datasets.py\n--- a/compiler_gym/datasets/datasets.py\n+++ b/compiler_gym/datasets/datasets.py\n@@ -3,7 +3,7 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n from collections import deque\n-from typing import Dict, Iterable, Optional, Set, TypeVar\n+from typing import Dict, Iterable, List, Optional, Set, TypeVar\n \n import numpy as np\n \n@@ -254,39 +254,64 @@\n return dataset.benchmark(uri)\n \n def random_benchmark(\n- self, random_state: Optional[np.random.Generator] = None\n+ self,\n+ random_state: Optional[np.random.Generator] = None,\n+ weighted: bool = False,\n+ weights: Optional[Dict[str, float]] = None,\n ) -> Benchmark:\n \"\"\"Select a benchmark randomly.\n \n- First, a dataset is selected uniformly randomly using\n- :code:`random_state.choice(list(datasets))`. The\n+ First, a dataset is selected randomly using\n+ :code:`random_state.choice(list(datasets))`. Then the\n :meth:`random_benchmark()\n- <compiler_gym.datasets.Dataset.random_benchmark>` method of that dataset\n- is then called to select a benchmark.\n-\n- Note that the distribution of benchmarks selected by this method is not\n- biased by the size of each dataset, since datasets are selected\n- uniformly. This means that datasets with a small number of benchmarks\n- will be overrepresented compared to datasets with many benchmarks. To\n- correct for this bias, use the number of benchmarks in each dataset as\n- a weight for the random selection:\n-\n- >>> rng = np.random.default_rng()\n- >>> finite_datasets = [d for d in env.datasets if len(d) != math.inf]\n- >>> dataset = rng.choice(\n- finite_datasets,\n- p=[len(d) for d in finite_datasets]\n- )\n- >>> dataset.random_benchmark(random_state=rng)\n+ <compiler_gym.datasets.Dataset.random_benchmark>` method of the chosen\n+ dataset is called to select a benchmark.\n+\n+ By default datasets are selected uniformly randomly. This means that\n+ datasets with a small number of benchmarks will be overrepresented\n+ compared to datasets with many benchmarks. To correct for this bias pass\n+ the argument :code:`weighted=True`, which weights the dataset choice by\n+ the number of benchmarks in each dataset, equivalent to:\n+\n+ >>> random.choices(datasets, weights=[len(p) for p in datasets])\n+\n+ Weighting the choice of datasets by their size means that datasets with\n+ infinite sizes (such as random program generators) will be excluded from\n+ sampling as their size is :code:`0`. To override the weights of datasets\n+ pass a :code:`weights` mapping:\n+\n+ >>> env.datasets.random_benchmark(weighted=True, weights={\n+ \"benchmark://dataset-v0\": 10,\n+ \"benchmark://another-dataset-v0\": 555,\n+ })\n \n :param random_state: A random number generator. If not provided, a\n default :code:`np.random.default_rng()` is used.\n \n+ :param weighted: If set, weight the choice of dataset by the number of\n+ benchmarks in each dataset, or the value specified in the\n+ :code:`weights` mapping.\n+\n+ :param weights: An optional mapping from dataset URI to the weight to\n+ use when :code:`weighted=True`. 
This overrides the default value of\n+ using the dataset size.\n+\n :return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`\n instance.\n \"\"\"\n random_state = random_state or np.random.default_rng()\n- dataset = random_state.choice(list(self._visible_datasets))\n+ datasets: List[str] = list(self._visible_datasets)\n+ # Assume weighted=True if weights dictionary is specified.\n+ weighted = weighted or weights\n+\n+ if weighted:\n+ weights: Dict[str, float] = weights or {}\n+ w: List[float] = np.array(\n+ [weights.get(d, self[d].size) for d in datasets], dtype=float\n+ )\n+ dataset = random_state.choice(datasets, p=w / w.sum())\n+ else:\n+ dataset = random_state.choice(datasets)\n return self[dataset].random_benchmark(random_state=random_state)\n \n @property\n", "issue": "Possible error in how to generate unbiased benchmarks\n## \ud83d\udcda Documentation\r\n(Possible error, possibly me doing it wrong)\r\nhttps://facebookresearch.github.io/CompilerGym/compiler_gym/datasets.html\r\n\r\nThe code for `random_benchmark` says:\r\n\r\n```python\r\nrng = np.random.default_rng()\r\nfinite_datasets = [d for d in env.datasets if len(d) != math.inf]\r\ndataset = rng.choice(\r\n finite_datasets,\r\n p=[len(d) for d in finite_datasets]\r\n)\r\ndataset.random_benchmark(random_state=rng)\r\n```\r\n\r\nBut, I think that if you call `len` on a generator, it will fail with error:\r\n`TypeError: 'float' object cannot be interpreted as an integer`\r\n\r\nSuggest changing it to:\r\n`d.size != math.inf`\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections import deque\nfrom typing import Dict, Iterable, Optional, Set, TypeVar\n\nimport numpy as np\n\nfrom compiler_gym.datasets.benchmark import Benchmark\nfrom compiler_gym.datasets.dataset import Dataset\nfrom compiler_gym.datasets.uri import BENCHMARK_URI_RE, resolve_uri_protocol\n\nT = TypeVar(\"T\")\n\n\ndef round_robin_iterables(iters: Iterable[Iterable[T]]) -> Iterable[T]:\n \"\"\"Yield from the given iterators in round robin order.\"\"\"\n # Use a queue of iterators to iterate over. Repeatedly pop an iterator from\n # the queue, yield the next value from it, then put it at the back of the\n # queue. The iterator is discarded once exhausted.\n iters = deque(iters)\n while len(iters) > 1:\n it = iters.popleft()\n try:\n yield next(it)\n iters.append(it)\n except StopIteration:\n pass\n # Once we have only a single iterator left, return it directly rather\n # continuing with the round robin.\n if len(iters) == 1:\n yield from iters.popleft()\n\n\nclass Datasets:\n \"\"\"A collection of datasets.\n\n This class provides a dictionary-like interface for indexing and iterating\n over multiple :class:`Dataset <compiler_gym.datasets.Dataset>` objects.\n Select a dataset by URI using:\n\n >>> env.datasets[\"benchmark://cbench-v1\"]\n\n Check whether a dataset exists using:\n\n >>> \"benchmark://cbench-v1\" in env.datasets\n True\n\n Or iterate over the datasets using:\n\n >>> for dataset in env.datasets:\n ... 
print(dataset.name)\n benchmark://cbench-v1\n benchmark://github-v0\n benchmark://npb-v0\n\n To select a benchmark from the datasets, use :meth:`benchmark()`:\n\n >>> env.datasets.benchmark(\"benchmark://a-v0/a\")\n\n Use the :meth:`benchmarks()` method to iterate over every benchmark in the\n datasets in a stable round robin order:\n\n >>> for benchmark in env.datasets.benchmarks():\n ... print(benchmark)\n benchmark://cbench-v1/1\n benchmark://github-v0/1\n benchmark://npb-v0/1\n benchmark://cbench-v1/2\n ...\n\n If you want to exclude a dataset, delete it:\n\n >>> del env.datasets[\"benchmark://b-v0\"]\n \"\"\"\n\n def __init__(\n self,\n datasets: Iterable[Dataset],\n ):\n self._datasets: Dict[str, Dataset] = {d.name: d for d in datasets}\n self._visible_datasets: Set[str] = set(\n name for name, dataset in self._datasets.items() if not dataset.deprecated\n )\n\n def datasets(self, with_deprecated: bool = False) -> Iterable[Dataset]:\n \"\"\"Enumerate the datasets.\n\n Dataset order is consistent across runs.\n\n :param with_deprecated: If :code:`True`, include datasets that have been\n marked as deprecated.\n\n :return: An iterable sequence of :meth:`Dataset\n <compiler_gym.datasets.Dataset>` instances.\n \"\"\"\n datasets = self._datasets.values()\n if not with_deprecated:\n datasets = (d for d in datasets if not d.deprecated)\n yield from sorted(datasets, key=lambda d: (d.sort_order, d.name))\n\n def __iter__(self) -> Iterable[Dataset]:\n \"\"\"Iterate over the datasets.\n\n Dataset order is consistent across runs.\n\n Equivalent to :meth:`datasets.datasets()\n <compiler_gym.datasets.Dataset.datasets>`, but without the ability to\n iterate over the deprecated datasets.\n\n If the number of benchmarks in any of the datasets is infinite\n (:code:`len(dataset) == math.inf`), the iterable returned by this method\n will continue indefinitely.\n\n :return: An iterable sequence of :meth:`Dataset\n <compiler_gym.datasets.Dataset>` instances.\n \"\"\"\n return self.datasets()\n\n def dataset(self, dataset: str) -> Dataset:\n \"\"\"Get a dataset.\n\n Return the corresponding :meth:`Dataset\n <compiler_gym.datasets.Dataset>`. Name lookup will succeed whether or\n not the dataset is deprecated.\n\n :param dataset: A dataset name.\n\n :return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.\n\n :raises LookupError: If :code:`dataset` is not found.\n \"\"\"\n dataset_name = resolve_uri_protocol(dataset)\n\n if dataset_name not in self._datasets:\n raise LookupError(f\"Dataset not found: {dataset_name}\")\n\n return self._datasets[dataset_name]\n\n def __getitem__(self, dataset: str) -> Dataset:\n \"\"\"Lookup a dataset.\n\n :param dataset: A dataset name.\n\n :return: A :meth:`Dataset <compiler_gym.datasets.Dataset>` instance.\n\n :raises LookupError: If :code:`dataset` is not found.\n \"\"\"\n return self.dataset(dataset)\n\n def __setitem__(self, key: str, dataset: Dataset):\n \"\"\"Add a dataset to the collection.\n\n :param key: The name of the dataset.\n :param dataset: The dataset to add.\n \"\"\"\n dataset_name = resolve_uri_protocol(key)\n\n self._datasets[dataset_name] = dataset\n if not dataset.deprecated:\n self._visible_datasets.add(dataset_name)\n\n def __delitem__(self, dataset: str):\n \"\"\"Remove a dataset from the collection.\n\n This does not affect any underlying storage used by dataset. 
See\n :meth:`uninstall() <compiler_gym.datasets.Datasets.uninstall>` to clean\n up.\n\n :param dataset: The name of a dataset.\n\n :return: :code:`True` if the dataset was removed, :code:`False` if it\n was already removed.\n \"\"\"\n dataset_name = resolve_uri_protocol(dataset)\n if dataset_name in self._visible_datasets:\n self._visible_datasets.remove(dataset_name)\n del self._datasets[dataset_name]\n\n def __contains__(self, dataset: str) -> bool:\n \"\"\"Returns whether the dataset is contained.\"\"\"\n try:\n self.dataset(dataset)\n return True\n except LookupError:\n return False\n\n def benchmarks(self, with_deprecated: bool = False) -> Iterable[Benchmark]:\n \"\"\"Enumerate the (possibly infinite) benchmarks lazily.\n\n Benchmarks order is consistent across runs. One benchmark from each\n dataset is returned in round robin order until all datasets have been\n fully enumerated. The order of :meth:`benchmarks()\n <compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()\n <compiler_gym.datasets.Datasets.benchmark_uris>` is the same.\n\n If the number of benchmarks in any of the datasets is infinite\n (:code:`len(dataset) == math.inf`), the iterable returned by this method\n will continue indefinitely.\n\n :param with_deprecated: If :code:`True`, include benchmarks from\n datasets that have been marked deprecated.\n\n :return: An iterable sequence of :class:`Benchmark\n <compiler_gym.datasets.Benchmark>` instances.\n \"\"\"\n return round_robin_iterables(\n (d.benchmarks() for d in self.datasets(with_deprecated=with_deprecated))\n )\n\n def benchmark_uris(self, with_deprecated: bool = False) -> Iterable[str]:\n \"\"\"Enumerate the (possibly infinite) benchmark URIs.\n\n Benchmark URI order is consistent across runs. URIs from datasets are\n returned in round robin order. The order of :meth:`benchmarks()\n <compiler_gym.datasets.Datasets.benchmarks>` and :meth:`benchmark_uris()\n <compiler_gym.datasets.Datasets.benchmark_uris>` is the same.\n\n If the number of benchmarks in any of the datasets is infinite\n (:code:`len(dataset) == math.inf`), the iterable returned by this method\n will continue indefinitely.\n\n :param with_deprecated: If :code:`True`, include benchmarks from\n datasets that have been marked deprecated.\n\n :return: An iterable sequence of benchmark URI strings.\n \"\"\"\n return round_robin_iterables(\n (d.benchmark_uris() for d in self.datasets(with_deprecated=with_deprecated))\n )\n\n def benchmark(self, uri: str) -> Benchmark:\n \"\"\"Select a benchmark.\n\n Returns the corresponding :class:`Benchmark\n <compiler_gym.datasets.Benchmark>`, regardless of whether the containing\n dataset is installed or deprecated.\n\n :param uri: The URI of the benchmark to return.\n\n :return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`\n instance.\n \"\"\"\n uri = resolve_uri_protocol(uri)\n\n match = BENCHMARK_URI_RE.match(uri)\n if not match:\n raise ValueError(f\"Invalid benchmark URI: '{uri}'\")\n\n dataset_name = match.group(\"dataset\")\n dataset = self._datasets[dataset_name]\n\n return dataset.benchmark(uri)\n\n def random_benchmark(\n self, random_state: Optional[np.random.Generator] = None\n ) -> Benchmark:\n \"\"\"Select a benchmark randomly.\n\n First, a dataset is selected uniformly randomly using\n :code:`random_state.choice(list(datasets))`. 
The\n :meth:`random_benchmark()\n <compiler_gym.datasets.Dataset.random_benchmark>` method of that dataset\n is then called to select a benchmark.\n\n Note that the distribution of benchmarks selected by this method is not\n biased by the size of each dataset, since datasets are selected\n uniformly. This means that datasets with a small number of benchmarks\n will be overrepresented compared to datasets with many benchmarks. To\n correct for this bias, use the number of benchmarks in each dataset as\n a weight for the random selection:\n\n >>> rng = np.random.default_rng()\n >>> finite_datasets = [d for d in env.datasets if len(d) != math.inf]\n >>> dataset = rng.choice(\n finite_datasets,\n p=[len(d) for d in finite_datasets]\n )\n >>> dataset.random_benchmark(random_state=rng)\n\n :param random_state: A random number generator. If not provided, a\n default :code:`np.random.default_rng()` is used.\n\n :return: A :class:`Benchmark <compiler_gym.datasets.Benchmark>`\n instance.\n \"\"\"\n random_state = random_state or np.random.default_rng()\n dataset = random_state.choice(list(self._visible_datasets))\n return self[dataset].random_benchmark(random_state=random_state)\n\n @property\n def size(self) -> int:\n return len(self._visible_datasets)\n\n def __len__(self) -> int:\n \"\"\"The number of datasets in the collection.\"\"\"\n return self.size\n", "path": "compiler_gym/datasets/datasets.py"}]} | 3,873 | 1,000 |
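The CompilerGym patch above weights the dataset choice by each dataset's `size` and normalises the weights before calling `choice`, which also sidesteps the unnormalised `p=[len(d) ...]` snippet quoted in the issue. A self-contained sketch of that weighting step, using invented dataset names and sizes and assuming (as the patched docstring states) that infinitely sized datasets report a size of 0:

```python
import numpy as np

# Invented dataset names and sizes; an infinite generator-style dataset reports size 0.
sizes = {
    "benchmark://small-v0": 23,
    "benchmark://large-v0": 1000,
    "benchmark://generator-v0": 0,
}

rng = np.random.default_rng(0)
datasets = list(sizes)

w = np.array([float(sizes[d]) for d in datasets])
p = w / w.sum()  # choice() requires probabilities that sum to 1

picked = rng.choice(datasets, p=p)
print(dict(zip(datasets, p)))  # the generator dataset gets probability 0.0
print(picked)
```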
gh_patches_debug_16378 | rasdani/github-patches | git_diff | freedomofpress__securedrop-379 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Display number of docs and messages per source in source list
> At the moment each source in the list displays: source codename, last updated. It would be helpful to also see: total # of messages/docs.
Extracted from #322
</issue>
<code>
[start of securedrop/db.py]
1 import os
2 import datetime
3
4 from sqlalchemy import create_engine, ForeignKey
5 from sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref
6 from sqlalchemy.ext.declarative import declarative_base
7 from sqlalchemy import Column, Integer, String, Boolean, DateTime
8 from sqlalchemy.orm.exc import NoResultFound
9
10 import config
11 import crypto_util
12 import store
13
14 # http://flask.pocoo.org/docs/patterns/sqlalchemy/
15
16 if config.DATABASE_ENGINE == "sqlite":
17 engine = create_engine(
18 config.DATABASE_ENGINE + ":///" +
19 config.DATABASE_FILE
20 )
21 else:
22 engine = create_engine(
23 config.DATABASE_ENGINE + '://' +
24 config.DATABASE_USERNAME + ':' +
25 config.DATABASE_PASSWORD + '@' +
26 config.DATABASE_HOST + '/' +
27 config.DATABASE_NAME, echo=False
28 )
29
30 db_session = scoped_session(sessionmaker(autocommit=False,
31 autoflush=False,
32 bind=engine))
33 Base = declarative_base()
34 Base.query = db_session.query_property()
35
36
37 class Source(Base):
38 __tablename__ = 'sources'
39 id = Column(Integer, primary_key=True)
40 filesystem_id = Column(String(96), unique=True)
41 journalist_designation = Column(String(255), nullable=False)
42 flagged = Column(Boolean, default=False)
43 last_updated = Column(DateTime, default=datetime.datetime.now)
44
45 # sources are "pending" and don't get displayed to journalists until they submit something
46 pending = Column(Boolean, default=True)
47
48 # keep track of how many interactions have happened, for filenames
49 interaction_count = Column(Integer, default=0, nullable=False)
50
51 def __init__(self, filesystem_id=None, journalist_designation=None):
52 self.filesystem_id = filesystem_id
53 self.journalist_designation = journalist_designation
54
55 def __repr__(self):
56 return '<Source %r>' % (self.journalist_designation)
57
58 def journalist_filename(self):
59 valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'
60 return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])
61
62 class Submission(Base):
63 __tablename__ = 'submissions'
64 id = Column(Integer, primary_key=True)
65 source_id = Column(Integer, ForeignKey('sources.id'))
66 source = relationship("Source", backref=backref('submissions', order_by=id))
67 filename = Column(String(255), nullable=False)
68 size = Column(Integer, nullable=False)
69
70 def __init__(self, source, filename):
71 self.source_id = source.id
72 self.filename = filename
73 self.size = os.stat(store.path(source.filesystem_id, filename)).st_size
74
75 def __repr__(self):
76 return '<Submission %r>' % (self.filename)
77
78
79 # Declare (or import) models before init_db
80 def init_db():
81 Base.metadata.create_all(bind=engine)
82
83
[end of securedrop/db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/db.py b/securedrop/db.py
--- a/securedrop/db.py
+++ b/securedrop/db.py
@@ -59,6 +59,19 @@
valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'
return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])
+ def documents_messages_count(self):
+ try:
+ return self.docs_msgs_count
+ except AttributeError:
+ self.docs_msgs_count = {'messages': 0, 'documents': 0}
+ for submission in self.submissions:
+ if submission.filename.endswith('msg.gpg'):
+ self.docs_msgs_count['messages'] += 1
+ elif submission.filename.endswith('doc.zip.gpg'):
+ self.docs_msgs_count['documents'] += 1
+ return self.docs_msgs_count
+
+
class Submission(Base):
__tablename__ = 'submissions'
id = Column(Integer, primary_key=True)
| {"golden_diff": "diff --git a/securedrop/db.py b/securedrop/db.py\n--- a/securedrop/db.py\n+++ b/securedrop/db.py\n@@ -59,6 +59,19 @@\n valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'\n return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])\n \n+ def documents_messages_count(self):\n+ try:\n+ return self.docs_msgs_count\n+ except AttributeError:\n+ self.docs_msgs_count = {'messages': 0, 'documents': 0}\n+ for submission in self.submissions:\n+ if submission.filename.endswith('msg.gpg'):\n+ self.docs_msgs_count['messages'] += 1\n+ elif submission.filename.endswith('doc.zip.gpg'):\n+ self.docs_msgs_count['documents'] += 1\n+ return self.docs_msgs_count\n+\n+\n class Submission(Base):\n __tablename__ = 'submissions'\n id = Column(Integer, primary_key=True)\n", "issue": "Display number of docs and messages per source in source list\n> At the moment each source in the list displays: source codename, last updated. It would be helpful to also see: total # of messages/docs.\n\nExtracted from #322\n\n", "before_files": [{"content": "import os\nimport datetime\n\nfrom sqlalchemy import create_engine, ForeignKey\nfrom sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Boolean, DateTime\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport config\nimport crypto_util\nimport store\n\n# http://flask.pocoo.org/docs/patterns/sqlalchemy/\n\nif config.DATABASE_ENGINE == \"sqlite\":\n engine = create_engine(\n config.DATABASE_ENGINE + \":///\" +\n config.DATABASE_FILE\n )\nelse:\n engine = create_engine(\n config.DATABASE_ENGINE + '://' +\n config.DATABASE_USERNAME + ':' +\n config.DATABASE_PASSWORD + '@' +\n config.DATABASE_HOST + '/' +\n config.DATABASE_NAME, echo=False\n )\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\n\nclass Source(Base):\n __tablename__ = 'sources'\n id = Column(Integer, primary_key=True)\n filesystem_id = Column(String(96), unique=True)\n journalist_designation = Column(String(255), nullable=False)\n flagged = Column(Boolean, default=False)\n last_updated = Column(DateTime, default=datetime.datetime.now)\n \n # sources are \"pending\" and don't get displayed to journalists until they submit something\n pending = Column(Boolean, default=True)\n\n # keep track of how many interactions have happened, for filenames\n interaction_count = Column(Integer, default=0, nullable=False)\n\n def __init__(self, filesystem_id=None, journalist_designation=None):\n self.filesystem_id = filesystem_id\n self.journalist_designation = journalist_designation\n\n def __repr__(self):\n return '<Source %r>' % (self.journalist_designation)\n\n def journalist_filename(self):\n valid_chars = 'abcdefghijklmnopqrstuvwxyz1234567890-_'\n return ''.join([c for c in self.journalist_designation.lower().replace(' ', '_') if c in valid_chars])\n\nclass Submission(Base):\n __tablename__ = 'submissions'\n id = Column(Integer, primary_key=True)\n source_id = Column(Integer, ForeignKey('sources.id'))\n source = relationship(\"Source\", backref=backref('submissions', order_by=id))\n filename = Column(String(255), nullable=False)\n size = Column(Integer, nullable=False)\n\n def __init__(self, source, filename):\n self.source_id = source.id\n self.filename = filename\n self.size = os.stat(store.path(source.filesystem_id, 
filename)).st_size\n\n def __repr__(self):\n return '<Submission %r>' % (self.filename)\n\n\n# Declare (or import) models before init_db\ndef init_db():\n Base.metadata.create_all(bind=engine)\n\n", "path": "securedrop/db.py"}]} | 1,372 | 227 |
gh_patches_debug_34089 | rasdani/github-patches | git_diff | microsoft__nni-3416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wrong field name in ExperimentConfig, "accessor" should be "assessor"
**Environment**:
- NNI version: 2.0
- NNI mode (local|remote|pai): local
- Client OS: Ubuntu 20.10
- Server OS (for remote mode only):
- Python version: 3.8.5
- PyTorch/TensorFlow version: pytorch 1.9.0a+a62b0de
- Is conda/virtualenv/venv used?: conda 4.9.2
- Is running in Docker?: No
**Log message**:
- nnimanager.log: None
- dispatcher.log: None
- nnictl stdout and stderr:
```
WARNING: Validation with V1 schema failed. Trying to convert from V2 format...
ERROR: Conversion from v2 format failed: ValueError('ExperimentConfig: Unrecognized fields assessor')
ERROR: Config in v1 format validation failed. KeyError('trial')
```
**What issue was met, what's expected?**:
In `ExperimentConfig`, field `accessor` should be `assessor`.
**How to reproduce it?**:
A V2 config with 'assessor'
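For illustration, a minimal sketch of such a config built through the Python API (the import path and all field values here are assumed from the module shown below, not taken from the report; the same field handling underlies the YAML/nnictl path):

```python
from nni.experiment.config import ExperimentConfig, AlgorithmConfig  # path assumed

# Using the documented spelling "assessor" fails, because the dataclass in
# common.py declares the field as "accessor".
config = ExperimentConfig(
    training_service_platform="local",
    trial_command="python trial.py",          # hypothetical trial script
    trial_code_directory=".",
    trial_concurrency=1,
    search_space={"lr": {"_type": "loguniform", "_value": [1e-4, 1e-1]}},
    tuner=AlgorithmConfig(name="TPE"),
    assessor=AlgorithmConfig(name="Medianstop"),
)
# Expected with the buggy field name:
# ValueError: ExperimentConfig: Unrecognized fields assessor
```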
**Additional information**:
Fix by renaming all `accessor` in `nni.experiment.config.common.py` to `assessor`.
There are some `accessor` occurrences in the docs as well.
</issue>
<code>
[start of nni/experiment/config/common.py]
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 from dataclasses import dataclass
5 from pathlib import Path
6 from typing import Any, Dict, List, Optional, Union
7
8 from .base import ConfigBase, PathLike
9 from . import util
10
11 __all__ = [
12 'ExperimentConfig',
13 'AlgorithmConfig',
14 'CustomAlgorithmConfig',
15 'TrainingServiceConfig',
16 ]
17
18
19 @dataclass(init=False)
20 class _AlgorithmConfig(ConfigBase):
21 name: Optional[str] = None
22 class_name: Optional[str] = None
23 code_directory: Optional[PathLike] = None
24 class_args: Optional[Dict[str, Any]] = None
25
26 def validate(self):
27 super().validate()
28 _validate_algo(self)
29
30
31 @dataclass(init=False)
32 class AlgorithmConfig(_AlgorithmConfig):
33 name: str
34 class_args: Optional[Dict[str, Any]] = None
35
36
37 @dataclass(init=False)
38 class CustomAlgorithmConfig(_AlgorithmConfig):
39 class_name: str
40 class_directory: Optional[PathLike] = None
41 class_args: Optional[Dict[str, Any]] = None
42
43
44 class TrainingServiceConfig(ConfigBase):
45 platform: str
46
47
48 @dataclass(init=False)
49 class ExperimentConfig(ConfigBase):
50 experiment_name: Optional[str] = None
51 search_space_file: Optional[PathLike] = None
52 search_space: Any = None
53 trial_command: str
54 trial_code_directory: PathLike = '.'
55 trial_concurrency: int
56 trial_gpu_number: Optional[int] = None
57 max_experiment_duration: Optional[str] = None
58 max_trial_number: Optional[int] = None
59 nni_manager_ip: Optional[str] = None
60 use_annotation: bool = False
61 debug: bool = False
62 log_level: Optional[str] = None
63 experiment_working_directory: Optional[PathLike] = None
64 tuner_gpu_indices: Optional[Union[List[int], str]] = None
65 tuner: Optional[_AlgorithmConfig] = None
66 accessor: Optional[_AlgorithmConfig] = None
67 advisor: Optional[_AlgorithmConfig] = None
68 training_service: Union[TrainingServiceConfig, List[TrainingServiceConfig]]
69
70 def __init__(self, training_service_platform: Optional[Union[str, List[str]]] = None, **kwargs):
71 kwargs = util.case_insensitive(kwargs)
72 if training_service_platform is not None:
73 assert 'trainingservice' not in kwargs
74 kwargs['trainingservice'] = util.training_service_config_factory(platform = training_service_platform)
75 elif isinstance(kwargs.get('trainingservice'), (dict, list)):
76 # dict means a single training service
77 # list means hybrid training service
78 kwargs['trainingservice'] = util.training_service_config_factory(config = kwargs['trainingservice'])
79 else:
80 raise RuntimeError('Unsupported Training service configuration!')
81 super().__init__(**kwargs)
82
83 def validate(self, initialized_tuner: bool = False) -> None:
84 super().validate()
85 if initialized_tuner:
86 _validate_for_exp(self)
87 else:
88 _validate_for_nnictl(self)
89 if self.trial_gpu_number and hasattr(self.training_service, 'use_active_gpu'):
90 if self.training_service.use_active_gpu is None:
91 raise ValueError('Please set "use_active_gpu"')
92
93 ## End of public API ##
94
95 @property
96 def _canonical_rules(self):
97 return _canonical_rules
98
99 @property
100 def _validation_rules(self):
101 return _validation_rules
102
103
104 _canonical_rules = {
105 'search_space_file': util.canonical_path,
106 'trial_code_directory': util.canonical_path,
107 'max_experiment_duration': lambda value: f'{util.parse_time(value)}s' if value is not None else None,
108 'experiment_working_directory': util.canonical_path,
109 'tuner_gpu_indices': lambda value: [int(idx) for idx in value.split(',')] if isinstance(value, str) else value
110 }
111
112 _validation_rules = {
113 'search_space_file': lambda value: (Path(value).is_file(), f'"{value}" does not exist or is not regular file'),
114 'trial_code_directory': lambda value: (Path(value).is_dir(), f'"{value}" does not exist or is not directory'),
115 'trial_concurrency': lambda value: value > 0,
116 'trial_gpu_number': lambda value: value >= 0,
117 'max_experiment_duration': lambda value: util.parse_time(value) > 0,
118 'max_trial_number': lambda value: value > 0,
119 'log_level': lambda value: value in ["trace", "debug", "info", "warning", "error", "fatal"],
120 'tuner_gpu_indices': lambda value: all(i >= 0 for i in value) and len(value) == len(set(value)),
121 'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class')
122 }
123
124 def _validate_for_exp(config: ExperimentConfig) -> None:
125 # validate experiment for nni.Experiment, where tuner is already initialized outside
126 if config.use_annotation:
127 raise ValueError('ExperimentConfig: annotation is not supported in this mode')
128 if util.count(config.search_space, config.search_space_file) != 1:
129 raise ValueError('ExperimentConfig: search_space and search_space_file must be set one')
130 if util.count(config.tuner, config.accessor, config.advisor) != 0:
131 raise ValueError('ExperimentConfig: tuner, accessor, and advisor must not be set in for this mode')
132 if config.tuner_gpu_indices is not None:
133 raise ValueError('ExperimentConfig: tuner_gpu_indices is not supported in this mode')
134
135 def _validate_for_nnictl(config: ExperimentConfig) -> None:
136 # validate experiment for normal launching approach
137 if config.use_annotation:
138 if util.count(config.search_space, config.search_space_file) != 0:
139 raise ValueError('ExperimentConfig: search_space and search_space_file must not be set with annotationn')
140 else:
141 if util.count(config.search_space, config.search_space_file) != 1:
142 raise ValueError('ExperimentConfig: search_space and search_space_file must be set one')
143 if util.count(config.tuner, config.advisor) != 1:
144 raise ValueError('ExperimentConfig: tuner and advisor must be set one')
145
146 def _validate_algo(algo: AlgorithmConfig) -> None:
147 if algo.name is None:
148 if algo.class_name is None:
149 raise ValueError('Missing algorithm name')
150 if algo.code_directory is not None and not Path(algo.code_directory).is_dir():
151 raise ValueError(f'code_directory "{algo.code_directory}" does not exist or is not directory')
152 else:
153 if algo.class_name is not None or algo.code_directory is not None:
154 raise ValueError(f'When name is set for registered algorithm, class_name and code_directory cannot be used')
155 # TODO: verify algorithm installation and class args
156
[end of nni/experiment/config/common.py]
[start of nni/assessor.py]
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 """
5 Assessor analyzes trial's intermediate results (e.g., periodically evaluated accuracy on test dataset)
6 to tell whether this trial can be early stopped or not.
7
8 See :class:`Assessor`' specification and ``docs/en_US/assessors.rst`` for details.
9 """
10
11 from enum import Enum
12 import logging
13
14 from .recoverable import Recoverable
15
16 __all__ = ['AssessResult', 'Assessor']
17
18 _logger = logging.getLogger(__name__)
19
20
21 class AssessResult(Enum):
22 """
23 Enum class for :meth:`Assessor.assess_trial` return value.
24 """
25
26 Good = True
27 """The trial works well."""
28
29 Bad = False
30 """The trial works poorly and should be early stopped."""
31
32
33 class Assessor(Recoverable):
34 """
35 Assessor analyzes trial's intermediate results (e.g., periodically evaluated accuracy on test dataset)
36 to tell whether this trial can be early stopped or not.
37
38 This is the abstract base class for all assessors.
39 Early stopping algorithms should inherit this class and override :meth:`assess_trial` method,
40 which receives intermediate results from trials and give an assessing result.
41
42 If :meth:`assess_trial` returns :obj:`AssessResult.Bad` for a trial,
43 it hints NNI framework that the trial is likely to result in a poor final accuracy,
44 and therefore should be killed to save resource.
45
46 If an accessor want's to be notified when a trial ends, it can also override :meth:`trial_end`.
47
48 To write a new assessor, you can reference :class:`~nni.medianstop_assessor.MedianstopAssessor`'s code as an example.
49
50 See Also
51 --------
52 Builtin assessors:
53 :class:`~nni.algorithms.hpo.medianstop_assessor.MedianstopAssessor`
54 :class:`~nni.algorithms.hpo.curvefitting_assessor.CurvefittingAssessor`
55 """
56
57 def assess_trial(self, trial_job_id, trial_history):
58 """
59 Abstract method for determining whether a trial should be killed. Must override.
60
61 The NNI framework has little guarantee on ``trial_history``.
62 This method is not guaranteed to be invoked for each time ``trial_history`` get updated.
63 It is also possible that a trial's history keeps updating after receiving a bad result.
64 And if the trial failed and retried, ``trial_history`` may be inconsistent with its previous value.
65
66 The only guarantee is that ``trial_history`` is always growing.
67 It will not be empty and will always be longer than previous value.
68
69 This is an example of how :meth:`assess_trial` get invoked sequentially:
70
71 ::
72
73 trial_job_id | trial_history | return value
74 ------------ | --------------- | ------------
75 Trial_A | [1.0, 2.0] | Good
76 Trial_B | [1.5, 1.3] | Bad
77 Trial_B | [1.5, 1.3, 1.9] | Good
78 Trial_A | [0.9, 1.8, 2.3] | Good
79
80 Parameters
81 ----------
82 trial_job_id : str
83 Unique identifier of the trial.
84 trial_history : list
85 Intermediate results of this trial. The element type is decided by trial code.
86
87 Returns
88 -------
89 AssessResult
90 :obj:`AssessResult.Good` or :obj:`AssessResult.Bad`.
91 """
92 raise NotImplementedError('Assessor: assess_trial not implemented')
93
94 def trial_end(self, trial_job_id, success):
95 """
96 Abstract method invoked when a trial is completed or terminated. Do nothing by default.
97
98 Parameters
99 ----------
100 trial_job_id : str
101 Unique identifier of the trial.
102 success : bool
103 True if the trial successfully completed; False if failed or terminated.
104 """
105
106 def load_checkpoint(self):
107 """
108 Internal API under revising, not recommended for end users.
109 """
110 checkpoin_path = self.get_checkpoint_path()
111 _logger.info('Load checkpoint ignored by assessor, checkpoint path: %s', checkpoin_path)
112
113 def save_checkpoint(self):
114 """
115 Internal API under revising, not recommended for end users.
116 """
117 checkpoin_path = self.get_checkpoint_path()
118 _logger.info('Save checkpoint ignored by assessor, checkpoint path: %s', checkpoin_path)
119
120 def _on_exit(self):
121 pass
122
123 def _on_error(self):
124 pass
125
[end of nni/assessor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nni/assessor.py b/nni/assessor.py
--- a/nni/assessor.py
+++ b/nni/assessor.py
@@ -43,7 +43,7 @@
it hints NNI framework that the trial is likely to result in a poor final accuracy,
and therefore should be killed to save resource.
- If an accessor want's to be notified when a trial ends, it can also override :meth:`trial_end`.
+ If an assessor want's to be notified when a trial ends, it can also override :meth:`trial_end`.
To write a new assessor, you can reference :class:`~nni.medianstop_assessor.MedianstopAssessor`'s code as an example.
diff --git a/nni/experiment/config/common.py b/nni/experiment/config/common.py
--- a/nni/experiment/config/common.py
+++ b/nni/experiment/config/common.py
@@ -63,7 +63,7 @@
experiment_working_directory: Optional[PathLike] = None
tuner_gpu_indices: Optional[Union[List[int], str]] = None
tuner: Optional[_AlgorithmConfig] = None
- accessor: Optional[_AlgorithmConfig] = None
+ assessor: Optional[_AlgorithmConfig] = None
advisor: Optional[_AlgorithmConfig] = None
training_service: Union[TrainingServiceConfig, List[TrainingServiceConfig]]
@@ -127,8 +127,8 @@
raise ValueError('ExperimentConfig: annotation is not supported in this mode')
if util.count(config.search_space, config.search_space_file) != 1:
raise ValueError('ExperimentConfig: search_space and search_space_file must be set one')
- if util.count(config.tuner, config.accessor, config.advisor) != 0:
- raise ValueError('ExperimentConfig: tuner, accessor, and advisor must not be set in for this mode')
+ if util.count(config.tuner, config.assessor, config.advisor) != 0:
+ raise ValueError('ExperimentConfig: tuner, assessor, and advisor must not be set in for this mode')
if config.tuner_gpu_indices is not None:
raise ValueError('ExperimentConfig: tuner_gpu_indices is not supported in this mode')
| {"golden_diff": "diff --git a/nni/assessor.py b/nni/assessor.py\n--- a/nni/assessor.py\n+++ b/nni/assessor.py\n@@ -43,7 +43,7 @@\n it hints NNI framework that the trial is likely to result in a poor final accuracy,\n and therefore should be killed to save resource.\n \n- If an accessor want's to be notified when a trial ends, it can also override :meth:`trial_end`.\n+ If an assessor want's to be notified when a trial ends, it can also override :meth:`trial_end`.\n \n To write a new assessor, you can reference :class:`~nni.medianstop_assessor.MedianstopAssessor`'s code as an example.\n \ndiff --git a/nni/experiment/config/common.py b/nni/experiment/config/common.py\n--- a/nni/experiment/config/common.py\n+++ b/nni/experiment/config/common.py\n@@ -63,7 +63,7 @@\n experiment_working_directory: Optional[PathLike] = None\n tuner_gpu_indices: Optional[Union[List[int], str]] = None\n tuner: Optional[_AlgorithmConfig] = None\n- accessor: Optional[_AlgorithmConfig] = None\n+ assessor: Optional[_AlgorithmConfig] = None\n advisor: Optional[_AlgorithmConfig] = None\n training_service: Union[TrainingServiceConfig, List[TrainingServiceConfig]]\n \n@@ -127,8 +127,8 @@\n raise ValueError('ExperimentConfig: annotation is not supported in this mode')\n if util.count(config.search_space, config.search_space_file) != 1:\n raise ValueError('ExperimentConfig: search_space and search_space_file must be set one')\n- if util.count(config.tuner, config.accessor, config.advisor) != 0:\n- raise ValueError('ExperimentConfig: tuner, accessor, and advisor must not be set in for this mode')\n+ if util.count(config.tuner, config.assessor, config.advisor) != 0:\n+ raise ValueError('ExperimentConfig: tuner, assessor, and advisor must not be set in for this mode')\n if config.tuner_gpu_indices is not None:\n raise ValueError('ExperimentConfig: tuner_gpu_indices is not supported in this mode')\n", "issue": "wrong field name in ExperimentConfig, \"accessor\" should be \"assessor\"\n**Environment**:\r\n- NNI version: 2.0\r\n- NNI mode (local|remote|pai): local\r\n- Client OS: Ubuntu 20.10\r\n- Server OS (for remote mode only):\r\n- Python version: 3.8.5\r\n- PyTorch/TensorFlow version: pytorch 1.9.0a+a62b0de\r\n- Is conda/virtualenv/venv used?: conda 4.9.2\r\n- Is running in Docker?: No\r\n\r\n**Log message**:\r\n - nnimanager.log: None\r\n - dispatcher.log: None\r\n - nnictl stdout and stderr:\r\n```\r\nWARNING: Validation with V1 schema failed. Trying to convert from V2 format...\r\nERROR: Conversion from v2 format failed: ValueError('ExperimentConfig: Unrecognized fields assessor')\r\nERROR: Config in v1 format validation failed. KeyError('trial')\r\n```\r\n\r\n**What issue meet, what's expected?**:\r\nIn `ExperimentConfig`, field `accessor` should be `assessor`.\r\n\r\n**How to reproduce it?**: \r\nA V2 config with 'assessor'\r\n\r\n**Additional information**:\r\nFix by renaming all `accessor` in `nni.experiment.config.common.py` to `assessor`.\r\nThere are some `accessor` in doc as well.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom .base import ConfigBase, PathLike\nfrom . 
import util\n\n__all__ = [\n 'ExperimentConfig',\n 'AlgorithmConfig',\n 'CustomAlgorithmConfig',\n 'TrainingServiceConfig',\n]\n\n\n@dataclass(init=False)\nclass _AlgorithmConfig(ConfigBase):\n name: Optional[str] = None\n class_name: Optional[str] = None\n code_directory: Optional[PathLike] = None\n class_args: Optional[Dict[str, Any]] = None\n\n def validate(self):\n super().validate()\n _validate_algo(self)\n\n\n@dataclass(init=False)\nclass AlgorithmConfig(_AlgorithmConfig):\n name: str\n class_args: Optional[Dict[str, Any]] = None\n\n\n@dataclass(init=False)\nclass CustomAlgorithmConfig(_AlgorithmConfig):\n class_name: str\n class_directory: Optional[PathLike] = None\n class_args: Optional[Dict[str, Any]] = None\n\n\nclass TrainingServiceConfig(ConfigBase):\n platform: str\n\n\n@dataclass(init=False)\nclass ExperimentConfig(ConfigBase):\n experiment_name: Optional[str] = None\n search_space_file: Optional[PathLike] = None\n search_space: Any = None\n trial_command: str\n trial_code_directory: PathLike = '.'\n trial_concurrency: int\n trial_gpu_number: Optional[int] = None\n max_experiment_duration: Optional[str] = None\n max_trial_number: Optional[int] = None\n nni_manager_ip: Optional[str] = None\n use_annotation: bool = False\n debug: bool = False\n log_level: Optional[str] = None\n experiment_working_directory: Optional[PathLike] = None\n tuner_gpu_indices: Optional[Union[List[int], str]] = None\n tuner: Optional[_AlgorithmConfig] = None\n accessor: Optional[_AlgorithmConfig] = None\n advisor: Optional[_AlgorithmConfig] = None\n training_service: Union[TrainingServiceConfig, List[TrainingServiceConfig]]\n\n def __init__(self, training_service_platform: Optional[Union[str, List[str]]] = None, **kwargs):\n kwargs = util.case_insensitive(kwargs)\n if training_service_platform is not None:\n assert 'trainingservice' not in kwargs\n kwargs['trainingservice'] = util.training_service_config_factory(platform = training_service_platform)\n elif isinstance(kwargs.get('trainingservice'), (dict, list)):\n # dict means a single training service\n # list means hybrid training service\n kwargs['trainingservice'] = util.training_service_config_factory(config = kwargs['trainingservice'])\n else:\n raise RuntimeError('Unsupported Training service configuration!')\n super().__init__(**kwargs)\n\n def validate(self, initialized_tuner: bool = False) -> None:\n super().validate()\n if initialized_tuner:\n _validate_for_exp(self)\n else:\n _validate_for_nnictl(self)\n if self.trial_gpu_number and hasattr(self.training_service, 'use_active_gpu'):\n if self.training_service.use_active_gpu is None:\n raise ValueError('Please set \"use_active_gpu\"')\n\n## End of public API ##\n\n @property\n def _canonical_rules(self):\n return _canonical_rules\n\n @property\n def _validation_rules(self):\n return _validation_rules\n\n\n_canonical_rules = {\n 'search_space_file': util.canonical_path,\n 'trial_code_directory': util.canonical_path,\n 'max_experiment_duration': lambda value: f'{util.parse_time(value)}s' if value is not None else None,\n 'experiment_working_directory': util.canonical_path,\n 'tuner_gpu_indices': lambda value: [int(idx) for idx in value.split(',')] if isinstance(value, str) else value\n}\n\n_validation_rules = {\n 'search_space_file': lambda value: (Path(value).is_file(), f'\"{value}\" does not exist or is not regular file'),\n 'trial_code_directory': lambda value: (Path(value).is_dir(), f'\"{value}\" does not exist or is not directory'),\n 'trial_concurrency': lambda value: value > 0,\n 
'trial_gpu_number': lambda value: value >= 0,\n 'max_experiment_duration': lambda value: util.parse_time(value) > 0,\n 'max_trial_number': lambda value: value > 0,\n 'log_level': lambda value: value in [\"trace\", \"debug\", \"info\", \"warning\", \"error\", \"fatal\"],\n 'tuner_gpu_indices': lambda value: all(i >= 0 for i in value) and len(value) == len(set(value)),\n 'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class')\n}\n\ndef _validate_for_exp(config: ExperimentConfig) -> None:\n # validate experiment for nni.Experiment, where tuner is already initialized outside\n if config.use_annotation:\n raise ValueError('ExperimentConfig: annotation is not supported in this mode')\n if util.count(config.search_space, config.search_space_file) != 1:\n raise ValueError('ExperimentConfig: search_space and search_space_file must be set one')\n if util.count(config.tuner, config.accessor, config.advisor) != 0:\n raise ValueError('ExperimentConfig: tuner, accessor, and advisor must not be set in for this mode')\n if config.tuner_gpu_indices is not None:\n raise ValueError('ExperimentConfig: tuner_gpu_indices is not supported in this mode')\n\ndef _validate_for_nnictl(config: ExperimentConfig) -> None:\n # validate experiment for normal launching approach\n if config.use_annotation:\n if util.count(config.search_space, config.search_space_file) != 0:\n raise ValueError('ExperimentConfig: search_space and search_space_file must not be set with annotationn')\n else:\n if util.count(config.search_space, config.search_space_file) != 1:\n raise ValueError('ExperimentConfig: search_space and search_space_file must be set one')\n if util.count(config.tuner, config.advisor) != 1:\n raise ValueError('ExperimentConfig: tuner and advisor must be set one')\n\ndef _validate_algo(algo: AlgorithmConfig) -> None:\n if algo.name is None:\n if algo.class_name is None:\n raise ValueError('Missing algorithm name')\n if algo.code_directory is not None and not Path(algo.code_directory).is_dir():\n raise ValueError(f'code_directory \"{algo.code_directory}\" does not exist or is not directory')\n else:\n if algo.class_name is not None or algo.code_directory is not None:\n raise ValueError(f'When name is set for registered algorithm, class_name and code_directory cannot be used')\n # TODO: verify algorithm installation and class args\n", "path": "nni/experiment/config/common.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n\"\"\"\nAssessor analyzes trial's intermediate results (e.g., periodically evaluated accuracy on test dataset)\nto tell whether this trial can be early stopped or not.\n\nSee :class:`Assessor`' specification and ``docs/en_US/assessors.rst`` for details.\n\"\"\"\n\nfrom enum import Enum\nimport logging\n\nfrom .recoverable import Recoverable\n\n__all__ = ['AssessResult', 'Assessor']\n\n_logger = logging.getLogger(__name__)\n\n\nclass AssessResult(Enum):\n \"\"\"\n Enum class for :meth:`Assessor.assess_trial` return value.\n \"\"\"\n\n Good = True\n \"\"\"The trial works well.\"\"\"\n\n Bad = False\n \"\"\"The trial works poorly and should be early stopped.\"\"\"\n\n\nclass Assessor(Recoverable):\n \"\"\"\n Assessor analyzes trial's intermediate results (e.g., periodically evaluated accuracy on test dataset)\n to tell whether this trial can be early stopped or not.\n\n This is the abstract base class for all assessors.\n Early stopping algorithms should inherit this class and override :meth:`assess_trial` 
method,\n which receives intermediate results from trials and give an assessing result.\n\n If :meth:`assess_trial` returns :obj:`AssessResult.Bad` for a trial,\n it hints NNI framework that the trial is likely to result in a poor final accuracy,\n and therefore should be killed to save resource.\n\n If an accessor want's to be notified when a trial ends, it can also override :meth:`trial_end`.\n\n To write a new assessor, you can reference :class:`~nni.medianstop_assessor.MedianstopAssessor`'s code as an example.\n\n See Also\n --------\n Builtin assessors:\n :class:`~nni.algorithms.hpo.medianstop_assessor.MedianstopAssessor`\n :class:`~nni.algorithms.hpo.curvefitting_assessor.CurvefittingAssessor`\n \"\"\"\n\n def assess_trial(self, trial_job_id, trial_history):\n \"\"\"\n Abstract method for determining whether a trial should be killed. Must override.\n\n The NNI framework has little guarantee on ``trial_history``.\n This method is not guaranteed to be invoked for each time ``trial_history`` get updated.\n It is also possible that a trial's history keeps updating after receiving a bad result.\n And if the trial failed and retried, ``trial_history`` may be inconsistent with its previous value.\n\n The only guarantee is that ``trial_history`` is always growing.\n It will not be empty and will always be longer than previous value.\n\n This is an example of how :meth:`assess_trial` get invoked sequentially:\n\n ::\n\n trial_job_id | trial_history | return value\n ------------ | --------------- | ------------\n Trial_A | [1.0, 2.0] | Good\n Trial_B | [1.5, 1.3] | Bad\n Trial_B | [1.5, 1.3, 1.9] | Good\n Trial_A | [0.9, 1.8, 2.3] | Good\n\n Parameters\n ----------\n trial_job_id : str\n Unique identifier of the trial.\n trial_history : list\n Intermediate results of this trial. The element type is decided by trial code.\n\n Returns\n -------\n AssessResult\n :obj:`AssessResult.Good` or :obj:`AssessResult.Bad`.\n \"\"\"\n raise NotImplementedError('Assessor: assess_trial not implemented')\n\n def trial_end(self, trial_job_id, success):\n \"\"\"\n Abstract method invoked when a trial is completed or terminated. Do nothing by default.\n\n Parameters\n ----------\n trial_job_id : str\n Unique identifier of the trial.\n success : bool\n True if the trial successfully completed; False if failed or terminated.\n \"\"\"\n\n def load_checkpoint(self):\n \"\"\"\n Internal API under revising, not recommended for end users.\n \"\"\"\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by assessor, checkpoint path: %s', checkpoin_path)\n\n def save_checkpoint(self):\n \"\"\"\n Internal API under revising, not recommended for end users.\n \"\"\"\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by assessor, checkpoint path: %s', checkpoin_path)\n\n def _on_exit(self):\n pass\n\n def _on_error(self):\n pass\n", "path": "nni/assessor.py"}]} | 3,954 | 499 |
gh_patches_debug_29713 | rasdani/github-patches | git_diff | biopython__biopython-3653 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
editable/develop install warning: You may be importing Biopython from inside the source tree.
### Setup
I am reporting a problem with Biopython version, Python version, and operating
system as follows:
```pycon
$ python
Python 3.7.4 (default, Aug 13 2019, 15:17:50)
[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import sys; print(sys.version)
3.7.4 (default, Aug 13 2019, 15:17:50)
[Clang 4.0.1 (tags/RELEASE_401/final)]
>>> import platform; print(platform.python_implementation()); print(platform.platform())
CPython
Darwin-18.7.0-x86_64-i386-64bit
>>> import Bio; print(Bio.__version__)
/Users/xxx/repositories/biopython/Bio/__init__.py:128: BiopythonWarning: You may be importing Biopython from inside the source tree. This is bad practice and might lead to downstream issues. In particular, you might encounter ImportErrors due to missing compiled C extensions. We recommend that you try running your code from outside the source tree. If you are outside the source tree then you have a setup.py file in an unexpected directory: /Users/xxx/repositories/biopython.
format(_parent_dir), BiopythonWarning)
1.75.dev0
```
(*Please copy and run the above in your Python, and copy-and-paste the output*)
### Expected behaviour
No warning ``BiopythonWarning: You may be importing Biopython from inside the source tree. ...``
### Actual behaviour
Noisy warning as above.
### Steps to reproduce
Using pip to install in editable (develop) mode:
```
$ pip install -h
...
-e, --editable <path/url> Install a project in editable mode (i.e. setuptools "develop mode") from a local
project path or a VCS url.
...
```
```
$ git clone [email protected]:biopython/biopython.git
$ cd biopython
$ pip install -e .
```
This is an unfortunate side effect of the changes in #2007, intended to help with confusing messages when C code was not compiled.
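One way to keep the warning for true in-source-tree imports while silencing it for editable installs is to additionally check whether the package was installed in setuptools develop mode. The sketch below assumes (as the patch later in this entry does) that `pip install -e .` leaves a `biopython.egg-link` file in a site-packages directory:

```python
import os
import site


def _is_editable_install(project: str = "biopython") -> bool:
    """Best-effort check for a setuptools develop-mode ("pip install -e") install."""
    candidates = list(site.getsitepackages()) + [site.getusersitepackages()]
    # Develop mode is assumed to drop "<project>.egg-link" into site-packages;
    # if none is found, treat this as a plain in-source-tree import and warn.
    return any(
        os.path.isfile(os.path.join(p, project + ".egg-link")) for p in candidates
    )
```

If such a check returns True, the `BiopythonWarning` emitted in `Bio/__init__.py` can be skipped, which is essentially what the patch at the end of this entry does.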
</issue>
<code>
[start of Bio/__init__.py]
1 # Copyright 1999-2003 by Jeffrey Chang. All rights reserved.
2 #
3 # This file is part of the Biopython distribution and governed by your
4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
5 # Please see the LICENSE file that should have been included as part of this
6 # package.
7 """Collection of modules for dealing with biological data in Python.
8
9 The Biopython Project is an international association of developers
10 of freely available Python tools for computational molecular biology.
11
12 https://biopython.org
13 """
14
15 import os
16 import warnings
17
18 __version__ = "1.80.dev0"
19
20
21 class MissingExternalDependencyError(Exception):
22 """Missing an external dependency.
23
24 Used for things like missing command line tools. Important for our unit
25 tests to allow skipping tests with missing external dependencies.
26 """
27
28
29 class MissingPythonDependencyError(MissingExternalDependencyError, ImportError):
30 """Missing an external python dependency (subclass of ImportError).
31
32 Used for missing Python modules (rather than just a typical ImportError).
33 Important for our unit tests to allow skipping tests with missing external
34 python dependencies, while also allowing the exception to be caught as an
35 ImportError.
36 """
37
38
39 class StreamModeError(ValueError):
40 """Incorrect stream mode (text vs binary).
41
42 This error should be raised when a stream (file or file-like object)
43 argument is in text mode while the receiving function expects binary mode,
44 or vice versa.
45 """
46
47
48 class BiopythonWarning(Warning):
49 """Biopython warning.
50
51 Biopython should use this warning (or subclasses of it), making it easy to
52 silence all our warning messages should you wish to:
53
54 >>> import warnings
55 >>> from Bio import BiopythonWarning
56 >>> warnings.simplefilter('ignore', BiopythonWarning)
57
58 Consult the warnings module documentation for more details.
59 """
60
61
62 class BiopythonParserWarning(BiopythonWarning):
63 """Biopython parser warning.
64
65 Some in-valid data files cannot be parsed and will trigger an exception.
66 Where a reasonable interpretation is possible, Biopython will issue this
67 warning to indicate a potential problem. To silence these warnings, use:
68
69 >>> import warnings
70 >>> from Bio import BiopythonParserWarning
71 >>> warnings.simplefilter('ignore', BiopythonParserWarning)
72
73 Consult the warnings module documentation for more details.
74 """
75
76
77 class BiopythonDeprecationWarning(BiopythonWarning):
78 """Biopython deprecation warning.
79
80 Biopython uses this warning instead of the built in DeprecationWarning
81 since those are ignored by default since Python 2.7.
82
83 To silence all our deprecation warning messages, use:
84
85 >>> import warnings
86 >>> from Bio import BiopythonDeprecationWarning
87 >>> warnings.simplefilter('ignore', BiopythonDeprecationWarning)
88
89 Code marked as deprecated is likely to be removed in a future version
90 of Biopython. To avoid removal of this code, please contact the Biopython
91 developers via the mailing list or GitHub.
92 """
93
94
95 class BiopythonExperimentalWarning(BiopythonWarning):
96 """Biopython experimental code warning.
97
98 Biopython uses this warning for experimental code ('alpha' or 'beta'
99 level code) which is released as part of the standard releases to mark
100 sub-modules or functions for early adopters to test & give feedback.
101
102 Code issuing this warning is likely to change (or even be removed) in
103 a subsequent release of Biopython. Such code should NOT be used for
104 production/stable code. It should only be used if:
105
106 - You are running the latest release of Biopython, or ideally the
107 latest code from our repository.
108 - You are subscribed to the biopython-dev mailing list to provide
109 feedback on this code, and to be alerted of changes to it.
110
111 If all goes well, experimental code would be promoted to stable in
112 a subsequent release, and this warning removed from it.
113 """
114
115
116 _parent_dir = os.path.dirname(os.path.dirname(__file__))
117 if os.path.exists(os.path.join(_parent_dir, "setup.py")):
118 warnings.warn(
119 "You may be importing Biopython from inside the source tree."
120 " This is bad practice and might lead to downstream issues."
121 " In particular, you might encounter ImportErrors due to"
122 " missing compiled C extensions. We recommend that you"
123 " try running your code from outside the source tree."
124 " If you are outside the source tree then you have a"
125 " setup.py file in an unexpected directory: " + _parent_dir,
126 BiopythonWarning,
127 )
128 # See #PR 2007 and issue #1991 for discussion on this warning:
129 # https://github.com/biopython/biopython/pull/2007
130
[end of Bio/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Bio/__init__.py b/Bio/__init__.py
--- a/Bio/__init__.py
+++ b/Bio/__init__.py
@@ -115,15 +115,33 @@
_parent_dir = os.path.dirname(os.path.dirname(__file__))
if os.path.exists(os.path.join(_parent_dir, "setup.py")):
- warnings.warn(
- "You may be importing Biopython from inside the source tree."
- " This is bad practice and might lead to downstream issues."
- " In particular, you might encounter ImportErrors due to"
- " missing compiled C extensions. We recommend that you"
- " try running your code from outside the source tree."
- " If you are outside the source tree then you have a"
- " setup.py file in an unexpected directory: " + _parent_dir,
- BiopythonWarning,
- )
-# See #PR 2007 and issue #1991 for discussion on this warning:
-# https://github.com/biopython/biopython/pull/2007
+ # Looks like we are running from our source directory,
+ # a bad idea except if installed in development mode.
+ #
+ # See https://setuptools.readthedocs.io/en/latest/userguide/development_mode.html
+ # Do we have .../site-packages/biopython.egg-link present?
+ #
+ # Note "pip install -e ." currently calls setuptools internally
+ import site
+
+ _dev_mode = False
+ for _p in site.getsitepackages():
+ if os.path.isfile(os.path.join(_p, "biopython.egg-link")):
+ _dev_mode = True
+ break
+ # Also check the user specific site packages
+ if not _dev_mode and os.path.isfile(
+ os.path.join(site.getusersitepackages(), "biopython.egg-link")
+ ):
+ _dev_mode = True
+ if not _dev_mode:
+ warnings.warn(
+ "You may be importing Biopython from inside the source tree."
+ " This is bad practice and might lead to downstream issues."
+ " In particular, you might encounter ImportErrors due to"
+ " missing compiled C extensions. We recommend that you"
+ " try running your code from outside the source tree."
+ " If you are outside the source tree then you have a"
+ " setup.py file in an unexpected directory: " + _parent_dir,
+ BiopythonWarning,
+ )
| {"golden_diff": "diff --git a/Bio/__init__.py b/Bio/__init__.py\n--- a/Bio/__init__.py\n+++ b/Bio/__init__.py\n@@ -115,15 +115,33 @@\n \n _parent_dir = os.path.dirname(os.path.dirname(__file__))\n if os.path.exists(os.path.join(_parent_dir, \"setup.py\")):\n- warnings.warn(\n- \"You may be importing Biopython from inside the source tree.\"\n- \" This is bad practice and might lead to downstream issues.\"\n- \" In particular, you might encounter ImportErrors due to\"\n- \" missing compiled C extensions. We recommend that you\"\n- \" try running your code from outside the source tree.\"\n- \" If you are outside the source tree then you have a\"\n- \" setup.py file in an unexpected directory: \" + _parent_dir,\n- BiopythonWarning,\n- )\n-# See #PR 2007 and issue #1991 for discussion on this warning:\n-# https://github.com/biopython/biopython/pull/2007\n+ # Looks like we are running from our source directory,\n+ # a bad idea except if installed in development mode.\n+ #\n+ # See https://setuptools.readthedocs.io/en/latest/userguide/development_mode.html\n+ # Do we have .../site-packages/biopython.egg-link present?\n+ #\n+ # Note \"pip install -e .\" currently calls setuptools internally\n+ import site\n+\n+ _dev_mode = False\n+ for _p in site.getsitepackages():\n+ if os.path.isfile(os.path.join(_p, \"biopython.egg-link\")):\n+ _dev_mode = True\n+ break\n+ # Also check the user specific site packages\n+ if not _dev_mode and os.path.isfile(\n+ os.path.join(site.getusersitepackages(), \"biopython.egg-link\")\n+ ):\n+ _dev_mode = True\n+ if not _dev_mode:\n+ warnings.warn(\n+ \"You may be importing Biopython from inside the source tree.\"\n+ \" This is bad practice and might lead to downstream issues.\"\n+ \" In particular, you might encounter ImportErrors due to\"\n+ \" missing compiled C extensions. We recommend that you\"\n+ \" try running your code from outside the source tree.\"\n+ \" If you are outside the source tree then you have a\"\n+ \" setup.py file in an unexpected directory: \" + _parent_dir,\n+ BiopythonWarning,\n+ )\n", "issue": "editable/develop install warning: You may be importing Biopython from inside the source tree. \n### Setup\r\n\r\nI am reporting a problem with Biopython version, Python version, and operating\r\nsystem as follows:\r\n\r\n```pycon\r\n$ python\r\nPython 3.7.4 (default, Aug 13 2019, 15:17:50) \r\n[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import sys; print(sys.version)\r\n3.7.4 (default, Aug 13 2019, 15:17:50) \r\n[Clang 4.0.1 (tags/RELEASE_401/final)]\r\n>>> import platform; print(platform.python_implementation()); print(platform.platform())\r\nCPython\r\nDarwin-18.7.0-x86_64-i386-64bit\r\n>>> import Bio; print(Bio.__version__)\r\n/Users/xxx/repositories/biopython/Bio/__init__.py:128: BiopythonWarning: You may be importing Biopython from inside the source tree. This is bad practice and might lead to downstream issues. In particular, you might encounter ImportErrors due to missing compiled C extensions. We recommend that you try running your code from outside the source tree. 
If you are outside the source tree then you have a setup.py file in an unexpected directory: /Users/xxx/repositories/biopython.\r\n format(_parent_dir), BiopythonWarning)\r\n1.75.dev0\r\n```\r\n\r\n(*Please copy and run the above in your Python, and copy-and-paste the output*)\r\n\r\n### Expected behaviour\r\n\r\nNo warning ``BiopythonWarning: You may be importing Biopython from inside the source tree. ...``\r\n\r\n### Actual behaviour\r\n\r\nNoisy warning as above.\r\n\r\n### Steps to reproduce\r\n\r\nUsing pip to install in editable (develop) mode:\r\n\r\n```\r\n$ pip install -h\r\n...\r\n -e, --editable <path/url> Install a project in editable mode (i.e. setuptools \"develop mode\") from a local\r\n project path or a VCS url.\r\n...\r\n```\r\n\r\n```\r\n$ git clone [email protected]:biopython/biopython.git\r\n$ cd biopython\r\n$ pip install -e .\r\n```\r\n\r\nThis is an unfortunate side effect of the changes in #2007, intended to help with confusing messages when C code was not compiled.\n", "before_files": [{"content": "# Copyright 1999-2003 by Jeffrey Chang. All rights reserved.\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\"\"\"Collection of modules for dealing with biological data in Python.\n\nThe Biopython Project is an international association of developers\nof freely available Python tools for computational molecular biology.\n\nhttps://biopython.org\n\"\"\"\n\nimport os\nimport warnings\n\n__version__ = \"1.80.dev0\"\n\n\nclass MissingExternalDependencyError(Exception):\n \"\"\"Missing an external dependency.\n\n Used for things like missing command line tools. Important for our unit\n tests to allow skipping tests with missing external dependencies.\n \"\"\"\n\n\nclass MissingPythonDependencyError(MissingExternalDependencyError, ImportError):\n \"\"\"Missing an external python dependency (subclass of ImportError).\n\n Used for missing Python modules (rather than just a typical ImportError).\n Important for our unit tests to allow skipping tests with missing external\n python dependencies, while also allowing the exception to be caught as an\n ImportError.\n \"\"\"\n\n\nclass StreamModeError(ValueError):\n \"\"\"Incorrect stream mode (text vs binary).\n\n This error should be raised when a stream (file or file-like object)\n argument is in text mode while the receiving function expects binary mode,\n or vice versa.\n \"\"\"\n\n\nclass BiopythonWarning(Warning):\n \"\"\"Biopython warning.\n\n Biopython should use this warning (or subclasses of it), making it easy to\n silence all our warning messages should you wish to:\n\n >>> import warnings\n >>> from Bio import BiopythonWarning\n >>> warnings.simplefilter('ignore', BiopythonWarning)\n\n Consult the warnings module documentation for more details.\n \"\"\"\n\n\nclass BiopythonParserWarning(BiopythonWarning):\n \"\"\"Biopython parser warning.\n\n Some in-valid data files cannot be parsed and will trigger an exception.\n Where a reasonable interpretation is possible, Biopython will issue this\n warning to indicate a potential problem. 
To silence these warnings, use:\n\n >>> import warnings\n >>> from Bio import BiopythonParserWarning\n >>> warnings.simplefilter('ignore', BiopythonParserWarning)\n\n Consult the warnings module documentation for more details.\n \"\"\"\n\n\nclass BiopythonDeprecationWarning(BiopythonWarning):\n \"\"\"Biopython deprecation warning.\n\n Biopython uses this warning instead of the built in DeprecationWarning\n since those are ignored by default since Python 2.7.\n\n To silence all our deprecation warning messages, use:\n\n >>> import warnings\n >>> from Bio import BiopythonDeprecationWarning\n >>> warnings.simplefilter('ignore', BiopythonDeprecationWarning)\n\n Code marked as deprecated is likely to be removed in a future version\n of Biopython. To avoid removal of this code, please contact the Biopython\n developers via the mailing list or GitHub.\n \"\"\"\n\n\nclass BiopythonExperimentalWarning(BiopythonWarning):\n \"\"\"Biopython experimental code warning.\n\n Biopython uses this warning for experimental code ('alpha' or 'beta'\n level code) which is released as part of the standard releases to mark\n sub-modules or functions for early adopters to test & give feedback.\n\n Code issuing this warning is likely to change (or even be removed) in\n a subsequent release of Biopython. Such code should NOT be used for\n production/stable code. It should only be used if:\n\n - You are running the latest release of Biopython, or ideally the\n latest code from our repository.\n - You are subscribed to the biopython-dev mailing list to provide\n feedback on this code, and to be alerted of changes to it.\n\n If all goes well, experimental code would be promoted to stable in\n a subsequent release, and this warning removed from it.\n \"\"\"\n\n\n_parent_dir = os.path.dirname(os.path.dirname(__file__))\nif os.path.exists(os.path.join(_parent_dir, \"setup.py\")):\n warnings.warn(\n \"You may be importing Biopython from inside the source tree.\"\n \" This is bad practice and might lead to downstream issues.\"\n \" In particular, you might encounter ImportErrors due to\"\n \" missing compiled C extensions. We recommend that you\"\n \" try running your code from outside the source tree.\"\n \" If you are outside the source tree then you have a\"\n \" setup.py file in an unexpected directory: \" + _parent_dir,\n BiopythonWarning,\n )\n# See #PR 2007 and issue #1991 for discussion on this warning:\n# https://github.com/biopython/biopython/pull/2007\n", "path": "Bio/__init__.py"}]} | 2,430 | 567 |
gh_patches_debug_38619 | rasdani/github-patches | git_diff | sktime__sktime-1600 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor issue #1043
Fixes #1043
Removed methods load_UCR_UEA_dataset & _load_dataset from datasets/base.py and moved them to utils/data_io.py
</issue>
<code>
[start of sktime/transformations/panel/signature_based/_signature_method.py]
1 # -*- coding: utf-8 -*-
2 from sklearn.pipeline import Pipeline
3 from sktime.transformations.base import _PanelToTabularTransformer
4 from sktime.transformations.panel.signature_based._compute import (
5 _WindowSignatureTransform,
6 )
7 from sktime.transformations.panel.signature_based._augmentations import (
8 _make_augmentation_pipeline,
9 )
10 from sktime.transformations.panel.signature_based._checks import (
11 _handle_sktime_signatures,
12 )
13
14
15 class SignatureTransformer(_PanelToTabularTransformer):
16 """Transformation class from the signature method.
17
18 Follows the methodology laid out in the paper:
19 "A Generalised Signature Method for Multivariate Time Series"
20
21 Parameters
22 ----------
23 augmentation_list: tuple of strings, contains the augmentations to be
24 applied before application of the signature transform.
25 window_name: str, The name of the window transform to apply.
26 window_depth: int, The depth of the dyadic window. (Active only if
27 `window_name == 'dyadic'`).
28 window_length: int, The length of the sliding/expanding window. (Active
29 only if `window_name in ['sliding, 'expanding']`.
30 window_step: int, The step of the sliding/expanding window. (Active
31 only if `window_name in ['sliding, 'expanding']`.
32 rescaling: str or None, The method of signature rescaling.
33 sig_tfm: str, String to specify the type of signature transform. One of:
34 ['signature', 'logsignature']).
35 depth: int, Signature truncation depth.
36
37 Attributes
38 ----------
39 signature_method: sklearn.Pipeline, A sklearn pipeline object that contains
40 all the steps to extract the signature features.
41 """
42
43 def __init__(
44 self,
45 augmentation_list=("basepoint", "addtime"),
46 window_name="dyadic",
47 window_depth=3,
48 window_length=None,
49 window_step=None,
50 rescaling=None,
51 sig_tfm="signature",
52 depth=4,
53 ):
54 super(SignatureTransformer, self).__init__()
55 self.augmentation_list = augmentation_list
56 self.window_name = window_name
57 self.window_depth = window_depth
58 self.window_length = window_length
59 self.window_step = window_step
60 self.rescaling = rescaling
61 self.sig_tfm = sig_tfm
62 self.depth = depth
63
64 self.setup_feature_pipeline()
65
66 def _assertions(self):
67 """Some assertions to run on initialisation."""
68 assert not all(
69 [self.sig_tfm == "logsignature", self.rescaling == "post"]
70 ), "Cannot have post rescaling with the logsignature."
71
72 def setup_feature_pipeline(self):
73 """Sets up the signature method as an sklearn pipeline."""
74 augmentation_step = _make_augmentation_pipeline(self.augmentation_list)
75 transform_step = _WindowSignatureTransform(
76 window_name=self.window_name,
77 window_depth=self.window_depth,
78 window_length=self.window_length,
79 window_step=self.window_step,
80 sig_tfm=self.sig_tfm,
81 sig_depth=self.depth,
82 rescaling=self.rescaling,
83 )
84
85 # The so-called 'signature method' as defined in the reference paper
86 self.signature_method = Pipeline(
87 [
88 ("augmentations", augmentation_step),
89 ("window_and_transform", transform_step),
90 ]
91 )
92
93 @_handle_sktime_signatures(check_fitted=False)
94 def fit(self, data, labels=None):
95 self.signature_method.fit(data, labels)
96 self._is_fitted = True
97 return self
98
99 @_handle_sktime_signatures(check_fitted=True)
100 def transform(self, data, labels=None):
101 return self.signature_method.transform(data)
102
[end of sktime/transformations/panel/signature_based/_signature_method.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/transformations/panel/signature_based/_signature_method.py b/sktime/transformations/panel/signature_based/_signature_method.py
--- a/sktime/transformations/panel/signature_based/_signature_method.py
+++ b/sktime/transformations/panel/signature_based/_signature_method.py
@@ -1,15 +1,16 @@
# -*- coding: utf-8 -*-
from sklearn.pipeline import Pipeline
+
from sktime.transformations.base import _PanelToTabularTransformer
-from sktime.transformations.panel.signature_based._compute import (
- _WindowSignatureTransform,
-)
from sktime.transformations.panel.signature_based._augmentations import (
_make_augmentation_pipeline,
)
from sktime.transformations.panel.signature_based._checks import (
_handle_sktime_signatures,
)
+from sktime.transformations.panel.signature_based._compute import (
+ _WindowSignatureTransform,
+)
class SignatureTransformer(_PanelToTabularTransformer):
@@ -63,14 +64,8 @@
self.setup_feature_pipeline()
- def _assertions(self):
- """Some assertions to run on initialisation."""
- assert not all(
- [self.sig_tfm == "logsignature", self.rescaling == "post"]
- ), "Cannot have post rescaling with the logsignature."
-
def setup_feature_pipeline(self):
- """Sets up the signature method as an sklearn pipeline."""
+ """Set up the signature method as an sklearn pipeline."""
augmentation_step = _make_augmentation_pipeline(self.augmentation_list)
transform_step = _WindowSignatureTransform(
window_name=self.window_name,
@@ -92,10 +87,38 @@
@_handle_sktime_signatures(check_fitted=False)
def fit(self, data, labels=None):
+ """Fit to data, then transform it.
+
+ Parameters
+ ----------
+ data: pd.Dataframe or np.ndarray (3d array)
+ Data to transform.
+ labels: np.ndarray (1d array) or pd.series or list
+ Labels for the data.
+
+ Returns
+ -------
+ pd.Dataframe or np.ndarray or pd.series
+ Transformed data.
+ """
self.signature_method.fit(data, labels)
self._is_fitted = True
return self
@_handle_sktime_signatures(check_fitted=True)
def transform(self, data, labels=None):
+ """Transform the class from the signature method.
+
+ Parameters
+ ----------
+ data: pd.Dataframe or np.ndarray (3d array)
+ Data to transform.
+ labels: np.ndarray (1d array) or pd.series or list
+ Labels for the data.
+
+ Returns
+ -------
+ pd.Dataframe or np.ndarray or pd.series
+ Transformed data.
+ """
return self.signature_method.transform(data)
| {"golden_diff": "diff --git a/sktime/transformations/panel/signature_based/_signature_method.py b/sktime/transformations/panel/signature_based/_signature_method.py\n--- a/sktime/transformations/panel/signature_based/_signature_method.py\n+++ b/sktime/transformations/panel/signature_based/_signature_method.py\n@@ -1,15 +1,16 @@\n # -*- coding: utf-8 -*-\n from sklearn.pipeline import Pipeline\n+\n from sktime.transformations.base import _PanelToTabularTransformer\n-from sktime.transformations.panel.signature_based._compute import (\n- _WindowSignatureTransform,\n-)\n from sktime.transformations.panel.signature_based._augmentations import (\n _make_augmentation_pipeline,\n )\n from sktime.transformations.panel.signature_based._checks import (\n _handle_sktime_signatures,\n )\n+from sktime.transformations.panel.signature_based._compute import (\n+ _WindowSignatureTransform,\n+)\n \n \n class SignatureTransformer(_PanelToTabularTransformer):\n@@ -63,14 +64,8 @@\n \n self.setup_feature_pipeline()\n \n- def _assertions(self):\n- \"\"\"Some assertions to run on initialisation.\"\"\"\n- assert not all(\n- [self.sig_tfm == \"logsignature\", self.rescaling == \"post\"]\n- ), \"Cannot have post rescaling with the logsignature.\"\n-\n def setup_feature_pipeline(self):\n- \"\"\"Sets up the signature method as an sklearn pipeline.\"\"\"\n+ \"\"\"Set up the signature method as an sklearn pipeline.\"\"\"\n augmentation_step = _make_augmentation_pipeline(self.augmentation_list)\n transform_step = _WindowSignatureTransform(\n window_name=self.window_name,\n@@ -92,10 +87,38 @@\n \n @_handle_sktime_signatures(check_fitted=False)\n def fit(self, data, labels=None):\n+ \"\"\"Fit to data, then transform it.\n+\n+ Parameters\n+ ----------\n+ data: pd.Dataframe or np.ndarray (3d array)\n+ Data to transform.\n+ labels: np.ndarray (1d array) or pd.series or list\n+ Labels for the data.\n+\n+ Returns\n+ -------\n+ pd.Dataframe or np.ndarray or pd.series\n+ Transformed data.\n+ \"\"\"\n self.signature_method.fit(data, labels)\n self._is_fitted = True\n return self\n \n @_handle_sktime_signatures(check_fitted=True)\n def transform(self, data, labels=None):\n+ \"\"\"Transform the class from the signature method.\n+\n+ Parameters\n+ ----------\n+ data: pd.Dataframe or np.ndarray (3d array)\n+ Data to transform.\n+ labels: np.ndarray (1d array) or pd.series or list\n+ Labels for the data.\n+\n+ Returns\n+ -------\n+ pd.Dataframe or np.ndarray or pd.series\n+ Transformed data.\n+ \"\"\"\n return self.signature_method.transform(data)\n", "issue": "Refactor issue #1043\nFixes #1043 \r\n\r\nRemoved methods load_UCR_UEA_dataset & _load_dataset from datasets/base.py and moved them to utils/data_io.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom sklearn.pipeline import Pipeline\nfrom sktime.transformations.base import _PanelToTabularTransformer\nfrom sktime.transformations.panel.signature_based._compute import (\n _WindowSignatureTransform,\n)\nfrom sktime.transformations.panel.signature_based._augmentations import (\n _make_augmentation_pipeline,\n)\nfrom sktime.transformations.panel.signature_based._checks import (\n _handle_sktime_signatures,\n)\n\n\nclass SignatureTransformer(_PanelToTabularTransformer):\n \"\"\"Transformation class from the signature method.\n\n Follows the methodology laid out in the paper:\n \"A Generalised Signature Method for Multivariate Time Series\"\n\n Parameters\n ----------\n augmentation_list: tuple of strings, contains the augmentations to be\n applied before application of 
the signature transform.\n window_name: str, The name of the window transform to apply.\n window_depth: int, The depth of the dyadic window. (Active only if\n `window_name == 'dyadic'`).\n window_length: int, The length of the sliding/expanding window. (Active\n only if `window_name in ['sliding, 'expanding']`.\n window_step: int, The step of the sliding/expanding window. (Active\n only if `window_name in ['sliding, 'expanding']`.\n rescaling: str or None, The method of signature rescaling.\n sig_tfm: str, String to specify the type of signature transform. One of:\n ['signature', 'logsignature']).\n depth: int, Signature truncation depth.\n\n Attributes\n ----------\n signature_method: sklearn.Pipeline, A sklearn pipeline object that contains\n all the steps to extract the signature features.\n \"\"\"\n\n def __init__(\n self,\n augmentation_list=(\"basepoint\", \"addtime\"),\n window_name=\"dyadic\",\n window_depth=3,\n window_length=None,\n window_step=None,\n rescaling=None,\n sig_tfm=\"signature\",\n depth=4,\n ):\n super(SignatureTransformer, self).__init__()\n self.augmentation_list = augmentation_list\n self.window_name = window_name\n self.window_depth = window_depth\n self.window_length = window_length\n self.window_step = window_step\n self.rescaling = rescaling\n self.sig_tfm = sig_tfm\n self.depth = depth\n\n self.setup_feature_pipeline()\n\n def _assertions(self):\n \"\"\"Some assertions to run on initialisation.\"\"\"\n assert not all(\n [self.sig_tfm == \"logsignature\", self.rescaling == \"post\"]\n ), \"Cannot have post rescaling with the logsignature.\"\n\n def setup_feature_pipeline(self):\n \"\"\"Sets up the signature method as an sklearn pipeline.\"\"\"\n augmentation_step = _make_augmentation_pipeline(self.augmentation_list)\n transform_step = _WindowSignatureTransform(\n window_name=self.window_name,\n window_depth=self.window_depth,\n window_length=self.window_length,\n window_step=self.window_step,\n sig_tfm=self.sig_tfm,\n sig_depth=self.depth,\n rescaling=self.rescaling,\n )\n\n # The so-called 'signature method' as defined in the reference paper\n self.signature_method = Pipeline(\n [\n (\"augmentations\", augmentation_step),\n (\"window_and_transform\", transform_step),\n ]\n )\n\n @_handle_sktime_signatures(check_fitted=False)\n def fit(self, data, labels=None):\n self.signature_method.fit(data, labels)\n self._is_fitted = True\n return self\n\n @_handle_sktime_signatures(check_fitted=True)\n def transform(self, data, labels=None):\n return self.signature_method.transform(data)\n", "path": "sktime/transformations/panel/signature_based/_signature_method.py"}]} | 1,579 | 629 |
gh_patches_debug_11144 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1119 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Configuration object stores ints as floats
The global configuration object will store `"2"` as `2.0` instead of `2`. Fix that.
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Simple configuration manager
17
18 This is a configuration manager for OpenTelemetry. It reads configuration
19 values from environment variables prefixed with ``OTEL_`` (for environment
20 variables that apply to any OpenTelemetry implementation) or with
21 ``OTEL_PYTHON_`` (for environment variables that are specific to the Python
22 implementation of OpenTelemetry) whose characters are only alphanumeric
23 characters and unserscores, except for the first character after ``OTEL_`` or
24 ``OTEL_PYTHON_`` which must not be a number.
25
26 For example, these environment variables will be read:
27
28 1. ``OTEL_SOMETHING``
29 2. ``OTEL_SOMETHING_ELSE_``
30 3. ``OTEL_SOMETHING_ELSE_AND__ELSE``
31 4. ``OTEL_SOMETHING_ELSE_AND_else``
32 5. ``OTEL_SOMETHING_ELSE_AND_else2``
33
34 These won't:
35
36 1. ``OPENTELEMETRY_PYTH_SOMETHING``
37 2. ``OTEL_2_SOMETHING_AND__ELSE``
38 3. ``OTEL_SOMETHING_%_ELSE``
39
40 The values stored in the environment variables can be found in an instance of
41 ``opentelemetry.configuration.Configuration``. This class can be instantiated
42 freely because instantiating it returns always the same object.
43
44 For example, if the environment variable
45 ``OTEL_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
46 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
47
48 Non defined attributes will always return ``None``. This is intended to make it
49 easier to use the ``Configuration`` object in actual code, because it won't be
50 necessary to check for the attribute to be defined first.
51
52 Environment variables used by OpenTelemetry
53 -------------------------------------------
54
55 1. OTEL_PYTHON_METER_PROVIDER
56 2. OTEL_PYTHON_TRACER_PROVIDER
57
58 The value of these environment variables should be the name of the entry point
59 that points to the class that implements either provider. This OpenTelemetry
60 API package provides one entry point for each, which can be found in the
61 setup.py file::
62
63 entry_points={
64 ...
65 "opentelemetry_meter_provider": [
66 "default_meter_provider = "
67 "opentelemetry.metrics:DefaultMeterProvider"
68 ],
69 "opentelemetry_tracer_provider": [
70 "default_tracer_provider = "
71 "opentelemetry.trace:DefaultTracerProvider"
72 ],
73 }
74
75 To use the meter provider above, then the
76 ``OTEL_PYTHON_METER_PROVIDER`` should be set to
77 ``"default_meter_provider"`` (this is not actually necessary since the
78 OpenTelemetry API provided providers are the default ones used if no
79 configuration is found in the environment variables).
80
81 Configuration values that are exactly ``"True"`` or ``"False"`` will be
82 converted to its boolean values of ``True`` and ``False`` respectively.
83
84 Configuration values that can be casted to integers or floats will be casted.
85
86 This object can be used by any OpenTelemetry component, native or external.
87 For that reason, the ``Configuration`` object is designed to be immutable.
88 If a component would change the value of one of the ``Configuration`` object
89 attributes then another component that relied on that value may break, leading
90 to bugs that are very hard to debug. To avoid this situation, the preferred
91 approach for components that need a different value than the one provided by
92 the ``Configuration`` object is to implement a mechanism that allows the user
93 to override this value instead of changing it.
94 """
95
96 from os import environ
97 from re import fullmatch
98 from typing import ClassVar, Dict, Optional, TypeVar, Union
99
100 ConfigValue = Union[str, bool, int, float]
101 _T = TypeVar("_T", ConfigValue, Optional[ConfigValue])
102
103
104 class Configuration:
105 _instance = None # type: ClassVar[Optional[Configuration]]
106 _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]
107
108 def __new__(cls) -> "Configuration":
109 if cls._instance is not None:
110 instance = cls._instance
111 else:
112
113 instance = super().__new__(cls)
114 for key, value_str in environ.items():
115
116 match = fullmatch(r"OTEL_(PYTHON_)?([A-Za-z_][\w_]*)", key)
117
118 if match is not None:
119
120 key = match.group(2)
121 value = value_str # type: ConfigValue
122
123 if value_str == "True":
124 value = True
125 elif value_str == "False":
126 value = False
127 else:
128 try:
129 value = int(value_str)
130 except ValueError:
131 pass
132 try:
133 value = float(value_str)
134 except ValueError:
135 pass
136
137 instance._config_map[key] = value
138
139 cls._instance = instance
140
141 return instance
142
143 def __getattr__(self, name: str) -> Optional[ConfigValue]:
144 return self._config_map.get(name)
145
146 def __setattr__(self, name: str, value: ConfigValue) -> None:
147 if name not in self._config_map.keys():
148 self._config_map[name] = value
149 else:
150 raise AttributeError(name)
151
152 def get(self, name: str, default: _T) -> _T:
153 """Use this typed method for dynamic access instead of `getattr`
154
155 :rtype: str or bool or int or float or None
156 """
157 return self._config_map.get(name, default)
158
159 @classmethod
160 def _reset(cls) -> None:
161 """
162 This method "resets" the global configuration attributes
163
164 It is not intended to be used by production code but by testing code
165 only.
166 """
167
168 if cls._instance:
169 cls._instance._config_map.clear() # pylint: disable=protected-access
170 cls._instance = None
171
[end of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -128,11 +128,10 @@
try:
value = int(value_str)
except ValueError:
- pass
- try:
- value = float(value_str)
- except ValueError:
- pass
+ try:
+ value = float(value_str)
+ except ValueError:
+ pass
instance._config_map[key] = value
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -128,11 +128,10 @@\n try:\n value = int(value_str)\n except ValueError:\n- pass\n- try:\n- value = float(value_str)\n- except ValueError:\n- pass\n+ try:\n+ value = float(value_str)\n+ except ValueError:\n+ pass\n \n instance._config_map[key] = value\n", "issue": "Configuration object stores ints as floats\nThe global configuration object will store `\"2\"` as `2.0` instead of `2`. Fix that.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OTEL_`` (for environment\nvariables that apply to any OpenTelemetry implementation) or with\n``OTEL_PYTHON_`` (for environment variables that are specific to the Python\nimplementation of OpenTelemetry) whose characters are only alphanumeric\ncharacters and unserscores, except for the first character after ``OTEL_`` or\n``OTEL_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OTEL_SOMETHING``\n2. ``OTEL_SOMETHING_ELSE_``\n3. ``OTEL_SOMETHING_ELSE_AND__ELSE``\n4. ``OTEL_SOMETHING_ELSE_AND_else``\n5. ``OTEL_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OTEL_2_SOMETHING_AND__ELSE``\n3. ``OTEL_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OTEL_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OTEL_PYTHON_METER_PROVIDER\n2. OTEL_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. 
This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OTEL_PYTHON_METER_PROVIDER`` should be set to\n``\"default_meter_provider\"`` (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\nConfiguration values that are exactly ``\"True\"`` or ``\"False\"`` will be\nconverted to its boolean values of ``True`` and ``False`` respectively.\n\nConfiguration values that can be casted to integers or floats will be casted.\n\nThis object can be used by any OpenTelemetry component, native or external.\nFor that reason, the ``Configuration`` object is designed to be immutable.\nIf a component would change the value of one of the ``Configuration`` object\nattributes then another component that relied on that value may break, leading\nto bugs that are very hard to debug. To avoid this situation, the preferred\napproach for components that need a different value than the one provided by\nthe ``Configuration`` object is to implement a mechanism that allows the user\nto override this value instead of changing it.\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\nfrom typing import ClassVar, Dict, Optional, TypeVar, Union\n\nConfigValue = Union[str, bool, int, float]\n_T = TypeVar(\"_T\", ConfigValue, Optional[ConfigValue])\n\n\nclass Configuration:\n _instance = None # type: ClassVar[Optional[Configuration]]\n _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]\n\n def __new__(cls) -> \"Configuration\":\n if cls._instance is not None:\n instance = cls._instance\n else:\n\n instance = super().__new__(cls)\n for key, value_str in environ.items():\n\n match = fullmatch(r\"OTEL_(PYTHON_)?([A-Za-z_][\\w_]*)\", key)\n\n if match is not None:\n\n key = match.group(2)\n value = value_str # type: ConfigValue\n\n if value_str == \"True\":\n value = True\n elif value_str == \"False\":\n value = False\n else:\n try:\n value = int(value_str)\n except ValueError:\n pass\n try:\n value = float(value_str)\n except ValueError:\n pass\n\n instance._config_map[key] = value\n\n cls._instance = instance\n\n return instance\n\n def __getattr__(self, name: str) -> Optional[ConfigValue]:\n return self._config_map.get(name)\n\n def __setattr__(self, name: str, value: ConfigValue) -> None:\n if name not in self._config_map.keys():\n self._config_map[name] = value\n else:\n raise AttributeError(name)\n\n def get(self, name: str, default: _T) -> _T:\n \"\"\"Use this typed method for dynamic access instead of `getattr`\n\n :rtype: str or bool or int or float or None\n \"\"\"\n return self._config_map.get(name, default)\n\n @classmethod\n def _reset(cls) -> None:\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n if cls._instance:\n cls._instance._config_map.clear() # pylint: disable=protected-access\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]} | 2,394 | 150 |
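The bug report and golden diff above hinge on parse order: the original code parses `int()` successfully but then still runs `float()`, overwriting the integer. A minimal, standalone sketch of the corrected nesting is shown below; the helper name `parse_env_value` is illustrative and not part of the OpenTelemetry API.

```python
def parse_env_value(value_str: str):
    """Parse an environment-variable string into bool, int, float, or str."""
    if value_str == "True":
        return True
    if value_str == "False":
        return False
    try:
        return int(value_str)          # "2" -> 2, kept as an int
    except ValueError:
        try:
            return float(value_str)    # only non-integers like "2.5" become floats
        except ValueError:
            return value_str           # anything else stays a plain string


assert parse_env_value("2") == 2 and isinstance(parse_env_value("2"), int)
assert parse_env_value("2.5") == 2.5
assert parse_env_value("True") is True
assert parse_env_value("abc") == "abc"
```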
gh_patches_debug_56181 | rasdani/github-patches | git_diff | TOMToolkit__tom_base-196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing dataclasses
Following the tom_base install instructions, I pip installed the requirements.txt and then tried
> ./manage.py migrate
which ended with the following error:
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed
File "/Users/rstreet/software/tom_base/tom_alerts/urls.py", line 3, in <module>
from tom_alerts.views import BrokerQueryCreateView, BrokerQueryListView, BrokerQueryUpdateView, RunQueryView
File "/Users/rstreet/software/tom_base/tom_alerts/views.py", line 3, in <module>
from tom_alerts.alerts import get_service_class, get_service_classes
File "/Users/rstreet/software/tom_base/tom_alerts/alerts.py", line 5, in <module>
from dataclasses import dataclass
ModuleNotFoundError: No module named 'dataclasses'
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from os import path
3
4 here = path.abspath(path.dirname(__file__))
5 with open(path.join(here, 'README.md'), encoding='utf-8') as f:
6 long_description = f.read()
7
8 setup(
9 name='tomtoolkit',
10 version='1.1.0',
11 description='The TOM Toolkit and base modules',
12 long_description=long_description,
13 long_description_content_type='text/markdown',
14 url='https://tomtoolkit.github.io',
15 author='TOM Toolkit Project',
16 author_email='[email protected]',
17 classifiers=[
18 'Development Status :: 3 - Alpha',
19 'Intended Audience :: Science/Research',
20 'License :: OSI Approved :: BSD License',
21 'Operating System :: OS Independent',
22 'Programming Language :: Python :: 3',
23 'Programming Language :: Python :: 3.7',
24 'Topic :: Scientific/Engineering :: Astronomy',
25 'Topic :: Scientific/Engineering :: Physics'
26 ],
27 keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'],
28 packages=find_packages(),
29 install_requires=[
30 'django',
31 'django-bootstrap4',
32 'django-extensions',
33 'django-filter',
34 'django-contrib-comments',
35 'django-gravatar2',
36 'django-crispy-forms',
37 'django-guardian',
38 'numpy',
39 'python-dateutil',
40 'requests',
41 'astroquery',
42 'astropy',
43 'astroplan',
44 'plotly',
45 'matplotlib',
46 'pillow',
47 'fits2image',
48 'specutils',
49 ],
50 extras_require={
51 'test': ['factory_boy']
52 },
53 include_package_data=True,
54 )
55
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,6 +46,7 @@
'pillow',
'fits2image',
'specutils',
+ "dataclasses; python_version < '3.7'",
],
extras_require={
'test': ['factory_boy']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,6 +46,7 @@\n 'pillow',\n 'fits2image',\n 'specutils',\n+ \"dataclasses; python_version < '3.7'\",\n ],\n extras_require={\n 'test': ['factory_boy']\n", "issue": "Missing dataclasses\nFollowing the tom_base install instructions, I pip installed the requirements.txt and then tried \r\n> ./manage.py migrate\r\n\r\nwhich ended with the following error:\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\r\n File \"/Users/rstreet/software/tom_base/tom_alerts/urls.py\", line 3, in <module>\r\n from tom_alerts.views import BrokerQueryCreateView, BrokerQueryListView, BrokerQueryUpdateView, RunQueryView\r\n File \"/Users/rstreet/software/tom_base/tom_alerts/views.py\", line 3, in <module>\r\n from tom_alerts.alerts import get_service_class, get_service_classes\r\n File \"/Users/rstreet/software/tom_base/tom_alerts/alerts.py\", line 5, in <module>\r\n from dataclasses import dataclass\r\nModuleNotFoundError: No module named 'dataclasses'\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='tomtoolkit',\n version='1.1.0',\n description='The TOM Toolkit and base modules',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://tomtoolkit.github.io',\n author='TOM Toolkit Project',\n author_email='[email protected]',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Physics'\n ],\n keywords=['tomtoolkit', 'astronomy', 'astrophysics', 'cosmology', 'science', 'fits', 'observatory'],\n packages=find_packages(),\n install_requires=[\n 'django',\n 'django-bootstrap4',\n 'django-extensions',\n 'django-filter',\n 'django-contrib-comments',\n 'django-gravatar2',\n 'django-crispy-forms',\n 'django-guardian',\n 'numpy',\n 'python-dateutil',\n 'requests',\n 'astroquery',\n 'astropy',\n 'astroplan',\n 'plotly',\n 'matplotlib',\n 'pillow',\n 'fits2image',\n 'specutils',\n ],\n extras_require={\n 'test': ['factory_boy']\n },\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 1,224 | 77 |
gh_patches_debug_29137 | rasdani/github-patches | git_diff | spack__spack-4584 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
building flex with +lex variant fails
Using an older system (suse 13 with python 2.7.6) and the symlink code in the package fails entirely.
@mjwoods
</issue>
<code>
[start of var/spack/repos/builtin/packages/flex/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26 import os
27
28
29 class Flex(AutotoolsPackage):
30 """Flex is a tool for generating scanners."""
31
32 homepage = "https://github.com/westes/flex"
33 url = "https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz"
34
35 version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')
36 # Problematic version:
37 # See issue #2554; https://github.com/westes/flex/issues/113
38 # version('2.6.2', 'cc6d76c333db7653d5caf423a3335239')
39 version('2.6.1', '05bcd8fb629e0ae130311e8a6106fa82')
40 version('2.6.0', '760be2ee9433e822b6eb65318311c19d')
41 version('2.5.39', '5865e76ac69c05699f476515592750d7')
42
43 variant('lex', default=True,
44 description="Provide symlinks for lex and libl")
45
46 depends_on('bison', type='build')
47 depends_on('[email protected]:', type='build')
48 depends_on('help2man', type='build')
49
50 # Older tarballs don't come with a configure script
51 depends_on('m4', type='build')
52 depends_on('autoconf', type='build', when='@:2.6.0')
53 depends_on('automake', type='build', when='@:2.6.0')
54 depends_on('libtool', type='build', when='@:2.6.0')
55
56 def url_for_version(self, version):
57 url = "https://github.com/westes/flex"
58 if version >= Version('2.6.1'):
59 url += "/releases/download/v{0}/flex-{0}.tar.gz".format(version)
60 elif version == Version('2.6.0'):
61 url += "/archive/v{0}.tar.gz".format(version)
62 elif version >= Version('2.5.37'):
63 url += "/archive/flex-{0}.tar.gz".format(version)
64 else:
65 url += "/archive/flex-{0}.tar.gz".format(version.dashed)
66
67 return url
68
69 @run_after('install')
70 def symlink_lex(self):
71 if self.spec.satisfies('+lex'):
72 dso = dso_suffix
73 for dir, flex, lex in \
74 ((self.prefix.bin, 'flex', 'lex'),
75 (self.prefix.lib, 'libfl.a', 'libl.a'),
76 (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)):
77 with working_dir(dir):
78 if (os.path.isfile(flex) and not
79 os.path.lexists(lex)):
80 symlink(flex, lex)
81
[end of var/spack/repos/builtin/packages/flex/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/flex/package.py b/var/spack/repos/builtin/packages/flex/package.py
--- a/var/spack/repos/builtin/packages/flex/package.py
+++ b/var/spack/repos/builtin/packages/flex/package.py
@@ -32,6 +32,7 @@
homepage = "https://github.com/westes/flex"
url = "https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz"
+ version('2.6.4', '2882e3179748cc9f9c23ec593d6adc8d')
version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')
# Problematic version:
# See issue #2554; https://github.com/westes/flex/issues/113
@@ -68,13 +69,17 @@
@run_after('install')
def symlink_lex(self):
+ """Install symlinks for lex compatibility."""
if self.spec.satisfies('+lex'):
dso = dso_suffix
for dir, flex, lex in \
- ((self.prefix.bin, 'flex', 'lex'),
- (self.prefix.lib, 'libfl.a', 'libl.a'),
- (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)):
- with working_dir(dir):
- if (os.path.isfile(flex) and not
- os.path.lexists(lex)):
- symlink(flex, lex)
+ ((self.prefix.bin, 'flex', 'lex'),
+ (self.prefix.lib, 'libfl.a', 'libl.a'),
+ (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso),
+ (self.prefix.lib64, 'libfl.a', 'libl.a'),
+ (self.prefix.lib64, 'libfl.' + dso, 'libl.' + dso)):
+
+ if os.path.isdir(dir):
+ with working_dir(dir):
+ if (os.path.isfile(flex) and not os.path.lexists(lex)):
+ symlink(flex, lex)
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/flex/package.py b/var/spack/repos/builtin/packages/flex/package.py\n--- a/var/spack/repos/builtin/packages/flex/package.py\n+++ b/var/spack/repos/builtin/packages/flex/package.py\n@@ -32,6 +32,7 @@\n homepage = \"https://github.com/westes/flex\"\n url = \"https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz\"\n \n+ version('2.6.4', '2882e3179748cc9f9c23ec593d6adc8d')\n version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')\n # Problematic version:\n # See issue #2554; https://github.com/westes/flex/issues/113\n@@ -68,13 +69,17 @@\n \n @run_after('install')\n def symlink_lex(self):\n+ \"\"\"Install symlinks for lex compatibility.\"\"\"\n if self.spec.satisfies('+lex'):\n dso = dso_suffix\n for dir, flex, lex in \\\n- ((self.prefix.bin, 'flex', 'lex'),\n- (self.prefix.lib, 'libfl.a', 'libl.a'),\n- (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)):\n- with working_dir(dir):\n- if (os.path.isfile(flex) and not\n- os.path.lexists(lex)):\n- symlink(flex, lex)\n+ ((self.prefix.bin, 'flex', 'lex'),\n+ (self.prefix.lib, 'libfl.a', 'libl.a'),\n+ (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso),\n+ (self.prefix.lib64, 'libfl.a', 'libl.a'),\n+ (self.prefix.lib64, 'libfl.' + dso, 'libl.' + dso)):\n+\n+ if os.path.isdir(dir):\n+ with working_dir(dir):\n+ if (os.path.isfile(flex) and not os.path.lexists(lex)):\n+ symlink(flex, lex)\n", "issue": "building flex with +lex variant fails\nUsing an older system (suse 13 with python 2.7.6) and the symlink code in the package fails entirely.\r\n@mjwoods \r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Flex(AutotoolsPackage):\n \"\"\"Flex is a tool for generating scanners.\"\"\"\n\n homepage = \"https://github.com/westes/flex\"\n url = \"https://github.com/westes/flex/releases/download/v2.6.1/flex-2.6.1.tar.gz\"\n\n version('2.6.3', 'a5f65570cd9107ec8a8ec88f17b31bb1')\n # Problematic version:\n # See issue #2554; https://github.com/westes/flex/issues/113\n # version('2.6.2', 'cc6d76c333db7653d5caf423a3335239')\n version('2.6.1', '05bcd8fb629e0ae130311e8a6106fa82')\n version('2.6.0', '760be2ee9433e822b6eb65318311c19d')\n version('2.5.39', '5865e76ac69c05699f476515592750d7')\n\n variant('lex', default=True,\n description=\"Provide symlinks for lex and libl\")\n\n depends_on('bison', type='build')\n depends_on('[email protected]:', type='build')\n depends_on('help2man', type='build')\n\n # Older tarballs don't come with a configure script\n depends_on('m4', type='build')\n depends_on('autoconf', type='build', when='@:2.6.0')\n depends_on('automake', type='build', when='@:2.6.0')\n depends_on('libtool', type='build', when='@:2.6.0')\n\n def url_for_version(self, version):\n url = \"https://github.com/westes/flex\"\n if version >= Version('2.6.1'):\n url += \"/releases/download/v{0}/flex-{0}.tar.gz\".format(version)\n elif version == Version('2.6.0'):\n url += \"/archive/v{0}.tar.gz\".format(version)\n elif version >= Version('2.5.37'):\n url += \"/archive/flex-{0}.tar.gz\".format(version)\n else:\n url += \"/archive/flex-{0}.tar.gz\".format(version.dashed)\n\n return url\n\n @run_after('install')\n def symlink_lex(self):\n if self.spec.satisfies('+lex'):\n dso = dso_suffix\n for dir, flex, lex in \\\n ((self.prefix.bin, 'flex', 'lex'),\n (self.prefix.lib, 'libfl.a', 'libl.a'),\n (self.prefix.lib, 'libfl.' + dso, 'libl.' + dso)):\n with working_dir(dir):\n if (os.path.isfile(flex) and not\n os.path.lexists(lex)):\n symlink(flex, lex)\n", "path": "var/spack/repos/builtin/packages/flex/package.py"}]} | 1,750 | 524 |
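The failure boils down to the post-install hook entering prefix directories that may not exist on a given system (for example `lib64` versus `lib`), which the golden diff guards with `os.path.isdir`. A plain standard-library sketch of the same guard, using `os.symlink` directly instead of Spack's `working_dir` and `symlink` helpers, could look like this:

```python
import os


def link_lex_aliases(prefix: str, dso: str = "so") -> None:
    """Create lex/libl aliases for flex/libfl, skipping missing directories."""
    candidates = [
        (os.path.join(prefix, "bin"), "flex", "lex"),
        (os.path.join(prefix, "lib"), "libfl.a", "libl.a"),
        (os.path.join(prefix, "lib"), "libfl." + dso, "libl." + dso),
        (os.path.join(prefix, "lib64"), "libfl.a", "libl.a"),
        (os.path.join(prefix, "lib64"), "libfl." + dso, "libl." + dso),
    ]
    for directory, source, alias in candidates:
        if not os.path.isdir(directory):      # e.g. no lib64 on this system
            continue
        source_path = os.path.join(directory, source)
        alias_path = os.path.join(directory, alias)
        if os.path.isfile(source_path) and not os.path.lexists(alias_path):
            os.symlink(source, alias_path)    # relative link inside the directory
```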
gh_patches_debug_15053 | rasdani/github-patches | git_diff | deis__deis-4373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better error message when registration is disabled
When `/deis/controller/registrationMode` is `disabled`, attempt to register returns
```
Registration failed: {"detail":"Authentication credentials were not provided."}
```
This message is misleading. It should explicitly say that registration is disabled.
</issue>
<code>
[start of controller/api/permissions.py]
1 from rest_framework import permissions
2 from django.conf import settings
3 from django.contrib.auth.models import AnonymousUser
4
5 from api import models
6
7
8 def is_app_user(request, obj):
9 if request.user.is_superuser or \
10 isinstance(obj, models.App) and obj.owner == request.user or \
11 hasattr(obj, 'app') and obj.app.owner == request.user:
12 return True
13 elif request.user.has_perm('use_app', obj) or \
14 hasattr(obj, 'app') and request.user.has_perm('use_app', obj.app):
15 return request.method != 'DELETE'
16 else:
17 return False
18
19
20 class IsAnonymous(permissions.BasePermission):
21 """
22 View permission to allow anonymous users.
23 """
24
25 def has_permission(self, request, view):
26 """
27 Return `True` if permission is granted, `False` otherwise.
28 """
29 return type(request.user) is AnonymousUser
30
31
32 class IsOwner(permissions.BasePermission):
33 """
34 Object-level permission to allow only owners of an object to access it.
35 Assumes the model instance has an `owner` attribute.
36 """
37
38 def has_object_permission(self, request, view, obj):
39 if hasattr(obj, 'owner'):
40 return obj.owner == request.user
41 else:
42 return False
43
44
45 class IsOwnerOrAdmin(permissions.BasePermission):
46 """
47 Object-level permission to allow only owners of an object or administrators to access it.
48 Assumes the model instance has an `owner` attribute.
49 """
50 def has_object_permission(self, request, view, obj):
51 if request.user.is_superuser:
52 return True
53 if hasattr(obj, 'owner'):
54 return obj.owner == request.user
55 else:
56 return False
57
58
59 class IsAppUser(permissions.BasePermission):
60 """
61 Object-level permission to allow owners or collaborators to access
62 an app-related model.
63 """
64 def has_object_permission(self, request, view, obj):
65 return is_app_user(request, obj)
66
67
68 class IsAdmin(permissions.BasePermission):
69 """
70 View permission to allow only admins.
71 """
72
73 def has_permission(self, request, view):
74 """
75 Return `True` if permission is granted, `False` otherwise.
76 """
77 return request.user.is_superuser
78
79
80 class IsAdminOrSafeMethod(permissions.BasePermission):
81 """
82 View permission to allow only admins to use unsafe methods
83 including POST, PUT, DELETE.
84
85 This allows
86 """
87
88 def has_permission(self, request, view):
89 """
90 Return `True` if permission is granted, `False` otherwise.
91 """
92 return request.method in permissions.SAFE_METHODS or request.user.is_superuser
93
94
95 class HasRegistrationAuth(permissions.BasePermission):
96 """
97 Checks to see if registration is enabled
98 """
99 def has_permission(self, request, view):
100 """
101 If settings.REGISTRATION_MODE does not exist, such as during a test, return True
102 Return `True` if permission is granted, `False` otherwise.
103 """
104 try:
105 if settings.REGISTRATION_MODE == 'disabled':
106 return False
107 if settings.REGISTRATION_MODE == 'enabled':
108 return True
109 elif settings.REGISTRATION_MODE == 'admin_only':
110 return request.user.is_superuser
111 else:
112 raise Exception("{} is not a valid registation mode"
113 .format(settings.REGISTRATION_MODE))
114 except AttributeError:
115 return True
116
117
118 class HasBuilderAuth(permissions.BasePermission):
119 """
120 View permission to allow builder to perform actions
121 with a special HTTP header
122 """
123
124 def has_permission(self, request, view):
125 """
126 Return `True` if permission is granted, `False` otherwise.
127 """
128 auth_header = request.environ.get('HTTP_X_DEIS_BUILDER_AUTH')
129 if not auth_header:
130 return False
131 return auth_header == settings.BUILDER_KEY
132
133
134 class CanRegenerateToken(permissions.BasePermission):
135 """
136 Checks if a user can regenerate a token
137 """
138
139 def has_permission(self, request, view):
140 """
141 Return `True` if permission is granted, `False` otherwise.
142 """
143 if 'username' in request.data or 'all' in request.data:
144 return request.user.is_superuser
145 else:
146 return True
147
[end of controller/api/permissions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/controller/api/permissions.py b/controller/api/permissions.py
--- a/controller/api/permissions.py
+++ b/controller/api/permissions.py
@@ -1,3 +1,5 @@
+
+from rest_framework import exceptions
from rest_framework import permissions
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
@@ -103,7 +105,7 @@
"""
try:
if settings.REGISTRATION_MODE == 'disabled':
- return False
+ raise exceptions.PermissionDenied('Registration is disabled')
if settings.REGISTRATION_MODE == 'enabled':
return True
elif settings.REGISTRATION_MODE == 'admin_only':
| {"golden_diff": "diff --git a/controller/api/permissions.py b/controller/api/permissions.py\n--- a/controller/api/permissions.py\n+++ b/controller/api/permissions.py\n@@ -1,3 +1,5 @@\n+\n+from rest_framework import exceptions\n from rest_framework import permissions\n from django.conf import settings\n from django.contrib.auth.models import AnonymousUser\n@@ -103,7 +105,7 @@\n \"\"\"\n try:\n if settings.REGISTRATION_MODE == 'disabled':\n- return False\n+ raise exceptions.PermissionDenied('Registration is disabled')\n if settings.REGISTRATION_MODE == 'enabled':\n return True\n elif settings.REGISTRATION_MODE == 'admin_only':\n", "issue": "Better error message when registration is disabled\nWhen `/deis/controller/registrationMode` is `disabled`, attempt to register returns\n\n```\nRegistration failed: {\"detail\":\"Authentication credentials were not provided.\"}\n```\n\nThis message is misleading. It should explicitly say that registration is disabled.\n\n", "before_files": [{"content": "from rest_framework import permissions\nfrom django.conf import settings\nfrom django.contrib.auth.models import AnonymousUser\n\nfrom api import models\n\n\ndef is_app_user(request, obj):\n if request.user.is_superuser or \\\n isinstance(obj, models.App) and obj.owner == request.user or \\\n hasattr(obj, 'app') and obj.app.owner == request.user:\n return True\n elif request.user.has_perm('use_app', obj) or \\\n hasattr(obj, 'app') and request.user.has_perm('use_app', obj.app):\n return request.method != 'DELETE'\n else:\n return False\n\n\nclass IsAnonymous(permissions.BasePermission):\n \"\"\"\n View permission to allow anonymous users.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return type(request.user) is AnonymousUser\n\n\nclass IsOwner(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow only owners of an object to access it.\n Assumes the model instance has an `owner` attribute.\n \"\"\"\n\n def has_object_permission(self, request, view, obj):\n if hasattr(obj, 'owner'):\n return obj.owner == request.user\n else:\n return False\n\n\nclass IsOwnerOrAdmin(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow only owners of an object or administrators to access it.\n Assumes the model instance has an `owner` attribute.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n if request.user.is_superuser:\n return True\n if hasattr(obj, 'owner'):\n return obj.owner == request.user\n else:\n return False\n\n\nclass IsAppUser(permissions.BasePermission):\n \"\"\"\n Object-level permission to allow owners or collaborators to access\n an app-related model.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n return is_app_user(request, obj)\n\n\nclass IsAdmin(permissions.BasePermission):\n \"\"\"\n View permission to allow only admins.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return request.user.is_superuser\n\n\nclass IsAdminOrSafeMethod(permissions.BasePermission):\n \"\"\"\n View permission to allow only admins to use unsafe methods\n including POST, PUT, DELETE.\n\n This allows\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n return request.method in permissions.SAFE_METHODS or request.user.is_superuser\n\n\nclass HasRegistrationAuth(permissions.BasePermission):\n \"\"\"\n 
Checks to see if registration is enabled\n \"\"\"\n def has_permission(self, request, view):\n \"\"\"\n If settings.REGISTRATION_MODE does not exist, such as during a test, return True\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n try:\n if settings.REGISTRATION_MODE == 'disabled':\n return False\n if settings.REGISTRATION_MODE == 'enabled':\n return True\n elif settings.REGISTRATION_MODE == 'admin_only':\n return request.user.is_superuser\n else:\n raise Exception(\"{} is not a valid registation mode\"\n .format(settings.REGISTRATION_MODE))\n except AttributeError:\n return True\n\n\nclass HasBuilderAuth(permissions.BasePermission):\n \"\"\"\n View permission to allow builder to perform actions\n with a special HTTP header\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n auth_header = request.environ.get('HTTP_X_DEIS_BUILDER_AUTH')\n if not auth_header:\n return False\n return auth_header == settings.BUILDER_KEY\n\n\nclass CanRegenerateToken(permissions.BasePermission):\n \"\"\"\n Checks if a user can regenerate a token\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Return `True` if permission is granted, `False` otherwise.\n \"\"\"\n if 'username' in request.data or 'all' in request.data:\n return request.user.is_superuser\n else:\n return True\n", "path": "controller/api/permissions.py"}]} | 1,822 | 141 |
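When a DRF permission class simply returns `False` for an unauthenticated request, the framework answers with the generic "Authentication credentials were not provided." detail, which is exactly the misleading message in the report. Raising `rest_framework.exceptions.PermissionDenied` with an explicit message, as the golden diff does, puts that message in the response instead. A simplified sketch of the pattern (a cut-down permission class, with a class attribute standing in for `settings.REGISTRATION_MODE`):

```python
from rest_framework import exceptions, permissions


class RegistrationEnabled(permissions.BasePermission):
    """Deny registration with an explicit message when it is disabled."""

    registration_mode = "disabled"  # stand-in for settings.REGISTRATION_MODE

    def has_permission(self, request, view):
        if self.registration_mode == "disabled":
            # Produces a 403 response body: {"detail": "Registration is disabled"}
            raise exceptions.PermissionDenied("Registration is disabled")
        return True
```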
gh_patches_debug_37554 | rasdani/github-patches | git_diff | litestar-org__litestar-1695 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of litestar/contrib/jwt/jwt_token.py]
1 from __future__ import annotations
2
3 from dataclasses import asdict, dataclass, field
4 from datetime import datetime, timezone
5 from typing import cast
6
7 from jose import JWSError, JWTError, jwt
8
9 from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException
10
11 __all__ = ("Token",)
12
13
14 def _normalize_datetime(value: datetime) -> datetime:
15 """Convert the given value into UTC and strip microseconds.
16
17 Args:
18 value: A datetime instance
19
20 Returns:
21 A datetime instance
22 """
23 if value.tzinfo is not None:
24 value.astimezone(timezone.utc)
25
26 return value.replace(microsecond=0)
27
28
29 @dataclass
30 class Token:
31 """JWT Token DTO."""
32
33 exp: datetime
34 """Expiration - datetime for token expiration."""
35 sub: str
36 """Subject - usually a unique identifier of the user or equivalent entity."""
37 iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc)))
38 """Issued at - should always be current now."""
39 iss: str | None = field(default=None)
40 """Issuer - optional unique identifier for the issuer."""
41 aud: str | None = field(default=None)
42 """Audience - intended audience."""
43 jti: str | None = field(default=None)
44 """JWT ID - a unique identifier of the JWT between different issuers."""
45
46 def __post_init__(self) -> None:
47 if len(self.sub) < 1:
48 raise ImproperlyConfiguredException("sub must be a string with a length greater than 0")
49
50 if isinstance(self.exp, datetime) and (
51 (exp := _normalize_datetime(self.exp))
52 and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp()
53 ):
54 self.exp = exp
55 else:
56 raise ImproperlyConfiguredException("exp value must be a datetime in the future")
57
58 if isinstance(self.iat, datetime) and (
59 (iat := _normalize_datetime(self.iat))
60 and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp()
61 ):
62 self.iat = iat
63 else:
64 raise ImproperlyConfiguredException("iat must be a current or past time")
65
66 @staticmethod
67 def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token:
68 """Decode a passed in token string and returns a Token instance.
69
70 Args:
71 encoded_token: A base64 string containing an encoded JWT.
72 secret: The secret with which the JWT is encoded. It may optionally be an individual JWK or JWS set dict
73 algorithm: The algorithm used to encode the JWT.
74
75 Returns:
76 A decoded Token instance.
77
78 Raises:
79 NotAuthorizedException: If the token is invalid.
80 """
81 try:
82 payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={"verify_aud": False})
83 exp = datetime.fromtimestamp(payload.pop("exp"), tz=timezone.utc)
84 iat = datetime.fromtimestamp(payload.pop("iat"), tz=timezone.utc)
85 return Token(exp=exp, iat=iat, **payload)
86 except (KeyError, JWTError, ImproperlyConfiguredException) as e:
87 raise NotAuthorizedException("Invalid token") from e
88
89 def encode(self, secret: str, algorithm: str) -> str:
90 """Encode the token instance into a string.
91
92 Args:
93 secret: The secret with which the JWT is encoded.
94 algorithm: The algorithm used to encode the JWT.
95
96 Returns:
97 An encoded token string.
98
99 Raises:
100 ImproperlyConfiguredException: If encoding fails.
101 """
102 try:
103 return cast(
104 "str",
105 jwt.encode(
106 claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm
107 ),
108 )
109 except (JWTError, JWSError) as e:
110 raise ImproperlyConfiguredException("Failed to encode token") from e
111
[end of litestar/contrib/jwt/jwt_token.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/jwt/jwt_token.py b/litestar/contrib/jwt/jwt_token.py
--- a/litestar/contrib/jwt/jwt_token.py
+++ b/litestar/contrib/jwt/jwt_token.py
@@ -1,13 +1,18 @@
from __future__ import annotations
+import dataclasses
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone
-from typing import cast
+from typing import TYPE_CHECKING, Any, cast
from jose import JWSError, JWTError, jwt
from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException
+if TYPE_CHECKING:
+ from typing_extensions import Self
+
+
__all__ = ("Token",)
@@ -42,6 +47,8 @@
"""Audience - intended audience."""
jti: str | None = field(default=None)
"""JWT ID - a unique identifier of the JWT between different issuers."""
+ extras: dict[str, Any] = field(default_factory=dict)
+ """Extra fields that were found on the JWT token."""
def __post_init__(self) -> None:
if len(self.sub) < 1:
@@ -63,8 +70,8 @@
else:
raise ImproperlyConfiguredException("iat must be a current or past time")
- @staticmethod
- def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token:
+ @classmethod
+ def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self:
"""Decode a passed in token string and returns a Token instance.
Args:
@@ -82,7 +89,12 @@
payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={"verify_aud": False})
exp = datetime.fromtimestamp(payload.pop("exp"), tz=timezone.utc)
iat = datetime.fromtimestamp(payload.pop("iat"), tz=timezone.utc)
- return Token(exp=exp, iat=iat, **payload)
+ field_names = {f.name for f in dataclasses.fields(Token)}
+ extra_fields = payload.keys() - field_names
+ extras = payload.pop("extras", {})
+ for key in extra_fields:
+ extras[key] = payload.pop(key)
+ return cls(exp=exp, iat=iat, **payload, extras=extras)
except (KeyError, JWTError, ImproperlyConfiguredException) as e:
raise NotAuthorizedException("Invalid token") from e
| {"golden_diff": "diff --git a/litestar/contrib/jwt/jwt_token.py b/litestar/contrib/jwt/jwt_token.py\n--- a/litestar/contrib/jwt/jwt_token.py\n+++ b/litestar/contrib/jwt/jwt_token.py\n@@ -1,13 +1,18 @@\n from __future__ import annotations\n \n+import dataclasses\n from dataclasses import asdict, dataclass, field\n from datetime import datetime, timezone\n-from typing import cast\n+from typing import TYPE_CHECKING, Any, cast\n \n from jose import JWSError, JWTError, jwt\n \n from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException\n \n+if TYPE_CHECKING:\n+ from typing_extensions import Self\n+\n+\n __all__ = (\"Token\",)\n \n \n@@ -42,6 +47,8 @@\n \"\"\"Audience - intended audience.\"\"\"\n jti: str | None = field(default=None)\n \"\"\"JWT ID - a unique identifier of the JWT between different issuers.\"\"\"\n+ extras: dict[str, Any] = field(default_factory=dict)\n+ \"\"\"Extra fields that were found on the JWT token.\"\"\"\n \n def __post_init__(self) -> None:\n if len(self.sub) < 1:\n@@ -63,8 +70,8 @@\n else:\n raise ImproperlyConfiguredException(\"iat must be a current or past time\")\n \n- @staticmethod\n- def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token:\n+ @classmethod\n+ def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self:\n \"\"\"Decode a passed in token string and returns a Token instance.\n \n Args:\n@@ -82,7 +89,12 @@\n payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n- return Token(exp=exp, iat=iat, **payload)\n+ field_names = {f.name for f in dataclasses.fields(Token)}\n+ extra_fields = payload.keys() - field_names\n+ extras = payload.pop(\"extras\", {})\n+ for key in extra_fields:\n+ extras[key] = payload.pop(key)\n+ return cls(exp=exp, iat=iat, **payload, extras=extras)\n except (KeyError, JWTError, ImproperlyConfiguredException) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import asdict, dataclass, field\nfrom datetime import datetime, timezone\nfrom typing import cast\n\nfrom jose import JWSError, JWTError, jwt\n\nfrom litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException\n\n__all__ = (\"Token\",)\n\n\ndef _normalize_datetime(value: datetime) -> datetime:\n \"\"\"Convert the given value into UTC and strip microseconds.\n\n Args:\n value: A datetime instance\n\n Returns:\n A datetime instance\n \"\"\"\n if value.tzinfo is not None:\n value.astimezone(timezone.utc)\n\n return value.replace(microsecond=0)\n\n\n@dataclass\nclass Token:\n \"\"\"JWT Token DTO.\"\"\"\n\n exp: datetime\n \"\"\"Expiration - datetime for token expiration.\"\"\"\n sub: str\n \"\"\"Subject - usually a unique identifier of the user or equivalent entity.\"\"\"\n iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc)))\n \"\"\"Issued at - should always be current now.\"\"\"\n iss: str | None = field(default=None)\n \"\"\"Issuer - optional unique identifier for the issuer.\"\"\"\n aud: str | None = field(default=None)\n \"\"\"Audience - intended audience.\"\"\"\n jti: str | None = field(default=None)\n \"\"\"JWT ID - a unique identifier of the JWT between different issuers.\"\"\"\n\n def __post_init__(self) -> None:\n if len(self.sub) < 1:\n raise ImproperlyConfiguredException(\"sub must be a string with a length greater than 0\")\n\n if isinstance(self.exp, datetime) and (\n (exp := _normalize_datetime(self.exp))\n and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.exp = exp\n else:\n raise ImproperlyConfiguredException(\"exp value must be a datetime in the future\")\n\n if isinstance(self.iat, datetime) and (\n (iat := _normalize_datetime(self.iat))\n and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.iat = iat\n else:\n raise ImproperlyConfiguredException(\"iat must be a current or past time\")\n\n @staticmethod\n def decode(encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Token:\n \"\"\"Decode a passed in token string and returns a Token instance.\n\n Args:\n encoded_token: A base64 string containing an encoded JWT.\n secret: The secret with which the JWT is encoded. 
It may optionally be an individual JWK or JWS set dict\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n A decoded Token instance.\n\n Raises:\n NotAuthorizedException: If the token is invalid.\n \"\"\"\n try:\n payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n return Token(exp=exp, iat=iat, **payload)\n except (KeyError, JWTError, ImproperlyConfiguredException) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n\n def encode(self, secret: str, algorithm: str) -> str:\n \"\"\"Encode the token instance into a string.\n\n Args:\n secret: The secret with which the JWT is encoded.\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n An encoded token string.\n\n Raises:\n ImproperlyConfiguredException: If encoding fails.\n \"\"\"\n try:\n return cast(\n \"str\",\n jwt.encode(\n claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm\n ),\n )\n except (JWTError, JWSError) as e:\n raise ImproperlyConfiguredException(\"Failed to encode token\") from e\n", "path": "litestar/contrib/jwt/jwt_token.py"}]} | 1,813 | 575 |
gh_patches_debug_26260 | rasdani/github-patches | git_diff | genialis__resolwe-196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Elasticsearch returns paginated results when querying/mapping features using RESDK
In resolwe-bio tools/goea.py `org_features = res.feature.filter(source=args.source_db, query=genes)` should return all genes, not just the first 10.
</issue>
<code>
[start of resolwe/elastic/viewsets.py]
1 """.. Ignore pydocstyle D400.
2
3 ================
4 Elastic Viewsets
5 ================
6
7 .. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin
8 :members:
9
10 """
11 from __future__ import absolute_import, division, print_function, unicode_literals
12
13 from elasticsearch_dsl.query import Q
14
15 from django.conf import settings
16 from django.contrib.auth import get_user_model
17
18 from rest_framework.response import Response
19 from rest_framework.viewsets import GenericViewSet
20
21 __all__ = (
22 'ElasticSearchMixin',
23 'PaginationMixin',
24 'ElasticSearchBaseViewSet',
25 )
26
27
28 class ElasticSearchMixin(object):
29 """Mixin to use Django REST Framework with ElasticSearch based querysets.
30
31 This mixin adds following methods:
32 * :func:`~ElasticSearchMixin.order_search`
33 * :func:`~ElasticSearchMixin.filter_search`
34 * :func:`~ElasticSearchMixin.filter_permissions`
35
36 """
37
38 filtering_fields = []
39 ordering_fields = []
40 ordering = None
41
42 def get_query_param(self, key, default=None):
43 """Get query parameter uniformly for GET and POST requests."""
44 value = self.request.query_params.get(key, None)
45 if value is None:
46 value = self.request.data.get(key, None)
47 if value is None:
48 value = default
49 return value
50
51 def order_search(self, search):
52 """Order given search by the ordering parameter given in request.
53
54 :param search: ElasticSearch query object
55
56 """
57 ordering = self.get_query_param('ordering', self.ordering)
58
59 ordering_field = ordering.lstrip('-')
60 if ordering_field not in self.ordering_fields:
61 raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field))
62
63 return search.sort(ordering)
64
65 def filter_search(self, search):
66 """Filter given search by the filter parameter given in request.
67
68 :param search: ElasticSearch query object
69
70 """
71 for field in self.filtering_fields:
72 value = self.get_query_param(field, None)
73 if value:
74 if isinstance(value, list):
75 filters = [Q('match', **{field: item}) for item in value]
76 search = search.query('bool', should=filters)
77 else:
78 search = search.query('wildcard', **{field: value})
79
80 return search
81
82 def filter_permissions(self, search):
83 """Filter given query based on permissions of the user in the request.
84
85 :param search: ElasticSearch query object
86
87 """
88 user = self.request.user
89 if user.is_superuser:
90 return search
91 if user.is_anonymous():
92 user_model = get_user_model()
93 user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME})
94
95 filters = [Q('match', users_with_permissions=user.pk)]
96 filters.extend([
97 Q('match', groups_with_permissions=group.pk) for group in user.groups.all()
98 ])
99
100 # `minimum_should_match` is set to 1 by default
101 return search.query('bool', should=filters)
102
103
104 class PaginationMixin(object):
105 """Mixin for making paginated response in case pagination parameters are provided."""
106
107 def paginate_response(self, queryset):
108 """Optionally return paginated response.
109
110 If pagination parameters are provided in the request, then paginated response
111 is returned, otherwise response is not paginated.
112
113 """
114 page = self.paginate_queryset(queryset)
115 if page is not None:
116 serializer = self.get_serializer(page, many=True)
117 return self.get_paginated_response(serializer.data)
118
119 serializer = self.get_serializer(queryset, many=True)
120 return Response(serializer.data)
121
122
123 class ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet):
124 """Base ViewSet for ElasticSearch based views.
125
126 This ViewSet creates search based on ``document_class`` parameter, specified
127 in subclass and:
128
129 * filter it by query parameters
130 * apply ordering
131 * filter permissions
132 * apply pagination
133
134 .. IMPORTANT::
135
136 Both ``POST`` and ``GET`` requests are supported.
137 """
138
139 document_class = None
140
141 def custom_filter(self, search):
142 """Perform custom search filtering.
143
144 This method is intended to be overriden in subclasses.
145 """
146 return search
147
148 def list_with_post(self, request):
149 """Endpoint handler."""
150 search = self.document_class().search() # pylint: disable=not-callable
151
152 search = self.custom_filter(search)
153
154 search = self.filter_search(search)
155 search = self.order_search(search)
156 search = self.filter_permissions(search)
157
158 return self.paginate_response(search)
159
160 def list(self, request):
161 """Endpoint handler."""
162 return self.list_with_post(request)
163
[end of resolwe/elastic/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/resolwe/elastic/viewsets.py b/resolwe/elastic/viewsets.py
--- a/resolwe/elastic/viewsets.py
+++ b/resolwe/elastic/viewsets.py
@@ -15,6 +15,7 @@
from django.conf import settings
from django.contrib.auth import get_user_model
+from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
@@ -24,6 +25,16 @@
'ElasticSearchBaseViewSet',
)
+ELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch
+
+
+class TooManyResults(APIException):
+ """Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results."""
+
+ status_code = 400
+ default_detail = 'Query returned too many results. Please, add more filters or use pagination.'
+ default_code = 'bad_request'
+
class ElasticSearchMixin(object):
"""Mixin to use Django REST Framework with ElasticSearch based querysets.
@@ -155,6 +166,11 @@
search = self.order_search(search)
search = self.filter_permissions(search)
+ if search.count() > ELASTICSEARCH_SIZE:
+ raise TooManyResults()
+
+ search = search.extra(size=ELASTICSEARCH_SIZE)
+
return self.paginate_response(search)
def list(self, request):
| {"golden_diff": "diff --git a/resolwe/elastic/viewsets.py b/resolwe/elastic/viewsets.py\n--- a/resolwe/elastic/viewsets.py\n+++ b/resolwe/elastic/viewsets.py\n@@ -15,6 +15,7 @@\n from django.conf import settings\n from django.contrib.auth import get_user_model\n \n+from rest_framework.exceptions import APIException\n from rest_framework.response import Response\n from rest_framework.viewsets import GenericViewSet\n \n@@ -24,6 +25,16 @@\n 'ElasticSearchBaseViewSet',\n )\n \n+ELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch\n+\n+\n+class TooManyResults(APIException):\n+ \"\"\"Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results.\"\"\"\n+\n+ status_code = 400\n+ default_detail = 'Query returned too many results. Please, add more filters or use pagination.'\n+ default_code = 'bad_request'\n+\n \n class ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n@@ -155,6 +166,11 @@\n search = self.order_search(search)\n search = self.filter_permissions(search)\n \n+ if search.count() > ELASTICSEARCH_SIZE:\n+ raise TooManyResults()\n+\n+ search = search.extra(size=ELASTICSEARCH_SIZE)\n+\n return self.paginate_response(search)\n \n def list(self, request):\n", "issue": "Elasticserach returns paginated results when querying/mapping features using RESDK\nIn resolwe-bio tools/goea.py `org_features = res.feature.filter(source=args.source_db, query=genes)` should return all genes, not just the first 10.\n", "before_files": [{"content": "\"\"\".. Ignore pydocstyle D400.\n\n================\nElastic Viewsets\n================\n\n.. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin\n :members:\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom elasticsearch_dsl.query import Q\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\n__all__ = (\n 'ElasticSearchMixin',\n 'PaginationMixin',\n 'ElasticSearchBaseViewSet',\n)\n\n\nclass ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n\n This mixin adds following methods:\n * :func:`~ElasticSearchMixin.order_search`\n * :func:`~ElasticSearchMixin.filter_search`\n * :func:`~ElasticSearchMixin.filter_permissions`\n\n \"\"\"\n\n filtering_fields = []\n ordering_fields = []\n ordering = None\n\n def get_query_param(self, key, default=None):\n \"\"\"Get query parameter uniformly for GET and POST requests.\"\"\"\n value = self.request.query_params.get(key, None)\n if value is None:\n value = self.request.data.get(key, None)\n if value is None:\n value = default\n return value\n\n def order_search(self, search):\n \"\"\"Order given search by the ordering parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n ordering = self.get_query_param('ordering', self.ordering)\n\n ordering_field = ordering.lstrip('-')\n if ordering_field not in self.ordering_fields:\n raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field))\n\n return search.sort(ordering)\n\n def filter_search(self, search):\n \"\"\"Filter given search by the filter parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n for field in self.filtering_fields:\n value = self.get_query_param(field, None)\n if value:\n if isinstance(value, list):\n filters = [Q('match', **{field: item}) 
for item in value]\n search = search.query('bool', should=filters)\n else:\n search = search.query('wildcard', **{field: value})\n\n return search\n\n def filter_permissions(self, search):\n \"\"\"Filter given query based on permissions of the user in the request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n user = self.request.user\n if user.is_superuser:\n return search\n if user.is_anonymous():\n user_model = get_user_model()\n user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME})\n\n filters = [Q('match', users_with_permissions=user.pk)]\n filters.extend([\n Q('match', groups_with_permissions=group.pk) for group in user.groups.all()\n ])\n\n # `minimum_should_match` is set to 1 by default\n return search.query('bool', should=filters)\n\n\nclass PaginationMixin(object):\n \"\"\"Mixin for making paginated response in case pagination parameters are provided.\"\"\"\n\n def paginate_response(self, queryset):\n \"\"\"Optionally return paginated response.\n\n If pagination parameters are provided in the request, then paginated response\n is returned, otherwise response is not paginated.\n\n \"\"\"\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet):\n \"\"\"Base ViewSet for ElasticSearch based views.\n\n This ViewSet creates search based on ``document_class`` parameter, specified\n in subclass and:\n\n * filter it by query parameters\n * apply ordering\n * filter permissions\n * apply pagination\n\n .. IMPORTANT::\n\n Both ``POST`` and ``GET`` requests are supported.\n \"\"\"\n\n document_class = None\n\n def custom_filter(self, search):\n \"\"\"Perform custom search filtering.\n\n This method is intended to be overriden in subclasses.\n \"\"\"\n return search\n\n def list_with_post(self, request):\n \"\"\"Endpoint handler.\"\"\"\n search = self.document_class().search() # pylint: disable=not-callable\n\n search = self.custom_filter(search)\n\n search = self.filter_search(search)\n search = self.order_search(search)\n search = self.filter_permissions(search)\n\n return self.paginate_response(search)\n\n def list(self, request):\n \"\"\"Endpoint handler.\"\"\"\n return self.list_with_post(request)\n", "path": "resolwe/elastic/viewsets.py"}]} | 2,005 | 320 |
gh_patches_debug_28671 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider avis is broken
During the global build at 2021-06-02-14-42-40, spider **avis** failed with **4383 features** and **36 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/avis.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson))
</issue>
<code>
[start of locations/spiders/avis.py]
1 import scrapy
2 import re
3
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7
8 DAY_MAPPING = {
9 'Mon': 'Mo',
10 'Tue': 'Tu',
11 'Wed': 'We',
12 'Thu': 'Th',
13 'Fri': 'Fr',
14 'Sat': 'Sa',
15 'Sun': 'Su'
16 }
17 DAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
18
19
20 class AvisSpider(scrapy.Spider):
21
22 name = "avis"
23 item_attributes = { 'brand': "Avis", 'brand_wikidata': "Q791136" }
24 download_delay = 0.5
25 allowed_domains = [
26 "avis.com",
27 ]
28 start_urls = (
29 'https://www.avis.com/en/locations/avisworldwide',
30 )
31
32 def parse_hours(self, hours):
33 "Sun - Sat 7:00 AM - 10:00 PM"
34 opening_hours = OpeningHours()
35 hours = [h.strip() for h in hours.split(';')]
36
37 for hour in hours:
38 if hour == "Sun - Sat open 24 hrs":
39 return "24/7"
40 range_match = re.search(r'([A-Za-z]{3})\s-\s([A-Za-z]{3})\s([\d:\sAMP]+)\s-\s([\d:\sAMP]+)', hour)
41 if range_match:
42 start_day, end_day, start_time, end_time = range_match.groups()
43 else:
44 single_match = re.search(r'([A-Za-z]{3})\s([\d:\sAMP]+)\s-\s([\d:\sAMP]+)', hour)
45 if not single_match:
46 continue
47 start_day, start_time, end_time = single_match.groups()
48 end_day = start_day
49
50 for day in DAYS[DAYS.index(start_day):DAYS.index(end_day)+1]:
51 opening_hours.add_range(day=DAY_MAPPING[day],
52 open_time=start_time.strip(),
53 close_time=end_time.strip(),
54 time_format='%I:%M %p')
55 return opening_hours.as_opening_hours()
56
57 def parse_store(self, response):
58 if response.url == 'https://www.avis.com/en/error/500':
59 # some closed locations get redirected to this error page
60 return
61
62 def clean(val):
63 if val:
64 return val.strip(', ')
65 return val
66
67 ref = response.url.split('/')[-1]
68
69 properties = {
70 'name': clean(response.xpath('//h2/span[@itemprop="name"]/text()').extract_first()),
71 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first()),
72 'phone': response.xpath('normalize-space(//span[@itemprop="telephone"]/text())').extract_first(),
73 'city': clean(response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first()),
74 'state': clean(response.xpath('normalize-space(//span[@itemprop="addressRegion"]/text())').extract_first()),
75 'postcode': clean(response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first()),
76 'country': clean(response.xpath('normalize-space(//span[@itemprop="addressCountry"]/text())').extract_first()),
77 'ref': ref,
78 'website': response.url,
79 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
80 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
81 }
82 hours = response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()
83 if hours:
84 properties['opening_hours'] = self.parse_hours(hours)
85 yield GeojsonPointItem(**properties)
86
87 def parse_state(self, response):
88 urls = response.xpath('//ul[contains(@class, "location-list-ul")]//li/a/@href').extract()
89
90 if not urls:
91 urls = set(response.xpath('//ul[contains(@class, "LocContainer")]//a/@href').extract())
92 urls = [u for u in urls if 'javascript:void' not in u]
93
94 location_list = re.compile("^/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+$")
95 us_single_location = re.compile(r'/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+/[^/]+$')
96 single_location = re.compile(r'/en/locations/(?!us|ca|au)[a-z]{2}/[^/]+/[^/]+$')
97
98 for url in urls:
99 if single_location.match(url) or us_single_location.match(url):
100 yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
101 elif location_list.match(url):
102 # skip these, we get them already
103 continue
104 elif 'xx' in url:
105 continue
106
107 def parse_country(self,response):
108 urls = response.xpath('//div[contains(@class,"country-wrapper")]//li/a/@href').extract()
109
110 for url in urls:
111 yield scrapy.Request(response.urljoin(url), callback=self.parse_state)
112
113 def parse(self, response):
114 urls = response.xpath('//div[@class="wl-location-state"]//li/a/@href').extract()
115
116 for url in urls:
117 yield scrapy.Request(response.urljoin(url), callback=self.parse_country)
118
[end of locations/spiders/avis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/avis.py b/locations/spiders/avis.py
--- a/locations/spiders/avis.py
+++ b/locations/spiders/avis.py
@@ -66,6 +66,15 @@
ref = response.url.split('/')[-1]
+ latitude = None
+ longitude = None
+
+ if response.xpath('//meta[@itemprop="latitude"]/@content').extract_first() is not None:
+ latitude = float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first())
+
+ if response.xpath('//meta[@itemprop="longitude"]/@content').extract_first() is not None:
+ longitude = float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first())
+
properties = {
'name': clean(response.xpath('//h2/span[@itemprop="name"]/text()').extract_first()),
'addr_full': clean(response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first()),
@@ -76,8 +85,8 @@
'country': clean(response.xpath('normalize-space(//span[@itemprop="addressCountry"]/text())').extract_first()),
'ref': ref,
'website': response.url,
- 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
- 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
+ 'lat': latitude,
+ 'lon': longitude,
}
hours = response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()
if hours:
| {"golden_diff": "diff --git a/locations/spiders/avis.py b/locations/spiders/avis.py\n--- a/locations/spiders/avis.py\n+++ b/locations/spiders/avis.py\n@@ -66,6 +66,15 @@\n \n ref = response.url.split('/')[-1]\n \n+ latitude = None\n+ longitude = None\n+\n+ if response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first() is not None:\n+ latitude = float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first())\n+\n+ if response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first() is not None:\n+ longitude = float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first())\n+\n properties = {\n 'name': clean(response.xpath('//h2/span[@itemprop=\"name\"]/text()').extract_first()),\n 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first()),\n@@ -76,8 +85,8 @@\n 'country': clean(response.xpath('normalize-space(//span[@itemprop=\"addressCountry\"]/text())').extract_first()),\n 'ref': ref,\n 'website': response.url,\n- 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n- 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n+ 'lat': latitude,\n+ 'lon': longitude,\n }\n hours = response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()\n if hours:\n", "issue": "Spider avis is broken\nDuring the global build at 2021-06-02-14-42-40, spider **avis** failed with **4383 features** and **36 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/avis.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/avis.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 'Mon': 'Mo',\n 'Tue': 'Tu',\n 'Wed': 'We',\n 'Thu': 'Th',\n 'Fri': 'Fr',\n 'Sat': 'Sa',\n 'Sun': 'Su'\n}\nDAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\n\nclass AvisSpider(scrapy.Spider):\n\n name = \"avis\"\n item_attributes = { 'brand': \"Avis\", 'brand_wikidata': \"Q791136\" }\n download_delay = 0.5\n allowed_domains = [\n \"avis.com\",\n ]\n start_urls = (\n 'https://www.avis.com/en/locations/avisworldwide',\n )\n\n def parse_hours(self, hours):\n \"Sun - Sat 7:00 AM - 10:00 PM\"\n opening_hours = OpeningHours()\n hours = [h.strip() for h in hours.split(';')]\n\n for hour in hours:\n if hour == \"Sun - Sat open 24 hrs\":\n return \"24/7\"\n range_match = re.search(r'([A-Za-z]{3})\\s-\\s([A-Za-z]{3})\\s([\\d:\\sAMP]+)\\s-\\s([\\d:\\sAMP]+)', hour)\n if range_match:\n start_day, end_day, start_time, end_time = range_match.groups()\n else:\n single_match = re.search(r'([A-Za-z]{3})\\s([\\d:\\sAMP]+)\\s-\\s([\\d:\\sAMP]+)', hour)\n if not single_match:\n continue\n start_day, start_time, end_time = single_match.groups()\n end_day = start_day\n\n for day in DAYS[DAYS.index(start_day):DAYS.index(end_day)+1]:\n opening_hours.add_range(day=DAY_MAPPING[day],\n open_time=start_time.strip(),\n close_time=end_time.strip(),\n time_format='%I:%M %p')\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n if response.url == 'https://www.avis.com/en/error/500':\n # some closed locations get redirected to this error page\n return\n\n def clean(val):\n if val:\n return val.strip(', ')\n return 
val\n\n ref = response.url.split('/')[-1]\n\n properties = {\n 'name': clean(response.xpath('//h2/span[@itemprop=\"name\"]/text()').extract_first()),\n 'addr_full': clean(response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first()),\n 'phone': response.xpath('normalize-space(//span[@itemprop=\"telephone\"]/text())').extract_first(),\n 'city': clean(response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first()),\n 'state': clean(response.xpath('normalize-space(//span[@itemprop=\"addressRegion\"]/text())').extract_first()),\n 'postcode': clean(response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first()),\n 'country': clean(response.xpath('normalize-space(//span[@itemprop=\"addressCountry\"]/text())').extract_first()),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n hours = response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()\n if hours:\n properties['opening_hours'] = self.parse_hours(hours)\n yield GeojsonPointItem(**properties)\n\n def parse_state(self, response):\n urls = response.xpath('//ul[contains(@class, \"location-list-ul\")]//li/a/@href').extract()\n\n if not urls:\n urls = set(response.xpath('//ul[contains(@class, \"LocContainer\")]//a/@href').extract())\n urls = [u for u in urls if 'javascript:void' not in u]\n\n location_list = re.compile(\"^/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+$\")\n us_single_location = re.compile(r'/en/locations/(?:us|ca|au)/[a-z]{2}/[^/]+/[^/]+$')\n single_location = re.compile(r'/en/locations/(?!us|ca|au)[a-z]{2}/[^/]+/[^/]+$')\n\n for url in urls:\n if single_location.match(url) or us_single_location.match(url):\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n elif location_list.match(url):\n # skip these, we get them already\n continue\n elif 'xx' in url:\n continue\n\n def parse_country(self,response):\n urls = response.xpath('//div[contains(@class,\"country-wrapper\")]//li/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_state)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"wl-location-state\"]//li/a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_country)\n", "path": "locations/spiders/avis.py"}]} | 2,144 | 360 |
gh_patches_debug_13499 | rasdani/github-patches | git_diff | lutris__lutris-488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Specify a User Agent for HTTP requests
Right now it's python-urllib/someversion, and Cloudflare sites (tested on a medium-protection site) block it and return a 403 status code.
Testing the same url with curl works without it blocking, so I'm guessing Cloudflare checks the request UA.
</issue>
<code>
[start of lutris/util/http.py]
1 import json
2 import socket
3 import urllib.request
4 import urllib.error
5 import urllib.parse
6 from ssl import CertificateError
7
8 from lutris.settings import SITE_URL
9 from lutris.util.log import logger
10
11
12 class Request(object):
13 def __init__(self, url, timeout=5, stop_request=None,
14 thread_queue=None, headers={}):
15
16 if not url:
17 raise ValueError('An URL is required!')
18
19 if url.startswith('//'):
20 url = 'https:' + url
21
22 if url.startswith('/'):
23 url = SITE_URL + url
24
25 self.url = url
26 self.content = ''
27 self.timeout = timeout
28 self.stop_request = stop_request
29 self.thread_queue = thread_queue
30 self.buffer_size = 32 * 1024 # Bytes
31 self.downloaded_size = 0
32 self.headers = headers
33
34 def get(self, data=None):
35 req = urllib.request.Request(url=self.url, data=data, headers=self.headers)
36 try:
37 request = urllib.request.urlopen(req, timeout=self.timeout)
38 except (urllib.error.HTTPError, CertificateError) as e:
39 logger.error("Unavailable url (%s): %s", self.url, e)
40 except (socket.timeout, urllib.error.URLError) as e:
41 logger.error("Unable to connect to server (%s): %s", self.url, e)
42 else:
43 try:
44 total_size = request.info().get('Content-Length').strip()
45 total_size = int(total_size)
46 except AttributeError:
47 total_size = 0
48
49 chunks = []
50 while 1:
51 if self.stop_request and self.stop_request.is_set():
52 self.content = ''
53 return self
54 try:
55 chunk = request.read(self.buffer_size)
56 except socket.timeout as e:
57 logger.error("Request timed out")
58 self.content = ''
59 return self
60 self.downloaded_size += len(chunk)
61 if self.thread_queue:
62 self.thread_queue.put(
63 (chunk, self.downloaded_size, total_size)
64 )
65 else:
66 chunks.append(chunk)
67 if not chunk:
68 break
69 request.close()
70 self.content = b''.join(chunks)
71 return self
72
73 def post(self, data):
74 raise NotImplementedError
75
76 def write_to_file(self, path):
77 content = self.content
78 if content:
79 with open(path, 'wb') as dest_file:
80 dest_file.write(content)
81
82 @property
83 def json(self):
84 if self.content:
85 return json.loads(self.text)
86
87 @property
88 def text(self):
89 if self.content:
90 return self.content.decode()
91
[end of lutris/util/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/util/http.py b/lutris/util/http.py
--- a/lutris/util/http.py
+++ b/lutris/util/http.py
@@ -5,6 +5,8 @@
import urllib.parse
from ssl import CertificateError
+from lutris.settings import PROJECT
+from lutris.settings import VERSION
from lutris.settings import SITE_URL
from lutris.util.log import logger
@@ -29,6 +31,8 @@
self.thread_queue = thread_queue
self.buffer_size = 32 * 1024 # Bytes
self.downloaded_size = 0
+ if not headers.get('User-Agent'):
+ headers['User-Agent'] = PROJECT + '/' + VERSION
self.headers = headers
def get(self, data=None):
| {"golden_diff": "diff --git a/lutris/util/http.py b/lutris/util/http.py\n--- a/lutris/util/http.py\n+++ b/lutris/util/http.py\n@@ -5,6 +5,8 @@\n import urllib.parse\n from ssl import CertificateError\n \n+from lutris.settings import PROJECT\n+from lutris.settings import VERSION\n from lutris.settings import SITE_URL\n from lutris.util.log import logger\n \n@@ -29,6 +31,8 @@\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n+ if not headers.get('User-Agent'):\n+ headers['User-Agent'] = PROJECT + '/' + VERSION\n self.headers = headers\n \n def get(self, data=None):\n", "issue": "Specify a User Agent for HTTP requests\nRight now it's python-urllib/someversion, and Cloudflare sites (tested on medium protection site) blocks it and returns 403 status code.\r\nTesting the same url with curl works without it blocking, so I'm guessing Cloudflare checks the request UA.\n", "before_files": [{"content": "import json\nimport socket\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nfrom ssl import CertificateError\n\nfrom lutris.settings import SITE_URL\nfrom lutris.util.log import logger\n\n\nclass Request(object):\n def __init__(self, url, timeout=5, stop_request=None,\n thread_queue=None, headers={}):\n\n if not url:\n raise ValueError('An URL is required!')\n\n if url.startswith('//'):\n url = 'https:' + url\n\n if url.startswith('/'):\n url = SITE_URL + url\n\n self.url = url\n self.content = ''\n self.timeout = timeout\n self.stop_request = stop_request\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n self.headers = headers\n\n def get(self, data=None):\n req = urllib.request.Request(url=self.url, data=data, headers=self.headers)\n try:\n request = urllib.request.urlopen(req, timeout=self.timeout)\n except (urllib.error.HTTPError, CertificateError) as e:\n logger.error(\"Unavailable url (%s): %s\", self.url, e)\n except (socket.timeout, urllib.error.URLError) as e:\n logger.error(\"Unable to connect to server (%s): %s\", self.url, e)\n else:\n try:\n total_size = request.info().get('Content-Length').strip()\n total_size = int(total_size)\n except AttributeError:\n total_size = 0\n\n chunks = []\n while 1:\n if self.stop_request and self.stop_request.is_set():\n self.content = ''\n return self\n try:\n chunk = request.read(self.buffer_size)\n except socket.timeout as e:\n logger.error(\"Request timed out\")\n self.content = ''\n return self\n self.downloaded_size += len(chunk)\n if self.thread_queue:\n self.thread_queue.put(\n (chunk, self.downloaded_size, total_size)\n )\n else:\n chunks.append(chunk)\n if not chunk:\n break\n request.close()\n self.content = b''.join(chunks)\n return self\n\n def post(self, data):\n raise NotImplementedError\n\n def write_to_file(self, path):\n content = self.content\n if content:\n with open(path, 'wb') as dest_file:\n dest_file.write(content)\n\n @property\n def json(self):\n if self.content:\n return json.loads(self.text)\n\n @property\n def text(self):\n if self.content:\n return self.content.decode()\n", "path": "lutris/util/http.py"}]} | 1,332 | 173 |
gh_patches_debug_6933 | rasdani/github-patches | git_diff | Flexget__Flexget-3204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
python 3.10 issue
I have an issue with python 3.10 and Flexget. Greenlet has been updated to 1.1.2 because the 1.0.0 version is not compatible with python 3.10. After that Flexget was installed successfully but I got the error message below.
- FlexGet version: 3.1.137
- Python version: 3.10
- Installation method: pip
- Using daemon (yes/no): no
- OS and version: Linux / Slackware / 5.14.8 kernel
Traceback (most recent call last):
File "/usr/bin/flexget", line 5, in <module>
from flexget import main
File "/usr/lib/python3.10/site-packages/flexget/__init__.py", line 11, in <module>
from flexget.manager import Manager # noqa
File "/usr/lib/python3.10/site-packages/flexget/manager.py", line 47, in <module>
from flexget.ipc import IPCClient, IPCServer # noqa
File "/usr/lib/python3.10/site-packages/flexget/ipc.py", line 14, in <module>
from flexget import terminal
File "/usr/lib/python3.10/site-packages/flexget/terminal.py", line 7, in <module>
from colorclass import Color, Windows
File "/usr/lib/python3.10/site-packages/colorclass/__init__.py", line 11, in <module>
from colorclass.codes import list_tags # noqa
File "/usr/lib/python3.10/site-packages/colorclass/codes.py", line 4, in <module>
from collections import Mapping
ImportError: cannot import name 'Mapping' from 'collections' (/usr/lib/python3.10/collections/__init__.py)
Thanks!
</issue>
<code>
[start of setup.py]
1 import sys
2 from pathlib import Path
3 from typing import List
4
5 from setuptools import find_packages, setup
6
7 long_description = Path('README.rst').read_text()
8
9 # Populates __version__ without importing the package
10 __version__ = None
11 with open('flexget/_version.py', encoding='utf-8') as ver_file:
12 exec(ver_file.read()) # pylint: disable=W0122
13 if not __version__:
14 print('Could not find __version__ from flexget/_version.py')
15 sys.exit(1)
16
17
18 def load_requirements(filename: str) -> List[str]:
19 return [
20 line.strip()
21 for line in Path(filename).read_text().splitlines()
22 if not line.startswith('#')
23 ]
24
25
26 setup(
27 name='FlexGet',
28 version=__version__,
29 description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
30 'from different sources like RSS-feeds, html-pages, various sites and more.',
31 long_description=long_description,
32 long_description_content_type='text/x-rst',
33 author='Marko Koivusalo',
34 author_email='[email protected]',
35 license='MIT',
36 url='https://flexget.com',
37 project_urls={
38 'Repository': 'https://github.com/Flexget/Flexget',
39 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',
40 'Forum': 'https://discuss.flexget.com',
41 },
42 packages=find_packages(exclude=['flexget.tests']),
43 include_package_data=True,
44 zip_safe=False,
45 install_requires=load_requirements('requirements.txt'),
46 tests_require=['pytest'],
47 extras_require={'dev': load_requirements('dev-requirements.txt')},
48 entry_points={
49 'console_scripts': ['flexget = flexget:main'],
50 'gui_scripts': [
51 'flexget-headless = flexget:main'
52 ], # This is useful on Windows to avoid a cmd popup
53 },
54 python_requires='>=3.6',
55 classifiers=[
56 "Development Status :: 5 - Production/Stable",
57 "License :: OSI Approved :: MIT License",
58 "Operating System :: OS Independent",
59 "Programming Language :: Python",
60 "Programming Language :: Python :: 3.6",
61 "Programming Language :: Python :: 3.7",
62 "Programming Language :: Python :: 3.8",
63 "Programming Language :: Python :: 3.9",
64 "Programming Language :: Python :: Implementation :: CPython",
65 "Programming Language :: Python :: Implementation :: PyPy",
66 ],
67 )
68
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n", "issue": "python 3.10 issue\nI have an issue with python 3.10 and Flexget. Greenlet has been updated to 1.1.2 because the 1.0.0 version is not compatible with python 3.10. After that Flexget was installed successfully but I got the error message below.\r\n\r\n- FlexGet version: 3.1.137\r\n- Python version: 3.10\r\n- Installation method: pip\r\n- Using daemon (yes/no): no\r\n- OS and version: Linux / Slackware / 5.14.8 kernel\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/flexget\", line 5, in <module>\r\n from flexget import main\r\n File \"/usr/lib/python3.10/site-packages/flexget/__init__.py\", line 11, in <module>\r\n from flexget.manager import Manager # noqa\r\n File \"/usr/lib/python3.10/site-packages/flexget/manager.py\", line 47, in <module>\r\n from flexget.ipc import IPCClient, IPCServer # noqa\r\n File \"/usr/lib/python3.10/site-packages/flexget/ipc.py\", line 14, in <module>\r\n from flexget import terminal\r\n File \"/usr/lib/python3.10/site-packages/flexget/terminal.py\", line 7, in <module>\r\n from colorclass import Color, Windows\r\n File \"/usr/lib/python3.10/site-packages/colorclass/__init__.py\", line 11, in <module>\r\n from colorclass.codes import list_tags # noqa\r\n File \"/usr/lib/python3.10/site-packages/colorclass/codes.py\", line 4, in <module>\r\n from collections import Mapping\r\nImportError: cannot import name 'Mapping' from 'collections' (/usr/lib/python3.10/collections/__init__.py)\r\n\r\nThanks!\n", "before_files": [{"content": "import sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\nlong_description = Path('README.rst').read_text()\n\n# Populates __version__ without importing the package\n__version__ = None\nwith open('flexget/_version.py', encoding='utf-8') as ver_file:\n exec(ver_file.read()) # pylint: disable=W0122\nif not __version__:\n print('Could not find __version__ from flexget/_version.py')\n sys.exit(1)\n\n\ndef load_requirements(filename: str) -> List[str]:\n return [\n line.strip()\n for line in Path(filename).read_text().splitlines()\n if not line.startswith('#')\n ]\n\n\nsetup(\n name='FlexGet',\n version=__version__,\n description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) 
'\n 'from different sources like RSS-feeds, html-pages, various sites and more.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n author='Marko Koivusalo',\n author_email='[email protected]',\n license='MIT',\n url='https://flexget.com',\n project_urls={\n 'Repository': 'https://github.com/Flexget/Flexget',\n 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',\n 'Forum': 'https://discuss.flexget.com',\n },\n packages=find_packages(exclude=['flexget.tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=load_requirements('requirements.txt'),\n tests_require=['pytest'],\n extras_require={'dev': load_requirements('dev-requirements.txt')},\n entry_points={\n 'console_scripts': ['flexget = flexget:main'],\n 'gui_scripts': [\n 'flexget-headless = flexget:main'\n ], # This is useful on Windows to avoid a cmd popup\n },\n python_requires='>=3.6',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n", "path": "setup.py"}]} | 1,628 | 108 |
gh_patches_debug_31687 | rasdani/github-patches | git_diff | liqd__a4-opin-735 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DST | Changes on "Aim"-Page
Hey hey, Nicolas from Nexus/WP3 sent some remarks for changes. On this page it would be helpful to:
1) Start the sentences on the left hand side with a capital letter
2) Change the question into "What is the aim of your participation project?"
3) Add a line above the first row: text in the left column "Aim", text in the right column "Examples"

</issue>
<code>
[start of euth/blueprintsuggest/blueprints.py]
1 from collections import namedtuple
2 from enum import Enum, unique
3
4 from django.utils.translation import ugettext_lazy as _
5
6 from euth.documents import phases as documents_phases
7 from euth.flashpoll import phases as flashpoll_phases
8 from euth.ideas import phases as ideas_phases
9 from euth.maps import phases as map_phases
10
11
12 @unique
13 class Aim(Enum):
14 collect_ideas = (
15 'collect_ideas',
16 _('create and gather new ideas or visions.'),
17 [_('(Urban) planning processes'),
18 _('Develop concepts or guiding principles')]
19 )
20 discuss_topic = (
21 'discuss_topic',
22 _('gather feedback on a topic and discuss it in greater detail.'),
23 [_('Discuss existing concepts or plans'),
24 _('Develop solutions for existing problems')]
25 )
26 design_place = (
27 'design_place',
28 _('design a place.'),
29 [_('(Urban) planning processes'),
30 _('Set the agenda of an event')]
31 )
32 run_survey = (
33 'run_survey',
34 _('learn about what people like most.'),
35 [_('Majority votes'), _('Opinion polls')]
36 )
37 run_competition = (
38 'run_competition',
39 _('run a competition.'),
40 [_('All sorts of competitions, '
41 'like idea contests etc.')]
42 )
43 work_document = (
44 'work_document',
45 _('work collaboratively on a text document.'),
46 [_('Draft or revise statutes, articles, or charters'),
47 _('Involve different authors in writing a shared text')]
48 )
49
50 def __new__(cls, value, label, examples):
51 obj = object.__new__(cls)
52 obj._value_ = value
53 obj.label = label
54 obj.examples = examples
55 return obj
56
57
58 @unique
59 class Result(Enum):
60 collect_ideas = 3, _('Collection of ideas or arguments')
61 majority_vote = 2, _('Majority vote')
62 weighted_arguments = 1, _('Weighted arguments')
63
64 def __new__(cls, value, label):
65 obj = object.__new__(cls)
66 obj._value_ = value
67 obj.label = label
68 return obj
69
70
71 @unique
72 class Experience(Enum):
73 five_projects = 4, _('More than 5 participative projects')
74 two_projects = 3, _('More than 2 participative projects')
75 one_project = 2, ('1-2 partcipative projects')
76 no_projects = 1, ('I have no experiences in organising participative '
77 ' projects')
78
79 def __new__(cls, value, label):
80 obj = object.__new__(cls)
81 obj._value_ = value
82 obj.label = label
83 return obj
84
85
86 class Motivation(Enum):
87 high = 4, _('High motivation')
88 medium = 3, _('Medium motivation')
89 low = 2, _('Low motivation')
90 not_found = 1, _('No motivation')
91 unkown = 2, _('I don\'t know.')
92
93 def __new__(cls, value, label):
94 obj = object.__new__(cls)
95 obj._value_ = value
96 obj.label = label
97 return obj
98
99
100 Requirements = namedtuple(
101 'Requirements', [
102 'aims', 'results', 'experience', 'motivation'
103 ])
104
105
106 Blueprint = namedtuple(
107 'Blueprint', [
108 'title', 'description', 'content', 'image', 'settings_model',
109 'requirements'
110 ])
111
112
113 blueprints = [
114 ('brainstorming',
115 Blueprint(
116 title=_('Brainstorming'),
117 description=_('Collect ideas, questions and input concerning '
118 'a problem or a question from a wide array of people.'),
119 content=[
120 ideas_phases.CollectPhase(),
121 ],
122 image='images/brainstorming.png',
123 settings_model=None,
124 requirements=Requirements(
125 aims=[Aim.collect_ideas, Aim.discuss_topic],
126 results=[Result.collect_ideas],
127 experience=Experience.no_projects,
128 motivation=Motivation.not_found
129 ),
130 )),
131 ('map-brainstorming',
132 Blueprint(
133 title=_('Spatial Brainstorming'),
134 description=_('Collect ideas, questions and input concerning a '
135 'problem or a question from a wide array of people.'),
136 content=[
137 map_phases.CollectPhase(),
138 ],
139 image='images/spatial_brainstorming.png',
140 settings_model=('euth_maps', 'AreaSettings'),
141 requirements=Requirements(
142 aims=[Aim.design_place],
143 results=[Result.collect_ideas],
144 experience=Experience.no_projects,
145 motivation=Motivation.not_found
146 ),
147 )),
148 ('idea-challenge',
149 Blueprint(
150 title=_('Idea Challenge'),
151 description=_('Run a challenge and find the best ideas to solve '
152 'a particular problem.'),
153 content=[
154 ideas_phases.CollectPhase(),
155 ideas_phases.RatingPhase(),
156 ],
157 image='images/challenge.png',
158 settings_model=None,
159 requirements=Requirements(
160 aims=[Aim.run_competition, Aim.run_survey],
161 results=list(Result),
162 experience=Experience.one_project,
163 motivation=Motivation.low
164 ),
165 )),
166 ('map-idea-challenge',
167 Blueprint(
168 title=_('Spatial Idea Challenge'),
169 description=_('Run a challenge concerning a certain area or space in '
170 'your community and find the best ideas to solve a '
171 'particular problem.'),
172 content=[
173 map_phases.CollectPhase(),
174 map_phases.RatingPhase(),
175 ],
176 image='images/spatial_challenge.png',
177 settings_model=('euth_maps', 'AreaSettings'),
178 requirements=Requirements(
179 aims=[Aim.design_place],
180 results=list(Result),
181 experience=Experience.one_project,
182 motivation=Motivation.low
183 ),
184 )),
185 ('agenda-setting',
186 Blueprint(
187 title=_('Agenda Setting'),
188 description=_('You can involve everyone in planning a meeting. '
189 'Collect ideas for an upcoming event and let your '
190 'participants vote on the topics you want to tackle.'),
191 content=[
192 ideas_phases.CollectPhase(),
193 ideas_phases.RatingPhase(),
194 ],
195 image='images/agenda_setting.png',
196 settings_model=None,
197 requirements=Requirements(
198 aims=[Aim.collect_ideas, Aim.discuss_topic, Aim.run_survey],
199 results=list(Result),
200 experience=Experience.one_project,
201 motivation=Motivation.low
202 ),
203 )),
204 ('commenting-text',
205 Blueprint(
206 title=_('Text Review'),
207 description=_('Let participants discuss individual paragraphs of a '
208 'text. This is ideal for discussing position papers or '
209 'a mission statements with many people.'),
210 content=[
211 documents_phases.CreateDocumentPhase(),
212 documents_phases.CommentPhase(),
213 ],
214 image='images/text_review.png',
215 settings_model=None,
216 requirements=Requirements(
217 aims=[Aim.work_document],
218 results=None,
219 experience=None,
220 motivation=None
221 ),
222 )),
223 ('flashpoll',
224 Blueprint(
225 title=_('Poll'),
226 description=_('Run customizable, multi-step polls on OPIN to get '
227 'detailed opinions on topics from the public or your '
228 'members. Via the OPIN polling app for iOS and Android '
229 'these polls are also accessible on smartphones.'),
230 content=[
231 flashpoll_phases.FlashpollPhase(),
232 ],
233 image='images/poll.png',
234 settings_model=('euth_flashpoll', 'Flashpoll'),
235 requirements=Requirements(
236 aims=[Aim.run_survey],
237 results=[Result.majority_vote],
238 experience=Experience.no_projects,
239 motivation=Motivation.not_found
240 ),
241 )),
242 ]
243
244
245 def get_fallback_blueprint(aim):
246 fallbacks = {
247 Aim.collect_ideas: 'brainstorming',
248 Aim.discuss_topic: 'brainstorming',
249 Aim.design_place: 'map-brainstorming',
250 Aim.run_survey: 'flashpoll',
251 Aim.run_competition: 'agenda-setting',
252 Aim.work_document: 'commenting-text'
253 }
254
255 name = fallbacks[aim]
256 return name, dict(blueprints)[name]
257
[end of euth/blueprintsuggest/blueprints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/blueprintsuggest/blueprints.py b/euth/blueprintsuggest/blueprints.py
--- a/euth/blueprintsuggest/blueprints.py
+++ b/euth/blueprintsuggest/blueprints.py
@@ -13,36 +13,36 @@
class Aim(Enum):
collect_ideas = (
'collect_ideas',
- _('create and gather new ideas or visions.'),
+ _('Create and gather new ideas or visions.'),
[_('(Urban) planning processes'),
_('Develop concepts or guiding principles')]
)
discuss_topic = (
'discuss_topic',
- _('gather feedback on a topic and discuss it in greater detail.'),
+ _('Gather feedback on a topic and discuss it in greater detail.'),
[_('Discuss existing concepts or plans'),
_('Develop solutions for existing problems')]
)
design_place = (
'design_place',
- _('design a place.'),
+ _('Design a place.'),
[_('(Urban) planning processes'),
_('Set the agenda of an event')]
)
run_survey = (
'run_survey',
- _('learn about what people like most.'),
+ _('Learn about what people like most.'),
[_('Majority votes'), _('Opinion polls')]
)
run_competition = (
'run_competition',
- _('run a competition.'),
+ _('Run a competition.'),
[_('All sorts of competitions, '
'like idea contests etc.')]
)
work_document = (
'work_document',
- _('work collaboratively on a text document.'),
+ _('Work collaboratively on a text document.'),
[_('Draft or revise statutes, articles, or charters'),
_('Involve different authors in writing a shared text')]
)
| {"golden_diff": "diff --git a/euth/blueprintsuggest/blueprints.py b/euth/blueprintsuggest/blueprints.py\n--- a/euth/blueprintsuggest/blueprints.py\n+++ b/euth/blueprintsuggest/blueprints.py\n@@ -13,36 +13,36 @@\n class Aim(Enum):\n collect_ideas = (\n 'collect_ideas',\n- _('create and gather new ideas or visions.'),\n+ _('Create and gather new ideas or visions.'),\n [_('(Urban) planning processes'),\n _('Develop concepts or guiding principles')]\n )\n discuss_topic = (\n 'discuss_topic',\n- _('gather feedback on a topic and discuss it in greater detail.'),\n+ _('Gather feedback on a topic and discuss it in greater detail.'),\n [_('Discuss existing concepts or plans'),\n _('Develop solutions for existing problems')]\n )\n design_place = (\n 'design_place',\n- _('design a place.'),\n+ _('Design a place.'),\n [_('(Urban) planning processes'),\n _('Set the agenda of an event')]\n )\n run_survey = (\n 'run_survey',\n- _('learn about what people like most.'),\n+ _('Learn about what people like most.'),\n [_('Majority votes'), _('Opinion polls')]\n )\n run_competition = (\n 'run_competition',\n- _('run a competition.'),\n+ _('Run a competition.'),\n [_('All sorts of competitions, '\n 'like idea contests etc.')]\n )\n work_document = (\n 'work_document',\n- _('work collaboratively on a text document.'),\n+ _('Work collaboratively on a text document.'),\n [_('Draft or revise statutes, articles, or charters'),\n _('Involve different authors in writing a shared text')]\n )\n", "issue": "DST | Changes on \"Aim\"-Page \nHey hey, Nicolas from Nexus/WP3 sent some remarks for changes. On this page it would be helpful to \r\n1) Start the sentences on the left hand side with a capital letter 2) Change the question into \"What is the aim of your participation project?\" and lastly 3) Add a line above the first row: Text on left column \"Aim\", Text for the right column \"Examples\"\r\n\r\n\r\n\n", "before_files": [{"content": "from collections import namedtuple\nfrom enum import Enum, unique\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.documents import phases as documents_phases\nfrom euth.flashpoll import phases as flashpoll_phases\nfrom euth.ideas import phases as ideas_phases\nfrom euth.maps import phases as map_phases\n\n\n@unique\nclass Aim(Enum):\n collect_ideas = (\n 'collect_ideas',\n _('create and gather new ideas or visions.'),\n [_('(Urban) planning processes'),\n _('Develop concepts or guiding principles')]\n )\n discuss_topic = (\n 'discuss_topic',\n _('gather feedback on a topic and discuss it in greater detail.'),\n [_('Discuss existing concepts or plans'),\n _('Develop solutions for existing problems')]\n )\n design_place = (\n 'design_place',\n _('design a place.'),\n [_('(Urban) planning processes'),\n _('Set the agenda of an event')]\n )\n run_survey = (\n 'run_survey',\n _('learn about what people like most.'),\n [_('Majority votes'), _('Opinion polls')]\n )\n run_competition = (\n 'run_competition',\n _('run a competition.'),\n [_('All sorts of competitions, '\n 'like idea contests etc.')]\n )\n work_document = (\n 'work_document',\n _('work collaboratively on a text document.'),\n [_('Draft or revise statutes, articles, or charters'),\n _('Involve different authors in writing a shared text')]\n )\n\n def __new__(cls, value, label, examples):\n obj = object.__new__(cls)\n obj._value_ = value\n obj.label = label\n obj.examples = examples\n return obj\n\n\n@unique\nclass Result(Enum):\n collect_ideas = 3, _('Collection of ideas or arguments')\n majority_vote = 
2, _('Majority vote')\n weighted_arguments = 1, _('Weighted arguments')\n\n def __new__(cls, value, label):\n obj = object.__new__(cls)\n obj._value_ = value\n obj.label = label\n return obj\n\n\n@unique\nclass Experience(Enum):\n five_projects = 4, _('More than 5 participative projects')\n two_projects = 3, _('More than 2 participative projects')\n one_project = 2, ('1-2 partcipative projects')\n no_projects = 1, ('I have no experiences in organising participative '\n ' projects')\n\n def __new__(cls, value, label):\n obj = object.__new__(cls)\n obj._value_ = value\n obj.label = label\n return obj\n\n\nclass Motivation(Enum):\n high = 4, _('High motivation')\n medium = 3, _('Medium motivation')\n low = 2, _('Low motivation')\n not_found = 1, _('No motivation')\n unkown = 2, _('I don\\'t know.')\n\n def __new__(cls, value, label):\n obj = object.__new__(cls)\n obj._value_ = value\n obj.label = label\n return obj\n\n\nRequirements = namedtuple(\n 'Requirements', [\n 'aims', 'results', 'experience', 'motivation'\n ])\n\n\nBlueprint = namedtuple(\n 'Blueprint', [\n 'title', 'description', 'content', 'image', 'settings_model',\n 'requirements'\n ])\n\n\nblueprints = [\n ('brainstorming',\n Blueprint(\n title=_('Brainstorming'),\n description=_('Collect ideas, questions and input concerning '\n 'a problem or a question from a wide array of people.'),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.collect_ideas, Aim.discuss_topic],\n results=[Result.collect_ideas],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n )),\n ('map-brainstorming',\n Blueprint(\n title=_('Spatial Brainstorming'),\n description=_('Collect ideas, questions and input concerning a '\n 'problem or a question from a wide array of people.'),\n content=[\n map_phases.CollectPhase(),\n ],\n image='images/spatial_brainstorming.png',\n settings_model=('euth_maps', 'AreaSettings'),\n requirements=Requirements(\n aims=[Aim.design_place],\n results=[Result.collect_ideas],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n )),\n ('idea-challenge',\n Blueprint(\n title=_('Idea Challenge'),\n description=_('Run a challenge and find the best ideas to solve '\n 'a particular problem.'),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/challenge.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.run_competition, Aim.run_survey],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n )),\n ('map-idea-challenge',\n Blueprint(\n title=_('Spatial Idea Challenge'),\n description=_('Run a challenge concerning a certain area or space in '\n 'your community and find the best ideas to solve a '\n 'particular problem.'),\n content=[\n map_phases.CollectPhase(),\n map_phases.RatingPhase(),\n ],\n image='images/spatial_challenge.png',\n settings_model=('euth_maps', 'AreaSettings'),\n requirements=Requirements(\n aims=[Aim.design_place],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n )),\n ('agenda-setting',\n Blueprint(\n title=_('Agenda Setting'),\n description=_('You can involve everyone in planning a meeting. 
'\n 'Collect ideas for an upcoming event and let your '\n 'participants vote on the topics you want to tackle.'),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/agenda_setting.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.collect_ideas, Aim.discuss_topic, Aim.run_survey],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n )),\n ('commenting-text',\n Blueprint(\n title=_('Text Review'),\n description=_('Let participants discuss individual paragraphs of a '\n 'text. This is ideal for discussing position papers or '\n 'a mission statements with many people.'),\n content=[\n documents_phases.CreateDocumentPhase(),\n documents_phases.CommentPhase(),\n ],\n image='images/text_review.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.work_document],\n results=None,\n experience=None,\n motivation=None\n ),\n )),\n ('flashpoll',\n Blueprint(\n title=_('Poll'),\n description=_('Run customizable, multi-step polls on OPIN to get '\n 'detailed opinions on topics from the public or your '\n 'members. Via the OPIN polling app for iOS and Android '\n 'these polls are also accessible on smartphones.'),\n content=[\n flashpoll_phases.FlashpollPhase(),\n ],\n image='images/poll.png',\n settings_model=('euth_flashpoll', 'Flashpoll'),\n requirements=Requirements(\n aims=[Aim.run_survey],\n results=[Result.majority_vote],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n )),\n]\n\n\ndef get_fallback_blueprint(aim):\n fallbacks = {\n Aim.collect_ideas: 'brainstorming',\n Aim.discuss_topic: 'brainstorming',\n Aim.design_place: 'map-brainstorming',\n Aim.run_survey: 'flashpoll',\n Aim.run_competition: 'agenda-setting',\n Aim.work_document: 'commenting-text'\n }\n\n name = fallbacks[aim]\n return name, dict(blueprints)[name]\n", "path": "euth/blueprintsuggest/blueprints.py"}]} | 3,081 | 367 |
gh_patches_debug_57793 | rasdani/github-patches | git_diff | catalyst-team__catalyst-855 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
EarlyStoppingCallback considers first epoch as bad
## 🐛 Bug Report
EarlyStoppingCallback considers the first epoch as bad. This can lead, for example, to always stopping after the first epoch if patience=1.
### How To Reproduce
You can train a model with early stopping and patience=1 and see that it always stops after the first epoch. Or you can use the unit test below that I added to the pull request.
#### Code sample
```python
from unittest.mock import MagicMock, PropertyMock
from catalyst.core import EarlyStoppingCallback
def test_patience1():
"""@TODO: Docs. Contribution is welcome."""
early_stop = EarlyStoppingCallback(1)
runner = MagicMock()
type(runner).stage_name = PropertyMock(return_value="training")
type(runner).valid_metrics = PropertyMock(return_value={"loss": 0.001})
stop_mock = PropertyMock(return_value=False)
type(runner).need_early_stop = stop_mock
early_stop.on_epoch_end(runner)
assert stop_mock.mock_calls == []
```
### Expected behavior
Training doesn't stop after the first epoch, and the unit test passes.
### Environment
```bash
Catalyst version: 20.06
PyTorch version: 1.5.1
Is debug build: No
CUDA used to build PyTorch: None
TensorFlow version: N/A
TensorBoard version: 2.2.2
OS: Mac OSX 10.15.5
GCC version: Could not collect
CMake version: version 3.8.0
Python version: 3.7
Is CUDA available: No
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
Versions of relevant libraries:
[pip3] catalyst-codestyle==20.4
[pip3] catalyst-sphinx-theme==1.1.1
[pip3] efficientnet-pytorch==0.6.3
[pip3] numpy==1.18.5
[pip3] segmentation-models-pytorch==0.1.0
[pip3] tensorboard==2.2.2
[pip3] tensorboard-plugin-wit==1.6.0.post3
[pip3] tensorboardX==2.0
[pip3] torch==1.5.1
[pip3] torchvision==0.6.1
[conda] catalyst-codestyle 20.4 <pip>
[conda] catalyst-sphinx-theme 1.1.1 <pip>
[conda] efficientnet-pytorch 0.6.3 <pip>
[conda] numpy 1.18.5 <pip>
[conda] segmentation-models-pytorch 0.1.0 <pip>
[conda] tensorboard 2.2.2 <pip>
[conda] tensorboard-plugin-wit 1.6.0.post3 <pip>
[conda] tensorboardX 2.0 <pip>
[conda] torch 1.5.1 <pip>
[conda] torchvision 0.6.1 <pip>
```
</issue>
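A minimal sketch of the behaviour the report is asking for, mirroring the golden diff recorded below for this example: the `None` check is folded into the improvement branch, so the first epoch can never be counted as a bad epoch.

```python
# Hedged sketch of the corrected on_epoch_end body (excerpt of the class shown below).
score = runner.valid_metrics[self.metric]
if self.best_score is None or self.is_better(score, self.best_score):
    # First epoch, or an improvement: reset the bad-epoch counter.
    self.num_bad_epochs = 0
    self.best_score = score
else:
    self.num_bad_epochs += 1

if self.num_bad_epochs >= self.patience:
    runner.need_early_stop = True
```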
<code>
[start of catalyst/core/callbacks/early_stop.py]
1 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
2 from catalyst.core.runner import IRunner
3
4
5 class CheckRunCallback(Callback):
6 """@TODO: Docs. Contribution is welcome."""
7
8 def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):
9 """@TODO: Docs. Contribution is welcome."""
10 super().__init__(order=CallbackOrder.external, node=CallbackNode.all)
11 self.num_batch_steps = num_batch_steps
12 self.num_epoch_steps = num_epoch_steps
13
14 def on_epoch_end(self, runner: IRunner):
15 """@TODO: Docs. Contribution is welcome."""
16 if runner.epoch >= self.num_epoch_steps:
17 runner.need_early_stop = True
18
19 def on_batch_end(self, runner: IRunner):
20 """@TODO: Docs. Contribution is welcome."""
21 if runner.loader_batch_step >= self.num_batch_steps:
22 runner.need_early_stop = True
23
24
25 class EarlyStoppingCallback(Callback):
26 """@TODO: Docs. Contribution is welcome."""
27
28 def __init__(
29 self,
30 patience: int,
31 metric: str = "loss",
32 minimize: bool = True,
33 min_delta: float = 1e-6,
34 ):
35 """@TODO: Docs. Contribution is welcome."""
36 super().__init__(order=CallbackOrder.external, node=CallbackNode.all)
37 self.best_score = None
38 self.metric = metric
39 self.patience = patience
40 self.num_bad_epochs = 0
41 self.is_better = None
42
43 if minimize:
44 self.is_better = lambda score, best: score <= (best - min_delta)
45 else:
46 self.is_better = lambda score, best: score >= (best + min_delta)
47
48 def on_epoch_end(self, runner: IRunner) -> None:
49 """@TODO: Docs. Contribution is welcome."""
50 if runner.stage_name.startswith("infer"):
51 return
52
53 score = runner.valid_metrics[self.metric]
54 if self.best_score is None:
55 self.best_score = score
56 if self.is_better(score, self.best_score):
57 self.num_bad_epochs = 0
58 self.best_score = score
59 else:
60 self.num_bad_epochs += 1
61
62 if self.num_bad_epochs >= self.patience:
63 print(f"Early stop at {runner.epoch} epoch")
64 runner.need_early_stop = True
65
[end of catalyst/core/callbacks/early_stop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py
--- a/catalyst/core/callbacks/early_stop.py
+++ b/catalyst/core/callbacks/early_stop.py
@@ -51,9 +51,7 @@
return
score = runner.valid_metrics[self.metric]
- if self.best_score is None:
- self.best_score = score
- if self.is_better(score, self.best_score):
+ if self.best_score is None or self.is_better(score, self.best_score):
self.num_bad_epochs = 0
self.best_score = score
else:
| {"golden_diff": "diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py\n--- a/catalyst/core/callbacks/early_stop.py\n+++ b/catalyst/core/callbacks/early_stop.py\n@@ -51,9 +51,7 @@\n return\n \n score = runner.valid_metrics[self.metric]\n- if self.best_score is None:\n- self.best_score = score\n- if self.is_better(score, self.best_score):\n+ if self.best_score is None or self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n", "issue": "EarlyStoppingCallback considers first epoch as bad\n## \ud83d\udc1b Bug Report\r\nEarlyStoppingCallback considers first epoch as bad. This can lead for example to always stopping after first epoch if patience=1.\r\n\r\n\r\n### How To Reproduce\r\nYou can train a model with early stopping and patience=1 and see that it always stops after first epoch. Or you can use the unit test below that I added to pull request.\r\n\r\n#### Code sample\r\n```python\r\nfrom unittest.mock import MagicMock, PropertyMock\r\n\r\nfrom catalyst.core import EarlyStoppingCallback\r\n\r\n\r\ndef test_patience1():\r\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\r\n early_stop = EarlyStoppingCallback(1)\r\n runner = MagicMock()\r\n type(runner).stage_name = PropertyMock(return_value=\"training\")\r\n type(runner).valid_metrics = PropertyMock(return_value={\"loss\": 0.001})\r\n stop_mock = PropertyMock(return_value=False)\r\n type(runner).need_early_stop = stop_mock\r\n\r\n early_stop.on_epoch_end(runner)\r\n\r\n assert stop_mock.mock_calls == []\r\n```\r\n\r\n### Expected behavior\r\nTraining doesn't stop after first epoch. And the unit test passes.\r\n\r\n\r\n### Environment\r\n```bash\r\nCatalyst version: 20.06\r\nPyTorch version: 1.5.1\r\nIs debug build: No\r\nCUDA used to build PyTorch: None\r\nTensorFlow version: N/A\r\nTensorBoard version: 2.2.2\r\n\r\nOS: Mac OSX 10.15.5\r\nGCC version: Could not collect\r\nCMake version: version 3.8.0\r\n\r\nPython version: 3.7\r\nIs CUDA available: No\r\nCUDA runtime version: No CUDA\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\n\r\nVersions of relevant libraries:\r\n[pip3] catalyst-codestyle==20.4\r\n[pip3] catalyst-sphinx-theme==1.1.1\r\n[pip3] efficientnet-pytorch==0.6.3\r\n[pip3] numpy==1.18.5\r\n[pip3] segmentation-models-pytorch==0.1.0\r\n[pip3] tensorboard==2.2.2\r\n[pip3] tensorboard-plugin-wit==1.6.0.post3\r\n[pip3] tensorboardX==2.0\r\n[pip3] torch==1.5.1\r\n[pip3] torchvision==0.6.1\r\n[conda] catalyst-codestyle 20.4 <pip>\r\n[conda] catalyst-sphinx-theme 1.1.1 <pip>\r\n[conda] efficientnet-pytorch 0.6.3 <pip>\r\n[conda] numpy 1.18.5 <pip>\r\n[conda] segmentation-models-pytorch 0.1.0 <pip>\r\n[conda] tensorboard 2.2.2 <pip>\r\n[conda] tensorboard-plugin-wit 1.6.0.post3 <pip>\r\n[conda] tensorboardX 2.0 <pip>\r\n[conda] torch 1.5.1 <pip>\r\n[conda] torchvision 0.6.1 <pip>\r\n```\r\n\n", "before_files": [{"content": "from catalyst.core.callback import Callback, CallbackNode, CallbackOrder\nfrom catalyst.core.runner import IRunner\n\n\nclass CheckRunCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.num_batch_steps = num_batch_steps\n self.num_epoch_steps = num_epoch_steps\n\n def on_epoch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n if runner.epoch >= self.num_epoch_steps:\n runner.need_early_stop = True\n\n def on_batch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.loader_batch_step >= self.num_batch_steps:\n runner.need_early_stop = True\n\n\nclass EarlyStoppingCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(\n self,\n patience: int,\n metric: str = \"loss\",\n minimize: bool = True,\n min_delta: float = 1e-6,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.best_score = None\n self.metric = metric\n self.patience = patience\n self.num_bad_epochs = 0\n self.is_better = None\n\n if minimize:\n self.is_better = lambda score, best: score <= (best - min_delta)\n else:\n self.is_better = lambda score, best: score >= (best + min_delta)\n\n def on_epoch_end(self, runner: IRunner) -> None:\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.stage_name.startswith(\"infer\"):\n return\n\n score = runner.valid_metrics[self.metric]\n if self.best_score is None:\n self.best_score = score\n if self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n self.num_bad_epochs += 1\n\n if self.num_bad_epochs >= self.patience:\n print(f\"Early stop at {runner.epoch} epoch\")\n runner.need_early_stop = True\n", "path": "catalyst/core/callbacks/early_stop.py"}]} | 1,882 | 144 |
gh_patches_debug_840 | rasdani/github-patches | git_diff | nilearn__nilearn-507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test for compatibility of old version of six
For the moment, we are compatible with the latest version of six. Recently, somebody pointed out that we did not support six 1.5.2. We should investigate, decide which version we should be compatible with and then add this to Travis.
</issue>
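A minimal sketch of the kind of change the golden diff recorded below makes: `six` is simply added to the list of packages whose versions are printed on Travis, so an unexpectedly old `six` shows up in the build log.

```python
# Hedged sketch; the golden diff below prepends 'six' to this list.
DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']

for package_name in DEPENDENCIES:
    print_package_version(package_name)
```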
<code>
[start of continuous_integration/show-python-packages-versions.py]
1 import sys
2
3 DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']
4
5
6 def print_package_version(package_name, indent=' '):
7 try:
8 package = __import__(package_name)
9 version = getattr(package, '__version__', None)
10 package_file = getattr(package, '__file__', )
11 provenance_info = '{0} from {1}'.format(version, package_file)
12 except ImportError:
13 provenance_info = 'not installed'
14
15 print('{0}{1}: {2}'.format(indent, package_name, provenance_info))
16
17 if __name__ == '__main__':
18 print('=' * 120)
19 print('Python %s' % str(sys.version))
20 print('from: %s\n' % sys.executable)
21
22 print('Dependencies versions')
23 for package_name in DEPENDENCIES:
24 print_package_version(package_name)
25 print('=' * 120)
26
[end of continuous_integration/show-python-packages-versions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py
--- a/continuous_integration/show-python-packages-versions.py
+++ b/continuous_integration/show-python-packages-versions.py
@@ -1,6 +1,6 @@
import sys
-DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']
+DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']
def print_package_version(package_name, indent=' '):
| {"golden_diff": "diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py\n--- a/continuous_integration/show-python-packages-versions.py\n+++ b/continuous_integration/show-python-packages-versions.py\n@@ -1,6 +1,6 @@\n import sys\n \n-DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n+DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n \n \n def print_package_version(package_name, indent=' '):\n", "issue": "Add test for compatibility of old version of six\nFor the moment, we are compatible with the latest version of six. Recently, somebody pointed out that we did not support six 1.5.2. We should investigate, decide which version we should be compatible with and then add this to Travis.\n\n", "before_files": [{"content": "import sys\n\nDEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n\n\ndef print_package_version(package_name, indent=' '):\n try:\n package = __import__(package_name)\n version = getattr(package, '__version__', None)\n package_file = getattr(package, '__file__', )\n provenance_info = '{0} from {1}'.format(version, package_file)\n except ImportError:\n provenance_info = 'not installed'\n\n print('{0}{1}: {2}'.format(indent, package_name, provenance_info))\n\nif __name__ == '__main__':\n print('=' * 120)\n print('Python %s' % str(sys.version))\n print('from: %s\\n' % sys.executable)\n\n print('Dependencies versions')\n for package_name in DEPENDENCIES:\n print_package_version(package_name)\n print('=' * 120)\n", "path": "continuous_integration/show-python-packages-versions.py"}]} | 848 | 123 |
gh_patches_debug_11637 | rasdani/github-patches | git_diff | getsentry__sentry-59857 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jira deprecation of glance panels
Notice from the Atlassian Support team about glance panel deprecation. 
AC:
- Review the deprecation plan
- Build a recommendation based on how we're impacted. If minor development work is required, complete that with this ticket. If significant work is required, notify EM/PM to share impact and come up with next steps together.
Email from Atlassian:
```
Hope you are having a good day!
As part of this deprecation notice (https://developer.atlassian.com/cloud/jira/platform/changelog/#CHANGE-897), we are reaching out because we have identified that your app, “Sentry,” will be affected by the deprecation of glance panels.
This was initially scheduled for the 6th of October, but we have delayed it until the 30th of November.
The jiraIssueGlances and jira:issueGlance modules in Forge (https://developer.atlassian.com/platform/forge/manifest-reference/modules/jira-issue-glance/) and Connect (https://developer.atlassian.com/cloud/jira/platform/modules/issue-glance/) are being deprecated and replaced with the issueContext module.
We recommend transitioning from the glance panel to the new issue context module before the 30th of November.
Please note, we will not be extending this deprecation date as we announced it on the 30th of March.
Let me know if you need any further assistance,
Ahmud
Product Manager-Jira Cloud
```
</issue>
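A minimal sketch of the descriptor change applied by the golden diff recorded below: the deprecated `jiraIssueGlances` module key is renamed to the new issue-context module, while the panel definition itself is left untouched.

```python
# Hedged sketch; only the module key changes in the descriptor dict.
"jiraIssueContexts": [
    {
        "icon": {"width": 24, "height": 24, "url": sentry_logo},
        "content": {"type": "label", "label": {"value": "Linked Issues"}},
        "target": {"type": "web_panel", "url": "/extensions/jira/issue/{issue.key}/"},
        "name": {"value": "Sentry "},
        "key": "sentry-issues-glance",
    }
],
```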
<code>
[start of src/sentry/integrations/jira/endpoints/descriptor.py]
1 from django.conf import settings
2 from django.urls import reverse
3 from rest_framework.request import Request
4 from rest_framework.response import Response
5
6 from sentry.api.api_publish_status import ApiPublishStatus
7 from sentry.api.base import Endpoint, control_silo_endpoint
8 from sentry.utils.assets import get_frontend_app_asset_url
9 from sentry.utils.http import absolute_uri
10
11 from .. import JIRA_KEY
12
13 scopes = ["read", "write", "act_as_user"]
14 # For Jira, only approved apps can use the access_email_addresses scope
15 # This scope allows Sentry to use the email endpoint (https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-user-email-get)
16 # We use the email with Jira 2-way sync in order to match the user
17 if settings.JIRA_USE_EMAIL_SCOPE:
18 scopes.append("access_email_addresses")
19
20
21 @control_silo_endpoint
22 class JiraDescriptorEndpoint(Endpoint):
23 publish_status = {
24 "GET": ApiPublishStatus.UNKNOWN,
25 }
26 """
27 Provides the metadata needed by Jira to setup an instance of the Sentry integration within Jira.
28 Only used by on-prem orgs and devs setting up local instances of the integration. (Sentry SAAS
29 already has an established, official instance of the Sentry integration registered with Jira.)
30 """
31
32 authentication_classes = ()
33 permission_classes = ()
34
35 def get(self, request: Request) -> Response:
36 sentry_logo = absolute_uri(
37 get_frontend_app_asset_url("sentry", "entrypoints/logo-sentry.svg")
38 )
39 return self.respond(
40 {
41 "name": "Sentry",
42 "description": "Connect your Sentry organization to one or more of your Jira cloud instances. Get started streamlining your bug-squashing workflow by allowing your Sentry and Jira instances to work together.",
43 "key": JIRA_KEY,
44 "baseUrl": absolute_uri(),
45 "vendor": {"name": "Sentry", "url": "https://sentry.io"},
46 "authentication": {"type": "jwt"},
47 "lifecycle": {
48 "installed": "/extensions/jira/installed/",
49 "uninstalled": "/extensions/jira/uninstalled/",
50 },
51 "apiVersion": 1,
52 "modules": {
53 "postInstallPage": {
54 "url": "/extensions/jira/ui-hook/",
55 "name": {"value": "Configure Sentry Add-on"},
56 "key": "post-install-sentry",
57 },
58 "configurePage": {
59 "url": "/extensions/jira/ui-hook/",
60 "name": {"value": "Configure Sentry Add-on"},
61 "key": "configure-sentry",
62 },
63 "jiraIssueGlances": [
64 {
65 "icon": {"width": 24, "height": 24, "url": sentry_logo},
66 "content": {"type": "label", "label": {"value": "Linked Issues"}},
67 "target": {
68 "type": "web_panel",
69 "url": "/extensions/jira/issue/{issue.key}/",
70 },
71 "name": {"value": "Sentry "},
72 "key": "sentry-issues-glance",
73 }
74 ],
75 "webhooks": [
76 {
77 "event": "jira:issue_updated",
78 "url": reverse("sentry-extensions-jira-issue-updated"),
79 "excludeBody": False,
80 }
81 ],
82 },
83 "apiMigrations": {"gdpr": True, "context-qsh": True, "signed-install": True},
84 "scopes": scopes,
85 }
86 )
87
[end of src/sentry/integrations/jira/endpoints/descriptor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/integrations/jira/endpoints/descriptor.py b/src/sentry/integrations/jira/endpoints/descriptor.py
--- a/src/sentry/integrations/jira/endpoints/descriptor.py
+++ b/src/sentry/integrations/jira/endpoints/descriptor.py
@@ -60,7 +60,7 @@
"name": {"value": "Configure Sentry Add-on"},
"key": "configure-sentry",
},
- "jiraIssueGlances": [
+ "jiraIssueContexts": [
{
"icon": {"width": 24, "height": 24, "url": sentry_logo},
"content": {"type": "label", "label": {"value": "Linked Issues"}},
| {"golden_diff": "diff --git a/src/sentry/integrations/jira/endpoints/descriptor.py b/src/sentry/integrations/jira/endpoints/descriptor.py\n--- a/src/sentry/integrations/jira/endpoints/descriptor.py\n+++ b/src/sentry/integrations/jira/endpoints/descriptor.py\n@@ -60,7 +60,7 @@\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"configure-sentry\",\n },\n- \"jiraIssueGlances\": [\n+ \"jiraIssueContexts\": [\n {\n \"icon\": {\"width\": 24, \"height\": 24, \"url\": sentry_logo},\n \"content\": {\"type\": \"label\", \"label\": {\"value\": \"Linked Issues\"}},\n", "issue": "Jira deprecation of glance panels\nNotice from Atlassian Support team about glance panel deprecation. \r\n\r\nAC:\r\n- Review the deprecation plan\r\n- Build a recommendation based on how we're impacted. If minor development work is required, complete that with this ticket. If significant work is required, notify EM/PM to share impact and come up with next steps together.\r\n\r\nEmail from Atlassian:\r\n```\r\nHope you are having a good day!\r\nAs part of this deprecation notice (https://developer.atlassian.com/cloud/jira/platform/changelog/#CHANGE-897), we are reaching out because we have identified that your app, \u201cSentry,\u201d will be affected by the deprecation of glance panels. \r\nThis was initially scheduled for the 6th of October, but we have delayed it until the 30th of November.\r\nThe jiraIssueGlances and jira:issueGlance modules in Forge (https://developer.atlassian.com/platform/forge/manifest-reference/modules/jira-issue-glance/) and Connect (https://developer.atlassian.com/cloud/jira/platform/modules/issue-glance/) are being deprecated and replaced with the issueContext module. \r\nWe recommend transitioning from the glance panel to the new issue context module before the 30th of November. \r\nPlease note, we will not be extending this deprecation date as we announced it on the 30th of March.\r\nLet me know if you need any further assistance,\r\nAhmud\r\nProduct Manager-Jira Cloud\r\n```\n", "before_files": [{"content": "from django.conf import settings\nfrom django.urls import reverse\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry.api.api_publish_status import ApiPublishStatus\nfrom sentry.api.base import Endpoint, control_silo_endpoint\nfrom sentry.utils.assets import get_frontend_app_asset_url\nfrom sentry.utils.http import absolute_uri\n\nfrom .. import JIRA_KEY\n\nscopes = [\"read\", \"write\", \"act_as_user\"]\n# For Jira, only approved apps can use the access_email_addresses scope\n# This scope allows Sentry to use the email endpoint (https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-user-email-get)\n# We use the email with Jira 2-way sync in order to match the user\nif settings.JIRA_USE_EMAIL_SCOPE:\n scopes.append(\"access_email_addresses\")\n\n\n@control_silo_endpoint\nclass JiraDescriptorEndpoint(Endpoint):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n \"\"\"\n Provides the metadata needed by Jira to setup an instance of the Sentry integration within Jira.\n Only used by on-prem orgs and devs setting up local instances of the integration. 
(Sentry SAAS\n already has an established, official instance of the Sentry integration registered with Jira.)\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n def get(self, request: Request) -> Response:\n sentry_logo = absolute_uri(\n get_frontend_app_asset_url(\"sentry\", \"entrypoints/logo-sentry.svg\")\n )\n return self.respond(\n {\n \"name\": \"Sentry\",\n \"description\": \"Connect your Sentry organization to one or more of your Jira cloud instances. Get started streamlining your bug-squashing workflow by allowing your Sentry and Jira instances to work together.\",\n \"key\": JIRA_KEY,\n \"baseUrl\": absolute_uri(),\n \"vendor\": {\"name\": \"Sentry\", \"url\": \"https://sentry.io\"},\n \"authentication\": {\"type\": \"jwt\"},\n \"lifecycle\": {\n \"installed\": \"/extensions/jira/installed/\",\n \"uninstalled\": \"/extensions/jira/uninstalled/\",\n },\n \"apiVersion\": 1,\n \"modules\": {\n \"postInstallPage\": {\n \"url\": \"/extensions/jira/ui-hook/\",\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"post-install-sentry\",\n },\n \"configurePage\": {\n \"url\": \"/extensions/jira/ui-hook/\",\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"configure-sentry\",\n },\n \"jiraIssueGlances\": [\n {\n \"icon\": {\"width\": 24, \"height\": 24, \"url\": sentry_logo},\n \"content\": {\"type\": \"label\", \"label\": {\"value\": \"Linked Issues\"}},\n \"target\": {\n \"type\": \"web_panel\",\n \"url\": \"/extensions/jira/issue/{issue.key}/\",\n },\n \"name\": {\"value\": \"Sentry \"},\n \"key\": \"sentry-issues-glance\",\n }\n ],\n \"webhooks\": [\n {\n \"event\": \"jira:issue_updated\",\n \"url\": reverse(\"sentry-extensions-jira-issue-updated\"),\n \"excludeBody\": False,\n }\n ],\n },\n \"apiMigrations\": {\"gdpr\": True, \"context-qsh\": True, \"signed-install\": True},\n \"scopes\": scopes,\n }\n )\n", "path": "src/sentry/integrations/jira/endpoints/descriptor.py"}]} | 1,784 | 171 |
gh_patches_debug_21950 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] The Added Loss term for InducingKernel seems flipped in sign?
# 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
```
def loss(self, *params):
prior_covar = self.prior_dist.lazy_covariance_matrix
variational_covar = self.variational_dist.lazy_covariance_matrix
diag = prior_covar.diag() - variational_covar.diag()
shape = prior_covar.shape[:-1]
noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
return 0.5 * (diag / noise_diag).sum()
```
This is the current code for InducingPointKernelAddedLossTerm.loss
From what I see, this "loss term" is added into the mll that is returned by the `ExactMarginalLogLikelihood` class. This in itself is misleading as the loss is usually the negative of the mll.
Moreover, the variational negative loss used to evaluate inducing points is given below

As can be seen, the above is the expression for the pseudo-mll that is maximized when optimizing the inducing points. In this expression, the component contributed by `InducingPointKernelAddedLossTerm` is the negative of the value that is actually being added into the loss.
This is quite likely a significant bug. Please fix (just invert the sign of `diag` above)
</issue>
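A minimal sketch of the sign fix being requested; note that the golden diff recorded below additionally swaps the order of the `prior_dist` and `variational_dist` constructor arguments.

```python
# Hedged sketch of the corrected loss term (variational diagonal minus prior diagonal).
def loss(self, *params):
    prior_covar = self.prior_dist.lazy_covariance_matrix
    variational_covar = self.variational_dist.lazy_covariance_matrix
    diag = variational_covar.diag() - prior_covar.diag()
    shape = prior_covar.shape[:-1]
    noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
    return 0.5 * (diag / noise_diag).sum()
```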
<code>
[start of gpytorch/mlls/inducing_point_kernel_added_loss_term.py]
1 #!/usr/bin/env python3
2
3 from .added_loss_term import AddedLossTerm
4
5
6 class InducingPointKernelAddedLossTerm(AddedLossTerm):
7 def __init__(self, variational_dist, prior_dist, likelihood):
8 self.prior_dist = prior_dist
9 self.variational_dist = variational_dist
10 self.likelihood = likelihood
11
12 def loss(self, *params):
13 prior_covar = self.prior_dist.lazy_covariance_matrix
14 variational_covar = self.variational_dist.lazy_covariance_matrix
15 diag = prior_covar.diag() - variational_covar.diag()
16 shape = prior_covar.shape[:-1]
17 noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
18 return 0.5 * (diag / noise_diag).sum()
19
[end of gpytorch/mlls/inducing_point_kernel_added_loss_term.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py
--- a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py
+++ b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py
@@ -4,7 +4,7 @@
class InducingPointKernelAddedLossTerm(AddedLossTerm):
- def __init__(self, variational_dist, prior_dist, likelihood):
+ def __init__(self, prior_dist, variational_dist, likelihood):
self.prior_dist = prior_dist
self.variational_dist = variational_dist
self.likelihood = likelihood
@@ -12,7 +12,7 @@
def loss(self, *params):
prior_covar = self.prior_dist.lazy_covariance_matrix
variational_covar = self.variational_dist.lazy_covariance_matrix
- diag = prior_covar.diag() - variational_covar.diag()
+ diag = variational_covar.diag() - prior_covar.diag()
shape = prior_covar.shape[:-1]
noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
return 0.5 * (diag / noise_diag).sum()
| {"golden_diff": "diff --git a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py\n--- a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py\n+++ b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py\n@@ -4,7 +4,7 @@\n \n \n class InducingPointKernelAddedLossTerm(AddedLossTerm):\n- def __init__(self, variational_dist, prior_dist, likelihood):\n+ def __init__(self, prior_dist, variational_dist, likelihood):\n self.prior_dist = prior_dist\n self.variational_dist = variational_dist\n self.likelihood = likelihood\n@@ -12,7 +12,7 @@\n def loss(self, *params):\n prior_covar = self.prior_dist.lazy_covariance_matrix\n variational_covar = self.variational_dist.lazy_covariance_matrix\n- diag = prior_covar.diag() - variational_covar.diag()\n+ diag = variational_covar.diag() - prior_covar.diag()\n shape = prior_covar.shape[:-1]\n noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()\n return 0.5 * (diag / noise_diag).sum()\n", "issue": "[Bug] The Added Loss term for InducingKernel seems flipped in sign?\n# \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n```\r\n def loss(self, *params):\r\n prior_covar = self.prior_dist.lazy_covariance_matrix\r\n variational_covar = self.variational_dist.lazy_covariance_matrix\r\n diag = prior_covar.diag() - variational_covar.diag()\r\n shape = prior_covar.shape[:-1]\r\n noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()\r\n return 0.5 * (diag / noise_diag).sum()\r\n```\r\nThis is the current code for InducingPointKernelAddedLossTerm.loss\r\n\r\nFrom what I see, this \"loss term\" is added into the mll that is returned by the `ExactMarginalLogLikelihood` class. This in itself is misleading as the loss is usually the negative of the mll.\r\n\r\nMoreover, the variational negative loss used to evaluate inducing points is given below\r\n\r\n\r\n\r\nAs can be seen, the above is the expression for the pseudo-mll that is maximized when optimizing the inducing points. in this, the component of `InducingPointKernelAddedLossTerm` is negative to the value that is being added into the loss.\r\n\r\nThis is quite likely a significant bug. Please fix (just invert the sign of `diag` above)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom .added_loss_term import AddedLossTerm\n\n\nclass InducingPointKernelAddedLossTerm(AddedLossTerm):\n def __init__(self, variational_dist, prior_dist, likelihood):\n self.prior_dist = prior_dist\n self.variational_dist = variational_dist\n self.likelihood = likelihood\n\n def loss(self, *params):\n prior_covar = self.prior_dist.lazy_covariance_matrix\n variational_covar = self.variational_dist.lazy_covariance_matrix\n diag = prior_covar.diag() - variational_covar.diag()\n shape = prior_covar.shape[:-1]\n noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()\n return 0.5 * (diag / noise_diag).sum()\n", "path": "gpytorch/mlls/inducing_point_kernel_added_loss_term.py"}]} | 1,110 | 284 |
gh_patches_debug_3955 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-656 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cache path inconsistency in cc and python?
I noticed that these two files are using different conventions for creating cache paths. The cc file says the python file should align with the cc file, but they are still different. Is this intended?
https://github.com/facebookresearch/CompilerGym/blob/61f460fadee2454ff8fca3bbd5a5d338854cc4a2/compiler_gym/util/runfiles_path.py#L101-L105
https://github.com/facebookresearch/CompilerGym/blob/1596776ad35a7aeca45ed85b2e073af824844e29/compiler_gym/util/RunfilesPath.cc#L61-L65
</issue>
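A minimal sketch of how the golden diff recorded below reconciles the two conventions: the Python `HOME`-based fallback is moved under `~/.local/cache` so it matches the C++ implementation.

```python
# Hedged sketch; only the HOME branch of cache_path() changes.
import os
from getpass import getuser
from pathlib import Path


def cache_path(relpath: str) -> Path:
    forced = os.environ.get("COMPILER_GYM_CACHE")
    if forced:
        return Path(forced) / relpath
    elif os.environ.get("HOME"):
        # Aligned with the C++ side: ~/.local/cache/compiler_gym
        return Path("~/.local/cache/compiler_gym").expanduser() / relpath
    else:
        return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
```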
<code>
[start of compiler_gym/util/runfiles_path.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 """Module for resolving a runfiles path."""
6 import os
7 from datetime import datetime
8 from getpass import getuser
9 from pathlib import Path
10 from threading import Lock
11 from time import sleep
12 from typing import Optional
13
14 # NOTE(cummins): Moving this file may require updating this relative path.
15 _PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), "../../")).resolve(
16 strict=True
17 )
18
19 _CREATE_LOGGING_DIR_LOCK = Lock()
20
21
22 def runfiles_path(relpath: str) -> Path:
23 """Resolve the path to a runfiles data path.
24
25 No checks are to made to ensure that the path, or the containing directory,
26 exist.
27
28 Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running
29 outside of bazel.
30
31 :param relpath: The relative path within the runfiles tree.
32
33 :return: An absolute path.
34 """
35 # There are three ways of determining a runfiles path:
36 # 1. Set the COMPILER_GYM_RUNFILES environment variable.
37 # 2. Using the rules_python library that is provided by bazel. This will
38 # fail if not being executed within a bazel sandbox.
39 # 3. Computing the path relative to the location of this file. This is the
40 # fallback approach that is used for when the code has been installed
41 # by setuptools.
42 runfiles_path = os.environ.get("COMPILER_GYM_RUNFILES")
43 if runfiles_path:
44 return Path(runfiles_path) / relpath
45 else:
46 try:
47 from rules_python.python.runfiles import runfiles
48
49 return Path(
50 runfiles.Create().Rlocation(
51 "CompilerGym" if relpath == "." else f"CompilerGym/{relpath}"
52 )
53 )
54 except (ModuleNotFoundError, TypeError):
55 return _PACKAGE_ROOT / relpath
56
57
58 def site_data_path(relpath: str) -> Path:
59 """Return a path within the site data directory.
60
61 CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.
62 The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable
63 :code:`$COMPILER_GYM_SITE_DATA` to override this default location.
64
65 No checks are to made to ensure that the path, or the containing directory,
66 exist.
67
68 :param relpath: The relative path within the site data tree.
69
70 :return: An absolute path.
71 """
72 # NOTE(cummins): This function has a matching implementation in the C++
73 # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior
74 # here must be reflected in the C++ version.
75 forced = os.environ.get("COMPILER_GYM_SITE_DATA")
76 if forced:
77 return Path(forced) / relpath
78 elif os.environ.get("HOME"):
79 return Path("~/.local/share/compiler_gym").expanduser() / relpath
80 else:
81 return Path(f"/tmp/compiler_gym_{getuser()}/site_data") / relpath
82
83
84 def cache_path(relpath: str) -> Path:
85 """Return a path within the cache directory.
86
87 CompilerGym uses a directory to cache files in, such as downloaded content.
88 The default location for this cache is :code:`~/.cache/compiler_gym`. Set
89 the environment variable :code:`$COMPILER_GYM_CACHE` to override this
90 default location.
91
92 No checks are to made to ensure that the path, or the containing directory,
93 exist.
94
95 :param relpath: The relative path within the cache tree.
96
97 :return: An absolute path.
98 """
99 forced = os.environ.get("COMPILER_GYM_CACHE")
100 if forced:
101 return Path(forced) / relpath
102 elif os.environ.get("HOME"):
103 return Path("~/.cache/compiler_gym").expanduser() / relpath
104 else:
105 return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
106
107
108 def transient_cache_path(relpath: str) -> Path:
109 """Return a path within the transient cache directory.
110
111 The transient cache is a directory used to store files that do not need to
112 persist beyond the lifetime of the current process. When available, the
113 temporary filesystem :code:`/dev/shm` will be used. Else,
114 :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set
115 the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override
116 the default location.
117
118 No checks are to made to ensure that the path, or the containing directory,
119 exist.
120
121 :param relpath: The relative path within the cache tree.
122
123 :return: An absolute path.
124 """
125 forced = os.environ.get("COMPILER_GYM_TRANSIENT_CACHE")
126 if forced:
127 return Path(forced) / relpath
128 elif Path("/dev/shm").is_dir():
129 return Path(f"/dev/shm/compiler_gym_{getuser()}") / relpath
130 else:
131 # Fallback to using the regular cache.
132 return cache_path(relpath)
133
134
135 def create_user_logs_dir(name: str, dir: Optional[Path] = None) -> Path:
136 """Create a directory for writing logs to.
137
138 Defaults to ~/logs/compiler_gym base directory, set the
139 :code:`COMPILER_GYM_LOGS` environment variable to override this.
140
141 Example use:
142
143 >>> create_user_logs_dir("my_experiment")
144 Path("~/logs/compiler_gym/my_experiment/2020-11-03T11:00:00")
145
146 :param name: The grouping name for the logs.
147
148 :return: A unique timestamped directory for logging. This directory exists.
149 """
150 base_dir = Path(
151 os.environ.get("COMPILER_GYM_LOGS", dir or "~/logs/compiler_gym")
152 ).expanduser()
153 group_dir = base_dir / name
154
155 with _CREATE_LOGGING_DIR_LOCK:
156 # Require that logging directory timestamps are unique by waiting until
157 # a unique timestamp is generated.
158 while True:
159 now = datetime.now()
160 subdirs = now.strftime("%Y-%m-%d/%H-%M-%S")
161
162 logs_dir = group_dir / subdirs
163 if logs_dir.is_dir():
164 sleep(0.3)
165 continue
166
167 logs_dir.mkdir(parents=True, exist_ok=False)
168
169 # Create a symlink to the "latest" logs results.
170 if (group_dir / "latest").exists():
171 os.unlink(group_dir / "latest")
172 os.symlink(subdirs, group_dir / "latest")
173
174 return logs_dir
175
[end of compiler_gym/util/runfiles_path.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py
--- a/compiler_gym/util/runfiles_path.py
+++ b/compiler_gym/util/runfiles_path.py
@@ -100,7 +100,7 @@
if forced:
return Path(forced) / relpath
elif os.environ.get("HOME"):
- return Path("~/.cache/compiler_gym").expanduser() / relpath
+ return Path("~/.local/cache/compiler_gym").expanduser() / relpath
else:
return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
| {"golden_diff": "diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py\n--- a/compiler_gym/util/runfiles_path.py\n+++ b/compiler_gym/util/runfiles_path.py\n@@ -100,7 +100,7 @@\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n- return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n+ return Path(\"~/.local/cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n", "issue": "Cache path inconsistency in cc and python?\nI noticed that theses two files are using different conventions for creating cache paths. The cc file says the python file should align with the cc file, but they are still different. Is this intended?\r\n\r\nhttps://github.com/facebookresearch/CompilerGym/blob/61f460fadee2454ff8fca3bbd5a5d338854cc4a2/compiler_gym/util/runfiles_path.py#L101-L105\r\n\r\nhttps://github.com/facebookresearch/CompilerGym/blob/1596776ad35a7aeca45ed85b2e073af824844e29/compiler_gym/util/RunfilesPath.cc#L61-L65\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Module for resolving a runfiles path.\"\"\"\nimport os\nfrom datetime import datetime\nfrom getpass import getuser\nfrom pathlib import Path\nfrom threading import Lock\nfrom time import sleep\nfrom typing import Optional\n\n# NOTE(cummins): Moving this file may require updating this relative path.\n_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), \"../../\")).resolve(\n strict=True\n)\n\n_CREATE_LOGGING_DIR_LOCK = Lock()\n\n\ndef runfiles_path(relpath: str) -> Path:\n \"\"\"Resolve the path to a runfiles data path.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running\n outside of bazel.\n\n :param relpath: The relative path within the runfiles tree.\n\n :return: An absolute path.\n \"\"\"\n # There are three ways of determining a runfiles path:\n # 1. Set the COMPILER_GYM_RUNFILES environment variable.\n # 2. Using the rules_python library that is provided by bazel. This will\n # fail if not being executed within a bazel sandbox.\n # 3. Computing the path relative to the location of this file. This is the\n # fallback approach that is used for when the code has been installed\n # by setuptools.\n runfiles_path = os.environ.get(\"COMPILER_GYM_RUNFILES\")\n if runfiles_path:\n return Path(runfiles_path) / relpath\n else:\n try:\n from rules_python.python.runfiles import runfiles\n\n return Path(\n runfiles.Create().Rlocation(\n \"CompilerGym\" if relpath == \".\" else f\"CompilerGym/{relpath}\"\n )\n )\n except (ModuleNotFoundError, TypeError):\n return _PACKAGE_ROOT / relpath\n\n\ndef site_data_path(relpath: str) -> Path:\n \"\"\"Return a path within the site data directory.\n\n CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.\n The default location is :code:`~/.local/share/compiler_gym`. 
Set the environment variable\n :code:`$COMPILER_GYM_SITE_DATA` to override this default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the site data tree.\n\n :return: An absolute path.\n \"\"\"\n # NOTE(cummins): This function has a matching implementation in the C++\n # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior\n # here must be reflected in the C++ version.\n forced = os.environ.get(\"COMPILER_GYM_SITE_DATA\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/site_data\") / relpath\n\n\ndef cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the cache directory.\n\n CompilerGym uses a directory to cache files in, such as downloaded content.\n The default location for this cache is :code:`~/.cache/compiler_gym`. Set\n the environment variable :code:`$COMPILER_GYM_CACHE` to override this\n default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n\n\ndef transient_cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the transient cache directory.\n\n The transient cache is a directory used to store files that do not need to\n persist beyond the lifetime of the current process. When available, the\n temporary filesystem :code:`/dev/shm` will be used. Else,\n :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set\n the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override\n the default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_TRANSIENT_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n return Path(f\"/dev/shm/compiler_gym_{getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n\n\ndef create_user_logs_dir(name: str, dir: Optional[Path] = None) -> Path:\n \"\"\"Create a directory for writing logs to.\n\n Defaults to ~/logs/compiler_gym base directory, set the\n :code:`COMPILER_GYM_LOGS` environment variable to override this.\n\n Example use:\n\n >>> create_user_logs_dir(\"my_experiment\")\n Path(\"~/logs/compiler_gym/my_experiment/2020-11-03T11:00:00\")\n\n :param name: The grouping name for the logs.\n\n :return: A unique timestamped directory for logging. 
This directory exists.\n \"\"\"\n base_dir = Path(\n os.environ.get(\"COMPILER_GYM_LOGS\", dir or \"~/logs/compiler_gym\")\n ).expanduser()\n group_dir = base_dir / name\n\n with _CREATE_LOGGING_DIR_LOCK:\n # Require that logging directory timestamps are unique by waiting until\n # a unique timestamp is generated.\n while True:\n now = datetime.now()\n subdirs = now.strftime(\"%Y-%m-%d/%H-%M-%S\")\n\n logs_dir = group_dir / subdirs\n if logs_dir.is_dir():\n sleep(0.3)\n continue\n\n logs_dir.mkdir(parents=True, exist_ok=False)\n\n # Create a symlink to the \"latest\" logs results.\n if (group_dir / \"latest\").exists():\n os.unlink(group_dir / \"latest\")\n os.symlink(subdirs, group_dir / \"latest\")\n\n return logs_dir\n", "path": "compiler_gym/util/runfiles_path.py"}]} | 2,628 | 140 |
gh_patches_debug_35035 | rasdani/github-patches | git_diff | python-pillow__Pillow-220 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for RGBA webp image encoding and decoding
Would it be possible to wrap the `WebPEncodeRGBA` and `WebPDecodeRGBA` functionality of the webp library inside Pillow?
</issue>
<code>
[start of selftest.py]
1 # minimal sanity check
2 from __future__ import print_function
3 ROOT = "."
4
5 import os, sys
6 sys.path.insert(0, ROOT)
7
8 from PIL import Image
9 from PIL import ImageDraw
10 from PIL import ImageFilter
11 from PIL import ImageMath
12
13 try:
14 Image.core.ping
15 except ImportError as v:
16 print("***", v)
17 sys.exit()
18 except AttributeError:
19 pass
20
21 def _info(im):
22 im.load()
23 return im.format, im.mode, im.size
24
25 def testimage():
26 """
27 PIL lets you create in-memory images with various pixel types:
28
29 >>> im = Image.new("1", (128, 128)) # monochrome
30 >>> _info(im)
31 (None, '1', (128, 128))
32 >>> _info(Image.new("L", (128, 128))) # grayscale (luminance)
33 (None, 'L', (128, 128))
34 >>> _info(Image.new("P", (128, 128))) # palette
35 (None, 'P', (128, 128))
36 >>> _info(Image.new("RGB", (128, 128))) # truecolor
37 (None, 'RGB', (128, 128))
38 >>> _info(Image.new("I", (128, 128))) # 32-bit integer
39 (None, 'I', (128, 128))
40 >>> _info(Image.new("F", (128, 128))) # 32-bit floating point
41 (None, 'F', (128, 128))
42
43 Or open existing files:
44
45 >>> im = Image.open(os.path.join(ROOT, "Images/lena.gif"))
46 >>> _info(im)
47 ('GIF', 'P', (128, 128))
48 >>> _info(Image.open(os.path.join(ROOT, "Images/lena.ppm")))
49 ('PPM', 'RGB', (128, 128))
50 >>> try:
51 ... _info(Image.open(os.path.join(ROOT, "Images/lena.jpg")))
52 ... except IOError as v:
53 ... print(v)
54 ('JPEG', 'RGB', (128, 128))
55
56 PIL doesn't actually load the image data until it's needed,
57 or you call the "load" method:
58
59 >>> im = Image.open(os.path.join(ROOT, "Images/lena.ppm"))
60 >>> print(im.im) # internal image attribute
61 None
62 >>> a = im.load()
63 >>> type(im.im) # doctest: +ELLIPSIS
64 <... '...ImagingCore'>
65
66 You can apply many different operations on images. Most
67 operations return a new image:
68
69 >>> im = Image.open(os.path.join(ROOT, "Images/lena.ppm"))
70 >>> _info(im.convert("L"))
71 (None, 'L', (128, 128))
72 >>> _info(im.copy())
73 (None, 'RGB', (128, 128))
74 >>> _info(im.crop((32, 32, 96, 96)))
75 (None, 'RGB', (64, 64))
76 >>> _info(im.filter(ImageFilter.BLUR))
77 (None, 'RGB', (128, 128))
78 >>> im.getbands()
79 ('R', 'G', 'B')
80 >>> im.getbbox()
81 (0, 0, 128, 128)
82 >>> len(im.getdata())
83 16384
84 >>> im.getextrema()
85 ((61, 255), (26, 234), (44, 223))
86 >>> im.getpixel((0, 0))
87 (223, 162, 133)
88 >>> len(im.getprojection())
89 2
90 >>> len(im.histogram())
91 768
92 >>> _info(im.point(list(range(256))*3))
93 (None, 'RGB', (128, 128))
94 >>> _info(im.resize((64, 64)))
95 (None, 'RGB', (64, 64))
96 >>> _info(im.rotate(45))
97 (None, 'RGB', (128, 128))
98 >>> [_info(ch) for ch in im.split()]
99 [(None, 'L', (128, 128)), (None, 'L', (128, 128)), (None, 'L', (128, 128))]
100 >>> len(im.convert("1").tobitmap())
101 10456
102 >>> len(im.tobytes())
103 49152
104 >>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))
105 (None, 'RGB', (512, 512))
106 >>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))
107 (None, 'RGB', (512, 512))
108
109 The ImageDraw module lets you draw stuff in raster images:
110
111 >>> im = Image.new("L", (128, 128), 64)
112 >>> d = ImageDraw.ImageDraw(im)
113 >>> d.line((0, 0, 128, 128), fill=128)
114 >>> d.line((0, 128, 128, 0), fill=128)
115 >>> im.getextrema()
116 (64, 128)
117
118 In 1.1.4, you can specify colors in a number of ways:
119
120 >>> xy = 0, 0, 128, 128
121 >>> im = Image.new("RGB", (128, 128), 0)
122 >>> d = ImageDraw.ImageDraw(im)
123 >>> d.rectangle(xy, "#f00")
124 >>> im.getpixel((0, 0))
125 (255, 0, 0)
126 >>> d.rectangle(xy, "#ff0000")
127 >>> im.getpixel((0, 0))
128 (255, 0, 0)
129 >>> d.rectangle(xy, "rgb(255,0,0)")
130 >>> im.getpixel((0, 0))
131 (255, 0, 0)
132 >>> d.rectangle(xy, "rgb(100%,0%,0%)")
133 >>> im.getpixel((0, 0))
134 (255, 0, 0)
135 >>> d.rectangle(xy, "hsl(0, 100%, 50%)")
136 >>> im.getpixel((0, 0))
137 (255, 0, 0)
138 >>> d.rectangle(xy, "red")
139 >>> im.getpixel((0, 0))
140 (255, 0, 0)
141
142 In 1.1.6, you can use the ImageMath module to do image
143 calculations.
144
145 >>> im = ImageMath.eval("float(im + 20)", im=im.convert("L"))
146 >>> im.mode, im.size
147 ('F', (128, 128))
148
149 PIL can do many other things, but I'll leave that for another
150 day. If you're curious, check the handbook, available from:
151
152 http://www.pythonware.com
153
154 Cheers /F
155 """
156
157
158 def check_module(feature, module):
159 try:
160 __import__(module)
161 except ImportError:
162 print("***", feature, "support not installed")
163 else:
164 print("---", feature, "support ok")
165
166 def check_codec(feature, codec):
167 if codec + "_encoder" not in dir(Image.core):
168 print("***", feature, "support not installed")
169 else:
170 print("---", feature, "support ok")
171
172
173 if __name__ == "__main__":
174 # check build sanity
175
176 exit_status = 0
177
178 print("-"*68)
179 #print("PIL", Image.VERSION, "TEST SUMMARY ")
180 print("PIL (Pillow) TEST SUMMARY ")
181 print("-"*68)
182 print("Python modules loaded from", os.path.dirname(Image.__file__))
183 print("Binary modules loaded from", os.path.dirname(Image.core.__file__))
184 print("-"*68)
185 check_module("PIL CORE", "_imaging")
186 check_module("TKINTER", "_imagingtk")
187 check_codec("JPEG", "jpeg")
188 check_codec("ZLIB (PNG/ZIP)", "zip")
189 check_codec("G4 TIFF", "group4")
190 check_module("FREETYPE2", "_imagingft")
191 check_module("LITTLECMS", "_imagingcms")
192 check_module("WEBP", "_webp")
193 print("-"*68)
194
195 # use doctest to make sure the test program behaves as documented!
196 import doctest, selftest
197 print("Running selftest:")
198 status = doctest.testmod(selftest)
199 if status[0]:
200 print("*** %s tests of %d failed." % status)
201 exit_status = 1
202 else:
203 print("--- %s tests passed." % status[1])
204
205 sys.exit(exit_status)
206
207
[end of selftest.py]
[start of PIL/WebPImagePlugin.py]
1 from PIL import Image
2 from PIL import ImageFile
3 from io import BytesIO
4 import _webp
5
6 def _accept(prefix):
7 return prefix[:4] == b"RIFF" and prefix[8:16] == b"WEBPVP8 "
8
9 class WebPImageFile(ImageFile.ImageFile):
10
11 format = "WEBP"
12 format_description = "WebP image"
13
14 def _open(self):
15 self.mode = "RGB"
16 data, width, height = _webp.WebPDecodeRGB(self.fp.read())
17 self.size = width, height
18 self.fp = BytesIO(data)
19 self.tile = [("raw", (0, 0) + self.size, 0, 'RGB')]
20
21 def _save(im, fp, filename):
22 if im.mode != "RGB":
23 raise IOError("cannot write mode %s as WEBP" % im.mode)
24 quality = im.encoderinfo.get("quality", 80)
25
26 data = _webp.WebPEncodeRGB(im.tobytes(), im.size[0], im.size[1], im.size[0] * 3, float(quality))
27 fp.write(data)
28
29 Image.register_open("WEBP", WebPImageFile, _accept)
30 Image.register_save("WEBP", _save)
31
32 Image.register_extension("WEBP", ".webp")
33 Image.register_mime("WEBP", "image/webp")
34
[end of PIL/WebPImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PIL/WebPImagePlugin.py b/PIL/WebPImagePlugin.py
--- a/PIL/WebPImagePlugin.py
+++ b/PIL/WebPImagePlugin.py
@@ -3,29 +3,55 @@
from io import BytesIO
import _webp
+
+_VALID_WEBP_MODES = {
+ "RGB": True,
+ "RGBA": True,
+ }
+
+_VP8_MODES_BY_IDENTIFIER = {
+ b"VP8 ": "RGB",
+ b"VP8X": "RGBA",
+ }
+
+
def _accept(prefix):
- return prefix[:4] == b"RIFF" and prefix[8:16] == b"WEBPVP8 "
+ is_riff_file_format = prefix[:4] == b"RIFF"
+ is_webp_file = prefix[8:12] == b"WEBP"
+ is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER
+
+ return is_riff_file_format and is_webp_file and is_valid_vp8_mode
+
class WebPImageFile(ImageFile.ImageFile):
format = "WEBP"
format_description = "WebP image"
- def _open(self):
- self.mode = "RGB"
- data, width, height = _webp.WebPDecodeRGB(self.fp.read())
+ def _open(self):
+ data, width, height, self.mode = _webp.WebPDecode(self.fp.read())
self.size = width, height
self.fp = BytesIO(data)
- self.tile = [("raw", (0, 0) + self.size, 0, 'RGB')]
+ self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]
+
def _save(im, fp, filename):
- if im.mode != "RGB":
- raise IOError("cannot write mode %s as WEBP" % im.mode)
+ image_mode = im.mode
+ if im.mode not in _VALID_WEBP_MODES:
+ raise IOError("cannot write mode %s as WEBP" % image_mode)
+
quality = im.encoderinfo.get("quality", 80)
- data = _webp.WebPEncodeRGB(im.tobytes(), im.size[0], im.size[1], im.size[0] * 3, float(quality))
+ data = _webp.WebPEncode(
+ im.tobytes(),
+ im.size[0],
+ im.size[1],
+ float(quality),
+ im.mode
+ )
fp.write(data)
+
Image.register_open("WEBP", WebPImageFile, _accept)
Image.register_save("WEBP", _save)
diff --git a/selftest.py b/selftest.py
--- a/selftest.py
+++ b/selftest.py
@@ -190,6 +190,14 @@
check_module("FREETYPE2", "_imagingft")
check_module("LITTLECMS", "_imagingcms")
check_module("WEBP", "_webp")
+ try:
+ import _webp
+ if _webp.WebPDecoderBuggyAlpha():
+ print("***", "Transparent WEBP", "support not installed")
+ else:
+ print("---", "Transparent WEBP", "support ok")
+ except Exception:
+ pass
print("-"*68)
# use doctest to make sure the test program behaves as documented!
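The patch above routes both RGB and RGBA through single `_webp.WebPEncode`/`_webp.WebPDecode` entry points and teaches `_accept` to recognise the `VP8X` chunk used for alpha. As a usage illustration only, and assuming a Pillow build whose `_webp` extension exposes those patched entry points, an RGBA round-trip might look like:

```python
from io import BytesIO

from PIL import Image

# Round-trip a translucent RGBA image through the patched WebP plugin.
# _save() accepts the modes listed in _VALID_WEBP_MODES (RGB and RGBA);
# any other mode still raises IOError, as before the patch.
original = Image.new("RGBA", (64, 64), (255, 0, 0, 128))

buf = BytesIO()
original.save(buf, format="WEBP", quality=90)

buf.seek(0)
decoded = Image.open(buf)
# With alpha support compiled into libwebp, the decoder reports mode "RGBA".
print(decoded.format, decoded.mode, decoded.size)
```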
| {"golden_diff": "diff --git a/PIL/WebPImagePlugin.py b/PIL/WebPImagePlugin.py\n--- a/PIL/WebPImagePlugin.py\n+++ b/PIL/WebPImagePlugin.py\n@@ -3,29 +3,55 @@\n from io import BytesIO\n import _webp\n \n+\n+_VALID_WEBP_MODES = {\n+ \"RGB\": True,\n+ \"RGBA\": True,\n+ }\n+\n+_VP8_MODES_BY_IDENTIFIER = {\n+ b\"VP8 \": \"RGB\",\n+ b\"VP8X\": \"RGBA\",\n+ } \n+\n+\n def _accept(prefix):\n- return prefix[:4] == b\"RIFF\" and prefix[8:16] == b\"WEBPVP8 \"\n+ is_riff_file_format = prefix[:4] == b\"RIFF\"\n+ is_webp_file = prefix[8:12] == b\"WEBP\"\n+ is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER\n+ \n+ return is_riff_file_format and is_webp_file and is_valid_vp8_mode\n+\n \n class WebPImageFile(ImageFile.ImageFile):\n \n format = \"WEBP\"\n format_description = \"WebP image\"\n \n- def _open(self):\n- self.mode = \"RGB\"\n- data, width, height = _webp.WebPDecodeRGB(self.fp.read())\n+ def _open(self): \n+ data, width, height, self.mode = _webp.WebPDecode(self.fp.read())\n self.size = width, height\n self.fp = BytesIO(data)\n- self.tile = [(\"raw\", (0, 0) + self.size, 0, 'RGB')]\n+ self.tile = [(\"raw\", (0, 0) + self.size, 0, self.mode)]\n+\n \n def _save(im, fp, filename):\n- if im.mode != \"RGB\":\n- raise IOError(\"cannot write mode %s as WEBP\" % im.mode)\n+ image_mode = im.mode\n+ if im.mode not in _VALID_WEBP_MODES:\n+ raise IOError(\"cannot write mode %s as WEBP\" % image_mode)\n+ \n quality = im.encoderinfo.get(\"quality\", 80)\n \n- data = _webp.WebPEncodeRGB(im.tobytes(), im.size[0], im.size[1], im.size[0] * 3, float(quality))\n+ data = _webp.WebPEncode(\n+ im.tobytes(),\n+ im.size[0],\n+ im.size[1],\n+ float(quality),\n+\t\tim.mode\n+ )\n fp.write(data)\n \n+\n Image.register_open(\"WEBP\", WebPImageFile, _accept)\n Image.register_save(\"WEBP\", _save)\n \ndiff --git a/selftest.py b/selftest.py\n--- a/selftest.py\n+++ b/selftest.py\n@@ -190,6 +190,14 @@\n check_module(\"FREETYPE2\", \"_imagingft\")\n check_module(\"LITTLECMS\", \"_imagingcms\")\n check_module(\"WEBP\", \"_webp\")\n+ try:\n+ import _webp\n+ if _webp.WebPDecoderBuggyAlpha():\n+ print(\"***\", \"Transparent WEBP\", \"support not installed\")\n+ else:\n+ print(\"---\", \"Transparent WEBP\", \"support ok\")\n+ except Exception:\n+ pass\n print(\"-\"*68)\n \n # use doctest to make sure the test program behaves as documented!\n", "issue": "Add support for RGBA webp image encoding and decoding\nWould it be possible to wrap the `WebPEncodeRGBA` and `WebPDecodeRGBA` functionality of the webp library inside Pillow?\n\n", "before_files": [{"content": "# minimal sanity check\nfrom __future__ import print_function\nROOT = \".\"\n\nimport os, sys\nsys.path.insert(0, ROOT)\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFilter\nfrom PIL import ImageMath\n\ntry:\n Image.core.ping\nexcept ImportError as v:\n print(\"***\", v)\n sys.exit()\nexcept AttributeError:\n pass\n\ndef _info(im):\n im.load()\n return im.format, im.mode, im.size\n\ndef testimage():\n \"\"\"\n PIL lets you create in-memory images with various pixel types:\n\n >>> im = Image.new(\"1\", (128, 128)) # monochrome\n >>> _info(im)\n (None, '1', (128, 128))\n >>> _info(Image.new(\"L\", (128, 128))) # grayscale (luminance)\n (None, 'L', (128, 128))\n >>> _info(Image.new(\"P\", (128, 128))) # palette\n (None, 'P', (128, 128))\n >>> _info(Image.new(\"RGB\", (128, 128))) # truecolor\n (None, 'RGB', (128, 128))\n >>> _info(Image.new(\"I\", (128, 128))) # 32-bit integer\n (None, 'I', (128, 128))\n >>> _info(Image.new(\"F\", (128, 
128))) # 32-bit floating point\n (None, 'F', (128, 128))\n\n Or open existing files:\n\n >>> im = Image.open(os.path.join(ROOT, \"Images/lena.gif\"))\n >>> _info(im)\n ('GIF', 'P', (128, 128))\n >>> _info(Image.open(os.path.join(ROOT, \"Images/lena.ppm\")))\n ('PPM', 'RGB', (128, 128))\n >>> try:\n ... _info(Image.open(os.path.join(ROOT, \"Images/lena.jpg\")))\n ... except IOError as v:\n ... print(v)\n ('JPEG', 'RGB', (128, 128))\n\n PIL doesn't actually load the image data until it's needed,\n or you call the \"load\" method:\n\n >>> im = Image.open(os.path.join(ROOT, \"Images/lena.ppm\"))\n >>> print(im.im) # internal image attribute\n None\n >>> a = im.load()\n >>> type(im.im) # doctest: +ELLIPSIS\n <... '...ImagingCore'>\n\n You can apply many different operations on images. Most\n operations return a new image:\n\n >>> im = Image.open(os.path.join(ROOT, \"Images/lena.ppm\"))\n >>> _info(im.convert(\"L\"))\n (None, 'L', (128, 128))\n >>> _info(im.copy())\n (None, 'RGB', (128, 128))\n >>> _info(im.crop((32, 32, 96, 96)))\n (None, 'RGB', (64, 64))\n >>> _info(im.filter(ImageFilter.BLUR))\n (None, 'RGB', (128, 128))\n >>> im.getbands()\n ('R', 'G', 'B')\n >>> im.getbbox()\n (0, 0, 128, 128)\n >>> len(im.getdata())\n 16384\n >>> im.getextrema()\n ((61, 255), (26, 234), (44, 223))\n >>> im.getpixel((0, 0))\n (223, 162, 133)\n >>> len(im.getprojection())\n 2\n >>> len(im.histogram())\n 768\n >>> _info(im.point(list(range(256))*3))\n (None, 'RGB', (128, 128))\n >>> _info(im.resize((64, 64)))\n (None, 'RGB', (64, 64))\n >>> _info(im.rotate(45))\n (None, 'RGB', (128, 128))\n >>> [_info(ch) for ch in im.split()]\n [(None, 'L', (128, 128)), (None, 'L', (128, 128)), (None, 'L', (128, 128))]\n >>> len(im.convert(\"1\").tobitmap())\n 10456\n >>> len(im.tobytes())\n 49152\n >>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))\n (None, 'RGB', (512, 512))\n >>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))\n (None, 'RGB', (512, 512))\n\n The ImageDraw module lets you draw stuff in raster images:\n\n >>> im = Image.new(\"L\", (128, 128), 64)\n >>> d = ImageDraw.ImageDraw(im)\n >>> d.line((0, 0, 128, 128), fill=128)\n >>> d.line((0, 128, 128, 0), fill=128)\n >>> im.getextrema()\n (64, 128)\n\n In 1.1.4, you can specify colors in a number of ways:\n\n >>> xy = 0, 0, 128, 128\n >>> im = Image.new(\"RGB\", (128, 128), 0)\n >>> d = ImageDraw.ImageDraw(im)\n >>> d.rectangle(xy, \"#f00\")\n >>> im.getpixel((0, 0))\n (255, 0, 0)\n >>> d.rectangle(xy, \"#ff0000\")\n >>> im.getpixel((0, 0))\n (255, 0, 0)\n >>> d.rectangle(xy, \"rgb(255,0,0)\")\n >>> im.getpixel((0, 0))\n (255, 0, 0)\n >>> d.rectangle(xy, \"rgb(100%,0%,0%)\")\n >>> im.getpixel((0, 0))\n (255, 0, 0)\n >>> d.rectangle(xy, \"hsl(0, 100%, 50%)\")\n >>> im.getpixel((0, 0))\n (255, 0, 0)\n >>> d.rectangle(xy, \"red\")\n >>> im.getpixel((0, 0))\n (255, 0, 0)\n\n In 1.1.6, you can use the ImageMath module to do image\n calculations.\n\n >>> im = ImageMath.eval(\"float(im + 20)\", im=im.convert(\"L\"))\n >>> im.mode, im.size\n ('F', (128, 128))\n\n PIL can do many other things, but I'll leave that for another\n day. 
If you're curious, check the handbook, available from:\n\n http://www.pythonware.com\n\n Cheers /F\n \"\"\"\n\n\ndef check_module(feature, module):\n try:\n __import__(module)\n except ImportError:\n print(\"***\", feature, \"support not installed\")\n else:\n print(\"---\", feature, \"support ok\")\n\ndef check_codec(feature, codec):\n if codec + \"_encoder\" not in dir(Image.core):\n print(\"***\", feature, \"support not installed\")\n else:\n print(\"---\", feature, \"support ok\")\n\n\nif __name__ == \"__main__\":\n # check build sanity\n\n exit_status = 0\n\n print(\"-\"*68)\n #print(\"PIL\", Image.VERSION, \"TEST SUMMARY \")\n print(\"PIL (Pillow) TEST SUMMARY \")\n print(\"-\"*68)\n print(\"Python modules loaded from\", os.path.dirname(Image.__file__))\n print(\"Binary modules loaded from\", os.path.dirname(Image.core.__file__))\n print(\"-\"*68)\n check_module(\"PIL CORE\", \"_imaging\")\n check_module(\"TKINTER\", \"_imagingtk\")\n check_codec(\"JPEG\", \"jpeg\")\n check_codec(\"ZLIB (PNG/ZIP)\", \"zip\")\n check_codec(\"G4 TIFF\", \"group4\")\n check_module(\"FREETYPE2\", \"_imagingft\")\n check_module(\"LITTLECMS\", \"_imagingcms\")\n check_module(\"WEBP\", \"_webp\")\n print(\"-\"*68)\n\n # use doctest to make sure the test program behaves as documented!\n import doctest, selftest\n print(\"Running selftest:\")\n status = doctest.testmod(selftest)\n if status[0]:\n print(\"*** %s tests of %d failed.\" % status)\n exit_status = 1\n else:\n print(\"--- %s tests passed.\" % status[1])\n\n sys.exit(exit_status)\n\n", "path": "selftest.py"}, {"content": "from PIL import Image\nfrom PIL import ImageFile\nfrom io import BytesIO\nimport _webp\n\ndef _accept(prefix):\n return prefix[:4] == b\"RIFF\" and prefix[8:16] == b\"WEBPVP8 \"\n\nclass WebPImageFile(ImageFile.ImageFile):\n\n format = \"WEBP\"\n format_description = \"WebP image\"\n\n def _open(self):\n self.mode = \"RGB\"\n data, width, height = _webp.WebPDecodeRGB(self.fp.read())\n self.size = width, height\n self.fp = BytesIO(data)\n self.tile = [(\"raw\", (0, 0) + self.size, 0, 'RGB')]\n\ndef _save(im, fp, filename):\n if im.mode != \"RGB\":\n raise IOError(\"cannot write mode %s as WEBP\" % im.mode)\n quality = im.encoderinfo.get(\"quality\", 80)\n \n data = _webp.WebPEncodeRGB(im.tobytes(), im.size[0], im.size[1], im.size[0] * 3, float(quality))\n fp.write(data)\n\nImage.register_open(\"WEBP\", WebPImageFile, _accept)\nImage.register_save(\"WEBP\", _save)\n\nImage.register_extension(\"WEBP\", \".webp\")\nImage.register_mime(\"WEBP\", \"image/webp\")\n", "path": "PIL/WebPImagePlugin.py"}]} | 3,577 | 783 |
gh_patches_debug_29548 | rasdani/github-patches | git_diff | translate__pootle-3719 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running migrate twice gives an error about changed models
If you run `migrate` a second time directly after an initial migration, you will get the following error.
```
Running migrations:
No migrations to apply.
Your models have changes that are not yet reflected in a migration, and so won't be applied.
Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them.
```
`makemigrations` produces this file:
``` py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pootle.core.markup.fields
class Migration(migrations.Migration):
dependencies = [
('virtualfolder', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='virtualfolder',
name='description',
field=pootle.core.markup.fields.MarkupField(help_text='Use this to provide more information or instructions. Allowed markup: HTML', verbose_name='Description', blank=True),
preserve_default=True,
),
]
```
@unho Why are virtualfolders doing this?
</issue>
<code>
[start of pootle/apps/virtualfolder/migrations/0001_initial.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5 import pootle.core.markup.fields
6
7
8 class Migration(migrations.Migration):
9
10 dependencies = [
11 ('pootle_store', '0001_initial'),
12 ]
13
14 operations = [
15 migrations.CreateModel(
16 name='VirtualFolder',
17 fields=[
18 ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
19 ('name', models.CharField(max_length=70, verbose_name='Name')),
20 ('location', models.CharField(help_text='Root path where this virtual folder is applied.', max_length=255, verbose_name='Location')),
21 ('filter_rules', models.TextField(help_text='Filtering rules that tell which stores this virtual folder comprises.', verbose_name='Filter')),
22 ('priority', models.FloatField(default=1, help_text='Number specifying importance. Greater priority means it is more important.', verbose_name='Priority')),
23 ('is_browsable', models.BooleanField(default=True, help_text='Whether this virtual folder is active or not.', verbose_name='Is browsable?')),
24 ('description', pootle.core.markup.fields.MarkupField(help_text='Use this to provide more information or instructions. Allowed markup: HTML', verbose_name='Description', blank=True)),
25 ('units', models.ManyToManyField(related_name='vfolders', to='pootle_store.Unit', db_index=True)),
26 ],
27 options={
28 'ordering': ['-priority', 'name'],
29 },
30 bases=(models.Model,),
31 ),
32 migrations.AlterUniqueTogether(
33 name='virtualfolder',
34 unique_together=set([('name', 'location')]),
35 ),
36 ]
37
[end of pootle/apps/virtualfolder/migrations/0001_initial.py]
[start of pootle/core/markup/fields.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import logging
11
12 from django.conf import settings
13 from django.core.cache import cache
14 from django.db import models
15 from django.utils.safestring import mark_safe
16
17 from .filters import apply_markup_filter
18 from .widgets import MarkupTextarea
19
20
21 __all__ = ('Markup', 'MarkupField',)
22
23
24 logger = logging.getLogger('pootle.markup')
25
26
27 _rendered_cache_key = lambda obj, pk, field: '_%s_%s_%s_rendered' % \
28 (obj, pk, field)
29
30
31 class Markup(object):
32
33 def __init__(self, instance, field_name, rendered_cache_key):
34 self.instance = instance
35 self.field_name = field_name
36 self.cache_key = rendered_cache_key
37
38 @property
39 def raw(self):
40 return self.instance.__dict__[self.field_name]
41
42 @raw.setter
43 def raw(self, value):
44 setattr(self.instance, self.field_name, value)
45
46 @property
47 def rendered(self):
48 rendered = cache.get(self.cache_key)
49
50 if not rendered:
51 logger.debug(u'Caching rendered output of %r', self.cache_key)
52 rendered = apply_markup_filter(self.raw)
53 cache.set(self.cache_key, rendered,
54 settings.OBJECT_CACHE_TIMEOUT)
55
56 return rendered
57
58 def __unicode__(self):
59 return mark_safe(self.rendered)
60
61 def __nonzero__(self):
62 return self.raw.strip() != '' and self.raw is not None
63
64
65 class MarkupDescriptor(object):
66
67 def __init__(self, field):
68 self.field = field
69
70 def __get__(self, obj, owner):
71 if obj is None:
72 raise AttributeError('Can only be accessed via an instance.')
73
74 markup = obj.__dict__[self.field.name]
75 if markup is None:
76 return None
77
78 cache_key = _rendered_cache_key(obj.__class__.__name__,
79 obj.pk,
80 self.field.name)
81 return Markup(obj, self.field.name, cache_key)
82
83 def __set__(self, obj, value):
84 if isinstance(value, Markup):
85 obj.__dict__[self.field.name] = value.raw
86 else:
87 obj.__dict__[self.field.name] = value
88
89
90 class MarkupField(models.TextField):
91
92 description = 'Text field supporting different markup formats.'
93
94 def contribute_to_class(self, cls, name):
95 super(MarkupField, self).contribute_to_class(cls, name)
96 setattr(cls, self.name, MarkupDescriptor(self))
97
98 def pre_save(self, model_instance, add):
99 value = super(MarkupField, self).pre_save(model_instance, add)
100
101 if not add:
102 # Invalidate cache to force rendering upon next retrieval
103 cache_key = _rendered_cache_key(model_instance.__class__.__name__,
104 model_instance.pk,
105 self.name)
106 logger.debug('Invalidating cache for %r', cache_key)
107 cache.delete(cache_key)
108
109 return value.raw
110
111 def get_prep_value(self, value):
112 if isinstance(value, Markup):
113 return value.raw
114
115 return value
116
117 def value_to_string(self, obj):
118 value = self._get_val_from_obj(obj)
119 return self.get_prep_value(value)
120
121 def formfield(self, **kwargs):
122 defaults = {'widget': MarkupTextarea}
123 defaults.update(kwargs)
124 return super(MarkupField, self).formfield(**defaults)
125
[end of pootle/core/markup/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/virtualfolder/migrations/0001_initial.py b/pootle/apps/virtualfolder/migrations/0001_initial.py
--- a/pootle/apps/virtualfolder/migrations/0001_initial.py
+++ b/pootle/apps/virtualfolder/migrations/0001_initial.py
@@ -21,7 +21,7 @@
('filter_rules', models.TextField(help_text='Filtering rules that tell which stores this virtual folder comprises.', verbose_name='Filter')),
('priority', models.FloatField(default=1, help_text='Number specifying importance. Greater priority means it is more important.', verbose_name='Priority')),
('is_browsable', models.BooleanField(default=True, help_text='Whether this virtual folder is active or not.', verbose_name='Is browsable?')),
- ('description', pootle.core.markup.fields.MarkupField(help_text='Use this to provide more information or instructions. Allowed markup: HTML', verbose_name='Description', blank=True)),
+ ('description', pootle.core.markup.fields.MarkupField(verbose_name='Description', blank=True)),
('units', models.ManyToManyField(related_name='vfolders', to='pootle_store.Unit', db_index=True)),
],
options={
diff --git a/pootle/core/markup/fields.py b/pootle/core/markup/fields.py
--- a/pootle/core/markup/fields.py
+++ b/pootle/core/markup/fields.py
@@ -122,3 +122,8 @@
defaults = {'widget': MarkupTextarea}
defaults.update(kwargs)
return super(MarkupField, self).formfield(**defaults)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(MarkupField, self).deconstruct()
+ kwargs.pop('help_text', None)
+ return name, path, args, kwargs
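The second hunk above is the substantive part of the fix: `help_text` is presentation-only and never affects the database schema, so dropping it from the field's deconstructed form keeps translated or edited help strings from ever registering as model changes. The same pattern on a generic custom field (plain Django for illustration, not Pootle code):

```python
from django.db import models


class NoteField(models.TextField):
    """A TextField whose help_text is treated as presentation-only."""

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # Dropping help_text here means it never appears in migration state,
        # so changing or translating it cannot trigger a spurious
        # "your models have changes" warning after migrate.
        kwargs.pop("help_text", None)
        return name, path, args, kwargs
```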
| {"golden_diff": "diff --git a/pootle/apps/virtualfolder/migrations/0001_initial.py b/pootle/apps/virtualfolder/migrations/0001_initial.py\n--- a/pootle/apps/virtualfolder/migrations/0001_initial.py\n+++ b/pootle/apps/virtualfolder/migrations/0001_initial.py\n@@ -21,7 +21,7 @@\n ('filter_rules', models.TextField(help_text='Filtering rules that tell which stores this virtual folder comprises.', verbose_name='Filter')),\n ('priority', models.FloatField(default=1, help_text='Number specifying importance. Greater priority means it is more important.', verbose_name='Priority')),\n ('is_browsable', models.BooleanField(default=True, help_text='Whether this virtual folder is active or not.', verbose_name='Is browsable?')),\n- ('description', pootle.core.markup.fields.MarkupField(help_text='Use this to provide more information or instructions. Allowed markup: HTML', verbose_name='Description', blank=True)),\n+ ('description', pootle.core.markup.fields.MarkupField(verbose_name='Description', blank=True)),\n ('units', models.ManyToManyField(related_name='vfolders', to='pootle_store.Unit', db_index=True)),\n ],\n options={\ndiff --git a/pootle/core/markup/fields.py b/pootle/core/markup/fields.py\n--- a/pootle/core/markup/fields.py\n+++ b/pootle/core/markup/fields.py\n@@ -122,3 +122,8 @@\n defaults = {'widget': MarkupTextarea}\n defaults.update(kwargs)\n return super(MarkupField, self).formfield(**defaults)\n+\n+ def deconstruct(self):\n+ name, path, args, kwargs = super(MarkupField, self).deconstruct()\n+ kwargs.pop('help_text', None)\n+ return name, path, args, kwargs\n", "issue": "Running migrate twice gives an error about changed models\nIf you run `migrate` a second time directly after an initial migration you will get the following error.\n\n```\nRunning migrations:\n No migrations to apply.\n Your models have changes that are not yet reflected in a migration, and so won't be applied.\n Run 'manage.py makemigrations' to make new migrations, and then re-run 'manage.py migrate' to apply them.\n```\n\n`makemigrations` produces this file:\n\n``` py\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport pootle.core.markup.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('virtualfolder', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='virtualfolder',\n name='description',\n field=pootle.core.markup.fields.MarkupField(help_text='Use this to provide more information or instructions. 
Allowed markup: HTML', verbose_name='Description', blank=True),\n preserve_default=True,\n ),\n ]\n```\n\n@unho Why are virtualfolders doing this?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport pootle.core.markup.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pootle_store', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='VirtualFolder',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=70, verbose_name='Name')),\n ('location', models.CharField(help_text='Root path where this virtual folder is applied.', max_length=255, verbose_name='Location')),\n ('filter_rules', models.TextField(help_text='Filtering rules that tell which stores this virtual folder comprises.', verbose_name='Filter')),\n ('priority', models.FloatField(default=1, help_text='Number specifying importance. Greater priority means it is more important.', verbose_name='Priority')),\n ('is_browsable', models.BooleanField(default=True, help_text='Whether this virtual folder is active or not.', verbose_name='Is browsable?')),\n ('description', pootle.core.markup.fields.MarkupField(help_text='Use this to provide more information or instructions. Allowed markup: HTML', verbose_name='Description', blank=True)),\n ('units', models.ManyToManyField(related_name='vfolders', to='pootle_store.Unit', db_index=True)),\n ],\n options={\n 'ordering': ['-priority', 'name'],\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='virtualfolder',\n unique_together=set([('name', 'location')]),\n ),\n ]\n", "path": "pootle/apps/virtualfolder/migrations/0001_initial.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\n\nfrom .filters import apply_markup_filter\nfrom .widgets import MarkupTextarea\n\n\n__all__ = ('Markup', 'MarkupField',)\n\n\nlogger = logging.getLogger('pootle.markup')\n\n\n_rendered_cache_key = lambda obj, pk, field: '_%s_%s_%s_rendered' % \\\n (obj, pk, field)\n\n\nclass Markup(object):\n\n def __init__(self, instance, field_name, rendered_cache_key):\n self.instance = instance\n self.field_name = field_name\n self.cache_key = rendered_cache_key\n\n @property\n def raw(self):\n return self.instance.__dict__[self.field_name]\n\n @raw.setter\n def raw(self, value):\n setattr(self.instance, self.field_name, value)\n\n @property\n def rendered(self):\n rendered = cache.get(self.cache_key)\n\n if not rendered:\n logger.debug(u'Caching rendered output of %r', self.cache_key)\n rendered = apply_markup_filter(self.raw)\n cache.set(self.cache_key, rendered,\n settings.OBJECT_CACHE_TIMEOUT)\n\n return rendered\n\n def __unicode__(self):\n return mark_safe(self.rendered)\n\n def __nonzero__(self):\n return self.raw.strip() != '' and self.raw is not None\n\n\nclass MarkupDescriptor(object):\n\n def __init__(self, field):\n self.field = field\n\n def __get__(self, obj, owner):\n if obj is None:\n raise AttributeError('Can only be accessed via an instance.')\n\n markup = obj.__dict__[self.field.name]\n if markup is None:\n return None\n\n cache_key = _rendered_cache_key(obj.__class__.__name__,\n obj.pk,\n self.field.name)\n return Markup(obj, self.field.name, cache_key)\n\n def __set__(self, obj, value):\n if isinstance(value, Markup):\n obj.__dict__[self.field.name] = value.raw\n else:\n obj.__dict__[self.field.name] = value\n\n\nclass MarkupField(models.TextField):\n\n description = 'Text field supporting different markup formats.'\n\n def contribute_to_class(self, cls, name):\n super(MarkupField, self).contribute_to_class(cls, name)\n setattr(cls, self.name, MarkupDescriptor(self))\n\n def pre_save(self, model_instance, add):\n value = super(MarkupField, self).pre_save(model_instance, add)\n\n if not add:\n # Invalidate cache to force rendering upon next retrieval\n cache_key = _rendered_cache_key(model_instance.__class__.__name__,\n model_instance.pk,\n self.name)\n logger.debug('Invalidating cache for %r', cache_key)\n cache.delete(cache_key)\n\n return value.raw\n\n def get_prep_value(self, value):\n if isinstance(value, Markup):\n return value.raw\n\n return value\n\n def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)\n\n def formfield(self, **kwargs):\n defaults = {'widget': MarkupTextarea}\n defaults.update(kwargs)\n return super(MarkupField, self).formfield(**defaults)\n", "path": "pootle/core/markup/fields.py"}]} | 2,314 | 410 |
gh_patches_debug_4294 | rasdani/github-patches | git_diff | open-mmlab__mmpretrain-286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] CPU Testing
Since CPU training is already supported in PR #219, what about also adding the feature of CPU testing.
Besides, it seems there are still some problems with the CPU training feature @wangruohui :
When we set `--device CPU`, the expected behavior is using CPU for training, no matter if there exist GPUs on this machine. However, mmcls will use GPU for training if it exists, even if we set `--device CPU`.
</issue>
<code>
[start of mmcls/apis/train.py]
1 import random
2 import warnings
3
4 import numpy as np
5 import torch
6 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
7 from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner
8
9 from mmcls.core import DistOptimizerHook
10 from mmcls.datasets import build_dataloader, build_dataset
11 from mmcls.utils import get_root_logger
12
13 # TODO import eval hooks from mmcv and delete them from mmcls
14 try:
15 from mmcv.runner.hooks import EvalHook, DistEvalHook
16 except ImportError:
17 warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls '
18 'will be deprecated.'
19 'Please install mmcv through master branch.')
20 from mmcls.core import EvalHook, DistEvalHook
21
22 # TODO import optimizer hook from mmcv and delete them from mmcls
23 try:
24 from mmcv.runner import Fp16OptimizerHook
25 except ImportError:
26 warnings.warn('DeprecationWarning: FP16OptimizerHook from mmcls will be '
27 'deprecated. Please install mmcv>=1.1.4.')
28 from mmcls.core import Fp16OptimizerHook
29
30
31 def set_random_seed(seed, deterministic=False):
32 """Set random seed.
33
34 Args:
35 seed (int): Seed to be used.
36 deterministic (bool): Whether to set the deterministic option for
37 CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
38 to True and `torch.backends.cudnn.benchmark` to False.
39 Default: False.
40 """
41 random.seed(seed)
42 np.random.seed(seed)
43 torch.manual_seed(seed)
44 torch.cuda.manual_seed_all(seed)
45 if deterministic:
46 torch.backends.cudnn.deterministic = True
47 torch.backends.cudnn.benchmark = False
48
49
50 def train_model(model,
51 dataset,
52 cfg,
53 distributed=False,
54 validate=False,
55 timestamp=None,
56 device='cuda',
57 meta=None):
58 logger = get_root_logger(cfg.log_level)
59
60 # prepare data loaders
61 dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
62
63 data_loaders = [
64 build_dataloader(
65 ds,
66 cfg.data.samples_per_gpu,
67 cfg.data.workers_per_gpu,
68 # cfg.gpus will be ignored if distributed
69 num_gpus=len(cfg.gpu_ids),
70 dist=distributed,
71 round_up=True,
72 seed=cfg.seed) for ds in dataset
73 ]
74
75 # put model on gpus
76 if distributed:
77 find_unused_parameters = cfg.get('find_unused_parameters', False)
78 # Sets the `find_unused_parameters` parameter in
79 # torch.nn.parallel.DistributedDataParallel
80 model = MMDistributedDataParallel(
81 model.cuda(),
82 device_ids=[torch.cuda.current_device()],
83 broadcast_buffers=False,
84 find_unused_parameters=find_unused_parameters)
85 else:
86 if device == 'cuda':
87 model = MMDataParallel(
88 model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
89 elif device == 'cpu':
90 model = MMDataParallel(model.cpu())
91 else:
92 raise ValueError(F'unsupported device name {device}.')
93
94 # build runner
95 optimizer = build_optimizer(model, cfg.optimizer)
96
97 if cfg.get('runner') is None:
98 cfg.runner = {
99 'type': 'EpochBasedRunner',
100 'max_epochs': cfg.total_epochs
101 }
102 warnings.warn(
103 'config is now expected to have a `runner` section, '
104 'please set `runner` in your config.', UserWarning)
105
106 runner = build_runner(
107 cfg.runner,
108 default_args=dict(
109 model=model,
110 batch_processor=None,
111 optimizer=optimizer,
112 work_dir=cfg.work_dir,
113 logger=logger,
114 meta=meta))
115
116 # an ugly walkaround to make the .log and .log.json filenames the same
117 runner.timestamp = timestamp
118
119 # fp16 setting
120 fp16_cfg = cfg.get('fp16', None)
121 if fp16_cfg is not None:
122 optimizer_config = Fp16OptimizerHook(
123 **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
124 elif distributed and 'type' not in cfg.optimizer_config:
125 optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
126 else:
127 optimizer_config = cfg.optimizer_config
128
129 # register hooks
130 runner.register_training_hooks(cfg.lr_config, optimizer_config,
131 cfg.checkpoint_config, cfg.log_config,
132 cfg.get('momentum_config', None))
133 if distributed:
134 runner.register_hook(DistSamplerSeedHook())
135
136 # register eval hooks
137 if validate:
138 val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
139 val_dataloader = build_dataloader(
140 val_dataset,
141 samples_per_gpu=cfg.data.samples_per_gpu,
142 workers_per_gpu=cfg.data.workers_per_gpu,
143 dist=distributed,
144 shuffle=False,
145 round_up=True)
146 eval_cfg = cfg.get('evaluation', {})
147 eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
148 eval_hook = DistEvalHook if distributed else EvalHook
149 runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
150
151 if cfg.resume_from:
152 runner.resume(cfg.resume_from)
153 elif cfg.load_from:
154 runner.load_checkpoint(cfg.load_from)
155 runner.run(data_loaders, cfg.workflow)
156
[end of mmcls/apis/train.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmcls/apis/train.py b/mmcls/apis/train.py
--- a/mmcls/apis/train.py
+++ b/mmcls/apis/train.py
@@ -87,7 +87,7 @@
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
elif device == 'cpu':
- model = MMDataParallel(model.cpu())
+ model = model.cpu()
else:
raise ValueError(F'unsupported device name {device}.')
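The CPU branch now returns the bare `model.cpu()` because `MMDataParallel` is a CUDA-oriented wrapper, and keeping it around a CPU model can route execution back onto a visible GPU. A framework-free sketch of the intended device selection, using plain PyTorch and illustrative names rather than the mmcls API:

```python
import torch
from torch import nn


def place_model(model: nn.Module, device: str = "cuda", gpu_ids=(0,)) -> nn.Module:
    """Stand-in for the branch patched above (illustrative only)."""
    if device == "cuda":
        if not torch.cuda.is_available():
            raise RuntimeError("CUDA requested but no GPU is visible")
        # mmcls wraps this in MMDataParallel(model.cuda(gpu_ids[0]), ...);
        # nn.DataParallel plays the same role here for illustration.
        return nn.DataParallel(model.cuda(gpu_ids[0]), device_ids=list(gpu_ids))
    if device == "cpu":
        # No parallel wrapper at all: the runner receives the plain CPU model.
        return model.cpu()
    raise ValueError(f"unsupported device name {device}.")


model = place_model(nn.Linear(4, 2), device="cpu")
print(next(model.parameters()).device)  # cpu
```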
| {"golden_diff": "diff --git a/mmcls/apis/train.py b/mmcls/apis/train.py\n--- a/mmcls/apis/train.py\n+++ b/mmcls/apis/train.py\n@@ -87,7 +87,7 @@\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n elif device == 'cpu':\n- model = MMDataParallel(model.cpu())\n+ model = model.cpu()\n else:\n raise ValueError(F'unsupported device name {device}.')\n", "issue": "[Feature Request] CPU Testing\nSince CPU training is already supported in PR #219, what about also adding the feature of CPU testing. \r\n\r\nBesides, it seems there are still some problems with the CPU training feature @wangruohui : \r\nWhen we set `--device CPU`, the expected behavior is using CPU for training, no matter if there exist GPUs on this machine. However, mmcls will use GPU for training if it exists, even if we set `--device CPU`. \n", "before_files": [{"content": "import random\nimport warnings\n\nimport numpy as np\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner\n\nfrom mmcls.core import DistOptimizerHook\nfrom mmcls.datasets import build_dataloader, build_dataset\nfrom mmcls.utils import get_root_logger\n\n# TODO import eval hooks from mmcv and delete them from mmcls\ntry:\n from mmcv.runner.hooks import EvalHook, DistEvalHook\nexcept ImportError:\n warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls '\n 'will be deprecated.'\n 'Please install mmcv through master branch.')\n from mmcls.core import EvalHook, DistEvalHook\n\n# TODO import optimizer hook from mmcv and delete them from mmcls\ntry:\n from mmcv.runner import Fp16OptimizerHook\nexcept ImportError:\n warnings.warn('DeprecationWarning: FP16OptimizerHook from mmcls will be '\n 'deprecated. 
Please install mmcv>=1.1.4.')\n from mmcls.core import Fp16OptimizerHook\n\n\ndef set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n device='cuda',\n meta=None):\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n\n data_loaders = [\n build_dataloader(\n ds,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n round_up=True,\n seed=cfg.seed) for ds in dataset\n ]\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n if device == 'cuda':\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n elif device == 'cpu':\n model = MMDataParallel(model.cpu())\n else:\n raise ValueError(F'unsupported device name {device}.')\n\n # build runner\n optimizer = build_optimizer(model, cfg.optimizer)\n\n if cfg.get('runner') is None:\n cfg.runner = {\n 'type': 'EpochBasedRunner',\n 'max_epochs': cfg.total_epochs\n }\n warnings.warn(\n 'config is now expected to have a `runner` section, '\n 'please set `runner` in your config.', UserWarning)\n\n runner = build_runner(\n cfg.runner,\n default_args=dict(\n model=model,\n batch_processor=None,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta))\n\n # an ugly walkaround to make the .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = DistOptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n samples_per_gpu=cfg.data.samples_per_gpu,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False,\n round_up=True)\n eval_cfg = cfg.get('evaluation', {})\n eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n 
runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow)\n", "path": "mmcls/apis/train.py"}]} | 2,145 | 106 |
gh_patches_debug_307 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
botocore gets monkey patched before gevent when using pynamoDB
In [0.43.0 ssl libs are patched on import](https://github.com/DataDog/dd-trace-py/pull/1629) to allow `ddtrace-run` and `gevent` to exist in harmony.
`pynamodb` imports `botocore` and PynamoDB is patched by default. The result of this is that `ddtrace-run` ends up monkey patching `botocore` before `gevent` does.
I believe PynamoDB should be listed in the SSL libs that only get patched on import.
### Which version of dd-trace-py are you using?
0.43.0
### Which version of the libraries are you using?
ddtrace==0.43.0
gevent==20.9.0
greenlet==0.4.17
gunicorn==20.0.4
pynamodb==4.3.3
### How can we reproduce your problem?
1. Create new virtualenv
```
$ mkdir temp
$ cd temp
$ virtualenv .
$ . ./bin/active
```
2. Install libs
```
pip install ddtrace gunicorn[gevent] pynamodb
```
3. Create empty `app.py`
```
import time
while True:
time.sleep(1)
```
Run the failing command
```
ddtrace-run gunicorn -k gevent app
```
The following warning is displayed, which will turn into a SSL recursion error if you try and use urllib3.
```
$ ddtrace-run gunicorn -k gevent app
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
[2020-10-12 16:46:09 +1100] [69996] [INFO] Starting gunicorn 20.0.4
[2020-10-12 16:46:09 +1100] [69996] [INFO] Listening at: http://127.0.0.1:8000 (69996)
[2020-10-12 16:46:09 +1100] [69996] [INFO] Using worker: gevent
[2020-10-12 16:46:09 +1100] [70004] [INFO] Booting worker with pid: 70004
/private/tmp/venv/lib/python3.7/site-packages/gunicorn/workers/ggevent.py:53: MonkeyPatchWarning: Monkey-patching ssl after ssl has already been imported may lead to errors, including RecursionError on Python 3.6. It may also silently lead to incorrect behaviour on Python 3.7. Please monkey-patch earlier. See https://github.com/gevent/gevent/issues/1016. Modules that had direct imports (NOT patched): ['botocore.httpsession (/private/tmp/venv/lib/python3.7/site-packages/botocore/httpsession.py)', 'urllib3.util.ssl_ (/private/tmp/venv/lib/python3.7/site-packages/urllib3/util/ssl_.py)', 'urllib3.util (/private/tmp/venv/lib/python3.7/site-packages/urllib3/util/__init__.py)'].
monkey.patch_all()
```
Disable pynamodb tracing to fix
```
DD_TRACE_PYNAMODB_ENABLED=False ddtrace-run gunicorn -k gevent app
```
Which gives the following output
```
$ DD_TRACE_PYNAMODB_ENABLED=False ddtrace-run gunicorn -k gevent app
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused
[2020-10-12 16:48:11 +1100] [70038] [INFO] Starting gunicorn 20.0.4
[2020-10-12 16:48:11 +1100] [70038] [INFO] Listening at: http://127.0.0.1:8000 (70038)
[2020-10-12 16:48:11 +1100] [70038] [INFO] Using worker: gevent
[2020-10-12 16:48:11 +1100] [70046] [INFO] Booting worker with pid: 70046
```
</issue>
<code>
[start of ddtrace/monkey.py]
1 """Patch libraries to be automatically instrumented.
2
3 It can monkey patch supported standard libraries and third party modules.
4 A patched module will automatically report spans with its default configuration.
5
6 A library instrumentation can be configured (for instance, to report as another service)
7 using Pin. For that, check its documentation.
8 """
9 import importlib
10 import os
11 import sys
12 import threading
13
14 from ddtrace.vendor.wrapt.importer import when_imported
15
16 from .internal.logger import get_logger
17 from .settings import config
18 from .utils import formats
19
20
21 log = get_logger(__name__)
22
23 # Default set of modules to automatically patch or not
24 PATCH_MODULES = {
25 "asyncio": True,
26 "boto": True,
27 "botocore": True,
28 "bottle": False,
29 "cassandra": True,
30 "celery": True,
31 "consul": True,
32 "django": True,
33 "elasticsearch": True,
34 "algoliasearch": True,
35 "futures": False, # experimental propagation
36 "grpc": True,
37 "mongoengine": True,
38 "mysql": True,
39 "mysqldb": True,
40 "pymysql": True,
41 "psycopg": True,
42 "pylibmc": True,
43 "pymemcache": True,
44 "pymongo": True,
45 "redis": True,
46 "rediscluster": True,
47 "requests": True,
48 "sanic": True,
49 "sqlalchemy": False, # Prefer DB client instrumentation
50 "sqlite3": True,
51 "aiohttp": True, # requires asyncio (Python 3.4+)
52 "aiopg": True,
53 "aiobotocore": False,
54 "httplib": False,
55 "vertica": True,
56 "molten": True,
57 "jinja2": True,
58 "mako": True,
59 "flask": True,
60 "kombu": False,
61 "starlette": True,
62 # Ignore some web framework integrations that might be configured explicitly in code
63 "falcon": False,
64 "pylons": False,
65 "pyramid": False,
66 # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true
67 "logging": config.logs_injection,
68 "pynamodb": True,
69 }
70
71 _LOCK = threading.Lock()
72 _PATCHED_MODULES = set()
73
74 # Modules which are patched on first use
75 # DEV: These modules are patched when the user first imports them, rather than
76 # explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`
77 # DEV: This ensures we do not patch a module until it is needed
78 # DEV: <contrib name> => <list of module names that trigger a patch>
79 _PATCH_ON_IMPORT = {
80 "aiohttp": ("aiohttp",),
81 "aiobotocore": ("aiobotocore",),
82 "celery": ("celery",),
83 "flask": ("flask, "),
84 "gevent": ("gevent",),
85 "requests": ("requests",),
86 "botocore": ("botocore",),
87 "elasticsearch": ("elasticsearch",),
88 }
89
90
91 class PatchException(Exception):
92 """Wraps regular `Exception` class when patching modules"""
93
94 pass
95
96
97 class ModuleNotFoundException(PatchException):
98 pass
99
100
101 def _on_import_factory(module, raise_errors=True):
102 """Factory to create an import hook for the provided module name"""
103
104 def on_import(hook):
105 # Import and patch module
106 path = "ddtrace.contrib.%s" % module
107 imported_module = importlib.import_module(path)
108 imported_module.patch()
109
110 return on_import
111
112
113 def patch_all(**patch_modules):
114 """Automatically patches all available modules.
115
116 In addition to ``patch_modules``, an override can be specified via an
117 environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.
118
119 ``patch_modules`` have the highest precedence for overriding.
120
121 :param dict patch_modules: Override whether particular modules are patched or not.
122
123 >>> patch_all(redis=False, cassandra=False)
124 """
125 modules = PATCH_MODULES.copy()
126
127 # The enabled setting can be overridden by environment variables
128 for module, enabled in modules.items():
129 env_var = "DD_TRACE_%s_ENABLED" % module.upper()
130 if env_var not in os.environ:
131 continue
132
133 override_enabled = formats.asbool(os.environ[env_var])
134 modules[module] = override_enabled
135
136 # Arguments take precedence over the environment and the defaults.
137 modules.update(patch_modules)
138
139 patch(raise_errors=False, **modules)
140
141
142 def patch(raise_errors=True, **patch_modules):
143 """Patch only a set of given modules.
144
145 :param bool raise_errors: Raise error if one patch fail.
146 :param dict patch_modules: List of modules to patch.
147
148 >>> patch(psycopg=True, elasticsearch=True)
149 """
150 modules = [m for (m, should_patch) in patch_modules.items() if should_patch]
151 for module in modules:
152 if module in _PATCH_ON_IMPORT:
153 # If the module has already been imported then patch immediately
154 if module in sys.modules:
155 patch_module(module, raise_errors=raise_errors)
156
157 # Otherwise, add a hook to patch when it is imported for the first time
158 else:
159 # Use factory to create handler to close over `module` and `raise_errors` values from this loop
160 when_imported(module)(_on_import_factory(module, raise_errors))
161
162 # manually add module to patched modules
163 with _LOCK:
164 _PATCHED_MODULES.add(module)
165 else:
166 patch_module(module, raise_errors=raise_errors)
167
168 patched_modules = get_patched_modules()
169 log.info(
170 "patched %s/%s modules (%s)",
171 len(patched_modules),
172 len(modules),
173 ",".join(patched_modules),
174 )
175
176
177 def patch_module(module, raise_errors=True):
178 """Patch a single module
179
180 Returns if the module got properly patched.
181 """
182 try:
183 return _patch_module(module)
184 except ModuleNotFoundException:
185 if raise_errors:
186 raise
187 return False
188 except Exception:
189 if raise_errors:
190 raise
191 log.debug("failed to patch %s", module, exc_info=True)
192 return False
193
194
195 def get_patched_modules():
196 """Get the list of patched modules"""
197 with _LOCK:
198 return sorted(_PATCHED_MODULES)
199
200
201 def _patch_module(module):
202 """_patch_module will attempt to monkey patch the module.
203
204 Returns if the module got patched.
205 Can also raise errors if it fails.
206 """
207 path = "ddtrace.contrib.%s" % module
208 with _LOCK:
209 if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:
210 log.debug("already patched: %s", path)
211 return False
212
213 try:
214 imported_module = importlib.import_module(path)
215 except ImportError:
216 # if the import fails, the integration is not available
217 raise PatchException("integration '%s' not available" % path)
218 else:
219 # if patch() is not available in the module, it means
220 # that the library is not installed in the environment
221 if not hasattr(imported_module, "patch"):
222 raise ModuleNotFoundException("module '%s' not installed" % module)
223
224 imported_module.patch()
225 _PATCHED_MODULES.add(module)
226 return True
227
[end of ddtrace/monkey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py
--- a/ddtrace/monkey.py
+++ b/ddtrace/monkey.py
@@ -85,6 +85,7 @@
"requests": ("requests",),
"botocore": ("botocore",),
"elasticsearch": ("elasticsearch",),
+ "pynamodb": ("pynamodb",),
}
| {"golden_diff": "diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -85,6 +85,7 @@\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n+ \"pynamodb\": (\"pynamodb\",),\n }\n", "issue": "botocore gets monkey patched before gevent when using pynamoDB\nIn [0.43.0 ssl libs are patched on import](https://github.com/DataDog/dd-trace-py/pull/1629) to allow `ddtrace-run` and `gevent` to exist in harmony.\r\n\r\n`pynamodb` imports `botocore` and PynamoDB is patched by default. The result of this is that `ddtrace-run` ends up monkey patching `botocore` before `gevent` does.\r\n\r\nI believe PynamoDB should be listed in the SSL libs that only get patched on import.\r\n\r\n### Which version of dd-trace-py are you using?\r\n0.43.0\r\n\r\n### Which version of the libraries are you using?\r\n\r\nddtrace==0.43.0\r\ngevent==20.9.0\r\ngreenlet==0.4.17\r\ngunicorn==20.0.4\r\npynamodb==4.3.3\r\n\r\n### How can we reproduce your problem?\r\n\r\n1. Create new virtualenv\r\n```\r\n$ mkdir temp\r\n$ cd temp\r\n$ virtualenv .\r\n$ . ./bin/active\r\n```\r\n\r\n2. Install libs\r\n```\r\npip install ddtrace gunicorn[gevent] pynamodb\r\n```\r\n\r\n3. Create empty `app.py`\r\n```\r\nimport time\r\nwhile True:\r\n time.sleep(1)\r\n```\r\n\r\nRun the failing command\r\n```\r\n ddtrace-run gunicorn -k gevent app\r\n```\r\n\r\nThe following warning is displayed, which will turn into a SSL recursion error if you try and use urllib3.\r\n\r\n```\r\n$ ddtrace-run gunicorn -k gevent app\r\n- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused\r\n- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused\r\n[2020-10-12 16:46:09 +1100] [69996] [INFO] Starting gunicorn 20.0.4\r\n[2020-10-12 16:46:09 +1100] [69996] [INFO] Listening at: http://127.0.0.1:8000 (69996)\r\n[2020-10-12 16:46:09 +1100] [69996] [INFO] Using worker: gevent\r\n[2020-10-12 16:46:09 +1100] [70004] [INFO] Booting worker with pid: 70004\r\n/private/tmp/venv/lib/python3.7/site-packages/gunicorn/workers/ggevent.py:53: MonkeyPatchWarning: Monkey-patching ssl after ssl has already been imported may lead to errors, including RecursionError on Python 3.6. It may also silently lead to incorrect behaviour on Python 3.7. Please monkey-patch earlier. See https://github.com/gevent/gevent/issues/1016. Modules that had direct imports (NOT patched): ['botocore.httpsession (/private/tmp/venv/lib/python3.7/site-packages/botocore/httpsession.py)', 'urllib3.util.ssl_ (/private/tmp/venv/lib/python3.7/site-packages/urllib3/util/ssl_.py)', 'urllib3.util (/private/tmp/venv/lib/python3.7/site-packages/urllib3/util/__init__.py)'].\r\n monkey.patch_all()\r\n```\r\n\r\nDisable pynamodb tracing to fix\r\n```\r\nDD_TRACE_PYNAMODB_ENABLED=False ddtrace-run gunicorn -k gevent app\r\n```\r\n\r\nWhich gives the following output\r\n```\r\n$ DD_TRACE_PYNAMODB_ENABLED=False ddtrace-run gunicorn -k gevent app\r\n- DATADOG TRACER DIAGNOSTIC - Agent not reachable. Exception raised: [Errno 61] Connection refused\r\n- DATADOG TRACER DIAGNOSTIC - Agent not reachable. 
Exception raised: [Errno 61] Connection refused\r\n[2020-10-12 16:48:11 +1100] [70038] [INFO] Starting gunicorn 20.0.4\r\n[2020-10-12 16:48:11 +1100] [70038] [INFO] Listening at: http://127.0.0.1:8000 (70038)\r\n[2020-10-12 16:48:11 +1100] [70038] [INFO] Using worker: gevent\r\n[2020-10-12 16:48:11 +1100] [70046] [INFO] Booting worker with pid: 70046\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": False, # experimental propagation\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask, \"),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available 
modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n # If the module has already been imported then patch immediately\n if module in sys.modules:\n patch_module(module, raise_errors=raise_errors)\n\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(module)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch()\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]} | 3,843 | 86 |
gh_patches_debug_5537 | rasdani/github-patches | git_diff | nextcloud__appstore-619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Verify email addresses after E-Mail change
When a user changes their email address, it should be verified. allauth provides some views for that which may or may not be useful. Unsure whether email addresses currently are verified at signup, but it would be appropriate for it to use the same mechanism.
</issue>
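One way to get verification without new plumbing is to stop writing to `EmailAddress.email` directly and instead route the update through allauth's own `EmailAddress.change()` helper, which records the new address as unverified and sends the standard confirmation mail. A minimal sketch (the helper name is illustrative; only the allauth calls are real API):

```python
from allauth.account.models import EmailAddress


def change_primary_email(user, new_email, request=None):
    """Swap the user's primary address via allauth so it gets re-verified."""
    email = EmailAddress.objects.get_primary(user=user)
    # allauth stores the new address unverified and mails a confirmation link;
    # request may be None when no HTTP request object is available.
    email.change(request, new_email)
```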
<code>
[start of nextcloudappstore/user/views.py]
1 from allauth.account.models import EmailAddress
2 from allauth.account.views import PasswordChangeView
3 from django.contrib import messages
4 from django.contrib.auth.mixins import LoginRequiredMixin
5 from django.urls import reverse_lazy
6 from django.shortcuts import redirect, render, get_object_or_404
7 from django.urls import reverse
8 from django.views.generic import TemplateView
9 from django.views.generic import UpdateView
10
11 from nextcloudappstore.core.models import App
12 from nextcloudappstore.user.forms import DeleteAccountForm, AccountForm
13
14
15 class TransferAppsView(LoginRequiredMixin, TemplateView):
16 template_name = 'user/transfer-apps.html'
17
18 def post(self, request, pk):
19 app = get_object_or_404(App, pk=pk, owner=self.request.user)
20 app.ownership_transfer_enabled = not app.ownership_transfer_enabled
21 app.save()
22 return redirect(reverse('user:account-transfer-apps'))
23
24 def get_context_data(self, **kwargs):
25 context = super().get_context_data(**kwargs)
26 context['apps'] = App.objects.filter(owner=self.request.user)
27 context['acc_page'] = 'account-transfer-apps'
28 return context
29
30
31 class ChangeLanguageView(LoginRequiredMixin, TemplateView):
32 template_name = 'user/set-language.html'
33
34 def get_context_data(self, **kwargs):
35 context = super().get_context_data(**kwargs)
36 context['acc_page'] = 'account-change-language'
37 return context
38
39
40 class DeleteAccountView(LoginRequiredMixin, TemplateView):
41 template_name = 'user/delete-account.html'
42
43 def get_context_data(self, **kwargs):
44 context = super().get_context_data(**kwargs)
45 context['form'] = DeleteAccountForm()
46 context['acc_page'] = 'delete-account'
47 return context
48
49 def post(self, request, *args, **kwargs):
50 form = DeleteAccountForm(request.POST, user=request.user)
51 if form.is_valid():
52 request.user.delete()
53 return redirect(reverse_lazy('home'))
54 else:
55 return render(request, self.template_name, {'form': form})
56
57
58 class AccountView(LoginRequiredMixin, UpdateView):
59 """Display and allow changing of the user's name."""
60
61 template_name = 'user/account.html'
62 template_name_suffix = ''
63 form_class = AccountForm
64 success_url = reverse_lazy('user:account')
65
66 def get_context_data(self, **kwargs):
67 context = super().get_context_data(**kwargs)
68 context['acc_page'] = 'account'
69 return context
70
71 def form_valid(self, form):
72 email = EmailAddress.objects.get_primary(user=self.request.user)
73 email.email = form.cleaned_data['email']
74 email.save()
75 messages.success(self.request, 'Account details saved.')
76 return super().form_valid(form)
77
78 def get_object(self, queryset=None):
79 return self.request.user
80
81
82 class PasswordView(LoginRequiredMixin, PasswordChangeView):
83 """Allow the user to change their password."""
84
85 template_name = 'user/password.html'
86 success_url = reverse_lazy('user:account-password')
87
88 def get_context_data(self, **kwargs):
89 context = super().get_context_data(**kwargs)
90 context['acc_page'] = 'password'
91 return context
92
93
94 class APITokenView(LoginRequiredMixin, TemplateView):
95 """Display the user's API token, and allow it to be regenerated."""
96
97 template_name = 'user/api-token.html'
98
99 def get_context_data(self, **kwargs):
100 context = super().get_context_data(**kwargs)
101 context['acc_page'] = 'api-token'
102 return context
103
[end of nextcloudappstore/user/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/user/views.py b/nextcloudappstore/user/views.py
--- a/nextcloudappstore/user/views.py
+++ b/nextcloudappstore/user/views.py
@@ -70,8 +70,7 @@
def form_valid(self, form):
email = EmailAddress.objects.get_primary(user=self.request.user)
- email.email = form.cleaned_data['email']
- email.save()
+ email.change(None, form.cleaned_data['email'])
messages.success(self.request, 'Account details saved.')
return super().form_valid(form)
| {"golden_diff": "diff --git a/nextcloudappstore/user/views.py b/nextcloudappstore/user/views.py\n--- a/nextcloudappstore/user/views.py\n+++ b/nextcloudappstore/user/views.py\n@@ -70,8 +70,7 @@\n \n def form_valid(self, form):\n email = EmailAddress.objects.get_primary(user=self.request.user)\n- email.email = form.cleaned_data['email']\n- email.save()\n+ email.change(None, form.cleaned_data['email'])\n messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n", "issue": "Verify email addresses after E-Mail change\nWhen a user changes their email address, it should be verified. allauth provides some views for that which may or may not be useful. Unsure whether email addresses currently are verified at signup, but it would be appropriate for it to use the same mechanism.\n\n", "before_files": [{"content": "from allauth.account.models import EmailAddress\nfrom allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.models import App\nfrom nextcloudappstore.user.forms import DeleteAccountForm, AccountForm\n\n\nclass TransferAppsView(LoginRequiredMixin, TemplateView):\n template_name = 'user/transfer-apps.html'\n\n def post(self, request, pk):\n app = get_object_or_404(App, pk=pk, owner=self.request.user)\n app.ownership_transfer_enabled = not app.ownership_transfer_enabled\n app.save()\n return redirect(reverse('user:account-transfer-apps'))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['apps'] = App.objects.filter(owner=self.request.user)\n context['acc_page'] = 'account-transfer-apps'\n return context\n\n\nclass ChangeLanguageView(LoginRequiredMixin, TemplateView):\n template_name = 'user/set-language.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account-change-language'\n return context\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n form_class = AccountForm\n success_url = reverse_lazy('user:account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n email = EmailAddress.objects.get_primary(user=self.request.user)\n email.email = form.cleaned_data['email']\n email.save()\n messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, 
PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('user:account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/user/views.py"}]} | 1,555 | 125 |
gh_patches_debug_31147 | rasdani/github-patches | git_diff | onnx__onnx-5757 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
check_function requires contexts as arguments which breaks backward compatibility
https://github.com/onnx/onnx/pull/5693 added required parameters to the `check_function` function in checker which breaks backward compatibility. Should we provide default contexts to `check_function` as well?
</issue>
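One way to restore the old call signature is to give the new parameters the module-level default context objects that `onnx/checker.py` already defines (`DEFAULT_CONTEXT` and `LEXICAL_SCOPE_CONTEXT`), mirroring sibling checkers such as `check_graph`. A sketch of the function as it would sit inside `onnx/checker.py`, reusing that file's existing imports and constants:

```python
def check_function(
    function: FunctionProto,
    ctx: C.CheckerContext = DEFAULT_CONTEXT,
    lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
) -> None:
    # Defaulted contexts keep existing check_function(fn) call sites working.
    _ensure_proto_type(function, FunctionProto)
    C.check_function(function.SerializeToString(), ctx, lexical_scope_ctx)
```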
<code>
[start of onnx/checker.py]
1 # Copyright (c) ONNX Project Contributors
2 #
3 # SPDX-License-Identifier: Apache-2.0
4 """Graph utilities for checking whether an ONNX proto message is legal."""
5
6 from __future__ import annotations
7
8 __all__ = [
9 "check_attribute",
10 "check_function",
11 "check_graph",
12 "check_model",
13 "check_node",
14 "check_sparse_tensor",
15 "check_tensor",
16 "check_value_info",
17 "DEFAULT_CONTEXT",
18 "LEXICAL_SCOPE_CONTEXT",
19 "ValidationError",
20 "C",
21 "MAXIMUM_PROTOBUF",
22 ]
23
24 import os
25 import sys
26 from typing import Any, Callable, TypeVar
27
28 from google.protobuf.message import Message
29
30 import onnx.defs
31 import onnx.onnx_cpp2py_export.checker as C # noqa: N812
32 import onnx.shape_inference
33 from onnx import (
34 IR_VERSION,
35 AttributeProto,
36 FunctionProto,
37 GraphProto,
38 ModelProto,
39 NodeProto,
40 SparseTensorProto,
41 TensorProto,
42 ValueInfoProto,
43 )
44
45 # Limitation of single protobuf file is 2GB
46 MAXIMUM_PROTOBUF = 2000000000
47
48 # TODO: This thing where we reserialize the protobuf back into the
49 # string, only to deserialize it at the call site, is really goofy.
50 # Stop doing that.
51
52
53 # NB: Please don't edit this context!
54 DEFAULT_CONTEXT = C.CheckerContext()
55 DEFAULT_CONTEXT.ir_version = IR_VERSION
56 # TODO: Maybe ONNX-ML should also be defaulted?
57 DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()}
58
59 LEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()
60
61
62 FuncType = TypeVar("FuncType", bound=Callable[..., Any])
63
64
65 def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:
66 if not isinstance(proto, proto_type):
67 raise TypeError(
68 f"The proto message needs to be of type '{proto_type.__name__}'"
69 )
70
71
72 def check_value_info(
73 value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
74 ) -> None:
75 _ensure_proto_type(value_info, ValueInfoProto)
76 return C.check_value_info(value_info.SerializeToString(), ctx)
77
78
79 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
80 _ensure_proto_type(tensor, TensorProto)
81 return C.check_tensor(tensor.SerializeToString(), ctx)
82
83
84 def check_attribute(
85 attr: AttributeProto,
86 ctx: C.CheckerContext = DEFAULT_CONTEXT,
87 lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
88 ) -> None:
89 _ensure_proto_type(attr, AttributeProto)
90 return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)
91
92
93 def check_node(
94 node: NodeProto,
95 ctx: C.CheckerContext = DEFAULT_CONTEXT,
96 lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
97 ) -> None:
98 _ensure_proto_type(node, NodeProto)
99 return C.check_node(node.SerializeToString(), ctx, lex_ctx)
100
101
102 def check_function(
103 function: FunctionProto,
104 ctx: C.CheckerContext,
105 lex_ctx: C.LexicalScopeContext,
106 ) -> None:
107 _ensure_proto_type(function, FunctionProto)
108 C.check_function(function.SerializeToString(), ctx, lex_ctx)
109
110
111 def check_graph(
112 graph: GraphProto,
113 ctx: C.CheckerContext = DEFAULT_CONTEXT,
114 lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
115 ) -> None:
116 _ensure_proto_type(graph, GraphProto)
117 return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)
118
119
120 def check_sparse_tensor(
121 sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT
122 ) -> None:
123 _ensure_proto_type(sparse, SparseTensorProto)
124 C.check_sparse_tensor(sparse.SerializeToString(), ctx)
125
126
127 def check_model(
128 model: ModelProto | str | bytes | os.PathLike,
129 full_check: bool = False,
130 skip_opset_compatibility_check: bool = False,
131 ) -> None:
132 """Check the consistency of a model.
133
134 An exception will be raised if the model's ir_version is not set
135 properly or is higher than checker's ir_version, or if the model
136 has duplicate keys in metadata_props.
137
138 If IR version >= 3, the model must specify opset_import.
139 If IR version < 3, the model cannot have any opset_import specified.
140
141 Args:
142 model: Model to check. If model is a path, the function checks model
143 path first. If the model bytes size is larger than 2GB, function
144 should be called using model path.
145 full_check: If True, the function also runs shape inference check.
146 skip_opset_compatibility_check: If True, the function skips the check for
147 opset compatibility.
148 """
149 # If model is a path instead of ModelProto
150 if isinstance(model, (str, os.PathLike)):
151 C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)
152 else:
153 protobuf_string = (
154 model if isinstance(model, bytes) else model.SerializeToString()
155 )
156 # If the protobuf is larger than 2GB,
157 # remind users should use the model path to check
158 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
159 raise ValueError(
160 "This protobuf of onnx model is too large (>2GB). Call check_model with model path instead."
161 )
162 C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
163
164
165 ValidationError = C.ValidationError
166
[end of onnx/checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onnx/checker.py b/onnx/checker.py
--- a/onnx/checker.py
+++ b/onnx/checker.py
@@ -84,37 +84,37 @@
def check_attribute(
attr: AttributeProto,
ctx: C.CheckerContext = DEFAULT_CONTEXT,
- lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
) -> None:
_ensure_proto_type(attr, AttributeProto)
- return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)
+ return C.check_attribute(attr.SerializeToString(), ctx, lexical_scope_ctx)
def check_node(
node: NodeProto,
ctx: C.CheckerContext = DEFAULT_CONTEXT,
- lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
) -> None:
_ensure_proto_type(node, NodeProto)
- return C.check_node(node.SerializeToString(), ctx, lex_ctx)
+ return C.check_node(node.SerializeToString(), ctx, lexical_scope_ctx)
def check_function(
function: FunctionProto,
- ctx: C.CheckerContext,
- lex_ctx: C.LexicalScopeContext,
+ ctx: C.CheckerContext = DEFAULT_CONTEXT,
+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
) -> None:
_ensure_proto_type(function, FunctionProto)
- C.check_function(function.SerializeToString(), ctx, lex_ctx)
+ C.check_function(function.SerializeToString(), ctx, lexical_scope_ctx)
def check_graph(
graph: GraphProto,
ctx: C.CheckerContext = DEFAULT_CONTEXT,
- lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,
) -> None:
_ensure_proto_type(graph, GraphProto)
- return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)
+ return C.check_graph(graph.SerializeToString(), ctx, lexical_scope_ctx)
def check_sparse_tensor(
| {"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -84,37 +84,37 @@\n def check_attribute(\n attr: AttributeProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n- lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n ) -> None:\n _ensure_proto_type(attr, AttributeProto)\n- return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)\n+ return C.check_attribute(attr.SerializeToString(), ctx, lexical_scope_ctx)\n \n \n def check_node(\n node: NodeProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n- lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n ) -> None:\n _ensure_proto_type(node, NodeProto)\n- return C.check_node(node.SerializeToString(), ctx, lex_ctx)\n+ return C.check_node(node.SerializeToString(), ctx, lexical_scope_ctx)\n \n \n def check_function(\n function: FunctionProto,\n- ctx: C.CheckerContext,\n- lex_ctx: C.LexicalScopeContext,\n+ ctx: C.CheckerContext = DEFAULT_CONTEXT,\n+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n ) -> None:\n _ensure_proto_type(function, FunctionProto)\n- C.check_function(function.SerializeToString(), ctx, lex_ctx)\n+ C.check_function(function.SerializeToString(), ctx, lexical_scope_ctx)\n \n \n def check_graph(\n graph: GraphProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n- lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n+ lexical_scope_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n ) -> None:\n _ensure_proto_type(graph, GraphProto)\n- return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)\n+ return C.check_graph(graph.SerializeToString(), ctx, lexical_scope_ctx)\n \n \n def check_sparse_tensor(\n", "issue": "check_function requires contexts as arguments which breaks backward compatibility\nhttps://github.com/onnx/onnx/pull/5693 added required parameters to the `check_function` function in checker which breaks backward compatibility. 
Should we provide default contexts to `check_function` as well?\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"LEXICAL_SCOPE_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\nLEXICAL_SCOPE_CONTEXT = C.LexicalScopeContext()\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_node(\n node: NodeProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_function(\n function: FunctionProto,\n ctx: C.CheckerContext,\n lex_ctx: C.LexicalScopeContext,\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n C.check_function(function.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_graph(\n graph: GraphProto,\n ctx: C.CheckerContext = DEFAULT_CONTEXT,\n lex_ctx: C.LexicalScopeContext = LEXICAL_SCOPE_CONTEXT,\n) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx, lex_ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n 
skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model.\n\n An exception will be raised if the model's ir_version is not set\n properly or is higher than checker's ir_version, or if the model\n has duplicate keys in metadata_props.\n\n If IR version >= 3, the model must specify opset_import.\n If IR version < 3, the model cannot have any opset_import specified.\n\n Args:\n model: Model to check. If model is a path, the function checks model\n path first. If the model bytes size is larger than 2GB, function\n should be called using model path.\n full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]} | 2,209 | 469 |
gh_patches_debug_33978 | rasdani/github-patches | git_diff | matrix-org__synapse-3136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add six as dependency
Just a quick tracking issue to remember to add six as dependency. It is currently used, but it's just an indirect dependency of many other packages. For clarity, it would be good to add it to the dependencies. I'm not sure how to do it myself, the file is non-standard.
</issue>
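Because `synapse/python_dependencies.py` (shown below) feeds both the generated requirements list and the runtime `check_requirements()` check from the same `REQUIREMENTS` dict, declaring six explicitly is a one-line entry. A sketch of what that entry looks like, with the rest of the dict elided:

```python
REQUIREMENTS = {
    # ... existing entries ...
    # key: pip requirement specifier, value: module names that must be importable
    "six": ["six"],
}
```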
<code>
[start of synapse/python_dependencies.py]
1 # Copyright 2015, 2016 OpenMarket Ltd
2 # Copyright 2017 Vector Creations Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import logging
17 from distutils.version import LooseVersion
18
19 logger = logging.getLogger(__name__)
20
21 REQUIREMENTS = {
22 "jsonschema>=2.5.1": ["jsonschema>=2.5.1"],
23 "frozendict>=0.4": ["frozendict"],
24 "unpaddedbase64>=1.1.0": ["unpaddedbase64>=1.1.0"],
25 "canonicaljson>=1.1.3": ["canonicaljson>=1.1.3"],
26 "signedjson>=1.0.0": ["signedjson>=1.0.0"],
27 "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
28 "service_identity>=1.0.0": ["service_identity>=1.0.0"],
29 "Twisted>=16.0.0": ["twisted>=16.0.0"],
30 "pyopenssl>=0.14": ["OpenSSL>=0.14"],
31 "pyyaml": ["yaml"],
32 "pyasn1": ["pyasn1"],
33 "daemonize": ["daemonize"],
34 "bcrypt": ["bcrypt>=3.1.0"],
35 "pillow": ["PIL"],
36 "pydenticon": ["pydenticon"],
37 "blist": ["blist"],
38 "pysaml2>=3.0.0": ["saml2>=3.0.0"],
39 "pymacaroons-pynacl": ["pymacaroons"],
40 "msgpack-python>=0.3.0": ["msgpack"],
41 "phonenumbers>=8.2.0": ["phonenumbers"],
42 }
43 CONDITIONAL_REQUIREMENTS = {
44 "web_client": {
45 "matrix_angular_sdk>=0.6.8": ["syweb>=0.6.8"],
46 },
47 "preview_url": {
48 "netaddr>=0.7.18": ["netaddr"],
49 },
50 "email.enable_notifs": {
51 "Jinja2>=2.8": ["Jinja2>=2.8"],
52 "bleach>=1.4.2": ["bleach>=1.4.2"],
53 },
54 "matrix-synapse-ldap3": {
55 "matrix-synapse-ldap3>=0.1": ["ldap_auth_provider"],
56 },
57 "psutil": {
58 "psutil>=2.0.0": ["psutil>=2.0.0"],
59 },
60 "affinity": {
61 "affinity": ["affinity"],
62 },
63 }
64
65
66 def requirements(config=None, include_conditional=False):
67 reqs = REQUIREMENTS.copy()
68 if include_conditional:
69 for _, req in CONDITIONAL_REQUIREMENTS.items():
70 reqs.update(req)
71 return reqs
72
73
74 def github_link(project, version, egg):
75 return "https://github.com/%s/tarball/%s/#egg=%s" % (project, version, egg)
76
77
78 DEPENDENCY_LINKS = {
79 }
80
81
82 class MissingRequirementError(Exception):
83 def __init__(self, message, module_name, dependency):
84 super(MissingRequirementError, self).__init__(message)
85 self.module_name = module_name
86 self.dependency = dependency
87
88
89 def check_requirements(config=None):
90 """Checks that all the modules needed by synapse have been correctly
91 installed and are at the correct version"""
92 for dependency, module_requirements in (
93 requirements(config, include_conditional=False).items()):
94 for module_requirement in module_requirements:
95 if ">=" in module_requirement:
96 module_name, required_version = module_requirement.split(">=")
97 version_test = ">="
98 elif "==" in module_requirement:
99 module_name, required_version = module_requirement.split("==")
100 version_test = "=="
101 else:
102 module_name = module_requirement
103 version_test = None
104
105 try:
106 module = __import__(module_name)
107 except ImportError:
108 logging.exception(
109 "Can't import %r which is part of %r",
110 module_name, dependency
111 )
112 raise MissingRequirementError(
113 "Can't import %r which is part of %r"
114 % (module_name, dependency), module_name, dependency
115 )
116 version = getattr(module, "__version__", None)
117 file_path = getattr(module, "__file__", None)
118 logger.info(
119 "Using %r version %r from %r to satisfy %r",
120 module_name, version, file_path, dependency
121 )
122
123 if version_test == ">=":
124 if version is None:
125 raise MissingRequirementError(
126 "Version of %r isn't set as __version__ of module %r"
127 % (dependency, module_name), module_name, dependency
128 )
129 if LooseVersion(version) < LooseVersion(required_version):
130 raise MissingRequirementError(
131 "Version of %r in %r is too old. %r < %r"
132 % (dependency, file_path, version, required_version),
133 module_name, dependency
134 )
135 elif version_test == "==":
136 if version is None:
137 raise MissingRequirementError(
138 "Version of %r isn't set as __version__ of module %r"
139 % (dependency, module_name), module_name, dependency
140 )
141 if LooseVersion(version) != LooseVersion(required_version):
142 raise MissingRequirementError(
143 "Unexpected version of %r in %r. %r != %r"
144 % (dependency, file_path, version, required_version),
145 module_name, dependency
146 )
147
148
149 def list_requirements():
150 result = []
151 linked = []
152 for link in DEPENDENCY_LINKS.values():
153 egg = link.split("#egg=")[1]
154 linked.append(egg.split('-')[0])
155 result.append(link)
156 for requirement in requirements(include_conditional=True):
157 is_linked = False
158 for link in linked:
159 if requirement.replace('-', '_').startswith(link):
160 is_linked = True
161 if not is_linked:
162 result.append(requirement)
163 return result
164
165
166 if __name__ == "__main__":
167 import sys
168 sys.stdout.writelines(req + "\n" for req in list_requirements())
169
[end of synapse/python_dependencies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -1,5 +1,6 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +19,18 @@
logger = logging.getLogger(__name__)
+# this dict maps from python package name to a list of modules we expect it to
+# provide.
+#
+# the key is a "requirement specifier", as used as a parameter to `pip
+# install`[1], or an `install_requires` argument to `setuptools.setup` [2].
+#
+# the value is a sequence of strings; each entry should be the name of the
+# python module, optionally followed by a version assertion which can be either
+# ">=<ver>" or "==<ver>".
+#
+# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
+# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies
REQUIREMENTS = {
"jsonschema>=2.5.1": ["jsonschema>=2.5.1"],
"frozendict>=0.4": ["frozendict"],
@@ -26,7 +39,11 @@
"signedjson>=1.0.0": ["signedjson>=1.0.0"],
"pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"],
"service_identity>=1.0.0": ["service_identity>=1.0.0"],
- "Twisted>=16.0.0": ["twisted>=16.0.0"],
+
+ # we break under Twisted 18.4
+ # (https://github.com/matrix-org/synapse/issues/3135)
+ "Twisted>=16.0.0,<18.4": ["twisted>=16.0.0"],
+
"pyopenssl>=0.14": ["OpenSSL>=0.14"],
"pyyaml": ["yaml"],
"pyasn1": ["pyasn1"],
@@ -39,6 +56,7 @@
"pymacaroons-pynacl": ["pymacaroons"],
"msgpack-python>=0.3.0": ["msgpack"],
"phonenumbers>=8.2.0": ["phonenumbers"],
+ "six": ["six"],
}
CONDITIONAL_REQUIREMENTS = {
"web_client": {
| {"golden_diff": "diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py\n--- a/synapse/python_dependencies.py\n+++ b/synapse/python_dependencies.py\n@@ -1,5 +1,6 @@\n # Copyright 2015, 2016 OpenMarket Ltd\n # Copyright 2017 Vector Creations Ltd\n+# Copyright 2018 New Vector Ltd\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -18,6 +19,18 @@\n \n logger = logging.getLogger(__name__)\n \n+# this dict maps from python package name to a list of modules we expect it to\n+# provide.\n+#\n+# the key is a \"requirement specifier\", as used as a parameter to `pip\n+# install`[1], or an `install_requires` argument to `setuptools.setup` [2].\n+#\n+# the value is a sequence of strings; each entry should be the name of the\n+# python module, optionally followed by a version assertion which can be either\n+# \">=<ver>\" or \"==<ver>\".\n+#\n+# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.\n+# [2] https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies\n REQUIREMENTS = {\n \"jsonschema>=2.5.1\": [\"jsonschema>=2.5.1\"],\n \"frozendict>=0.4\": [\"frozendict\"],\n@@ -26,7 +39,11 @@\n \"signedjson>=1.0.0\": [\"signedjson>=1.0.0\"],\n \"pynacl>=1.2.1\": [\"nacl>=1.2.1\", \"nacl.bindings\"],\n \"service_identity>=1.0.0\": [\"service_identity>=1.0.0\"],\n- \"Twisted>=16.0.0\": [\"twisted>=16.0.0\"],\n+\n+ # we break under Twisted 18.4\n+ # (https://github.com/matrix-org/synapse/issues/3135)\n+ \"Twisted>=16.0.0,<18.4\": [\"twisted>=16.0.0\"],\n+\n \"pyopenssl>=0.14\": [\"OpenSSL>=0.14\"],\n \"pyyaml\": [\"yaml\"],\n \"pyasn1\": [\"pyasn1\"],\n@@ -39,6 +56,7 @@\n \"pymacaroons-pynacl\": [\"pymacaroons\"],\n \"msgpack-python>=0.3.0\": [\"msgpack\"],\n \"phonenumbers>=8.2.0\": [\"phonenumbers\"],\n+ \"six\": [\"six\"],\n }\n CONDITIONAL_REQUIREMENTS = {\n \"web_client\": {\n", "issue": "Add six as dependency\nJust a quick tracking issue to remember to add six as dependency. It is currently used, but it's just an indirect dependency of many other packages. For clarity, it would be good to add it to the dependencies. 
I'm not sure how to do it myself, the file is non-standard.\n", "before_files": [{"content": "# Copyright 2015, 2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom distutils.version import LooseVersion\n\nlogger = logging.getLogger(__name__)\n\nREQUIREMENTS = {\n \"jsonschema>=2.5.1\": [\"jsonschema>=2.5.1\"],\n \"frozendict>=0.4\": [\"frozendict\"],\n \"unpaddedbase64>=1.1.0\": [\"unpaddedbase64>=1.1.0\"],\n \"canonicaljson>=1.1.3\": [\"canonicaljson>=1.1.3\"],\n \"signedjson>=1.0.0\": [\"signedjson>=1.0.0\"],\n \"pynacl>=1.2.1\": [\"nacl>=1.2.1\", \"nacl.bindings\"],\n \"service_identity>=1.0.0\": [\"service_identity>=1.0.0\"],\n \"Twisted>=16.0.0\": [\"twisted>=16.0.0\"],\n \"pyopenssl>=0.14\": [\"OpenSSL>=0.14\"],\n \"pyyaml\": [\"yaml\"],\n \"pyasn1\": [\"pyasn1\"],\n \"daemonize\": [\"daemonize\"],\n \"bcrypt\": [\"bcrypt>=3.1.0\"],\n \"pillow\": [\"PIL\"],\n \"pydenticon\": [\"pydenticon\"],\n \"blist\": [\"blist\"],\n \"pysaml2>=3.0.0\": [\"saml2>=3.0.0\"],\n \"pymacaroons-pynacl\": [\"pymacaroons\"],\n \"msgpack-python>=0.3.0\": [\"msgpack\"],\n \"phonenumbers>=8.2.0\": [\"phonenumbers\"],\n}\nCONDITIONAL_REQUIREMENTS = {\n \"web_client\": {\n \"matrix_angular_sdk>=0.6.8\": [\"syweb>=0.6.8\"],\n },\n \"preview_url\": {\n \"netaddr>=0.7.18\": [\"netaddr\"],\n },\n \"email.enable_notifs\": {\n \"Jinja2>=2.8\": [\"Jinja2>=2.8\"],\n \"bleach>=1.4.2\": [\"bleach>=1.4.2\"],\n },\n \"matrix-synapse-ldap3\": {\n \"matrix-synapse-ldap3>=0.1\": [\"ldap_auth_provider\"],\n },\n \"psutil\": {\n \"psutil>=2.0.0\": [\"psutil>=2.0.0\"],\n },\n \"affinity\": {\n \"affinity\": [\"affinity\"],\n },\n}\n\n\ndef requirements(config=None, include_conditional=False):\n reqs = REQUIREMENTS.copy()\n if include_conditional:\n for _, req in CONDITIONAL_REQUIREMENTS.items():\n reqs.update(req)\n return reqs\n\n\ndef github_link(project, version, egg):\n return \"https://github.com/%s/tarball/%s/#egg=%s\" % (project, version, egg)\n\n\nDEPENDENCY_LINKS = {\n}\n\n\nclass MissingRequirementError(Exception):\n def __init__(self, message, module_name, dependency):\n super(MissingRequirementError, self).__init__(message)\n self.module_name = module_name\n self.dependency = dependency\n\n\ndef check_requirements(config=None):\n \"\"\"Checks that all the modules needed by synapse have been correctly\n installed and are at the correct version\"\"\"\n for dependency, module_requirements in (\n requirements(config, include_conditional=False).items()):\n for module_requirement in module_requirements:\n if \">=\" in module_requirement:\n module_name, required_version = module_requirement.split(\">=\")\n version_test = \">=\"\n elif \"==\" in module_requirement:\n module_name, required_version = module_requirement.split(\"==\")\n version_test = \"==\"\n else:\n module_name = module_requirement\n version_test = None\n\n try:\n module = __import__(module_name)\n except ImportError:\n logging.exception(\n \"Can't import %r which is part of 
%r\",\n module_name, dependency\n )\n raise MissingRequirementError(\n \"Can't import %r which is part of %r\"\n % (module_name, dependency), module_name, dependency\n )\n version = getattr(module, \"__version__\", None)\n file_path = getattr(module, \"__file__\", None)\n logger.info(\n \"Using %r version %r from %r to satisfy %r\",\n module_name, version, file_path, dependency\n )\n\n if version_test == \">=\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) < LooseVersion(required_version):\n raise MissingRequirementError(\n \"Version of %r in %r is too old. %r < %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n elif version_test == \"==\":\n if version is None:\n raise MissingRequirementError(\n \"Version of %r isn't set as __version__ of module %r\"\n % (dependency, module_name), module_name, dependency\n )\n if LooseVersion(version) != LooseVersion(required_version):\n raise MissingRequirementError(\n \"Unexpected version of %r in %r. %r != %r\"\n % (dependency, file_path, version, required_version),\n module_name, dependency\n )\n\n\ndef list_requirements():\n result = []\n linked = []\n for link in DEPENDENCY_LINKS.values():\n egg = link.split(\"#egg=\")[1]\n linked.append(egg.split('-')[0])\n result.append(link)\n for requirement in requirements(include_conditional=True):\n is_linked = False\n for link in linked:\n if requirement.replace('-', '_').startswith(link):\n is_linked = True\n if not is_linked:\n result.append(requirement)\n return result\n\n\nif __name__ == \"__main__\":\n import sys\n sys.stdout.writelines(req + \"\\n\" for req in list_requirements())\n", "path": "synapse/python_dependencies.py"}]} | 2,481 | 636 |
gh_patches_debug_800 | rasdani/github-patches | git_diff | spyder-ide__spyder-4602 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move to support only Rope 0.10.5+
That's because 0.10.5 is the first version to support Python 2 and 3 in the same package.
</issue>
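A purely illustrative sketch of what a single cross-version pin could look like; the constant name and its exact home (setup.py, a runtime requirements check, or the dependencies module) are assumptions, not Spyder's actual code:

```python
# One pin for both Python 2 and 3 now that rope 0.10.5 ships a single package.
ROPE_REQVER = '>=0.10.5'

install_requires = [
    'rope' + ROPE_REQVER,
    # ... other dependencies ...
]
```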
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Spyder
9 ======
10
11 The Scientific PYthon Development EnviRonment
12 """
13
14 from __future__ import print_function
15
16 import os
17 import os.path as osp
18 import subprocess
19 import sys
20 import shutil
21
22 from distutils.core import setup
23 from distutils.command.build import build
24 from distutils.command.install import install
25 from distutils.command.install_data import install_data
26
27
28 #==============================================================================
29 # Check for Python 3
30 #==============================================================================
31 PY3 = sys.version_info[0] == 3
32
33
34 #==============================================================================
35 # Minimal Python version sanity check
36 # Taken from the notebook setup.py -- Modified BSD License
37 #==============================================================================
38 v = sys.version_info
39 if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
40 error = "ERROR: Spyder requires Python version 2.7 or 3.3 or above."
41 print(error, file=sys.stderr)
42 sys.exit(1)
43
44
45 #==============================================================================
46 # Constants
47 #==============================================================================
48 NAME = 'spyder'
49 LIBNAME = 'spyder'
50 from spyder import __version__, __project_url__
51
52
53 #==============================================================================
54 # Auxiliary functions
55 #==============================================================================
56 def get_package_data(name, extlist):
57 """Return data files for package *name* with extensions in *extlist*"""
58 flist = []
59 # Workaround to replace os.path.relpath (not available until Python 2.6):
60 offset = len(name)+len(os.pathsep)
61 for dirpath, _dirnames, filenames in os.walk(name):
62 for fname in filenames:
63 if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:
64 flist.append(osp.join(dirpath, fname)[offset:])
65 return flist
66
67
68 def get_subpackages(name):
69 """Return subpackages of package *name*"""
70 splist = []
71 for dirpath, _dirnames, _filenames in os.walk(name):
72 if osp.isfile(osp.join(dirpath, '__init__.py')):
73 splist.append(".".join(dirpath.split(os.sep)))
74 return splist
75
76
77 def get_data_files():
78 """Return data_files in a platform dependent manner"""
79 if sys.platform.startswith('linux'):
80 if PY3:
81 data_files = [('share/applications', ['scripts/spyder3.desktop']),
82 ('share/pixmaps', ['img_src/spyder3.png']),
83 ('share/metainfo', ['scripts/spyder3.appdata.xml'])]
84 else:
85 data_files = [('share/applications', ['scripts/spyder.desktop']),
86 ('share/pixmaps', ['img_src/spyder.png'])]
87 elif os.name == 'nt':
88 data_files = [('scripts', ['img_src/spyder.ico',
89 'img_src/spyder_reset.ico'])]
90 else:
91 data_files = []
92 return data_files
93
94
95 def get_packages():
96 """Return package list"""
97 packages = (
98 get_subpackages(LIBNAME)
99 + get_subpackages('spyder_breakpoints')
100 + get_subpackages('spyder_profiler')
101 + get_subpackages('spyder_pylint')
102 + get_subpackages('spyder_io_dcm')
103 + get_subpackages('spyder_io_hdf5')
104 )
105 return packages
106
107
108 #==============================================================================
109 # Make Linux detect Spyder desktop file
110 #==============================================================================
111 class MyInstallData(install_data):
112 def run(self):
113 install_data.run(self)
114 if sys.platform.startswith('linux'):
115 try:
116 subprocess.call(['update-desktop-database'])
117 except:
118 print("ERROR: unable to update desktop database",
119 file=sys.stderr)
120 CMDCLASS = {'install_data': MyInstallData}
121
122
123 #==============================================================================
124 # Sphinx build (documentation)
125 #==============================================================================
126 def get_html_help_exe():
127 """Return HTML Help Workshop executable path (Windows only)"""
128 if os.name == 'nt':
129 hhc_base = r'C:\Program Files%s\HTML Help Workshop\hhc.exe'
130 for hhc_exe in (hhc_base % '', hhc_base % ' (x86)'):
131 if osp.isfile(hhc_exe):
132 return hhc_exe
133 else:
134 return
135
136 try:
137 from sphinx import setup_command
138
139 class MyBuild(build):
140 user_options = [('no-doc', None, "Don't build Spyder documentation")] \
141 + build.user_options
142 def __init__(self, *args, **kwargs):
143 build.__init__(self, *args, **kwargs)
144 self.no_doc = False
145 def with_doc(self):
146 setup_dir = os.path.dirname(os.path.abspath(__file__))
147 is_doc_dir = os.path.isdir(os.path.join(setup_dir, 'doc'))
148 install_obj = self.distribution.get_command_obj('install')
149 return (is_doc_dir and not self.no_doc and not install_obj.no_doc)
150 sub_commands = build.sub_commands + [('build_doc', with_doc)]
151 CMDCLASS['build'] = MyBuild
152
153
154 class MyInstall(install):
155 user_options = [('no-doc', None, "Don't build Spyder documentation")] \
156 + install.user_options
157 def __init__(self, *args, **kwargs):
158 install.__init__(self, *args, **kwargs)
159 self.no_doc = False
160 CMDCLASS['install'] = MyInstall
161
162
163 class MyBuildDoc(setup_command.BuildDoc):
164 def run(self):
165 build = self.get_finalized_command('build')
166 sys.path.insert(0, os.path.abspath(build.build_lib))
167 dirname = self.distribution.get_command_obj('build').build_purelib
168 self.builder_target_dir = osp.join(dirname, 'spyder', 'doc')
169
170 if not osp.exists(self.builder_target_dir):
171 os.mkdir(self.builder_target_dir)
172
173 hhc_exe = get_html_help_exe()
174 self.builder = "html" if hhc_exe is None else "htmlhelp"
175
176 try:
177 setup_command.BuildDoc.run(self)
178 except UnicodeDecodeError:
179 print("ERROR: unable to build documentation because Sphinx "\
180 "do not handle source path with non-ASCII characters. "\
181 "Please try to move the source package to another "\
182 "location (path with *only* ASCII characters).",
183 file=sys.stderr)
184 sys.path.pop(0)
185
186 # Building chm doc, if HTML Help Workshop is installed
187 if hhc_exe is not None:
188 fname = osp.join(self.builder_target_dir, 'Spyderdoc.chm')
189 subprocess.call('"%s" %s' % (hhc_exe, fname), shell=True)
190 if osp.isfile(fname):
191 dest = osp.join(dirname, 'spyder')
192 try:
193 shutil.move(fname, dest)
194 except shutil.Error:
195 print("Unable to replace %s" % dest)
196 shutil.rmtree(self.builder_target_dir)
197
198 CMDCLASS['build_doc'] = MyBuildDoc
199 except ImportError:
200 print('WARNING: unable to build documentation because Sphinx '\
201 'is not installed', file=sys.stderr)
202
203
204 #==============================================================================
205 # Main scripts
206 #==============================================================================
207 # NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows
208 # platforms due to a bug in pip installation process (see Issue 1158)
209 SCRIPTS = ['%s_win_post_install.py' % NAME]
210 if PY3 and sys.platform.startswith('linux'):
211 SCRIPTS.append('spyder3')
212 else:
213 SCRIPTS.append('spyder')
214
215
216 #==============================================================================
217 # Files added to the package
218 #==============================================================================
219 EXTLIST = ['.mo', '.svg', '.png', '.css', '.html', '.js', '.chm', '.ini',
220 '.txt', '.rst', '.qss', '.ttf', '.json', '.c', '.cpp', '.java',
221 '.md', '.R', '.csv', '.pyx', '.ipynb']
222 if os.name == 'nt':
223 SCRIPTS += ['spyder.bat']
224 EXTLIST += ['.ico']
225
226
227 #==============================================================================
228 # Setup arguments
229 #==============================================================================
230 setup_args = dict(name=NAME,
231 version=__version__,
232 description='Scientific PYthon Development EnviRonment',
233 long_description=
234 """Spyder is an interactive Python development environment providing
235 MATLAB-like features in a simple and light-weighted software.
236 It also provides ready-to-use pure-Python widgets to your PyQt5 or
237 PyQt4 application: source code editor with syntax highlighting and
238 code introspection/analysis features, NumPy array editor, dictionary
239 editor, Python console, etc.""",
240 download_url='%s/files/%s-%s.zip' % (__project_url__, NAME, __version__),
241 author="The Spyder Project Contributors",
242 url=__project_url__,
243 license='MIT',
244 keywords='PyQt5 PyQt4 editor shell console widgets IDE',
245 platforms=['any'],
246 packages=get_packages(),
247 package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST),
248 'spyder_breakpoints': get_package_data('spyder_breakpoints', EXTLIST),
249 'spyder_profiler': get_package_data('spyder_profiler', EXTLIST),
250 'spyder_pylint': get_package_data('spyder_pylint', EXTLIST),
251 'spyder_io_dcm': get_package_data('spyder_io_dcm', EXTLIST),
252 'spyder_io_hdf5': get_package_data('spyder_io_hdf5', EXTLIST),
253 },
254 scripts=[osp.join('scripts', fname) for fname in SCRIPTS],
255 data_files=get_data_files(),
256 classifiers=['License :: OSI Approved :: MIT License',
257 'Operating System :: MacOS',
258 'Operating System :: Microsoft :: Windows',
259 'Operating System :: POSIX :: Linux',
260 'Programming Language :: Python :: 2.7',
261 'Programming Language :: Python :: 3',
262 'Development Status :: 5 - Production/Stable',
263 'Topic :: Scientific/Engineering',
264 'Topic :: Software Development :: Widget Sets'],
265 cmdclass=CMDCLASS)
266
267
268 #==============================================================================
269 # Setuptools deps
270 #==============================================================================
271 if any(arg == 'bdist_wheel' for arg in sys.argv):
272 import setuptools # analysis:ignore
273
274 install_requires = [
275 'rope_py3k' if PY3 else 'rope>=0.9.4',
276 'jedi>=0.9.0',
277 'pyflakes',
278 'pygments>=2.0',
279 'qtconsole>=4.2.0',
280 'nbconvert',
281 'sphinx',
282 'pycodestyle',
283 'pylint',
284 'psutil',
285 'qtawesome>=0.4.1',
286 'qtpy>=1.1.0',
287 'pickleshare',
288 'pyzmq',
289 'chardet>=2.0.0',
290 'numpydoc',
291 ]
292
293 extras_require = {
294 'test:python_version == "2.7"': ['mock'],
295 'test': ['pytest',
296 'pytest-qt',
297 'pytest-cov',
298 'pytest-xvfb',
299 'mock',
300 'flaky',
301 'pandas',
302 'scipy',
303 'sympy',
304 'pillow',
305 'matplotlib',
306 'cython'],
307 }
308
309 if 'setuptools' in sys.modules:
310 setup_args['install_requires'] = install_requires
311 setup_args['extras_require'] = extras_require
312
313 setup_args['entry_points'] = {
314 'gui_scripts': [
315 '{} = spyder.app.start:main'.format(
316 'spyder3' if PY3 else 'spyder')
317 ]
318 }
319
320 setup_args.pop('scripts', None)
321
322
323 #==============================================================================
324 # Main setup
325 #==============================================================================
326 setup(**setup_args)
327
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -272,7 +272,7 @@
import setuptools # analysis:ignore
install_requires = [
- 'rope_py3k' if PY3 else 'rope>=0.9.4',
+ 'rope>=0.10.5',
'jedi>=0.9.0',
'pyflakes',
'pygments>=2.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -272,7 +272,7 @@\n import setuptools # analysis:ignore\n \n install_requires = [\n- 'rope_py3k' if PY3 else 'rope>=0.9.4',\n+ 'rope>=0.10.5',\n 'jedi>=0.9.0',\n 'pyflakes',\n 'pygments>=2.0',\n", "issue": "Move to support only Rope 0.10.5+\nThat's because 0.10.5 is the first version to support Python 2 and 3 in the same package.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific PYthon Development EnviRonment\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport subprocess\nimport sys\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.command.build import build\nfrom distutils.command.install import install\nfrom distutils.command.install_data import install_data\n\n\n#==============================================================================\n# Check for Python 3\n#==============================================================================\nPY3 = sys.version_info[0] == 3\n\n\n#==============================================================================\n# Minimal Python version sanity check\n# Taken from the notebook setup.py -- Modified BSD License\n#==============================================================================\nv = sys.version_info\nif v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):\n error = \"ERROR: Spyder requires Python version 2.7 or 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\n#==============================================================================\n# Constants\n#==============================================================================\nNAME = 'spyder'\nLIBNAME = 'spyder'\nfrom spyder import __version__, __project_url__\n\n\n#==============================================================================\n# Auxiliary functions\n#==============================================================================\ndef get_package_data(name, extlist):\n \"\"\"Return data files for package *name* with extensions in *extlist*\"\"\"\n flist = []\n # Workaround to replace os.path.relpath (not available until Python 2.6):\n offset = len(name)+len(os.pathsep)\n for dirpath, _dirnames, filenames in os.walk(name):\n for fname in filenames:\n if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:\n flist.append(osp.join(dirpath, fname)[offset:])\n return flist\n\n\ndef get_subpackages(name):\n \"\"\"Return subpackages of package *name*\"\"\"\n splist = []\n for dirpath, _dirnames, _filenames in os.walk(name):\n if osp.isfile(osp.join(dirpath, '__init__.py')):\n splist.append(\".\".join(dirpath.split(os.sep)))\n return splist\n\n\ndef get_data_files():\n \"\"\"Return data_files in a platform dependent manner\"\"\"\n if sys.platform.startswith('linux'):\n if PY3:\n data_files = [('share/applications', ['scripts/spyder3.desktop']),\n ('share/pixmaps', ['img_src/spyder3.png']),\n ('share/metainfo', ['scripts/spyder3.appdata.xml'])]\n else:\n data_files = [('share/applications', ['scripts/spyder.desktop']),\n ('share/pixmaps', ['img_src/spyder.png'])]\n elif os.name == 'nt':\n data_files = [('scripts', ['img_src/spyder.ico',\n 'img_src/spyder_reset.ico'])]\n else:\n data_files = []\n return data_files\n\n\ndef get_packages():\n \"\"\"Return package list\"\"\"\n packages = (\n get_subpackages(LIBNAME)\n + 
get_subpackages('spyder_breakpoints')\n + get_subpackages('spyder_profiler')\n + get_subpackages('spyder_pylint')\n + get_subpackages('spyder_io_dcm')\n + get_subpackages('spyder_io_hdf5')\n )\n return packages\n\n\n#==============================================================================\n# Make Linux detect Spyder desktop file\n#==============================================================================\nclass MyInstallData(install_data):\n def run(self):\n install_data.run(self)\n if sys.platform.startswith('linux'):\n try:\n subprocess.call(['update-desktop-database'])\n except:\n print(\"ERROR: unable to update desktop database\",\n file=sys.stderr)\nCMDCLASS = {'install_data': MyInstallData}\n\n\n#==============================================================================\n# Sphinx build (documentation)\n#==============================================================================\ndef get_html_help_exe():\n \"\"\"Return HTML Help Workshop executable path (Windows only)\"\"\"\n if os.name == 'nt':\n hhc_base = r'C:\\Program Files%s\\HTML Help Workshop\\hhc.exe'\n for hhc_exe in (hhc_base % '', hhc_base % ' (x86)'):\n if osp.isfile(hhc_exe):\n return hhc_exe\n else:\n return\n\ntry:\n from sphinx import setup_command\n\n class MyBuild(build):\n user_options = [('no-doc', None, \"Don't build Spyder documentation\")] \\\n + build.user_options\n def __init__(self, *args, **kwargs):\n build.__init__(self, *args, **kwargs)\n self.no_doc = False\n def with_doc(self):\n setup_dir = os.path.dirname(os.path.abspath(__file__))\n is_doc_dir = os.path.isdir(os.path.join(setup_dir, 'doc'))\n install_obj = self.distribution.get_command_obj('install')\n return (is_doc_dir and not self.no_doc and not install_obj.no_doc)\n sub_commands = build.sub_commands + [('build_doc', with_doc)]\n CMDCLASS['build'] = MyBuild\n\n\n class MyInstall(install):\n user_options = [('no-doc', None, \"Don't build Spyder documentation\")] \\\n + install.user_options\n def __init__(self, *args, **kwargs):\n install.__init__(self, *args, **kwargs)\n self.no_doc = False\n CMDCLASS['install'] = MyInstall\n\n\n class MyBuildDoc(setup_command.BuildDoc):\n def run(self):\n build = self.get_finalized_command('build')\n sys.path.insert(0, os.path.abspath(build.build_lib))\n dirname = self.distribution.get_command_obj('build').build_purelib\n self.builder_target_dir = osp.join(dirname, 'spyder', 'doc')\n\n if not osp.exists(self.builder_target_dir):\n os.mkdir(self.builder_target_dir)\n\n hhc_exe = get_html_help_exe()\n self.builder = \"html\" if hhc_exe is None else \"htmlhelp\"\n\n try:\n setup_command.BuildDoc.run(self)\n except UnicodeDecodeError:\n print(\"ERROR: unable to build documentation because Sphinx \"\\\n \"do not handle source path with non-ASCII characters. 
\"\\\n \"Please try to move the source package to another \"\\\n \"location (path with *only* ASCII characters).\",\n file=sys.stderr)\n sys.path.pop(0)\n\n # Building chm doc, if HTML Help Workshop is installed\n if hhc_exe is not None:\n fname = osp.join(self.builder_target_dir, 'Spyderdoc.chm')\n subprocess.call('\"%s\" %s' % (hhc_exe, fname), shell=True)\n if osp.isfile(fname):\n dest = osp.join(dirname, 'spyder')\n try:\n shutil.move(fname, dest)\n except shutil.Error:\n print(\"Unable to replace %s\" % dest)\n shutil.rmtree(self.builder_target_dir)\n\n CMDCLASS['build_doc'] = MyBuildDoc\nexcept ImportError:\n print('WARNING: unable to build documentation because Sphinx '\\\n 'is not installed', file=sys.stderr)\n\n\n#==============================================================================\n# Main scripts\n#==============================================================================\n# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows\n# platforms due to a bug in pip installation process (see Issue 1158)\nSCRIPTS = ['%s_win_post_install.py' % NAME]\nif PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\nelse:\n SCRIPTS.append('spyder')\n\n\n#==============================================================================\n# Files added to the package\n#==============================================================================\nEXTLIST = ['.mo', '.svg', '.png', '.css', '.html', '.js', '.chm', '.ini',\n '.txt', '.rst', '.qss', '.ttf', '.json', '.c', '.cpp', '.java',\n '.md', '.R', '.csv', '.pyx', '.ipynb']\nif os.name == 'nt':\n SCRIPTS += ['spyder.bat']\n EXTLIST += ['.ico']\n\n\n#==============================================================================\n# Setup arguments\n#==============================================================================\nsetup_args = dict(name=NAME,\n version=__version__,\n description='Scientific PYthon Development EnviRonment',\n long_description=\n\"\"\"Spyder is an interactive Python development environment providing\nMATLAB-like features in a simple and light-weighted software.\nIt also provides ready-to-use pure-Python widgets to your PyQt5 or\nPyQt4 application: source code editor with syntax highlighting and\ncode introspection/analysis features, NumPy array editor, dictionary\neditor, Python console, etc.\"\"\",\n download_url='%s/files/%s-%s.zip' % (__project_url__, NAME, __version__),\n author=\"The Spyder Project Contributors\",\n url=__project_url__,\n license='MIT',\n keywords='PyQt5 PyQt4 editor shell console widgets IDE',\n platforms=['any'],\n packages=get_packages(),\n package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST),\n 'spyder_breakpoints': get_package_data('spyder_breakpoints', EXTLIST),\n 'spyder_profiler': get_package_data('spyder_profiler', EXTLIST),\n 'spyder_pylint': get_package_data('spyder_pylint', EXTLIST),\n 'spyder_io_dcm': get_package_data('spyder_io_dcm', EXTLIST),\n 'spyder_io_hdf5': get_package_data('spyder_io_hdf5', EXTLIST),\n },\n scripts=[osp.join('scripts', fname) for fname in SCRIPTS],\n data_files=get_data_files(),\n classifiers=['License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 5 - Production/Stable',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Widget Sets'],\n 
cmdclass=CMDCLASS)\n\n\n#==============================================================================\n# Setuptools deps\n#==============================================================================\nif any(arg == 'bdist_wheel' for arg in sys.argv):\n import setuptools # analysis:ignore\n\ninstall_requires = [\n 'rope_py3k' if PY3 else 'rope>=0.9.4',\n 'jedi>=0.9.0',\n 'pyflakes',\n 'pygments>=2.0',\n 'qtconsole>=4.2.0',\n 'nbconvert',\n 'sphinx',\n 'pycodestyle',\n 'pylint',\n 'psutil',\n 'qtawesome>=0.4.1',\n 'qtpy>=1.1.0',\n 'pickleshare',\n 'pyzmq',\n 'chardet>=2.0.0',\n 'numpydoc',\n]\n\nextras_require = {\n 'test:python_version == \"2.7\"': ['mock'],\n 'test': ['pytest',\n 'pytest-qt',\n 'pytest-cov',\n 'pytest-xvfb',\n 'mock',\n 'flaky',\n 'pandas',\n 'scipy',\n 'sympy',\n 'pillow',\n 'matplotlib',\n 'cython'],\n}\n\nif 'setuptools' in sys.modules:\n setup_args['install_requires'] = install_requires\n setup_args['extras_require'] = extras_require\n\n setup_args['entry_points'] = {\n 'gui_scripts': [\n '{} = spyder.app.start:main'.format(\n 'spyder3' if PY3 else 'spyder')\n ]\n }\n\n setup_args.pop('scripts', None)\n\n\n#==============================================================================\n# Main setup\n#==============================================================================\nsetup(**setup_args)\n", "path": "setup.py"}]} | 3,988 | 109 |
gh_patches_debug_17505 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect total number of batches
**Describe the bug**
Sometimes the total number of batches is computed incorrectly.
**To Reproduce**
Run the following code with current master branch:
```
from time import sleep
import torch
from torch.utils.data import DataLoader, Dataset
import pytorch_lightning as pl
class DummyDataset(Dataset):
def __init__(self, n):
super().__init__()
self.n = n
def __len__(self):
return self.n
def __getitem__(self, idx):
return torch.rand(10)
class CoolSystem(pl.LightningModule):
def __init__(self):
super(CoolSystem, self).__init__()
self.layer = torch.nn.Linear(10, 10)
def forward(self, x):
return self.layer(x)
def training_step(self, batch, batch_nb):
sleep(1)
return {'loss': torch.mean(self.forward(batch) ** 2)}
def validation_step(self, batch, batch_nb):
sleep(1)
return {}
def validation_end(self, outputs):
return {}
def configure_optimizers(self):
return [torch.optim.Adam(self.layer.parameters())]
@pl.data_loader
def train_dataloader(self):
return DataLoader(DummyDataset(10), batch_size=1)
@pl.data_loader
def val_dataloader(self):
return DataLoader(DummyDataset(5), batch_size=1)
model = CoolSystem()
trainer = pl.Trainer(weights_summary=None, nb_sanity_val_steps=0, early_stop_callback=False,
val_percent_check=1.0, val_check_interval=0.5)
trainer.fit(model)
```
At first, the output will look like:
`67%|█████▋ | 10/15 [00:10<00:05, 1.05s/it, batch_nb=4, epoch=0, loss=0.194, v_nb=0]`
But at the end of the epoch it will look like:
`20it [00:20, 1.07s/it, batch_nb=9, epoch=0, loss=0.212, v_nb=0]`
Moreover, if you run
```
trainer = pl.Trainer(weights_summary=None, nb_sanity_val_steps=0, early_stop_callback=False,
val_percent_check=1.0, val_check_interval=0.5, check_val_every_n_epoch=10)
```
The first epoch will end at the point
`67%|█████▋ | 10/15 [00:09<00:04, 1.01it/s, batch_nb=8, epoch=1, loss=0.069, v_nb=0]`
**Expected behavior**
Correct total number of batches.
**Possible solution**
Currently `total_batches = nb_training_batches + nb_val_batches`, where `nb_val_batches` is the number of batches in a single validation loop. The problem arises because there can actually be several validation loops during one training epoch. Moreover, because of the `check_val_every_n_epoch` parameter, an epoch can also contain no validation loop at all.
With this in mind, it looks like the correct formula is:
```
is_val_epoch = (current_epoch + 1) % check_val_every_n_epoch == 0
val_checks_per_epoch = nb_training_batches // val_check_batch if is_val_epoch else 0
total_batches = nb_training_batches + nb_val_batches * val_checks_per_epoch
```
</issue>
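To make the proposed formula concrete, here is the arithmetic for the reproduction script above (10 training batches, 5 validation batches, `val_check_interval=0.5` so validation runs every 5 training batches, and validation enabled every epoch). This is only a worked sketch using the attribute names from the issue:

```python
# Values taken from the reproduction script in the issue.
nb_training_batches = 10      # DummyDataset(10), batch_size=1
nb_val_batches = 5            # DummyDataset(5), batch_size=1
val_check_batch = 5           # val_check_interval=0.5 -> check every 5 training batches
check_val_every_n_epoch = 1
current_epoch = 0

# Current (buggy) estimate counts a single validation loop:
buggy_total = nb_training_batches + nb_val_batches                                     # 15

# Proposed estimate counts every validation loop in the epoch:
is_val_epoch = (current_epoch + 1) % check_val_every_n_epoch == 0                      # True
val_checks_per_epoch = nb_training_batches // val_check_batch if is_val_epoch else 0   # 2
total_batches = nb_training_batches + nb_val_batches * val_checks_per_epoch            # 20
```

The corrected total of 20 matches the `20it` the progress bar actually reaches at the end of the epoch, while the buggy total of 15 matches the misleading `10/15` shown mid-epoch.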
<code>
[start of pytorch_lightning/trainer/train_loop_mixin.py]
1 import numpy as np
2
3 try:
4 from apex import amp
5
6 APEX_AVAILABLE = True
7 except ImportError:
8 APEX_AVAILABLE = False
9
10
11 class TrainerTrainLoopMixin(object):
12
13 def train(self):
14 # run all epochs
15 for epoch_nb in range(self.current_epoch, self.max_nb_epochs):
16 # set seed for distributed sampler (enables shuffling for each epoch)
17 if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
18 self.get_train_dataloader().sampler.set_epoch(epoch_nb)
19
20 # get model
21 model = self.get_model()
22
23 # update training progress in trainer and model
24 model.current_epoch = epoch_nb
25 self.current_epoch = epoch_nb
26 self.total_batches = self.nb_training_batches + self.nb_val_batches
27 self.batch_loss_value = 0 # accumulated grads
28
29 # limit the number of batches to 1 in fast_dev_run
30 if self.fast_dev_run:
31 self.total_batches = 1
32
33 # init progress_bar when requested
34 if self.show_progress_bar:
35 nb_iterations = self.total_batches
36
37 # for iterable train loader, the progress bar never ends
38 if self.is_iterable_train_dataloader:
39 nb_iterations = float('inf')
40 self.progress_bar.reset(nb_iterations)
41
42 # changing gradient according accumulation_scheduler
43 self.accumulation_scheduler.on_epoch_begin(epoch_nb, self)
44
45 # -----------------
46 # RUN TNG EPOCH
47 # -----------------
48 self.run_training_epoch()
49
50 # update LR schedulers
51 if self.lr_schedulers is not None:
52 for lr_scheduler in self.lr_schedulers:
53 lr_scheduler.step(self.current_epoch)
54
55 # early stopping
56 met_min_epochs = epoch_nb > self.min_nb_epochs
57 if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):
58 should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb,
59 logs=self.callback_metrics)
60 # stop training
61 stop = should_stop and met_min_epochs
62 if stop:
63 return
64
65 if self.logger is not None:
66 self.logger.finalize("success")
67
68 def run_training_epoch(self):
69 # before epoch hook
70 if self.is_function_implemented('on_epoch_start'):
71 model = self.get_model()
72 model.on_epoch_start()
73
74 # run epoch
75 for batch_nb, batch in enumerate(self.get_train_dataloader()):
76 self.batch_nb = batch_nb
77
78 model = self.get_model()
79 model.global_step = self.global_step
80
81 # ---------------
82 # RUN TRAIN STEP
83 # ---------------
84 output = self.run_training_batch(batch, batch_nb)
85 batch_result, grad_norm_dic, batch_step_metrics = output
86
87 # when returning -1 from train_step, we end epoch early
88 early_stop_epoch = batch_result == -1
89
90 # ---------------
91 # RUN VAL STEP
92 # ---------------
93 is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0
94 can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
95 should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch)
96
97 # fast_dev_run always forces val checking after train batch
98 if self.fast_dev_run or should_check_val:
99 self.run_evaluation(test=self.testing)
100
101 # when logs should be saved
102 should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch
103 if should_save_log or self.fast_dev_run:
104 if self.proc_rank == 0 and self.logger is not None:
105 self.logger.save()
106
107 # when metrics should be logged
108 should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch
109 if should_log_metrics or self.fast_dev_run:
110 # logs user requested information to logger
111 self.log_metrics(batch_step_metrics, grad_norm_dic)
112
113 self.global_step += 1
114 self.total_batch_nb += 1
115
116 # end epoch early
117 # stop when the flag is changed or we've gone past the amount
118 # requested in the batches
119 if early_stop_epoch or self.fast_dev_run:
120 break
121
122 # stop epoch if we limited nb batches
123 met_batch_limit = batch_nb >= self.nb_training_batches
124 if met_batch_limit:
125 break
126
127 # epoch end hook
128 if self.is_function_implemented('on_epoch_end'):
129 model = self.get_model()
130 model.on_epoch_end()
131
132 def run_training_batch(self, batch, batch_nb):
133 # track grad norms
134 grad_norm_dic = {}
135
136 # track all metrics for callbacks
137 all_callback_metrics = []
138
139 # track metrics to log
140 all_log_metrics = []
141
142 if batch is None:
143 return 0, grad_norm_dic
144
145 # hook
146 if self.is_function_implemented('on_batch_start'):
147 model_ref = self.get_model()
148 response = model_ref.on_batch_start(batch)
149
150 if response == -1:
151 return -1, grad_norm_dic
152
153 if self.show_progress_bar:
154 self.progress_bar.update(1)
155
156 # call training_step once per optimizer
157 for opt_idx, optimizer in enumerate(self.optimizers):
158
159 # wrap the forward step in a closure so second order methods work
160 def optimizer_closure():
161 # forward pass
162 output = self.training_forward(batch, batch_nb, opt_idx)
163 closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output
164
165 # track metrics for callbacks
166 all_callback_metrics.append(callback_metrics)
167
168 # track progress bar metrics
169 self.add_tqdm_metrics(progress_bar_metrics)
170 all_log_metrics.append(log_metrics)
171
172 # accumulate loss
173 # (if accumulate_grad_batches = 1 no effect)
174 closure_loss = closure_loss / self.accumulate_grad_batches
175
176 # backward pass
177 # done in hook so user can overwrite if needed
178 model_ref = self.get_model()
179 model_ref.backward(self.use_amp, closure_loss, optimizer)
180
181 # insert after step hook
182 if self.is_function_implemented('on_after_backward'):
183 model_ref = self.get_model()
184 model_ref.on_after_backward()
185
186 return closure_loss
187
188 # calculate loss
189 loss = optimizer_closure()
190
191 # nan grads
192 if self.print_nan_grads:
193 self.print_nan_gradients()
194
195 # track total loss for logging (avoid mem leaks)
196 self.batch_loss_value += loss.item()
197
198 # gradient update with accumulated gradients
199 if (self.batch_nb + 1) % self.accumulate_grad_batches == 0:
200
201 # track gradient norms when requested
202 if batch_nb % self.row_log_interval == 0:
203 if self.track_grad_norm > 0:
204 model = self.get_model()
205 grad_norm_dic = model.grad_norm(self.track_grad_norm)
206
207 # clip gradients
208 self.clip_gradients()
209
210 # calls .step(), .zero_grad()
211 # override function to modify this behavior
212 model = self.get_model()
213 model.optimizer_step(self.current_epoch, batch_nb,
214 optimizer, opt_idx, optimizer_closure)
215
216 # calculate running loss for display
217 self.running_loss.append(self.batch_loss_value)
218 self.batch_loss_value = 0
219 self.avg_loss = np.mean(self.running_loss[-100:])
220
221 # update progress bar
222 if self.show_progress_bar:
223 # add model specific metrics
224 tqdm_metrics = self.training_tqdm_dict
225 self.progress_bar.set_postfix(**tqdm_metrics)
226
227 # activate batch end hook
228 if self.is_function_implemented('on_batch_end'):
229 model = self.get_model()
230 model.on_batch_end()
231
232 # collapse all metrics into one dict
233 all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
234
235 # track all metrics for callbacks
236 self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()}
237
238 return 0, grad_norm_dic, all_log_metrics
239
240 def training_forward(self, batch, batch_nb, opt_idx):
241 """
242 Handle forward for each training case (distributed, single gpu, etc...)
243 :param batch:
244 :param batch_nb:
245 :return:
246 """
247 # ---------------
248 # FORWARD
249 # ---------------
250 # enable not needing to add opt_idx to training_step
251 args = [batch, batch_nb]
252 if len(self.optimizers) > 1:
253 args.append(opt_idx)
254
255 if self.use_ddp or self.use_ddp2:
256 output = self.model(*args)
257 elif self.use_dp:
258 output = self.model(*args)
259 elif self.single_gpu:
260 gpu_id = 0
261 if type(self.data_parallel_device_ids) is list:
262 gpu_id = self.data_parallel_device_ids[0]
263 batch = self.transfer_batch_to_gpu(batch, gpu_id)
264 args[0] = batch
265 output = self.model.training_step(*args)
266
267 else:
268 output = self.model.training_step(*args)
269
270 # format and reduce outputs accordingly
271 output = self.process_output(output, train=True)
272 loss, progress_bar_metrics, log_metrics, callback_metrics = output
273 return loss, progress_bar_metrics, log_metrics, callback_metrics
274
[end of pytorch_lightning/trainer/train_loop_mixin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/trainer/train_loop_mixin.py b/pytorch_lightning/trainer/train_loop_mixin.py
--- a/pytorch_lightning/trainer/train_loop_mixin.py
+++ b/pytorch_lightning/trainer/train_loop_mixin.py
@@ -23,7 +23,15 @@
# update training progress in trainer and model
model.current_epoch = epoch_nb
self.current_epoch = epoch_nb
- self.total_batches = self.nb_training_batches + self.nb_val_batches
+
+ # val can be checked multiple times in epoch
+ is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
+ val_checks_per_epoch = self.nb_training_batches // self.val_check_batch
+ val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
+
+ # total batches includes multiple val checks
+ self.total_batches = (self.nb_training_batches +
+ self.nb_val_batches * val_checks_per_epoch)
self.batch_loss_value = 0 # accumulated grads
# limit the number of batches to 1 in fast_dev_run
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/train_loop_mixin.py b/pytorch_lightning/trainer/train_loop_mixin.py\n--- a/pytorch_lightning/trainer/train_loop_mixin.py\n+++ b/pytorch_lightning/trainer/train_loop_mixin.py\n@@ -23,7 +23,15 @@\n # update training progress in trainer and model\n model.current_epoch = epoch_nb\n self.current_epoch = epoch_nb\n- self.total_batches = self.nb_training_batches + self.nb_val_batches\n+\n+ # val can be checked multiple times in epoch\n+ is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0\n+ val_checks_per_epoch = self.nb_training_batches // self.val_check_batch\n+ val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0\n+\n+ # total batches includes multiple val checks\n+ self.total_batches = (self.nb_training_batches +\n+ self.nb_val_batches * val_checks_per_epoch)\n self.batch_loss_value = 0 # accumulated grads\n \n # limit the number of batches to 1 in fast_dev_run\n", "issue": "Incorrect total number of batches\n**Describe the bug**\r\nSometimes total number of batches is computed wrong.\r\n\r\n**To Reproduce**\r\nRun the following code with current master branch:\r\n```\r\nfrom time import sleep\r\nimport torch\r\nfrom torch.utils.data import DataLoader, Dataset\r\n\r\nimport pytorch_lightning as pl\r\n\r\n\r\nclass DummyDataset(Dataset):\r\n def __init__(self, n):\r\n super().__init__()\r\n self.n = n\r\n\r\n def __len__(self):\r\n return self.n\r\n\r\n def __getitem__(self, idx):\r\n return torch.rand(10)\r\n\r\n\r\nclass CoolSystem(pl.LightningModule):\r\n def __init__(self):\r\n super(CoolSystem, self).__init__()\r\n self.layer = torch.nn.Linear(10, 10)\r\n\r\n def forward(self, x):\r\n return self.layer(x)\r\n\r\n def training_step(self, batch, batch_nb):\r\n sleep(1)\r\n return {'loss': torch.mean(self.forward(batch) ** 2)}\r\n\r\n def validation_step(self, batch, batch_nb):\r\n sleep(1)\r\n return {}\r\n\r\n def validation_end(self, outputs):\r\n return {}\r\n\r\n def configure_optimizers(self):\r\n return [torch.optim.Adam(self.layer.parameters())]\r\n\r\n @pl.data_loader\r\n def train_dataloader(self):\r\n return DataLoader(DummyDataset(10), batch_size=1)\r\n\r\n @pl.data_loader\r\n def val_dataloader(self):\r\n return DataLoader(DummyDataset(5), batch_size=1)\r\n\r\nmodel = CoolSystem()\r\ntrainer = pl.Trainer(weights_summary=None, nb_sanity_val_steps=0, early_stop_callback=False,\r\n val_percent_check=1.0, val_check_interval=0.5)\r\ntrainer.fit(model)\r\n```\r\nAt first output will look like:\r\n`67%|\u2588\u2588\u2588\u2588\u2588\u258b | 10/15 [00:10<00:05, 1.05s/it, batch_nb=4, epoch=0, loss=0.194, v_nb=0]`\r\n\r\nBut at the end of the epoch it will be like:\r\n`20it [00:20, 1.07s/it, batch_nb=9, epoch=0, loss=0.212, v_nb=0]`\r\n\r\nMoreover, if you run\r\n```\r\ntrainer = pl.Trainer(weights_summary=None, nb_sanity_val_steps=0, early_stop_callback=False,\r\n val_percent_check=1.0, val_check_interval=0.5, check_val_every_n_epoch=10)\r\n```\r\nThe first epoch will end at the point \r\n`67%|\u2588\u2588\u2588\u2588\u2588\u258b | 10/15 [00:09<00:04, 1.01it/s, batch_nb=8, epoch=1, loss=0.069, v_nb=0]` \r\n\r\n**Expected behavior**\r\nCorrect total number of batches.\r\n\r\n**Possible solution**\r\nNow we have `total_batches = nb_training_batches + nb_val_batches`, where `nb_val_batches` is the number of batches of only one validation loop. And the problem arises because actually there can be several validation loops during one training epoch. 
Moreover there is a parameter `check_val_every_n_epoch` and thus there can be no validation loops at all.\r\n\r\nWith this in mind, it looks like the correct formula is:\r\n```\r\nis_val_epoch = (current_epoch + 1) % check_val_every_n_epoch == 0\r\nval_checks_per_epoch = nb_training_batches // val_check_batch if is_val_epoch else 0\r\ntotal_batches = nb_training_batches + nb_val_batches * val_checks_per_epoch\r\n```\r\n\n", "before_files": [{"content": "import numpy as np\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerTrainLoopMixin(object):\n\n def train(self):\n # run all epochs\n for epoch_nb in range(self.current_epoch, self.max_nb_epochs):\n # set seed for distributed sampler (enables shuffling for each epoch)\n if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):\n self.get_train_dataloader().sampler.set_epoch(epoch_nb)\n\n # get model\n model = self.get_model()\n\n # update training progress in trainer and model\n model.current_epoch = epoch_nb\n self.current_epoch = epoch_nb\n self.total_batches = self.nb_training_batches + self.nb_val_batches\n self.batch_loss_value = 0 # accumulated grads\n\n # limit the number of batches to 1 in fast_dev_run\n if self.fast_dev_run:\n self.total_batches = 1\n\n # init progress_bar when requested\n if self.show_progress_bar:\n nb_iterations = self.total_batches\n\n # for iterable train loader, the progress bar never ends\n if self.is_iterable_train_dataloader:\n nb_iterations = float('inf')\n self.progress_bar.reset(nb_iterations)\n\n # changing gradient according accumulation_scheduler\n self.accumulation_scheduler.on_epoch_begin(epoch_nb, self)\n\n # -----------------\n # RUN TNG EPOCH\n # -----------------\n self.run_training_epoch()\n\n # update LR schedulers\n if self.lr_schedulers is not None:\n for lr_scheduler in self.lr_schedulers:\n lr_scheduler.step(self.current_epoch)\n\n # early stopping\n met_min_epochs = epoch_nb > self.min_nb_epochs\n if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):\n should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb,\n logs=self.callback_metrics)\n # stop training\n stop = should_stop and met_min_epochs\n if stop:\n return\n\n if self.logger is not None:\n self.logger.finalize(\"success\")\n\n def run_training_epoch(self):\n # before epoch hook\n if self.is_function_implemented('on_epoch_start'):\n model = self.get_model()\n model.on_epoch_start()\n\n # run epoch\n for batch_nb, batch in enumerate(self.get_train_dataloader()):\n self.batch_nb = batch_nb\n\n model = self.get_model()\n model.global_step = self.global_step\n\n # ---------------\n # RUN TRAIN STEP\n # ---------------\n output = self.run_training_batch(batch, batch_nb)\n batch_result, grad_norm_dic, batch_step_metrics = output\n\n # when returning -1 from train_step, we end epoch early\n early_stop_epoch = batch_result == -1\n\n # ---------------\n # RUN VAL STEP\n # ---------------\n is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0\n can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0\n should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch)\n\n # fast_dev_run always forces val checking after train batch\n if self.fast_dev_run or should_check_val:\n self.run_evaluation(test=self.testing)\n\n # when logs should be saved\n should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch\n if should_save_log or self.fast_dev_run:\n if 
self.proc_rank == 0 and self.logger is not None:\n self.logger.save()\n\n # when metrics should be logged\n should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch\n if should_log_metrics or self.fast_dev_run:\n # logs user requested information to logger\n self.log_metrics(batch_step_metrics, grad_norm_dic)\n\n self.global_step += 1\n self.total_batch_nb += 1\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if early_stop_epoch or self.fast_dev_run:\n break\n\n # stop epoch if we limited nb batches\n met_batch_limit = batch_nb >= self.nb_training_batches\n if met_batch_limit:\n break\n\n # epoch end hook\n if self.is_function_implemented('on_epoch_end'):\n model = self.get_model()\n model.on_epoch_end()\n\n def run_training_batch(self, batch, batch_nb):\n # track grad norms\n grad_norm_dic = {}\n\n # track all metrics for callbacks\n all_callback_metrics = []\n\n # track metrics to log\n all_log_metrics = []\n\n if batch is None:\n return 0, grad_norm_dic\n\n # hook\n if self.is_function_implemented('on_batch_start'):\n model_ref = self.get_model()\n response = model_ref.on_batch_start(batch)\n\n if response == -1:\n return -1, grad_norm_dic\n\n if self.show_progress_bar:\n self.progress_bar.update(1)\n\n # call training_step once per optimizer\n for opt_idx, optimizer in enumerate(self.optimizers):\n\n # wrap the forward step in a closure so second order methods work\n def optimizer_closure():\n # forward pass\n output = self.training_forward(batch, batch_nb, opt_idx)\n closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output\n\n # track metrics for callbacks\n all_callback_metrics.append(callback_metrics)\n\n # track progress bar metrics\n self.add_tqdm_metrics(progress_bar_metrics)\n all_log_metrics.append(log_metrics)\n\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n closure_loss = closure_loss / self.accumulate_grad_batches\n\n # backward pass\n # done in hook so user can overwrite if needed\n model_ref = self.get_model()\n model_ref.backward(self.use_amp, closure_loss, optimizer)\n\n # insert after step hook\n if self.is_function_implemented('on_after_backward'):\n model_ref = self.get_model()\n model_ref.on_after_backward()\n\n return closure_loss\n\n # calculate loss\n loss = optimizer_closure()\n\n # nan grads\n if self.print_nan_grads:\n self.print_nan_gradients()\n\n # track total loss for logging (avoid mem leaks)\n self.batch_loss_value += loss.item()\n\n # gradient update with accumulated gradients\n if (self.batch_nb + 1) % self.accumulate_grad_batches == 0:\n\n # track gradient norms when requested\n if batch_nb % self.row_log_interval == 0:\n if self.track_grad_norm > 0:\n model = self.get_model()\n grad_norm_dic = model.grad_norm(self.track_grad_norm)\n\n # clip gradients\n self.clip_gradients()\n\n # calls .step(), .zero_grad()\n # override function to modify this behavior\n model = self.get_model()\n model.optimizer_step(self.current_epoch, batch_nb,\n optimizer, opt_idx, optimizer_closure)\n\n # calculate running loss for display\n self.running_loss.append(self.batch_loss_value)\n self.batch_loss_value = 0\n self.avg_loss = np.mean(self.running_loss[-100:])\n\n # update progress bar\n if self.show_progress_bar:\n # add model specific metrics\n tqdm_metrics = self.training_tqdm_dict\n self.progress_bar.set_postfix(**tqdm_metrics)\n\n # activate batch end hook\n if self.is_function_implemented('on_batch_end'):\n model = self.get_model()\n 
model.on_batch_end()\n\n # collapse all metrics into one dict\n all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}\n\n # track all metrics for callbacks\n self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()}\n\n return 0, grad_norm_dic, all_log_metrics\n\n def training_forward(self, batch, batch_nb, opt_idx):\n \"\"\"\n Handle forward for each training case (distributed, single gpu, etc...)\n :param batch:\n :param batch_nb:\n :return:\n \"\"\"\n # ---------------\n # FORWARD\n # ---------------\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_nb]\n if len(self.optimizers) > 1:\n args.append(opt_idx)\n\n if self.use_ddp or self.use_ddp2:\n output = self.model(*args)\n elif self.use_dp:\n output = self.model(*args)\n elif self.single_gpu:\n gpu_id = 0\n if type(self.data_parallel_device_ids) is list:\n gpu_id = self.data_parallel_device_ids[0]\n batch = self.transfer_batch_to_gpu(batch, gpu_id)\n args[0] = batch\n output = self.model.training_step(*args)\n\n else:\n output = self.model.training_step(*args)\n\n # format and reduce outputs accordingly\n output = self.process_output(output, train=True)\n loss, progress_bar_metrics, log_metrics, callback_metrics = output\n return loss, progress_bar_metrics, log_metrics, callback_metrics\n", "path": "pytorch_lightning/trainer/train_loop_mixin.py"}]} | 4,053 | 248 |
gh_patches_debug_11270 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2886 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2520 raised for mutually exclusive properties when using Conditions
### CloudFormation Lint Version
cfn-lint 0.80.2
### What operating system are you using?
Windows
### Describe the bug
[E2520](https://github.com/aws-cloudformation/cfn-lint/blob/main/docs/rules.md#E2520) is raised for mutually exclusive properties when using Conditions
```
cfn-lint -t ./template.yaml
E2520 Property SourceSecurityGroupId should NOT exist with CidrIp for Resources/Ingress/Properties
.\template.yaml:13:7
```
The same template was passing prior to `0.79.11`. PR [2875](https://github.com/aws-cloudformation/cfn-lint/pull/2875) seems to be the cause.
```
> cfn-lint --version
cfn-lint 0.79.10
> cfn-lint -t ./template.yaml
> echo $lastexitcode
0
```
### Expected behavior
E2520 is not raised for mutually exclusive properties that are controlled by the same Condition through the `Fn::If` intrinsic function, which ensures that only one of the properties has a value.
### Reproduction template
```yaml
AWSTemplateFormatVersion: 2010-09-09
Parameters:
pCidr:
Type: String
Default: ''
Conditions:
cIsCidr: !Not [!Equals [!Ref pCidr, '']]
Resources:
Ingress:
Type: AWS::EC2::SecurityGroupIngress
Properties:
SourceSecurityGroupId: !If [ cIsCidr, !Ref AWS::NoValue, sg-abc12345 ]
CidrIp: !If [ cIsCidr, !Ref pCidr, !Ref AWS::NoValue ]
IpProtocol: "-1"
GroupId: sg-abc1234567
```
</issue>
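To make the expected behavior concrete: because both `Fn::If` expressions are driven by the same `cIsCidr` condition (with `AWS::NoValue` dropping the unused property), the resource can only ever resolve to one of two property sets. A small illustrative sketch, where the CIDR string is a hypothetical value for `pCidr`:

```python
# Scenario cIsCidr = true: CidrIp is kept, SourceSecurityGroupId resolves to AWS::NoValue.
resolved_when_cidr = {
    "CidrIp": "10.0.0.0/24",          # hypothetical pCidr value
    "IpProtocol": "-1",
    "GroupId": "sg-abc1234567",
}

# Scenario cIsCidr = false: SourceSecurityGroupId is kept, CidrIp resolves to AWS::NoValue.
resolved_when_not_cidr = {
    "SourceSecurityGroupId": "sg-abc12345",
    "IpProtocol": "-1",
    "GroupId": "sg-abc1234567",
}

# The two exclusive properties never coexist in a resolved scenario,
# so E2520 should not be reported for this template.
```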
<code>
[start of src/cfnlint/rules/resources/properties/Exclusive.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import cfnlint.helpers
6 from cfnlint.data import AdditionalSpecs
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class Exclusive(CloudFormationLintRule):
11 """Check Properties Resource Configuration"""
12
13 id = "E2520"
14 shortdesc = "Check Properties that are mutually exclusive"
15 description = (
16 "Making sure CloudFormation properties that are exclusive are not defined"
17 )
18 source_url = "https://github.com/aws-cloudformation/cfn-python-lint"
19 tags = ["resources"]
20
21 def __init__(self):
22 """Init"""
23 super().__init__()
24 exclusivespec = cfnlint.helpers.load_resource(AdditionalSpecs, "Exclusive.json")
25 self.resource_types_specs = exclusivespec["ResourceTypes"]
26 self.property_types_specs = exclusivespec["PropertyTypes"]
27 for resource_type_spec in self.resource_types_specs:
28 self.resource_property_types.append(resource_type_spec)
29 for property_type_spec in self.property_types_specs:
30 self.resource_sub_property_types.append(property_type_spec)
31
32 def check(self, properties, exclusions, path, cfn):
33 """Check itself"""
34 matches = []
35 for p_value, p_path in properties.items_safe(path[:]):
36 for k, v in exclusions.items():
37 property_sets = cfn.get_object_without_conditions(p_value, [k] + v)
38 for property_set in property_sets:
39 obj = property_set["Object"].clean()
40 for prop in obj:
41 if prop in exclusions:
42 for excl_property in exclusions[prop]:
43 if excl_property in obj:
44 if property_set["Scenario"] is None:
45 message = "Property {0} should NOT exist with {1} for {2}"
46 matches.append(
47 RuleMatch(
48 p_path + [prop],
49 message.format(
50 excl_property,
51 prop,
52 "/".join(map(str, p_path)),
53 ),
54 )
55 )
56 else:
57 scenario_text = " and ".join(
58 [
59 f'when condition "{k}" is {v}'
60 for (k, v) in property_set[
61 "Scenario"
62 ].items()
63 ]
64 )
65 message = "Property {0} should NOT exist with {1} {2} for {3}"
66 matches.append(
67 RuleMatch(
68 p_path + [prop],
69 message.format(
70 excl_property,
71 prop,
72 scenario_text,
73 "/".join(map(str, p_path)),
74 ),
75 )
76 )
77
78 return matches
79
80 def match_resource_sub_properties(self, properties, property_type, path, cfn):
81 """Match for sub properties"""
82 matches = []
83
84 exclusions = self.property_types_specs.get(property_type, {})
85 matches.extend(self.check(properties, exclusions, path, cfn))
86
87 return matches
88
89 def match_resource_properties(self, properties, resource_type, path, cfn):
90 """Check CloudFormation Properties"""
91 matches = []
92
93 exclusions = self.resource_types_specs.get(resource_type, {})
94 matches.extend(self.check(properties, exclusions, path, cfn))
95
96 return matches
97
[end of src/cfnlint/rules/resources/properties/Exclusive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/properties/Exclusive.py b/src/cfnlint/rules/resources/properties/Exclusive.py
--- a/src/cfnlint/rules/resources/properties/Exclusive.py
+++ b/src/cfnlint/rules/resources/properties/Exclusive.py
@@ -38,7 +38,7 @@
for property_set in property_sets:
obj = property_set["Object"].clean()
for prop in obj:
- if prop in exclusions:
+ if prop == k:
for excl_property in exclusions[prop]:
if excl_property in obj:
if property_set["Scenario"] is None:
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/Exclusive.py b/src/cfnlint/rules/resources/properties/Exclusive.py\n--- a/src/cfnlint/rules/resources/properties/Exclusive.py\n+++ b/src/cfnlint/rules/resources/properties/Exclusive.py\n@@ -38,7 +38,7 @@\n for property_set in property_sets:\n obj = property_set[\"Object\"].clean()\n for prop in obj:\n- if prop in exclusions:\n+ if prop == k:\n for excl_property in exclusions[prop]:\n if excl_property in obj:\n if property_set[\"Scenario\"] is None:\n", "issue": "E2520 raised for mutually exclusive properties when using Conditions\n### CloudFormation Lint Version\n\ncfn-lint 0.80.2\n\n### What operating system are you using?\n\nWindows\n\n### Describe the bug\n\n[E2520](https://github.com/aws-cloudformation/cfn-lint/blob/main/docs/rules.md#E2520) is raised for mutually exclusive properties when using Conditions\r\n\r\n```\r\ncfn-lint -t ./template.yaml\r\nE2520 Property SourceSecurityGroupId should NOT exist with CidrIp for Resources/Ingress/Properties\r\n.\\template.yaml:13:7\r\n```\r\n\r\nThe same was working prior `0.79.11`. PR [2875](https://github.com/aws-cloudformation/cfn-lint/pull/2875) seems to be the cause.\r\n\r\n```\r\n> cfn-lint --version \r\ncfn-lint 0.79.10\r\n> cfn-lint -t ./template.yaml \r\n> echo $lastexitcode\r\n0\r\n```\n\n### Expected behavior\n\nE2520 is ignored for mutually exclusive properties that use the same Condition and Fn::If intrinsic function which makes sure only one of the properties has value.\n\n### Reproduction template\n\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nParameters:\r\n pCidr:\r\n Type: String\r\n Default: ''\r\nConditions:\r\n cIsCidr: !Not [!Equals [!Ref pCidr, '']]\r\nResources:\r\n Ingress:\r\n Type: AWS::EC2::SecurityGroupIngress\r\n Properties:\r\n SourceSecurityGroupId: !If [ cIsCidr, !Ref AWS::NoValue, sg-abc12345 ]\r\n CidrIp: !If [ cIsCidr, !Ref pCidr, !Ref AWS::NoValue ]\r\n IpProtocol: \"-1\"\r\n GroupId: sg-abc1234567\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport cfnlint.helpers\nfrom cfnlint.data import AdditionalSpecs\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Exclusive(CloudFormationLintRule):\n \"\"\"Check Properties Resource Configuration\"\"\"\n\n id = \"E2520\"\n shortdesc = \"Check Properties that are mutually exclusive\"\n description = (\n \"Making sure CloudFormation properties that are exclusive are not defined\"\n )\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint\"\n tags = [\"resources\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n exclusivespec = cfnlint.helpers.load_resource(AdditionalSpecs, \"Exclusive.json\")\n self.resource_types_specs = exclusivespec[\"ResourceTypes\"]\n self.property_types_specs = exclusivespec[\"PropertyTypes\"]\n for resource_type_spec in self.resource_types_specs:\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in self.property_types_specs:\n self.resource_sub_property_types.append(property_type_spec)\n\n def check(self, properties, exclusions, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for k, v in exclusions.items():\n property_sets = cfn.get_object_without_conditions(p_value, [k] + v)\n for property_set in property_sets:\n obj = property_set[\"Object\"].clean()\n for prop in obj:\n if prop in exclusions:\n for excl_property in exclusions[prop]:\n if excl_property in obj:\n if property_set[\"Scenario\"] is None:\n message = \"Property {0} should NOT exist with {1} for {2}\"\n matches.append(\n RuleMatch(\n p_path + [prop],\n message.format(\n excl_property,\n prop,\n \"/\".join(map(str, p_path)),\n ),\n )\n )\n else:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in property_set[\n \"Scenario\"\n ].items()\n ]\n )\n message = \"Property {0} should NOT exist with {1} {2} for {3}\"\n matches.append(\n RuleMatch(\n p_path + [prop],\n message.format(\n excl_property,\n prop,\n scenario_text,\n \"/\".join(map(str, p_path)),\n ),\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n exclusions = self.property_types_specs.get(property_type, {})\n matches.extend(self.check(properties, exclusions, path, cfn))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n exclusions = self.resource_types_specs.get(resource_type, {})\n matches.extend(self.check(properties, exclusions, path, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Exclusive.py"}]} | 1,848 | 133 |
gh_patches_debug_7736 | rasdani/github-patches | git_diff | google__flax-2492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve documentation for `Dropout` and `rngs` argument in `linen.Module.apply()`
Here is an example of `Dropout` in a model definition:
https://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/models.py#L211
Here is the `apply()` call, where `rngs` is passed in:
https://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/train.py#L206-L207
However, the `rngs` argument is not clearly explained in `apply()`:
https://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/linen/module.py#L749
The `rngs` argument seems to be passed down to `flax/core/scope.py`.
Here is the code for `Dropout` (linen)
https://github.com/google/flax/blob/9b4807840c5cb26ef5e29028e3558d404aee00a0/flax/linen/stochastic.py#L56-L57
Here is the code for `make_rng()`
https://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/core/scope.py#L441-L447
The documentation for `rngs` in `apply()` should include (or point to) a list of the possible RNG names.
The documentation for `Dropout` should also mention how to pass the RNG in through `apply()`, rather than passing it directly as in `Dropout()(x, rng=rng)`.
It should probably also mention that `make_rng()` folds in the RNG, so each dropout layer uses a different RNG when there are multiple dropout layers.
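For concreteness, a minimal sketch of the usage the docs should spell out (the module, sizes, and names here are illustrative, not taken from the linked examples):

```python
import jax
import jax.numpy as jnp
import flax.linen as nn

class MLP(nn.Module):  # illustrative module, not from the linked examples
    @nn.compact
    def __call__(self, x, train: bool = False):
        x = nn.Dense(16)(x)
        # Dropout calls self.make_rng('dropout') internally when it is not deterministic.
        x = nn.Dropout(rate=0.5, deterministic=not train)(x)
        return nn.Dense(1)(x)

x = jnp.ones((4, 8))
params_rng, dropout_rng = jax.random.split(jax.random.PRNGKey(0))
model = MLP()
variables = model.init(params_rng, x)  # train defaults to False, so no dropout RNG is needed at init
# The 'dropout' entry below is what every Dropout layer's make_rng('dropout') draws from;
# make_rng folds in the layer path, so each layer still gets a distinct stream.
y = model.apply(variables, x, train=True, rngs={'dropout': dropout_rng})
```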
</issue>
<code>
[start of flax/linen/stochastic.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Stochastic modules."""
16
17 from typing import Optional, Sequence
18
19 from flax.linen.module import compact
20 from flax.linen.module import merge_param
21 from flax.linen.module import Module
22 from jax import lax
23 from jax import random
24 import jax.numpy as jnp
25
26
27 class Dropout(Module):
28 """Create a dropout layer.
29
30 Attributes:
31 rate: the dropout probability. (_not_ the keep rate!)
32 broadcast_dims: dimensions that will share the same dropout mask
33 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and
34 masked, whereas if true, no mask is applied and the inputs are returned
35 as is.
36 """
37 rate: float
38 broadcast_dims: Sequence[int] = ()
39 deterministic: Optional[bool] = None
40
41 @compact
42 def __call__(self, inputs, deterministic: Optional[bool] = None):
43 """Applies a random dropout mask to the input.
44
45 Args:
46 inputs: the inputs that should be randomly masked.
47 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and
48 masked, whereas if true, no mask is applied and the inputs are returned
49 as is.
50
51 Returns:
52 The masked inputs reweighted to preserve mean.
53 """
54 deterministic = merge_param(
55 'deterministic', self.deterministic, deterministic)
56 if self.rate == 0.:
57 return inputs
58 # Prevent gradient NaNs in 1.0 edge-case.
59 if self.rate == 1.0:
60 return jnp.zeros_like(inputs)
61 keep_prob = 1. - self.rate
62 if deterministic:
63 return inputs
64 else:
65 rng = self.make_rng('dropout')
66 broadcast_shape = list(inputs.shape)
67 for dim in self.broadcast_dims:
68 broadcast_shape[dim] = 1
69 mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)
70 mask = jnp.broadcast_to(mask, inputs.shape)
71 return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))
72
[end of flax/linen/stochastic.py]
</code>
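As an aside on the masking arithmetic in `__call__` above, a rough sketch (arbitrary shapes, with `jnp.where` standing in for `lax.select`) of what `broadcast_dims` does: the mask is drawn with size 1 along the broadcast dimensions and reused across them, and kept entries are rescaled by `1 / keep_prob`:

```python
import jax
import jax.numpy as jnp

rng = jax.random.PRNGKey(0)
x = jnp.ones((4, 3))
keep_prob = 0.5
# broadcast_dims=(0,) -> a single (1, 3) mask row shared by every element of the batch.
mask = jax.random.bernoulli(rng, p=keep_prob, shape=(1, 3))
mask = jnp.broadcast_to(mask, x.shape)
y = jnp.where(mask, x / keep_prob, 0.0)
```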
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py
--- a/flax/linen/stochastic.py
+++ b/flax/linen/stochastic.py
@@ -27,6 +27,11 @@
class Dropout(Module):
"""Create a dropout layer.
+ Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure
+ to include an RNG seed named `'dropout'`. For example::
+
+ model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`
+
Attributes:
rate: the dropout probability. (_not_ the keep rate!)
broadcast_dims: dimensions that will share the same dropout mask
| {"golden_diff": "diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py\n--- a/flax/linen/stochastic.py\n+++ b/flax/linen/stochastic.py\n@@ -27,6 +27,11 @@\n class Dropout(Module):\n \"\"\"Create a dropout layer.\n \n+ Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure\n+ to include an RNG seed named `'dropout'`. For example::\n+ \n+ model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`\n+\n Attributes:\n rate: the dropout probability. (_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n", "issue": "Improve documentation for `Dropout` and `rngs` argument in `linen.Module.apply()`\n\r\nHere is an example of `Dropout` in a model definition:\r\nhttps://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/models.py#L211\r\n\r\nHere is the `apply()`, where `rngs` is passed in\r\nhttps://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/train.py#L206-L207\r\nHowever the `rng` is not very clearly explained in `apply()`\r\nhttps://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/linen/module.py#L749\r\nThe `rngs` seems to be passed to `flax/core/scope.py`\r\nHere is the code for `Dropout` (linen)\r\nhttps://github.com/google/flax/blob/9b4807840c5cb26ef5e29028e3558d404aee00a0/flax/linen/stochastic.py#L56-L57\r\nHere is the code for `make_rng()`\r\nhttps://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/core/scope.py#L441-L447\r\n\r\nThe documentation for `rngs` in `apply()` should have a (pointer to) list of names of possible rngs\r\nAnd documentation for `Dropout` should mention how to pass in rng using `apply()`, without directly passing in like `Dropout()(x,rng=rng)`.\r\nAlso probably need to mention the `make_rng()` `fold_in` the rng so each dropout layer will use different rng if there are multiple dropout layers.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stochastic modules.\"\"\"\n\nfrom typing import Optional, Sequence\n\nfrom flax.linen.module import compact\nfrom flax.linen.module import merge_param\nfrom flax.linen.module import Module\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\nclass Dropout(Module):\n \"\"\"Create a dropout layer.\n\n Attributes:\n rate: the dropout probability. 
(_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n\n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n \"\"\"Applies a random dropout mask to the input.\n\n Args:\n inputs: the inputs that should be randomly masked.\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n\n Returns:\n The masked inputs reweighted to preserve mean.\n \"\"\"\n deterministic = merge_param(\n 'deterministic', self.deterministic, deterministic)\n if self.rate == 0.:\n return inputs\n # Prevent gradient NaNs in 1.0 edge-case.\n if self.rate == 1.0:\n return jnp.zeros_like(inputs)\n keep_prob = 1. - self.rate\n if deterministic:\n return inputs\n else:\n rng = self.make_rng('dropout')\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)\n mask = jnp.broadcast_to(mask, inputs.shape)\n return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))\n", "path": "flax/linen/stochastic.py"}]} | 1,750 | 167 |
gh_patches_debug_7598 | rasdani/github-patches | git_diff | onnx__sklearn-onnx-525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Conversion of HistGradientBoosting models fails with scikit-learn 0.24
</issue>
<code>
[start of skl2onnx/common/tree_ensemble.py]
1 # -------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for
4 # license information.
5 # --------------------------------------------------------------------------
6 """
7 Common functions to convert any learner based on trees.
8 """
9 import numpy as np
10
11
12 def get_default_tree_classifier_attribute_pairs():
13 attrs = {}
14 attrs['post_transform'] = 'NONE'
15 attrs['nodes_treeids'] = []
16 attrs['nodes_nodeids'] = []
17 attrs['nodes_featureids'] = []
18 attrs['nodes_modes'] = []
19 attrs['nodes_values'] = []
20 attrs['nodes_truenodeids'] = []
21 attrs['nodes_falsenodeids'] = []
22 attrs['nodes_missing_value_tracks_true'] = []
23 attrs['nodes_hitrates'] = []
24 attrs['class_treeids'] = []
25 attrs['class_nodeids'] = []
26 attrs['class_ids'] = []
27 attrs['class_weights'] = []
28 return attrs
29
30
31 def get_default_tree_regressor_attribute_pairs():
32 attrs = {}
33 attrs['post_transform'] = 'NONE'
34 attrs['n_targets'] = 0
35 attrs['nodes_treeids'] = []
36 attrs['nodes_nodeids'] = []
37 attrs['nodes_featureids'] = []
38 attrs['nodes_modes'] = []
39 attrs['nodes_values'] = []
40 attrs['nodes_truenodeids'] = []
41 attrs['nodes_falsenodeids'] = []
42 attrs['nodes_missing_value_tracks_true'] = []
43 attrs['nodes_hitrates'] = []
44 attrs['target_treeids'] = []
45 attrs['target_nodeids'] = []
46 attrs['target_ids'] = []
47 attrs['target_weights'] = []
48 return attrs
49
50
51 def find_switch_point(fy, nfy):
52 """
53 Finds the double so that
54 ``(float)x != (float)(x + espilon)``.
55 """
56 a = np.float64(fy)
57 b = np.float64(nfy)
58 fa = np.float32(a)
59 a0, b0 = a, a
60 while a != a0 or b != b0:
61 a0, b0 = a, b
62 m = (a + b) / 2
63 fm = np.float32(m)
64 if fm == fa:
65 a = m
66 fa = fm
67 else:
68 b = m
69 return a
70
71
72 def sklearn_threshold(dy, dtype, mode):
73 """
74 *scikit-learn* does not compare x to a threshold
75 but (float)x to a double threshold. As we need a float
76 threshold, we need a different value than the threshold
77 rounded to float. For floats, it finds float *w* which
78 verifies::
79
80 (float)x <= y <=> (float)x <= w
81
82 For doubles, it finds double *w* which verifies::
83
84 (float)x <= y <=> x <= w
85 """
86 if mode == "BRANCH_LEQ":
87 if dtype == np.float32:
88 fy = np.float32(dy)
89 if fy == dy:
90 return np.float64(fy)
91 if fy < dy:
92 return np.float64(fy)
93 eps = max(abs(fy), np.finfo(np.float32).eps) * 10
94 nfy = np.nextafter([fy], [fy - eps], dtype=np.float32)[0]
95 return np.float64(nfy)
96 elif dtype == np.float64:
97 fy = np.float32(dy)
98 eps = max(abs(fy), np.finfo(np.float32).eps) * 10
99 afy = np.nextafter([fy], [fy - eps], dtype=np.float32)[0]
100 afy2 = find_switch_point(afy, fy)
101 if fy > dy > afy2:
102 return afy2
103 bfy = np.nextafter([fy], [fy + eps], dtype=np.float32)[0]
104 bfy2 = find_switch_point(fy, bfy)
105 if fy <= dy <= bfy2:
106 return bfy2
107 return np.float64(fy)
108 raise TypeError("Unexpected dtype {}.".format(dtype))
109 raise RuntimeError("Threshold is not changed for other mode and "
110 "'BRANCH_LEQ' (actually '{}').".format(mode))
111
112
113 def add_node(attr_pairs, is_classifier, tree_id, tree_weight, node_id,
114 feature_id, mode, value, true_child_id, false_child_id,
115 weights, weight_id_bias, leaf_weights_are_counts,
116 adjust_threshold_for_sklearn, dtype,
117 nodes_missing_value_tracks_true=False):
118 attr_pairs['nodes_treeids'].append(tree_id)
119 attr_pairs['nodes_nodeids'].append(node_id)
120 attr_pairs['nodes_featureids'].append(feature_id)
121 attr_pairs['nodes_modes'].append(mode)
122 if adjust_threshold_for_sklearn and mode != 'LEAF':
123 attr_pairs['nodes_values'].append(
124 sklearn_threshold(value, dtype, mode))
125 else:
126 attr_pairs['nodes_values'].append(value)
127 attr_pairs['nodes_truenodeids'].append(true_child_id)
128 attr_pairs['nodes_falsenodeids'].append(false_child_id)
129 attr_pairs['nodes_missing_value_tracks_true'].append(
130 nodes_missing_value_tracks_true)
131 attr_pairs['nodes_hitrates'].append(1.)
132
133 # Add leaf information for making prediction
134 if mode == 'LEAF':
135 flattened_weights = weights.flatten()
136 factor = tree_weight
137 # If the values stored at leaves are counts of possible classes, we
138 # need convert them to probabilities by doing a normalization.
139 if leaf_weights_are_counts:
140 s = sum(flattened_weights)
141 factor /= float(s) if s != 0. else 1.
142 flattened_weights = [w * factor for w in flattened_weights]
143 if len(flattened_weights) == 2 and is_classifier:
144 flattened_weights = [flattened_weights[1]]
145
146 # Note that attribute names for making prediction are different for
147 # classifiers and regressors
148 if is_classifier:
149 for i, w in enumerate(flattened_weights):
150 attr_pairs['class_treeids'].append(tree_id)
151 attr_pairs['class_nodeids'].append(node_id)
152 attr_pairs['class_ids'].append(i + weight_id_bias)
153 attr_pairs['class_weights'].append(w)
154 else:
155 for i, w in enumerate(flattened_weights):
156 attr_pairs['target_treeids'].append(tree_id)
157 attr_pairs['target_nodeids'].append(node_id)
158 attr_pairs['target_ids'].append(i + weight_id_bias)
159 attr_pairs['target_weights'].append(w)
160
161
162 def add_tree_to_attribute_pairs(attr_pairs, is_classifier, tree, tree_id,
163 tree_weight, weight_id_bias,
164 leaf_weights_are_counts,
165 adjust_threshold_for_sklearn=False,
166 dtype=None):
167 for i in range(tree.node_count):
168 node_id = i
169 weight = tree.value[i]
170
171 if tree.children_left[i] > i or tree.children_right[i] > i:
172 mode = 'BRANCH_LEQ'
173 feat_id = tree.feature[i]
174 threshold = tree.threshold[i]
175 left_child_id = int(tree.children_left[i])
176 right_child_id = int(tree.children_right[i])
177 else:
178 mode = 'LEAF'
179 feat_id = 0
180 threshold = 0.
181 left_child_id = 0
182 right_child_id = 0
183
184 add_node(attr_pairs, is_classifier, tree_id, tree_weight, node_id,
185 feat_id, mode, threshold, left_child_id, right_child_id,
186 weight, weight_id_bias, leaf_weights_are_counts,
187 adjust_threshold_for_sklearn=adjust_threshold_for_sklearn,
188 dtype=dtype)
189
190
191 def add_tree_to_attribute_pairs_hist_gradient_boosting(
192 attr_pairs, is_classifier, tree, tree_id,
193 tree_weight, weight_id_bias,
194 leaf_weights_are_counts,
195 adjust_threshold_for_sklearn=False,
196 dtype=None):
197 for i, node in enumerate(tree.nodes):
198 node_id = i
199 weight = node['value']
200
201 if node['is_leaf']:
202 mode = 'LEAF'
203 feat_id = 0
204 threshold = 0.
205 left_child_id = 0
206 right_child_id = 0
207 missing = False
208 else:
209 mode = 'BRANCH_LEQ'
210 feat_id = node['feature_idx']
211 threshold = node['threshold']
212 left_child_id = node['left']
213 right_child_id = node['right']
214 missing = node['missing_go_to_left']
215
216 add_node(attr_pairs, is_classifier, tree_id, tree_weight, node_id,
217 feat_id, mode, threshold, left_child_id, right_child_id,
218 weight, weight_id_bias, leaf_weights_are_counts,
219 adjust_threshold_for_sklearn=adjust_threshold_for_sklearn,
220 dtype=dtype, nodes_missing_value_tracks_true=missing)
221
[end of skl2onnx/common/tree_ensemble.py]
</code>
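A quick numeric illustration (with an arbitrary value, 0.1) of the mismatch that `sklearn_threshold` in the file above guards against: scikit-learn effectively compares a float32 feature against a float64 threshold, so naively rounding the threshold to float32 for ONNX can flip the outcome of the comparison.

```python
import numpy as np

y = np.float64(0.1)        # double threshold as stored by scikit-learn
x = np.float32(0.1)        # feature value already cast to float32
print(bool(x <= y))               # False: the comparison scikit-learn effectively performs
print(bool(x <= np.float32(y)))   # True: what a naively rounded float32 threshold would give
```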
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skl2onnx/common/tree_ensemble.py b/skl2onnx/common/tree_ensemble.py
--- a/skl2onnx/common/tree_ensemble.py
+++ b/skl2onnx/common/tree_ensemble.py
@@ -208,7 +208,10 @@
else:
mode = 'BRANCH_LEQ'
feat_id = node['feature_idx']
- threshold = node['threshold']
+ try:
+ threshold = node['threshold']
+ except ValueError:
+ threshold = node['num_threshold']
left_child_id = node['left']
right_child_id = node['right']
missing = node['missing_go_to_left']
| {"golden_diff": "diff --git a/skl2onnx/common/tree_ensemble.py b/skl2onnx/common/tree_ensemble.py\n--- a/skl2onnx/common/tree_ensemble.py\n+++ b/skl2onnx/common/tree_ensemble.py\n@@ -208,7 +208,10 @@\n else:\n mode = 'BRANCH_LEQ'\n feat_id = node['feature_idx']\n- threshold = node['threshold']\n+ try:\n+ threshold = node['threshold']\n+ except ValueError:\n+ threshold = node['num_threshold']\n left_child_id = node['left']\n right_child_id = node['right']\n missing = node['missing_go_to_left']\n", "issue": "Conversion of HistGradientBoosting fails with scikit-learn 0.24\n\n", "before_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\"\"\"\nCommon functions to convert any learner based on trees.\n\"\"\"\nimport numpy as np\n\n\ndef get_default_tree_classifier_attribute_pairs():\n attrs = {}\n attrs['post_transform'] = 'NONE'\n attrs['nodes_treeids'] = []\n attrs['nodes_nodeids'] = []\n attrs['nodes_featureids'] = []\n attrs['nodes_modes'] = []\n attrs['nodes_values'] = []\n attrs['nodes_truenodeids'] = []\n attrs['nodes_falsenodeids'] = []\n attrs['nodes_missing_value_tracks_true'] = []\n attrs['nodes_hitrates'] = []\n attrs['class_treeids'] = []\n attrs['class_nodeids'] = []\n attrs['class_ids'] = []\n attrs['class_weights'] = []\n return attrs\n\n\ndef get_default_tree_regressor_attribute_pairs():\n attrs = {}\n attrs['post_transform'] = 'NONE'\n attrs['n_targets'] = 0\n attrs['nodes_treeids'] = []\n attrs['nodes_nodeids'] = []\n attrs['nodes_featureids'] = []\n attrs['nodes_modes'] = []\n attrs['nodes_values'] = []\n attrs['nodes_truenodeids'] = []\n attrs['nodes_falsenodeids'] = []\n attrs['nodes_missing_value_tracks_true'] = []\n attrs['nodes_hitrates'] = []\n attrs['target_treeids'] = []\n attrs['target_nodeids'] = []\n attrs['target_ids'] = []\n attrs['target_weights'] = []\n return attrs\n\n\ndef find_switch_point(fy, nfy):\n \"\"\"\n Finds the double so that\n ``(float)x != (float)(x + espilon)``.\n \"\"\"\n a = np.float64(fy)\n b = np.float64(nfy)\n fa = np.float32(a)\n a0, b0 = a, a\n while a != a0 or b != b0:\n a0, b0 = a, b\n m = (a + b) / 2\n fm = np.float32(m)\n if fm == fa:\n a = m\n fa = fm\n else:\n b = m\n return a\n\n\ndef sklearn_threshold(dy, dtype, mode):\n \"\"\"\n *scikit-learn* does not compare x to a threshold\n but (float)x to a double threshold. As we need a float\n threshold, we need a different value than the threshold\n rounded to float. 
For floats, it finds float *w* which\n verifies::\n\n (float)x <= y <=> (float)x <= w\n\n For doubles, it finds double *w* which verifies::\n\n (float)x <= y <=> x <= w\n \"\"\"\n if mode == \"BRANCH_LEQ\":\n if dtype == np.float32:\n fy = np.float32(dy)\n if fy == dy:\n return np.float64(fy)\n if fy < dy:\n return np.float64(fy)\n eps = max(abs(fy), np.finfo(np.float32).eps) * 10\n nfy = np.nextafter([fy], [fy - eps], dtype=np.float32)[0]\n return np.float64(nfy)\n elif dtype == np.float64:\n fy = np.float32(dy)\n eps = max(abs(fy), np.finfo(np.float32).eps) * 10\n afy = np.nextafter([fy], [fy - eps], dtype=np.float32)[0]\n afy2 = find_switch_point(afy, fy)\n if fy > dy > afy2:\n return afy2\n bfy = np.nextafter([fy], [fy + eps], dtype=np.float32)[0]\n bfy2 = find_switch_point(fy, bfy)\n if fy <= dy <= bfy2:\n return bfy2\n return np.float64(fy)\n raise TypeError(\"Unexpected dtype {}.\".format(dtype))\n raise RuntimeError(\"Threshold is not changed for other mode and \"\n \"'BRANCH_LEQ' (actually '{}').\".format(mode))\n\n\ndef add_node(attr_pairs, is_classifier, tree_id, tree_weight, node_id,\n feature_id, mode, value, true_child_id, false_child_id,\n weights, weight_id_bias, leaf_weights_are_counts,\n adjust_threshold_for_sklearn, dtype,\n nodes_missing_value_tracks_true=False):\n attr_pairs['nodes_treeids'].append(tree_id)\n attr_pairs['nodes_nodeids'].append(node_id)\n attr_pairs['nodes_featureids'].append(feature_id)\n attr_pairs['nodes_modes'].append(mode)\n if adjust_threshold_for_sklearn and mode != 'LEAF':\n attr_pairs['nodes_values'].append(\n sklearn_threshold(value, dtype, mode))\n else:\n attr_pairs['nodes_values'].append(value)\n attr_pairs['nodes_truenodeids'].append(true_child_id)\n attr_pairs['nodes_falsenodeids'].append(false_child_id)\n attr_pairs['nodes_missing_value_tracks_true'].append(\n nodes_missing_value_tracks_true)\n attr_pairs['nodes_hitrates'].append(1.)\n\n # Add leaf information for making prediction\n if mode == 'LEAF':\n flattened_weights = weights.flatten()\n factor = tree_weight\n # If the values stored at leaves are counts of possible classes, we\n # need convert them to probabilities by doing a normalization.\n if leaf_weights_are_counts:\n s = sum(flattened_weights)\n factor /= float(s) if s != 0. 
else 1.\n flattened_weights = [w * factor for w in flattened_weights]\n if len(flattened_weights) == 2 and is_classifier:\n flattened_weights = [flattened_weights[1]]\n\n # Note that attribute names for making prediction are different for\n # classifiers and regressors\n if is_classifier:\n for i, w in enumerate(flattened_weights):\n attr_pairs['class_treeids'].append(tree_id)\n attr_pairs['class_nodeids'].append(node_id)\n attr_pairs['class_ids'].append(i + weight_id_bias)\n attr_pairs['class_weights'].append(w)\n else:\n for i, w in enumerate(flattened_weights):\n attr_pairs['target_treeids'].append(tree_id)\n attr_pairs['target_nodeids'].append(node_id)\n attr_pairs['target_ids'].append(i + weight_id_bias)\n attr_pairs['target_weights'].append(w)\n\n\ndef add_tree_to_attribute_pairs(attr_pairs, is_classifier, tree, tree_id,\n tree_weight, weight_id_bias,\n leaf_weights_are_counts,\n adjust_threshold_for_sklearn=False,\n dtype=None):\n for i in range(tree.node_count):\n node_id = i\n weight = tree.value[i]\n\n if tree.children_left[i] > i or tree.children_right[i] > i:\n mode = 'BRANCH_LEQ'\n feat_id = tree.feature[i]\n threshold = tree.threshold[i]\n left_child_id = int(tree.children_left[i])\n right_child_id = int(tree.children_right[i])\n else:\n mode = 'LEAF'\n feat_id = 0\n threshold = 0.\n left_child_id = 0\n right_child_id = 0\n\n add_node(attr_pairs, is_classifier, tree_id, tree_weight, node_id,\n feat_id, mode, threshold, left_child_id, right_child_id,\n weight, weight_id_bias, leaf_weights_are_counts,\n adjust_threshold_for_sklearn=adjust_threshold_for_sklearn,\n dtype=dtype)\n\n\ndef add_tree_to_attribute_pairs_hist_gradient_boosting(\n attr_pairs, is_classifier, tree, tree_id,\n tree_weight, weight_id_bias,\n leaf_weights_are_counts,\n adjust_threshold_for_sklearn=False,\n dtype=None):\n for i, node in enumerate(tree.nodes):\n node_id = i\n weight = node['value']\n\n if node['is_leaf']:\n mode = 'LEAF'\n feat_id = 0\n threshold = 0.\n left_child_id = 0\n right_child_id = 0\n missing = False\n else:\n mode = 'BRANCH_LEQ'\n feat_id = node['feature_idx']\n threshold = node['threshold']\n left_child_id = node['left']\n right_child_id = node['right']\n missing = node['missing_go_to_left']\n\n add_node(attr_pairs, is_classifier, tree_id, tree_weight, node_id,\n feat_id, mode, threshold, left_child_id, right_child_id,\n weight, weight_id_bias, leaf_weights_are_counts,\n adjust_threshold_for_sklearn=adjust_threshold_for_sklearn,\n dtype=dtype, nodes_missing_value_tracks_true=missing)\n", "path": "skl2onnx/common/tree_ensemble.py"}]} | 3,041 | 150 |
gh_patches_debug_23848 | rasdani/github-patches | git_diff | cocotb__cocotb-2959 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include logger name in log output
The culprit is logged, but the new "short" log format doesn't include the logger name, so I don't get any useful information from this. IMO the default log format should include the logger name.
https://github.com/cocotb/cocotb/blob/c69454db92388a8915c99a35ca9cba06565fe4d5/cocotb/scheduler.py#L451
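Until the default format changes, a user-side workaround is possible because `default_config()` (in `cocotb/log.py`, shown below) installs the cocotb handler on the root logger; this is only a sketch of that idea:

```python
import logging

# Option 1: set COCOTB_REDUCED_LOG_FMT=0 in the environment, which switches to the
# long format that already includes the record name.

# Option 2: from test code, swap in a stock formatter that prints the logger name.
for handler in logging.getLogger().handlers:
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)-8s %(name)-34s %(message)s")
    )
```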
</issue>
<code>
[start of cocotb/log.py]
1 # Copyright (c) 2013, 2018 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Everything related to logging
30 """
31
32 import logging
33 import os
34 import sys
35 import warnings
36
37 import cocotb.ANSI as ANSI
38 from cocotb import simulator
39 from cocotb.utils import get_sim_time, get_time_from_sim_steps, want_color_output
40
41 try:
42 _suppress = int(os.environ.get("COCOTB_REDUCED_LOG_FMT", "1"))
43 except ValueError:
44 _suppress = 1
45
46 # Column alignment
47 _LEVEL_CHARS = len("CRITICAL") # noqa
48 _RECORD_CHARS = 35 # noqa
49 _FILENAME_CHARS = 20 # noqa
50 _LINENO_CHARS = 4 # noqa
51 _FUNCNAME_CHARS = 31 # noqa
52
53 # Custom log level
54 logging.TRACE = 5
55 logging.addLevelName(5, "TRACE")
56
57 # Default log level if not overwritten by the user.
58 _COCOTB_LOG_LEVEL_DEFAULT = "INFO"
59
60
61 def default_config():
62 """Apply the default cocotb log formatting to the root logger.
63
64 This hooks up the logger to write to stdout, using either
65 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
66 on whether colored output is requested. It also adds a
67 :class:`SimTimeContextFilter` filter so that
68 :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.
69
70 The logging level for cocotb logs is set based on the
71 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.
72
73 If desired, this logging configuration can be overwritten by calling
74 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by
75 manually resetting the root logger instance.
76 An example of this can be found in the section on :ref:`rotating-logger`.
77
78 .. versionadded:: 1.4
79 """
80 # construct an appropriate handler
81 hdlr = logging.StreamHandler(sys.stdout)
82 hdlr.addFilter(SimTimeContextFilter())
83 if want_color_output():
84 hdlr.setFormatter(SimColourLogFormatter())
85 else:
86 hdlr.setFormatter(SimLogFormatter())
87
88 logging.setLoggerClass(SimBaseLog) # For backwards compatibility
89 logging.basicConfig()
90 logging.getLogger().handlers = [hdlr] # overwrite default handlers
91
92 # apply level settings for cocotb
93 log = logging.getLogger("cocotb")
94
95 try:
96 # All log levels are upper case, convert the user input for convenience.
97 level = os.environ["COCOTB_LOG_LEVEL"].upper()
98 except KeyError:
99 level = _COCOTB_LOG_LEVEL_DEFAULT
100
101 try:
102 log.setLevel(level)
103 except ValueError:
104 valid_levels = ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "TRACE")
105 raise ValueError(
106 "Invalid log level %r passed through the "
107 "COCOTB_LOG_LEVEL environment variable. Valid log "
108 "levels: %s" % (level, ", ".join(valid_levels))
109 )
110
111 # Notify GPI of log level, which it uses as an optimization to avoid
112 # calling into Python.
113 simulator.log_level(log.getEffectiveLevel())
114
115
116 class SimBaseLog(logging.getLoggerClass()):
117 """This class only exists for backwards compatibility"""
118
119 @property
120 def logger(self):
121 warnings.warn(
122 "the .logger attribute should not be used now that `SimLog` "
123 "returns a native logger instance directly.",
124 DeprecationWarning,
125 stacklevel=2,
126 )
127 return self
128
129 @property
130 def colour(self):
131 warnings.warn(
132 "the .colour attribute may be removed in future, use the "
133 "equivalent `cocotb.utils.want_color_output()` instead",
134 DeprecationWarning,
135 stacklevel=2,
136 )
137 return want_color_output()
138
139 def setLevel(self, level: int) -> None:
140 super().setLevel(level)
141 if self.name == "gpi":
142 simulator.log_level(level)
143
144
145 # this used to be a class, hence the unusual capitalization
146 def SimLog(name, ident=None):
147 """Like logging.getLogger, but append a numeric identifier to the name"""
148 if ident is not None:
149 name = f"{name}.0x{ident:x}"
150 return logging.getLogger(name)
151
152
153 class SimTimeContextFilter(logging.Filter):
154 """
155 A filter to inject simulator times into the log records.
156
157 This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.
158
159 This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.
160
161 .. versionadded:: 1.4
162 """
163
164 # needed to make our docs render well
165 def __init__(self):
166 """"""
167 super().__init__()
168
169 def filter(self, record):
170 try:
171 record.created_sim_time = get_sim_time()
172 except RecursionError:
173 # get_sim_time may try to log - if that happens, we can't
174 # attach a simulator time to this message.
175 record.created_sim_time = None
176 return True
177
178
179 class SimLogFormatter(logging.Formatter):
180 """Log formatter to provide consistent log message handling.
181
182 This will only add simulator timestamps if the handler object this
183 formatter is attached to has a :class:`SimTimeContextFilter` filter
184 attached, which cocotb ensures by default.
185 """
186
187 # Removes the arguments from the base class. Docstring needed to make
188 # sphinx happy.
189 def __init__(self):
190 """Takes no arguments."""
191 super().__init__()
192
193 # Justify and truncate
194 @staticmethod
195 def ljust(string, chars):
196 if len(string) > chars:
197 return ".." + string[(chars - 2) * -1 :]
198 return string.ljust(chars)
199
200 @staticmethod
201 def rjust(string, chars):
202 if len(string) > chars:
203 return ".." + string[(chars - 2) * -1 :]
204 return string.rjust(chars)
205
206 def _format(self, level, record, msg, coloured=False):
207 sim_time = getattr(record, "created_sim_time", None)
208 if sim_time is None:
209 sim_time_str = " -.--ns"
210 else:
211 time_ns = get_time_from_sim_steps(sim_time, "ns")
212 sim_time_str = f"{time_ns:6.2f}ns"
213 prefix = sim_time_str.rjust(11) + " " + level + " "
214 if not _suppress:
215 prefix += (
216 self.ljust(record.name, _RECORD_CHARS)
217 + self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)
218 + ":"
219 + self.ljust(str(record.lineno), _LINENO_CHARS)
220 + " in "
221 + self.ljust(str(record.funcName), _FUNCNAME_CHARS)
222 + " "
223 )
224
225 # these lines are copied from the builtin logger
226 if record.exc_info:
227 # Cache the traceback text to avoid converting it multiple times
228 # (it's constant anyway)
229 if not record.exc_text:
230 record.exc_text = self.formatException(record.exc_info)
231 if record.exc_text:
232 if msg[-1:] != "\n":
233 msg = msg + "\n"
234 msg = msg + record.exc_text
235
236 prefix_len = len(prefix)
237 if coloured:
238 prefix_len -= len(level) - _LEVEL_CHARS
239 pad = "\n" + " " * (prefix_len)
240 return prefix + pad.join(msg.split("\n"))
241
242 def format(self, record):
243 """Prettify the log output, annotate with simulation time"""
244
245 msg = record.getMessage()
246 level = record.levelname.ljust(_LEVEL_CHARS)
247
248 return self._format(level, record, msg)
249
250
251 class SimColourLogFormatter(SimLogFormatter):
252 """Log formatter to provide consistent log message handling."""
253
254 loglevel2colour = {
255 logging.TRACE: "%s",
256 logging.DEBUG: "%s",
257 logging.INFO: "%s",
258 logging.WARNING: ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT,
259 logging.ERROR: ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT,
260 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT,
261 }
262
263 def format(self, record):
264 """Prettify the log output, annotate with simulation time"""
265
266 msg = record.getMessage()
267
268 # Need to colour each line in case coloring is applied in the message
269 msg = "\n".join(
270 [
271 SimColourLogFormatter.loglevel2colour.get(record.levelno, "%s") % line
272 for line in msg.split("\n")
273 ]
274 )
275 level = SimColourLogFormatter.loglevel2colour.get(
276 record.levelno, "%s"
277 ) % record.levelname.ljust(_LEVEL_CHARS)
278
279 return self._format(level, record, msg, coloured=True)
280
281
282 def _filter_from_c(logger_name, level):
283 return logging.getLogger(logger_name).isEnabledFor(level)
284
285
286 def _log_from_c(logger_name, level, filename, lineno, msg, function_name):
287 """
288 This is for use from the C world, and allows us to insert C stack
289 information.
290 """
291 logger = logging.getLogger(logger_name)
292 if logger.isEnabledFor(level):
293 record = logger.makeRecord(
294 logger.name, level, filename, lineno, msg, None, None, function_name
295 )
296 logger.handle(record)
297
[end of cocotb/log.py]
</code>
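A related sketch building on the classes above: rather than replacing the default configuration, a second handler can be added that reuses `SimTimeContextFilter` and `SimLogFormatter`, so file output keeps the simulator timestamps (the file name is arbitrary):

```python
import logging
from cocotb.log import SimLogFormatter, SimTimeContextFilter

file_handler = logging.FileHandler("sim.log")
file_handler.addFilter(SimTimeContextFilter())  # supplies record.created_sim_time to the formatter
file_handler.setFormatter(SimLogFormatter())
logging.getLogger().addHandler(file_handler)
```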
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cocotb/log.py b/cocotb/log.py
--- a/cocotb/log.py
+++ b/cocotb/log.py
@@ -44,11 +44,11 @@
_suppress = 1
# Column alignment
-_LEVEL_CHARS = len("CRITICAL") # noqa
-_RECORD_CHARS = 35 # noqa
-_FILENAME_CHARS = 20 # noqa
-_LINENO_CHARS = 4 # noqa
-_FUNCNAME_CHARS = 31 # noqa
+_LEVEL_CHARS = len("CRITICAL")
+_RECORD_CHARS = 34
+_FILENAME_CHARS = 20
+_LINENO_CHARS = 4
+_FUNCNAME_CHARS = 31
# Custom log level
logging.TRACE = 5
@@ -210,11 +210,17 @@
else:
time_ns = get_time_from_sim_steps(sim_time, "ns")
sim_time_str = f"{time_ns:6.2f}ns"
- prefix = sim_time_str.rjust(11) + " " + level + " "
+ prefix = (
+ sim_time_str.rjust(11)
+ + " "
+ + level
+ + " "
+ + self.ljust(record.name, _RECORD_CHARS)
+ + " "
+ )
if not _suppress:
prefix += (
- self.ljust(record.name, _RECORD_CHARS)
- + self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)
+ self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)
+ ":"
+ self.ljust(str(record.lineno), _LINENO_CHARS)
+ " in "
| {"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -44,11 +44,11 @@\n _suppress = 1\n \n # Column alignment\n-_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n-_RECORD_CHARS = 35 # noqa\n-_FILENAME_CHARS = 20 # noqa\n-_LINENO_CHARS = 4 # noqa\n-_FUNCNAME_CHARS = 31 # noqa\n+_LEVEL_CHARS = len(\"CRITICAL\")\n+_RECORD_CHARS = 34\n+_FILENAME_CHARS = 20\n+_LINENO_CHARS = 4\n+_FUNCNAME_CHARS = 31\n \n # Custom log level\n logging.TRACE = 5\n@@ -210,11 +210,17 @@\n else:\n time_ns = get_time_from_sim_steps(sim_time, \"ns\")\n sim_time_str = f\"{time_ns:6.2f}ns\"\n- prefix = sim_time_str.rjust(11) + \" \" + level + \" \"\n+ prefix = (\n+ sim_time_str.rjust(11)\n+ + \" \"\n+ + level\n+ + \" \"\n+ + self.ljust(record.name, _RECORD_CHARS)\n+ + \" \"\n+ )\n if not _suppress:\n prefix += (\n- self.ljust(record.name, _RECORD_CHARS)\n- + self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)\n+ self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)\n + \":\"\n + self.ljust(str(record.lineno), _LINENO_CHARS)\n + \" in \"\n", "issue": "Include logger name in log output\nThe culprit is logged, but the new \"short\" log format doesn't include the logger name so I don't get any useful information from this. IMO the default log format should include the logger name.\r\nhttps://github.com/cocotb/cocotb/blob/c69454db92388a8915c99a35ca9cba06565fe4d5/cocotb/scheduler.py#L451\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport logging\nimport os\nimport sys\nimport warnings\n\nimport cocotb.ANSI as ANSI\nfrom cocotb import simulator\nfrom cocotb.utils import get_sim_time, get_time_from_sim_steps, want_color_output\n\ntry:\n _suppress = int(os.environ.get(\"COCOTB_REDUCED_LOG_FMT\", \"1\"))\nexcept ValueError:\n _suppress = 1\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n# Custom log level\nlogging.TRACE = 5\nlogging.addLevelName(5, \"TRACE\")\n\n# Default log level if not overwritten by the user.\n_COCOTB_LOG_LEVEL_DEFAULT = \"INFO\"\n\n\ndef default_config():\n \"\"\"Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance.\n An example of this can be found in the section on :ref:`rotating-logger`.\n\n .. versionadded:: 1.4\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger(\"cocotb\")\n\n try:\n # All log levels are upper case, convert the user input for convenience.\n level = os.environ[\"COCOTB_LOG_LEVEL\"].upper()\n except KeyError:\n level = _COCOTB_LOG_LEVEL_DEFAULT\n\n try:\n log.setLevel(level)\n except ValueError:\n valid_levels = (\"CRITICAL\", \"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"TRACE\")\n raise ValueError(\n \"Invalid log level %r passed through the \"\n \"COCOTB_LOG_LEVEL environment variable. 
Valid log \"\n \"levels: %s\" % (level, \", \".join(valid_levels))\n )\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n simulator.log_level(log.getEffectiveLevel())\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\"This class only exists for backwards compatibility\"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n return want_color_output()\n\n def setLevel(self, level: int) -> None:\n super().setLevel(level)\n if self.name == \"gpi\":\n simulator.log_level(level)\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\"Like logging.getLogger, but append a numeric identifier to the name\"\"\"\n if ident is not None:\n name = f\"{name}.0x{ident:x}\"\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n\n .. versionadded:: 1.4\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the handler object this\n formatter is attached to has a :class:`SimTimeContextFilter` filter\n attached, which cocotb ensures by default.\n \"\"\"\n\n # Removes the arguments from the base class. 
Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\"Takes no arguments.\"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1 :]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1 :]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, \"created_sim_time\", None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, \"ns\")\n sim_time_str = f\"{time_ns:6.2f}ns\"\n prefix = sim_time_str.rjust(11) + \" \" + level + \" \"\n if not _suppress:\n prefix += (\n self.ljust(record.name, _RECORD_CHARS)\n + self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS)\n + \":\"\n + self.ljust(str(record.lineno), _LINENO_CHARS)\n + \" in \"\n + self.ljust(str(record.funcName), _FUNCNAME_CHARS)\n + \" \"\n )\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= len(level) - _LEVEL_CHARS\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split(\"\\n\"))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.TRACE: \"%s\",\n logging.DEBUG: \"%s\",\n logging.INFO: \"%s\",\n logging.WARNING: ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR: ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n msg = \"\\n\".join(\n [\n SimColourLogFormatter.loglevel2colour.get(record.levelno, \"%s\") % line\n for line in msg.split(\"\\n\")\n ]\n )\n level = SimColourLogFormatter.loglevel2colour.get(\n record.levelno, \"%s\"\n ) % record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name, level, filename, lineno, msg, None, None, function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}]} | 3,831 | 382 |
gh_patches_debug_18985 | rasdani/github-patches | git_diff | oppia__oppia-6309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
InteractiveMap interaction: in the rule editor, clicks on the map are not displayed correctly
Create an exploration with a map interaction. Add a rule and click on the map to choose the point the rule applies to. A marker should appear where you click, but it does not.
Save and close the rule, then re-open it. The marker is now displayed correctly.
Create a new rule. Before it is clicked on, the map should be blank, but instead it displays the position of the marker from the previous rule.
</issue>
<code>
[start of extensions/dependencies/dependencies_config.py]
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Configuration for JavaScript library dependencies."""
18
19
20 # A dict mapping dependency ids to the Angular module names they
21 # should insert when the Angular app is first initialized.
22 DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {
23 'codemirror': ['ui.codemirror'],
24 'google_maps': ['ui.map'],
25 'guppy': [],
26 'logic_proof': [],
27 'math_expressions': [],
28 'midijs': [],
29 'pencilcode': [],
30 'skulpt': [],
31 }
32
[end of extensions/dependencies/dependencies_config.py]
[start of extensions/interactions/InteractiveMap/InteractiveMap.py]
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Python configuration for InteractiveMap interaction."""
18
19 from extensions.interactions import base
20
21
22 class InteractiveMap(base.BaseInteraction):
23 """Interaction for pinpointing a location on a map."""
24
25 name = 'World Map'
26 description = 'Allows learners to specify a position on a world map.'
27 display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
28 is_trainable = False
29 _dependency_ids = ['google_maps']
30 answer_type = 'CoordTwoDim'
31 instructions = 'Click on the map'
32 narrow_instructions = 'View map'
33 needs_summary = True
34 # There needs to be a way to pass marker location so that an answer can be
35 # conveyed meaningfully to the learner. Once this issue is fixed,
36 # InteractiveMap interaction can be supported by the solution feature.
37 can_have_solution = False
38 show_generic_submit_button = False
39
40 _customization_arg_specs = [{
41 'name': 'latitude',
42 'description': 'Starting center latitude (-90 to 90)',
43 'schema': {
44 'type': 'float',
45 'validators': [{
46 'id': 'is_at_least',
47 'min_value': -90.0,
48 }, {
49 'id': 'is_at_most',
50 'max_value': 90.0,
51 }]
52 },
53 'default_value': 0.0,
54 }, {
55 'name': 'longitude',
56 'description': 'Starting center longitude (-180 to 180)',
57 'schema': {
58 'type': 'float',
59 'validators': [{
60 'id': 'is_at_least',
61 'min_value': -180.0,
62 }, {
63 'id': 'is_at_most',
64 'max_value': 180.0,
65 }]
66 },
67 'default_value': 0.0,
68 }, {
69 'name': 'zoom',
70 'description': 'Starting zoom level (0 shows the entire earth)',
71 'schema': {
72 'type': 'float',
73 },
74 'default_value': 0.0,
75 }]
76
77 _answer_visualization_specs = [{
78 # Table with answer counts for top N answers.
79 'id': 'FrequencyTable',
80 'options': {
81 'column_headers': ['Answer', 'Count'],
82 'title': 'Top 10 answers',
83 },
84 'calculation_id': 'Top10AnswerFrequencies',
85 'addressed_info_is_supported': True,
86 }]
87
[end of extensions/interactions/InteractiveMap/InteractiveMap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/dependencies/dependencies_config.py b/extensions/dependencies/dependencies_config.py
--- a/extensions/dependencies/dependencies_config.py
+++ b/extensions/dependencies/dependencies_config.py
@@ -21,7 +21,7 @@
# should insert when the Angular app is first initialized.
DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {
'codemirror': ['ui.codemirror'],
- 'google_maps': ['ui.map'],
+ 'ui_leaflet': ['ui-leaflet'],
'guppy': [],
'logic_proof': [],
'math_expressions': [],
diff --git a/extensions/interactions/InteractiveMap/InteractiveMap.py b/extensions/interactions/InteractiveMap/InteractiveMap.py
--- a/extensions/interactions/InteractiveMap/InteractiveMap.py
+++ b/extensions/interactions/InteractiveMap/InteractiveMap.py
@@ -26,7 +26,7 @@
description = 'Allows learners to specify a position on a world map.'
display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
is_trainable = False
- _dependency_ids = ['google_maps']
+ _dependency_ids = ['ui_leaflet']
answer_type = 'CoordTwoDim'
instructions = 'Click on the map'
narrow_instructions = 'View map'
| {"golden_diff": "diff --git a/extensions/dependencies/dependencies_config.py b/extensions/dependencies/dependencies_config.py\n--- a/extensions/dependencies/dependencies_config.py\n+++ b/extensions/dependencies/dependencies_config.py\n@@ -21,7 +21,7 @@\n # should insert when the Angular app is first initialized.\n DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {\n 'codemirror': ['ui.codemirror'],\n- 'google_maps': ['ui.map'],\n+ 'ui_leaflet': ['ui-leaflet'],\n 'guppy': [],\n 'logic_proof': [],\n 'math_expressions': [],\ndiff --git a/extensions/interactions/InteractiveMap/InteractiveMap.py b/extensions/interactions/InteractiveMap/InteractiveMap.py\n--- a/extensions/interactions/InteractiveMap/InteractiveMap.py\n+++ b/extensions/interactions/InteractiveMap/InteractiveMap.py\n@@ -26,7 +26,7 @@\n description = 'Allows learners to specify a position on a world map.'\n display_mode = base.DISPLAY_MODE_SUPPLEMENTAL\n is_trainable = False\n- _dependency_ids = ['google_maps']\n+ _dependency_ids = ['ui_leaflet']\n answer_type = 'CoordTwoDim'\n instructions = 'Click on the map'\n narrow_instructions = 'View map'\n", "issue": "InteractiveMap interaction: in the rule editor, clicks on the map are not displayed correctly\nCreate an exploration with a map interaction. Add a rule and click on the map to choose the point the rule applies to. A marker should appear where you click, but it does not.\n\nSave and close the rule, then re-open it. The marker is now displayed correctly.\n\nCreate a new rule. Before being clicked on the map should be blank, but instead it displays the position of the marker from the previous rule.\n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration for JavaScript library dependencies.\"\"\"\n\n\n# A dict mapping dependency ids to the Angular module names they\n# should insert when the Angular app is first initialized.\nDEPENDENCIES_TO_ANGULAR_MODULES_DICT = {\n 'codemirror': ['ui.codemirror'],\n 'google_maps': ['ui.map'],\n 'guppy': [],\n 'logic_proof': [],\n 'math_expressions': [],\n 'midijs': [],\n 'pencilcode': [],\n 'skulpt': [],\n}\n", "path": "extensions/dependencies/dependencies_config.py"}, {"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python configuration for InteractiveMap interaction.\"\"\"\n\nfrom extensions.interactions import base\n\n\nclass InteractiveMap(base.BaseInteraction):\n \"\"\"Interaction for pinpointing a location on a map.\"\"\"\n\n name = 'World Map'\n description = 'Allows learners to specify a position on a world map.'\n display_mode = base.DISPLAY_MODE_SUPPLEMENTAL\n is_trainable = False\n _dependency_ids = ['google_maps']\n answer_type = 'CoordTwoDim'\n instructions = 'Click on the map'\n narrow_instructions = 'View map'\n needs_summary = True\n # There needs to be a way to pass marker location so that an answer can be\n # conveyed meaningfully to the learner. Once this issue is fixed,\n # InteractiveMap interaction can be supported by the solution feature.\n can_have_solution = False\n show_generic_submit_button = False\n\n _customization_arg_specs = [{\n 'name': 'latitude',\n 'description': 'Starting center latitude (-90 to 90)',\n 'schema': {\n 'type': 'float',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': -90.0,\n }, {\n 'id': 'is_at_most',\n 'max_value': 90.0,\n }]\n },\n 'default_value': 0.0,\n }, {\n 'name': 'longitude',\n 'description': 'Starting center longitude (-180 to 180)',\n 'schema': {\n 'type': 'float',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': -180.0,\n }, {\n 'id': 'is_at_most',\n 'max_value': 180.0,\n }]\n },\n 'default_value': 0.0,\n }, {\n 'name': 'zoom',\n 'description': 'Starting zoom level (0 shows the entire earth)',\n 'schema': {\n 'type': 'float',\n },\n 'default_value': 0.0,\n }]\n\n _answer_visualization_specs = [{\n # Table with answer counts for top N answers.\n 'id': 'FrequencyTable',\n 'options': {\n 'column_headers': ['Answer', 'Count'],\n 'title': 'Top 10 answers',\n },\n 'calculation_id': 'Top10AnswerFrequencies',\n 'addressed_info_is_supported': True,\n }]\n", "path": "extensions/interactions/InteractiveMap/InteractiveMap.py"}]} | 1,812 | 276 |
gh_patches_debug_8482 | rasdani/github-patches | git_diff | airctic__icevision-910 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Efficientdet inference returns wrong bbox predictions
## 🐛 Bug
When running inference with efficientdet models, the predictions are squeezed to fit only a square aspect ratio. This problem is only visible when running efficientdet with a rectangular input shape (e.g. 512x768). Here is a screenshot of the default behavior:

Note that the predictions are squeezed, seemingly to a square image resolution. I have discovered that the bug comes from the `process_infer_record` function, where the image input shape is passed to effdet in the wrong notation (H, W instead of W, H).
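Roughly, the change I have in mind in `process_infer_record` (sketch only, assuming the tensor returned by `im2tensor` is laid out as C, H, W):

```python
# Sketch of the fix: unpack the C, H, W tensor and hand effdet the size as (W, H).
def process_infer_record(record) -> tuple:
    image = im2tensor(record.img)
    n_channels, image_height, image_width = image.shape
    image_scale = 1.0
    # EffDet expects the image size in W, H notation.
    return image, (image_width, image_height), image_scale
```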
I applied that fix and the result is working as expected:

**To Reproduce**
Steps to reproduce the behavior:
1. Train an efficientdet model with a rectangular image input shape
2. Run inference
</issue>
<code>
[start of icevision/models/ross/efficientdet/dataloaders.py]
1 __all__ = [
2 "build_train_batch",
3 "build_valid_batch",
4 "build_infer_batch",
5 "train_dl",
6 "valid_dl",
7 "infer_dl",
8 ]
9
10 from icevision.imports import *
11 from icevision.models.utils import *
12
13
14 def train_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
15 """A `DataLoader` with a custom `collate_fn` that batches items as required for training the model.
16
17 # Arguments
18 dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.
19 batch_tfms: Transforms to be applied at the batch level.
20 **dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.
21 The parameter `collate_fn` is already defined internally and cannot be passed here.
22
23 # Returns
24 A Pytorch `DataLoader`.
25 """
26 return transform_dl(
27 dataset=dataset,
28 build_batch=build_train_batch,
29 batch_tfms=batch_tfms,
30 **dataloader_kwargs
31 )
32
33
34 def valid_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
35 """A `DataLoader` with a custom `collate_fn` that batches items as required for validating the model.
36
37 # Arguments
38 dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.
39 batch_tfms: Transforms to be applied at the batch level.
40 **dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.
41 The parameter `collate_fn` is already defined internally and cannot be passed here.
42
43 # Returns
44 A Pytorch `DataLoader`.
45 """
46 return transform_dl(
47 dataset=dataset,
48 build_batch=build_valid_batch,
49 batch_tfms=batch_tfms,
50 **dataloader_kwargs
51 )
52
53
54 def infer_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:
55 """A `DataLoader` with a custom `collate_fn` that batches items as required for inferring the model.
56
57 # Arguments
58 dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.
59 batch_tfms: Transforms to be applied at the batch level.
60 **dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.
61 The parameter `collate_fn` is already defined internally and cannot be passed here.
62
63 # Returns
64 A Pytorch `DataLoader`.
65 """
66 return transform_dl(
67 dataset=dataset,
68 build_batch=build_infer_batch,
69 batch_tfms=batch_tfms,
70 **dataloader_kwargs
71 )
72
73
74 def build_train_batch(records):
75 """Builds a batch in the format required by the model when training.
76
77 # Arguments
78 records: A `Sequence` of records.
79
80 # Returns
81 A tuple with two items. The first will be a tuple like `(images, targets)`,
82 in the input format required by the model. The second will be a list
83 of the input records.
84
85 # Examples
86
87 Use the result of this function to feed the model.
88 ```python
89 batch, records = build_train_batch(records)
90 outs = model(*batch)
91 ```
92 """
93 batch_images, batch_bboxes, batch_classes = zip(
94 *(process_train_record(record) for record in records)
95 )
96
97 # convert to tensors
98 batch_images = torch.stack(batch_images)
99 batch_bboxes = [tensor(bboxes, dtype=torch.float32) for bboxes in batch_bboxes]
100 batch_classes = [tensor(classes, dtype=torch.float32) for classes in batch_classes]
101
102 # convert to EffDet interface
103 targets = dict(bbox=batch_bboxes, cls=batch_classes)
104
105 return (batch_images, targets), records
106
107
108 def build_valid_batch(records):
109 """Builds a batch in the format required by the model when validating.
110
111 # Arguments
112 records: A `Sequence` of records.
113
114 # Returns
115 A tuple with two items. The first will be a tuple like `(images, targets)`,
116 in the input format required by the model. The second will be a list
117 of the input records.
118
119 # Examples
120
121 Use the result of this function to feed the model.
122 ```python
123 batch, records = build_valid_batch(records)
124 outs = model(*batch)
125 ```
126 """
127 (batch_images, targets), records = build_train_batch(records)
128
129 # convert to EffDet interface, when not training, dummy size and scale is required
130 targets = dict(img_size=None, img_scale=None, **targets)
131
132 return (batch_images, targets), records
133
134
135 def build_infer_batch(records):
136 """Builds a batch in the format required by the model when doing inference.
137
138 # Arguments
139 records: A `Sequence` of records.
140
141 # Returns
142 A tuple with two items. The first will be a tuple like `(images, targets)`,
143 in the input format required by the model. The second will be a list
144 of the input records.
145 Use the result of this function to feed the model.
146 ```python
147 batch, records = build_infer_batch(records)
148 outs = model(*batch)
149 ```
150 """
151 batch_images, batch_sizes, batch_scales = zip(
152 *(process_infer_record(record) for record in records)
153 )
154
155 # convert to tensors
156 batch_images = torch.stack(batch_images)
157 batch_sizes = tensor(batch_sizes, dtype=torch.float32)
158 batch_scales = tensor(batch_scales, dtype=torch.float32)
159
160 # convert to EffDet interface
161 targets = dict(img_size=batch_sizes, img_scale=batch_scales)
162
163 return (batch_images, targets), records
164
165
166 def process_train_record(record) -> tuple:
167 """Extracts information from record and prepares a format required by the EffDet training"""
168 image = im2tensor(record.img)
169 # background and dummy if no label in record
170 classes = record.detection.label_ids if record.detection.label_ids else [0]
171 bboxes = (
172 [bbox.yxyx for bbox in record.detection.bboxes]
173 if len(record.detection.label_ids) > 0
174 else [[0, 0, 0, 0]]
175 )
176 return image, bboxes, classes
177
178
179 def process_infer_record(record) -> tuple:
180 """Extracts information from record and prepares a format required by the EffDet inference"""
181 image = im2tensor(record.img)
182 image_size = image.shape[-2:]
183 image_scale = 1.0
184
185 return image, image_size, image_scale
186
[end of icevision/models/ross/efficientdet/dataloaders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/ross/efficientdet/dataloaders.py b/icevision/models/ross/efficientdet/dataloaders.py
--- a/icevision/models/ross/efficientdet/dataloaders.py
+++ b/icevision/models/ross/efficientdet/dataloaders.py
@@ -179,7 +179,7 @@
def process_infer_record(record) -> tuple:
"""Extracts information from record and prepares a format required by the EffDet inference"""
image = im2tensor(record.img)
- image_size = image.shape[-2:]
+ n_channels, image_height, image_width = image.shape
image_scale = 1.0
-
- return image, image_size, image_scale
+ # EffDet expects image size to be passed in W, H notation
+ return image, (image_width, image_height), image_scale
| {"golden_diff": "diff --git a/icevision/models/ross/efficientdet/dataloaders.py b/icevision/models/ross/efficientdet/dataloaders.py\n--- a/icevision/models/ross/efficientdet/dataloaders.py\n+++ b/icevision/models/ross/efficientdet/dataloaders.py\n@@ -179,7 +179,7 @@\n def process_infer_record(record) -> tuple:\n \"\"\"Extracts information from record and prepares a format required by the EffDet inference\"\"\"\n image = im2tensor(record.img)\n- image_size = image.shape[-2:]\n+ n_channels, image_height, image_width = image.shape\n image_scale = 1.0\n-\n- return image, image_size, image_scale\n+ # EffDet expects image size to be passed in W, H notation\n+ return image, (image_width, image_height), image_scale\n", "issue": "Efficientdet inference returns wrong bbox predictions\n## \ud83d\udc1b Bug\r\nWhen running inference on efficientdet models, the predictions are squeezed to only fit square aspect ratio. This problem is only visible when running efficientdet in rectangular input shape (eg 512x768). Here is a screenshot of default behavior:\r\n\r\n\r\nNote that predictions are squezzed, seemingly to only square image resolution. I have discovered that the bug comes from `process_infer_record` function, where the image input shape is passed to effdet in the wrong notation (H, W instead of W, H).\r\n\r\nI applied that fix and the result is working as expected:\r\n\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Train efficientdet model in rectangular image input shape\r\n2. Run inference\r\n\r\n\n", "before_files": [{"content": "__all__ = [\n \"build_train_batch\",\n \"build_valid_batch\",\n \"build_infer_batch\",\n \"train_dl\",\n \"valid_dl\",\n \"infer_dl\",\n]\n\nfrom icevision.imports import *\nfrom icevision.models.utils import *\n\n\ndef train_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:\n \"\"\"A `DataLoader` with a custom `collate_fn` that batches items as required for training the model.\n\n # Arguments\n dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.\n batch_tfms: Transforms to be applied at the batch level.\n **dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.\n The parameter `collate_fn` is already defined internally and cannot be passed here.\n\n # Returns\n A Pytorch `DataLoader`.\n \"\"\"\n return transform_dl(\n dataset=dataset,\n build_batch=build_train_batch,\n batch_tfms=batch_tfms,\n **dataloader_kwargs\n )\n\n\ndef valid_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:\n \"\"\"A `DataLoader` with a custom `collate_fn` that batches items as required for validating the model.\n\n # Arguments\n dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.\n batch_tfms: Transforms to be applied at the batch level.\n **dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.\n The parameter `collate_fn` is already defined internally and cannot be passed here.\n\n # Returns\n A Pytorch `DataLoader`.\n \"\"\"\n return transform_dl(\n dataset=dataset,\n build_batch=build_valid_batch,\n batch_tfms=batch_tfms,\n **dataloader_kwargs\n )\n\n\ndef infer_dl(dataset, batch_tfms=None, **dataloader_kwargs) -> DataLoader:\n \"\"\"A `DataLoader` with a custom `collate_fn` that batches items as required for inferring the model.\n\n # Arguments\n dataset: Possibly a `Dataset` object, but more generally, any `Sequence` that returns records.\n batch_tfms: Transforms to be 
applied at the batch level.\n **dataloader_kwargs: Keyword arguments that will be internally passed to a Pytorch `DataLoader`.\n The parameter `collate_fn` is already defined internally and cannot be passed here.\n\n # Returns\n A Pytorch `DataLoader`.\n \"\"\"\n return transform_dl(\n dataset=dataset,\n build_batch=build_infer_batch,\n batch_tfms=batch_tfms,\n **dataloader_kwargs\n )\n\n\ndef build_train_batch(records):\n \"\"\"Builds a batch in the format required by the model when training.\n\n # Arguments\n records: A `Sequence` of records.\n\n # Returns\n A tuple with two items. The first will be a tuple like `(images, targets)`,\n in the input format required by the model. The second will be a list\n of the input records.\n\n # Examples\n\n Use the result of this function to feed the model.\n ```python\n batch, records = build_train_batch(records)\n outs = model(*batch)\n ```\n \"\"\"\n batch_images, batch_bboxes, batch_classes = zip(\n *(process_train_record(record) for record in records)\n )\n\n # convert to tensors\n batch_images = torch.stack(batch_images)\n batch_bboxes = [tensor(bboxes, dtype=torch.float32) for bboxes in batch_bboxes]\n batch_classes = [tensor(classes, dtype=torch.float32) for classes in batch_classes]\n\n # convert to EffDet interface\n targets = dict(bbox=batch_bboxes, cls=batch_classes)\n\n return (batch_images, targets), records\n\n\ndef build_valid_batch(records):\n \"\"\"Builds a batch in the format required by the model when validating.\n\n # Arguments\n records: A `Sequence` of records.\n\n # Returns\n A tuple with two items. The first will be a tuple like `(images, targets)`,\n in the input format required by the model. The second will be a list\n of the input records.\n\n # Examples\n\n Use the result of this function to feed the model.\n ```python\n batch, records = build_valid_batch(records)\n outs = model(*batch)\n ```\n \"\"\"\n (batch_images, targets), records = build_train_batch(records)\n\n # convert to EffDet interface, when not training, dummy size and scale is required\n targets = dict(img_size=None, img_scale=None, **targets)\n\n return (batch_images, targets), records\n\n\ndef build_infer_batch(records):\n \"\"\"Builds a batch in the format required by the model when doing inference.\n\n # Arguments\n records: A `Sequence` of records.\n\n # Returns\n A tuple with two items. The first will be a tuple like `(images, targets)`,\n in the input format required by the model. 
The second will be a list\n of the input records.\n Use the result of this function to feed the model.\n ```python\n batch, records = build_infer_batch(records)\n outs = model(*batch)\n ```\n \"\"\"\n batch_images, batch_sizes, batch_scales = zip(\n *(process_infer_record(record) for record in records)\n )\n\n # convert to tensors\n batch_images = torch.stack(batch_images)\n batch_sizes = tensor(batch_sizes, dtype=torch.float32)\n batch_scales = tensor(batch_scales, dtype=torch.float32)\n\n # convert to EffDet interface\n targets = dict(img_size=batch_sizes, img_scale=batch_scales)\n\n return (batch_images, targets), records\n\n\ndef process_train_record(record) -> tuple:\n \"\"\"Extracts information from record and prepares a format required by the EffDet training\"\"\"\n image = im2tensor(record.img)\n # background and dummy if no label in record\n classes = record.detection.label_ids if record.detection.label_ids else [0]\n bboxes = (\n [bbox.yxyx for bbox in record.detection.bboxes]\n if len(record.detection.label_ids) > 0\n else [[0, 0, 0, 0]]\n )\n return image, bboxes, classes\n\n\ndef process_infer_record(record) -> tuple:\n \"\"\"Extracts information from record and prepares a format required by the EffDet inference\"\"\"\n image = im2tensor(record.img)\n image_size = image.shape[-2:]\n image_scale = 1.0\n\n return image, image_size, image_scale\n", "path": "icevision/models/ross/efficientdet/dataloaders.py"}]} | 2,739 | 196 |
gh_patches_debug_34246 | rasdani/github-patches | git_diff | uccser__cs-unplugged-318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support multiple page resources
Currently the create image function for a resource returns a single image. Instead it should return a list of images, which would allow multiple-page resources.
For example, for 4 pages of a single-page resource the content would be:
```
Image output: [A]
Final document: A, A, A, A
```
For 4 pages of a three-page resource the content would be:
```
Image output: [A, B, C], [A, B, C], [A, B, C], [A, B, C]
Final document: A, B, C, A, B, C, A, B, C, A, B, C
```
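As a rough sketch of the assembly I have in mind (names follow `generate_resource_pdf.py` below; treating the generator's return value as a list is the proposed change, not the current behaviour):

```python
# Sketch: build the final document's pages from per-copy image lists.
final_pages = []
for _ in range(int(get_request["copies"])):
    images_for_copy = generate_resource_image(get_request, resource, module_path)  # e.g. [A, B, C]
    if not isinstance(images_for_copy, list):
        images_for_copy = [images_for_copy]  # single-page resources keep working
    final_pages.extend(images_for_copy)      # A, B, C, A, B, C, ...
```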
</issue>
<code>
[start of csunplugged/resources/views/generate_resource_pdf.py]
1 """Module for generating custom resource PDFs."""
2
3 from django.http import HttpResponse
4 from django.template.loader import render_to_string
5 from django.contrib.staticfiles import finders
6 from django.conf import settings
7 from PIL import Image
8 from io import BytesIO
9 import importlib
10 import base64
11
12 RESPONSE_CONTENT_DISPOSITION = 'attachment; filename="{filename}.pdf"'
13 MM_TO_PIXEL_RATIO = 3.78
14
15
16 def generate_resource_pdf(request, resource, module_path):
17 """Return a response containing a generated PDF resource.
18
19 Args:
20 request: HTTP request object
21 resource: Object of resource data.
22 module_path: Path to module for generating resource.
23
24 Returns:
25 HTTP response containing generated resource PDF.
26 """
27 # TODO: Weasyprint handling in production
28 import environ
29 env = environ.Env(
30 DJANGO_PRODUCTION=(bool),
31 )
32 if env("DJANGO_PRODUCTION"):
33 return HttpResponse("<html><body>PDF generation is currently not supported in production.</body></html>")
34 else:
35 from weasyprint import HTML, CSS
36 context = dict()
37 get_request = request.GET
38 context["paper_size"] = get_request["paper_size"]
39 context["resource"] = resource
40 context["header_text"] = get_request["header_text"]
41
42 resource_image_generator = importlib.import_module(module_path)
43 filename = "{} ({})".format(resource.name, resource_image_generator.subtitle(get_request, resource))
44 context["filename"] = filename
45
46 num_copies = range(0, int(get_request["copies"]))
47 context["resource_images"] = []
48 for copy in num_copies:
49 context["resource_images"].append(
50 generate_resource_image(get_request, resource, module_path)
51 )
52
53 pdf_html = render_to_string("resources/base-resource-pdf.html", context)
54 html = HTML(string=pdf_html, base_url=settings.STATIC_ROOT)
55 css_file = finders.find("css/print-resource-pdf.css")
56 css_string = open(css_file, encoding="UTF-8").read()
57 base_css = CSS(string=css_string)
58 pdf_file = html.write_pdf(stylesheets=[base_css])
59
60 response = HttpResponse(pdf_file, content_type="application/pdf")
61 response["Content-Disposition"] = RESPONSE_CONTENT_DISPOSITION.format(filename=filename)
62 return response
63
64
65 def generate_resource_image(get_request, resource, module_path):
66 """Retrieve image from resource generator and resize to size.
67
68 Args:
69 get_request: HTTP request object
70 resource: Object of resource data.
71 module_path: Path to module for generating resource.
72
73 Returns:
74 Base64 string of a generated resource image.
75 """
76 # Get image from resource image creator
77 resource_image_generator = importlib.import_module(module_path)
78 image = resource_image_generator.resource_image(get_request, resource)
79
80 # Resize image to reduce file size
81 if get_request["paper_size"] == "a4":
82 max_pixel_height = 267 * MM_TO_PIXEL_RATIO
83 elif get_request["paper_size"] == "letter":
84 max_pixel_height = 249 * MM_TO_PIXEL_RATIO
85 (width, height) = image.size
86 if height > max_pixel_height:
87 ratio = max_pixel_height / height
88 width *= ratio
89 height *= ratio
90 image = image.resize((int(width), int(height)), Image.ANTIALIAS)
91
92 # Save image to buffer
93 image_buffer = BytesIO()
94 image.save(image_buffer, format="PNG")
95
96 # Return base64 of image
97 return base64.b64encode(image_buffer.getvalue())
98
[end of csunplugged/resources/views/generate_resource_pdf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/resources/views/generate_resource_pdf.py b/csunplugged/resources/views/generate_resource_pdf.py
--- a/csunplugged/resources/views/generate_resource_pdf.py
+++ b/csunplugged/resources/views/generate_resource_pdf.py
@@ -63,7 +63,9 @@
def generate_resource_image(get_request, resource, module_path):
- """Retrieve image from resource generator and resize to size.
+ """Retrieve image(s) for one copy of resource from resource generator.
+
+ Images are resized to size.
Args:
get_request: HTTP request object
@@ -71,27 +73,33 @@
module_path: Path to module for generating resource.
Returns:
- Base64 string of a generated resource image.
+ List of Base64 strings of a generated resource images for one copy.
"""
- # Get image from resource image creator
+ # Get images from resource image creator
resource_image_generator = importlib.import_module(module_path)
- image = resource_image_generator.resource_image(get_request, resource)
+ raw_images = resource_image_generator.resource_image(get_request, resource)
+ if not isinstance(raw_images, list):
+ raw_images = [raw_images]
- # Resize image to reduce file size
+ # Resize images to reduce file size
if get_request["paper_size"] == "a4":
max_pixel_height = 267 * MM_TO_PIXEL_RATIO
elif get_request["paper_size"] == "letter":
max_pixel_height = 249 * MM_TO_PIXEL_RATIO
- (width, height) = image.size
- if height > max_pixel_height:
- ratio = max_pixel_height / height
- width *= ratio
- height *= ratio
- image = image.resize((int(width), int(height)), Image.ANTIALIAS)
-
- # Save image to buffer
- image_buffer = BytesIO()
- image.save(image_buffer, format="PNG")
-
- # Return base64 of image
- return base64.b64encode(image_buffer.getvalue())
+
+ images = []
+ for image in raw_images:
+ (width, height) = image.size
+ if height > max_pixel_height:
+ ratio = max_pixel_height / height
+ width *= ratio
+ height *= ratio
+ image = image.resize((int(width), int(height)), Image.ANTIALIAS)
+
+ # Save image to buffer
+ image_buffer = BytesIO()
+ image.save(image_buffer, format="PNG")
+ # Add base64 of image to list of images
+ images.append(base64.b64encode(image_buffer.getvalue()))
+
+ return images
| {"golden_diff": "diff --git a/csunplugged/resources/views/generate_resource_pdf.py b/csunplugged/resources/views/generate_resource_pdf.py\n--- a/csunplugged/resources/views/generate_resource_pdf.py\n+++ b/csunplugged/resources/views/generate_resource_pdf.py\n@@ -63,7 +63,9 @@\n \n \n def generate_resource_image(get_request, resource, module_path):\n- \"\"\"Retrieve image from resource generator and resize to size.\n+ \"\"\"Retrieve image(s) for one copy of resource from resource generator.\n+\n+ Images are resized to size.\n \n Args:\n get_request: HTTP request object\n@@ -71,27 +73,33 @@\n module_path: Path to module for generating resource.\n \n Returns:\n- Base64 string of a generated resource image.\n+ List of Base64 strings of a generated resource images for one copy.\n \"\"\"\n- # Get image from resource image creator\n+ # Get images from resource image creator\n resource_image_generator = importlib.import_module(module_path)\n- image = resource_image_generator.resource_image(get_request, resource)\n+ raw_images = resource_image_generator.resource_image(get_request, resource)\n+ if not isinstance(raw_images, list):\n+ raw_images = [raw_images]\n \n- # Resize image to reduce file size\n+ # Resize images to reduce file size\n if get_request[\"paper_size\"] == \"a4\":\n max_pixel_height = 267 * MM_TO_PIXEL_RATIO\n elif get_request[\"paper_size\"] == \"letter\":\n max_pixel_height = 249 * MM_TO_PIXEL_RATIO\n- (width, height) = image.size\n- if height > max_pixel_height:\n- ratio = max_pixel_height / height\n- width *= ratio\n- height *= ratio\n- image = image.resize((int(width), int(height)), Image.ANTIALIAS)\n-\n- # Save image to buffer\n- image_buffer = BytesIO()\n- image.save(image_buffer, format=\"PNG\")\n-\n- # Return base64 of image\n- return base64.b64encode(image_buffer.getvalue())\n+\n+ images = []\n+ for image in raw_images:\n+ (width, height) = image.size\n+ if height > max_pixel_height:\n+ ratio = max_pixel_height / height\n+ width *= ratio\n+ height *= ratio\n+ image = image.resize((int(width), int(height)), Image.ANTIALIAS)\n+\n+ # Save image to buffer\n+ image_buffer = BytesIO()\n+ image.save(image_buffer, format=\"PNG\")\n+ # Add base64 of image to list of images\n+ images.append(base64.b64encode(image_buffer.getvalue()))\n+\n+ return images\n", "issue": "Support multiple page resources\nCurrently the create image function for a resource return a single image. 
Instead it should return a list of images, which would allow multiple page resources.\r\n\r\nFor example, for 4 pages of a single page resource the content would be:\r\n\r\n```\r\nImage output: [A]\r\nFinal document: A, A, A, A\r\n```\r\n\r\nFor 4 pages of a three page resource the content would be:\r\n\r\n```\r\nImage output: [A, B, C], [A, B, C], [A, B, C], [A, B, C] \r\nFinal document: A, B, C, A, B, C, A, B, C, A, B, C\r\n```\n", "before_files": [{"content": "\"\"\"Module for generating custom resource PDFs.\"\"\"\n\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.contrib.staticfiles import finders\nfrom django.conf import settings\nfrom PIL import Image\nfrom io import BytesIO\nimport importlib\nimport base64\n\nRESPONSE_CONTENT_DISPOSITION = 'attachment; filename=\"{filename}.pdf\"'\nMM_TO_PIXEL_RATIO = 3.78\n\n\ndef generate_resource_pdf(request, resource, module_path):\n \"\"\"Return a response containing a generated PDF resource.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n module_path: Path to module for generating resource.\n\n Returns:\n HTTP response containing generated resource PDF.\n \"\"\"\n # TODO: Weasyprint handling in production\n import environ\n env = environ.Env(\n DJANGO_PRODUCTION=(bool),\n )\n if env(\"DJANGO_PRODUCTION\"):\n return HttpResponse(\"<html><body>PDF generation is currently not supported in production.</body></html>\")\n else:\n from weasyprint import HTML, CSS\n context = dict()\n get_request = request.GET\n context[\"paper_size\"] = get_request[\"paper_size\"]\n context[\"resource\"] = resource\n context[\"header_text\"] = get_request[\"header_text\"]\n\n resource_image_generator = importlib.import_module(module_path)\n filename = \"{} ({})\".format(resource.name, resource_image_generator.subtitle(get_request, resource))\n context[\"filename\"] = filename\n\n num_copies = range(0, int(get_request[\"copies\"]))\n context[\"resource_images\"] = []\n for copy in num_copies:\n context[\"resource_images\"].append(\n generate_resource_image(get_request, resource, module_path)\n )\n\n pdf_html = render_to_string(\"resources/base-resource-pdf.html\", context)\n html = HTML(string=pdf_html, base_url=settings.STATIC_ROOT)\n css_file = finders.find(\"css/print-resource-pdf.css\")\n css_string = open(css_file, encoding=\"UTF-8\").read()\n base_css = CSS(string=css_string)\n pdf_file = html.write_pdf(stylesheets=[base_css])\n\n response = HttpResponse(pdf_file, content_type=\"application/pdf\")\n response[\"Content-Disposition\"] = RESPONSE_CONTENT_DISPOSITION.format(filename=filename)\n return response\n\n\ndef generate_resource_image(get_request, resource, module_path):\n \"\"\"Retrieve image from resource generator and resize to size.\n\n Args:\n get_request: HTTP request object\n resource: Object of resource data.\n module_path: Path to module for generating resource.\n\n Returns:\n Base64 string of a generated resource image.\n \"\"\"\n # Get image from resource image creator\n resource_image_generator = importlib.import_module(module_path)\n image = resource_image_generator.resource_image(get_request, resource)\n\n # Resize image to reduce file size\n if get_request[\"paper_size\"] == \"a4\":\n max_pixel_height = 267 * MM_TO_PIXEL_RATIO\n elif get_request[\"paper_size\"] == \"letter\":\n max_pixel_height = 249 * MM_TO_PIXEL_RATIO\n (width, height) = image.size\n if height > max_pixel_height:\n ratio = max_pixel_height / height\n width *= ratio\n height *= ratio\n 
image = image.resize((int(width), int(height)), Image.ANTIALIAS)\n\n # Save image to buffer\n image_buffer = BytesIO()\n image.save(image_buffer, format=\"PNG\")\n\n # Return base64 of image\n return base64.b64encode(image_buffer.getvalue())\n", "path": "csunplugged/resources/views/generate_resource_pdf.py"}]} | 1,648 | 599 |
gh_patches_debug_32375 | rasdani/github-patches | git_diff | getsentry__sentry-python-897 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash in pure_eval
This happened while we were experiencing a DB outage:
```
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 443, in fetch
return await self._execute(query, args, 0, timeout)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1445, in _execute
result, _ = await self.__execute(
File "/server/athenian/api/db.py", line 191, in _asyncpg_execute
result = await self._execute_original(query, args, limit, timeout, return_status)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1454, in __execute
return await self._do_execute(query, executor, timeout)
File "/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py", line 1476, in _do_execute
result = await executor(stmt, None)
File "asyncpg/protocol/protocol.pyx", line 196, in bind_execute
return await waiter
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/scope.py", line 353, in apply_to_event
new_event = event_processor(event, hint)
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 79, in add_executing_info
pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 128, in pure_eval_frame
expressions.sort(key=closeness, reverse=True)
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 113, in closeness
nodes_before_stmt = [
File "/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py", line 114, in <listcomp>
node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
AttributeError: 'Name' object has no attribute 'first_token'
```
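A possible direction for a fix (sketch only, not verified): sort by `(lineno, col_offset)`, which every `ast` expression node carries, instead of the `first_token` annotation that is evidently missing on some of these nodes:

```python
# Sketch of a closeness key that avoids node.first_token entirely.
def closeness_key(nodes, stmt_end):
    # stmt_end would be stmt.last_token.end, i.e. a (line, column) pair.
    def start(node):
        return (node.lineno, node.col_offset)
    before = [node for node in nodes if start(node) < stmt_end]
    if before:
        # Position of the last node before or in the statement.
        return max(start(node) for node in before)
    # Otherwise rank by the first node after the statement, lower priority.
    lineno, col_offset = min(start(node) for node in nodes)
    return (-lineno, -col_offset)
```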
</issue>
<code>
[start of sentry_sdk/integrations/pure_eval.py]
1 from __future__ import absolute_import
2
3 import ast
4
5 from sentry_sdk import Hub, serializer
6 from sentry_sdk._types import MYPY
7 from sentry_sdk.integrations import Integration, DidNotEnable
8 from sentry_sdk.scope import add_global_event_processor
9 from sentry_sdk.utils import walk_exception_chain, iter_stacks
10
11 if MYPY:
12 from typing import Optional, Dict, Any, Tuple, List
13 from types import FrameType
14
15 from sentry_sdk._types import Event, Hint
16
17 try:
18 import executing
19 except ImportError:
20 raise DidNotEnable("executing is not installed")
21
22 try:
23 import pure_eval
24 except ImportError:
25 raise DidNotEnable("pure_eval is not installed")
26
27 try:
28 # Used implicitly, just testing it's available
29 import asttokens # noqa
30 except ImportError:
31 raise DidNotEnable("asttokens is not installed")
32
33
34 class PureEvalIntegration(Integration):
35 identifier = "pure_eval"
36
37 @staticmethod
38 def setup_once():
39 # type: () -> None
40
41 @add_global_event_processor
42 def add_executing_info(event, hint):
43 # type: (Event, Optional[Hint]) -> Optional[Event]
44 if Hub.current.get_integration(PureEvalIntegration) is None:
45 return event
46
47 if hint is None:
48 return event
49
50 exc_info = hint.get("exc_info", None)
51
52 if exc_info is None:
53 return event
54
55 exception = event.get("exception", None)
56
57 if exception is None:
58 return event
59
60 values = exception.get("values", None)
61
62 if values is None:
63 return event
64
65 for exception, (_exc_type, _exc_value, exc_tb) in zip(
66 reversed(values), walk_exception_chain(exc_info)
67 ):
68 sentry_frames = [
69 frame
70 for frame in exception.get("stacktrace", {}).get("frames", [])
71 if frame.get("function")
72 ]
73 tbs = list(iter_stacks(exc_tb))
74 if len(sentry_frames) != len(tbs):
75 continue
76
77 for sentry_frame, tb in zip(sentry_frames, tbs):
78 sentry_frame["vars"] = (
79 pure_eval_frame(tb.tb_frame) or sentry_frame["vars"]
80 )
81 return event
82
83
84 def pure_eval_frame(frame):
85 # type: (FrameType) -> Dict[str, Any]
86 source = executing.Source.for_frame(frame)
87 if not source.tree:
88 return {}
89
90 statements = source.statements_at_line(frame.f_lineno)
91 if not statements:
92 return {}
93
94 scope = stmt = list(statements)[0]
95 while True:
96 # Get the parent first in case the original statement is already
97 # a function definition, e.g. if we're calling a decorator
98 # In that case we still want the surrounding scope, not that function
99 scope = scope.parent
100 if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):
101 break
102
103 evaluator = pure_eval.Evaluator.from_frame(frame)
104 expressions = evaluator.interesting_expressions_grouped(scope)
105
106 def closeness(expression):
107 # type: (Tuple[List[Any], Any]) -> int
108 # Prioritise expressions with a node closer to the statement executed
109 # without being after that statement
110 # A higher return value is better - the expression will appear
111 # earlier in the list of values and is less likely to be trimmed
112 nodes, _value = expression
113 nodes_before_stmt = [
114 node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
115 ]
116 if nodes_before_stmt:
117 # The position of the last node before or in the statement
118 return max(node.first_token.startpos for node in nodes_before_stmt)
119 else:
120 # The position of the first node after the statement
121 # Negative means it's always lower priority than nodes that come before
122 # Less negative means closer to the statement and higher priority
123 return -min(node.first_token.startpos for node in nodes)
124
125 # This adds the first_token and last_token attributes to nodes
126 atok = source.asttokens()
127
128 expressions.sort(key=closeness, reverse=True)
129 return {
130 atok.get_text(nodes[0]): value
131 for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]
132 }
133
[end of sentry_sdk/integrations/pure_eval.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/pure_eval.py b/sentry_sdk/integrations/pure_eval.py
--- a/sentry_sdk/integrations/pure_eval.py
+++ b/sentry_sdk/integrations/pure_eval.py
@@ -104,23 +104,29 @@
expressions = evaluator.interesting_expressions_grouped(scope)
def closeness(expression):
- # type: (Tuple[List[Any], Any]) -> int
+ # type: (Tuple[List[Any], Any]) -> Tuple[int, int]
# Prioritise expressions with a node closer to the statement executed
# without being after that statement
# A higher return value is better - the expression will appear
# earlier in the list of values and is less likely to be trimmed
nodes, _value = expression
+
+ def start(n):
+ # type: (ast.expr) -> Tuple[int, int]
+ return (n.lineno, n.col_offset)
+
nodes_before_stmt = [
- node for node in nodes if node.first_token.startpos < stmt.last_token.endpos
+ node for node in nodes if start(node) < stmt.last_token.end
]
if nodes_before_stmt:
# The position of the last node before or in the statement
- return max(node.first_token.startpos for node in nodes_before_stmt)
+ return max(start(node) for node in nodes_before_stmt)
else:
# The position of the first node after the statement
# Negative means it's always lower priority than nodes that come before
# Less negative means closer to the statement and higher priority
- return -min(node.first_token.startpos for node in nodes)
+ lineno, col_offset = min(start(node) for node in nodes)
+ return (-lineno, -col_offset)
# This adds the first_token and last_token attributes to nodes
atok = source.asttokens()
| {"golden_diff": "diff --git a/sentry_sdk/integrations/pure_eval.py b/sentry_sdk/integrations/pure_eval.py\n--- a/sentry_sdk/integrations/pure_eval.py\n+++ b/sentry_sdk/integrations/pure_eval.py\n@@ -104,23 +104,29 @@\n expressions = evaluator.interesting_expressions_grouped(scope)\n \n def closeness(expression):\n- # type: (Tuple[List[Any], Any]) -> int\n+ # type: (Tuple[List[Any], Any]) -> Tuple[int, int]\n # Prioritise expressions with a node closer to the statement executed\n # without being after that statement\n # A higher return value is better - the expression will appear\n # earlier in the list of values and is less likely to be trimmed\n nodes, _value = expression\n+\n+ def start(n):\n+ # type: (ast.expr) -> Tuple[int, int]\n+ return (n.lineno, n.col_offset)\n+\n nodes_before_stmt = [\n- node for node in nodes if node.first_token.startpos < stmt.last_token.endpos\n+ node for node in nodes if start(node) < stmt.last_token.end\n ]\n if nodes_before_stmt:\n # The position of the last node before or in the statement\n- return max(node.first_token.startpos for node in nodes_before_stmt)\n+ return max(start(node) for node in nodes_before_stmt)\n else:\n # The position of the first node after the statement\n # Negative means it's always lower priority than nodes that come before\n # Less negative means closer to the statement and higher priority\n- return -min(node.first_token.startpos for node in nodes)\n+ lineno, col_offset = min(start(node) for node in nodes)\n+ return (-lineno, -col_offset)\n \n # This adds the first_token and last_token attributes to nodes\n atok = source.asttokens()\n", "issue": "Crash in pure_eval\nThis happened while we were experiencing a DB outage:\r\n```\r\n File \"/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py\", line 443, in fetch\r\n return await self._execute(query, args, 0, timeout)\r\n File \"/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py\", line 1445, in _execute\r\n result, _ = await self.__execute(\r\n File \"/server/athenian/api/db.py\", line 191, in _asyncpg_execute\r\n result = await self._execute_original(query, args, limit, timeout, return_status)\r\n File \"/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py\", line 1454, in __execute\r\n return await self._do_execute(query, executor, timeout)\r\n File \"/usr/local/lib/python3.8/dist-packages/asyncpg/connection.py\", line 1476, in _do_execute\r\n result = await executor(stmt, None)\r\n File \"asyncpg/protocol/protocol.pyx\", line 196, in bind_execute\r\n return await waiter\r\nasyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/dist-packages/sentry_sdk/scope.py\", line 353, in apply_to_event\r\n new_event = event_processor(event, hint)\r\n File \"/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py\", line 79, in add_executing_info\r\n pure_eval_frame(tb.tb_frame) or sentry_frame[\"vars\"]\r\n File \"/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py\", line 128, in pure_eval_frame\r\n expressions.sort(key=closeness, reverse=True)\r\n File \"/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py\", line 113, in closeness\r\n nodes_before_stmt = [\r\n File \"/usr/local/lib/python3.8/dist-packages/sentry_sdk/integrations/pure_eval.py\", line 114, in <listcomp>\r\n node for node in nodes if 
node.first_token.startpos < stmt.last_token.endpos\r\nAttributeError: 'Name' object has no attribute 'first_token'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport ast\n\nfrom sentry_sdk import Hub, serializer\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.scope import add_global_event_processor\nfrom sentry_sdk.utils import walk_exception_chain, iter_stacks\n\nif MYPY:\n from typing import Optional, Dict, Any, Tuple, List\n from types import FrameType\n\n from sentry_sdk._types import Event, Hint\n\ntry:\n import executing\nexcept ImportError:\n raise DidNotEnable(\"executing is not installed\")\n\ntry:\n import pure_eval\nexcept ImportError:\n raise DidNotEnable(\"pure_eval is not installed\")\n\ntry:\n # Used implicitly, just testing it's available\n import asttokens # noqa\nexcept ImportError:\n raise DidNotEnable(\"asttokens is not installed\")\n\n\nclass PureEvalIntegration(Integration):\n identifier = \"pure_eval\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n @add_global_event_processor\n def add_executing_info(event, hint):\n # type: (Event, Optional[Hint]) -> Optional[Event]\n if Hub.current.get_integration(PureEvalIntegration) is None:\n return event\n\n if hint is None:\n return event\n\n exc_info = hint.get(\"exc_info\", None)\n\n if exc_info is None:\n return event\n\n exception = event.get(\"exception\", None)\n\n if exception is None:\n return event\n\n values = exception.get(\"values\", None)\n\n if values is None:\n return event\n\n for exception, (_exc_type, _exc_value, exc_tb) in zip(\n reversed(values), walk_exception_chain(exc_info)\n ):\n sentry_frames = [\n frame\n for frame in exception.get(\"stacktrace\", {}).get(\"frames\", [])\n if frame.get(\"function\")\n ]\n tbs = list(iter_stacks(exc_tb))\n if len(sentry_frames) != len(tbs):\n continue\n\n for sentry_frame, tb in zip(sentry_frames, tbs):\n sentry_frame[\"vars\"] = (\n pure_eval_frame(tb.tb_frame) or sentry_frame[\"vars\"]\n )\n return event\n\n\ndef pure_eval_frame(frame):\n # type: (FrameType) -> Dict[str, Any]\n source = executing.Source.for_frame(frame)\n if not source.tree:\n return {}\n\n statements = source.statements_at_line(frame.f_lineno)\n if not statements:\n return {}\n\n scope = stmt = list(statements)[0]\n while True:\n # Get the parent first in case the original statement is already\n # a function definition, e.g. 
if we're calling a decorator\n # In that case we still want the surrounding scope, not that function\n scope = scope.parent\n if isinstance(scope, (ast.FunctionDef, ast.ClassDef, ast.Module)):\n break\n\n evaluator = pure_eval.Evaluator.from_frame(frame)\n expressions = evaluator.interesting_expressions_grouped(scope)\n\n def closeness(expression):\n # type: (Tuple[List[Any], Any]) -> int\n # Prioritise expressions with a node closer to the statement executed\n # without being after that statement\n # A higher return value is better - the expression will appear\n # earlier in the list of values and is less likely to be trimmed\n nodes, _value = expression\n nodes_before_stmt = [\n node for node in nodes if node.first_token.startpos < stmt.last_token.endpos\n ]\n if nodes_before_stmt:\n # The position of the last node before or in the statement\n return max(node.first_token.startpos for node in nodes_before_stmt)\n else:\n # The position of the first node after the statement\n # Negative means it's always lower priority than nodes that come before\n # Less negative means closer to the statement and higher priority\n return -min(node.first_token.startpos for node in nodes)\n\n # This adds the first_token and last_token attributes to nodes\n atok = source.asttokens()\n\n expressions.sort(key=closeness, reverse=True)\n return {\n atok.get_text(nodes[0]): value\n for nodes, value in expressions[: serializer.MAX_DATABAG_BREADTH]\n }\n", "path": "sentry_sdk/integrations/pure_eval.py"}]} | 2,304 | 418 |
gh_patches_debug_15867 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1521 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete user in Dashboard user edit doesn't perform any action
The delete user button does not actually send a request.
</issue>
<code>
[start of apps/authentication/dashboard/views.py]
1 # -*- encoding: utf-8 -*-
2
3 import json
4
5 from django.conf import settings
6 from django.contrib.auth.decorators import login_required
7 from django.contrib.auth.models import Group
8 from django.core.exceptions import PermissionDenied
9 from django.core.paginator import Paginator
10 from django.core.urlresolvers import reverse, reverse_lazy
11 from django.http import HttpResponse
12 from django.shortcuts import get_object_or_404, render
13 from django.views.generic import DeleteView, DetailView, ListView, UpdateView
14 from guardian.decorators import permission_required
15 from watson.views import SearchView
16
17 from apps.authentication.forms import UserUpdateForm
18 from apps.authentication.models import OnlineUser as User
19 from apps.authentication.models import AllowedUsername
20 from apps.dashboard.tools import DashboardPermissionMixin, get_base_context, has_access
21
22
23 @login_required
24 def index(request):
25 """
26 This is the main dashboard view
27 """
28
29 if not has_access(request):
30 raise PermissionDenied
31
32 context = get_base_context(request)
33
34 return render(request, 'auth/dashboard/index.html', context)
35
36
37 # GROUP MODULE VIEWS
38 @login_required
39 @permission_required('authentication.change_onlineuser', return_403=True)
40 def groups_index(request):
41 """
42 Group module in dashboard that lists groups.
43 """
44
45 if not has_access(request):
46 raise PermissionDenied
47
48 context = get_base_context(request)
49
50 context['groups'] = list(Group.objects.all())
51 context['groups'].sort(key=lambda x: str(x).lower())
52
53 return render(request, 'auth/dashboard/groups_index.html', context)
54
55
56 @login_required
57 @permission_required('authentication.change_onlineuser', return_403=True)
58 def groups_detail(request, pk):
59 """
60 Group module in dashboard that lists groups.
61 """
62
63 if not has_access(request):
64 raise PermissionDenied
65
66 context = get_base_context(request)
67
68 context['group'] = get_object_or_404(Group, pk=pk)
69
70 # AJAX
71 if request.method == 'POST':
72 if request.is_ajax and 'action' in request.POST:
73 resp = {'status': 200}
74 if request.POST['action'] == 'remove_user':
75 user = get_object_or_404(User, pk=int(request.POST['user_id']))
76 context['group'].user_set.remove(user)
77 resp['message'] = '%s ble fjernet fra %s' % (user.get_full_name(), context['group'].name)
78 resp['users'] = [{'user': u.get_full_name(), 'id': u.id} for u in context['group'].user_set.all()]
79 resp['users'].sort(key=lambda x: x['user'])
80
81 return HttpResponse(json.dumps(resp), status=200)
82 elif request.POST['action'] == 'add_user':
83 user = get_object_or_404(User, pk=int(request.POST['user_id']))
84 context['group'].user_set.add(user)
85 resp['full_name'] = user.get_full_name()
86 resp['users'] = [{'user': u.get_full_name(), 'id': u.id} for u in context['group'].user_set.all()]
87 resp['users'].sort(key=lambda x: x['user'])
88 resp['message'] = '%s ble lagt til i %s' % (resp['full_name'], context['group'].name)
89
90 return HttpResponse(json.dumps(resp), status=200)
91
92 return HttpResponse('Ugyldig handling.', status=400)
93
94 if hasattr(settings, 'GROUP_SYNCER') and settings.GROUP_SYNCER:
95 group_id = int(pk)
96 # Groups that list this one as their destination
97 context['sync_group_from'] = []
98 # Groups that list this one as one of their sources
99 context['sync_group_to'] = []
100
101 # Make a dict that simply maps {id: name} for all groups
102 groups = {g.id: g.name for g in Group.objects.all().order_by('id')}
103
104 for job in settings.GROUP_SYNCER:
105 if group_id in job['source']:
106 context['sync_group_to'].extend([groups[g_id] for g_id in job['destination']])
107 if group_id in job['destination']:
108 context['sync_group_from'].extend([groups[g_id] for g_id in job['source']])
109
110 context['group_users'] = list(context['group'].user_set.all())
111
112 context['group_permissions'] = list(context['group'].permissions.all())
113
114 context['group_users'].sort(key=lambda x: str(x).lower())
115 context['group_permissions'].sort(key=lambda x: str(x))
116
117 return render(request, 'auth/dashboard/groups_detail.html', context)
118
119
120 @login_required
121 @permission_required("authentication.view_allowedusername", return_403=True)
122 def members_index(request):
123
124 """
125 Index overview for allowedusernames in dashboard
126 """
127
128 if not has_access(request):
129 raise PermissionDenied
130
131 def merge_names(members):
132 for i in members:
133 user = list(User.objects.filter(ntnu_username=i.username))
134 if user:
135 i.full_name = user[0].get_full_name()
136 return members
137
138 context = get_base_context(request)
139 members = AllowedUsername.objects.all()
140 context['members'] = merge_names(members)
141
142 return render(request, 'auth/dashboard/user_list.html', context)
143
144
145 class UserListView(DashboardPermissionMixin, ListView):
146 model = User
147 queryset = User.objects.all().exclude(id=-1)
148 paginate_by = 25
149 paginator_class = Paginator
150 permission_required = 'authentication.view_onlineuser'
151 template_name = 'auth/dashboard/user_list.html'
152
153
154 class UserSearchView(DashboardPermissionMixin, SearchView):
155 model = User
156 queryset = User.objects.all().exclude(id=-1)
157 paginate_by = 25
158 paginator_class = Paginator
159 permission_required = 'authentication.view_onlineuser'
160 template_name = 'auth/dashboard/user_list.html'
161 empty_query_redirect = reverse_lazy('user_list')
162
163
164 class UserDetailView(DashboardPermissionMixin, DetailView):
165 model = User
166 context_object_name = 'user'
167 permission_required = 'authentication.view_onlineuser'
168 pk_url_kwarg = 'user_id'
169 template_name = 'auth/dashboard/user_detail.html'
170
171
172 class UserUpdateView(DashboardPermissionMixin, UpdateView):
173 form_class = UserUpdateForm
174 model = User
175 permission_required = 'authentication.change_onlineuser'
176 pk_url_kwarg = 'user_id'
177 template_name = 'auth/dashboard/user_edit.html'
178
179 def get_success_url(self):
180 return reverse('dashboard_user_detail', kwargs={'user_id': self.kwargs.get('user_id')})
181
182
183 class UserDeleteView(DashboardPermissionMixin, DeleteView):
184 model = User
185 permission_required = 'authentication.delete_onlineuser'
186 pk_url_kwarg = 'user_id'
187 success_url = reverse_lazy('auth_index')
188
189
190 @login_required
191 @permission_required("authentication.add_allowedusername", return_403=True)
192 def members_new(request):
193 """
194 Create new allowedusername form and handling
195 """
196 if not has_access(request):
197 raise PermissionDenied
198
199 context = get_base_context(request)
200
201 return render(request, 'auth/dashboard/members_new.html', context)
202
[end of apps/authentication/dashboard/views.py]
</code>
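For reference, the AJAX contract in `groups_detail` above takes a POST with `action` set to `add_user` or `remove_user` plus a `user_id`, and replies with a JSON payload. A hypothetical way to exercise it with Django's test client is sketched below — the URL pattern, credentials, and IDs are invented for illustration and are not taken from the repository's routing:

```python
# Assumes a logged-in user who passes has_access() and holds the
# 'authentication.change_onlineuser' permission; URL and IDs are made up.
from django.test import Client

client = Client()
client.login(username="admin", password="admin")
resp = client.post(
    "/dashboard/auth/groups/1/",                 # assumed route for groups_detail
    {"action": "add_user", "user_id": 2},
    HTTP_X_REQUESTED_WITH="XMLHttpRequest",      # conventional AJAX marker header
)
print(resp.status_code, resp.content)            # expect a JSON body with 'message' and 'users'
```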
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/dashboard/views.py b/apps/authentication/dashboard/views.py
--- a/apps/authentication/dashboard/views.py
+++ b/apps/authentication/dashboard/views.py
@@ -172,6 +172,7 @@
class UserUpdateView(DashboardPermissionMixin, UpdateView):
form_class = UserUpdateForm
model = User
+ context_object_name = 'user'
permission_required = 'authentication.change_onlineuser'
pk_url_kwarg = 'user_id'
template_name = 'auth/dashboard/user_edit.html'
@@ -184,7 +185,7 @@
model = User
permission_required = 'authentication.delete_onlineuser'
pk_url_kwarg = 'user_id'
- success_url = reverse_lazy('auth_index')
+ success_url = reverse_lazy('user_list')
@login_required
| {"golden_diff": "diff --git a/apps/authentication/dashboard/views.py b/apps/authentication/dashboard/views.py\n--- a/apps/authentication/dashboard/views.py\n+++ b/apps/authentication/dashboard/views.py\n@@ -172,6 +172,7 @@\n class UserUpdateView(DashboardPermissionMixin, UpdateView):\n form_class = UserUpdateForm\n model = User\n+ context_object_name = 'user'\n permission_required = 'authentication.change_onlineuser'\n pk_url_kwarg = 'user_id'\n template_name = 'auth/dashboard/user_edit.html'\n@@ -184,7 +185,7 @@\n model = User\n permission_required = 'authentication.delete_onlineuser'\n pk_url_kwarg = 'user_id'\n- success_url = reverse_lazy('auth_index')\n+ success_url = reverse_lazy('user_list')\n \n \n @login_required\n", "issue": "Delete user in Dashboard user edit doesn't perform any action\nThe delete user button does not actually perform a request.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport json\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.paginator import Paginator\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.generic import DeleteView, DetailView, ListView, UpdateView\nfrom guardian.decorators import permission_required\nfrom watson.views import SearchView\n\nfrom apps.authentication.forms import UserUpdateForm\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.authentication.models import AllowedUsername\nfrom apps.dashboard.tools import DashboardPermissionMixin, get_base_context, has_access\n\n\n@login_required\ndef index(request):\n \"\"\"\n This is the main dashboard view\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n return render(request, 'auth/dashboard/index.html', context)\n\n\n# GROUP MODULE VIEWS\n@login_required\n@permission_required('authentication.change_onlineuser', return_403=True)\ndef groups_index(request):\n \"\"\"\n Group module in dashboard that lists groups.\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n context['groups'] = list(Group.objects.all())\n context['groups'].sort(key=lambda x: str(x).lower())\n\n return render(request, 'auth/dashboard/groups_index.html', context)\n\n\n@login_required\n@permission_required('authentication.change_onlineuser', return_403=True)\ndef groups_detail(request, pk):\n \"\"\"\n Group module in dashboard that lists groups.\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n context['group'] = get_object_or_404(Group, pk=pk)\n\n # AJAX\n if request.method == 'POST':\n if request.is_ajax and 'action' in request.POST:\n resp = {'status': 200}\n if request.POST['action'] == 'remove_user':\n user = get_object_or_404(User, pk=int(request.POST['user_id']))\n context['group'].user_set.remove(user)\n resp['message'] = '%s ble fjernet fra %s' % (user.get_full_name(), context['group'].name)\n resp['users'] = [{'user': u.get_full_name(), 'id': u.id} for u in context['group'].user_set.all()]\n resp['users'].sort(key=lambda x: x['user'])\n\n return HttpResponse(json.dumps(resp), status=200)\n elif request.POST['action'] == 'add_user':\n user = get_object_or_404(User, pk=int(request.POST['user_id']))\n 
context['group'].user_set.add(user)\n resp['full_name'] = user.get_full_name()\n resp['users'] = [{'user': u.get_full_name(), 'id': u.id} for u in context['group'].user_set.all()]\n resp['users'].sort(key=lambda x: x['user'])\n resp['message'] = '%s ble lagt til i %s' % (resp['full_name'], context['group'].name)\n\n return HttpResponse(json.dumps(resp), status=200)\n\n return HttpResponse('Ugyldig handling.', status=400)\n\n if hasattr(settings, 'GROUP_SYNCER') and settings.GROUP_SYNCER:\n group_id = int(pk)\n # Groups that list this one as their destination\n context['sync_group_from'] = []\n # Groups that list this one as one of their sources\n context['sync_group_to'] = []\n\n # Make a dict that simply maps {id: name} for all groups\n groups = {g.id: g.name for g in Group.objects.all().order_by('id')}\n\n for job in settings.GROUP_SYNCER:\n if group_id in job['source']:\n context['sync_group_to'].extend([groups[g_id] for g_id in job['destination']])\n if group_id in job['destination']:\n context['sync_group_from'].extend([groups[g_id] for g_id in job['source']])\n\n context['group_users'] = list(context['group'].user_set.all())\n\n context['group_permissions'] = list(context['group'].permissions.all())\n\n context['group_users'].sort(key=lambda x: str(x).lower())\n context['group_permissions'].sort(key=lambda x: str(x))\n\n return render(request, 'auth/dashboard/groups_detail.html', context)\n\n\n@login_required\n@permission_required(\"authentication.view_allowedusername\", return_403=True)\ndef members_index(request):\n\n \"\"\"\n Index overview for allowedusernames in dashboard\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n def merge_names(members):\n for i in members:\n user = list(User.objects.filter(ntnu_username=i.username))\n if user:\n i.full_name = user[0].get_full_name()\n return members\n\n context = get_base_context(request)\n members = AllowedUsername.objects.all()\n context['members'] = merge_names(members)\n\n return render(request, 'auth/dashboard/user_list.html', context)\n\n\nclass UserListView(DashboardPermissionMixin, ListView):\n model = User\n queryset = User.objects.all().exclude(id=-1)\n paginate_by = 25\n paginator_class = Paginator\n permission_required = 'authentication.view_onlineuser'\n template_name = 'auth/dashboard/user_list.html'\n\n\nclass UserSearchView(DashboardPermissionMixin, SearchView):\n model = User\n queryset = User.objects.all().exclude(id=-1)\n paginate_by = 25\n paginator_class = Paginator\n permission_required = 'authentication.view_onlineuser'\n template_name = 'auth/dashboard/user_list.html'\n empty_query_redirect = reverse_lazy('user_list')\n\n\nclass UserDetailView(DashboardPermissionMixin, DetailView):\n model = User\n context_object_name = 'user'\n permission_required = 'authentication.view_onlineuser'\n pk_url_kwarg = 'user_id'\n template_name = 'auth/dashboard/user_detail.html'\n\n\nclass UserUpdateView(DashboardPermissionMixin, UpdateView):\n form_class = UserUpdateForm\n model = User\n permission_required = 'authentication.change_onlineuser'\n pk_url_kwarg = 'user_id'\n template_name = 'auth/dashboard/user_edit.html'\n\n def get_success_url(self):\n return reverse('dashboard_user_detail', kwargs={'user_id': self.kwargs.get('user_id')})\n\n\nclass UserDeleteView(DashboardPermissionMixin, DeleteView):\n model = User\n permission_required = 'authentication.delete_onlineuser'\n pk_url_kwarg = 'user_id'\n success_url = 
reverse_lazy('auth_index')\n\n\n@login_required\n@permission_required(\"authentication.add_allowedusername\", return_403=True)\ndef members_new(request):\n \"\"\"\n Create new allowedusername form and handling\n \"\"\"\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n return render(request, 'auth/dashboard/members_new.html', context)\n", "path": "apps/authentication/dashboard/views.py"}]} | 2,603 | 177 |
gh_patches_debug_34688 | rasdani/github-patches | git_diff | tensorflow__addons-271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automate Build Process
Currently we have no automated process for building Addons across Python versions and operating systems. Going forward we'll want this process to be automated, but it may be challenging for us to start builds without access to the Google-internal tooling.
We could conceivably use Travis, but if we can keep a consistent CI setup that would be ideal.
</issue>
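As a rough illustration of what an automated multi-interpreter build could look like, here is a minimal driver sketch — the interpreter list, the use of `bdist_wheel`, and the assumption that each interpreter (with the `wheel` package) is on PATH are all illustrative, not the project's actual tooling:

```python
# Hypothetical build loop: produce a wheel under dist/ with each interpreter
# named in setup.py's classifiers.
import subprocess

INTERPRETERS = ["python2.7", "python3.4", "python3.5", "python3.6"]

for python in INTERPRETERS:
    # Each call builds a platform/interpreter-specific wheel for the package.
    subprocess.check_call([python, "setup.py", "bdist_wheel"])
```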
<code>
[start of setup.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons
16
17 TensorFlow Addons is a repository of contributions that conform to
18 well-established API patterns,but implement new functionality not available in
19 core TensorFlow.TensorFlow natively supports a large number of operators,
20 layers, metrics, losses, and optimizers. However, in a fast movingfield like
21 ML, there are many interesting new developments that cannot be integrated into
22 core TensorFlow (because their broad applicability is not yet clear, or it is
23 mostly used by a smallersubset of the community).
24 """
25
26 from __future__ import absolute_import
27 from __future__ import division
28 from __future__ import print_function
29
30 import os
31
32 from setuptools import find_packages
33 from setuptools import setup
34 from setuptools.dist import Distribution
35
36 DOCLINES = __doc__.split('\n')
37
38 version = {}
39 base_dir = os.path.dirname(os.path.abspath(__file__))
40 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
41 # yapf: disable
42 exec(fp.read(), version)
43 # yapf: enable
44
45 REQUIRED_PACKAGES = [
46 'six >= 1.10.0',
47 ]
48
49 project_name = 'tensorflow-addons'
50
51
52 class BinaryDistribution(Distribution):
53 """This class is needed in order to create OS specific wheels."""
54
55 def has_ext_modules(self):
56 return True
57
58
59 setup(
60 name=project_name,
61 version=version['__version__'],
62 description=DOCLINES[0],
63 long_description='\n'.join(DOCLINES[2:]),
64 author='Google Inc.',
65 author_email='[email protected]',
66 packages=find_packages(),
67 install_requires=REQUIRED_PACKAGES,
68 include_package_data=True,
69 zip_safe=False,
70 distclass=BinaryDistribution,
71 classifiers=[
72 'Development Status :: 4 - Beta',
73 'Intended Audience :: Developers',
74 'Intended Audience :: Education',
75 'Intended Audience :: Science/Research',
76 'License :: OSI Approved :: Apache Software License',
77 'Programming Language :: Python :: 2.7',
78 'Programming Language :: Python :: 3.4',
79 'Programming Language :: Python :: 3.5',
80 'Programming Language :: Python :: 3.6',
81 'Programming Language :: Python :: 3.7',
82 'Topic :: Scientific/Engineering :: Mathematics',
83 'Topic :: Software Development :: Libraries :: Python Modules',
84 'Topic :: Software Development :: Libraries',
85 ],
86 license='Apache 2.0',
87 keywords='tensorflow addons machine learning',
88 )
89
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,10 +17,10 @@
TensorFlow Addons is a repository of contributions that conform to
well-established API patterns,but implement new functionality not available in
core TensorFlow.TensorFlow natively supports a large number of operators,
-layers, metrics, losses, and optimizers. However, in a fast movingfield like
+layers, metrics, losses, and optimizers. However, in a fast moving field like
ML, there are many interesting new developments that cannot be integrated into
core TensorFlow (because their broad applicability is not yet clear, or it is
-mostly used by a smallersubset of the community).
+mostly used by a smaller subset of the community).
"""
from __future__ import absolute_import
@@ -28,7 +28,9 @@
from __future__ import print_function
import os
+import sys
+from datetime import datetime
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
@@ -46,7 +48,13 @@
'six >= 1.10.0',
]
-project_name = 'tensorflow-addons'
+if '--nightly' in sys.argv:
+ project_name = 'tfa-nightly'
+ nightly_idx = sys.argv.index('--nightly')
+ sys.argv.pop(nightly_idx)
+ version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
+else:
+ project_name = 'tensorflow-addons'
class BinaryDistribution(Distribution):
@@ -78,7 +86,6 @@
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,10 +17,10 @@\n TensorFlow Addons is a repository of contributions that conform to\n well-established API patterns,but implement new functionality not available in\n core TensorFlow.TensorFlow natively supports a large number of operators,\n-layers, metrics, losses, and optimizers. However, in a fast movingfield like\n+layers, metrics, losses, and optimizers. However, in a fast moving field like\n ML, there are many interesting new developments that cannot be integrated into\n core TensorFlow (because their broad applicability is not yet clear, or it is\n-mostly used by a smallersubset of the community).\n+mostly used by a smaller subset of the community).\n \"\"\"\n \n from __future__ import absolute_import\n@@ -28,7 +28,9 @@\n from __future__ import print_function\n \n import os\n+import sys\n \n+from datetime import datetime\n from setuptools import find_packages\n from setuptools import setup\n from setuptools.dist import Distribution\n@@ -46,7 +48,13 @@\n 'six >= 1.10.0',\n ]\n \n-project_name = 'tensorflow-addons'\n+if '--nightly' in sys.argv:\n+ project_name = 'tfa-nightly'\n+ nightly_idx = sys.argv.index('--nightly')\n+ sys.argv.pop(nightly_idx)\n+ version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n+else:\n+ project_name = 'tensorflow-addons'\n \n \n class BinaryDistribution(Distribution):\n@@ -78,7 +86,6 @@\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n- 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n", "issue": "Automate Build Process\nCurrently we have no automated process for building Addons across python version and operating systems. Going forward we'll want this process to be automated.. but it may be challenging for us to start builds without access to the Google internal tooling.\r\n\r\nWe could conceivably use Travis... but if we can keep consistent CI that would be ideal.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons \n\nTensorFlow Addons is a repository of contributions that conform to\nwell-established API patterns,but implement new functionality not available in\ncore TensorFlow.TensorFlow natively supports a large number of operators,\nlayers, metrics, losses, and optimizers. 
However, in a fast movingfield like\nML, there are many interesting new developments that cannot be integrated into\ncore TensorFlow (because their broad applicability is not yet clear, or it is\nmostly used by a smallersubset of the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\n\nDOCLINES = __doc__.split('\\n')\n\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nproject_name = 'tensorflow-addons'\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]} | 1,442 | 431 |
gh_patches_debug_55042 | rasdani/github-patches | git_diff | pallets__werkzeug-1798 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New Microsoft Edge User Agent
## Background
Microsoft Edge is now based on Chromium, and its user agent string has been updated.
`Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68`
## Simple Code
```python
@app.route('/browser')
def browser():
from flask import request
ua = request.user_agent
return jsonify({
'browser': ua.browser,
'platform': ua.platform,
'user_agent': ua.string,
'version': ua.version,
})
```
## Expected Result
```json
{
"browser": "edge",
"platform": "windows",
"user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68",
"version": "81.0.416.68"
}
```
| Key | Value |
| --- | --- |
| browser | **edge** |
| platform | windows |
| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |
| version | **81.0.416.68** |
## Actual Result
```json
{
"browser": "chrome",
"platform": "windows",
"user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68",
"version": "81.0.4044.129"
}
```
| Key | Value |
| --- | --- |
| browser | **chrome** |
| platform | windows |
| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |
| version | **81.0.4044.129** |
## Environment
- Windows 10 Pro 1909
- Python 3.6.6
- Werkzeug 0.16.1
- Flask 1.1.1
### Related Issues
#818, #1556
</issue>
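The mismatch can also be reproduced directly against Werkzeug's parser, without going through Flask. A minimal sketch, assuming Werkzeug 0.16.x is installed as in the environment above:

```python
# The parser tries its 'edge' pattern, but the new UA string only contains
# 'Edg/', so matching falls through to the 'chrome|crios' entry instead.
from werkzeug.useragents import UserAgentParser

ua = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68"
)
platform, browser, version, language = UserAgentParser()(ua)
print(browser, version)  # prints: chrome 81.0.4044.129 – expected: edge 81.0.416.68
```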
<code>
[start of src/werkzeug/useragents.py]
1 # -*- coding: utf-8 -*-
2 """
3 werkzeug.useragents
4 ~~~~~~~~~~~~~~~~~~~
5
6 This module provides a helper to inspect user agent strings. This module
7 is far from complete but should work for most of the currently available
8 browsers.
9
10
11 :copyright: 2007 Pallets
12 :license: BSD-3-Clause
13 """
14 import re
15
16
17 class UserAgentParser(object):
18 """A simple user agent parser. Used by the `UserAgent`."""
19
20 platforms = (
21 (" cros ", "chromeos"),
22 ("iphone|ios", "iphone"),
23 ("ipad", "ipad"),
24 (r"darwin|mac|os\s*x", "macos"),
25 ("win", "windows"),
26 (r"android", "android"),
27 ("netbsd", "netbsd"),
28 ("openbsd", "openbsd"),
29 ("freebsd", "freebsd"),
30 ("dragonfly", "dragonflybsd"),
31 ("(sun|i86)os", "solaris"),
32 (r"x11|lin(\b|ux)?", "linux"),
33 (r"nintendo\s+wii", "wii"),
34 ("irix", "irix"),
35 ("hp-?ux", "hpux"),
36 ("aix", "aix"),
37 ("sco|unix_sv", "sco"),
38 ("bsd", "bsd"),
39 ("amiga", "amiga"),
40 ("blackberry|playbook", "blackberry"),
41 ("symbian", "symbian"),
42 )
43 browsers = (
44 ("googlebot", "google"),
45 ("msnbot", "msn"),
46 ("yahoo", "yahoo"),
47 ("ask jeeves", "ask"),
48 (r"aol|america\s+online\s+browser", "aol"),
49 (r"opera|opr", "opera"),
50 ("edge", "edge"),
51 ("chrome|crios", "chrome"),
52 ("seamonkey", "seamonkey"),
53 ("firefox|firebird|phoenix|iceweasel", "firefox"),
54 ("galeon", "galeon"),
55 ("safari|version", "safari"),
56 ("webkit", "webkit"),
57 ("camino", "camino"),
58 ("konqueror", "konqueror"),
59 ("k-meleon", "kmeleon"),
60 ("netscape", "netscape"),
61 (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
62 ("lynx", "lynx"),
63 ("links", "links"),
64 ("Baiduspider", "baidu"),
65 ("bingbot", "bing"),
66 ("mozilla", "mozilla"),
67 )
68
69 _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
70 _language_re = re.compile(
71 r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
72 r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
73 )
74
75 def __init__(self):
76 self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
77 self.browsers = [
78 (b, re.compile(self._browser_version_re % a, re.I))
79 for a, b in self.browsers
80 ]
81
82 def __call__(self, user_agent):
83 for platform, regex in self.platforms: # noqa: B007
84 match = regex.search(user_agent)
85 if match is not None:
86 break
87 else:
88 platform = None
89 for browser, regex in self.browsers: # noqa: B007
90 match = regex.search(user_agent)
91 if match is not None:
92 version = match.group(1)
93 break
94 else:
95 browser = version = None
96 match = self._language_re.search(user_agent)
97 if match is not None:
98 language = match.group(1) or match.group(2)
99 else:
100 language = None
101 return platform, browser, version, language
102
103
104 class UserAgent(object):
105 """Represents a user agent. Pass it a WSGI environment or a user agent
106 string and you can inspect some of the details from the user agent
107 string via the attributes. The following attributes exist:
108
109 .. attribute:: string
110
111 the raw user agent string
112
113 .. attribute:: platform
114
115 the browser platform. ``None`` if not recognized.
116 The following platforms are currently recognized:
117
118 - `aix`
119 - `amiga`
120 - `android`
121 - `blackberry`
122 - `bsd`
123 - `chromeos`
124 - `dragonflybsd`
125 - `freebsd`
126 - `hpux`
127 - `ipad`
128 - `iphone`
129 - `irix`
130 - `linux`
131 - `macos`
132 - `netbsd`
133 - `openbsd`
134 - `sco`
135 - `solaris`
136 - `symbian`
137 - `wii`
138 - `windows`
139
140 .. attribute:: browser
141
142 the name of the browser. ``None`` if not recognized.
143 The following browsers are currently recognized:
144
145 - `aol` *
146 - `ask` *
147 - `baidu` *
148 - `bing` *
149 - `camino`
150 - `chrome`
151 - `edge`
152 - `firefox`
153 - `galeon`
154 - `google` *
155 - `kmeleon`
156 - `konqueror`
157 - `links`
158 - `lynx`
159 - `mozilla`
160 - `msie`
161 - `msn`
162 - `netscape`
163 - `opera`
164 - `safari`
165 - `seamonkey`
166 - `webkit`
167 - `yahoo` *
168
169 (Browsers marked with a star (``*``) are crawlers.)
170
171 .. attribute:: version
172
173 the version of the browser. ``None`` if not recognized.
174
175 .. attribute:: language
176
177 the language of the browser. ``None`` if not recognized.
178 """
179
180 _parser = UserAgentParser()
181
182 def __init__(self, environ_or_string):
183 if isinstance(environ_or_string, dict):
184 environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
185 self.string = environ_or_string
186 self.platform, self.browser, self.version, self.language = self._parser(
187 environ_or_string
188 )
189
190 def to_header(self):
191 return self.string
192
193 def __str__(self):
194 return self.string
195
196 def __nonzero__(self):
197 return bool(self.browser)
198
199 __bool__ = __nonzero__
200
201 def __repr__(self):
202 return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
203
[end of src/werkzeug/useragents.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -47,7 +47,7 @@
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
(r"opera|opr", "opera"),
- ("edge", "edge"),
+ ("edge|edg", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
("firefox|firebird|phoenix|iceweasel", "firefox"),
| {"golden_diff": "diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py\n--- a/src/werkzeug/useragents.py\n+++ b/src/werkzeug/useragents.py\n@@ -47,7 +47,7 @@\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n- (\"edge\", \"edge\"),\n+ (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n", "issue": "New Microsoft Edge User Agent\n## Background\r\nMicrosoft Edge now based on Chromium and the user agent string is updated.\r\n`Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68`\r\n\r\n## Simple Code\r\n```python\r\[email protected]('/browser')\r\ndef browser():\r\n from flask import request\r\n ua = request.user_agent\r\n return jsonify({\r\n 'browser': ua.browser,\r\n 'platform': ua.platform,\r\n 'user_agent': ua.string,\r\n 'version': ua.version,\r\n })\r\n```\r\n\r\n## Expected Result\r\n```json\r\n{\r\n \"browser\": \"edge\", \r\n \"platform\": \"windows\", \r\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68\", \r\n \"version\": \"81.0.416.68\"\r\n}\r\n```\r\n\r\n| Key | Value |\r\n| --- | --- |\r\n| browser | **edge** |\r\n| platform | windows |\r\n| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |\r\n| version | **81.0.416.68** |\r\n\r\n\r\n## Actual Result\r\n```json\r\n{\r\n \"browser\": \"chrome\", \r\n \"platform\": \"windows\", \r\n \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68\", \r\n \"version\": \"81.0.4044.129\"\r\n}\r\n```\r\n\r\n| Key | Value |\r\n| --- | --- |\r\n| browser | **chrome** |\r\n| platform | windows |\r\n| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |\r\n| version | **81.0.4044.129** |\r\n\r\n## Environment\r\n- Windows 10 Pro 1909\r\n- Python 3.6.6\r\n- Werkzeug 0.16.1\r\n- Flask 1.1.1\r\n\r\n### Related Issues\r\n#818, #1556\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. 
Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. 
``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n", "path": "src/werkzeug/useragents.py"}]} | 3,349 | 151 |
gh_patches_debug_35290 | rasdani/github-patches | git_diff | docarray__docarray-979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug(v2): relative file paths in url types
Passing relative file paths gives a validation error:
```python
from docarray import Image
url = 'Test/05978.jpg'
img = Image(url=url)
```
```text
Test/05978.jpg
Traceback (most recent call last):
File "/home/johannes/.config/JetBrains/PyCharmCE2022.3/scratches/scratch_116.py", line 12, in <module>
img = Image(url=url)
File "pydantic/main.py", line 342, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for Image
url
unsupported operand type(s) for +: 'NoneType' and 'str' (type=type_error)
```
</issue>
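The `NoneType + str` error comes from pydantic v1's `AnyUrl.build()`, which (in the v1 releases docarray targets) starts by concatenating `scheme + '://'`. Because `validate_parts` in the file below deliberately allows a missing scheme, a bare relative path can reach `build()` with `scheme=None`. A minimal illustration of that mechanism against plain pydantic — not the exact docarray call chain, and the `host` value is only a stand-in:

```python
# Reproduces the underlying TypeError in isolation.
from pydantic import AnyUrl

try:
    AnyUrl.build(scheme=None, host="Test")
except TypeError as exc:
    print(exc)  # unsupported operand type(s) for +: 'NoneType' and 'str'
```

One natural fix direction is to override `build()` so that a missing scheme falls back to an empty string, which is essentially what the patch later in this record does.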
<code>
[start of docarray/typing/url/any_url.py]
1 from typing import TYPE_CHECKING, Type, TypeVar
2
3 from pydantic import AnyUrl as BaseAnyUrl
4 from pydantic import errors, parse_obj_as
5
6 from docarray.typing.abstract_type import AbstractType
7
8 if TYPE_CHECKING:
9 from pydantic.networks import Parts
10
11 from docarray.proto import NodeProto
12
13 T = TypeVar('T', bound='AnyUrl')
14
15
16 class AnyUrl(BaseAnyUrl, AbstractType):
17 host_required = (
18 False # turn off host requirement to allow passing of local paths as URL
19 )
20
21 def _to_node_protobuf(self) -> 'NodeProto':
22 """Convert Document into a NodeProto protobuf message. This function should
23 be called when the Document is nested into another Document that need to
24 be converted into a protobuf
25
26 :return: the nested item protobuf message
27 """
28 from docarray.proto import NodeProto
29
30 return NodeProto(any_url=str(self))
31
32 @classmethod
33 def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
34 """
35 A method used to validate parts of a URL.
36 Our URLs should be able to function both in local and remote settings.
37 Therefore, we allow missing `scheme`, making it possible to pass a file path.
38 """
39 scheme = parts['scheme']
40 if scheme is None:
41 pass # allow missing scheme, unlike pydantic
42
43 elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
44 raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
45
46 if validate_port:
47 cls._validate_port(parts['port'])
48
49 user = parts['user']
50 if cls.user_required and user is None:
51 raise errors.UrlUserInfoError()
52
53 return parts
54
55 @classmethod
56 def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
57 """
58 read url from a proto msg
59 :param pb_msg:
60 :return: url
61 """
62 return parse_obj_as(cls, pb_msg)
63
[end of docarray/typing/url/any_url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docarray/typing/url/any_url.py b/docarray/typing/url/any_url.py
--- a/docarray/typing/url/any_url.py
+++ b/docarray/typing/url/any_url.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Type, TypeVar
+from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
@@ -34,11 +34,14 @@
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
- Therefore, we allow missing `scheme`, making it possible to pass a file path.
+ Therefore, we allow missing `scheme`, making it possible to pass a file
+ path without prefix.
+ If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
- pass # allow missing scheme, unlike pydantic
+ # allow missing scheme, unlike pydantic
+ pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
@@ -52,6 +55,44 @@
return parts
+ @classmethod
+ def build(
+ cls,
+ *,
+ scheme: str,
+ user: Optional[str] = None,
+ password: Optional[str] = None,
+ host: str,
+ port: Optional[str] = None,
+ path: Optional[str] = None,
+ query: Optional[str] = None,
+ fragment: Optional[str] = None,
+ **_kwargs: str,
+ ) -> str:
+ """
+ Build a URL from its parts.
+ The only difference from the pydantic implementation is that we allow
+ missing `scheme`, making it possible to pass a file path without prefix.
+ """
+
+ # allow missing scheme, unlike pydantic
+ scheme_ = scheme if scheme is not None else ''
+ url = super().build(
+ scheme=scheme_,
+ user=user,
+ password=password,
+ host=host,
+ port=port,
+ path=path,
+ query=query,
+ fragment=fragment,
+ **_kwargs,
+ )
+ if scheme is None and url.startswith('://'):
+ # remove the `://` prefix, since scheme is missing
+ url = url[3:]
+ return url
+
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
| {"golden_diff": "diff --git a/docarray/typing/url/any_url.py b/docarray/typing/url/any_url.py\n--- a/docarray/typing/url/any_url.py\n+++ b/docarray/typing/url/any_url.py\n@@ -1,4 +1,4 @@\n-from typing import TYPE_CHECKING, Type, TypeVar\n+from typing import TYPE_CHECKING, Optional, Type, TypeVar\n \n from pydantic import AnyUrl as BaseAnyUrl\n from pydantic import errors, parse_obj_as\n@@ -34,11 +34,14 @@\n \"\"\"\n A method used to validate parts of a URL.\n Our URLs should be able to function both in local and remote settings.\n- Therefore, we allow missing `scheme`, making it possible to pass a file path.\n+ Therefore, we allow missing `scheme`, making it possible to pass a file\n+ path without prefix.\n+ If `scheme` is missing, we assume it is a local file path.\n \"\"\"\n scheme = parts['scheme']\n if scheme is None:\n- pass # allow missing scheme, unlike pydantic\n+ # allow missing scheme, unlike pydantic\n+ pass\n \n elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:\n raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))\n@@ -52,6 +55,44 @@\n \n return parts\n \n+ @classmethod\n+ def build(\n+ cls,\n+ *,\n+ scheme: str,\n+ user: Optional[str] = None,\n+ password: Optional[str] = None,\n+ host: str,\n+ port: Optional[str] = None,\n+ path: Optional[str] = None,\n+ query: Optional[str] = None,\n+ fragment: Optional[str] = None,\n+ **_kwargs: str,\n+ ) -> str:\n+ \"\"\"\n+ Build a URL from its parts.\n+ The only difference from the pydantic implementation is that we allow\n+ missing `scheme`, making it possible to pass a file path without prefix.\n+ \"\"\"\n+\n+ # allow missing scheme, unlike pydantic\n+ scheme_ = scheme if scheme is not None else ''\n+ url = super().build(\n+ scheme=scheme_,\n+ user=user,\n+ password=password,\n+ host=host,\n+ port=port,\n+ path=path,\n+ query=query,\n+ fragment=fragment,\n+ **_kwargs,\n+ )\n+ if scheme is None and url.startswith('://'):\n+ # remove the `://` prefix, since scheme is missing\n+ url = url[3:]\n+ return url\n+\n @classmethod\n def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:\n \"\"\"\n", "issue": "bug(v2): relative file paths in url types\nPassing relative file paths gives a validation error:\n\n```python\nfrom docarray import Image\n\nurl = 'Test/05978.jpg'\nimg = Image(url=url)\n```\n\n```text\nTest/05978.jpg\nTraceback (most recent call last):\n File \"/home/johannes/.config/JetBrains/PyCharmCE2022.3/scratches/scratch_116.py\", line 12, in <module>\n img = Image(url=url)\n File \"pydantic/main.py\", line 342, in pydantic.main.BaseModel.__init__\npydantic.error_wrappers.ValidationError: 1 validation error for Image\nurl\n unsupported operand type(s) for +: 'NoneType' and 'str' (type=type_error)\n```\n\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Type, TypeVar\n\nfrom pydantic import AnyUrl as BaseAnyUrl\nfrom pydantic import errors, parse_obj_as\n\nfrom docarray.typing.abstract_type import AbstractType\n\nif TYPE_CHECKING:\n from pydantic.networks import Parts\n\n from docarray.proto import NodeProto\n\nT = TypeVar('T', bound='AnyUrl')\n\n\nclass AnyUrl(BaseAnyUrl, AbstractType):\n host_required = (\n False # turn off host requirement to allow passing of local paths as URL\n )\n\n def _to_node_protobuf(self) -> 'NodeProto':\n \"\"\"Convert Document into a NodeProto protobuf message. 
This function should\n be called when the Document is nested into another Document that need to\n be converted into a protobuf\n\n :return: the nested item protobuf message\n \"\"\"\n from docarray.proto import NodeProto\n\n return NodeProto(any_url=str(self))\n\n @classmethod\n def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':\n \"\"\"\n A method used to validate parts of a URL.\n Our URLs should be able to function both in local and remote settings.\n Therefore, we allow missing `scheme`, making it possible to pass a file path.\n \"\"\"\n scheme = parts['scheme']\n if scheme is None:\n pass # allow missing scheme, unlike pydantic\n\n elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:\n raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))\n\n if validate_port:\n cls._validate_port(parts['port'])\n\n user = parts['user']\n if cls.user_required and user is None:\n raise errors.UrlUserInfoError()\n\n return parts\n\n @classmethod\n def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:\n \"\"\"\n read url from a proto msg\n :param pb_msg:\n :return: url\n \"\"\"\n return parse_obj_as(cls, pb_msg)\n", "path": "docarray/typing/url/any_url.py"}]} | 1,290 | 609 |
gh_patches_debug_8271 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1310 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase the data upload limit.
## Current Behaviour
By default, Django accepts only 2.5 MB of data per request uploaded to the web app. Refer [here](https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size) for more info.
## Effects
Due to the low upload limit, the zip file used for challenge creation fails to upload because its size exceeds the default limit.
## Expected Behaviour
The upload limit must be increased to 10 MB.
</issue>
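The change itself is a pair of Django settings. A sketch of what it could look like in `settings/common.py` — the 10 MB figure mirrors the request above and is illustrative only:

```python
# DATA_UPLOAD_MAX_MEMORY_SIZE caps the request body Django will accept;
# FILE_UPLOAD_MAX_MEMORY_SIZE is the threshold above which uploads spill
# to a temporary file. Both default to 2.5 MB.
# https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size
DATA_UPLOAD_MAX_MEMORY_SIZE = 10 * 1024 * 1024  # 10 MB
FILE_UPLOAD_MAX_MEMORY_SIZE = 10 * 1024 * 1024  # keep file uploads in step
```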
<code>
[start of settings/common.py]
1 """
2 Django settings for evalai project.
3
4 Generated by 'django-admin startproject' using Django 1.10.2.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.10/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.10/ref/settings/
11 """
12
13 import datetime
14 import os
15 import sys
16
17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19 APPS_DIR = os.path.join(BASE_DIR, 'apps')
20
21 sys.path.append(APPS_DIR)
22
23 # Quick-start development settings - unsuitable for production
24 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
25
26 # SECURITY WARNING: keep the secret key used in production secret!
27 SECRET_KEY = os.environ.get('SECRET_KEY', 'random_secret_key')
28
29 # SECURITY WARNING: don't run with debug turned on in production!
30 DEBUG = True
31
32 ALLOWED_HOSTS = []
33
34
35 # Application definition
36
37 DEFAULT_APPS = [
38 'django.contrib.admin',
39 'django.contrib.auth',
40 'django.contrib.contenttypes',
41 'django.contrib.sessions',
42 'django.contrib.messages',
43 'django.contrib.staticfiles',
44 'django.contrib.sites',
45 ]
46
47 OUR_APPS = [
48 'accounts',
49 'analytics',
50 'base',
51 'challenges',
52 'hosts',
53 'jobs',
54 'participants',
55 'web',
56 ]
57
58 THIRD_PARTY_APPS = [
59 'allauth',
60 'allauth.account',
61 'corsheaders',
62 'import_export',
63 'rest_auth',
64 'rest_auth.registration',
65 'rest_framework.authtoken',
66 'rest_framework',
67 'rest_framework_docs',
68 'rest_framework_expiring_authtoken',
69 ]
70
71 INSTALLED_APPS = DEFAULT_APPS + OUR_APPS + THIRD_PARTY_APPS
72
73 MIDDLEWARE = [
74 'corsheaders.middleware.CorsMiddleware',
75 'django.middleware.security.SecurityMiddleware',
76 'django.contrib.sessions.middleware.SessionMiddleware',
77 'django.middleware.common.CommonMiddleware',
78 'django.middleware.csrf.CsrfViewMiddleware',
79 'django.contrib.auth.middleware.AuthenticationMiddleware',
80 'django.contrib.messages.middleware.MessageMiddleware',
81 'django.middleware.clickjacking.XFrameOptionsMiddleware',
82 ]
83
84 ROOT_URLCONF = 'evalai.urls'
85
86
87 TEMPLATES = [
88 {
89 'BACKEND': 'django.template.backends.django.DjangoTemplates',
90 'DIRS': [],
91 'APP_DIRS': True,
92 'OPTIONS': {
93 'context_processors': [
94 'django.template.context_processors.debug',
95 'django.template.context_processors.request',
96 'django.contrib.auth.context_processors.auth',
97 'django.contrib.messages.context_processors.messages',
98 ],
99 },
100 },
101 ]
102
103 WSGI_APPLICATION = 'evalai.wsgi.application'
104
105
106 # Password validation
107 # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
108
109 AUTH_PASSWORD_VALIDATORS = [
110 {
111 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa
112 },
113 {
114 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa
115 },
116 {
117 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa
118 },
119 {
120 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa
121 },
122 ]
123
124
125 # Internationalization
126 # https://docs.djangoproject.com/en/1.10/topics/i18n/
127
128 LANGUAGE_CODE = 'en-us'
129
130 TIME_ZONE = 'UTC'
131
132 USE_I18N = True
133
134 USE_L10N = True
135
136 USE_TZ = True
137
138 # Static files (CSS, JavaScript, Images)
139 # https://docs.djangoproject.com/en/1.10/howto/static-files/
140
141 STATIC_URL = '/static/'
142 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
143 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
144 MEDIA_URL = "/media/"
145
146 SITE_ID = 1
147
148 REST_FRAMEWORK = {
149 'DEFAULT_PAGINATION_CLASS': (
150 'rest_framework.pagination.LimitOffsetPagination'),
151 'PAGE_SIZE': 10,
152 'DEFAULT_PERMISSION_CLASSES': [
153 'rest_framework.permissions.IsAuthenticatedOrReadOnly'
154 ],
155 'DEFAULT_AUTHENTICATION_CLASSES': [
156 'rest_framework_expiring_authtoken.authentication.ExpiringTokenAuthentication',
157 ],
158 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
159 'DEFAULT_THROTTLE_CLASSES': (
160 'rest_framework.throttling.AnonRateThrottle',
161 'rest_framework.throttling.UserRateThrottle'
162 ),
163 'DEFAULT_THROTTLE_RATES': {
164 'anon': '100/minute',
165 'user': '100/minute'
166 },
167 'DEFAULT_RENDERER_CLASSES': (
168 'rest_framework.renderers.JSONRenderer',
169 )
170 }
171
172 # ALLAUTH SETTINGS
173 ACCOUNT_EMAIL_REQUIRED = True
174 OLD_PASSWORD_FIELD_ENABLED = True
175 ACCOUNT_CONFIRM_EMAIL_ON_GET = True
176 ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/api/auth/email-confirmed/'
177 ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/api/auth/email-confirmed/'
178
179 AUTHENTICATION_BACKENDS = (
180 # Needed to login by username in Django admin, regardless of `allauth`
181 'django.contrib.auth.backends.ModelBackend',
182 # `allauth` specific authentication methods, such as login by e-mail
183 'allauth.account.auth_backends.AuthenticationBackend',
184 )
185
186 # CORS Settings
187 CORS_ORIGIN_ALLOW_ALL = True
188
189 # REST Framework Expiring Tokens Configuration
190 EXPIRING_TOKEN_LIFESPAN = datetime.timedelta(days=7)
191
192 # Logging
193 LOGGING = {
194 'version': 1,
195 'disable_existing_loggers': False,
196 'root': {
197 'level': 'INFO',
198 'handlers': ['console'],
199 },
200 'filters': {
201 'require_debug_false': {
202 '()': 'django.utils.log.RequireDebugFalse',
203 },
204 'require_debug_true': {
205 '()': 'django.utils.log.RequireDebugTrue',
206 }
207 },
208 'formatters': {
209 'simple': {
210 'format': '[%(asctime)s] %(levelname)s %(message)s',
211 'datefmt': '%Y-%m-%d %H:%M:%S'
212 },
213 'verbose': {
214 'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
215 'datefmt': '%Y-%m-%d %H:%M:%S'
216 }
217 },
218 'handlers': {
219 'console': {
220 'level': 'INFO',
221 'filters': ['require_debug_true'],
222 'class': 'logging.StreamHandler',
223 'formatter': 'simple'
224 },
225 'logfile': {
226 'level': 'DEBUG',
227 'class': 'logging.handlers.RotatingFileHandler',
228 'filename': "/tmp/logfile",
229 'maxBytes': 50000,
230 'backupCount': 10,
231 'formatter': 'verbose'
232 },
233 'mail_admins': {
234 'level': 'ERROR',
235 'class': 'django.utils.log.AdminEmailHandler',
236 'filters': ['require_debug_false'],
237 }
238 },
239 'loggers': {
240 'django': {
241 'handlers': ['console'],
242 'propagate': False,
243 },
244 'django.request': {
245 'handlers': ['mail_admins'],
246 'level': 'ERROR',
247 'propagate': False,
248 },
249 'django.security': {
250 'handlers': ['mail_admins'],
251 'level': 'ERROR',
252 'propagate': False,
253 },
254 'django.db.backends': {
255 'handlers': ['mail_admins'],
256 'level': 'ERROR',
257 'propagate': False,
258 }
259 }
260 }
261
262 CACHES = {
263 'default': {
264 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
265 }
266 }
267
268 RABBITMQ_PARAMETERS = {
269 'HOST': os.environ.get("RABBITMQ_HOST", 'localhost'),
270 'EVALAI_EXCHANGE': {
271 'NAME': 'evalai_submissions',
272 'TYPE': 'topic',
273 },
274 'SUBMISSION_QUEUE': 'submission_task_queue',
275 }
276
277 # To make username field read-only, customized serializer is defined.
278 REST_AUTH_SERIALIZERS = {
279 'USER_DETAILS_SERIALIZER': 'accounts.serializers.ProfileSerializer',
280 }
281
[end of settings/common.py]
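Note that the RABBITMQ_PARAMETERS block near the end of the listing above only declares names; nothing in this settings module opens a connection. A minimal sketch of how a submission worker might consume those values follows. The pika client, the durability flag and the routing-key pattern are assumptions made for illustration, not something this file prescribes.

# Sketch: consuming RABBITMQ_PARAMETERS from the settings above (pika is an assumption).
# Assumes DJANGO_SETTINGS_MODULE is already configured for this project.
import pika
from django.conf import settings

params = settings.RABBITMQ_PARAMETERS
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host=params["HOST"])
)
channel = connection.channel()

# Declare the topic exchange and the submission queue named in the settings.
channel.exchange_declare(
    exchange=params["EVALAI_EXCHANGE"]["NAME"],
    exchange_type=params["EVALAI_EXCHANGE"]["TYPE"],
)
channel.queue_declare(queue=params["SUBMISSION_QUEUE"], durable=True)
channel.queue_bind(
    queue=params["SUBMISSION_QUEUE"],
    exchange=params["EVALAI_EXCHANGE"]["NAME"],
    routing_key="submission.*.*",  # illustrative pattern, not defined in settings
)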
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/settings/common.py b/settings/common.py
--- a/settings/common.py
+++ b/settings/common.py
@@ -274,6 +274,11 @@
'SUBMISSION_QUEUE': 'submission_task_queue',
}
+# The maximum size in bytes for request body
+# https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size
+FILE_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB
+DATA_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB
+
# To make usermame field read-only, customized serializer is defined.
REST_AUTH_SERIALIZERS = {
'USER_DETAILS_SERIALIZER': 'accounts.serializers.ProfileSerializer',
| {"golden_diff": "diff --git a/settings/common.py b/settings/common.py\n--- a/settings/common.py\n+++ b/settings/common.py\n@@ -274,6 +274,11 @@\n 'SUBMISSION_QUEUE': 'submission_task_queue',\n }\n \n+# The maximum size in bytes for request body\n+# https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size\n+FILE_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB\n+DATA_UPLOAD_MAX_MEMORY_SIZE = 524288000 # 500 MB\n+\n # To make usermame field read-only, customized serializer is defined.\n REST_AUTH_SERIALIZERS = {\n 'USER_DETAILS_SERIALIZER': 'accounts.serializers.ProfileSerializer',\n", "issue": "Increase the data upload limit.\n## Current Behaviour\r\nBy default django supports only 2.5 MB of the data to be uploaded on the web app. Refer [here](https://docs.djangoproject.com/en/1.10/ref/settings/#data-upload-max-memory-size) for more info.\r\n\r\n## Effects\r\nDue to the low upload limit the file in challenge creation using zip isn't being uploaded on the app as the size exceeds.\r\n\r\n## Expected Behaviour\r\nThe upload limit must be increased to 10MB.\n", "before_files": [{"content": "\"\"\"\nDjango settings for evalai project.\n\nGenerated by 'django-admin startproject' using Django 1.10.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport datetime\nimport os\nimport sys\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nAPPS_DIR = os.path.join(BASE_DIR, 'apps')\n\nsys.path.append(APPS_DIR)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'random_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nDEFAULT_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n]\n\nOUR_APPS = [\n 'accounts',\n 'analytics',\n 'base',\n 'challenges',\n 'hosts',\n 'jobs',\n 'participants',\n 'web',\n]\n\nTHIRD_PARTY_APPS = [\n 'allauth',\n 'allauth.account',\n 'corsheaders',\n 'import_export',\n 'rest_auth',\n 'rest_auth.registration',\n 'rest_framework.authtoken',\n 'rest_framework',\n 'rest_framework_docs',\n 'rest_framework_expiring_authtoken',\n]\n\nINSTALLED_APPS = DEFAULT_APPS + OUR_APPS + THIRD_PARTY_APPS\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'evalai.urls'\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 
'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'evalai.wsgi.application'\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = \"/media/\"\n\nSITE_ID = 1\n\nREST_FRAMEWORK = {\n 'DEFAULT_PAGINATION_CLASS': (\n 'rest_framework.pagination.LimitOffsetPagination'),\n 'PAGE_SIZE': 10,\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly'\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework_expiring_authtoken.authentication.ExpiringTokenAuthentication',\n ],\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.AnonRateThrottle',\n 'rest_framework.throttling.UserRateThrottle'\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'anon': '100/minute',\n 'user': '100/minute'\n },\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n )\n}\n\n# ALLAUTH SETTINGS\nACCOUNT_EMAIL_REQUIRED = True\nOLD_PASSWORD_FIELD_ENABLED = True\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\nACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/api/auth/email-confirmed/'\nACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/api/auth/email-confirmed/'\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# REST Framework Expiring Tokens Configuration\nEXPIRING_TOKEN_LIFESPAN = datetime.timedelta(days=7)\n\n# Logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'root': {\n 'level': 'INFO',\n 'handlers': ['console'],\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n }\n },\n 'formatters': {\n 'simple': {\n 'format': '[%(asctime)s] %(levelname)s %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n },\n 'verbose': {\n 'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'logfile': {\n 'level': 'DEBUG',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': \"/tmp/logfile\",\n 'maxBytes': 50000,\n 'backupCount': 10,\n 'formatter': 'verbose'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 
'filters': ['require_debug_false'],\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'django.security': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'django.db.backends': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': False,\n }\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n }\n}\n\nRABBITMQ_PARAMETERS = {\n 'HOST': os.environ.get(\"RABBITMQ_HOST\", 'localhost'),\n 'EVALAI_EXCHANGE': {\n 'NAME': 'evalai_submissions',\n 'TYPE': 'topic',\n },\n 'SUBMISSION_QUEUE': 'submission_task_queue',\n}\n\n# To make usermame field read-only, customized serializer is defined.\nREST_AUTH_SERIALIZERS = {\n 'USER_DETAILS_SERIALIZER': 'accounts.serializers.ProfileSerializer',\n}\n", "path": "settings/common.py"}]} | 3,167 | 171 |
gh_patches_debug_19647 | rasdani/github-patches | git_diff | cobbler__cobbler-3581 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Backport] /usr/lib/PXELINUX/linux.c32 does not exist, can't create a symlink to it
### Original feature issue
- Issue: #3574
- PR: #3576
### Target release
- [x] release33
- [ ] release32
- [ ] release30
### Reason
Stabilization for Debian of Cobbler 3.3.x.
</issue>
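For context, the error quoted in the title comes from the symlink helper in the code below, which refuses to link a target that does not exist. On Debian-style layouts /usr/lib/PXELINUX typically carries only pxelinux.0, while COM32 modules such as linux.c32 and libcom32.c32 ship with the Syslinux module files. A small sketch for checking where they actually live on a given host; the candidate directories are typical locations, not values read from Cobbler's settings:

# Sketch: locate Syslinux files across common install layouts (paths are typical, not configured).
import pathlib
from typing import Optional

CANDIDATE_DIRS = [
    pathlib.Path("/usr/lib/syslinux/modules/bios"),  # Debian/Ubuntu (syslinux-common)
    pathlib.Path("/usr/share/syslinux"),             # Fedora/openSUSE
    pathlib.Path("/usr/lib/PXELINUX"),               # usually only pxelinux.0 here
]

def find_module(name: str) -> Optional[pathlib.Path]:
    """Return the first existing location of a Syslinux file, or None."""
    for directory in CANDIDATE_DIRS:
        candidate = directory / name
        if candidate.exists():
            return candidate
    return None

for module in ("pxelinux.0", "linux.c32", "libcom32.c32"):
    print(module, "->", find_module(module))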
<code>
[start of cobbler/actions/mkloaders.py]
1 """Cobbler action to create bootable Grub2 images.
2
3 This action calls grub2-mkimage for all bootloader formats configured in
4 Cobbler's settings. See man(1) grub2-mkimage for available formats.
5 """
6 import logging
7 import pathlib
8 import re
9 import subprocess
10 import sys
11 import typing
12
13 from cobbler import utils
14
15
16 # NOTE: does not warrant being a class, but all Cobbler actions use a class's ".run()" as the entrypoint
17 class MkLoaders:
18 """
19 Action to create bootloader images.
20 """
21
22 def __init__(self, api):
23 """
24 MkLoaders constructor.
25
26 :param api: CobblerAPI instance for accessing settings
27 """
28 self.logger = logging.getLogger()
29 self.bootloaders_dir = pathlib.Path(api.settings().bootloaders_dir)
30 # GRUB 2
31 self.grub2_mod_dir = pathlib.Path(api.settings().grub2_mod_dir)
32 self.boot_loaders_formats: typing.Dict = api.settings().bootloaders_formats
33 self.modules: typing.List = api.settings().bootloaders_modules
34 # Syslinux
35 self.syslinux_folder = pathlib.Path(api.settings().syslinux_dir)
36 self.syslinux_memdisk_folder = pathlib.Path(api.settings().syslinux_memdisk_folder)
37 self.syslinux_pxelinux_folder = pathlib.Path(api.settings().syslinux_pxelinux_folder)
38 # Shim
39 self.shim_glob = pathlib.Path(api.settings().bootloaders_shim_folder)
40 self.shim_regex = re.compile(api.settings().bootloaders_shim_file)
41 # iPXE
42 self.ipxe_folder = pathlib.Path(api.settings().bootloaders_ipxe_folder)
43
44 def run(self):
45 """
46         Run GrubImages action. If the files or executables for the bootloader are not available, we bail out and skip the
47 creation after it is logged that this is not available.
48 """
49 self.create_directories()
50
51 self.make_shim()
52 self.make_ipxe()
53 self.make_syslinux()
54 self.make_grub()
55
56 def make_shim(self):
57 """
58 Create symlink of the shim bootloader in case it is available on the system.
59 """
60 # Check well-known locations
61 # Absolute paths are not supported BUT we can get around that: https://stackoverflow.com/a/51108375/4730773
62 parts = self.shim_glob.parts
63 start_at = 1 if self.shim_glob.is_absolute() else 0
64 bootloader_path_parts = pathlib.Path(*parts[start_at:])
65 results = sorted(pathlib.Path(self.shim_glob.root).glob(str(bootloader_path_parts)))
66 # If no match, then report and bail out.
67 if len(results) <= 0:
68 self.logger.info('Unable to find the folder which should be scanned for "shim.efi"! Bailing out of linking '
69 'the shim!')
70 return
71 # Now scan the folders with the regex
72 target_shim = None
73 for possible_folder in results:
74 for child in possible_folder.iterdir():
75 if self.shim_regex.search(str(child)):
76 target_shim = child.resolve()
77 break
78 # If no match is found report and return
79 if target_shim is None:
80 self.logger.info('Unable to find "shim.efi" file. Please adjust "bootloaders_shim_file" regex. Bailing out '
81 'of linking the shim!')
82 return
83 # Symlink the absolute target of the match
84 symlink(
85 target_shim,
86 self.bootloaders_dir.joinpath(pathlib.Path("grub/shim.efi")),
87 skip_existing=True
88 )
89
90 def make_ipxe(self):
91 """
92 Create symlink of the iPXE bootloader in case it is available on the system.
93 """
94 if not self.ipxe_folder.exists():
95 self.logger.info('ipxe directory did not exist. Please adjust the "bootloaders_ipxe_folder". Bailing out '
96 'of iPXE setup!')
97 return
98 symlink(
99 self.ipxe_folder.joinpath("undionly.kpxe"),
100 self.bootloaders_dir.joinpath(pathlib.Path("undionly.pxe")),
101 skip_existing=True
102 )
103
104 def make_syslinux(self):
105 """
106 Create symlink of the important syslinux bootloader files in case they are available on the system.
107 """
108 if not utils.command_existing("syslinux"):
109 self.logger.info("syslinux command not available. Bailing out of syslinux setup!")
110 return
111 syslinux_version = get_syslinux_version()
112 # Make modules
113 symlink(
114 self.syslinux_folder.joinpath("menu.c32"),
115 self.bootloaders_dir.joinpath("menu.c32"),
116 skip_existing=True
117 )
118 # According to https://wiki.syslinux.org/wiki/index.php?title=Library_modules,
119 # 'menu.c32' depends on 'libutil.c32'.
120 libutil_c32_path = self.syslinux_folder.joinpath("libutil.c32")
121 if syslinux_version > 4 and libutil_c32_path.exists():
122 symlink(
123 libutil_c32_path,
124 self.bootloaders_dir.joinpath("libutil.c32"),
125 skip_existing=True,
126 )
127 if syslinux_version < 5:
128 # This file is only required for Syslinux 5 and newer.
129 # Source: https://wiki.syslinux.org/wiki/index.php?title=Library_modules
130 self.logger.info('syslinux version 4 detected! Skip making symlink of "ldlinux.c32" file!')
131 else:
132 symlink(
133 self.syslinux_folder.joinpath("ldlinux.c32"),
134 self.bootloaders_dir.joinpath("ldlinux.c32"),
135 skip_existing=True
136 )
137 # Make memdisk
138 symlink(
139 self.syslinux_memdisk_folder.joinpath("memdisk"),
140 self.bootloaders_dir.joinpath("memdisk"),
141 skip_existing=True
142 )
143 # Make pxelinux.0
144 symlink(
145 self.syslinux_pxelinux_folder.joinpath("pxelinux.0"),
146 self.bootloaders_dir.joinpath("pxelinux.0"),
147 skip_existing=True
148 )
149 # Make linux.c32 for syslinux + wimboot
150 libcom32_c32_path = self.syslinux_folder.joinpath("libcom32.c32")
151 if syslinux_version > 4 and libcom32_c32_path.exists():
152 symlink(
153 self.syslinux_pxelinux_folder.joinpath("linux.c32"),
154 self.bootloaders_dir.joinpath("linux.c32"),
155 skip_existing=True,
156 )
157 # Make libcom32.c32
158 # 'linux.c32' depends on 'libcom32.c32'
159 symlink(
160 self.syslinux_pxelinux_folder.joinpath("libcom32.c32"),
161 self.bootloaders_dir.joinpath("libcom32.c32"),
162 skip_existing=True,
163 )
164
165 def make_grub(self):
166 """
167 Create symlink of the GRUB 2 bootloader in case it is available on the system. Additionally build the loaders
168 for other architectures if the modules to do so are available.
169 """
170 if not utils.command_existing("grub2-mkimage"):
171 self.logger.info("grub2-mkimage command not available. Bailing out of GRUB2 generation!")
172 return
173
174 for image_format, options in self.boot_loaders_formats.items():
175 bl_mod_dir = options.get("mod_dir", image_format)
176 mod_dir = self.grub2_mod_dir.joinpath(bl_mod_dir)
177 if not mod_dir.exists():
178 self.logger.info(
179                 'GRUB2 modules directory for arch "%s" did not exist. Skipping GRUB2 creation',
180 image_format
181 )
182 continue
183 try:
184 mkimage(
185 image_format,
186 self.bootloaders_dir.joinpath("grub", options["binary_name"]),
187 self.modules + options.get("extra_modules", []),
188 )
189 except subprocess.CalledProcessError:
190 self.logger.info('grub2-mkimage failed for arch "%s"! Maybe you did forget to install the grub modules '
191 'for the architecture?', image_format)
192 utils.log_exc()
193 # don't create module symlinks if grub2-mkimage is unsuccessful
194 continue
195 self.logger.info('Successfully built bootloader for arch "%s"!', image_format)
196
197 # Create a symlink for GRUB 2 modules
198 # assumes a single GRUB can be used to boot all kinds of distros
199 # if this assumption turns out incorrect, individual "grub" subdirectories are needed
200 symlink(
201 mod_dir,
202 self.bootloaders_dir.joinpath("grub", bl_mod_dir),
203 skip_existing=True
204 )
205
206 def create_directories(self):
207 """
208 Create the required directories so that this succeeds. If existing, do nothing. This should create the tree for
209 all supported bootloaders, regardless of the capabilities to symlink/install/build them.
210 """
211 if not self.bootloaders_dir.exists():
212 raise FileNotFoundError("Main bootloader directory not found! Please create it yourself!")
213
214 grub_dir = self.bootloaders_dir.joinpath("grub")
215 if not grub_dir.exists():
216 grub_dir.mkdir(mode=0o644)
217
218
219 # NOTE: move this to cobbler.utils?
220 # cobbler.utils.linkfile does a lot of things, it might be worth it to have a
221 # function just for symbolic links
222 def symlink(target: pathlib.Path, link: pathlib.Path, skip_existing: bool = False):
223 """Create a symlink LINK pointing to TARGET.
224
225 :param target: File/directory that the link will point to. The file/directory must exist.
226 :param link: Filename for the link.
227 :param skip_existing: Controls if existing links are skipped, defaults to False.
228 :raises FileNotFoundError: ``target`` is not an existing file.
229 :raises FileExistsError: ``skip_existing`` is False and ``link`` already exists.
230 """
231
232 if not target.exists():
233 raise FileNotFoundError(
234 f"{target} does not exist, can't create a symlink to it."
235 )
236 try:
237 link.symlink_to(target)
238 except FileExistsError:
239 if not skip_existing:
240 raise
241
242
243 def mkimage(image_format: str, image_filename: pathlib.Path, modules: typing.List):
244 """Create a bootable image of GRUB using grub2-mkimage.
245
246 :param image_format: Format of the image that is being created. See man(1)
247 grub2-mkimage for a list of supported formats.
248 :param image_filename: Location of the image that is being created.
249 :param modules: List of GRUB modules to include into the image
250 :raises subprocess.CalledProcessError: Error raised by ``subprocess.run``.
251 """
252
253 if not image_filename.parent.exists():
254 image_filename.parent.mkdir(parents=True)
255
256 cmd = ["grub2-mkimage"]
257 cmd.extend(("--format", image_format))
258 cmd.extend(("--output", str(image_filename)))
259 cmd.append("--prefix=")
260 cmd.extend(modules)
261
262 # The Exception raised by subprocess already contains everything useful, it's simpler to use that than roll our
263 # own custom exception together with cobbler.utils.subprocess_* functions
264 subprocess.run(cmd, check=True)
265
266
267 def get_syslinux_version() -> int:
268 """
269 This calls syslinux and asks for the version number.
270
271 :return: The major syslinux release number.
272 :raises subprocess.CalledProcessError: Error raised by ``subprocess.run`` in case syslinux does not return zero.
273 """
274 # Example output: "syslinux 4.04 Copyright 1994-2011 H. Peter Anvin et al"
275 cmd = ["syslinux", "-v"]
276 completed_process = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
277 encoding=sys.getdefaultencoding())
278 output = completed_process.stdout.split()
279 return int(float(output[1]))
280
[end of cobbler/actions/mkloaders.py]
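Building on the helpers above, the backported change boils down to resolving linux.c32 and libcom32.c32 against the Syslinux module folder instead of the PXELINUX folder before linking them. A condensed sketch, with illustrative paths rather than Cobbler's configured values:

# Sketch: link the wimboot-related COM32 modules from the Syslinux module folder.
import pathlib

from cobbler.actions.mkloaders import symlink

syslinux_folder = pathlib.Path("/usr/lib/syslinux/modules/bios")  # illustrative default
bootloaders_dir = pathlib.Path("/var/lib/cobbler/loaders")        # illustrative default

for module in ("linux.c32", "libcom32.c32"):
    target = syslinux_folder / module
    if target.exists():
        # Mirrors the corrected make_syslinux() behaviour: link only when the target exists.
        symlink(target, bootloaders_dir / module, skip_existing=True)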
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cobbler/actions/mkloaders.py b/cobbler/actions/mkloaders.py
--- a/cobbler/actions/mkloaders.py
+++ b/cobbler/actions/mkloaders.py
@@ -150,14 +150,14 @@
libcom32_c32_path = self.syslinux_folder.joinpath("libcom32.c32")
if syslinux_version > 4 and libcom32_c32_path.exists():
symlink(
- self.syslinux_pxelinux_folder.joinpath("linux.c32"),
+ self.syslinux_folder.joinpath("linux.c32"),
self.bootloaders_dir.joinpath("linux.c32"),
skip_existing=True,
)
# Make libcom32.c32
# 'linux.c32' depends on 'libcom32.c32'
symlink(
- self.syslinux_pxelinux_folder.joinpath("libcom32.c32"),
+ self.syslinux_folder.joinpath("libcom32.c32"),
self.bootloaders_dir.joinpath("libcom32.c32"),
skip_existing=True,
)
| {"golden_diff": "diff --git a/cobbler/actions/mkloaders.py b/cobbler/actions/mkloaders.py\n--- a/cobbler/actions/mkloaders.py\n+++ b/cobbler/actions/mkloaders.py\n@@ -150,14 +150,14 @@\n libcom32_c32_path = self.syslinux_folder.joinpath(\"libcom32.c32\")\n if syslinux_version > 4 and libcom32_c32_path.exists():\n symlink(\n- self.syslinux_pxelinux_folder.joinpath(\"linux.c32\"),\n+ self.syslinux_folder.joinpath(\"linux.c32\"),\n self.bootloaders_dir.joinpath(\"linux.c32\"),\n skip_existing=True,\n )\n # Make libcom32.c32\n # 'linux.c32' depends on 'libcom32.c32'\n symlink(\n- self.syslinux_pxelinux_folder.joinpath(\"libcom32.c32\"),\n+ self.syslinux_folder.joinpath(\"libcom32.c32\"),\n self.bootloaders_dir.joinpath(\"libcom32.c32\"),\n skip_existing=True,\n )\n", "issue": "[Backport] /usr/lib/PXELINUX/linux.c32 does not exist, can't create a symlink to it\n### Original feature issue\r\n\r\n- Issue: #3574\r\n- PR: #3576 \r\n\r\n### Target release\r\n\r\n- [x] release33\r\n- [ ] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nStabilization for Debian of Cobbler 3.3.x.\r\n\n", "before_files": [{"content": "\"\"\"Cobbler action to create bootable Grub2 images.\n\nThis action calls grub2-mkimage for all bootloader formats configured in\nCobbler's settings. See man(1) grub2-mkimage for available formats.\n\"\"\"\nimport logging\nimport pathlib\nimport re\nimport subprocess\nimport sys\nimport typing\n\nfrom cobbler import utils\n\n\n# NOTE: does not warrant being a class, but all Cobbler actions use a class's \".run()\" as the entrypoint\nclass MkLoaders:\n \"\"\"\n Action to create bootloader images.\n \"\"\"\n\n def __init__(self, api):\n \"\"\"\n MkLoaders constructor.\n\n :param api: CobblerAPI instance for accessing settings\n \"\"\"\n self.logger = logging.getLogger()\n self.bootloaders_dir = pathlib.Path(api.settings().bootloaders_dir)\n # GRUB 2\n self.grub2_mod_dir = pathlib.Path(api.settings().grub2_mod_dir)\n self.boot_loaders_formats: typing.Dict = api.settings().bootloaders_formats\n self.modules: typing.List = api.settings().bootloaders_modules\n # Syslinux\n self.syslinux_folder = pathlib.Path(api.settings().syslinux_dir)\n self.syslinux_memdisk_folder = pathlib.Path(api.settings().syslinux_memdisk_folder)\n self.syslinux_pxelinux_folder = pathlib.Path(api.settings().syslinux_pxelinux_folder)\n # Shim\n self.shim_glob = pathlib.Path(api.settings().bootloaders_shim_folder)\n self.shim_regex = re.compile(api.settings().bootloaders_shim_file)\n # iPXE\n self.ipxe_folder = pathlib.Path(api.settings().bootloaders_ipxe_folder)\n\n def run(self):\n \"\"\"\n Run GrubImages action. If the files or executables for the bootloader is not available we bail out and skip the\n creation after it is logged that this is not available.\n \"\"\"\n self.create_directories()\n\n self.make_shim()\n self.make_ipxe()\n self.make_syslinux()\n self.make_grub()\n\n def make_shim(self):\n \"\"\"\n Create symlink of the shim bootloader in case it is available on the system.\n \"\"\"\n # Check well-known locations\n # Absolute paths are not supported BUT we can get around that: https://stackoverflow.com/a/51108375/4730773\n parts = self.shim_glob.parts\n start_at = 1 if self.shim_glob.is_absolute() else 0\n bootloader_path_parts = pathlib.Path(*parts[start_at:])\n results = sorted(pathlib.Path(self.shim_glob.root).glob(str(bootloader_path_parts)))\n # If no match, then report and bail out.\n if len(results) <= 0:\n self.logger.info('Unable to find the folder which should be scanned for \"shim.efi\"! 
Bailing out of linking '\n 'the shim!')\n return\n # Now scan the folders with the regex\n target_shim = None\n for possible_folder in results:\n for child in possible_folder.iterdir():\n if self.shim_regex.search(str(child)):\n target_shim = child.resolve()\n break\n # If no match is found report and return\n if target_shim is None:\n self.logger.info('Unable to find \"shim.efi\" file. Please adjust \"bootloaders_shim_file\" regex. Bailing out '\n 'of linking the shim!')\n return\n # Symlink the absolute target of the match\n symlink(\n target_shim,\n self.bootloaders_dir.joinpath(pathlib.Path(\"grub/shim.efi\")),\n skip_existing=True\n )\n\n def make_ipxe(self):\n \"\"\"\n Create symlink of the iPXE bootloader in case it is available on the system.\n \"\"\"\n if not self.ipxe_folder.exists():\n self.logger.info('ipxe directory did not exist. Please adjust the \"bootloaders_ipxe_folder\". Bailing out '\n 'of iPXE setup!')\n return\n symlink(\n self.ipxe_folder.joinpath(\"undionly.kpxe\"),\n self.bootloaders_dir.joinpath(pathlib.Path(\"undionly.pxe\")),\n skip_existing=True\n )\n\n def make_syslinux(self):\n \"\"\"\n Create symlink of the important syslinux bootloader files in case they are available on the system.\n \"\"\"\n if not utils.command_existing(\"syslinux\"):\n self.logger.info(\"syslinux command not available. Bailing out of syslinux setup!\")\n return\n syslinux_version = get_syslinux_version()\n # Make modules\n symlink(\n self.syslinux_folder.joinpath(\"menu.c32\"),\n self.bootloaders_dir.joinpath(\"menu.c32\"),\n skip_existing=True\n )\n # According to https://wiki.syslinux.org/wiki/index.php?title=Library_modules,\n # 'menu.c32' depends on 'libutil.c32'.\n libutil_c32_path = self.syslinux_folder.joinpath(\"libutil.c32\")\n if syslinux_version > 4 and libutil_c32_path.exists():\n symlink(\n libutil_c32_path,\n self.bootloaders_dir.joinpath(\"libutil.c32\"),\n skip_existing=True,\n )\n if syslinux_version < 5:\n # This file is only required for Syslinux 5 and newer.\n # Source: https://wiki.syslinux.org/wiki/index.php?title=Library_modules\n self.logger.info('syslinux version 4 detected! Skip making symlink of \"ldlinux.c32\" file!')\n else:\n symlink(\n self.syslinux_folder.joinpath(\"ldlinux.c32\"),\n self.bootloaders_dir.joinpath(\"ldlinux.c32\"),\n skip_existing=True\n )\n # Make memdisk\n symlink(\n self.syslinux_memdisk_folder.joinpath(\"memdisk\"),\n self.bootloaders_dir.joinpath(\"memdisk\"),\n skip_existing=True\n )\n # Make pxelinux.0\n symlink(\n self.syslinux_pxelinux_folder.joinpath(\"pxelinux.0\"),\n self.bootloaders_dir.joinpath(\"pxelinux.0\"),\n skip_existing=True\n )\n # Make linux.c32 for syslinux + wimboot\n libcom32_c32_path = self.syslinux_folder.joinpath(\"libcom32.c32\")\n if syslinux_version > 4 and libcom32_c32_path.exists():\n symlink(\n self.syslinux_pxelinux_folder.joinpath(\"linux.c32\"),\n self.bootloaders_dir.joinpath(\"linux.c32\"),\n skip_existing=True,\n )\n # Make libcom32.c32\n # 'linux.c32' depends on 'libcom32.c32'\n symlink(\n self.syslinux_pxelinux_folder.joinpath(\"libcom32.c32\"),\n self.bootloaders_dir.joinpath(\"libcom32.c32\"),\n skip_existing=True,\n )\n\n def make_grub(self):\n \"\"\"\n Create symlink of the GRUB 2 bootloader in case it is available on the system. Additionally build the loaders\n for other architectures if the modules to do so are available.\n \"\"\"\n if not utils.command_existing(\"grub2-mkimage\"):\n self.logger.info(\"grub2-mkimage command not available. 
Bailing out of GRUB2 generation!\")\n return\n\n for image_format, options in self.boot_loaders_formats.items():\n bl_mod_dir = options.get(\"mod_dir\", image_format)\n mod_dir = self.grub2_mod_dir.joinpath(bl_mod_dir)\n if not mod_dir.exists():\n self.logger.info(\n 'GRUB2 modules directory for arch \"%s\" did no exist. Skipping GRUB2 creation',\n image_format\n )\n continue\n try:\n mkimage(\n image_format,\n self.bootloaders_dir.joinpath(\"grub\", options[\"binary_name\"]),\n self.modules + options.get(\"extra_modules\", []),\n )\n except subprocess.CalledProcessError:\n self.logger.info('grub2-mkimage failed for arch \"%s\"! Maybe you did forget to install the grub modules '\n 'for the architecture?', image_format)\n utils.log_exc()\n # don't create module symlinks if grub2-mkimage is unsuccessful\n continue\n self.logger.info('Successfully built bootloader for arch \"%s\"!', image_format)\n\n # Create a symlink for GRUB 2 modules\n # assumes a single GRUB can be used to boot all kinds of distros\n # if this assumption turns out incorrect, individual \"grub\" subdirectories are needed\n symlink(\n mod_dir,\n self.bootloaders_dir.joinpath(\"grub\", bl_mod_dir),\n skip_existing=True\n )\n\n def create_directories(self):\n \"\"\"\n Create the required directories so that this succeeds. If existing, do nothing. This should create the tree for\n all supported bootloaders, regardless of the capabilities to symlink/install/build them.\n \"\"\"\n if not self.bootloaders_dir.exists():\n raise FileNotFoundError(\"Main bootloader directory not found! Please create it yourself!\")\n\n grub_dir = self.bootloaders_dir.joinpath(\"grub\")\n if not grub_dir.exists():\n grub_dir.mkdir(mode=0o644)\n\n\n# NOTE: move this to cobbler.utils?\n# cobbler.utils.linkfile does a lot of things, it might be worth it to have a\n# function just for symbolic links\ndef symlink(target: pathlib.Path, link: pathlib.Path, skip_existing: bool = False):\n \"\"\"Create a symlink LINK pointing to TARGET.\n\n :param target: File/directory that the link will point to. The file/directory must exist.\n :param link: Filename for the link.\n :param skip_existing: Controls if existing links are skipped, defaults to False.\n :raises FileNotFoundError: ``target`` is not an existing file.\n :raises FileExistsError: ``skip_existing`` is False and ``link`` already exists.\n \"\"\"\n\n if not target.exists():\n raise FileNotFoundError(\n f\"{target} does not exist, can't create a symlink to it.\"\n )\n try:\n link.symlink_to(target)\n except FileExistsError:\n if not skip_existing:\n raise\n\n\ndef mkimage(image_format: str, image_filename: pathlib.Path, modules: typing.List):\n \"\"\"Create a bootable image of GRUB using grub2-mkimage.\n\n :param image_format: Format of the image that is being created. 
See man(1)\n grub2-mkimage for a list of supported formats.\n :param image_filename: Location of the image that is being created.\n :param modules: List of GRUB modules to include into the image\n :raises subprocess.CalledProcessError: Error raised by ``subprocess.run``.\n \"\"\"\n\n if not image_filename.parent.exists():\n image_filename.parent.mkdir(parents=True)\n\n cmd = [\"grub2-mkimage\"]\n cmd.extend((\"--format\", image_format))\n cmd.extend((\"--output\", str(image_filename)))\n cmd.append(\"--prefix=\")\n cmd.extend(modules)\n\n # The Exception raised by subprocess already contains everything useful, it's simpler to use that than roll our\n # own custom exception together with cobbler.utils.subprocess_* functions\n subprocess.run(cmd, check=True)\n\n\ndef get_syslinux_version() -> int:\n \"\"\"\n This calls syslinux and asks for the version number.\n\n :return: The major syslinux release number.\n :raises subprocess.CalledProcessError: Error raised by ``subprocess.run`` in case syslinux does not return zero.\n \"\"\"\n # Example output: \"syslinux 4.04 Copyright 1994-2011 H. Peter Anvin et al\"\n cmd = [\"syslinux\", \"-v\"]\n completed_process = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n encoding=sys.getdefaultencoding())\n output = completed_process.stdout.split()\n return int(float(output[1]))\n", "path": "cobbler/actions/mkloaders.py"}]} | 3,973 | 257 |
gh_patches_debug_6924 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-55 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Other] Allow access to webhooks for readthedocs
I recently set up the [readthedocs site](https://paperless-ngx.readthedocs.io/en/latest/). Unfortunately, I don't have access to the webhooks settings of the project.
I requested access to it from the project owner. When granted, the docs will automatically update.
Also we should change some items in https://github.com/paperless-ngx/paperless-ngx/blob/master/docs/conf.py
(namely `project = u'Paperless-ng'` and `copyright = u'2021, Daniel Quinn, Jonas Winkler'`)
Should we just add `paperless-ngx team` to it?
</issue>
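Concretely, the two docs/conf.py values called out above would end up along these lines; the exact wording is only a suggestion that mirrors the rename and the team credit discussed in the issue:

# docs/conf.py -- sketch of the updated project metadata
project = u'Paperless-ngx'
copyright = u'2015-2022, Daniel Quinn, Jonas Winkler, and the paperless-ngx team'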
<code>
[start of docs/conf.py]
1 import sphinx_rtd_theme
2
3
4 __version__ = None
5 exec(open("../src/paperless/version.py").read())
6
7
8 extensions = [
9 'sphinx.ext.autodoc',
10 'sphinx.ext.intersphinx',
11 'sphinx.ext.todo',
12 'sphinx.ext.imgmath',
13 'sphinx.ext.viewcode',
14 'sphinx_rtd_theme',
15 ]
16
17 # Add any paths that contain templates here, relative to this directory.
18 # templates_path = ['_templates']
19
20 # The suffix of source filenames.
21 source_suffix = '.rst'
22
23 # The encoding of source files.
24 #source_encoding = 'utf-8-sig'
25
26 # The master toctree document.
27 master_doc = 'index'
28
29 # General information about the project.
30 project = u'Paperless-ng'
31 copyright = u'2021, Daniel Quinn, Jonas Winkler'
32
33 # The version info for the project you're documenting, acts as replacement for
34 # |version| and |release|, also used in various other places throughout the
35 # built documents.
36 #
37
38 #
39 # If the build process ever explodes here, it's because you've set the version
40 # number in paperless.version to a tuple with 3 numbers in it.
41 #
42
43 # The short X.Y version.
44 version = ".".join([str(_) for _ in __version__[:2]])
45 # The full version, including alpha/beta/rc tags.
46 release = ".".join([str(_) for _ in __version__[:3]])
47
48 # The language for content autogenerated by Sphinx. Refer to documentation
49 # for a list of supported languages.
50 #language = None
51
52 # There are two options for replacing |today|: either, you set today to some
53 # non-false value, then it is used:
54 #today = ''
55 # Else, today_fmt is used as the format for a strftime call.
56 #today_fmt = '%B %d, %Y'
57
58 # List of patterns, relative to source directory, that match files and
59 # directories to ignore when looking for source files.
60 exclude_patterns = ['_build']
61
62 # The reST default role (used for this markup: `text`) to use for all
63 # documents.
64 #default_role = None
65
66 # If true, '()' will be appended to :func: etc. cross-reference text.
67 #add_function_parentheses = True
68
69 # If true, the current module name will be prepended to all description
70 # unit titles (such as .. function::).
71 #add_module_names = True
72
73 # If true, sectionauthor and moduleauthor directives will be shown in the
74 # output. They are ignored by default.
75 #show_authors = False
76
77 # The name of the Pygments (syntax highlighting) style to use.
78 pygments_style = 'sphinx'
79
80 # A list of ignored prefixes for module index sorting.
81 #modindex_common_prefix = []
82
83 # If true, keep warnings as "system message" paragraphs in the built documents.
84 #keep_warnings = False
85
86
87 # -- Options for HTML output ----------------------------------------------
88
89 # The theme to use for HTML and HTML Help pages. See the documentation for
90 # a list of builtin themes.
91 html_theme = 'sphinx_rtd_theme'
92
93 # Theme options are theme-specific and customize the look and feel of a theme
94 # further. For a list of options available for each theme, see the
95 # documentation.
96 #html_theme_options = {}
97
98 # Add any paths that contain custom themes here, relative to this directory.
99 html_theme_path = []
100
101 # The name for this set of Sphinx documents. If None, it defaults to
102 # "<project> v<release> documentation".
103 #html_title = None
104
105 # A shorter title for the navigation bar. Default is the same as html_title.
106 #html_short_title = None
107
108 # The name of an image file (relative to this directory) to place at the top
109 # of the sidebar.
110 #html_logo = None
111
112 # The name of an image file (within the static path) to use as favicon of the
113 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
114 # pixels large.
115 #html_favicon = None
116
117 # Add any paths that contain custom static files (such as style sheets) here,
118 # relative to this directory. They are copied after the builtin static files,
119 # so a file named "default.css" will overwrite the builtin "default.css".
120 html_static_path = ['_static']
121
122 # Add any extra paths that contain custom files (such as robots.txt or
123 # .htaccess) here, relative to this directory. These files are copied
124 # directly to the root of the documentation.
125 #html_extra_path = []
126
127 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
128 # using the given strftime format.
129 #html_last_updated_fmt = '%b %d, %Y'
130
131 # If true, SmartyPants will be used to convert quotes and dashes to
132 # typographically correct entities.
133 #html_use_smartypants = True
134
135 # Custom sidebar templates, maps document names to template names.
136 #html_sidebars = {}
137
138 # Additional templates that should be rendered to pages, maps page names to
139 # template names.
140 #html_additional_pages = {}
141
142 # If false, no module index is generated.
143 #html_domain_indices = True
144
145 # If false, no index is generated.
146 #html_use_index = True
147
148 # If true, the index is split into individual pages for each letter.
149 #html_split_index = False
150
151 # If true, links to the reST sources are added to the pages.
152 #html_show_sourcelink = True
153
154 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
155 #html_show_sphinx = True
156
157 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
158 #html_show_copyright = True
159
160 # If true, an OpenSearch description file will be output, and all pages will
161 # contain a <link> tag referring to it. The value of this option must be the
162 # base URL from which the finished HTML is served.
163 #html_use_opensearch = ''
164
165 # This is the file name suffix for HTML files (e.g. ".xhtml").
166 #html_file_suffix = None
167
168 # Output file base name for HTML help builder.
169 htmlhelp_basename = 'paperless'
170
171 # -- Options for LaTeX output ---------------------------------------------
172
173 latex_elements = {
174 # The paper size ('letterpaper' or 'a4paper').
175 #'papersize': 'letterpaper',
176
177 # The font size ('10pt', '11pt' or '12pt').
178 #'pointsize': '10pt',
179
180 # Additional stuff for the LaTeX preamble.
181 #'preamble': '',
182 }
183
184 # Grouping the document tree into LaTeX files. List of tuples
185 # (source start file, target name, title,
186 # author, documentclass [howto, manual, or own class]).
187 latex_documents = [
188 ('index', 'paperless.tex', u'Paperless Documentation',
189 u'Daniel Quinn', 'manual'),
190 ]
191
192 # The name of an image file (relative to this directory) to place at the top of
193 # the title page.
194 #latex_logo = None
195
196 # For "manual" documents, if this is true, then toplevel headings are parts,
197 # not chapters.
198 #latex_use_parts = False
199
200 # If true, show page references after internal links.
201 #latex_show_pagerefs = False
202
203 # If true, show URL addresses after external links.
204 #latex_show_urls = False
205
206 # Documents to append as an appendix to all manuals.
207 #latex_appendices = []
208
209 # If false, no module index is generated.
210 #latex_domain_indices = True
211
212
213 # -- Options for manual page output ---------------------------------------
214
215 # One entry per manual page. List of tuples
216 # (source start file, name, description, authors, manual section).
217 man_pages = [
218 ('index', 'paperless', u'Paperless Documentation',
219 [u'Daniel Quinn'], 1)
220 ]
221
222 # If true, show URL addresses after external links.
223 #man_show_urls = False
224
225
226 # -- Options for Texinfo output -------------------------------------------
227
228 # Grouping the document tree into Texinfo files. List of tuples
229 # (source start file, target name, title, author,
230 # dir menu entry, description, category)
231 texinfo_documents = [
232 ('index', 'Paperless', u'Paperless Documentation',
233 u'Daniel Quinn', 'paperless', 'Scan, index, and archive all of your paper documents.',
234 'Miscellaneous'),
235 ]
236
237 # Documents to append as an appendix to all manuals.
238 #texinfo_appendices = []
239
240 # If false, no module index is generated.
241 #texinfo_domain_indices = True
242
243 # How to display URL addresses: 'footnote', 'no', or 'inline'.
244 #texinfo_show_urls = 'footnote'
245
246 # If true, do not generate a @detailmenu in the "Top" node's menu.
247 #texinfo_no_detailmenu = False
248
249
250 # -- Options for Epub output ----------------------------------------------
251
252 # Bibliographic Dublin Core info.
253 epub_title = u'Paperless'
254 epub_author = u'Daniel Quinn'
255 epub_publisher = u'Daniel Quinn'
256 epub_copyright = u'2015, Daniel Quinn'
257
258 # The basename for the epub file. It defaults to the project name.
259 #epub_basename = u'Paperless'
260
261 # The HTML theme for the epub output. Since the default themes are not optimized
262 # for small screen space, using the same theme for HTML and epub output is
263 # usually not wise. This defaults to 'epub', a theme designed to save visual
264 # space.
265 #epub_theme = 'epub'
266
267 # The language of the text. It defaults to the language option
268 # or en if the language is not set.
269 #epub_language = ''
270
271 # The scheme of the identifier. Typical schemes are ISBN or URL.
272 #epub_scheme = ''
273
274 # The unique identifier of the text. This can be a ISBN number
275 # or the project homepage.
276 #epub_identifier = ''
277
278 # A unique identification for the text.
279 #epub_uid = ''
280
281 # A tuple containing the cover image and cover page html template filenames.
282 #epub_cover = ()
283
284 # A sequence of (type, uri, title) tuples for the guide element of content.opf.
285 #epub_guide = ()
286
287 # HTML files that should be inserted before the pages created by sphinx.
288 # The format is a list of tuples containing the path and title.
289 #epub_pre_files = []
290
291 # HTML files that should be inserted after the pages created by sphinx.
292 # The format is a list of tuples containing the path and title.
293 #epub_post_files = []
294
295 # A list of files that should not be packed into the epub file.
296 epub_exclude_files = ['search.html']
297
298 # The depth of the table of contents in toc.ncx.
299 #epub_tocdepth = 3
300
301 # Allow duplicate toc entries.
302 #epub_tocdup = True
303
304 # Choose between 'default' and 'includehidden'.
305 #epub_tocscope = 'default'
306
307 # Fix unsupported image types using the PIL.
308 #epub_fix_images = False
309
310 # Scale large images.
311 #epub_max_image_width = 0
312
313 # How to display URL addresses: 'footnote', 'no', or 'inline'.
314 #epub_show_urls = 'inline'
315
316 # If false, no index is generated.
317 #epub_use_index = True
318
319
320 # Example configuration for intersphinx: refer to the Python standard library.
321 intersphinx_mapping = {'http://docs.python.org/': None}
322
[end of docs/conf.py]
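A quick way to sanity-check metadata edits to the file above is to rebuild the HTML docs locally. The sketch below drives Sphinx programmatically; the docs/ source path and the build directory are assumptions that match this repository layout rather than anything the file defines:

# Sketch: rebuild the Sphinx docs to verify conf.py changes (paths are assumptions).
from sphinx.cmd.build import build_main

exit_code = build_main(["-b", "html", "docs", "docs/_build/html"])
raise SystemExit(exit_code)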
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -27,8 +27,8 @@
master_doc = 'index'
# General information about the project.
-project = u'Paperless-ng'
-copyright = u'2021, Daniel Quinn, Jonas Winkler'
+project = u'Paperless-ngx'
+copyright = u'2015-2022, Daniel Quinn, Jonas Winkler, and the paperless-ngx team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -27,8 +27,8 @@\n master_doc = 'index'\n \n # General information about the project.\n-project = u'Paperless-ng'\n-copyright = u'2021, Daniel Quinn, Jonas Winkler'\n+project = u'Paperless-ngx'\n+copyright = u'2015-2022, Daniel Quinn, Jonas Winkler, and the paperless-ngx team'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n", "issue": "[Other] Allow access to webhooks for readthedocs\nI recently set up the [readthedocs site](https://paperless-ngx.readthedocs.io/en/latest/). Unfortunately, I don't have access to the webhooks settings of the project. \r\nI requested access to it from the project owner. When granted, the docs will automatically update.\r\nAlso we should change some items in https://github.com/paperless-ngx/paperless-ngx/blob/master/docs/conf.py \r\n(namely `project = u'Paperless-ng'` and `copyright = u'2021, Daniel Quinn, Jonas Winkler'`)\r\n\r\nShould we just add `paperless-ngx team` to it?\n", "before_files": [{"content": "import sphinx_rtd_theme\n\n\n__version__ = None\nexec(open(\"../src/paperless/version.py\").read())\n\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.imgmath',\n 'sphinx.ext.viewcode',\n 'sphinx_rtd_theme',\n]\n\n# Add any paths that contain templates here, relative to this directory.\n# templates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Paperless-ng'\ncopyright = u'2021, Daniel Quinn, Jonas Winkler'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n\n#\n# If the build process ever explodes here, it's because you've set the version\n# number in paperless.version to a tuple with 3 numbers in it.\n#\n\n# The short X.Y version.\nversion = \".\".join([str(_) for _ in __version__[:2]])\n# The full version, including alpha/beta/rc tags.\nrelease = \".\".join([str(_) for _ in __version__[:3]])\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'paperless'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'paperless.tex', u'Paperless Documentation',\n u'Daniel Quinn', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'paperless', u'Paperless Documentation',\n [u'Daniel Quinn'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Paperless', u'Paperless Documentation',\n u'Daniel Quinn', 'paperless', 'Scan, index, and archive all of your paper documents.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'Paperless'\nepub_author = u'Daniel Quinn'\nepub_publisher = u'Daniel Quinn'\nepub_copyright = u'2015, Daniel Quinn'\n\n# The basename for the epub file. It defaults to the project name.\n#epub_basename = u'Paperless'\n\n# The HTML theme for the epub output. Since the default themes are not optimized\n# for small screen space, using the same theme for HTML and epub output is\n# usually not wise. This defaults to 'epub', a theme designed to save visual\n# space.\n#epub_theme = 'epub'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Choose between 'default' and 'includehidden'.\n#epub_tocscope = 'default'\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'http://docs.python.org/': None}\n", "path": "docs/conf.py"}]} | 4,042 | 152 |
gh_patches_debug_22838 | rasdani/github-patches | git_diff | kartoza__prj.app-485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certificate logos need to be rendered larger
<img width="1651" alt="screen shot 2017-07-31 at 9 02 22 am" src="https://user-images.githubusercontent.com/178003/28766753-3d9d6c24-75d1-11e7-8222-5c26ac826c7a.png">
Can you make the project and certifying organization logos bigger in the layout please? Maybe 2x vertical and horizontal?
</issue>
<code>
[start of django_project/certification/views/certificate.py]
1 # coding=utf-8
2 from django.http import Http404, HttpResponse
3 from django.views.generic import CreateView, DetailView
4 from django.core.urlresolvers import reverse
5 from braces.views import LoginRequiredMixin
6 from reportlab.pdfgen import canvas
7 from reportlab.lib.pagesizes import A4, landscape
8 from reportlab.lib.utils import ImageReader
9 from ..models import Certificate, Course, Attendee
10 from ..forms import CertificateForm
11 from base.models.project import Project
12
13
14 class CertificateMixin(object):
15 """Mixin class to provide standard settings for Certificate."""
16
17 model = Certificate
18 form_class = CertificateForm
19
20
21 class CertificateCreateView(
22 LoginRequiredMixin, CertificateMixin, CreateView):
23 """Create view for Certificate."""
24
25 context_object_name = 'certificate'
26 template_name = 'certificate/create.html'
27
28 def get_success_url(self):
29 """Define the redirect URL.
30
31 After successful creation of the object, the User will be redirected
32 to the Course detail page.
33
34 :returns: URL
35 :rtype: HttpResponse
36 """
37
38 return reverse('course-detail', kwargs={
39 'project_slug': self.project_slug,
40 'organisation_slug': self.organisation_slug,
41 'slug': self.course_slug
42 })
43
44 def get_context_data(self, **kwargs):
45 """Get the context data which is passed to a template.
46
47 :param kwargs: Any arguments to pass to the superclass.
48 :type kwargs: dict
49
50 :returns: Context data which will be passed to the template.
51 :rtype: dict
52 """
53
54 context = super(
55 CertificateCreateView, self).get_context_data(**kwargs)
56 context['course'] = Course.objects.get(slug=self.course_slug)
57 context['attendee'] = Attendee.objects.get(pk=self.pk)
58 return context
59
60 def get_form_kwargs(self):
61 """Get keyword arguments from form.
62
63 :returns keyword argument from the form
64 :rtype: dict
65 """
66
67 kwargs = super(CertificateCreateView, self).get_form_kwargs()
68 self.project_slug = self.kwargs.get('project_slug', None)
69 self.organisation_slug = self.kwargs.get('organisation_slug', None)
70 self.course_slug = self.kwargs.get('course_slug', None)
71 self.pk = self.kwargs.get('pk', None)
72 self.course = Course.objects.get(slug=self.course_slug)
73 self.attendee = Attendee.objects.get(pk=self.pk)
74 kwargs.update({
75 'user': self.request.user,
76 'course': self.course,
77 'attendee': self.attendee,
78 })
79 return kwargs
80
81
82 class CertificateDetailView(DetailView):
83 """Detail view for Certificate."""
84
85 model = Certificate
86 context_object_name = 'certificate'
87 template_name = 'certificate/detail.html'
88
89 def get_context_data(self, **kwargs):
90 """Get the context data which is passed to a template.
91
92 :param kwargs: Any arguments to pass to the superclass.
93 :type kwargs: dict
94
95 :returns: Context data which will be passed to the template.
96 :rtype: dict
97 """
98
99 self.certificateID = self.kwargs.get('id', None)
100 self.project_slug = self.kwargs.get('project_slug', None)
101 context = super(
102 CertificateDetailView, self).get_context_data(**kwargs)
103 issued_id = \
104 Certificate.objects.all().values_list('certificateID', flat=True)
105 if self.certificateID in issued_id:
106 context['certificate'] = \
107 Certificate.objects.get(certificateID=self.certificateID)
108 context['project_slug'] = self.project_slug
109 return context
110
111 def get_queryset(self):
112 """Get the queryset for this view.
113
114 :returns: Queryset which is all certificate in the
115 corresponding organisation.
116 :rtype: QuerySet
117 """
118
119 qs = Certificate.objects.all()
120 return qs
121
122 def get_object(self, queryset=None):
123 """Get the object for this view.
124
125 :param queryset: A query set
126 :type queryset: QuerySet
127
128 :returns: Queryset which is filtered to only show a certificate
129 depends on the input certificate ID.
130 :rtype: QuerySet
131 :raises: Http404
132 """
133
134 if queryset is None:
135 queryset = self.get_queryset()
136 certificateID = self.kwargs.get('id', None)
137 if certificateID:
138 try:
139 obj = queryset.get(certificateID=certificateID)
140 return obj
141 except Certificate.DoesNotExist:
142 return None
143 else:
144 raise Http404('Sorry! Certificate by this ID is not exist.')
145
146
147 def certificate_pdf_view(request, **kwargs):
148
149 project_slug = kwargs.pop('project_slug')
150 course_slug = kwargs.pop('course_slug')
151 pk = kwargs.pop('pk')
152 project = Project.objects.get(slug=project_slug)
153 course = Course.objects.get(slug=course_slug)
154 attendee = Attendee.objects.get(pk=pk)
155 certificate = Certificate.objects.get(course=course, attendee=attendee)
156 current_site = request.META['HTTP_HOST']
157
158 # Create the HttpResponse object with the appropriate PDF headers.
159 response = HttpResponse(content_type='application/pdf')
160 response['Content-Disposition'] = 'filename="certificate.pdf"'
161
162 # Create the PDF object, using the response object as its "file."
163 page = canvas.Canvas(response, pagesize=landscape(A4))
164 width, height = A4
165 center = height * 0.5
166
167 if project.image_file:
168 project_logo = ImageReader(project.image_file)
169 else:
170 project_logo = None
171
172 if course.certifying_organisation.logo:
173 organisation_logo = ImageReader(course.certifying_organisation.logo)
174 else:
175 organisation_logo = None
176
177 if project.signature:
178 project_owner_signature = ImageReader(project.signature)
179 else:
180 project_owner_signature = None
181
182 if course.course_convener.signature:
183 convener_signature = ImageReader(course.course_convener.signature)
184 else:
185 convener_signature = None
186
187 if course.template_certificate:
188 background = ImageReader(course.template_certificate)
189 else:
190 background = None
191
192 # Certificate margin.
193 margin_right = height - 50
194 margin_left = 50
195 margin_bottom = 50
196 max_left = margin_right - 50
197
198 # Draw things on the PDF. Here's where the PDF generation happens.
199 # See the ReportLab documentation for the full list of functionality.
200 if background is not None:
201 page.drawImage(
202 background, 0, 0, height=width, width=height,
203 preserveAspectRatio=True, mask='auto')
204 page.setFillColorRGB(0.1, 0.1, 0.1)
205 page.setFont('Times-Roman', 18)
206 # page.drawString(margin_left, 480, project.name)
207 # page.drawRightString(
208 # (margin_right), 480, course.certifying_organisation.name)
209
210 if project_logo is not None:
211 page.drawImage(
212 project_logo, 50, 500, width=50, height=50,
213 preserveAspectRatio=True, mask='auto')
214
215 if organisation_logo is not None:
216 page.drawImage(
217 organisation_logo, max_left, 500, height=50, width=50,
218 preserveAspectRatio=True, anchor='c', mask='auto')
219
220 page.setFont('Times-Bold', 26)
221 page.drawCentredString(center, 480, 'Certificate of Completion')
222 page.drawCentredString(
223 center, 400, '%s %s' % (attendee.firstname, attendee.surname))
224 page.setFont('Times-Roman', 16)
225 page.drawCentredString(
226 center, 360, 'Has attended and completed the course:')
227 page.setFont('Times-Bold', 20)
228 page.drawCentredString(center, 300, course.course_type.name)
229 page.setFont('Times-Roman', 16)
230 page.drawCentredString(
231 center, 270,
232 'From %s %s %s to %s %s %s'
233 % (course.start_date.day, course.start_date.strftime('%B'),
234 course.start_date.year, course.end_date.day,
235 course.end_date.strftime('%B'), course.end_date.year))
236 page.setFillColorRGB(0.1, 0.1, 0.1)
237 page.drawCentredString(
238 center, 220, 'Convened by %s %s at %s' % (
239 course.course_convener.user.first_name,
240 course.course_convener.user.last_name, course.training_center))
241
242 if project_owner_signature is not None:
243 page.drawImage(
244 project_owner_signature,
245 (margin_left + 100), (margin_bottom + 70), width=100, height=70,
246 preserveAspectRatio=True, anchor='s', mask='auto')
247
248 if convener_signature is not None:
249 page.drawImage(
250 convener_signature, (margin_right - 200), (margin_bottom + 70),
251 width=100, height=70, preserveAspectRatio=True, anchor='s',
252 mask='auto')
253
254 page.setFont('Times-Italic', 12)
255 page.drawCentredString(
256 (margin_left + 150), (margin_bottom + 60),
257 '%s %s' % (project.owner.first_name, project.owner.last_name))
258 page.drawCentredString(
259 (margin_right - 150), (margin_bottom + 60),
260 '%s %s' % (
261 course.course_convener.user.first_name,
262 course.course_convener.user.last_name))
263 page.line(
264 (margin_left + 70), (margin_bottom + 55),
265 (margin_left + 230), (margin_bottom + 55))
266 page.line(
267 (margin_right - 70), (margin_bottom + 55),
268 (margin_right - 230), (margin_bottom + 55))
269 page.setFont('Times-Roman', 13)
270 page.drawCentredString(
271 (margin_left + 150), (margin_bottom + 40), 'Project Owner')
272 page.drawCentredString(
273 (margin_right - 150), (margin_bottom + 40), 'Convener')
274
275 # Footnotes.
276 page.setFont('Times-Roman', 14)
277 page.drawString(
278 margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID)
279 page.setFont('Times-Roman', 8)
280 page.drawString(
281 margin_left, (margin_bottom - 20),
282 'You can verify this certificate by visiting '
283 'http://%s/en/%s/certificate/%s/.'
284 % (current_site, project.slug, certificate.certificateID))
285
286 # Close the PDF object cleanly.
287 page.showPage()
288 page.save()
289 return response
290
[end of django_project/certification/views/certificate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py
--- a/django_project/certification/views/certificate.py
+++ b/django_project/certification/views/certificate.py
@@ -193,7 +193,7 @@
margin_right = height - 50
margin_left = 50
margin_bottom = 50
- max_left = margin_right - 50
+ max_left = margin_right - 100
# Draw things on the PDF. Here's where the PDF generation happens.
# See the ReportLab documentation for the full list of functionality.
@@ -209,12 +209,12 @@
if project_logo is not None:
page.drawImage(
- project_logo, 50, 500, width=50, height=50,
+ project_logo, 50, 450, width=100, height=100,
preserveAspectRatio=True, mask='auto')
if organisation_logo is not None:
page.drawImage(
- organisation_logo, max_left, 500, height=50, width=50,
+ organisation_logo, max_left, 450, height=100, width=100,
preserveAspectRatio=True, anchor='c', mask='auto')
page.setFont('Times-Bold', 26)
| {"golden_diff": "diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py\n--- a/django_project/certification/views/certificate.py\n+++ b/django_project/certification/views/certificate.py\n@@ -193,7 +193,7 @@\n margin_right = height - 50\n margin_left = 50\n margin_bottom = 50\n- max_left = margin_right - 50\n+ max_left = margin_right - 100\n \n # Draw things on the PDF. Here's where the PDF generation happens.\n # See the ReportLab documentation for the full list of functionality.\n@@ -209,12 +209,12 @@\n \n if project_logo is not None:\n page.drawImage(\n- project_logo, 50, 500, width=50, height=50,\n+ project_logo, 50, 450, width=100, height=100,\n preserveAspectRatio=True, mask='auto')\n \n if organisation_logo is not None:\n page.drawImage(\n- organisation_logo, max_left, 500, height=50, width=50,\n+ organisation_logo, max_left, 450, height=100, width=100,\n preserveAspectRatio=True, anchor='c', mask='auto')\n \n page.setFont('Times-Bold', 26)\n", "issue": "Certificate logos need to be rendered larger\n<img width=\"1651\" alt=\"screen shot 2017-07-31 at 9 02 22 am\" src=\"https://user-images.githubusercontent.com/178003/28766753-3d9d6c24-75d1-11e7-8222-5c26ac826c7a.png\">\r\n\r\n\r\nCan you make the project and certifying organization logos bigger in the layout please? Maybe 2x vertical and horizontal?\n", "before_files": [{"content": "# coding=utf-8\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import CreateView, DetailView\nfrom django.core.urlresolvers import reverse\nfrom braces.views import LoginRequiredMixin\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4, landscape\nfrom reportlab.lib.utils import ImageReader\nfrom ..models import Certificate, Course, Attendee\nfrom ..forms import CertificateForm\nfrom base.models.project import Project\n\n\nclass CertificateMixin(object):\n \"\"\"Mixin class to provide standard settings for Certificate.\"\"\"\n\n model = Certificate\n form_class = CertificateForm\n\n\nclass CertificateCreateView(\n LoginRequiredMixin, CertificateMixin, CreateView):\n \"\"\"Create view for Certificate.\"\"\"\n\n context_object_name = 'certificate'\n template_name = 'certificate/create.html'\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful creation of the object, the User will be redirected\n to the Course detail page.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n\n return reverse('course-detail', kwargs={\n 'project_slug': self.project_slug,\n 'organisation_slug': self.organisation_slug,\n 'slug': self.course_slug\n })\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n context = super(\n CertificateCreateView, self).get_context_data(**kwargs)\n context['course'] = Course.objects.get(slug=self.course_slug)\n context['attendee'] = Attendee.objects.get(pk=self.pk)\n return context\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype: dict\n \"\"\"\n\n kwargs = super(CertificateCreateView, self).get_form_kwargs()\n self.project_slug = self.kwargs.get('project_slug', None)\n self.organisation_slug = self.kwargs.get('organisation_slug', None)\n self.course_slug = self.kwargs.get('course_slug', None)\n self.pk = 
self.kwargs.get('pk', None)\n self.course = Course.objects.get(slug=self.course_slug)\n self.attendee = Attendee.objects.get(pk=self.pk)\n kwargs.update({\n 'user': self.request.user,\n 'course': self.course,\n 'attendee': self.attendee,\n })\n return kwargs\n\n\nclass CertificateDetailView(DetailView):\n \"\"\"Detail view for Certificate.\"\"\"\n\n model = Certificate\n context_object_name = 'certificate'\n template_name = 'certificate/detail.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n self.certificateID = self.kwargs.get('id', None)\n self.project_slug = self.kwargs.get('project_slug', None)\n context = super(\n CertificateDetailView, self).get_context_data(**kwargs)\n issued_id = \\\n Certificate.objects.all().values_list('certificateID', flat=True)\n if self.certificateID in issued_id:\n context['certificate'] = \\\n Certificate.objects.get(certificateID=self.certificateID)\n context['project_slug'] = self.project_slug\n return context\n\n def get_queryset(self):\n \"\"\"Get the queryset for this view.\n\n :returns: Queryset which is all certificate in the\n corresponding organisation.\n :rtype: QuerySet\n \"\"\"\n\n qs = Certificate.objects.all()\n return qs\n\n def get_object(self, queryset=None):\n \"\"\"Get the object for this view.\n\n :param queryset: A query set\n :type queryset: QuerySet\n\n :returns: Queryset which is filtered to only show a certificate\n depends on the input certificate ID.\n :rtype: QuerySet\n :raises: Http404\n \"\"\"\n\n if queryset is None:\n queryset = self.get_queryset()\n certificateID = self.kwargs.get('id', None)\n if certificateID:\n try:\n obj = queryset.get(certificateID=certificateID)\n return obj\n except Certificate.DoesNotExist:\n return None\n else:\n raise Http404('Sorry! 
Certificate by this ID is not exist.')\n\n\ndef certificate_pdf_view(request, **kwargs):\n\n project_slug = kwargs.pop('project_slug')\n course_slug = kwargs.pop('course_slug')\n pk = kwargs.pop('pk')\n project = Project.objects.get(slug=project_slug)\n course = Course.objects.get(slug=course_slug)\n attendee = Attendee.objects.get(pk=pk)\n certificate = Certificate.objects.get(course=course, attendee=attendee)\n current_site = request.META['HTTP_HOST']\n\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"certificate.pdf\"'\n\n # Create the PDF object, using the response object as its \"file.\"\n page = canvas.Canvas(response, pagesize=landscape(A4))\n width, height = A4\n center = height * 0.5\n\n if project.image_file:\n project_logo = ImageReader(project.image_file)\n else:\n project_logo = None\n\n if course.certifying_organisation.logo:\n organisation_logo = ImageReader(course.certifying_organisation.logo)\n else:\n organisation_logo = None\n\n if project.signature:\n project_owner_signature = ImageReader(project.signature)\n else:\n project_owner_signature = None\n\n if course.course_convener.signature:\n convener_signature = ImageReader(course.course_convener.signature)\n else:\n convener_signature = None\n\n if course.template_certificate:\n background = ImageReader(course.template_certificate)\n else:\n background = None\n\n # Certificate margin.\n margin_right = height - 50\n margin_left = 50\n margin_bottom = 50\n max_left = margin_right - 50\n\n # Draw things on the PDF. Here's where the PDF generation happens.\n # See the ReportLab documentation for the full list of functionality.\n if background is not None:\n page.drawImage(\n background, 0, 0, height=width, width=height,\n preserveAspectRatio=True, mask='auto')\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.setFont('Times-Roman', 18)\n # page.drawString(margin_left, 480, project.name)\n # page.drawRightString(\n # (margin_right), 480, course.certifying_organisation.name)\n\n if project_logo is not None:\n page.drawImage(\n project_logo, 50, 500, width=50, height=50,\n preserveAspectRatio=True, mask='auto')\n\n if organisation_logo is not None:\n page.drawImage(\n organisation_logo, max_left, 500, height=50, width=50,\n preserveAspectRatio=True, anchor='c', mask='auto')\n\n page.setFont('Times-Bold', 26)\n page.drawCentredString(center, 480, 'Certificate of Completion')\n page.drawCentredString(\n center, 400, '%s %s' % (attendee.firstname, attendee.surname))\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 360, 'Has attended and completed the course:')\n page.setFont('Times-Bold', 20)\n page.drawCentredString(center, 300, course.course_type.name)\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 270,\n 'From %s %s %s to %s %s %s'\n % (course.start_date.day, course.start_date.strftime('%B'),\n course.start_date.year, course.end_date.day,\n course.end_date.strftime('%B'), course.end_date.year))\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.drawCentredString(\n center, 220, 'Convened by %s %s at %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name, course.training_center))\n\n if project_owner_signature is not None:\n page.drawImage(\n project_owner_signature,\n (margin_left + 100), (margin_bottom + 70), width=100, height=70,\n preserveAspectRatio=True, anchor='s', mask='auto')\n\n if convener_signature is not None:\n 
page.drawImage(\n convener_signature, (margin_right - 200), (margin_bottom + 70),\n width=100, height=70, preserveAspectRatio=True, anchor='s',\n mask='auto')\n\n page.setFont('Times-Italic', 12)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 60),\n '%s %s' % (project.owner.first_name, project.owner.last_name))\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 60),\n '%s %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name))\n page.line(\n (margin_left + 70), (margin_bottom + 55),\n (margin_left + 230), (margin_bottom + 55))\n page.line(\n (margin_right - 70), (margin_bottom + 55),\n (margin_right - 230), (margin_bottom + 55))\n page.setFont('Times-Roman', 13)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 40), 'Project Owner')\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 40), 'Convener')\n\n # Footnotes.\n page.setFont('Times-Roman', 14)\n page.drawString(\n margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID)\n page.setFont('Times-Roman', 8)\n page.drawString(\n margin_left, (margin_bottom - 20),\n 'You can verify this certificate by visiting '\n 'http://%s/en/%s/certificate/%s/.'\n % (current_site, project.slug, certificate.certificateID))\n\n # Close the PDF object cleanly.\n page.showPage()\n page.save()\n return response\n", "path": "django_project/certification/views/certificate.py"}]} | 3,799 | 321 |
gh_patches_debug_6359 | rasdani/github-patches | git_diff | pantsbuild__pants-15405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `update_env` to `process_execution::local`.
Replaces any `{chroot}` placeholders in the requested process env with the sandbox workdir.
This reflects the behavior for interactive processes executed with the `run` goal.
</issue>
<code>
[start of src/python/pants/core/goals/publish.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3 """Goal for publishing packaged targets to any repository or registry etc.
4
5 Plugins implement the publish protocol that provides this goal with the processes to run in order to
6 publish the artifacts.
7
8 The publish protocol consists of defining two union members and one rule, returning the processes to
9 run. See the doc for the corresponding classses in this module for details on the classes to define.
10
11 Example rule:
12
13 @rule
14 async def publish_example(request: PublishToMyRepoRequest, ...) -> PublishProcesses:
15 # Create `InteractiveProcess` instances as required by the `request`.
16 return PublishProcesses(...)
17 """
18
19
20 from __future__ import annotations
21
22 import collections
23 import json
24 import logging
25 from abc import ABCMeta
26 from dataclasses import asdict, dataclass, field, is_dataclass, replace
27 from itertools import chain
28 from typing import ClassVar, Generic, Type, TypeVar
29
30 from typing_extensions import final
31
32 from pants.core.goals.package import BuiltPackage, PackageFieldSet
33 from pants.engine.addresses import Address
34 from pants.engine.collection import Collection
35 from pants.engine.console import Console
36 from pants.engine.goal import Goal, GoalSubsystem
37 from pants.engine.process import InteractiveProcess, InteractiveProcessResult
38 from pants.engine.rules import Effect, Get, MultiGet, collect_rules, goal_rule, rule
39 from pants.engine.target import (
40 FieldSet,
41 ImmutableValue,
42 NoApplicableTargetsBehavior,
43 TargetRootsToFieldSets,
44 TargetRootsToFieldSetsRequest,
45 )
46 from pants.engine.unions import UnionMembership, UnionRule, union
47 from pants.option.option_types import StrOption
48 from pants.util.frozendict import FrozenDict
49
50 logger = logging.getLogger(__name__)
51
52
53 _F = TypeVar("_F", bound=FieldSet)
54
55
56 class PublishOutputData(FrozenDict[str, ImmutableValue]):
57 pass
58
59
60 @union
61 @dataclass(frozen=True)
62 class PublishRequest(Generic[_F]):
63 """Implement a union member subclass of this union class along with a PublishFieldSet subclass
64 that appoints that member subclass in order to receive publish requests for targets compatible
65 with the field set.
66
67 The `packages` hold all artifacts produced for a given target to be published.
68
69 Example:
70
71 PublishToMyRepoRequest(PublishRequest):
72 pass
73
74 PublishToMyRepoFieldSet(PublishFieldSet):
75 publish_request_type = PublishToMyRepoRequest
76
77 # Standard FieldSet semantics from here on:
78 required_fields = (MyRepositories,)
79 ...
80 """
81
82 field_set: _F
83 packages: tuple[BuiltPackage, ...]
84
85
86 _T = TypeVar("_T", bound=PublishRequest)
87
88
89 @union
90 @dataclass(frozen=True)
91 class PublishFieldSet(Generic[_T], FieldSet, metaclass=ABCMeta):
92 """FieldSet for PublishRequest.
93
94 Union members may list any fields required to fulfill the instantiation of the
95 `PublishProcesses` result of the publish rule.
96 """
97
98 # Subclasses must provide this, to a union member (subclass) of `PublishRequest`.
99 publish_request_type: ClassVar[Type[_T]]
100
101 @final
102 def _request(self, packages: tuple[BuiltPackage, ...]) -> _T:
103 """Internal helper for the core publish goal."""
104 return self.publish_request_type(field_set=self, packages=packages)
105
106 @final
107 @classmethod
108 def rules(cls) -> tuple[UnionRule, ...]:
109 """Helper method for registering the union members."""
110 return (
111 UnionRule(PublishFieldSet, cls),
112 UnionRule(PublishRequest, cls.publish_request_type),
113 )
114
115 def get_output_data(self) -> PublishOutputData:
116 return PublishOutputData({"target": self.address})
117
118
119 @dataclass(frozen=True)
120 class PublishPackages:
121 """Processes to run in order to publish the named artifacts.
122
123 The `names` should list all artifacts being published by the `process` command.
124
125 The `process` may be `None`, indicating that it will not be published. This will be logged as
126 `skipped`. If the process returns a non zero exit code, it will be logged as `failed`.
127
128 The `description` may be a reason explaining why the publish was skipped, or identifying which
129 repository the artifacts are published to.
130 """
131
132 names: tuple[str, ...]
133 process: InteractiveProcess | None = None
134 description: str | None = None
135 data: PublishOutputData = field(default_factory=PublishOutputData)
136
137 def get_output_data(self, **extra_data) -> PublishOutputData:
138 return PublishOutputData(
139 {
140 "names": self.names,
141 **self.data,
142 **extra_data,
143 }
144 )
145
146
147 class PublishProcesses(Collection[PublishPackages]):
148 """Collection of what processes to run for all built packages.
149
150 This is returned from implementing rules in response to a PublishRequest.
151
152 Depending on the capabilities of the publishing tool, the work may be partitioned based on
153 number of artifacts and/or repositories to publish to.
154 """
155
156
157 @dataclass(frozen=True)
158 class PublishProcessesRequest:
159 """Internal request taking all field sets for a target and turning it into a `PublishProcesses`
160 collection (via registered publish plugins)."""
161
162 package_field_sets: tuple[PackageFieldSet, ...]
163 publish_field_sets: tuple[PublishFieldSet, ...]
164
165
166 class PublishSubsystem(GoalSubsystem):
167 name = "publish"
168 help = "Publish deliverables (assets, distributions, images, etc)."
169
170 @classmethod
171 def activated(cls, union_membership: UnionMembership) -> bool:
172 return PackageFieldSet in union_membership and PublishFieldSet in union_membership
173
174 output = StrOption(
175 "--output",
176 default=None,
177 help="Filename for JSON structured publish information.",
178 )
179
180
181 class Publish(Goal):
182 subsystem_cls = PublishSubsystem
183
184
185 @goal_rule
186 async def run_publish(console: Console, publish: PublishSubsystem) -> Publish:
187 target_roots_to_package_field_sets, target_roots_to_publish_field_sets = await MultiGet(
188 Get(
189 TargetRootsToFieldSets,
190 TargetRootsToFieldSetsRequest(
191 PackageFieldSet,
192 goal_description="",
193 # Don't warn/error here because it's already covered by `PublishFieldSet`.
194 no_applicable_targets_behavior=NoApplicableTargetsBehavior.ignore,
195 ),
196 ),
197 Get(
198 TargetRootsToFieldSets,
199 TargetRootsToFieldSetsRequest(
200 PublishFieldSet,
201 goal_description="the `publish` goal",
202 no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
203 ),
204 ),
205 )
206
207 # Only keep field sets that both package something, and have something to publish.
208 targets = set(target_roots_to_package_field_sets.targets).intersection(
209 set(target_roots_to_publish_field_sets.targets)
210 )
211
212 if not targets:
213 return Publish(exit_code=0)
214
215 # Build all packages and request the processes to run for each field set.
216 processes = await MultiGet(
217 Get(
218 PublishProcesses,
219 PublishProcessesRequest(
220 target_roots_to_package_field_sets.mapping[tgt],
221 target_roots_to_publish_field_sets.mapping[tgt],
222 ),
223 )
224 for tgt in targets
225 )
226
227 # Run all processes interactively.
228 exit_code: int = 0
229 outputs: list[PublishOutputData] = []
230 results: list[str] = []
231
232 for pub in chain.from_iterable(processes):
233 if not pub.process:
234 sigil = console.sigil_skipped()
235 status = "skipped"
236 if pub.description:
237 status += f" {pub.description}"
238 for name in pub.names:
239 results.append(f"{sigil} {name} {status}.")
240 outputs.append(pub.get_output_data(published=False, status=status))
241 continue
242
243 res = await Effect(InteractiveProcessResult, InteractiveProcess, pub.process)
244 if res.exit_code == 0:
245 sigil = console.sigil_succeeded()
246 status = "published"
247 prep = "to"
248 else:
249 sigil = console.sigil_failed()
250 status = "failed"
251 prep = "for"
252 exit_code = res.exit_code
253
254 if pub.description:
255 status += f" {prep} {pub.description}"
256
257 for name in pub.names:
258 results.append(f"{sigil} {name} {status}.")
259
260 outputs.append(
261 pub.get_output_data(
262 exit_code=res.exit_code,
263 published=res.exit_code == 0,
264 status=status,
265 )
266 )
267
268 console.print_stderr("")
269 if not results:
270 sigil = console.sigil_skipped()
271 console.print_stderr(f"{sigil} Nothing published.")
272
273 # We collect all results to the end, so all output from the interactive processes are done,
274 # before printing the results.
275 for line in results:
276 console.print_stderr(line)
277
278 # Log structured output
279 output_data = json.dumps(outputs, cls=_PublishJsonEncoder, indent=2, sort_keys=True)
280 logger.debug(f"Publish result data:\n{output_data}")
281 if publish.output:
282 with open(publish.output, mode="w") as fd:
283 fd.write(output_data)
284
285 return Publish(exit_code)
286
287
288 class _PublishJsonEncoder(json.JSONEncoder):
289 safe_to_str_types = (Address,)
290
291 def default(self, o):
292 """Return a serializable object for o."""
293 if is_dataclass(o):
294 return asdict(o)
295 if isinstance(o, collections.abc.Mapping):
296 return dict(o)
297 if isinstance(o, collections.abc.Sequence):
298 return list(o)
299 try:
300 return super().default(o)
301 except TypeError:
302 return str(o)
303
304
305 @rule
306 async def package_for_publish(request: PublishProcessesRequest) -> PublishProcesses:
307 packages = await MultiGet(
308 Get(BuiltPackage, PackageFieldSet, field_set) for field_set in request.package_field_sets
309 )
310
311 for pkg in packages:
312 for artifact in pkg.artifacts:
313 if artifact.relpath:
314 logger.info(f"Packaged {artifact.relpath}")
315 elif artifact.extra_log_lines:
316 logger.info(str(artifact.extra_log_lines[0]))
317
318 publish = await MultiGet(
319 Get(
320 PublishProcesses,
321 PublishRequest,
322 field_set._request(packages),
323 )
324 for field_set in request.publish_field_sets
325 )
326
327 # Flatten and dress each publish processes collection with data about its origin.
328 publish_processes = [
329 replace(
330 publish_process,
331 data=PublishOutputData({**publish_process.data, **field_set.get_output_data()}),
332 )
333 for processes, field_set in zip(publish, request.publish_field_sets)
334 for publish_process in processes
335 ]
336
337 return PublishProcesses(publish_processes)
338
339
340 def rules():
341 return collect_rules()
342
[end of src/python/pants/core/goals/publish.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/core/goals/publish.py b/src/python/pants/core/goals/publish.py
--- a/src/python/pants/core/goals/publish.py
+++ b/src/python/pants/core/goals/publish.py
@@ -240,6 +240,7 @@
outputs.append(pub.get_output_data(published=False, status=status))
continue
+ logger.debug(f"Execute {pub.process}")
res = await Effect(InteractiveProcessResult, InteractiveProcess, pub.process)
if res.exit_code == 0:
sigil = console.sigil_succeeded()
| {"golden_diff": "diff --git a/src/python/pants/core/goals/publish.py b/src/python/pants/core/goals/publish.py\n--- a/src/python/pants/core/goals/publish.py\n+++ b/src/python/pants/core/goals/publish.py\n@@ -240,6 +240,7 @@\n outputs.append(pub.get_output_data(published=False, status=status))\n continue\n \n+ logger.debug(f\"Execute {pub.process}\")\n res = await Effect(InteractiveProcessResult, InteractiveProcess, pub.process)\n if res.exit_code == 0:\n sigil = console.sigil_succeeded()\n", "issue": "Add `update_env` to `process_execution::local`.\nReplaces any `{chroot}` placeholders in the requested process env with the sandbox workdir.\r\n\r\nThis reflects the behavior for interactive processes executed with the `run` goal.\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\"\"\"Goal for publishing packaged targets to any repository or registry etc.\n\nPlugins implement the publish protocol that provides this goal with the processes to run in order to\npublish the artifacts.\n\nThe publish protocol consists of defining two union members and one rule, returning the processes to\nrun. See the doc for the corresponding classses in this module for details on the classes to define.\n\nExample rule:\n\n @rule\n async def publish_example(request: PublishToMyRepoRequest, ...) -> PublishProcesses:\n # Create `InteractiveProcess` instances as required by the `request`.\n return PublishProcesses(...)\n\"\"\"\n\n\nfrom __future__ import annotations\n\nimport collections\nimport json\nimport logging\nfrom abc import ABCMeta\nfrom dataclasses import asdict, dataclass, field, is_dataclass, replace\nfrom itertools import chain\nfrom typing import ClassVar, Generic, Type, TypeVar\n\nfrom typing_extensions import final\n\nfrom pants.core.goals.package import BuiltPackage, PackageFieldSet\nfrom pants.engine.addresses import Address\nfrom pants.engine.collection import Collection\nfrom pants.engine.console import Console\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.process import InteractiveProcess, InteractiveProcessResult\nfrom pants.engine.rules import Effect, Get, MultiGet, collect_rules, goal_rule, rule\nfrom pants.engine.target import (\n FieldSet,\n ImmutableValue,\n NoApplicableTargetsBehavior,\n TargetRootsToFieldSets,\n TargetRootsToFieldSetsRequest,\n)\nfrom pants.engine.unions import UnionMembership, UnionRule, union\nfrom pants.option.option_types import StrOption\nfrom pants.util.frozendict import FrozenDict\n\nlogger = logging.getLogger(__name__)\n\n\n_F = TypeVar(\"_F\", bound=FieldSet)\n\n\nclass PublishOutputData(FrozenDict[str, ImmutableValue]):\n pass\n\n\n@union\n@dataclass(frozen=True)\nclass PublishRequest(Generic[_F]):\n \"\"\"Implement a union member subclass of this union class along with a PublishFieldSet subclass\n that appoints that member subclass in order to receive publish requests for targets compatible\n with the field set.\n\n The `packages` hold all artifacts produced for a given target to be published.\n\n Example:\n\n PublishToMyRepoRequest(PublishRequest):\n pass\n\n PublishToMyRepoFieldSet(PublishFieldSet):\n publish_request_type = PublishToMyRepoRequest\n\n # Standard FieldSet semantics from here on:\n required_fields = (MyRepositories,)\n ...\n \"\"\"\n\n field_set: _F\n packages: tuple[BuiltPackage, ...]\n\n\n_T = TypeVar(\"_T\", bound=PublishRequest)\n\n\n@union\n@dataclass(frozen=True)\nclass PublishFieldSet(Generic[_T], 
FieldSet, metaclass=ABCMeta):\n \"\"\"FieldSet for PublishRequest.\n\n Union members may list any fields required to fulfill the instantiation of the\n `PublishProcesses` result of the publish rule.\n \"\"\"\n\n # Subclasses must provide this, to a union member (subclass) of `PublishRequest`.\n publish_request_type: ClassVar[Type[_T]]\n\n @final\n def _request(self, packages: tuple[BuiltPackage, ...]) -> _T:\n \"\"\"Internal helper for the core publish goal.\"\"\"\n return self.publish_request_type(field_set=self, packages=packages)\n\n @final\n @classmethod\n def rules(cls) -> tuple[UnionRule, ...]:\n \"\"\"Helper method for registering the union members.\"\"\"\n return (\n UnionRule(PublishFieldSet, cls),\n UnionRule(PublishRequest, cls.publish_request_type),\n )\n\n def get_output_data(self) -> PublishOutputData:\n return PublishOutputData({\"target\": self.address})\n\n\n@dataclass(frozen=True)\nclass PublishPackages:\n \"\"\"Processes to run in order to publish the named artifacts.\n\n The `names` should list all artifacts being published by the `process` command.\n\n The `process` may be `None`, indicating that it will not be published. This will be logged as\n `skipped`. If the process returns a non zero exit code, it will be logged as `failed`.\n\n The `description` may be a reason explaining why the publish was skipped, or identifying which\n repository the artifacts are published to.\n \"\"\"\n\n names: tuple[str, ...]\n process: InteractiveProcess | None = None\n description: str | None = None\n data: PublishOutputData = field(default_factory=PublishOutputData)\n\n def get_output_data(self, **extra_data) -> PublishOutputData:\n return PublishOutputData(\n {\n \"names\": self.names,\n **self.data,\n **extra_data,\n }\n )\n\n\nclass PublishProcesses(Collection[PublishPackages]):\n \"\"\"Collection of what processes to run for all built packages.\n\n This is returned from implementing rules in response to a PublishRequest.\n\n Depending on the capabilities of the publishing tool, the work may be partitioned based on\n number of artifacts and/or repositories to publish to.\n \"\"\"\n\n\n@dataclass(frozen=True)\nclass PublishProcessesRequest:\n \"\"\"Internal request taking all field sets for a target and turning it into a `PublishProcesses`\n collection (via registered publish plugins).\"\"\"\n\n package_field_sets: tuple[PackageFieldSet, ...]\n publish_field_sets: tuple[PublishFieldSet, ...]\n\n\nclass PublishSubsystem(GoalSubsystem):\n name = \"publish\"\n help = \"Publish deliverables (assets, distributions, images, etc).\"\n\n @classmethod\n def activated(cls, union_membership: UnionMembership) -> bool:\n return PackageFieldSet in union_membership and PublishFieldSet in union_membership\n\n output = StrOption(\n \"--output\",\n default=None,\n help=\"Filename for JSON structured publish information.\",\n )\n\n\nclass Publish(Goal):\n subsystem_cls = PublishSubsystem\n\n\n@goal_rule\nasync def run_publish(console: Console, publish: PublishSubsystem) -> Publish:\n target_roots_to_package_field_sets, target_roots_to_publish_field_sets = await MultiGet(\n Get(\n TargetRootsToFieldSets,\n TargetRootsToFieldSetsRequest(\n PackageFieldSet,\n goal_description=\"\",\n # Don't warn/error here because it's already covered by `PublishFieldSet`.\n no_applicable_targets_behavior=NoApplicableTargetsBehavior.ignore,\n ),\n ),\n Get(\n TargetRootsToFieldSets,\n TargetRootsToFieldSetsRequest(\n PublishFieldSet,\n goal_description=\"the `publish` goal\",\n 
no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,\n ),\n ),\n )\n\n # Only keep field sets that both package something, and have something to publish.\n targets = set(target_roots_to_package_field_sets.targets).intersection(\n set(target_roots_to_publish_field_sets.targets)\n )\n\n if not targets:\n return Publish(exit_code=0)\n\n # Build all packages and request the processes to run for each field set.\n processes = await MultiGet(\n Get(\n PublishProcesses,\n PublishProcessesRequest(\n target_roots_to_package_field_sets.mapping[tgt],\n target_roots_to_publish_field_sets.mapping[tgt],\n ),\n )\n for tgt in targets\n )\n\n # Run all processes interactively.\n exit_code: int = 0\n outputs: list[PublishOutputData] = []\n results: list[str] = []\n\n for pub in chain.from_iterable(processes):\n if not pub.process:\n sigil = console.sigil_skipped()\n status = \"skipped\"\n if pub.description:\n status += f\" {pub.description}\"\n for name in pub.names:\n results.append(f\"{sigil} {name} {status}.\")\n outputs.append(pub.get_output_data(published=False, status=status))\n continue\n\n res = await Effect(InteractiveProcessResult, InteractiveProcess, pub.process)\n if res.exit_code == 0:\n sigil = console.sigil_succeeded()\n status = \"published\"\n prep = \"to\"\n else:\n sigil = console.sigil_failed()\n status = \"failed\"\n prep = \"for\"\n exit_code = res.exit_code\n\n if pub.description:\n status += f\" {prep} {pub.description}\"\n\n for name in pub.names:\n results.append(f\"{sigil} {name} {status}.\")\n\n outputs.append(\n pub.get_output_data(\n exit_code=res.exit_code,\n published=res.exit_code == 0,\n status=status,\n )\n )\n\n console.print_stderr(\"\")\n if not results:\n sigil = console.sigil_skipped()\n console.print_stderr(f\"{sigil} Nothing published.\")\n\n # We collect all results to the end, so all output from the interactive processes are done,\n # before printing the results.\n for line in results:\n console.print_stderr(line)\n\n # Log structured output\n output_data = json.dumps(outputs, cls=_PublishJsonEncoder, indent=2, sort_keys=True)\n logger.debug(f\"Publish result data:\\n{output_data}\")\n if publish.output:\n with open(publish.output, mode=\"w\") as fd:\n fd.write(output_data)\n\n return Publish(exit_code)\n\n\nclass _PublishJsonEncoder(json.JSONEncoder):\n safe_to_str_types = (Address,)\n\n def default(self, o):\n \"\"\"Return a serializable object for o.\"\"\"\n if is_dataclass(o):\n return asdict(o)\n if isinstance(o, collections.abc.Mapping):\n return dict(o)\n if isinstance(o, collections.abc.Sequence):\n return list(o)\n try:\n return super().default(o)\n except TypeError:\n return str(o)\n\n\n@rule\nasync def package_for_publish(request: PublishProcessesRequest) -> PublishProcesses:\n packages = await MultiGet(\n Get(BuiltPackage, PackageFieldSet, field_set) for field_set in request.package_field_sets\n )\n\n for pkg in packages:\n for artifact in pkg.artifacts:\n if artifact.relpath:\n logger.info(f\"Packaged {artifact.relpath}\")\n elif artifact.extra_log_lines:\n logger.info(str(artifact.extra_log_lines[0]))\n\n publish = await MultiGet(\n Get(\n PublishProcesses,\n PublishRequest,\n field_set._request(packages),\n )\n for field_set in request.publish_field_sets\n )\n\n # Flatten and dress each publish processes collection with data about its origin.\n publish_processes = [\n replace(\n publish_process,\n data=PublishOutputData({**publish_process.data, **field_set.get_output_data()}),\n )\n for processes, field_set in zip(publish, 
request.publish_field_sets)\n for publish_process in processes\n ]\n\n return PublishProcesses(publish_processes)\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/core/goals/publish.py"}]} | 3,861 | 129 |